aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-10-02 16:38:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-02 16:38:27 -0400
commitaecdc33e111b2c447b622e287c6003726daa1426 (patch)
tree3e7657eae4b785e1a1fb5dfb225dbae0b2f0cfc6
parenta20acf99f75e49271381d65db097c9763060a1e8 (diff)
parenta3a6cab5ea10cca64d036851fe0d932448f2fe4f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller: 1) GRE now works over ipv6, from Dmitry Kozlov. 2) Make SCTP more network namespace aware, from Eric Biederman. 3) TEAM driver now works with non-ethernet devices, from Jiri Pirko. 4) Make openvswitch network namespace aware, from Pravin B Shelar. 5) IPV6 NAT implementation, from Patrick McHardy. 6) Server side support for TCP Fast Open, from Jerry Chu and others. 7) Packet BPF filter supports MOD and XOR, from Eric Dumazet and Daniel Borkmann. 8) Increase the loopback default MTU to 64K, from Eric Dumazet. 9) Use a per-task rather than per-socket page fragment allocator for outgoing networking traffic. This benefits processes that have very many mostly idle sockets, which is quite common. From Eric Dumazet. 10) Use up to 32K for page fragment allocations, with fallbacks to smaller sizes when higher order page allocations fail. Benefits are a) less segments for driver to process b) less calls to page allocator c) less waste of space. From Eric Dumazet. 11) Allow GRO to be used on GRE tunnels, from Eric Dumazet. 12) VXLAN device driver, one way to handle VLAN issues such as the limitation of 4096 VLAN IDs yet still have some level of isolation. From Stephen Hemminger. 13) As usual there is a large boatload of driver changes, with the scale perhaps tilted towards the wireless side this time around. Fix up various fairly trivial conflicts, mostly caused by the user namespace changes. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1012 commits) hyperv: Add buffer for extended info after the RNDIS response message. 
hyperv: Report actual status in receive completion packet hyperv: Remove extra allocated space for recv_pkt_list elements hyperv: Fix page buffer handling in rndis_filter_send_request() hyperv: Fix the missing return value in rndis_filter_set_packet_filter() hyperv: Fix the max_xfer_size in RNDIS initialization vxlan: put UDP socket in correct namespace vxlan: Depend on CONFIG_INET sfc: Fix the reported priorities of different filter types sfc: Remove EFX_FILTER_FLAG_RX_OVERRIDE_IP sfc: Fix loopback self-test with separate_tx_channels=1 sfc: Fix MCDI structure field lookup sfc: Add parentheses around use of bitfield macro arguments sfc: Fix null function pointer in efx_sriov_channel_type vxlan: virtual extensible lan igmp: export symbol ip_mc_leave_group netlink: add attributes to fdb interface tg3: unconditionally select HWMON support when tg3 is enabled. Revert "net: ti cpsw ethernet: allow reading phy interface mode from DT" gre: fix sparse warning ...
-rw-r--r--Documentation/ABI/testing/sysfs-ptp6
-rw-r--r--Documentation/devicetree/bindings/net/can/c_can.txt49
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt109
-rw-r--r--Documentation/devicetree/bindings/net/davinci-mdio.txt33
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt75
-rw-r--r--Documentation/filesystems/nfs/nfsroot.txt10
-rw-r--r--Documentation/infiniband/ipoib.txt3
-rw-r--r--Documentation/networking/batman-adv.txt7
-rw-r--r--Documentation/networking/bonding.txt30
-rw-r--r--Documentation/networking/ip-sysctl.txt37
-rw-r--r--Documentation/networking/stmmac.txt5
-rw-r--r--Documentation/networking/vxlan.txt47
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/m68k/configs/amiga_defconfig1
-rw-r--r--arch/m68k/configs/apollo_defconfig1
-rw-r--r--arch/m68k/configs/atari_defconfig1
-rw-r--r--arch/m68k/configs/bvme6000_defconfig1
-rw-r--r--arch/m68k/configs/hp300_defconfig1
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/configs/multi_defconfig1
-rw-r--r--arch/m68k/configs/mvme147_defconfig1
-rw-r--r--arch/m68k/configs/mvme16x_defconfig1
-rw-r--r--arch/m68k/configs/q40_defconfig1
-rw-r--r--arch/m68k/configs/sun3_defconfig1
-rw-r--r--arch/m68k/configs/sun3x_defconfig1
-rw-r--r--arch/mips/configs/ar7_defconfig1
-rw-r--r--arch/mips/configs/bcm47xx_defconfig1
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/jazz_defconfig1
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/markeins_defconfig1
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig1
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/sparc/net/bpf_jit_comp.c4
-rw-r--r--arch/tile/configs/tilegx_defconfig1
-rw-r--r--arch/tile/configs/tilepro_defconfig1
-rw-r--r--arch/x86/net/bpf_jit_comp.c34
-rw-r--r--crypto/crypto_user.c7
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/bcma/Kconfig4
-rw-r--r--drivers/bcma/bcma_private.h2
-rw-r--r--drivers/bcma/core.c2
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c28
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c9
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c123
-rw-r--r--drivers/bcma/driver_pci.c6
-rw-r--r--drivers/bcma/driver_pci_host.c8
-rw-r--r--drivers/bcma/host_pci.c12
-rw-r--r--drivers/bcma/host_soc.c2
-rw-r--r--drivers/bcma/main.c27
-rw-r--r--drivers/bcma/sprom.c2
-rw-r--r--drivers/bluetooth/bcm203x.c8
-rw-r--r--drivers/bluetooth/bfusb.c12
-rw-r--r--drivers/bluetooth/bluecard_cs.c7
-rw-r--r--drivers/bluetooth/bpa10x.c8
-rw-r--r--drivers/bluetooth/bt3c_cs.c5
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c18
-rw-r--r--drivers/bluetooth/btsdio.c8
-rw-r--r--drivers/bluetooth/btuart_cs.c7
-rw-r--r--drivers/bluetooth/btusb.c16
-rw-r--r--drivers/bluetooth/btwilink.c24
-rw-r--r--drivers/bluetooth/dtl1_cs.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_ll.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/connector/connector.c3
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/Makefile3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c34
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c41
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c172
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c124
-rw-r--r--drivers/isdn/gigaset/common.c1
-rw-r--r--drivers/net/Kconfig17
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/bonding/bond_main.c140
-rw-r--r--drivers/net/can/c_can/c_can.c130
-rw-r--r--drivers/net/can/c_can/c_can.h14
-rw-r--r--drivers/net/can/c_can/c_can_pci.c6
-rw-r--r--drivers/net/can/c_can/c_can_platform.c123
-rw-r--r--drivers/net/can/flexcan.c29
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.c31
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c8
-rw-r--r--drivers/net/ethernet/Kconfig9
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h109
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1701
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c35
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c116
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h5
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c534
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h51
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c954
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c341
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c734
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h80
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h185
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h97
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c55
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c96
-rw-r--r--drivers/net/ethernet/freescale/Kconfig7
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c549
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.h52
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h11
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c274
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig2
-rw-r--r--drivers/net/ethernet/i825xx/znet.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c39
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c44
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c31
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c29
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h41
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c198
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c711
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c677
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c300
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c573
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c105
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c272
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c122
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mipsnet.c345
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c17
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/sfc/Kconfig7
-rw-r--r--drivers/net/ethernet/sfc/Makefile1
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h22
-rw-r--r--drivers/net/ethernet/sfc/efx.c250
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.c108
-rw-r--r--drivers/net/ethernet/sfc/filter.h7
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c49
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h12
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h29
-rw-r--r--drivers/net/ethernet/sfc/mtd.c7
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h78
-rw-r--r--drivers/net/ethernet/sfc/nic.c6
-rw-r--r--drivers/net/ethernet/sfc/nic.h36
-rw-r--r--drivers/net/ethernet/sfc/ptp.c1484
-rw-r--r--drivers/net/ethernet/sfc/rx.c20
-rw-r--r--drivers/net/ethernet/sfc/selftest.c3
-rw-r--r--drivers/net/ethernet/sfc/siena.c1
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c8
-rw-r--r--drivers/net/ethernet/sfc/tx.c627
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c39
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/ti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c179
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c41
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c3
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h4
-rw-r--r--drivers/net/hyperv/netvsc.c22
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/hyperv/rndis_filter.c60
-rw-r--r--drivers/net/ieee802154/Kconfig (renamed from drivers/ieee802154/Kconfig)11
-rw-r--r--drivers/net/ieee802154/Makefile (renamed from drivers/ieee802154/Makefile)1
-rw-r--r--drivers/net/ieee802154/at86rf230.c (renamed from drivers/ieee802154/at86rf230.c)12
-rw-r--r--drivers/net/ieee802154/fakehard.c (renamed from drivers/ieee802154/fakehard.c)1
-rw-r--r--drivers/net/ieee802154/fakelb.c (renamed from drivers/ieee802154/fakelb.c)0
-rw-r--r--drivers/net/ieee802154/mrf24j40.c767
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/phy/Kconfig13
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/lxt.c127
-rw-r--r--drivers/net/phy/mdio-gpio.c132
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c171
-rw-r--r--drivers/net/phy/phy.c74
-rw-r--r--drivers/net/ppp/ppp_generic.c58
-rw-r--r--drivers/net/team/Kconfig4
-rw-r--r--drivers/net/team/team.c342
-rw-r--r--drivers/net/team/team_mode_broadcast.c8
-rw-r--r--drivers/net/team/team_mode_roundrobin.c8
-rw-r--r--drivers/net/usb/asix_devices.c40
-rw-r--r--drivers/net/usb/catc.c55
-rw-r--r--drivers/net/usb/cx82310_eth.c11
-rw-r--r--drivers/net/usb/gl620a.c10
-rw-r--r--drivers/net/usb/kaweth.c134
-rw-r--r--drivers/net/usb/net1080.c51
-rw-r--r--drivers/net/usb/qmi_wwan.c47
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/usb/sierra_net.c25
-rw-r--r--drivers/net/usb/smsc75xx.c240
-rw-r--r--drivers/net/usb/smsc95xx.c560
-rw-r--r--drivers/net/usb/smsc95xx.h12
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c1219
-rw-r--r--drivers/net/wimax/i2400m/driver.c3
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/airo.c7
-rw-r--r--drivers/net/wireless/at76c50x-usb.c58
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c45
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c117
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c288
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c197
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h95
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h1231
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c72
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c94
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c819
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c15
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h5
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c16
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c16
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h10
-rw-r--r--drivers/net/wireless/b43/main.c54
-rw-r--r--drivers/net/wireless/b43/phy_common.c17
-rw-r--r--drivers/net/wireless/b43/phy_common.h6
-rw-r--r--drivers/net/wireless/b43/phy_n.c668
-rw-r--r--drivers/net/wireless/b43/phy_n.h1
-rw-r--r--drivers/net/wireless/b43/radio_2057.c141
-rw-r--r--drivers/net/wireless/b43/radio_2057.h430
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c75
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h10
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c27
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h62
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c73
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c65
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1047
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c353
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c3135
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h296
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c13
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h5
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c15
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c11
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c12
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c26
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h8
-rw-r--r--drivers/net/wireless/iwlegacy/common.c19
-rw-r--r--drivers/net/wireless/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c56
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c167
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c91
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c112
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c51
-rw-r--r--drivers/net/wireless/libertas/cmd.c16
-rw-r--r--drivers/net/wireless/libertas/cmd.h1
-rw-r--r--drivers/net/wireless/libertas/main.c4
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c59
-rw-r--r--drivers/net/wireless/mwifiex/11n.c64
-rw-r--r--drivers/net/wireless/mwifiex/11n.h20
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c115
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h10
-rw-r--r--drivers/net/wireless/mwifiex/Makefile2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c460
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/decl.h9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h93
-rw-r--r--drivers/net/wireless/mwifiex/ie.c88
-rw-r--r--drivers/net/wireless/mwifiex/init.c126
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h14
-rw-r--r--drivers/net/wireless/mwifiex/main.c39
-rw-r--r--drivers/net/wireless/mwifiex/main.h87
-rw-r--r--drivers/net/wireless/mwifiex/scan.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c150
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c77
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c74
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c124
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c44
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c11
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c62
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c290
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c340
-rw-r--r--drivers/net/wireless/mwifiex/util.c40
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c77
-rw-r--r--drivers/net/wireless/mwl8k.c17
-rw-r--r--drivers/net/wireless/orinoco/wext.c7
-rw-r--r--drivers/net/wireless/p54/eeprom.c108
-rw-r--r--drivers/net/wireless/p54/eeprom.h12
-rw-r--r--drivers/net/wireless/p54/lmac.h4
-rw-r--r--drivers/net/wireless/p54/main.c15
-rw-r--r--drivers/net/wireless/p54/p54pci.c88
-rw-r--r--drivers/net/wireless/p54/p54pci.h1
-rw-r--r--drivers/net/wireless/p54/txrx.c15
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h27
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h18
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h27
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c397
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c83
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c62
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h20
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c44
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h28
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h34
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c6
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/rtlwifi/base.c3
-rw-r--r--drivers/net/wireless/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c17
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h121
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c79
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h7
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c129
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h7
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c21
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h5
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/debug.h16
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c32
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c12
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c372
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c112
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h23
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h13
-rw-r--r--drivers/net/wireless/wl3501_cs.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c9
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nfc/Kconfig14
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/nfcwilink.c20
-rw-r--r--drivers/nfc/pn533.c107
-rw-r--r--drivers/nfc/pn544.c893
-rw-r--r--drivers/nfc/pn544_hci.c177
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_ixp46x.c2
-rw-r--r--drivers/ptp/ptp_pch.c2
-rw-r--r--drivers/ptp/ptp_private.h1
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c78
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/scsi_netlink.c557
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c7
-rw-r--r--drivers/ssb/driver_mipscore.c28
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c4
-rw-r--r--drivers/staging/winbond/wbusb.c4
-rw-r--r--firmware/Makefile1
-rw-r--r--firmware/cxgb3/t3fw-7.10.0.bin.ihex1935
-rw-r--r--fs/dlm/netlink.c8
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h111
-rw-r--r--include/linux/bcma/bcma_regs.h4
-rw-r--r--include/linux/etherdevice.h11
-rw-r--r--include/linux/ethtool.h17
-rw-r--r--include/linux/filter.h7
-rw-r--r--include/linux/hash.h10
-rw-r--r--include/linux/ieee80211.h80
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/if_link.h34
-rw-r--r--include/linux/if_team.h11
-rw-r--r--include/linux/if_tunnel.h3
-rw-r--r--include/linux/if_vlan.h9
-rw-r--r--include/linux/inetdevice.h11
-rw-r--r--include/linux/ip6_tunnel.h17
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/jiffies.h6
-rw-r--r--include/linux/mdio.h83
-rw-r--r--include/linux/netdevice.h51
-rw-r--r--include/linux/netfilter.h14
-rw-r--r--include/linux/netfilter/ipset/ip_set.h15
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h106
-rw-r--r--include/linux/netfilter/nf_conntrack_amanda.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_ftp.h7
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h15
-rw-r--r--include/linux/netfilter/nf_conntrack_irc.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h21
-rw-r--r--include/linux/netfilter/nf_nat.h8
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h8
-rw-r--r--include/linux/netfilter/nfnetlink_queue.h1
-rw-r--r--include/linux/netfilter/xt_time.h5
-rw-r--r--include/linux/netfilter_ipv4.h1
-rw-r--r--include/linux/netfilter_ipv6/Kbuild1
-rw-r--r--include/linux/netfilter_ipv6/ip6t_NPT.h16
-rw-r--r--include/linux/netlink.h39
-rw-r--r--include/linux/nfc.h11
-rw-r--r--include/linux/nl80211.h54
-rw-r--r--include/linux/of_mdio.h33
-rw-r--r--include/linux/packet_diag.h72
-rw-r--r--include/linux/pps_kernel.h9
-rw-r--r--include/linux/ptp_clock_kernel.h21
-rw-r--r--include/linux/rfkill.h31
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/snmp.h16
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h4
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/tcp.h49
-rw-r--r--include/linux/tcp_metrics.h54
-rw-r--r--include/linux/tipc_config.h8
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/arp.h3
-rw-r--r--include/net/bluetooth/bluetooth.h10
-rw-r--r--include/net/bluetooth/hci.h32
-rw-r--r--include/net/bluetooth/hci_core.h48
-rw-r--r--include/net/bluetooth/l2cap.h20
-rw-r--r--include/net/bluetooth/mgmt.h16
-rw-r--r--include/net/bluetooth/smp.h8
-rw-r--r--include/net/cfg80211.h68
-rw-r--r--include/net/checksum.h3
-rw-r--r--include/net/dst.h10
-rw-r--r--include/net/genetlink.h34
-rw-r--r--include/net/gro_cells.h103
-rw-r--r--include/net/ieee80211_radiotap.h11
-rw-r--r--include/net/inet_ecn.h76
-rw-r--r--include/net/inet_frag.h4
-rw-r--r--include/net/inet_sock.h4
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_fib.h1
-rw-r--r--include/net/ip6_tunnel.h41
-rw-r--r--include/net/ip_vs.h16
-rw-r--r--include/net/ipip.h3
-rw-r--r--include/net/ipv6.h33
-rw-r--r--include/net/llc.h1
-rw-r--r--include/net/mac80211.h99
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/neighbour.h14
-rw-r--r--include/net/net_namespace.h15
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h32
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h2
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h20
-rw-r--r--include/net/netfilter/nf_nat.h6
-rw-r--r--include/net/netfilter/nf_nat_core.h5
-rw-r--r--include/net/netfilter/nf_nat_helper.h11
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h52
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h72
-rw-r--r--include/net/netfilter/nf_nat_protocol.h67
-rw-r--r--include/net/netfilter/nf_nat_rule.h15
-rw-r--r--include/net/netlink.h124
-rw-r--r--include/net/netns/conntrack.h4
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/ipv6.h9
-rw-r--r--include/net/netns/packet.h4
-rw-r--r--include/net/netns/sctp.h131
-rw-r--r--include/net/nfc/hci.h21
-rw-r--r--include/net/nfc/llc.h54
-rw-r--r--include/net/nfc/nci.h29
-rw-r--r--include/net/nfc/nci_core.h5
-rw-r--r--include/net/nfc/nfc.h4
-rw-r--r--include/net/nfc/shdlc.h107
-rw-r--r--include/net/request_sock.h49
-rw-r--r--include/net/scm.h25
-rw-r--r--include/net/sctp/sctp.h69
-rw-r--r--include/net/sctp/sm.h8
-rw-r--r--include/net/sctp/structs.h154
-rw-r--r--include/net/snmp.h10
-rw-r--r--include/net/sock.h29
-rw-r--r--include/net/tcp.h90
-rw-r--r--include/net/xfrm.h8
-rw-r--r--include/scsi/scsi_netlink.h24
-rw-r--r--kernel/audit.c25
-rw-r--r--kernel/exit.c3
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/taskstats.c4
-rw-r--r--lib/kobject_uevent.c5
-rw-r--r--lib/nlattr.c4
-rw-r--r--net/8021q/vlan_core.c6
-rw-r--r--net/Kconfig2
-rw-r--r--net/atm/resources.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c86
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c214
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h11
-rw-r--r--net/batman-adv/debugfs.c12
-rw-r--r--net/batman-adv/gateway_client.c53
-rw-r--r--net/batman-adv/hard-interface.c13
-rw-r--r--net/batman-adv/main.c27
-rw-r--r--net/batman-adv/main.h29
-rw-r--r--net/batman-adv/packet.h35
-rw-r--r--net/batman-adv/routing.c85
-rw-r--r--net/batman-adv/send.c8
-rw-r--r--net/batman-adv/soft-interface.c79
-rw-r--r--net/batman-adv/soft-interface.h5
-rw-r--r--net/batman-adv/translation-table.c416
-rw-r--r--net/batman-adv/translation-table.h4
-rw-r--r--net/batman-adv/types.h120
-rw-r--r--net/batman-adv/unicast.c16
-rw-r--r--net/batman-adv/vis.c144
-rw-r--r--net/batman-adv/vis.h2
-rw-r--r--net/bluetooth/a2mp.c16
-rw-r--r--net/bluetooth/af_bluetooth.c139
-rw-r--r--net/bluetooth/bnep/sock.c22
-rw-r--r--net/bluetooth/cmtp/sock.c23
-rw-r--r--net/bluetooth/hci_conn.c100
-rw-r--r--net/bluetooth/hci_core.c12
-rw-r--r--net/bluetooth/hci_event.c190
-rw-r--r--net/bluetooth/hci_sock.c13
-rw-r--r--net/bluetooth/hidp/sock.c22
-rw-r--r--net/bluetooth/l2cap_core.c37
-rw-r--r--net/bluetooth/l2cap_sock.c22
-rw-r--r--net/bluetooth/mgmt.c62
-rw-r--r--net/bluetooth/rfcomm/sock.c14
-rw-r--r--net/bluetooth/sco.c16
-rw-r--r--net/bridge/br_fdb.c17
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/bridge/netfilter/ebt_ulog.c3
-rw-r--r--net/bridge/netfilter/ebtable_filter.c4
-rw-r--r--net/bridge/netfilter/ebtable_nat.c4
-rw-r--r--net/can/gw.c2
-rw-r--r--net/core/dev.c109
-rw-r--r--net/core/dev_addr_lists.c40
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/ethtool.c12
-rw-r--r--net/core/fib_rules.c6
-rw-r--r--net/core/filter.c27
-rw-r--r--net/core/link_watch.c8
-rw-r--r--net/core/neighbour.c8
-rw-r--r--net/core/net-sysfs.c18
-rw-r--r--net/core/netpoll.c5
-rw-r--r--net/core/netprio_cgroup.c41
-rw-r--r--net/core/request_sock.c95
-rw-r--r--net/core/rtnetlink.c38
-rw-r--r--net/core/scm.c17
-rw-r--r--net/core/secure_seq.c1
-rw-r--r--net/core/skbuff.c86
-rw-r--r--net/core/sock.c64
-rw-r--r--net/core/sock_diag.c3
-rw-r--r--net/core/utils.c20
-rw-r--r--net/dcb/dcbnl.c18
-rw-r--r--net/decnet/af_decnet.c4
-rw-r--r--net/decnet/dn_dev.c6
-rw-r--r--net/decnet/dn_route.c16
-rw-r--r--net/decnet/dn_table.c12
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c3
-rw-r--r--net/ieee802154/6lowpan.c53
-rw-r--r--net/ieee802154/nl-mac.c6
-rw-r--r--net/ieee802154/nl-phy.c6
-rw-r--r--net/ipv4/af_inet.c27
-rw-r--r--net/ipv4/devinet.c67
-rw-r--r--net/ipv4/fib_frontend.c25
-rw-r--r--net/ipv4/fib_semantics.c8
-rw-r--r--net/ipv4/fib_trie.c15
-rw-r--r--net/ipv4/igmp.c38
-rw-r--r--net/ipv4/inet_connection_sock.c57
-rw-r--r--net/ipv4/inet_diag.c32
-rw-r--r--net/ipv4/inet_fragment.c9
-rw-r--r--net/ipv4/ip_fragment.c13
-rw-r--r--net/ipv4/ip_gre.c128
-rw-r--r--net/ipv4/ip_output.c74
-rw-r--r--net/ipv4/ip_vti.c5
-rw-r--r--net/ipv4/ipconfig.c43
-rw-r--r--net/ipv4/ipip.c51
-rw-r--r--net/ipv4/ipmr.c12
-rw-r--r--net/ipv4/netfilter.c41
-rw-r--r--net/ipv4/netfilter/Kconfig90
-rw-r--r--net/ipv4/netfilter/Makefile18
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c18
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c98
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c110
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c3
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c2
-rw-r--r--net/ipv4/netfilter/iptable_filter.c10
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c10
-rw-r--r--net/ipv4/netfilter/iptable_nat.c (renamed from net/ipv4/netfilter/nf_nat_standalone.c)264
-rw-r--r--net/ipv4/netfilter/iptable_raw.c10
-rw-r--r--net/ipv4/netfilter/iptable_security.c5
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c71
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c281
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c21
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c30
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_icmp.c24
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c214
-rw-r--r--net/ipv4/proc.c4
-rw-r--r--net/ipv4/route.c30
-rw-r--r--net/ipv4/syncookies.c1
-rw-r--r--net/ipv4/sysctl_net_ipv4.c45
-rw-r--r--net/ipv4/tcp.c135
-rw-r--r--net/ipv4/tcp_fastopen.c83
-rw-r--r--net/ipv4/tcp_input.c281
-rw-r--r--net/ipv4/tcp_ipv4.c326
-rw-r--r--net/ipv4/tcp_metrics.c354
-rw-r--r--net/ipv4/tcp_minisocks.c75
-rw-r--r--net/ipv4/tcp_output.c27
-rw-r--r--net/ipv4/tcp_timer.c39
-rw-r--r--net/ipv4/udp_diag.c6
-rw-r--r--net/ipv6/Kconfig16
-rw-r--r--net/ipv6/Makefile1
-rw-r--r--net/ipv6/addrconf.c83
-rw-r--r--net/ipv6/addrlabel.c24
-rw-r--r--net/ipv6/ip6_fib.c20
-rw-r--r--net/ipv6/ip6_gre.c1770
-rw-r--r--net/ipv6/ip6_output.c85
-rw-r--r--net/ipv6/ip6_tunnel.c91
-rw-r--r--net/ipv6/ip6mr.c10
-rw-r--r--net/ipv6/netfilter.c8
-rw-r--r--net/ipv6/netfilter/Kconfig37
-rw-r--r--net/ipv6/netfilter/Makefile6
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c135
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c165
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c4
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c4
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c321
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c4
-rw-r--r--net/ipv6/netfilter/ip6table_security.c5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c137
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c218
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c288
-rw-r--r--net/ipv6/netfilter/nf_nat_proto_icmpv6.c90
-rw-r--r--net/ipv6/reassembly.c89
-rw-r--r--net/ipv6/route.c96
-rw-r--r--net/ipv6/sit.c6
-rw-r--r--net/ipv6/syncookies.c1
-rw-r--r--net/ipv6/tcp_ipv6.c40
-rw-r--r--net/irda/irnetlink.c2
-rw-r--r--net/key/af_key.c39
-rw-r--r--net/l2tp/Kconfig1
-rw-r--r--net/l2tp/l2tp_eth.c3
-rw-r--r--net/l2tp/l2tp_netlink.c24
-rw-r--r--net/llc/llc_station.c600
-rw-r--r--net/llc/sysctl_net_llc.c7
-rw-r--r--net/mac80211/aes_cmac.c6
-rw-r--r--net/mac80211/agg-tx.c2
-rw-r--r--net/mac80211/cfg.c103
-rw-r--r--net/mac80211/chan.c67
-rw-r--r--net/mac80211/debugfs.c36
-rw-r--r--net/mac80211/driver-ops.h11
-rw-r--r--net/mac80211/ibss.c50
-rw-r--r--net/mac80211/ieee80211_i.h53
-rw-r--r--net/mac80211/iface.c316
-rw-r--r--net/mac80211/key.c2
-rw-r--r--net/mac80211/main.c31
-rw-r--r--net/mac80211/mesh.c49
-rw-r--r--net/mac80211/mesh.h5
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mesh_pathtbl.c44
-rw-r--r--net/mac80211/mesh_plink.c85
-rw-r--r--net/mac80211/mlme.c443
-rw-r--r--net/mac80211/offchannel.c9
-rw-r--r--net/mac80211/rate.h2
-rw-r--r--net/mac80211/rx.c60
-rw-r--r--net/mac80211/scan.c53
-rw-r--r--net/mac80211/sta_info.c123
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/status.c42
-rw-r--r--net/mac80211/trace.h11
-rw-r--r--net/mac80211/tx.c73
-rw-r--r--net/mac80211/util.c108
-rw-r--r--net/netfilter/Kconfig83
-rw-r--r--net/netfilter/Makefile21
-rw-r--r--net/netfilter/core.c21
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c19
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c18
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c9
-rw-r--r--net/netfilter/ipset/ip_set_core.c39
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c15
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c24
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c24
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c47
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c25
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c66
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c36
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c9
-rw-r--r--net/netfilter/ipvs/Kconfig3
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c58
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c76
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c25
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c22
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c111
-rw-r--r--net/netfilter/nf_conntrack_amanda.c5
-rw-r--r--net/netfilter/nf_conntrack_core.c15
-rw-r--r--net/netfilter/nf_conntrack_ecache.c2
-rw-r--r--net/netfilter/nf_conntrack_ftp.c24
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c232
-rw-r--r--net/netfilter/nf_conntrack_irc.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c114
-rw-r--r--net/netfilter/nf_conntrack_pptp.c18
-rw-r--r--net/netfilter/nf_conntrack_proto.c5
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c8
-rw-r--r--net/netfilter/nf_conntrack_sip.c143
-rw-r--r--net/netfilter/nf_internals.h4
-rw-r--r--net/netfilter/nf_nat_amanda.c (renamed from net/ipv4/netfilter/nf_nat_amanda.c)4
-rw-r--r--net/netfilter/nf_nat_core.c (renamed from net/ipv4/netfilter/nf_nat_core.c)679
-rw-r--r--net/netfilter/nf_nat_ftp.c (renamed from net/ipv4/netfilter/nf_nat_ftp.c)34
-rw-r--r--net/netfilter/nf_nat_helper.c (renamed from net/ipv4/netfilter/nf_nat_helper.c)109
-rw-r--r--net/netfilter/nf_nat_irc.c (renamed from net/ipv4/netfilter/nf_nat_irc.c)10
-rw-r--r--net/netfilter/nf_nat_proto_common.c (renamed from net/ipv4/netfilter/nf_nat_proto_common.c)54
-rw-r--r--net/netfilter/nf_nat_proto_dccp.c (renamed from net/ipv4/netfilter/nf_nat_proto_dccp.c)56
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c (renamed from net/ipv4/netfilter/nf_nat_proto_sctp.c)53
-rw-r--r--net/netfilter/nf_nat_proto_tcp.c (renamed from net/ipv4/netfilter/nf_nat_proto_tcp.c)40
-rw-r--r--net/netfilter/nf_nat_proto_udp.c (renamed from net/ipv4/netfilter/nf_nat_proto_udp.c)42
-rw-r--r--net/netfilter/nf_nat_proto_udplite.c (renamed from net/ipv4/netfilter/nf_nat_proto_udplite.c)58
-rw-r--r--net/netfilter/nf_nat_proto_unknown.c (renamed from net/ipv4/netfilter/nf_nat_proto_unknown.c)16
-rw-r--r--net/netfilter/nf_nat_sip.c (renamed from net/ipv4/netfilter/nf_nat_sip.c)270
-rw-r--r--net/netfilter/nf_nat_tftp.c (renamed from net/ipv4/netfilter/nf_nat_tftp.c)1
-rw-r--r--net/netfilter/nf_queue.c10
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nfnetlink_acct.c16
-rw-r--r--net/netfilter/nfnetlink_cthelper.c17
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c12
-rw-r--r--net/netfilter/nfnetlink_log.c18
-rw-r--r--net/netfilter/nfnetlink_queue_core.c50
-rw-r--r--net/netfilter/xt_CT.c262
-rw-r--r--net/netfilter/xt_NETMAP.c165
-rw-r--r--net/netfilter/xt_NFQUEUE.c8
-rw-r--r--net/netfilter/xt_NOTRACK.c53
-rw-r--r--net/netfilter/xt_REDIRECT.c190
-rw-r--r--net/netfilter/xt_nat.c170
-rw-r--r--net/netfilter/xt_osf.c2
-rw-r--r--net/netfilter/xt_set.c22
-rw-r--r--net/netfilter/xt_socket.c12
-rw-r--r--net/netfilter/xt_time.c24
-rw-r--r--net/netlabel/netlabel_cipso_v4.c2
-rw-r--r--net/netlabel/netlabel_mgmt.c4
-rw-r--r--net/netlabel/netlabel_unlabeled.c2
-rw-r--r--net/netlink/af_netlink.c210
-rw-r--r--net/netlink/genetlink.c48
-rw-r--r--net/nfc/core.c13
-rw-r--r--net/nfc/hci/Makefile4
-rw-r--r--net/nfc/hci/command.c45
-rw-r--r--net/nfc/hci/core.c336
-rw-r--r--net/nfc/hci/hci.h15
-rw-r--r--net/nfc/hci/hcp.c6
-rw-r--r--net/nfc/hci/llc.c170
-rw-r--r--net/nfc/hci/llc.h69
-rw-r--r--net/nfc/hci/llc_nop.c99
-rw-r--r--net/nfc/hci/llc_shdlc.c (renamed from net/nfc/hci/shdlc.c)544
-rw-r--r--net/nfc/llcp/commands.c2
-rw-r--r--net/nfc/llcp/llcp.c131
-rw-r--r--net/nfc/llcp/llcp.h6
-rw-r--r--net/nfc/llcp/sock.c93
-rw-r--r--net/nfc/nci/core.c91
-rw-r--r--net/nfc/nci/ntf.c52
-rw-r--r--net/nfc/nci/rsp.c14
-rw-r--r--net/nfc/netlink.c68
-rw-r--r--net/openvswitch/actions.c6
-rw-r--r--net/openvswitch/datapath.c453
-rw-r--r--net/openvswitch/datapath.h52
-rw-r--r--net/openvswitch/dp_notify.c8
-rw-r--r--net/openvswitch/flow.c21
-rw-r--r--net/openvswitch/flow.h3
-rw-r--r--net/openvswitch/vport-internal_dev.c7
-rw-r--r--net/openvswitch/vport-netdev.c2
-rw-r--r--net/openvswitch/vport.c27
-rw-r--r--net/openvswitch/vport.h13
-rw-r--r--net/packet/Kconfig8
-rw-r--r--net/packet/Makefile2
-rw-r--r--net/packet/af_packet.c143
-rw-r--r--net/packet/diag.c242
-rw-r--r--net/packet/internal.h121
-rw-r--r--net/phonet/pn_netlink.c14
-rw-r--r--net/rds/tcp_connect.c4
-rw-r--r--net/rds/tcp_listen.c4
-rw-r--r--net/rds/tcp_recv.c4
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rfkill/core.c22
-rw-r--r--net/sched/act_api.c52
-rw-r--r--net/sched/cls_api.c14
-rw-r--r--net/sched/em_meta.c2
-rw-r--r--net/sched/sch_api.c44
-rw-r--r--net/sched/sch_drr.c2
-rw-r--r--net/sched/sch_generic.c27
-rw-r--r--net/sched/sch_qfq.c2
-rw-r--r--net/sctp/associola.c25
-rw-r--r--net/sctp/auth.c20
-rw-r--r--net/sctp/bind_addr.c20
-rw-r--r--net/sctp/chunk.c2
-rw-r--r--net/sctp/endpointola.c25
-rw-r--r--net/sctp/input.c115
-rw-r--r--net/sctp/ipv6.c36
-rw-r--r--net/sctp/objcnt.c8
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/outqueue.c28
-rw-r--r--net/sctp/primitive.c4
-rw-r--r--net/sctp/proc.c55
-rw-r--r--net/sctp/protocol.c454
-rw-r--r--net/sctp/sm_make_chunk.c61
-rw-r--r--net/sctp/sm_sideeffect.c26
-rw-r--r--net/sctp/sm_statefuns.c725
-rw-r--r--net/sctp/sm_statetable.c17
-rw-r--r--net/sctp/socket.c119
-rw-r--r--net/sctp/sysctl.c198
-rw-r--r--net/sctp/transport.c23
-rw-r--r--net/sctp/ulpqueue.c18
-rw-r--r--net/socket.c89
-rw-r--r--net/tipc/bearer.c21
-rw-r--r--net/tipc/config.c48
-rw-r--r--net/tipc/core.c22
-rw-r--r--net/tipc/core.h18
-rw-r--r--net/tipc/eth_media.c29
-rw-r--r--net/tipc/handler.c2
-rw-r--r--net/tipc/link.c4
-rw-r--r--net/tipc/name_table.c16
-rw-r--r--net/tipc/net.c3
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/subscr.c4
-rw-r--r--net/unix/af_unix.c17
-rw-r--r--net/unix/diag.c14
-rw-r--r--net/wireless/chan.c7
-rw-r--r--net/wireless/core.c53
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/mlme.c37
-rw-r--r--net/wireless/nl80211.c266
-rw-r--r--net/wireless/nl80211.h5
-rw-r--r--net/wireless/radiotap.c2
-rw-r--r--net/wireless/reg.c45
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/wireless/util.c36
-rw-r--r--net/wireless/wext-core.c8
-rw-r--r--net/xfrm/xfrm_policy.c83
-rw-r--r--net/xfrm/xfrm_state.c12
-rw-r--r--net/xfrm/xfrm_user.c77
-rw-r--r--security/selinux/netlink.c5
967 files changed, 42560 insertions, 21732 deletions
diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp
index d40d2b550502..05aeedf17794 100644
--- a/Documentation/ABI/testing/sysfs-ptp
+++ b/Documentation/ABI/testing/sysfs-ptp
@@ -19,7 +19,11 @@ Date: September 2010
19Contact: Richard Cochran <richardcochran@gmail.com> 19Contact: Richard Cochran <richardcochran@gmail.com>
20Description: 20Description:
21 This file contains the name of the PTP hardware clock 21 This file contains the name of the PTP hardware clock
22 as a human readable string. 22 as a human readable string. The purpose of this
23 attribute is to provide the user with a "friendly
24 name" and to help distinguish PHY based devices from
25 MAC based ones. The string does not necessarily have
26 to be any kind of unique id.
23 27
24What: /sys/class/ptp/ptpN/max_adjustment 28What: /sys/class/ptp/ptpN/max_adjustment
25Date: September 2010 29Date: September 2010
diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt
new file mode 100644
index 000000000000..8f1ae81228e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/c_can.txt
@@ -0,0 +1,49 @@
1Bosch C_CAN/D_CAN controller Device Tree Bindings
2-------------------------------------------------
3
4Required properties:
5- compatible : Should be "bosch,c_can" for C_CAN controllers and
6 "bosch,d_can" for D_CAN controllers.
7- reg : physical base address and size of the C_CAN/D_CAN
8 registers map
9- interrupts : property with a value describing the interrupt
10 number
11
12Optional properties:
13- ti,hwmods : Must be "d_can<n>" or "c_can<n>", n being the
14 instance number
15
16Note: "ti,hwmods" field is used to fetch the base address and irq
17resources from TI, omap hwmod data base during device registration.
18Future plan is to migrate hwmod data base contents into device tree
19blob so that, all the required data will be used from device tree dts
20file.
21
22Example:
23
24Step1: SoC common .dtsi file
25
26 dcan1: d_can@481d0000 {
27 compatible = "bosch,d_can";
28 reg = <0x481d0000 0x2000>;
29 interrupts = <55>;
30 interrupt-parent = <&intc>;
31 status = "disabled";
32 };
33
34(or)
35
36 dcan1: d_can@481d0000 {
37 compatible = "bosch,d_can";
38 ti,hwmods = "d_can1";
39 reg = <0x481d0000 0x2000>;
40 interrupts = <55>;
41 interrupt-parent = <&intc>;
42 status = "disabled";
43 };
44
45Step 2: board specific .dts file
46
47 &dcan1 {
48 status = "okay";
49 };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
new file mode 100644
index 000000000000..dcaabe9fe869
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -0,0 +1,109 @@
1TI SoC Ethernet Switch Controller Device Tree Bindings
2------------------------------------------------------
3
4Required properties:
5- compatible : Should be "ti,cpsw"
6- reg : physical base address and size of the cpsw
7 registers map
8- interrupts : property with a value describing the interrupt
9 number
10- interrupt-parent : The parent interrupt controller
11- cpdma_channels : Specifies number of channels in CPDMA
12- host_port_no : Specifies host port shift
13- cpdma_reg_ofs : Specifies CPDMA submodule register offset
14- cpdma_sram_ofs : Specifies CPDMA SRAM offset
15- ale_reg_ofs : Specifies ALE submodule register offset
16- ale_entries : Specifies No of entries ALE can hold
17- host_port_reg_ofs : Specifies host port register offset
18- hw_stats_reg_ofs : Specifies hardware statistics register offset
19- bd_ram_ofs : Specifies internal descriptor RAM offset
20- bd_ram_size : Specifies internal descriptor RAM size
21- rx_descs : Specifies number of Rx descriptors
22- mac_control : Specifies Default MAC control register content
23 for the specific platform
24- slaves : Specifies number for slaves
25- slave_reg_ofs : Specifies slave register offset
26- sliver_reg_ofs : Specifies slave sliver register offset
27- phy_id : Specifies slave phy id
28- mac-address : Specifies slave MAC address
29
30Optional properties:
31- ti,hwmods : Must be "cpgmac0"
32- no_bd_ram : Must be 0 or 1
33
34Note: "ti,hwmods" field is used to fetch the base address and irq
35resources from TI, omap hwmod data base during device registration.
36Future plan is to migrate hwmod data base contents into device tree
37blob so that, all the required data will be used from device tree dts
38file.
39
40Examples:
41
42 mac: ethernet@4A100000 {
43 compatible = "ti,cpsw";
44 reg = <0x4A100000 0x1000>;
45 interrupts = <55 0x4>;
46 interrupt-parent = <&intc>;
47 cpdma_channels = <8>;
48 host_port_no = <0>;
49 cpdma_reg_ofs = <0x800>;
50 cpdma_sram_ofs = <0xa00>;
51 ale_reg_ofs = <0xd00>;
52 ale_entries = <1024>;
53 host_port_reg_ofs = <0x108>;
54 hw_stats_reg_ofs = <0x900>;
55 bd_ram_ofs = <0x2000>;
56 bd_ram_size = <0x2000>;
57 no_bd_ram = <0>;
58 rx_descs = <64>;
59 mac_control = <0x20>;
60 slaves = <2>;
61 cpsw_emac0: slave@0 {
62 slave_reg_ofs = <0x208>;
63 sliver_reg_ofs = <0xd80>;
64 phy_id = "davinci_mdio.16:00";
65 /* Filled in by U-Boot */
66 mac-address = [ 00 00 00 00 00 00 ];
67 };
68 cpsw_emac1: slave@1 {
69 slave_reg_ofs = <0x308>;
70 sliver_reg_ofs = <0xdc0>;
71 phy_id = "davinci_mdio.16:01";
72 /* Filled in by U-Boot */
73 mac-address = [ 00 00 00 00 00 00 ];
74 };
75 };
76
77(or)
78 mac: ethernet@4A100000 {
79 compatible = "ti,cpsw";
80 ti,hwmods = "cpgmac0";
81 cpdma_channels = <8>;
82 host_port_no = <0>;
83 cpdma_reg_ofs = <0x800>;
84 cpdma_sram_ofs = <0xa00>;
85 ale_reg_ofs = <0xd00>;
86 ale_entries = <1024>;
87 host_port_reg_ofs = <0x108>;
88 hw_stats_reg_ofs = <0x900>;
89 bd_ram_ofs = <0x2000>;
90 bd_ram_size = <0x2000>;
91 no_bd_ram = <0>;
92 rx_descs = <64>;
93 mac_control = <0x20>;
94 slaves = <2>;
95 cpsw_emac0: slave@0 {
96 slave_reg_ofs = <0x208>;
97 sliver_reg_ofs = <0xd80>;
98 phy_id = "davinci_mdio.16:00";
99 /* Filled in by U-Boot */
100 mac-address = [ 00 00 00 00 00 00 ];
101 };
102 cpsw_emac1: slave@1 {
103 slave_reg_ofs = <0x308>;
104 sliver_reg_ofs = <0xdc0>;
105 phy_id = "davinci_mdio.16:01";
106 /* Filled in by U-Boot */
107 mac-address = [ 00 00 00 00 00 00 ];
108 };
109 };
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
new file mode 100644
index 000000000000..72efaaf764f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/davinci-mdio.txt
@@ -0,0 +1,33 @@
1TI SoC Davinci MDIO Controller Device Tree Bindings
2---------------------------------------------------
3
4Required properties:
5- compatible : Should be "ti,davinci_mdio"
6- reg : physical base address and size of the davinci mdio
7 registers map
8- bus_freq : Mdio Bus frequency
9
10Optional properties:
11- ti,hwmods : Must be "davinci_mdio"
12
13Note: "ti,hwmods" field is used to fetch the base address and irq
14resources from TI, omap hwmod data base during device registration.
15Future plan is to migrate hwmod data base contents into device tree
16blob so that, all the required data will be used from device tree dts
17file.
18
19Examples:
20
21 mdio: davinci_mdio@4A101000 {
22 compatible = "ti,cpsw";
23 reg = <0x4A101000 0x1000>;
24 bus_freq = <1000000>;
25 };
26
27(or)
28
29 mdio: davinci_mdio@4A101000 {
30 compatible = "ti,cpsw";
31 ti,hwmods = "davinci_mdio";
32 bus_freq = <1000000>;
33 };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
new file mode 100644
index 000000000000..8516929c7251
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
@@ -0,0 +1,75 @@
1Properties for an MDIO bus multiplexer controlled by a memory-mapped device
2
3This is a special case of a MDIO bus multiplexer. A memory-mapped device,
4like an FPGA, is used to control which child bus is connected. The mdio-mux
5node must be a child of the memory-mapped device. The driver currently only
6supports devices with eight-bit registers.
7
8Required properties in addition to the generic multiplexer properties:
9
10- compatible : string, must contain "mdio-mux-mmioreg"
11
12- reg : integer, contains the offset of the register that controls the bus
13 multiplexer. The size field in the 'reg' property is the size of
14 register, and must therefore be 1.
15
16- mux-mask : integer, contains an eight-bit mask that specifies which
17 bits in the register control the actual bus multiplexer. The
18 'reg' property of each child mdio-mux node must be constrained by
19 this mask.
20
21Example:
22
23The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
24For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
25A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
26BRDCFG1 that control the actual mux.
27
28 /* The FPGA node */
29 fpga: board-control@3,0 {
30 #address-cells = <1>;
31 #size-cells = <1>;
32 compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
33 reg = <3 0 0x30>;
34 ranges = <0 3 0 0x30>;
35
36 mdio-mux-emi2 {
37 compatible = "mdio-mux-mmioreg", "mdio-mux";
38 mdio-parent-bus = <&xmdio0>;
39 #address-cells = <1>;
40 #size-cells = <0>;
41 reg = <9 1>; // BRDCFG1
42 mux-mask = <0x6>; // EMI2
43
44 emi2_slot1: mdio@0 { // Slot 1 XAUI (FM2)
45 reg = <0>;
46 #address-cells = <1>;
47 #size-cells = <0>;
48
49 phy_xgmii_slot1: ethernet-phy@0 {
50 compatible = "ethernet-phy-ieee802.3-c45";
51 reg = <4>;
52 };
53 };
54
55 emi2_slot2: mdio@2 { // Slot 2 XAUI (FM1)
56 reg = <2>;
57 #address-cells = <1>;
58 #size-cells = <0>;
59
60 phy_xgmii_slot2: ethernet-phy@4 {
61 compatible = "ethernet-phy-ieee802.3-c45";
62 reg = <0>;
63 };
64 };
65 };
66 };
67
68 /* The parent MDIO bus. */
69 xmdio0: mdio@f1000 {
70 #address-cells = <1>;
71 #size-cells = <0>;
72 compatible = "fsl,fman-xmdio";
73 reg = <0xf1000 0x1000>;
74 interrupts = <100 1 0 0>;
75 };
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index ffdd9d866ad7..2d66ed688125 100644
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
@@ -78,7 +78,8 @@ nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
78 flags = hard, nointr, noposix, cto, ac 78 flags = hard, nointr, noposix, cto, ac
79 79
80 80
81ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf> 81ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>:
82 <dns0-ip>:<dns1-ip>
82 83
83 This parameter tells the kernel how to configure IP addresses of devices 84 This parameter tells the kernel how to configure IP addresses of devices
84 and also how to set up the IP routing table. It was originally called 85 and also how to set up the IP routing table. It was originally called
@@ -158,6 +159,13 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
158 159
159 Default: any 160 Default: any
160 161
162 <dns0-ip> IP address of first nameserver.
163 Value gets exported by /proc/net/pnp which is often linked
164 on embedded systems by /etc/resolv.conf.
165
166 <dns1-ip> IP address of second nameserver.
167 Same as above.
168
161 169
162nfsrootdebug 170nfsrootdebug
163 171
diff --git a/Documentation/infiniband/ipoib.txt b/Documentation/infiniband/ipoib.txt
index 64eeb55d0c09..f2cfe265e836 100644
--- a/Documentation/infiniband/ipoib.txt
+++ b/Documentation/infiniband/ipoib.txt
@@ -24,6 +24,9 @@ Partitions and P_Keys
24 The P_Key for any interface is given by the "pkey" file, and the 24 The P_Key for any interface is given by the "pkey" file, and the
25 main interface for a subinterface is in "parent." 25 main interface for a subinterface is in "parent."
26 26
27 Child interface create/delete can also be done using IPoIB's
28 rtnl_link_ops, where childs created using either way behave the same.
29
27Datagram vs Connected modes 30Datagram vs Connected modes
28 31
29 The IPoIB driver supports two modes of operation: datagram and 32 The IPoIB driver supports two modes of operation: datagram and
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 8f3ae4a6147e..a173d2a879f5 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -75,9 +75,10 @@ folder:
75 75
76There is a special folder for debugging information: 76There is a special folder for debugging information:
77 77
78# ls /sys/kernel/debug/batman_adv/bat0/ 78# ls /sys/kernel/debug/batman_adv/bat0/
79# bla_claim_table log socket transtable_local 79# bla_backbone_table log transtable_global
80# gateways originators transtable_global vis_data 80# bla_claim_table originators transtable_local
81# gateways socket vis_data
81 82
82Some of the files contain all sort of status information regard- 83Some of the files contain all sort of status information regard-
83ing the mesh network. For example, you can view the table of 84ing the mesh network. For example, you can view the table of
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 6b1c7110534e..10a015c384b8 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -752,12 +752,22 @@ xmit_hash_policy
752 protocol information to generate the hash. 752 protocol information to generate the hash.
753 753
754 Uses XOR of hardware MAC addresses and IP addresses to 754 Uses XOR of hardware MAC addresses and IP addresses to
755 generate the hash. The formula is 755 generate the hash. The IPv4 formula is
756 756
757 (((source IP XOR dest IP) AND 0xffff) XOR 757 (((source IP XOR dest IP) AND 0xffff) XOR
758 ( source MAC XOR destination MAC )) 758 ( source MAC XOR destination MAC ))
759 modulo slave count 759 modulo slave count
760 760
761 The IPv6 formula is
762
763 hash = (source ip quad 2 XOR dest IP quad 2) XOR
764 (source ip quad 3 XOR dest IP quad 3) XOR
765 (source ip quad 4 XOR dest IP quad 4)
766
767 (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
768 XOR (source MAC XOR destination MAC))
769 modulo slave count
770
761 This algorithm will place all traffic to a particular 771 This algorithm will place all traffic to a particular
762 network peer on the same slave. For non-IP traffic, 772 network peer on the same slave. For non-IP traffic,
763 the formula is the same as for the layer2 transmit 773 the formula is the same as for the layer2 transmit
@@ -778,19 +788,29 @@ xmit_hash_policy
778 slaves, although a single connection will not span 788 slaves, although a single connection will not span
779 multiple slaves. 789 multiple slaves.
780 790
781 The formula for unfragmented TCP and UDP packets is 791 The formula for unfragmented IPv4 TCP and UDP packets is
782 792
783 ((source port XOR dest port) XOR 793 ((source port XOR dest port) XOR
784 ((source IP XOR dest IP) AND 0xffff) 794 ((source IP XOR dest IP) AND 0xffff)
785 modulo slave count 795 modulo slave count
786 796
787 For fragmented TCP or UDP packets and all other IP 797 The formula for unfragmented IPv6 TCP and UDP packets is
788 protocol traffic, the source and destination port 798
799 hash = (source port XOR dest port) XOR
800 ((source ip quad 2 XOR dest IP quad 2) XOR
801 (source ip quad 3 XOR dest IP quad 3) XOR
802 (source ip quad 4 XOR dest IP quad 4))
803
804 ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
805 modulo slave count
806
807 For fragmented TCP or UDP packets and all other IPv4 and
808 IPv6 protocol traffic, the source and destination port
789 information is omitted. For non-IP traffic, the 809 information is omitted. For non-IP traffic, the
790 formula is the same as for the layer2 transmit hash 810 formula is the same as for the layer2 transmit hash
791 policy. 811 policy.
792 812
793 This policy is intended to mimic the behavior of 813 The IPv4 policy is intended to mimic the behavior of
794 certain switches, notably Cisco switches with PFC2 as 814 certain switches, notably Cisco switches with PFC2 as
795 well as some Foundry and IBM products. 815 well as some Foundry and IBM products.
796 816
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ca447b35b833..c7fc10724948 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -439,7 +439,9 @@ tcp_stdurg - BOOLEAN
439tcp_synack_retries - INTEGER 439tcp_synack_retries - INTEGER
440 Number of times SYNACKs for a passive TCP connection attempt will 440 Number of times SYNACKs for a passive TCP connection attempt will
441 be retransmitted. Should not be higher than 255. Default value 441 be retransmitted. Should not be higher than 255. Default value
442 is 5, which corresponds to ~180seconds. 442 is 5, which corresponds to 31seconds till the last retransmission
443 with the current initial RTO of 1second. With this the final timeout
444 for a passive TCP connection will happen after 63seconds.
443 445
444tcp_syncookies - BOOLEAN 446tcp_syncookies - BOOLEAN
445 Only valid when the kernel was compiled with CONFIG_SYNCOOKIES 447 Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
@@ -465,20 +467,37 @@ tcp_syncookies - BOOLEAN
465tcp_fastopen - INTEGER 467tcp_fastopen - INTEGER
466 Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data 468 Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
467 in the opening SYN packet. To use this feature, the client application 469 in the opening SYN packet. To use this feature, the client application
468 must not use connect(). Instead, it should use sendmsg() or sendto() 470 must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than
469 with MSG_FASTOPEN flag which performs a TCP handshake automatically. 471 connect() to perform a TCP handshake automatically.
470 472
471 The values (bitmap) are: 473 The values (bitmap) are
472 1: Enables sending data in the opening SYN on the client 474 1: Enables sending data in the opening SYN on the client.
473 5: Enables sending data in the opening SYN on the client regardless 475 2: Enables TCP Fast Open on the server side, i.e., allowing data in
474 of cookie availability. 476 a SYN packet to be accepted and passed to the application before
477 3-way hand shake finishes.
478 4: Send data in the opening SYN regardless of cookie availability and
479 without a cookie option.
480 0x100: Accept SYN data w/o validating the cookie.
481 0x200: Accept data-in-SYN w/o any cookie option present.
482 0x400/0x800: Enable Fast Open on all listeners regardless of the
483 TCP_FASTOPEN socket option. The two different flags designate two
484 different ways of setting max_qlen without the TCP_FASTOPEN socket
485 option.
475 486
476 Default: 0 487 Default: 0
477 488
489 Note that the client & server side Fast Open flags (1 and 2
490 respectively) must be also enabled before the rest of flags can take
491 effect.
492
493 See include/net/tcp.h and the code for more details.
494
478tcp_syn_retries - INTEGER 495tcp_syn_retries - INTEGER
479 Number of times initial SYNs for an active TCP connection attempt 496 Number of times initial SYNs for an active TCP connection attempt
480 will be retransmitted. Should not be higher than 255. Default value 497 will be retransmitted. Should not be higher than 255. Default value
481 is 5, which corresponds to ~180seconds. 498 is 6, which corresponds to 63seconds till the last retransmission
499 with the current initial RTO of 1second. With this the final timeout
500 for an active TCP connection attempt will happen after 127seconds.
482 501
483tcp_timestamps - BOOLEAN 502tcp_timestamps - BOOLEAN
484 Enable timestamps as defined in RFC1323. 503 Enable timestamps as defined in RFC1323.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index c676b9cedbd0..ef9ee71b4d7f 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -173,7 +173,6 @@ Where:
173For MDIO bus The we have: 173For MDIO bus The we have:
174 174
175 struct stmmac_mdio_bus_data { 175 struct stmmac_mdio_bus_data {
176 int bus_id;
177 int (*phy_reset)(void *priv); 176 int (*phy_reset)(void *priv);
178 unsigned int phy_mask; 177 unsigned int phy_mask;
179 int *irqs; 178 int *irqs;
@@ -181,7 +180,6 @@ For MDIO bus The we have:
181 }; 180 };
182 181
183Where: 182Where:
184 o bus_id: bus identifier;
185 o phy_reset: hook to reset the phy device attached to the bus. 183 o phy_reset: hook to reset the phy device attached to the bus.
186 o phy_mask: phy mask passed when register the MDIO bus within the driver. 184 o phy_mask: phy mask passed when register the MDIO bus within the driver.
187 o irqs: list of IRQs, one per PHY. 185 o irqs: list of IRQs, one per PHY.
@@ -230,9 +228,6 @@ there are two MAC cores: one MAC is for MDIO Bus/PHY emulation
230with fixed_link support. 228with fixed_link support.
231 229
232static struct stmmac_mdio_bus_data stmmac1_mdio_bus = { 230static struct stmmac_mdio_bus_data stmmac1_mdio_bus = {
233 .bus_id = 1,
234 |
235 |-> phy device on the bus_id 1
236 .phy_reset = phy_reset; 231 .phy_reset = phy_reset;
237 | 232 |
238 |-> function to provide the phy_reset on this board 233 |-> function to provide the phy_reset on this board
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
new file mode 100644
index 000000000000..5b34b762d7d5
--- /dev/null
+++ b/Documentation/networking/vxlan.txt
@@ -0,0 +1,47 @@
1Virtual eXtensible Local Area Networking documentation
2======================================================
3
4The VXLAN protocol is a tunnelling protocol that is designed to
5solve the problem of limited number of available VLANs (4096).
6With VXLAN the identifier is expanded to 24 bits.
7
8It is a draft RFC standard, that is implemented by Cisco Nexus,
9Vmware and Brocade. The protocol runs over UDP using a single
10destination port (still not standardized by IANA).
11This document describes the Linux kernel tunnel device,
12there is also an implementation of VXLAN for Openvswitch.
13
14Unlike most tunnels, a VXLAN is a 1 to N network, not just point
15to point. A VXLAN device can either dynamically learn the IP address
16of the other end, in a manner similar to a learning bridge, or the
17forwarding entries can be configured statically.
18
19The management of vxlan is done in a similar fashion to its
20two closest neighbors GRE and VLAN. Configuring VXLAN requires
21the version of iproute2 that matches the kernel release
22where VXLAN was first merged upstream.
23
241. Create vxlan device
25 # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
26
27This creates a new device (vxlan0). The device uses
28the multicast group 239.1.1.1 over eth1 to handle packets where
29no entry is in the forwarding table.
30
312. Delete vxlan device
32 # ip link delete vxlan0
33
343. Show vxlan info
35 # ip -d show vxlan0
36
37It is possible to create, destroy and display the vxlan
38forwarding table using the new bridge command.
39
401. Create forwarding table entry
41 # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
42
432. Delete forwarding table entry
44 # bridge fdb delete 00:17:42:8a:b4:05
45
463. Show forwarding table
47 # bridge fdb show dev vxlan0
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c22b7f6f41a..78336396a432 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4806,6 +4806,7 @@ M: Lauro Ramos Venancio <lauro.venancio@openbossa.org>
4806M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org> 4806M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
4807M: Samuel Ortiz <sameo@linux.intel.com> 4807M: Samuel Ortiz <sameo@linux.intel.com>
4808L: linux-wireless@vger.kernel.org 4808L: linux-wireless@vger.kernel.org
4809L: linux-nfc@lists.01.org (moderated for non-subscribers)
4809S: Maintained 4810S: Maintained
4810F: net/nfc/ 4811F: net/nfc/
4811F: include/linux/nfc.h 4812F: include/linux/nfc.h
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index e93fdae10b23..90d3109c82f4 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -67,7 +67,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
67CONFIG_NETFILTER_XT_TARGET_MARK=m 67CONFIG_NETFILTER_XT_TARGET_MARK=m
68CONFIG_NETFILTER_XT_TARGET_NFLOG=m 68CONFIG_NETFILTER_XT_TARGET_NFLOG=m
69CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 69CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
70CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
71CONFIG_NETFILTER_XT_TARGET_TRACE=m 70CONFIG_NETFILTER_XT_TARGET_TRACE=m
72CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 71CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
73CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 72CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 66b26c1e848c..8f4f657fdbc6 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -67,7 +67,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
67CONFIG_NETFILTER_XT_TARGET_MARK=m 67CONFIG_NETFILTER_XT_TARGET_MARK=m
68CONFIG_NETFILTER_XT_TARGET_NFLOG=m 68CONFIG_NETFILTER_XT_TARGET_NFLOG=m
69CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 69CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
70CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
71CONFIG_NETFILTER_XT_TARGET_TRACE=m 70CONFIG_NETFILTER_XT_TARGET_TRACE=m
72CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 71CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
73CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 72CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 151332515980..4571d33903fe 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
65CONFIG_NETFILTER_XT_TARGET_MARK=m 65CONFIG_NETFILTER_XT_TARGET_MARK=m
66CONFIG_NETFILTER_XT_TARGET_NFLOG=m 66CONFIG_NETFILTER_XT_TARGET_NFLOG=m
67CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 67CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
68CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
69CONFIG_NETFILTER_XT_TARGET_TRACE=m 68CONFIG_NETFILTER_XT_TARGET_TRACE=m
70CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 69CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
71CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 70CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 67bb6fc117f4..12f211733ba0 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
65CONFIG_NETFILTER_XT_TARGET_MARK=m 65CONFIG_NETFILTER_XT_TARGET_MARK=m
66CONFIG_NETFILTER_XT_TARGET_NFLOG=m 66CONFIG_NETFILTER_XT_TARGET_NFLOG=m
67CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 67CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
68CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
69CONFIG_NETFILTER_XT_TARGET_TRACE=m 68CONFIG_NETFILTER_XT_TARGET_TRACE=m
70CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 69CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
71CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 70CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 3e35ce5fa467..215389a5407f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -66,7 +66,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
66CONFIG_NETFILTER_XT_TARGET_MARK=m 66CONFIG_NETFILTER_XT_TARGET_MARK=m
67CONFIG_NETFILTER_XT_TARGET_NFLOG=m 67CONFIG_NETFILTER_XT_TARGET_NFLOG=m
68CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 68CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
69CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
70CONFIG_NETFILTER_XT_TARGET_TRACE=m 69CONFIG_NETFILTER_XT_TARGET_TRACE=m
71CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 70CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
72CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 71CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index ae81e2d190c3..cb9dfb30b674 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
61CONFIG_NETFILTER_XT_TARGET_MARK=m 61CONFIG_NETFILTER_XT_TARGET_MARK=m
62CONFIG_NETFILTER_XT_TARGET_NFLOG=m 62CONFIG_NETFILTER_XT_TARGET_NFLOG=m
63CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 63CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
64CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
65CONFIG_NETFILTER_XT_TARGET_TRACE=m 64CONFIG_NETFILTER_XT_TARGET_TRACE=m
66CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 65CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
67CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 66CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 55d394edf633..8d5def4a31e0 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -80,7 +80,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
80CONFIG_NETFILTER_XT_TARGET_MARK=m 80CONFIG_NETFILTER_XT_TARGET_MARK=m
81CONFIG_NETFILTER_XT_TARGET_NFLOG=m 81CONFIG_NETFILTER_XT_TARGET_NFLOG=m
82CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 82CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
83CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
84CONFIG_NETFILTER_XT_TARGET_TRACE=m 83CONFIG_NETFILTER_XT_TARGET_TRACE=m
85CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 84CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
86CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 85CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index af773743ee11..e2af46f530c1 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -64,7 +64,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
64CONFIG_NETFILTER_XT_TARGET_MARK=m 64CONFIG_NETFILTER_XT_TARGET_MARK=m
65CONFIG_NETFILTER_XT_TARGET_NFLOG=m 65CONFIG_NETFILTER_XT_TARGET_NFLOG=m
66CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 66CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
67CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
68CONFIG_NETFILTER_XT_TARGET_TRACE=m 67CONFIG_NETFILTER_XT_TARGET_TRACE=m
69CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 68CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
70CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 69CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index cdb70d66e535..7c9402b2097f 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
65CONFIG_NETFILTER_XT_TARGET_MARK=m 65CONFIG_NETFILTER_XT_TARGET_MARK=m
66CONFIG_NETFILTER_XT_TARGET_NFLOG=m 66CONFIG_NETFILTER_XT_TARGET_NFLOG=m
67CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 67CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
68CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
69CONFIG_NETFILTER_XT_TARGET_TRACE=m 68CONFIG_NETFILTER_XT_TARGET_TRACE=m
70CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 69CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
71CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 70CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 46bed78d0656..19d23db690a4 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
61CONFIG_NETFILTER_XT_TARGET_MARK=m 61CONFIG_NETFILTER_XT_TARGET_MARK=m
62CONFIG_NETFILTER_XT_TARGET_NFLOG=m 62CONFIG_NETFILTER_XT_TARGET_NFLOG=m
63CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 63CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
64CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
65CONFIG_NETFILTER_XT_TARGET_TRACE=m 64CONFIG_NETFILTER_XT_TARGET_TRACE=m
66CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 65CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
67CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 66CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 86f7772bafbe..ca6c0b4cab77 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
62CONFIG_NETFILTER_XT_TARGET_MARK=m 62CONFIG_NETFILTER_XT_TARGET_MARK=m
63CONFIG_NETFILTER_XT_TARGET_NFLOG=m 63CONFIG_NETFILTER_XT_TARGET_NFLOG=m
64CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 64CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
65CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
66CONFIG_NETFILTER_XT_TARGET_TRACE=m 65CONFIG_NETFILTER_XT_TARGET_TRACE=m
67CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 66CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
68CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 67CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 288261456e1f..c80941c7759e 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
62CONFIG_NETFILTER_XT_TARGET_MARK=m 62CONFIG_NETFILTER_XT_TARGET_MARK=m
63CONFIG_NETFILTER_XT_TARGET_NFLOG=m 63CONFIG_NETFILTER_XT_TARGET_NFLOG=m
64CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 64CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
65CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
66CONFIG_NETFILTER_XT_TARGET_TRACE=m 65CONFIG_NETFILTER_XT_TARGET_TRACE=m
67CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 66CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
68CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 67CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 6cd5a519ce5c..80e012fa409c 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -56,7 +56,6 @@ CONFIG_NF_CONNTRACK_MARK=y
56CONFIG_NF_CONNTRACK_FTP=m 56CONFIG_NF_CONNTRACK_FTP=m
57CONFIG_NF_CONNTRACK_IRC=m 57CONFIG_NF_CONNTRACK_IRC=m
58CONFIG_NF_CONNTRACK_TFTP=m 58CONFIG_NF_CONNTRACK_TFTP=m
59CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
60CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 59CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
61CONFIG_NETFILTER_XT_MATCH_LIMIT=m 60CONFIG_NETFILTER_XT_MATCH_LIMIT=m
62CONFIG_NETFILTER_XT_MATCH_MAC=m 61CONFIG_NETFILTER_XT_MATCH_MAC=m
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index ad15fb10322b..b6fde2bb51b6 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -96,7 +96,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
96CONFIG_NETFILTER_XT_TARGET_MARK=m 96CONFIG_NETFILTER_XT_TARGET_MARK=m
97CONFIG_NETFILTER_XT_TARGET_NFLOG=m 97CONFIG_NETFILTER_XT_TARGET_NFLOG=m
98CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 98CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
99CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
100CONFIG_NETFILTER_XT_TARGET_TRACE=m 99CONFIG_NETFILTER_XT_TARGET_TRACE=m
101CONFIG_NETFILTER_XT_TARGET_SECMARK=m 100CONFIG_NETFILTER_XT_TARGET_SECMARK=m
102CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 101CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index d1606569b001..936ec5a5ed8d 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -87,7 +87,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
87CONFIG_NETFILTER_XT_TARGET_MARK=m 87CONFIG_NETFILTER_XT_TARGET_MARK=m
88CONFIG_NETFILTER_XT_TARGET_NFLOG=m 88CONFIG_NETFILTER_XT_TARGET_NFLOG=m
89CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 89CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
90CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
91CONFIG_NETFILTER_XT_TARGET_TPROXY=m 90CONFIG_NETFILTER_XT_TARGET_TPROXY=m
92CONFIG_NETFILTER_XT_TARGET_TRACE=m 91CONFIG_NETFILTER_XT_TARGET_TRACE=m
93CONFIG_NETFILTER_XT_TARGET_SECMARK=m 92CONFIG_NETFILTER_XT_TARGET_SECMARK=m
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 92a60aecad5c..0315ee37a20b 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -60,7 +60,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
60CONFIG_NETFILTER_XT_TARGET_MARK=m 60CONFIG_NETFILTER_XT_TARGET_MARK=m
61CONFIG_NETFILTER_XT_TARGET_NFLOG=m 61CONFIG_NETFILTER_XT_TARGET_NFLOG=m
62CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 62CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
63CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
64CONFIG_NETFILTER_XT_TARGET_SECMARK=m 63CONFIG_NETFILTER_XT_TARGET_SECMARK=m
65CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 64CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
66CONFIG_NETFILTER_XT_MATCH_COMMENT=m 65CONFIG_NETFILTER_XT_MATCH_COMMENT=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 5527abbb7dea..cd732e5b4fd5 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -86,7 +86,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
86CONFIG_NETFILTER_XT_TARGET_MARK=m 86CONFIG_NETFILTER_XT_TARGET_MARK=m
87CONFIG_NETFILTER_XT_TARGET_NFLOG=m 87CONFIG_NETFILTER_XT_TARGET_NFLOG=m
88CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 88CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
89CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
90CONFIG_NETFILTER_XT_TARGET_TPROXY=m 89CONFIG_NETFILTER_XT_TARGET_TPROXY=m
91CONFIG_NETFILTER_XT_TARGET_TRACE=m 90CONFIG_NETFILTER_XT_TARGET_TRACE=m
92CONFIG_NETFILTER_XT_TARGET_SECMARK=m 91CONFIG_NETFILTER_XT_TARGET_SECMARK=m
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 9c9a123016c0..636f82b89fd3 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -59,7 +59,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
59CONFIG_NETFILTER_XT_TARGET_MARK=m 59CONFIG_NETFILTER_XT_TARGET_MARK=m
60CONFIG_NETFILTER_XT_TARGET_NFLOG=m 60CONFIG_NETFILTER_XT_TARGET_NFLOG=m
61CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 61CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
62CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
63CONFIG_NETFILTER_XT_TARGET_SECMARK=m 62CONFIG_NETFILTER_XT_TARGET_SECMARK=m
64CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 63CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
65CONFIG_NETFILTER_XT_MATCH_COMMENT=m 64CONFIG_NETFILTER_XT_MATCH_COMMENT=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index 28c6b276c216..84624b17b769 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -108,7 +108,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
108CONFIG_NETFILTER_XT_TARGET_MARK=m 108CONFIG_NETFILTER_XT_TARGET_MARK=m
109CONFIG_NETFILTER_XT_TARGET_NFLOG=m 109CONFIG_NETFILTER_XT_TARGET_NFLOG=m
110CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 110CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
111CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
112CONFIG_NETFILTER_XT_TARGET_TPROXY=m 111CONFIG_NETFILTER_XT_TARGET_TPROXY=m
113CONFIG_NETFILTER_XT_TARGET_TRACE=m 112CONFIG_NETFILTER_XT_TARGET_TRACE=m
114CONFIG_NETFILTER_XT_TARGET_SECMARK=m 113CONFIG_NETFILTER_XT_TARGET_SECMARK=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 138f698d7c00..44b473420d51 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -109,7 +109,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
109CONFIG_NETFILTER_XT_TARGET_MARK=m 109CONFIG_NETFILTER_XT_TARGET_MARK=m
110CONFIG_NETFILTER_XT_TARGET_NFLOG=m 110CONFIG_NETFILTER_XT_TARGET_NFLOG=m
111CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 111CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
112CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
113CONFIG_NETFILTER_XT_TARGET_TPROXY=m 112CONFIG_NETFILTER_XT_TARGET_TPROXY=m
114CONFIG_NETFILTER_XT_TARGET_TRACE=m 113CONFIG_NETFILTER_XT_TARGET_TRACE=m
115CONFIG_NETFILTER_XT_TARGET_SECMARK=m 114CONFIG_NETFILTER_XT_TARGET_SECMARK=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 2c0230e76d20..59d9d2fdcd48 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -68,7 +68,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
68CONFIG_NETFILTER_XT_TARGET_MARK=m 68CONFIG_NETFILTER_XT_TARGET_MARK=m
69CONFIG_NETFILTER_XT_TARGET_NFLOG=m 69CONFIG_NETFILTER_XT_TARGET_NFLOG=m
70CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 70CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
71CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
72CONFIG_NETFILTER_XT_TARGET_SECMARK=m 71CONFIG_NETFILTER_XT_TARGET_SECMARK=m
73CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 72CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
74CONFIG_NETFILTER_XT_MATCH_COMMENT=m 73CONFIG_NETFILTER_XT_MATCH_COMMENT=m
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index f8b394a76ac3..29767a8dfea5 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -55,7 +55,6 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
55CONFIG_NETFILTER_XT_TARGET_MARK=m 55CONFIG_NETFILTER_XT_TARGET_MARK=m
56CONFIG_NETFILTER_XT_TARGET_NFLOG=m 56CONFIG_NETFILTER_XT_TARGET_NFLOG=m
57CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 57CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
58CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
59CONFIG_NETFILTER_XT_TARGET_TRACE=m 58CONFIG_NETFILTER_XT_TARGET_TRACE=m
60CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 59CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
61CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 60CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index db27c82e0542..06b56245d78c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -92,7 +92,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
92CONFIG_NETFILTER_XT_TARGET_MARK=m 92CONFIG_NETFILTER_XT_TARGET_MARK=m
93CONFIG_NETFILTER_XT_TARGET_NFLOG=m 93CONFIG_NETFILTER_XT_TARGET_NFLOG=m
94CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 94CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
95CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
96CONFIG_NETFILTER_XT_TARGET_TPROXY=m 95CONFIG_NETFILTER_XT_TARGET_TPROXY=m
97CONFIG_NETFILTER_XT_TARGET_TRACE=m 96CONFIG_NETFILTER_XT_TARGET_TRACE=m
98CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 97CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 7bd1763877ba..f55c27609fc6 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -66,7 +66,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
66CONFIG_NETFILTER_XT_TARGET_MARK=m 66CONFIG_NETFILTER_XT_TARGET_MARK=m
67CONFIG_NETFILTER_XT_TARGET_NFLOG=m 67CONFIG_NETFILTER_XT_TARGET_NFLOG=m
68CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 68CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
69CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
70CONFIG_NETFILTER_XT_TARGET_TPROXY=m 69CONFIG_NETFILTER_XT_TARGET_TPROXY=m
71CONFIG_NETFILTER_XT_TARGET_TRACE=m 70CONFIG_NETFILTER_XT_TARGET_TRACE=m
72CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 71CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index c47f2becfbc3..be1cb6ea3a36 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -167,7 +167,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
167CONFIG_NETFILTER_XT_TARGET_MARK=m 167CONFIG_NETFILTER_XT_TARGET_MARK=m
168CONFIG_NETFILTER_XT_TARGET_NFLOG=m 168CONFIG_NETFILTER_XT_TARGET_NFLOG=m
169CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 169CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
170CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
171CONFIG_NETFILTER_XT_TARGET_TPROXY=m 170CONFIG_NETFILTER_XT_TARGET_TPROXY=m
172CONFIG_NETFILTER_XT_TARGET_TRACE=m 171CONFIG_NETFILTER_XT_TARGET_TRACE=m
173CONFIG_NETFILTER_XT_TARGET_SECMARK=m 172CONFIG_NETFILTER_XT_TARGET_SECMARK=m
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index e9073e9501b3..28368701ef79 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -464,8 +464,12 @@ void bpf_jit_compile(struct sk_filter *fp)
464 emit_alu_K(OR, K); 464 emit_alu_K(OR, K);
465 break; 465 break;
466 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ 466 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
467 case BPF_S_ALU_XOR_X:
467 emit_alu_X(XOR); 468 emit_alu_X(XOR);
468 break; 469 break;
470 case BPF_S_ALU_XOR_K: /* A ^= K */
471 emit_alu_K(XOR, K);
472 break;
469 case BPF_S_ALU_LSH_X: /* A <<= X */ 473 case BPF_S_ALU_LSH_X: /* A <<= X */
470 emit_alu_X(SLL); 474 emit_alu_X(SLL);
471 break; 475 break;
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 0270620a1692..8c5eff6d6df5 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -134,7 +134,6 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
134CONFIG_NETFILTER_XT_TARGET_MARK=m 134CONFIG_NETFILTER_XT_TARGET_MARK=m
135CONFIG_NETFILTER_XT_TARGET_NFLOG=m 135CONFIG_NETFILTER_XT_TARGET_NFLOG=m
136CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 136CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
137CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
138CONFIG_NETFILTER_XT_TARGET_TEE=m 137CONFIG_NETFILTER_XT_TARGET_TEE=m
139CONFIG_NETFILTER_XT_TARGET_TPROXY=m 138CONFIG_NETFILTER_XT_TARGET_TPROXY=m
140CONFIG_NETFILTER_XT_TARGET_TRACE=m 139CONFIG_NETFILTER_XT_TARGET_TRACE=m
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index c11de27a9bcb..e7a3dfcbcda7 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -132,7 +132,6 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
132CONFIG_NETFILTER_XT_TARGET_MARK=m 132CONFIG_NETFILTER_XT_TARGET_MARK=m
133CONFIG_NETFILTER_XT_TARGET_NFLOG=m 133CONFIG_NETFILTER_XT_TARGET_NFLOG=m
134CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 134CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
135CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
136CONFIG_NETFILTER_XT_TARGET_TEE=m 135CONFIG_NETFILTER_XT_TARGET_TEE=m
137CONFIG_NETFILTER_XT_TARGET_TPROXY=m 136CONFIG_NETFILTER_XT_TARGET_TPROXY=m
138CONFIG_NETFILTER_XT_TARGET_TRACE=m 137CONFIG_NETFILTER_XT_TARGET_TRACE=m
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 33643a8bcbbb..520d2bd0b9c5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -280,6 +280,31 @@ void bpf_jit_compile(struct sk_filter *fp)
280 } 280 }
281 EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ 281 EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
282 break; 282 break;
283 case BPF_S_ALU_MOD_X: /* A %= X; */
284 seen |= SEEN_XREG;
285 EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
286 if (pc_ret0 > 0) {
287 /* addrs[pc_ret0 - 1] is start address of target
288 * (addrs[i] - 6) is the address following this jmp
289 * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
290 */
291 EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
292 (addrs[i] - 6));
293 } else {
294 EMIT_COND_JMP(X86_JNE, 2 + 5);
295 CLEAR_A();
296 EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
297 }
298 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
299 EMIT2(0xf7, 0xf3); /* div %ebx */
300 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
301 break;
302 case BPF_S_ALU_MOD_K: /* A %= K; */
303 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
304 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
305 EMIT2(0xf7, 0xf1); /* div %ecx */
306 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
307 break;
283 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ 308 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
284 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ 309 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
285 EMIT(K, 4); 310 EMIT(K, 4);
@@ -310,9 +335,18 @@ void bpf_jit_compile(struct sk_filter *fp)
310 EMIT1_off32(0x0d, K); /* or imm32,%eax */ 335 EMIT1_off32(0x0d, K); /* or imm32,%eax */
311 break; 336 break;
312 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ 337 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
338 case BPF_S_ALU_XOR_X:
313 seen |= SEEN_XREG; 339 seen |= SEEN_XREG;
314 EMIT2(0x31, 0xd8); /* xor %ebx,%eax */ 340 EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
315 break; 341 break;
342 case BPF_S_ALU_XOR_K: /* A ^= K; */
343 if (K == 0)
344 break;
345 if (is_imm8(K))
346 EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
347 else
348 EMIT1_off32(0x35, K); /* xor imm32,%eax */
349 break;
316 case BPF_S_ALU_LSH_X: /* A <<= X; */ 350 case BPF_S_ALU_LSH_X: /* A <<= X; */
317 seen |= SEEN_XREG; 351 seen |= SEEN_XREG;
318 EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ 352 EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index ba2c611154af..6bba414d0c61 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -166,7 +166,7 @@ static int crypto_report_alg(struct crypto_alg *alg,
166 struct crypto_user_alg *ualg; 166 struct crypto_user_alg *ualg;
167 int err = 0; 167 int err = 0;
168 168
169 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, info->nlmsg_seq, 169 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
170 CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags); 170 CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
171 if (!nlh) { 171 if (!nlh) {
172 err = -EMSGSIZE; 172 err = -EMSGSIZE;
@@ -216,7 +216,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
216 if (err) 216 if (err)
217 return err; 217 return err;
218 218
219 return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).pid); 219 return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
220} 220}
221 221
222static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) 222static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
@@ -500,8 +500,7 @@ static int __init crypto_user_init(void)
500 .input = crypto_netlink_rcv, 500 .input = crypto_netlink_rcv,
501 }; 501 };
502 502
503 crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, 503 crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
504 THIS_MODULE, &cfg);
505 if (!crypto_nlsk) 504 if (!crypto_nlsk)
506 return -ENOMEM; 505 return -ENOMEM;
507 506
diff --git a/drivers/Makefile b/drivers/Makefile
index acb48fa4531c..03da5b663aef 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -123,7 +123,6 @@ obj-$(CONFIG_VHOST_NET) += vhost/
123obj-$(CONFIG_VLYNQ) += vlynq/ 123obj-$(CONFIG_VLYNQ) += vlynq/
124obj-$(CONFIG_STAGING) += staging/ 124obj-$(CONFIG_STAGING) += staging/
125obj-y += platform/ 125obj-y += platform/
126obj-y += ieee802154/
127#common clk code 126#common clk code
128obj-y += clk/ 127obj-y += clk/
129 128
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 06b3207adebd..a533af218368 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
48 48
49config BCMA_SFLASH 49config BCMA_SFLASH
50 bool 50 bool
51 depends on BCMA_DRIVER_MIPS && BROKEN 51 depends on BCMA_DRIVER_MIPS
52 default y 52 default y
53 53
54config BCMA_NFLASH 54config BCMA_NFLASH
55 bool 55 bool
56 depends on BCMA_DRIVER_MIPS && BROKEN 56 depends on BCMA_DRIVER_MIPS
57 default y 57 default y
58 58
59config BCMA_DRIVER_GMAC_CMN 59config BCMA_DRIVER_GMAC_CMN
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 3cf9cc923cd2..169fc58427d3 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
54#ifdef CONFIG_BCMA_SFLASH 54#ifdef CONFIG_BCMA_SFLASH
55/* driver_chipcommon_sflash.c */ 55/* driver_chipcommon_sflash.c */
56int bcma_sflash_init(struct bcma_drv_cc *cc); 56int bcma_sflash_init(struct bcma_drv_cc *cc);
57extern struct platform_device bcma_sflash_dev;
57#else 58#else
58static inline int bcma_sflash_init(struct bcma_drv_cc *cc) 59static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
59{ 60{
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
65#ifdef CONFIG_BCMA_NFLASH 66#ifdef CONFIG_BCMA_NFLASH
66/* driver_chipcommon_nflash.c */ 67/* driver_chipcommon_nflash.c */
67int bcma_nflash_init(struct bcma_drv_cc *cc); 68int bcma_nflash_init(struct bcma_drv_cc *cc);
69extern struct platform_device bcma_nflash_dev;
68#else 70#else
69static inline int bcma_nflash_init(struct bcma_drv_cc *cc) 71static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
70{ 72{
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 63c8b470536f..03bbe104338f 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -65,7 +65,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
65 switch (clkmode) { 65 switch (clkmode) {
66 case BCMA_CLKMODE_FAST: 66 case BCMA_CLKMODE_FAST:
67 bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); 67 bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
68 udelay(64); 68 usleep_range(64, 300);
69 for (i = 0; i < 1500; i++) { 69 for (i = 0; i < 1500; i++) {
70 if (bcma_read32(core, BCMA_CLKCTLST) & 70 if (bcma_read32(core, BCMA_CLKCTLST) &
71 BCMA_CLKCTLST_HAVEHT) { 71 BCMA_CLKCTLST_HAVEHT) {
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 574d62435bc2..9042781edec3 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -5,15 +5,37 @@
5 * Licensed under the GNU/GPL. See COPYING for details. 5 * Licensed under the GNU/GPL. See COPYING for details.
6 */ 6 */
7 7
8#include <linux/platform_device.h>
8#include <linux/bcma/bcma.h> 9#include <linux/bcma/bcma.h>
9#include <linux/bcma/bcma_driver_chipcommon.h>
10#include <linux/delay.h>
11 10
12#include "bcma_private.h" 11#include "bcma_private.h"
13 12
13struct platform_device bcma_nflash_dev = {
14 .name = "bcma_nflash",
15 .num_resources = 0,
16};
17
14/* Initialize NAND flash access */ 18/* Initialize NAND flash access */
15int bcma_nflash_init(struct bcma_drv_cc *cc) 19int bcma_nflash_init(struct bcma_drv_cc *cc)
16{ 20{
17 bcma_err(cc->core->bus, "NAND flash support is broken\n"); 21 struct bcma_bus *bus = cc->core->bus;
22
23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
24 cc->core->id.rev != 0x38) {
25 bcma_err(bus, "NAND flash on unsupported board!\n");
26 return -ENOTSUPP;
27 }
28
29 if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
30 bcma_err(bus, "NAND flash not present according to ChipCommon\n");
31 return -ENODEV;
32 }
33
34 cc->nflash.present = true;
35
36 /* Prepare platform device, but don't register it yet. It's too early,
37 * malloc (required by device_private_init) is not available yet. */
38 bcma_nflash_dev.dev.platform_data = &cc->nflash;
39
18 return 0; 40 return 0;
19} 41}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index c9a4f46c5143..201faf106b3f 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -76,7 +76,10 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
76 if (max_msk) 76 if (max_msk)
77 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); 77 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
78 78
79 /* Add some delay; allow resources to come up and settle. */ 79 /*
80 * Add some delay; allow resources to come up and settle.
81 * Delay is required for SoC (early init).
82 */
80 mdelay(2); 83 mdelay(2);
81} 84}
82 85
@@ -101,7 +104,7 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
101 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val); 104 bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
102} 105}
103 106
104void bcma_pmu_workarounds(struct bcma_drv_cc *cc) 107static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
105{ 108{
106 struct bcma_bus *bus = cc->core->bus; 109 struct bcma_bus *bus = cc->core->bus;
107 110
@@ -257,7 +260,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
257} 260}
258 261
259/* query bus clock frequency for PMU-enabled chipcommon */ 262/* query bus clock frequency for PMU-enabled chipcommon */
260u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc) 263static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
261{ 264{
262 struct bcma_bus *bus = cc->core->bus; 265 struct bcma_bus *bus = cc->core->bus;
263 266
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 6e157a58a1d7..2c4eec2ca5a0 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -5,15 +5,132 @@
5 * Licensed under the GNU/GPL. See COPYING for details. 5 * Licensed under the GNU/GPL. See COPYING for details.
6 */ 6 */
7 7
8#include <linux/platform_device.h>
8#include <linux/bcma/bcma.h> 9#include <linux/bcma/bcma.h>
9#include <linux/bcma/bcma_driver_chipcommon.h>
10#include <linux/delay.h>
11 10
12#include "bcma_private.h" 11#include "bcma_private.h"
13 12
13static struct resource bcma_sflash_resource = {
14 .name = "bcma_sflash",
15 .start = BCMA_SFLASH,
16 .end = 0,
17 .flags = IORESOURCE_MEM | IORESOURCE_READONLY,
18};
19
20struct platform_device bcma_sflash_dev = {
21 .name = "bcma_sflash",
22 .resource = &bcma_sflash_resource,
23 .num_resources = 1,
24};
25
26struct bcma_sflash_tbl_e {
27 char *name;
28 u32 id;
29 u32 blocksize;
30 u16 numblocks;
31};
32
33static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
34 { "", 0x14, 0x10000, 32, },
35 { 0 },
36};
37
38static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
39 { 0 },
40};
41
42static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
43 { 0 },
44};
45
46static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
47{
48 int i;
49 bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
50 BCMA_CC_FLASHCTL_START | opcode);
51 for (i = 0; i < 1000; i++) {
52 if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
53 BCMA_CC_FLASHCTL_BUSY))
54 return;
55 cpu_relax();
56 }
57 bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
58}
59
14/* Initialize serial flash access */ 60/* Initialize serial flash access */
15int bcma_sflash_init(struct bcma_drv_cc *cc) 61int bcma_sflash_init(struct bcma_drv_cc *cc)
16{ 62{
17 bcma_err(cc->core->bus, "Serial flash support is broken\n"); 63 struct bcma_bus *bus = cc->core->bus;
64 struct bcma_sflash *sflash = &cc->sflash;
65 struct bcma_sflash_tbl_e *e;
66 u32 id, id2;
67
68 switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
69 case BCMA_CC_FLASHT_STSER:
70 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
71
72 bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
73 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
74 id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
75
76 bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
77 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
78 id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
79
80 switch (id) {
81 case 0xbf:
82 for (e = bcma_sflash_sst_tbl; e->name; e++) {
83 if (e->id == id2)
84 break;
85 }
86 break;
87 default:
88 for (e = bcma_sflash_st_tbl; e->name; e++) {
89 if (e->id == id)
90 break;
91 }
92 break;
93 }
94 if (!e->name) {
95 bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
96 return -ENOTSUPP;
97 }
98
99 break;
100 case BCMA_CC_FLASHT_ATSER:
101 bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
102 id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
103
104 for (e = bcma_sflash_at_tbl; e->name; e++) {
105 if (e->id == id)
106 break;
107 }
108 if (!e->name) {
109 bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
110 return -ENOTSUPP;
111 }
112
113 break;
114 default:
115 bcma_err(bus, "Unsupported flash type\n");
116 return -ENOTSUPP;
117 }
118
119 sflash->window = BCMA_SFLASH;
120 sflash->blocksize = e->blocksize;
121 sflash->numblocks = e->numblocks;
122 sflash->size = sflash->blocksize * sflash->numblocks;
123 sflash->present = true;
124
125 bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
126 e->name, sflash->size / 1024, sflash->blocksize,
127 sflash->numblocks);
128
129 /* Prepare platform device, but don't register it yet. It's too early,
130 * malloc (required by device_private_init) is not available yet. */
131 bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
132 sflash->size;
133 bcma_sflash_dev.dev.platform_data = sflash;
134
18 return 0; 135 return 0;
19} 136}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c32ebd537abe..c39ee6d45850 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -51,7 +51,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
51 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL); 51 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
52 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) 52 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
53 break; 53 break;
54 msleep(1); 54 usleep_range(1000, 2000);
55 } 55 }
56} 56}
57 57
@@ -92,7 +92,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
92 ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA); 92 ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
93 break; 93 break;
94 } 94 }
95 msleep(1); 95 usleep_range(1000, 2000);
96 } 96 }
97 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0); 97 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
98 return ret; 98 return ret;
@@ -132,7 +132,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
132 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL); 132 v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
133 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) 133 if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
134 break; 134 break;
135 msleep(1); 135 usleep_range(1000, 2000);
136 } 136 }
137 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0); 137 pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
138} 138}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index cbae2c231336..9baf886e82df 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -425,9 +425,9 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
425 pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED; 425 pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
426 426
427 /* Reset RC */ 427 /* Reset RC */
428 udelay(3000); 428 usleep_range(3000, 5000);
429 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE); 429 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
430 udelay(1000); 430 usleep_range(1000, 2000);
431 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST | 431 pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
432 BCMA_CORE_PCI_CTL_RST_OE); 432 BCMA_CORE_PCI_CTL_RST_OE);
433 433
@@ -481,7 +481,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
481 * before issuing configuration requests to PCI Express 481 * before issuing configuration requests to PCI Express
482 * devices. 482 * devices.
483 */ 483 */
484 udelay(100000); 484 msleep(100);
485 485
486 bcma_core_pci_enable_crs(pc); 486 bcma_core_pci_enable_crs(pc);
487 487
@@ -501,7 +501,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
501 set_io_port_base(pc_host->pci_controller.io_map_base); 501 set_io_port_base(pc_host->pci_controller.io_map_base);
502 /* Give some time to the PCI controller to configure itself with the new 502 /* Give some time to the PCI controller to configure itself with the new
503 * values. Not waiting at this point causes crashes of the machine. */ 503 * values. Not waiting at this point causes crashes of the machine. */
504 mdelay(10); 504 usleep_range(10000, 15000);
505 register_pci_controller(&pc_host->pci_controller); 505 register_pci_controller(&pc_host->pci_controller);
506 return; 506 return;
507} 507}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a6e5672c67e7..b6b4b5ebd4c2 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -77,8 +77,8 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
77} 77}
78 78
79#ifdef CONFIG_BCMA_BLOCKIO 79#ifdef CONFIG_BCMA_BLOCKIO
80void bcma_host_pci_block_read(struct bcma_device *core, void *buffer, 80static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
81 size_t count, u16 offset, u8 reg_width) 81 size_t count, u16 offset, u8 reg_width)
82{ 82{
83 void __iomem *addr = core->bus->mmio + offset; 83 void __iomem *addr = core->bus->mmio + offset;
84 if (core->bus->mapped_core != core) 84 if (core->bus->mapped_core != core)
@@ -100,8 +100,9 @@ void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
100 } 100 }
101} 101}
102 102
103void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer, 103static void bcma_host_pci_block_write(struct bcma_device *core,
104 size_t count, u16 offset, u8 reg_width) 104 const void *buffer, size_t count,
105 u16 offset, u8 reg_width)
105{ 106{
106 void __iomem *addr = core->bus->mmio + offset; 107 void __iomem *addr = core->bus->mmio + offset;
107 if (core->bus->mapped_core != core) 108 if (core->bus->mapped_core != core)
@@ -139,7 +140,7 @@ static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
139 iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset); 140 iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
140} 141}
141 142
142const struct bcma_host_ops bcma_host_pci_ops = { 143static const struct bcma_host_ops bcma_host_pci_ops = {
143 .read8 = bcma_host_pci_read8, 144 .read8 = bcma_host_pci_read8,
144 .read16 = bcma_host_pci_read16, 145 .read16 = bcma_host_pci_read16,
145 .read32 = bcma_host_pci_read32, 146 .read32 = bcma_host_pci_read32,
@@ -272,6 +273,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, 277 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 278 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
277 { 0, }, 279 { 0, },
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 3c381fb8f9c4..3475e600011a 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -143,7 +143,7 @@ static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
143 writel(value, core->io_wrap + offset); 143 writel(value, core->io_wrap + offset);
144} 144}
145 145
146const struct bcma_host_ops bcma_host_soc_ops = { 146static const struct bcma_host_ops bcma_host_soc_ops = {
147 .read8 = bcma_host_soc_read8, 147 .read8 = bcma_host_soc_read8,
148 .read16 = bcma_host_soc_read16, 148 .read16 = bcma_host_soc_read16,
149 .read32 = bcma_host_soc_read32, 149 .read32 = bcma_host_soc_read32,
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 758af9ccdef0..432aeeedfd5e 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,6 +7,7 @@
7 7
8#include "bcma_private.h" 8#include "bcma_private.h"
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/platform_device.h>
10#include <linux/bcma/bcma.h> 11#include <linux/bcma/bcma.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12 13
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
136 dev_id++; 137 dev_id++;
137 } 138 }
138 139
140#ifdef CONFIG_BCMA_SFLASH
141 if (bus->drv_cc.sflash.present) {
142 err = platform_device_register(&bcma_sflash_dev);
143 if (err)
144 bcma_err(bus, "Error registering serial flash\n");
145 }
146#endif
147
148#ifdef CONFIG_BCMA_NFLASH
149 if (bus->drv_cc.nflash.present) {
150 err = platform_device_register(&bcma_nflash_dev);
151 if (err)
152 bcma_err(bus, "Error registering NAND flash\n");
153 }
154#endif
155
139 return 0; 156 return 0;
140} 157}
141 158
@@ -210,7 +227,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
210 227
211void bcma_bus_unregister(struct bcma_bus *bus) 228void bcma_bus_unregister(struct bcma_bus *bus)
212{ 229{
230 struct bcma_device *cores[3];
231
232 cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
233 cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
234 cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
235
213 bcma_unregister_cores(bus); 236 bcma_unregister_cores(bus);
237
238 kfree(cores[2]);
239 kfree(cores[1]);
240 kfree(cores[0]);
214} 241}
215 242
216int __init bcma_bus_early_register(struct bcma_bus *bus, 243int __init bcma_bus_early_register(struct bcma_bus *bus,
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 9ea4627dc0c2..0d546b64be34 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
507 /* for these chips OTP is always available */ 507 /* for these chips OTP is always available */
508 present = true; 508 present = true;
509 break; 509 break;
510 case BCMA_CHIP_ID_BCM43227:
510 case BCMA_CHIP_ID_BCM43228: 511 case BCMA_CHIP_ID_BCM43228:
512 case BCMA_CHIP_ID_BCM43428:
511 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT; 513 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
512 break; 514 break;
513 default: 515 default:
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 37ae175162f3..364f82b34d03 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -177,7 +177,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
177 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 177 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
178 return -ENODEV; 178 return -ENODEV;
179 179
180 data = kzalloc(sizeof(*data), GFP_KERNEL); 180 data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
181 if (!data) { 181 if (!data) {
182 BT_ERR("Can't allocate memory for data structure"); 182 BT_ERR("Can't allocate memory for data structure");
183 return -ENOMEM; 183 return -ENOMEM;
@@ -189,14 +189,12 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
189 data->urb = usb_alloc_urb(0, GFP_KERNEL); 189 data->urb = usb_alloc_urb(0, GFP_KERNEL);
190 if (!data->urb) { 190 if (!data->urb) {
191 BT_ERR("Can't allocate URB"); 191 BT_ERR("Can't allocate URB");
192 kfree(data);
193 return -ENOMEM; 192 return -ENOMEM;
194 } 193 }
195 194
196 if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) { 195 if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
197 BT_ERR("Mini driver request failed"); 196 BT_ERR("Mini driver request failed");
198 usb_free_urb(data->urb); 197 usb_free_urb(data->urb);
199 kfree(data);
200 return -EIO; 198 return -EIO;
201 } 199 }
202 200
@@ -209,7 +207,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
209 BT_ERR("Can't allocate memory for mini driver"); 207 BT_ERR("Can't allocate memory for mini driver");
210 release_firmware(firmware); 208 release_firmware(firmware);
211 usb_free_urb(data->urb); 209 usb_free_urb(data->urb);
212 kfree(data);
213 return -ENOMEM; 210 return -ENOMEM;
214 } 211 }
215 212
@@ -224,7 +221,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
224 BT_ERR("Firmware request failed"); 221 BT_ERR("Firmware request failed");
225 usb_free_urb(data->urb); 222 usb_free_urb(data->urb);
226 kfree(data->buffer); 223 kfree(data->buffer);
227 kfree(data);
228 return -EIO; 224 return -EIO;
229 } 225 }
230 226
@@ -236,7 +232,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
236 release_firmware(firmware); 232 release_firmware(firmware);
237 usb_free_urb(data->urb); 233 usb_free_urb(data->urb);
238 kfree(data->buffer); 234 kfree(data->buffer);
239 kfree(data);
240 return -ENOMEM; 235 return -ENOMEM;
241 } 236 }
242 237
@@ -271,7 +266,6 @@ static void bcm203x_disconnect(struct usb_interface *intf)
271 usb_free_urb(data->urb); 266 usb_free_urb(data->urb);
272 kfree(data->fw_data); 267 kfree(data->fw_data);
273 kfree(data->buffer); 268 kfree(data->buffer);
274 kfree(data);
275} 269}
276 270
277static struct usb_driver bcm203x_driver = { 271static struct usb_driver bcm203x_driver = {
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 32e825144fe9..995aee9cba22 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -653,7 +653,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
653 } 653 }
654 654
655 /* Initialize control structure and load firmware */ 655 /* Initialize control structure and load firmware */
656 data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL); 656 data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
657 if (!data) { 657 if (!data) {
658 BT_ERR("Can't allocate memory for control structure"); 658 BT_ERR("Can't allocate memory for control structure");
659 goto done; 659 goto done;
@@ -674,7 +674,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
674 674
675 if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) { 675 if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
676 BT_ERR("Firmware request failed"); 676 BT_ERR("Firmware request failed");
677 goto error; 677 goto done;
678 } 678 }
679 679
680 BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); 680 BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -690,7 +690,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
690 hdev = hci_alloc_dev(); 690 hdev = hci_alloc_dev();
691 if (!hdev) { 691 if (!hdev) {
692 BT_ERR("Can't allocate HCI device"); 692 BT_ERR("Can't allocate HCI device");
693 goto error; 693 goto done;
694 } 694 }
695 695
696 data->hdev = hdev; 696 data->hdev = hdev;
@@ -708,7 +708,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
708 if (hci_register_dev(hdev) < 0) { 708 if (hci_register_dev(hdev) < 0) {
709 BT_ERR("Can't register HCI device"); 709 BT_ERR("Can't register HCI device");
710 hci_free_dev(hdev); 710 hci_free_dev(hdev);
711 goto error; 711 goto done;
712 } 712 }
713 713
714 usb_set_intfdata(intf, data); 714 usb_set_intfdata(intf, data);
@@ -718,9 +718,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
718release: 718release:
719 release_firmware(firmware); 719 release_firmware(firmware);
720 720
721error:
722 kfree(data);
723
724done: 721done:
725 return -EIO; 722 return -EIO;
726} 723}
@@ -741,7 +738,6 @@ static void bfusb_disconnect(struct usb_interface *intf)
741 738
742 hci_unregister_dev(hdev); 739 hci_unregister_dev(hdev);
743 hci_free_dev(hdev); 740 hci_free_dev(hdev);
744 kfree(data);
745} 741}
746 742
747static struct usb_driver bfusb_driver = { 743static struct usb_driver bfusb_driver = {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 66c3a6770c41..0d26851d6e49 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -681,7 +681,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
681 case HCI_SCODATA_PKT: 681 case HCI_SCODATA_PKT:
682 hdev->stat.sco_tx++; 682 hdev->stat.sco_tx++;
683 break; 683 break;
684 }; 684 }
685 685
686 /* Prepend skb with frame type */ 686 /* Prepend skb with frame type */
687 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 687 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -849,7 +849,7 @@ static int bluecard_probe(struct pcmcia_device *link)
849 bluecard_info_t *info; 849 bluecard_info_t *info;
850 850
851 /* Create new info device */ 851 /* Create new info device */
852 info = kzalloc(sizeof(*info), GFP_KERNEL); 852 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
853 if (!info) 853 if (!info)
854 return -ENOMEM; 854 return -ENOMEM;
855 855
@@ -864,10 +864,7 @@ static int bluecard_probe(struct pcmcia_device *link)
864 864
865static void bluecard_detach(struct pcmcia_device *link) 865static void bluecard_detach(struct pcmcia_device *link)
866{ 866{
867 bluecard_info_t *info = link->priv;
868
869 bluecard_release(link); 867 bluecard_release(link);
870 kfree(info);
871} 868}
872 869
873 870
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 29caaed2d715..2fe4a8031348 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -443,7 +443,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
443 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 443 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
444 return -ENODEV; 444 return -ENODEV;
445 445
446 data = kzalloc(sizeof(*data), GFP_KERNEL); 446 data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
447 if (!data) 447 if (!data)
448 return -ENOMEM; 448 return -ENOMEM;
449 449
@@ -453,10 +453,8 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
453 init_usb_anchor(&data->rx_anchor); 453 init_usb_anchor(&data->rx_anchor);
454 454
455 hdev = hci_alloc_dev(); 455 hdev = hci_alloc_dev();
456 if (!hdev) { 456 if (!hdev)
457 kfree(data);
458 return -ENOMEM; 457 return -ENOMEM;
459 }
460 458
461 hdev->bus = HCI_USB; 459 hdev->bus = HCI_USB;
462 hci_set_drvdata(hdev, data); 460 hci_set_drvdata(hdev, data);
@@ -475,7 +473,6 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
475 err = hci_register_dev(hdev); 473 err = hci_register_dev(hdev);
476 if (err < 0) { 474 if (err < 0) {
477 hci_free_dev(hdev); 475 hci_free_dev(hdev);
478 kfree(data);
479 return err; 476 return err;
480 } 477 }
481 478
@@ -500,7 +497,6 @@ static void bpa10x_disconnect(struct usb_interface *intf)
500 hci_free_dev(data->hdev); 497 hci_free_dev(data->hdev);
501 kfree_skb(data->rx_skb[0]); 498 kfree_skb(data->rx_skb[0]);
502 kfree_skb(data->rx_skb[1]); 499 kfree_skb(data->rx_skb[1]);
503 kfree(data);
504} 500}
505 501
506static struct usb_driver bpa10x_driver = { 502static struct usb_driver bpa10x_driver = {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 8925b6d672a6..7ffd3f407144 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -638,7 +638,7 @@ static int bt3c_probe(struct pcmcia_device *link)
638 bt3c_info_t *info; 638 bt3c_info_t *info;
639 639
640 /* Create new info device */ 640 /* Create new info device */
641 info = kzalloc(sizeof(*info), GFP_KERNEL); 641 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
642 if (!info) 642 if (!info)
643 return -ENOMEM; 643 return -ENOMEM;
644 644
@@ -654,10 +654,7 @@ static int bt3c_probe(struct pcmcia_device *link)
654 654
655static void bt3c_detach(struct pcmcia_device *link) 655static void bt3c_detach(struct pcmcia_device *link)
656{ 656{
657 bt3c_info_t *info = link->priv;
658
659 bt3c_release(link); 657 bt3c_release(link);
660 kfree(info);
661} 658}
662 659
663static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data) 660static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 6a9e9717d3ab..3f4bfc814dc7 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -600,8 +600,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
600exit: 600exit:
601 if (ret) { 601 if (ret) {
602 hdev->stat.err_rx++; 602 hdev->stat.err_rx++;
603 if (skb) 603 kfree_skb(skb);
604 kfree_skb(skb);
605 } 604 }
606 605
607 return ret; 606 return ret;
@@ -956,11 +955,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
956 BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d", 955 BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
957 id->vendor, id->device, id->class, func->num); 956 id->vendor, id->device, id->class, func->num);
958 957
959 card = kzalloc(sizeof(*card), GFP_KERNEL); 958 card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
960 if (!card) { 959 if (!card)
961 ret = -ENOMEM; 960 return -ENOMEM;
962 goto done;
963 }
964 961
965 card->func = func; 962 card->func = func;
966 963
@@ -974,8 +971,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
974 971
975 if (btmrvl_sdio_register_dev(card) < 0) { 972 if (btmrvl_sdio_register_dev(card) < 0) {
976 BT_ERR("Failed to register BT device!"); 973 BT_ERR("Failed to register BT device!");
977 ret = -ENODEV; 974 return -ENODEV;
978 goto free_card;
979 } 975 }
980 976
981 /* Disable the interrupts on the card */ 977 /* Disable the interrupts on the card */
@@ -1023,9 +1019,6 @@ disable_host_int:
1023 btmrvl_sdio_disable_host_int(card); 1019 btmrvl_sdio_disable_host_int(card);
1024unreg_dev: 1020unreg_dev:
1025 btmrvl_sdio_unregister_dev(card); 1021 btmrvl_sdio_unregister_dev(card);
1026free_card:
1027 kfree(card);
1028done:
1029 return ret; 1022 return ret;
1030} 1023}
1031 1024
@@ -1047,7 +1040,6 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
1047 BT_DBG("unregester dev"); 1040 BT_DBG("unregester dev");
1048 btmrvl_sdio_unregister_dev(card); 1041 btmrvl_sdio_unregister_dev(card);
1049 btmrvl_remove_card(card->priv); 1042 btmrvl_remove_card(card->priv);
1050 kfree(card);
1051 } 1043 }
1052 } 1044 }
1053} 1045}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index e10ea0347051..4a9909713874 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -304,7 +304,7 @@ static int btsdio_probe(struct sdio_func *func,
304 tuple = tuple->next; 304 tuple = tuple->next;
305 } 305 }
306 306
307 data = kzalloc(sizeof(*data), GFP_KERNEL); 307 data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
308 if (!data) 308 if (!data)
309 return -ENOMEM; 309 return -ENOMEM;
310 310
@@ -315,10 +315,8 @@ static int btsdio_probe(struct sdio_func *func,
315 skb_queue_head_init(&data->txq); 315 skb_queue_head_init(&data->txq);
316 316
317 hdev = hci_alloc_dev(); 317 hdev = hci_alloc_dev();
318 if (!hdev) { 318 if (!hdev)
319 kfree(data);
320 return -ENOMEM; 319 return -ENOMEM;
321 }
322 320
323 hdev->bus = HCI_SDIO; 321 hdev->bus = HCI_SDIO;
324 hci_set_drvdata(hdev, data); 322 hci_set_drvdata(hdev, data);
@@ -340,7 +338,6 @@ static int btsdio_probe(struct sdio_func *func,
340 err = hci_register_dev(hdev); 338 err = hci_register_dev(hdev);
341 if (err < 0) { 339 if (err < 0) {
342 hci_free_dev(hdev); 340 hci_free_dev(hdev);
343 kfree(data);
344 return err; 341 return err;
345 } 342 }
346 343
@@ -366,7 +363,6 @@ static void btsdio_remove(struct sdio_func *func)
366 hci_unregister_dev(hdev); 363 hci_unregister_dev(hdev);
367 364
368 hci_free_dev(hdev); 365 hci_free_dev(hdev);
369 kfree(data);
370} 366}
371 367
372static struct sdio_driver btsdio_driver = { 368static struct sdio_driver btsdio_driver = {
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 21e803a6a281..35a553a90616 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -446,7 +446,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
446 case HCI_SCODATA_PKT: 446 case HCI_SCODATA_PKT:
447 hdev->stat.sco_tx++; 447 hdev->stat.sco_tx++;
448 break; 448 break;
449 }; 449 }
450 450
451 /* Prepend skb with frame type */ 451 /* Prepend skb with frame type */
452 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 452 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -567,7 +567,7 @@ static int btuart_probe(struct pcmcia_device *link)
567 btuart_info_t *info; 567 btuart_info_t *info;
568 568
569 /* Create new info device */ 569 /* Create new info device */
570 info = kzalloc(sizeof(*info), GFP_KERNEL); 570 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
571 if (!info) 571 if (!info)
572 return -ENOMEM; 572 return -ENOMEM;
573 573
@@ -583,10 +583,7 @@ static int btuart_probe(struct pcmcia_device *link)
583 583
584static void btuart_detach(struct pcmcia_device *link) 584static void btuart_detach(struct pcmcia_device *link)
585{ 585{
586 btuart_info_t *info = link->priv;
587
588 btuart_release(link); 586 btuart_release(link);
589 kfree(info);
590} 587}
591 588
592static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data) 589static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 654e248763ef..debda27df9b0 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -96,11 +96,12 @@ static struct usb_device_id btusb_table[] = {
96 { USB_DEVICE(0x0c10, 0x0000) }, 96 { USB_DEVICE(0x0c10, 0x0000) },
97 97
98 /* Broadcom BCM20702A0 */ 98 /* Broadcom BCM20702A0 */
99 { USB_DEVICE(0x04ca, 0x2003) },
99 { USB_DEVICE(0x0489, 0xe042) }, 100 { USB_DEVICE(0x0489, 0xe042) },
100 { USB_DEVICE(0x413c, 0x8197) }, 101 { USB_DEVICE(0x413c, 0x8197) },
101 102
102 /* Foxconn - Hon Hai */ 103 /* Foxconn - Hon Hai */
103 { USB_DEVICE(0x0489, 0xe033) }, 104 { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
104 105
105 /*Broadcom devices with vendor specific id */ 106 /*Broadcom devices with vendor specific id */
106 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 107 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
@@ -956,7 +957,7 @@ static int btusb_probe(struct usb_interface *intf,
956 return -ENODEV; 957 return -ENODEV;
957 } 958 }
958 959
959 data = kzalloc(sizeof(*data), GFP_KERNEL); 960 data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
960 if (!data) 961 if (!data)
961 return -ENOMEM; 962 return -ENOMEM;
962 963
@@ -979,10 +980,8 @@ static int btusb_probe(struct usb_interface *intf,
979 } 980 }
980 } 981 }
981 982
982 if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { 983 if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
983 kfree(data);
984 return -ENODEV; 984 return -ENODEV;
985 }
986 985
987 data->cmdreq_type = USB_TYPE_CLASS; 986 data->cmdreq_type = USB_TYPE_CLASS;
988 987
@@ -1002,10 +1001,8 @@ static int btusb_probe(struct usb_interface *intf,
1002 init_usb_anchor(&data->deferred); 1001 init_usb_anchor(&data->deferred);
1003 1002
1004 hdev = hci_alloc_dev(); 1003 hdev = hci_alloc_dev();
1005 if (!hdev) { 1004 if (!hdev)
1006 kfree(data);
1007 return -ENOMEM; 1005 return -ENOMEM;
1008 }
1009 1006
1010 hdev->bus = HCI_USB; 1007 hdev->bus = HCI_USB;
1011 hci_set_drvdata(hdev, data); 1008 hci_set_drvdata(hdev, data);
@@ -1073,7 +1070,6 @@ static int btusb_probe(struct usb_interface *intf,
1073 data->isoc, data); 1070 data->isoc, data);
1074 if (err < 0) { 1071 if (err < 0) {
1075 hci_free_dev(hdev); 1072 hci_free_dev(hdev);
1076 kfree(data);
1077 return err; 1073 return err;
1078 } 1074 }
1079 } 1075 }
@@ -1081,7 +1077,6 @@ static int btusb_probe(struct usb_interface *intf,
1081 err = hci_register_dev(hdev); 1077 err = hci_register_dev(hdev);
1082 if (err < 0) { 1078 if (err < 0) {
1083 hci_free_dev(hdev); 1079 hci_free_dev(hdev);
1084 kfree(data);
1085 return err; 1080 return err;
1086 } 1081 }
1087 1082
@@ -1114,7 +1109,6 @@ static void btusb_disconnect(struct usb_interface *intf)
1114 usb_driver_release_interface(&btusb_driver, data->isoc); 1109 usb_driver_release_interface(&btusb_driver, data->isoc);
1115 1110
1116 hci_free_dev(hdev); 1111 hci_free_dev(hdev);
1117 kfree(data);
1118} 1112}
1119 1113
1120#ifdef CONFIG_PM 1114#ifdef CONFIG_PM
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 88694697f34f..60abf596f60e 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -297,16 +297,14 @@ static int bt_ti_probe(struct platform_device *pdev)
297 struct hci_dev *hdev; 297 struct hci_dev *hdev;
298 int err; 298 int err;
299 299
300 hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL); 300 hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
301 if (!hst) 301 if (!hst)
302 return -ENOMEM; 302 return -ENOMEM;
303 303
304 /* Expose "hciX" device to user space */ 304 /* Expose "hciX" device to user space */
305 hdev = hci_alloc_dev(); 305 hdev = hci_alloc_dev();
306 if (!hdev) { 306 if (!hdev)
307 kfree(hst);
308 return -ENOMEM; 307 return -ENOMEM;
309 }
310 308
311 BT_DBG("hdev %p", hdev); 309 BT_DBG("hdev %p", hdev);
312 310
@@ -321,7 +319,6 @@ static int bt_ti_probe(struct platform_device *pdev)
321 err = hci_register_dev(hdev); 319 err = hci_register_dev(hdev);
322 if (err < 0) { 320 if (err < 0) {
323 BT_ERR("Can't register HCI device error %d", err); 321 BT_ERR("Can't register HCI device error %d", err);
324 kfree(hst);
325 hci_free_dev(hdev); 322 hci_free_dev(hdev);
326 return err; 323 return err;
327 } 324 }
@@ -347,7 +344,6 @@ static int bt_ti_remove(struct platform_device *pdev)
347 hci_unregister_dev(hdev); 344 hci_unregister_dev(hdev);
348 345
349 hci_free_dev(hdev); 346 hci_free_dev(hdev);
350 kfree(hst);
351 347
352 dev_set_drvdata(&pdev->dev, NULL); 348 dev_set_drvdata(&pdev->dev, NULL);
353 return 0; 349 return 0;
@@ -362,21 +358,7 @@ static struct platform_driver btwilink_driver = {
362 }, 358 },
363}; 359};
364 360
365/* ------- Module Init/Exit interfaces ------ */ 361module_platform_driver(btwilink_driver);
366static int __init btwilink_init(void)
367{
368 BT_INFO("Bluetooth Driver for TI WiLink - Version %s", VERSION);
369
370 return platform_driver_register(&btwilink_driver);
371}
372
373static void __exit btwilink_exit(void)
374{
375 platform_driver_unregister(&btwilink_driver);
376}
377
378module_init(btwilink_init);
379module_exit(btwilink_exit);
380 362
381/* ------ Module Info ------ */ 363/* ------ Module Info ------ */
382 364
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 97a7784db4a2..036cb366fe6e 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -550,7 +550,7 @@ static int dtl1_probe(struct pcmcia_device *link)
550 dtl1_info_t *info; 550 dtl1_info_t *info;
551 551
552 /* Create new info device */ 552 /* Create new info device */
553 info = kzalloc(sizeof(*info), GFP_KERNEL); 553 info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
554 if (!info) 554 if (!info)
555 return -ENOMEM; 555 return -ENOMEM;
556 556
@@ -569,7 +569,6 @@ static void dtl1_detach(struct pcmcia_device *link)
569 569
570 dtl1_close(info); 570 dtl1_close(info);
571 pcmcia_disable_device(link); 571 pcmcia_disable_device(link);
572 kfree(info);
573} 572}
574 573
575static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) 574static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 74e0966b3ead..c8abce3d2d9c 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -531,7 +531,7 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
531 default: 531 default:
532 err = n_tty_ioctl_helper(tty, file, cmd, arg); 532 err = n_tty_ioctl_helper(tty, file, cmd, arg);
533 break; 533 break;
534 }; 534 }
535 535
536 return err; 536 return err;
537} 537}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index ff6d589c34a5..cfc767938589 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -481,7 +481,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
481 hu->hdev->stat.err_rx++; 481 hu->hdev->stat.err_rx++;
482 ptr++; count--; 482 ptr++; count--;
483 continue; 483 continue;
484 }; 484 }
485 485
486 ptr++; count--; 486 ptr++; count--;
487 487
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 3f72595a6017..d8b7aed6e4a9 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -156,7 +156,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
156 case HCI_SCODATA_PKT: 156 case HCI_SCODATA_PKT:
157 data->hdev->stat.sco_tx++; 157 data->hdev->stat.sco_tx++;
158 break; 158 break;
159 }; 159 }
160 160
161 return total; 161 return total;
162} 162}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 82fa4f0f91d6..965b7811e04f 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -264,8 +264,7 @@ static int __devinit cn_init(void)
264 .input = dev->input, 264 .input = dev->input,
265 }; 265 };
266 266
267 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 267 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
268 THIS_MODULE, &cfg);
269 if (!dev->nls) 268 if (!dev->nls)
270 return -EIO; 269 return -EIO;
271 270
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 3ae2bfd31015..fe10a949aef9 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -177,7 +177,7 @@ int __init ibnl_init(void)
177 .input = ibnl_rcv, 177 .input = ibnl_rcv,
178 }; 178 };
179 179
180 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg); 180 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
181 if (!nls) { 181 if (!nls) {
182 pr_warn("Failed to create netlink socket\n"); 182 pr_warn("Failed to create netlink socket\n");
183 return -ENOMEM; 183 return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 45aedf1d9338..5213bab2d19b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1155,7 +1155,7 @@ static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
1155 */ 1155 */
1156 if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) < 1156 if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
1157 (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) { 1157 (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
1158 writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db); 1158 writel(QID(qid) | PIDX(inc), qhp->wq.db);
1159 break; 1159 break;
1160 } 1160 }
1161 set_current_state(TASK_UNINTERRUPTIBLE); 1161 set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 3090100f0de7..e5430dd50764 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -5,7 +5,8 @@ ib_ipoib-y := ipoib_main.o \
5 ipoib_multicast.o \ 5 ipoib_multicast.o \
6 ipoib_verbs.o \ 6 ipoib_verbs.o \
7 ipoib_vlan.o \ 7 ipoib_vlan.o \
8 ipoib_ethtool.o 8 ipoib_ethtool.o \
9 ipoib_netlink.o
9ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o 10ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o
10ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o 11ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o
11 12
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0af216d21f87..196eb52f0035 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -104,6 +104,10 @@ enum {
104 104
105 MAX_SEND_CQE = 16, 105 MAX_SEND_CQE = 16,
106 IPOIB_CM_COPYBREAK = 256, 106 IPOIB_CM_COPYBREAK = 256,
107
108 IPOIB_NON_CHILD = 0,
109 IPOIB_LEGACY_CHILD = 1,
110 IPOIB_RTNL_CHILD = 2,
107}; 111};
108 112
109#define IPOIB_OP_RECV (1ul << 31) 113#define IPOIB_OP_RECV (1ul << 31)
@@ -353,6 +357,7 @@ struct ipoib_dev_priv {
353 struct net_device *parent; 357 struct net_device *parent;
354 struct list_head child_intfs; 358 struct list_head child_intfs;
355 struct list_head list; 359 struct list_head list;
360 int child_type;
356 361
357#ifdef CONFIG_INFINIBAND_IPOIB_CM 362#ifdef CONFIG_INFINIBAND_IPOIB_CM
358 struct ipoib_cm_dev_priv cm; 363 struct ipoib_cm_dev_priv cm;
@@ -512,6 +517,17 @@ void ipoib_event(struct ib_event_handler *handler,
512int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); 517int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
513int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); 518int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
514 519
520int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
521 u16 pkey, int child_type);
522
523int __init ipoib_netlink_init(void);
524void __exit ipoib_netlink_fini(void);
525
526void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
527int ipoib_set_mode(struct net_device *dev, const char *buf);
528
529void ipoib_setup(struct net_device *dev);
530
515void ipoib_pkey_poll(struct work_struct *work); 531void ipoib_pkey_poll(struct work_struct *work);
516int ipoib_pkey_dev_delay_open(struct net_device *dev); 532int ipoib_pkey_dev_delay_open(struct net_device *dev);
517void ipoib_drain_cq(struct net_device *dev); 533void ipoib_drain_cq(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 24683fda8e21..175581cf478c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1448,15 +1448,10 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1448 return sprintf(buf, "datagram\n"); 1448 return sprintf(buf, "datagram\n");
1449} 1449}
1450 1450
1451static ssize_t set_mode(struct device *d, struct device_attribute *attr, 1451int ipoib_set_mode(struct net_device *dev, const char *buf)
1452 const char *buf, size_t count)
1453{ 1452{
1454 struct net_device *dev = to_net_dev(d);
1455 struct ipoib_dev_priv *priv = netdev_priv(dev); 1453 struct ipoib_dev_priv *priv = netdev_priv(dev);
1456 1454
1457 if (!rtnl_trylock())
1458 return restart_syscall();
1459
1460 /* flush paths if we switch modes so that connections are restarted */ 1455 /* flush paths if we switch modes so that connections are restarted */
1461 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 1456 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
1462 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 1457 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
@@ -1467,7 +1462,8 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1467 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 1462 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
1468 1463
1469 ipoib_flush_paths(dev); 1464 ipoib_flush_paths(dev);
1470 return count; 1465 rtnl_lock();
1466 return 0;
1471 } 1467 }
1472 1468
1473 if (!strcmp(buf, "datagram\n")) { 1469 if (!strcmp(buf, "datagram\n")) {
@@ -1476,14 +1472,32 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1476 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 1472 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
1477 rtnl_unlock(); 1473 rtnl_unlock();
1478 ipoib_flush_paths(dev); 1474 ipoib_flush_paths(dev);
1479 1475 rtnl_lock();
1480 return count; 1476 return 0;
1481 } 1477 }
1482 rtnl_unlock();
1483 1478
1484 return -EINVAL; 1479 return -EINVAL;
1485} 1480}
1486 1481
1482static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1483 const char *buf, size_t count)
1484{
1485 struct net_device *dev = to_net_dev(d);
1486 int ret;
1487
1488 if (!rtnl_trylock())
1489 return restart_syscall();
1490
1491 ret = ipoib_set_mode(dev, buf);
1492
1493 rtnl_unlock();
1494
1495 if (!ret)
1496 return count;
1497
1498 return ret;
1499}
1500
1487static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); 1501static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
1488 1502
1489int ipoib_cm_add_mode_attr(struct net_device *dev) 1503int ipoib_cm_add_mode_attr(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1e19b5ae7c47..3f9a9ba2f9ec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -173,6 +173,11 @@ static int ipoib_stop(struct net_device *dev)
173 return 0; 173 return 0;
174} 174}
175 175
176static void ipoib_uninit(struct net_device *dev)
177{
178 ipoib_dev_cleanup(dev);
179}
180
176static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 181static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
177{ 182{
178 struct ipoib_dev_priv *priv = netdev_priv(dev); 183 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1257,6 +1262,9 @@ out:
1257void ipoib_dev_cleanup(struct net_device *dev) 1262void ipoib_dev_cleanup(struct net_device *dev)
1258{ 1263{
1259 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; 1264 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
1265 LIST_HEAD(head);
1266
1267 ASSERT_RTNL();
1260 1268
1261 ipoib_delete_debug_files(dev); 1269 ipoib_delete_debug_files(dev);
1262 1270
@@ -1265,10 +1273,9 @@ void ipoib_dev_cleanup(struct net_device *dev)
1265 /* Stop GC on child */ 1273 /* Stop GC on child */
1266 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); 1274 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
1267 cancel_delayed_work(&cpriv->neigh_reap_task); 1275 cancel_delayed_work(&cpriv->neigh_reap_task);
1268 unregister_netdev(cpriv->dev); 1276 unregister_netdevice_queue(cpriv->dev, &head);
1269 ipoib_dev_cleanup(cpriv->dev);
1270 free_netdev(cpriv->dev);
1271 } 1277 }
1278 unregister_netdevice_many(&head);
1272 1279
1273 ipoib_ib_dev_cleanup(dev); 1280 ipoib_ib_dev_cleanup(dev);
1274 1281
@@ -1286,6 +1293,7 @@ static const struct header_ops ipoib_header_ops = {
1286}; 1293};
1287 1294
1288static const struct net_device_ops ipoib_netdev_ops = { 1295static const struct net_device_ops ipoib_netdev_ops = {
1296 .ndo_uninit = ipoib_uninit,
1289 .ndo_open = ipoib_open, 1297 .ndo_open = ipoib_open,
1290 .ndo_stop = ipoib_stop, 1298 .ndo_stop = ipoib_stop,
1291 .ndo_change_mtu = ipoib_change_mtu, 1299 .ndo_change_mtu = ipoib_change_mtu,
@@ -1295,7 +1303,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
1295 .ndo_set_rx_mode = ipoib_set_mcast_list, 1303 .ndo_set_rx_mode = ipoib_set_mcast_list,
1296}; 1304};
1297 1305
1298static void ipoib_setup(struct net_device *dev) 1306void ipoib_setup(struct net_device *dev)
1299{ 1307{
1300 struct ipoib_dev_priv *priv = netdev_priv(dev); 1308 struct ipoib_dev_priv *priv = netdev_priv(dev);
1301 1309
@@ -1373,12 +1381,9 @@ static ssize_t show_umcast(struct device *dev,
1373 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); 1381 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
1374} 1382}
1375 1383
1376static ssize_t set_umcast(struct device *dev, 1384void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
1377 struct device_attribute *attr,
1378 const char *buf, size_t count)
1379{ 1385{
1380 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev)); 1386 struct ipoib_dev_priv *priv = netdev_priv(ndev);
1381 unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
1382 1387
1383 if (umcast_val > 0) { 1388 if (umcast_val > 0) {
1384 set_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1389 set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
@@ -1386,6 +1391,15 @@ static ssize_t set_umcast(struct device *dev,
1386 "by userspace\n"); 1391 "by userspace\n");
1387 } else 1392 } else
1388 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1393 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
1394}
1395
1396static ssize_t set_umcast(struct device *dev,
1397 struct device_attribute *attr,
1398 const char *buf, size_t count)
1399{
1400 unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
1401
1402 ipoib_set_umcast(to_net_dev(dev), umcast_val);
1389 1403
1390 return count; 1404 return count;
1391} 1405}
@@ -1657,7 +1671,6 @@ static void ipoib_remove_one(struct ib_device *device)
1657 flush_workqueue(ipoib_workqueue); 1671 flush_workqueue(ipoib_workqueue);
1658 1672
1659 unregister_netdev(priv->dev); 1673 unregister_netdev(priv->dev);
1660 ipoib_dev_cleanup(priv->dev);
1661 free_netdev(priv->dev); 1674 free_netdev(priv->dev);
1662 } 1675 }
1663 1676
@@ -1709,8 +1722,15 @@ static int __init ipoib_init_module(void)
1709 if (ret) 1722 if (ret)
1710 goto err_sa; 1723 goto err_sa;
1711 1724
1725 ret = ipoib_netlink_init();
1726 if (ret)
1727 goto err_client;
1728
1712 return 0; 1729 return 0;
1713 1730
1731err_client:
1732 ib_unregister_client(&ipoib_client);
1733
1714err_sa: 1734err_sa:
1715 ib_sa_unregister_client(&ipoib_sa_client); 1735 ib_sa_unregister_client(&ipoib_sa_client);
1716 destroy_workqueue(ipoib_workqueue); 1736 destroy_workqueue(ipoib_workqueue);
@@ -1723,6 +1743,7 @@ err_fs:
1723 1743
1724static void __exit ipoib_cleanup_module(void) 1744static void __exit ipoib_cleanup_module(void)
1725{ 1745{
1746 ipoib_netlink_fini();
1726 ib_unregister_client(&ipoib_client); 1747 ib_unregister_client(&ipoib_client);
1727 ib_sa_unregister_client(&ipoib_sa_client); 1748 ib_sa_unregister_client(&ipoib_sa_client);
1728 ipoib_unregister_debugfs(); 1749 ipoib_unregister_debugfs();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
new file mode 100644
index 000000000000..74685936c948
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright (c) 2012 Mellanox Technologies. - All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/netdevice.h>
34#include <linux/module.h>
35#include <net/rtnetlink.h>
36#include "ipoib.h"
37
38static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
39 [IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
40 [IFLA_IPOIB_MODE] = { .type = NLA_U16 },
41 [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
42};
43
44static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
45{
46 struct ipoib_dev_priv *priv = netdev_priv(dev);
47 u16 val;
48
49 if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
50 goto nla_put_failure;
51
52 val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
53 if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
54 goto nla_put_failure;
55
56 val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
57 if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
58 goto nla_put_failure;
59
60 return 0;
61
62nla_put_failure:
63 return -EMSGSIZE;
64}
65
66static int ipoib_changelink(struct net_device *dev,
67 struct nlattr *tb[], struct nlattr *data[])
68{
69 u16 mode, umcast;
70 int ret = 0;
71
72 if (data[IFLA_IPOIB_MODE]) {
73 mode = nla_get_u16(data[IFLA_IPOIB_MODE]);
74 if (mode == IPOIB_MODE_DATAGRAM)
75 ret = ipoib_set_mode(dev, "datagram\n");
76 else if (mode == IPOIB_MODE_CONNECTED)
77 ret = ipoib_set_mode(dev, "connected\n");
78 else
79 ret = -EINVAL;
80
81 if (ret < 0)
82 goto out_err;
83 }
84
85 if (data[IFLA_IPOIB_UMCAST]) {
86 umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
87 ipoib_set_umcast(dev, umcast);
88 }
89
90out_err:
91 return ret;
92}
93
94static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
95 struct nlattr *tb[], struct nlattr *data[])
96{
97 struct net_device *pdev;
98 struct ipoib_dev_priv *ppriv;
99 u16 child_pkey;
100 int err;
101
102 if (!tb[IFLA_LINK])
103 return -EINVAL;
104
105 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
106 if (!pdev)
107 return -ENODEV;
108
109 ppriv = netdev_priv(pdev);
110
111 if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
112 ipoib_warn(ppriv, "child creation disallowed for child devices\n");
113 return -EINVAL;
114 }
115
116 if (!data || !data[IFLA_IPOIB_PKEY]) {
117 ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
118 child_pkey = ppriv->pkey;
119 } else
120 child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
121
122 err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
123
124 if (!err && data)
125 err = ipoib_changelink(dev, tb, data);
126 return err;
127}
128
129static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
130{
131 struct ipoib_dev_priv *priv, *ppriv;
132
133 priv = netdev_priv(dev);
134 ppriv = netdev_priv(priv->parent);
135
136 mutex_lock(&ppriv->vlan_mutex);
137 unregister_netdevice_queue(dev, head);
138 list_del(&priv->list);
139 mutex_unlock(&ppriv->vlan_mutex);
140}
141
142static size_t ipoib_get_size(const struct net_device *dev)
143{
144 return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
145 nla_total_size(2) + /* IFLA_IPOIB_MODE */
146 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
147}
148
149static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
150 .kind = "ipoib",
151 .maxtype = IFLA_IPOIB_MAX,
152 .policy = ipoib_policy,
153 .priv_size = sizeof(struct ipoib_dev_priv),
154 .setup = ipoib_setup,
155 .newlink = ipoib_new_child_link,
156 .changelink = ipoib_changelink,
157 .dellink = ipoib_unregister_child_dev,
158 .get_size = ipoib_get_size,
159 .fill_info = ipoib_fill_info,
160};
161
162int __init ipoib_netlink_init(void)
163{
164 return rtnl_link_register(&ipoib_link_ops);
165}
166
167void __exit ipoib_netlink_fini(void)
168{
169 rtnl_link_unregister(&ipoib_link_ops);
170}
171
172MODULE_ALIAS_RTNL_LINK("ipoib");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index d7e9740c7248..8292554bccb5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -49,47 +49,11 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
49} 49}
50static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL); 50static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
51 51
52int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) 52int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
53 u16 pkey, int type)
53{ 54{
54 struct ipoib_dev_priv *ppriv, *priv;
55 char intf_name[IFNAMSIZ];
56 int result; 55 int result;
57 56
58 if (!capable(CAP_NET_ADMIN))
59 return -EPERM;
60
61 ppriv = netdev_priv(pdev);
62
63 if (!rtnl_trylock())
64 return restart_syscall();
65 mutex_lock(&ppriv->vlan_mutex);
66
67 /*
68 * First ensure this isn't a duplicate. We check the parent device and
69 * then all of the child interfaces to make sure the Pkey doesn't match.
70 */
71 if (ppriv->pkey == pkey) {
72 result = -ENOTUNIQ;
73 priv = NULL;
74 goto err;
75 }
76
77 list_for_each_entry(priv, &ppriv->child_intfs, list) {
78 if (priv->pkey == pkey) {
79 result = -ENOTUNIQ;
80 priv = NULL;
81 goto err;
82 }
83 }
84
85 snprintf(intf_name, sizeof intf_name, "%s.%04x",
86 ppriv->dev->name, pkey);
87 priv = ipoib_intf_alloc(intf_name);
88 if (!priv) {
89 result = -ENOMEM;
90 goto err;
91 }
92
93 priv->max_ib_mtu = ppriv->max_ib_mtu; 57 priv->max_ib_mtu = ppriv->max_ib_mtu;
94 /* MTU will be reset when mcast join happens */ 58 /* MTU will be reset when mcast join happens */
95 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 59 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
@@ -124,24 +88,27 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
124 88
125 ipoib_create_debug_files(priv->dev); 89 ipoib_create_debug_files(priv->dev);
126 90
127 if (ipoib_cm_add_mode_attr(priv->dev)) 91 /* RTNL childs don't need proprietary sysfs entries */
128 goto sysfs_failed; 92 if (type == IPOIB_LEGACY_CHILD) {
129 if (ipoib_add_pkey_attr(priv->dev)) 93 if (ipoib_cm_add_mode_attr(priv->dev))
130 goto sysfs_failed; 94 goto sysfs_failed;
131 if (ipoib_add_umcast_attr(priv->dev)) 95 if (ipoib_add_pkey_attr(priv->dev))
132 goto sysfs_failed; 96 goto sysfs_failed;
133 97 if (ipoib_add_umcast_attr(priv->dev))
134 if (device_create_file(&priv->dev->dev, &dev_attr_parent)) 98 goto sysfs_failed;
135 goto sysfs_failed; 99
100 if (device_create_file(&priv->dev->dev, &dev_attr_parent))
101 goto sysfs_failed;
102 }
136 103
104 priv->child_type = type;
105 priv->dev->iflink = ppriv->dev->ifindex;
137 list_add_tail(&priv->list, &ppriv->child_intfs); 106 list_add_tail(&priv->list, &ppriv->child_intfs);
138 107
139 mutex_unlock(&ppriv->vlan_mutex);
140 rtnl_unlock();
141
142 return 0; 108 return 0;
143 109
144sysfs_failed: 110sysfs_failed:
111 result = -ENOMEM;
145 ipoib_delete_debug_files(priv->dev); 112 ipoib_delete_debug_files(priv->dev);
146 unregister_netdevice(priv->dev); 113 unregister_netdevice(priv->dev);
147 114
@@ -149,11 +116,60 @@ register_failed:
149 ipoib_dev_cleanup(priv->dev); 116 ipoib_dev_cleanup(priv->dev);
150 117
151err: 118err:
119 return result;
120}
121
122int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
123{
124 struct ipoib_dev_priv *ppriv, *priv;
125 char intf_name[IFNAMSIZ];
126 struct ipoib_dev_priv *tpriv;
127 int result;
128
129 if (!capable(CAP_NET_ADMIN))
130 return -EPERM;
131
132 ppriv = netdev_priv(pdev);
133
134 snprintf(intf_name, sizeof intf_name, "%s.%04x",
135 ppriv->dev->name, pkey);
136 priv = ipoib_intf_alloc(intf_name);
137 if (!priv)
138 return -ENOMEM;
139
140 if (!rtnl_trylock())
141 return restart_syscall();
142
143 mutex_lock(&ppriv->vlan_mutex);
144
145 /*
146 * First ensure this isn't a duplicate. We check the parent device and
147 * then all of the legacy child interfaces to make sure the Pkey
148 * doesn't match.
149 */
150 if (ppriv->pkey == pkey) {
151 result = -ENOTUNIQ;
152 goto out;
153 }
154
155 list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
156 if (tpriv->pkey == pkey &&
157 tpriv->child_type == IPOIB_LEGACY_CHILD) {
158 result = -ENOTUNIQ;
159 goto out;
160 }
161 }
162
163 result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
164
165out:
152 mutex_unlock(&ppriv->vlan_mutex); 166 mutex_unlock(&ppriv->vlan_mutex);
153 rtnl_unlock(); 167
154 if (priv) 168 if (result)
155 free_netdev(priv->dev); 169 free_netdev(priv->dev);
156 170
171 rtnl_unlock();
172
157 return result; 173 return result;
158} 174}
159 175
@@ -171,9 +187,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
171 return restart_syscall(); 187 return restart_syscall();
172 mutex_lock(&ppriv->vlan_mutex); 188 mutex_lock(&ppriv->vlan_mutex);
173 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 189 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
174 if (priv->pkey == pkey) { 190 if (priv->pkey == pkey &&
191 priv->child_type == IPOIB_LEGACY_CHILD) {
175 unregister_netdevice(priv->dev); 192 unregister_netdevice(priv->dev);
176 ipoib_dev_cleanup(priv->dev);
177 list_del(&priv->list); 193 list_del(&priv->list);
178 dev = priv->dev; 194 dev = priv->dev;
179 break; 195 break;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index aa41485bc594..30a6b174fbb0 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -1123,7 +1123,6 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
1123 return drv; 1123 return drv;
1124 1124
1125error: 1125error:
1126 kfree(drv->cs);
1127 kfree(drv); 1126 kfree(drv);
1128 return NULL; 1127 return NULL;
1129} 1128}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c2bd806950e..6a70184c3f23 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -107,8 +107,6 @@ config MII
107 or internal device. It is safe to say Y or M here even if your 107 or internal device. It is safe to say Y or M here even if your
108 ethernet card lacks MII. 108 ethernet card lacks MII.
109 109
110source "drivers/ieee802154/Kconfig"
111
112config IFB 110config IFB
113 tristate "Intermediate Functional Block support" 111 tristate "Intermediate Functional Block support"
114 depends on NET_CLS_ACT 112 depends on NET_CLS_ACT
@@ -151,6 +149,19 @@ config MACVTAP
151 To compile this driver as a module, choose M here: the module 149 To compile this driver as a module, choose M here: the module
152 will be called macvtap. 150 will be called macvtap.
153 151
152config VXLAN
153 tristate "Virtual eXtensible Local Area Network (VXLAN)"
154 depends on EXPERIMENTAL && INET
155 ---help---
156 This allows one to create vxlan virtual interfaces that provide
157 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
158 to tunnel virtual network infrastructure in virtualized environments.
159 For more information see:
160 http://tools.ietf.org/html/draft-mahalingam-dutt-dcops-vxlan-02
161
162 To compile this driver as a module, choose M here: the module
163 will be called vxlan.
164
154config NETCONSOLE 165config NETCONSOLE
155 tristate "Network console logging support" 166 tristate "Network console logging support"
156 ---help--- 167 ---help---
@@ -290,6 +301,8 @@ source "drivers/net/wimax/Kconfig"
290 301
291source "drivers/net/wan/Kconfig" 302source "drivers/net/wan/Kconfig"
292 303
304source "drivers/net/ieee802154/Kconfig"
305
293config XEN_NETDEV_FRONTEND 306config XEN_NETDEV_FRONTEND
294 tristate "Xen network device frontend driver" 307 tristate "Xen network device frontend driver"
295 depends on XEN 308 depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3d375ca128a6..335db78fd987 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_TEAM) += team/
21obj-$(CONFIG_TUN) += tun.o 21obj-$(CONFIG_TUN) += tun.o
22obj-$(CONFIG_VETH) += veth.o 22obj-$(CONFIG_VETH) += veth.o
23obj-$(CONFIG_VIRTIO_NET) += virtio_net.o 23obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
24obj-$(CONFIG_VXLAN) += vxlan.o
24 25
25# 26#
26# Networking Drivers 27# Networking Drivers
@@ -53,6 +54,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
53obj-$(CONFIG_WAN) += wan/ 54obj-$(CONFIG_WAN) += wan/
54obj-$(CONFIG_WLAN) += wireless/ 55obj-$(CONFIG_WLAN) += wireless/
55obj-$(CONFIG_WIMAX) += wimax/ 56obj-$(CONFIG_WIMAX) += wimax/
57obj-$(CONFIG_IEEE802154) += ieee802154/
56 58
57obj-$(CONFIG_VMXNET3) += vmxnet3/ 59obj-$(CONFIG_VMXNET3) += vmxnet3/
58obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o 60obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d688a8af432c..7858c58df4a3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1120 write_unlock_bh(&bond->curr_slave_lock); 1120 write_unlock_bh(&bond->curr_slave_lock);
1121 read_unlock(&bond->lock); 1121 read_unlock(&bond->lock);
1122 1122
1123 netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); 1123 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
1124 if (should_notify_peers) 1124 if (should_notify_peers)
1125 netdev_bonding_change(bond->dev, 1125 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
1126 NETDEV_NOTIFY_PEERS); 1126 bond->dev);
1127 1127
1128 read_lock(&bond->lock); 1128 read_lock(&bond->lock);
1129 write_lock_bh(&bond->curr_slave_lock); 1129 write_lock_bh(&bond->curr_slave_lock);
@@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1558 bond_dev->name, 1558 bond_dev->name,
1559 bond_dev->type, slave_dev->type); 1559 bond_dev->type, slave_dev->type);
1560 1560
1561 res = netdev_bonding_change(bond_dev, 1561 res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1562 NETDEV_PRE_TYPE_CHANGE); 1562 bond_dev);
1563 res = notifier_to_errno(res); 1563 res = notifier_to_errno(res);
1564 if (res) { 1564 if (res) {
1565 pr_err("%s: refused to change device type\n", 1565 pr_err("%s: refused to change device type\n",
@@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1579 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1579 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1580 } 1580 }
1581 1581
1582 netdev_bonding_change(bond_dev, 1582 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1583 NETDEV_POST_TYPE_CHANGE); 1583 bond_dev);
1584 } 1584 }
1585 } else if (bond_dev->type != slave_dev->type) { 1585 } else if (bond_dev->type != slave_dev->type) {
1586 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", 1586 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1941 } 1941 }
1942 1942
1943 block_netpoll_tx(); 1943 block_netpoll_tx();
1944 netdev_bonding_change(bond_dev, NETDEV_RELEASE); 1944 call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
1945 write_lock_bh(&bond->lock); 1945 write_lock_bh(&bond->lock);
1946 1946
1947 slave = bond_get_slave_by_dev(bond, slave_dev); 1947 slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2584,7 +2584,7 @@ re_arm:
2584 read_unlock(&bond->lock); 2584 read_unlock(&bond->lock);
2585 return; 2585 return;
2586 } 2586 }
2587 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); 2587 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2588 rtnl_unlock(); 2588 rtnl_unlock();
2589 } 2589 }
2590} 2590}
@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2811 arp_work.work); 2811 arp_work.work);
2812 struct slave *slave, *oldcurrent; 2812 struct slave *slave, *oldcurrent;
2813 int do_failover = 0; 2813 int do_failover = 0;
2814 int delta_in_ticks; 2814 int delta_in_ticks, extra_ticks;
2815 int i; 2815 int i;
2816 2816
2817 read_lock(&bond->lock); 2817 read_lock(&bond->lock);
2818 2818
2819 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2819 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2820 extra_ticks = delta_in_ticks / 2;
2820 2821
2821 if (bond->slave_cnt == 0) 2822 if (bond->slave_cnt == 0)
2822 goto re_arm; 2823 goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2839 if (slave->link != BOND_LINK_UP) { 2840 if (slave->link != BOND_LINK_UP) {
2840 if (time_in_range(jiffies, 2841 if (time_in_range(jiffies,
2841 trans_start - delta_in_ticks, 2842 trans_start - delta_in_ticks,
2842 trans_start + delta_in_ticks) && 2843 trans_start + delta_in_ticks + extra_ticks) &&
2843 time_in_range(jiffies, 2844 time_in_range(jiffies,
2844 slave->dev->last_rx - delta_in_ticks, 2845 slave->dev->last_rx - delta_in_ticks,
2845 slave->dev->last_rx + delta_in_ticks)) { 2846 slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
2846 2847
2847 slave->link = BOND_LINK_UP; 2848 slave->link = BOND_LINK_UP;
2848 bond_set_active_slave(slave); 2849 bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2872 */ 2873 */
2873 if (!time_in_range(jiffies, 2874 if (!time_in_range(jiffies,
2874 trans_start - delta_in_ticks, 2875 trans_start - delta_in_ticks,
2875 trans_start + 2 * delta_in_ticks) || 2876 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2876 !time_in_range(jiffies, 2877 !time_in_range(jiffies,
2877 slave->dev->last_rx - delta_in_ticks, 2878 slave->dev->last_rx - delta_in_ticks,
2878 slave->dev->last_rx + 2 * delta_in_ticks)) { 2879 slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
2879 2880
2880 slave->link = BOND_LINK_DOWN; 2881 slave->link = BOND_LINK_DOWN;
2881 bond_set_backup_slave(slave); 2882 bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2933 struct slave *slave; 2934 struct slave *slave;
2934 int i, commit = 0; 2935 int i, commit = 0;
2935 unsigned long trans_start; 2936 unsigned long trans_start;
2937 int extra_ticks;
2938
2939 /* All the time comparisons below need some extra time. Otherwise, on
2940 * fast networks the ARP probe/reply may arrive within the same jiffy
2941 * as it was sent. Then, the next time the ARP monitor is run, one
2942 * arp_interval will already have passed in the comparisons.
2943 */
2944 extra_ticks = delta_in_ticks / 2;
2936 2945
2937 bond_for_each_slave(bond, slave, i) { 2946 bond_for_each_slave(bond, slave, i) {
2938 slave->new_link = BOND_LINK_NOCHANGE; 2947 slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2940 if (slave->link != BOND_LINK_UP) { 2949 if (slave->link != BOND_LINK_UP) {
2941 if (time_in_range(jiffies, 2950 if (time_in_range(jiffies,
2942 slave_last_rx(bond, slave) - delta_in_ticks, 2951 slave_last_rx(bond, slave) - delta_in_ticks,
2943 slave_last_rx(bond, slave) + delta_in_ticks)) { 2952 slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
2944 2953
2945 slave->new_link = BOND_LINK_UP; 2954 slave->new_link = BOND_LINK_UP;
2946 commit++; 2955 commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2956 */ 2965 */
2957 if (time_in_range(jiffies, 2966 if (time_in_range(jiffies,
2958 slave->jiffies - delta_in_ticks, 2967 slave->jiffies - delta_in_ticks,
2959 slave->jiffies + 2 * delta_in_ticks)) 2968 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
2960 continue; 2969 continue;
2961 2970
2962 /* 2971 /*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2976 !bond->current_arp_slave && 2985 !bond->current_arp_slave &&
2977 !time_in_range(jiffies, 2986 !time_in_range(jiffies,
2978 slave_last_rx(bond, slave) - delta_in_ticks, 2987 slave_last_rx(bond, slave) - delta_in_ticks,
2979 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { 2988 slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
2980 2989
2981 slave->new_link = BOND_LINK_DOWN; 2990 slave->new_link = BOND_LINK_DOWN;
2982 commit++; 2991 commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2992 if (bond_is_active_slave(slave) && 3001 if (bond_is_active_slave(slave) &&
2993 (!time_in_range(jiffies, 3002 (!time_in_range(jiffies,
2994 trans_start - delta_in_ticks, 3003 trans_start - delta_in_ticks,
2995 trans_start + 2 * delta_in_ticks) || 3004 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2996 !time_in_range(jiffies, 3005 !time_in_range(jiffies,
2997 slave_last_rx(bond, slave) - delta_in_ticks, 3006 slave_last_rx(bond, slave) - delta_in_ticks,
2998 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { 3007 slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
2999 3008
3000 slave->new_link = BOND_LINK_DOWN; 3009 slave->new_link = BOND_LINK_DOWN;
3001 commit++; 3010 commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
3027 if ((!bond->curr_active_slave && 3036 if ((!bond->curr_active_slave &&
3028 time_in_range(jiffies, 3037 time_in_range(jiffies,
3029 trans_start - delta_in_ticks, 3038 trans_start - delta_in_ticks,
3030 trans_start + delta_in_ticks)) || 3039 trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
3031 bond->curr_active_slave != slave) { 3040 bond->curr_active_slave != slave) {
3032 slave->link = BOND_LINK_UP; 3041 slave->link = BOND_LINK_UP;
3033 if (bond->current_arp_slave) { 3042 if (bond->current_arp_slave) {
@@ -3203,7 +3212,7 @@ re_arm:
3203 read_unlock(&bond->lock); 3212 read_unlock(&bond->lock);
3204 return; 3213 return;
3205 } 3214 }
3206 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); 3215 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
3207 rtnl_unlock(); 3216 rtnl_unlock();
3208 } 3217 }
3209} 3218}
@@ -3352,56 +3361,93 @@ static struct notifier_block bond_netdev_notifier = {
3352/*---------------------------- Hashing Policies -----------------------------*/ 3361/*---------------------------- Hashing Policies -----------------------------*/
3353 3362
3354/* 3363/*
3364 * Hash for the output device based upon layer 2 data
3365 */
3366static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3367{
3368 struct ethhdr *data = (struct ethhdr *)skb->data;
3369
3370 if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
3371 return (data->h_dest[5] ^ data->h_source[5]) % count;
3372
3373 return 0;
3374}
3375
3376/*
3355 * Hash for the output device based upon layer 2 and layer 3 data. If 3377 * Hash for the output device based upon layer 2 and layer 3 data. If
3356 * the packet is not IP mimic bond_xmit_hash_policy_l2() 3378 * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
3357 */ 3379 */
3358static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) 3380static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3359{ 3381{
3360 struct ethhdr *data = (struct ethhdr *)skb->data; 3382 struct ethhdr *data = (struct ethhdr *)skb->data;
3361 struct iphdr *iph = ip_hdr(skb); 3383 struct iphdr *iph;
3362 3384 struct ipv6hdr *ipv6h;
3363 if (skb->protocol == htons(ETH_P_IP)) { 3385 u32 v6hash;
3386 __be32 *s, *d;
3387
3388 if (skb->protocol == htons(ETH_P_IP) &&
3389 skb_network_header_len(skb) >= sizeof(*iph)) {
3390 iph = ip_hdr(skb);
3364 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3391 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3365 (data->h_dest[5] ^ data->h_source[5])) % count; 3392 (data->h_dest[5] ^ data->h_source[5])) % count;
3393 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3394 skb_network_header_len(skb) >= sizeof(*ipv6h)) {
3395 ipv6h = ipv6_hdr(skb);
3396 s = &ipv6h->saddr.s6_addr32[0];
3397 d = &ipv6h->daddr.s6_addr32[0];
3398 v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3399 v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
3400 return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
3366 } 3401 }
3367 3402
3368 return (data->h_dest[5] ^ data->h_source[5]) % count; 3403 return bond_xmit_hash_policy_l2(skb, count);
3369} 3404}
3370 3405
3371/* 3406/*
3372 * Hash for the output device based upon layer 3 and layer 4 data. If 3407 * Hash for the output device based upon layer 3 and layer 4 data. If
3373 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3408 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3374 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3409 * altogether not IP, fall back on bond_xmit_hash_policy_l2()
3375 */ 3410 */
3376static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) 3411static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3377{ 3412{
3378 struct ethhdr *data = (struct ethhdr *)skb->data; 3413 u32 layer4_xor = 0;
3379 struct iphdr *iph = ip_hdr(skb); 3414 struct iphdr *iph;
3380 __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); 3415 struct ipv6hdr *ipv6h;
3381 int layer4_xor = 0; 3416 __be32 *s, *d;
3382 3417 __be16 *layer4hdr;
3383 if (skb->protocol == htons(ETH_P_IP)) { 3418
3419 if (skb->protocol == htons(ETH_P_IP) &&
3420 skb_network_header_len(skb) >= sizeof(*iph)) {
3421 iph = ip_hdr(skb);
3384 if (!ip_is_fragment(iph) && 3422 if (!ip_is_fragment(iph) &&
3385 (iph->protocol == IPPROTO_TCP || 3423 (iph->protocol == IPPROTO_TCP ||
3386 iph->protocol == IPPROTO_UDP)) { 3424 iph->protocol == IPPROTO_UDP) &&
3387 layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); 3425 (skb_headlen(skb) - skb_network_offset(skb) >=
3426 iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
3427 layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
3428 layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
3388 } 3429 }
3389 return (layer4_xor ^ 3430 return (layer4_xor ^
3390 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; 3431 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
3391 3432 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3433 skb_network_header_len(skb) >= sizeof(*ipv6h)) {
3434 ipv6h = ipv6_hdr(skb);
3435 if ((ipv6h->nexthdr == IPPROTO_TCP ||
3436 ipv6h->nexthdr == IPPROTO_UDP) &&
3437 (skb_headlen(skb) - skb_network_offset(skb) >=
3438 sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
3439 layer4hdr = (__be16 *)(ipv6h + 1);
3440 layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
3441 }
3442 s = &ipv6h->saddr.s6_addr32[0];
3443 d = &ipv6h->daddr.s6_addr32[0];
3444 layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3445 layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
3446 (layer4_xor >> 8);
3447 return layer4_xor % count;
3392 } 3448 }
3393 3449
3394 return (data->h_dest[5] ^ data->h_source[5]) % count; 3450 return bond_xmit_hash_policy_l2(skb, count);
3395}
3396
3397/*
3398 * Hash for the output device based upon layer 2 data
3399 */
3400static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3401{
3402 struct ethhdr *data = (struct ethhdr *)skb->data;
3403
3404 return (data->h_dest[5] ^ data->h_source[5]) % count;
3405} 3451}
3406 3452
3407/*-------------------------- Device entry points ----------------------------*/ 3453/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 4c538e388655..e5180dfddba5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -34,6 +34,7 @@
34#include <linux/if_ether.h> 34#include <linux/if_ether.h>
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/pm_runtime.h>
37 38
38#include <linux/can.h> 39#include <linux/can.h>
39#include <linux/can/dev.h> 40#include <linux/can/dev.h>
@@ -45,6 +46,9 @@
45#define IF_ENUM_REG_LEN 11 46#define IF_ENUM_REG_LEN 11
46#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN) 47#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
47 48
49/* control extension register D_CAN specific */
50#define CONTROL_EX_PDR BIT(8)
51
48/* control register */ 52/* control register */
49#define CONTROL_TEST BIT(7) 53#define CONTROL_TEST BIT(7)
50#define CONTROL_CCE BIT(6) 54#define CONTROL_CCE BIT(6)
@@ -64,6 +68,7 @@
64#define TEST_BASIC BIT(2) 68#define TEST_BASIC BIT(2)
65 69
66/* status register */ 70/* status register */
71#define STATUS_PDA BIT(10)
67#define STATUS_BOFF BIT(7) 72#define STATUS_BOFF BIT(7)
68#define STATUS_EWARN BIT(6) 73#define STATUS_EWARN BIT(6)
69#define STATUS_EPASS BIT(5) 74#define STATUS_EPASS BIT(5)
@@ -163,6 +168,9 @@
163/* minimum timeout for checking BUSY status */ 168/* minimum timeout for checking BUSY status */
164#define MIN_TIMEOUT_VALUE 6 169#define MIN_TIMEOUT_VALUE 6
165 170
171/* Wait for ~1 sec for INIT bit */
172#define INIT_WAIT_MS 1000
173
166/* napi related */ 174/* napi related */
167#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM 175#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
168 176
@@ -201,6 +209,30 @@ static const struct can_bittiming_const c_can_bittiming_const = {
201 .brp_inc = 1, 209 .brp_inc = 1,
202}; 210};
203 211
212static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
213{
214 if (priv->device)
215 pm_runtime_enable(priv->device);
216}
217
218static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
219{
220 if (priv->device)
221 pm_runtime_disable(priv->device);
222}
223
224static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
225{
226 if (priv->device)
227 pm_runtime_get_sync(priv->device);
228}
229
230static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
231{
232 if (priv->device)
233 pm_runtime_put_sync(priv->device);
234}
235
204static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) 236static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
205{ 237{
206 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + 238 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -673,11 +705,15 @@ static int c_can_get_berr_counter(const struct net_device *dev,
673 unsigned int reg_err_counter; 705 unsigned int reg_err_counter;
674 struct c_can_priv *priv = netdev_priv(dev); 706 struct c_can_priv *priv = netdev_priv(dev);
675 707
708 c_can_pm_runtime_get_sync(priv);
709
676 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); 710 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
677 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> 711 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
678 ERR_CNT_REC_SHIFT; 712 ERR_CNT_REC_SHIFT;
679 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; 713 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
680 714
715 c_can_pm_runtime_put_sync(priv);
716
681 return 0; 717 return 0;
682} 718}
683 719
@@ -1053,11 +1089,13 @@ static int c_can_open(struct net_device *dev)
1053 int err; 1089 int err;
1054 struct c_can_priv *priv = netdev_priv(dev); 1090 struct c_can_priv *priv = netdev_priv(dev);
1055 1091
1092 c_can_pm_runtime_get_sync(priv);
1093
1056 /* open the can device */ 1094 /* open the can device */
1057 err = open_candev(dev); 1095 err = open_candev(dev);
1058 if (err) { 1096 if (err) {
1059 netdev_err(dev, "failed to open can device\n"); 1097 netdev_err(dev, "failed to open can device\n");
1060 return err; 1098 goto exit_open_fail;
1061 } 1099 }
1062 1100
1063 /* register interrupt handler */ 1101 /* register interrupt handler */
@@ -1079,6 +1117,8 @@ static int c_can_open(struct net_device *dev)
1079 1117
1080exit_irq_fail: 1118exit_irq_fail:
1081 close_candev(dev); 1119 close_candev(dev);
1120exit_open_fail:
1121 c_can_pm_runtime_put_sync(priv);
1082 return err; 1122 return err;
1083} 1123}
1084 1124
@@ -1091,6 +1131,7 @@ static int c_can_close(struct net_device *dev)
1091 c_can_stop(dev); 1131 c_can_stop(dev);
1092 free_irq(dev->irq, dev); 1132 free_irq(dev->irq, dev);
1093 close_candev(dev); 1133 close_candev(dev);
1134 c_can_pm_runtime_put_sync(priv);
1094 1135
1095 return 0; 1136 return 0;
1096} 1137}
@@ -1119,6 +1160,77 @@ struct net_device *alloc_c_can_dev(void)
1119} 1160}
1120EXPORT_SYMBOL_GPL(alloc_c_can_dev); 1161EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1121 1162
1163#ifdef CONFIG_PM
1164int c_can_power_down(struct net_device *dev)
1165{
1166 u32 val;
1167 unsigned long time_out;
1168 struct c_can_priv *priv = netdev_priv(dev);
1169
1170 if (!(dev->flags & IFF_UP))
1171 return 0;
1172
1173 WARN_ON(priv->type != BOSCH_D_CAN);
1174
1175 /* set PDR value so the device goes to power down mode */
1176 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1177 val |= CONTROL_EX_PDR;
1178 priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1179
1180 /* Wait for the PDA bit to get set */
1181 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1182 while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1183 time_after(time_out, jiffies))
1184 cpu_relax();
1185
1186 if (time_after(jiffies, time_out))
1187 return -ETIMEDOUT;
1188
1189 c_can_stop(dev);
1190
1191 c_can_pm_runtime_put_sync(priv);
1192
1193 return 0;
1194}
1195EXPORT_SYMBOL_GPL(c_can_power_down);
1196
1197int c_can_power_up(struct net_device *dev)
1198{
1199 u32 val;
1200 unsigned long time_out;
1201 struct c_can_priv *priv = netdev_priv(dev);
1202
1203 if (!(dev->flags & IFF_UP))
1204 return 0;
1205
1206 WARN_ON(priv->type != BOSCH_D_CAN);
1207
1208 c_can_pm_runtime_get_sync(priv);
1209
1210 /* Clear PDR and INIT bits */
1211 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1212 val &= ~CONTROL_EX_PDR;
1213 priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1214 val = priv->read_reg(priv, C_CAN_CTRL_REG);
1215 val &= ~CONTROL_INIT;
1216 priv->write_reg(priv, C_CAN_CTRL_REG, val);
1217
1218 /* Wait for the PDA bit to get clear */
1219 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1220 while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1221 time_after(time_out, jiffies))
1222 cpu_relax();
1223
1224 if (time_after(jiffies, time_out))
1225 return -ETIMEDOUT;
1226
1227 c_can_start(dev);
1228
1229 return 0;
1230}
1231EXPORT_SYMBOL_GPL(c_can_power_up);
1232#endif
1233
1122void free_c_can_dev(struct net_device *dev) 1234void free_c_can_dev(struct net_device *dev)
1123{ 1235{
1124 free_candev(dev); 1236 free_candev(dev);
@@ -1133,10 +1245,19 @@ static const struct net_device_ops c_can_netdev_ops = {
1133 1245
1134int register_c_can_dev(struct net_device *dev) 1246int register_c_can_dev(struct net_device *dev)
1135{ 1247{
1248 struct c_can_priv *priv = netdev_priv(dev);
1249 int err;
1250
1251 c_can_pm_runtime_enable(priv);
1252
1136 dev->flags |= IFF_ECHO; /* we support local echo */ 1253 dev->flags |= IFF_ECHO; /* we support local echo */
1137 dev->netdev_ops = &c_can_netdev_ops; 1254 dev->netdev_ops = &c_can_netdev_ops;
1138 1255
1139 return register_candev(dev); 1256 err = register_candev(dev);
1257 if (err)
1258 c_can_pm_runtime_disable(priv);
1259
1260 return err;
1140} 1261}
1141EXPORT_SYMBOL_GPL(register_c_can_dev); 1262EXPORT_SYMBOL_GPL(register_c_can_dev);
1142 1263
@@ -1144,10 +1265,9 @@ void unregister_c_can_dev(struct net_device *dev)
1144{ 1265{
1145 struct c_can_priv *priv = netdev_priv(dev); 1266 struct c_can_priv *priv = netdev_priv(dev);
1146 1267
1147 /* disable all interrupts */
1148 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1149
1150 unregister_candev(dev); 1268 unregister_candev(dev);
1269
1270 c_can_pm_runtime_disable(priv);
1151} 1271}
1152EXPORT_SYMBOL_GPL(unregister_c_can_dev); 1272EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1153 1273
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 01a7049ab990..e5ed41dafa1b 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -24,6 +24,7 @@
24 24
25enum reg { 25enum reg {
26 C_CAN_CTRL_REG = 0, 26 C_CAN_CTRL_REG = 0,
27 C_CAN_CTRL_EX_REG,
27 C_CAN_STS_REG, 28 C_CAN_STS_REG,
28 C_CAN_ERR_CNT_REG, 29 C_CAN_ERR_CNT_REG,
29 C_CAN_BTR_REG, 30 C_CAN_BTR_REG,
@@ -104,6 +105,7 @@ static const u16 reg_map_c_can[] = {
104 105
105static const u16 reg_map_d_can[] = { 106static const u16 reg_map_d_can[] = {
106 [C_CAN_CTRL_REG] = 0x00, 107 [C_CAN_CTRL_REG] = 0x00,
108 [C_CAN_CTRL_EX_REG] = 0x02,
107 [C_CAN_STS_REG] = 0x04, 109 [C_CAN_STS_REG] = 0x04,
108 [C_CAN_ERR_CNT_REG] = 0x08, 110 [C_CAN_ERR_CNT_REG] = 0x08,
109 [C_CAN_BTR_REG] = 0x0C, 111 [C_CAN_BTR_REG] = 0x0C,
@@ -143,8 +145,9 @@ static const u16 reg_map_d_can[] = {
143}; 145};
144 146
145enum c_can_dev_id { 147enum c_can_dev_id {
146 C_CAN_DEVTYPE, 148 BOSCH_C_CAN_PLATFORM,
147 D_CAN_DEVTYPE, 149 BOSCH_C_CAN,
150 BOSCH_D_CAN,
148}; 151};
149 152
150/* c_can private data structure */ 153/* c_can private data structure */
@@ -152,6 +155,7 @@ struct c_can_priv {
152 struct can_priv can; /* must be the first member */ 155 struct can_priv can; /* must be the first member */
153 struct napi_struct napi; 156 struct napi_struct napi;
154 struct net_device *dev; 157 struct net_device *dev;
158 struct device *device;
155 int tx_object; 159 int tx_object;
156 int current_status; 160 int current_status;
157 int last_status; 161 int last_status;
@@ -164,6 +168,7 @@ struct c_can_priv {
164 unsigned int tx_echo; 168 unsigned int tx_echo;
165 void *priv; /* for board-specific data */ 169 void *priv; /* for board-specific data */
166 u16 irqstatus; 170 u16 irqstatus;
171 enum c_can_dev_id type;
167}; 172};
168 173
169struct net_device *alloc_c_can_dev(void); 174struct net_device *alloc_c_can_dev(void);
@@ -171,4 +176,9 @@ void free_c_can_dev(struct net_device *dev);
171int register_c_can_dev(struct net_device *dev); 176int register_c_can_dev(struct net_device *dev);
172void unregister_c_can_dev(struct net_device *dev); 177void unregister_c_can_dev(struct net_device *dev);
173 178
179#ifdef CONFIG_PM
180int c_can_power_up(struct net_device *dev);
181int c_can_power_down(struct net_device *dev);
182#endif
183
174#endif /* C_CAN_H */ 184#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 1011146ea513..3d7830bcd2bf 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -120,10 +120,10 @@ static int __devinit c_can_pci_probe(struct pci_dev *pdev,
120 120
121 /* Configure CAN type */ 121 /* Configure CAN type */
122 switch (c_can_pci_data->type) { 122 switch (c_can_pci_data->type) {
123 case C_CAN_DEVTYPE: 123 case BOSCH_C_CAN:
124 priv->regs = reg_map_c_can; 124 priv->regs = reg_map_c_can;
125 break; 125 break;
126 case D_CAN_DEVTYPE: 126 case BOSCH_D_CAN:
127 priv->regs = reg_map_d_can; 127 priv->regs = reg_map_d_can;
128 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 128 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
129 break; 129 break;
@@ -192,7 +192,7 @@ static void __devexit c_can_pci_remove(struct pci_dev *pdev)
192} 192}
193 193
194static struct c_can_pci_data c_can_sta2x11= { 194static struct c_can_pci_data c_can_sta2x11= {
195 .type = C_CAN_DEVTYPE, 195 .type = BOSCH_C_CAN,
196 .reg_align = C_CAN_REG_ALIGN_32, 196 .reg_align = C_CAN_REG_ALIGN_32,
197 .freq = 52000000, /* 52 Mhz */ 197 .freq = 52000000, /* 52 Mhz */
198}; 198};
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6ff7ad006c30..ee1416132aba 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -30,6 +30,9 @@
30#include <linux/io.h> 30#include <linux/io.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/clk.h> 32#include <linux/clk.h>
33#include <linux/of.h>
34#include <linux/of_device.h>
35#include <linux/pinctrl/consumer.h>
33 36
34#include <linux/can/dev.h> 37#include <linux/can/dev.h>
35 38
@@ -65,17 +68,58 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
65 writew(val, priv->base + 2 * priv->regs[index]); 68 writew(val, priv->base + 2 * priv->regs[index]);
66} 69}
67 70
71static struct platform_device_id c_can_id_table[] = {
72 [BOSCH_C_CAN_PLATFORM] = {
73 .name = KBUILD_MODNAME,
74 .driver_data = BOSCH_C_CAN,
75 },
76 [BOSCH_C_CAN] = {
77 .name = "c_can",
78 .driver_data = BOSCH_C_CAN,
79 },
80 [BOSCH_D_CAN] = {
81 .name = "d_can",
82 .driver_data = BOSCH_D_CAN,
83 }, {
84 }
85};
86
87static const struct of_device_id c_can_of_table[] = {
88 { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
89 { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
90 { /* sentinel */ },
91};
92
68static int __devinit c_can_plat_probe(struct platform_device *pdev) 93static int __devinit c_can_plat_probe(struct platform_device *pdev)
69{ 94{
70 int ret; 95 int ret;
71 void __iomem *addr; 96 void __iomem *addr;
72 struct net_device *dev; 97 struct net_device *dev;
73 struct c_can_priv *priv; 98 struct c_can_priv *priv;
99 const struct of_device_id *match;
74 const struct platform_device_id *id; 100 const struct platform_device_id *id;
101 struct pinctrl *pinctrl;
75 struct resource *mem; 102 struct resource *mem;
76 int irq; 103 int irq;
77 struct clk *clk; 104 struct clk *clk;
78 105
106 if (pdev->dev.of_node) {
107 match = of_match_device(c_can_of_table, &pdev->dev);
108 if (!match) {
109 dev_err(&pdev->dev, "Failed to find matching dt id\n");
110 ret = -EINVAL;
111 goto exit;
112 }
113 id = match->data;
114 } else {
115 id = platform_get_device_id(pdev);
116 }
117
118 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
119 if (IS_ERR(pinctrl))
120 dev_warn(&pdev->dev,
121 "failed to configure pins from driver\n");
122
79 /* get the appropriate clk */ 123 /* get the appropriate clk */
80 clk = clk_get(&pdev->dev, NULL); 124 clk = clk_get(&pdev->dev, NULL);
81 if (IS_ERR(clk)) { 125 if (IS_ERR(clk)) {
@@ -114,9 +158,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
114 } 158 }
115 159
116 priv = netdev_priv(dev); 160 priv = netdev_priv(dev);
117 id = platform_get_device_id(pdev);
118 switch (id->driver_data) { 161 switch (id->driver_data) {
119 case C_CAN_DEVTYPE: 162 case BOSCH_C_CAN:
120 priv->regs = reg_map_c_can; 163 priv->regs = reg_map_c_can;
121 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { 164 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
122 case IORESOURCE_MEM_32BIT: 165 case IORESOURCE_MEM_32BIT:
@@ -130,7 +173,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
130 break; 173 break;
131 } 174 }
132 break; 175 break;
133 case D_CAN_DEVTYPE: 176 case BOSCH_D_CAN:
134 priv->regs = reg_map_d_can; 177 priv->regs = reg_map_d_can;
135 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 178 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
136 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 179 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
@@ -143,8 +186,10 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
143 186
144 dev->irq = irq; 187 dev->irq = irq;
145 priv->base = addr; 188 priv->base = addr;
189 priv->device = &pdev->dev;
146 priv->can.clock.freq = clk_get_rate(clk); 190 priv->can.clock.freq = clk_get_rate(clk);
147 priv->priv = clk; 191 priv->priv = clk;
192 priv->type = id->driver_data;
148 193
149 platform_set_drvdata(pdev, dev); 194 platform_set_drvdata(pdev, dev);
150 SET_NETDEV_DEV(dev, &pdev->dev); 195 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -195,27 +240,75 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
195 return 0; 240 return 0;
196} 241}
197 242
198static const struct platform_device_id c_can_id_table[] = { 243#ifdef CONFIG_PM
199 { 244static int c_can_suspend(struct platform_device *pdev, pm_message_t state)
200 .name = KBUILD_MODNAME, 245{
201 .driver_data = C_CAN_DEVTYPE, 246 int ret;
202 }, { 247 struct net_device *ndev = platform_get_drvdata(pdev);
203 .name = "c_can", 248 struct c_can_priv *priv = netdev_priv(ndev);
204 .driver_data = C_CAN_DEVTYPE, 249
205 }, { 250 if (priv->type != BOSCH_D_CAN) {
206 .name = "d_can", 251 dev_warn(&pdev->dev, "Not supported\n");
207 .driver_data = D_CAN_DEVTYPE, 252 return 0;
208 }, {
209 } 253 }
210}; 254
255 if (netif_running(ndev)) {
256 netif_stop_queue(ndev);
257 netif_device_detach(ndev);
258 }
259
260 ret = c_can_power_down(ndev);
261 if (ret) {
262 netdev_err(ndev, "failed to enter power down mode\n");
263 return ret;
264 }
265
266 priv->can.state = CAN_STATE_SLEEPING;
267
268 return 0;
269}
270
271static int c_can_resume(struct platform_device *pdev)
272{
273 int ret;
274 struct net_device *ndev = platform_get_drvdata(pdev);
275 struct c_can_priv *priv = netdev_priv(ndev);
276
277 if (priv->type != BOSCH_D_CAN) {
278 dev_warn(&pdev->dev, "Not supported\n");
279 return 0;
280 }
281
282 ret = c_can_power_up(ndev);
283 if (ret) {
284 netdev_err(ndev, "Still in power down mode\n");
285 return ret;
286 }
287
288 priv->can.state = CAN_STATE_ERROR_ACTIVE;
289
290 if (netif_running(ndev)) {
291 netif_device_attach(ndev);
292 netif_start_queue(ndev);
293 }
294
295 return 0;
296}
297#else
298#define c_can_suspend NULL
299#define c_can_resume NULL
300#endif
211 301
212static struct platform_driver c_can_plat_driver = { 302static struct platform_driver c_can_plat_driver = {
213 .driver = { 303 .driver = {
214 .name = KBUILD_MODNAME, 304 .name = KBUILD_MODNAME,
215 .owner = THIS_MODULE, 305 .owner = THIS_MODULE,
306 .of_match_table = of_match_ptr(c_can_of_table),
216 }, 307 },
217 .probe = c_can_plat_probe, 308 .probe = c_can_plat_probe,
218 .remove = __devexit_p(c_can_plat_remove), 309 .remove = __devexit_p(c_can_plat_remove),
310 .suspend = c_can_suspend,
311 .resume = c_can_resume,
219 .id_table = c_can_id_table, 312 .id_table = c_can_id_table,
220}; 313};
221 314
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index c5f143165f80..c78ecfca1e45 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,10 @@
144 144
145#define FLEXCAN_MB_CODE_MASK (0xf0ffffff) 145#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
146 146
147/* FLEXCAN hardware feature flags */
148#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
149#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */
150
147/* Structure of the message buffer */ 151/* Structure of the message buffer */
148struct flexcan_mb { 152struct flexcan_mb {
149 u32 can_ctrl; 153 u32 can_ctrl;
@@ -178,7 +182,7 @@ struct flexcan_regs {
178}; 182};
179 183
180struct flexcan_devtype_data { 184struct flexcan_devtype_data {
181 u32 hw_ver; /* hardware controller version */ 185 u32 features; /* hardware controller features */
182}; 186};
183 187
184struct flexcan_priv { 188struct flexcan_priv {
@@ -197,11 +201,11 @@ struct flexcan_priv {
197}; 201};
198 202
199static struct flexcan_devtype_data fsl_p1010_devtype_data = { 203static struct flexcan_devtype_data fsl_p1010_devtype_data = {
200 .hw_ver = 3, 204 .features = FLEXCAN_HAS_BROKEN_ERR_STATE,
201}; 205};
202 206static struct flexcan_devtype_data fsl_imx28_devtype_data;
203static struct flexcan_devtype_data fsl_imx6q_devtype_data = { 207static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
204 .hw_ver = 10, 208 .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
205}; 209};
206 210
207static const struct can_bittiming_const flexcan_bittiming_const = { 211static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -741,15 +745,19 @@ static int flexcan_chip_start(struct net_device *dev)
741 * enable tx and rx warning interrupt 745 * enable tx and rx warning interrupt
742 * enable bus off interrupt 746 * enable bus off interrupt
743 * (== FLEXCAN_CTRL_ERR_STATE) 747 * (== FLEXCAN_CTRL_ERR_STATE)
744 *
745 * _note_: we enable the "error interrupt"
746 * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
747 * warning or bus passive interrupts.
748 */ 748 */
749 reg_ctrl = flexcan_read(&regs->ctrl); 749 reg_ctrl = flexcan_read(&regs->ctrl);
750 reg_ctrl &= ~FLEXCAN_CTRL_TSYN; 750 reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
751 reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | 751 reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
752 FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK; 752 FLEXCAN_CTRL_ERR_STATE;
753 /*
754 * enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
755 * on most Flexcan cores, too. Otherwise we don't get
756 * any error warning or passive interrupts.
757 */
758 if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
759 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
760 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
753 761
754 /* save for later use */ 762 /* save for later use */
755 priv->reg_ctrl_default = reg_ctrl; 763 priv->reg_ctrl_default = reg_ctrl;
@@ -772,7 +780,7 @@ static int flexcan_chip_start(struct net_device *dev)
772 flexcan_write(0x0, &regs->rx14mask); 780 flexcan_write(0x0, &regs->rx14mask);
773 flexcan_write(0x0, &regs->rx15mask); 781 flexcan_write(0x0, &regs->rx15mask);
774 782
775 if (priv->devtype_data->hw_ver >= 10) 783 if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
776 flexcan_write(0x0, &regs->rxfgmask); 784 flexcan_write(0x0, &regs->rxfgmask);
777 785
778 flexcan_transceiver_switch(priv, 1); 786 flexcan_transceiver_switch(priv, 1);
@@ -954,6 +962,7 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
954 962
955static const struct of_device_id flexcan_of_match[] = { 963static const struct of_device_id flexcan_of_match[] = {
956 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, 964 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
965 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
957 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 966 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
958 { /* sentinel */ }, 967 { /* sentinel */ },
959}; 968};
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 8a8df82988d1..c975999bb055 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
181 181
182 if (!clock_name || !strcmp(clock_name, "sys")) { 182 if (!clock_name || !strcmp(clock_name, "sys")) {
183 sys_clk = clk_get(&ofdev->dev, "sys_clk"); 183 sys_clk = clk_get(&ofdev->dev, "sys_clk");
184 if (!sys_clk) { 184 if (IS_ERR(sys_clk)) {
185 dev_err(&ofdev->dev, "couldn't get sys_clk\n"); 185 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
186 goto exit_unmap; 186 goto exit_unmap;
187 } 187 }
@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
204 204
205 if (clocksrc < 0) { 205 if (clocksrc < 0) {
206 ref_clk = clk_get(&ofdev->dev, "ref_clk"); 206 ref_clk = clk_get(&ofdev->dev, "ref_clk");
207 if (!ref_clk) { 207 if (IS_ERR(ref_clk)) {
208 dev_err(&ofdev->dev, "couldn't get ref_clk\n"); 208 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
209 goto exit_unmap; 209 goto exit_unmap;
210 } 210 }
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 4c4f33d482d2..25011dbe1b96 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -156,8 +156,13 @@ static void set_normal_mode(struct net_device *dev)
156 } 156 }
157 157
158 /* set chip to normal mode */ 158 /* set chip to normal mode */
159 priv->write_reg(priv, REG_MOD, 0x00); 159 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
160 priv->write_reg(priv, REG_MOD, MOD_LOM);
161 else
162 priv->write_reg(priv, REG_MOD, 0x00);
163
160 udelay(10); 164 udelay(10);
165
161 status = priv->read_reg(priv, REG_MOD); 166 status = priv->read_reg(priv, REG_MOD);
162 } 167 }
163 168
@@ -310,7 +315,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
310 315
311 can_put_echo_skb(skb, dev, 0); 316 can_put_echo_skb(skb, dev, 0);
312 317
313 sja1000_write_cmdreg(priv, CMD_TR); 318 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
319 sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
320 else
321 sja1000_write_cmdreg(priv, CMD_TR);
314 322
315 return NETDEV_TX_OK; 323 return NETDEV_TX_OK;
316} 324}
@@ -505,10 +513,18 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
505 netdev_warn(dev, "wakeup interrupt\n"); 513 netdev_warn(dev, "wakeup interrupt\n");
506 514
507 if (isrc & IRQ_TI) { 515 if (isrc & IRQ_TI) {
508 /* transmission complete interrupt */ 516 /* transmission buffer released */
509 stats->tx_bytes += priv->read_reg(priv, REG_FI) & 0xf; 517 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
510 stats->tx_packets++; 518 !(status & SR_TCS)) {
511 can_get_echo_skb(dev, 0); 519 stats->tx_errors++;
520 can_free_echo_skb(dev, 0);
521 } else {
522 /* transmission complete */
523 stats->tx_bytes +=
524 priv->read_reg(priv, REG_FI) & 0xf;
525 stats->tx_packets++;
526 can_get_echo_skb(dev, 0);
527 }
512 netif_wake_queue(dev); 528 netif_wake_queue(dev);
513 } 529 }
514 if (isrc & IRQ_RI) { 530 if (isrc & IRQ_RI) {
@@ -605,7 +621,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
605 priv->can.do_set_mode = sja1000_set_mode; 621 priv->can.do_set_mode = sja1000_set_mode;
606 priv->can.do_get_berr_counter = sja1000_get_berr_counter; 622 priv->can.do_get_berr_counter = sja1000_get_berr_counter;
607 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 623 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
608 CAN_CTRLMODE_BERR_REPORTING; 624 CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
625 CAN_CTRLMODE_ONE_SHOT;
609 626
610 spin_lock_init(&priv->cmdreg_lock); 627 spin_lock_init(&priv->cmdreg_lock);
611 628
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d2f91f737871..c4643c400d46 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -53,7 +53,7 @@ static struct peak_usb_adapter *peak_usb_adapters_list[] = {
53 * dump memory 53 * dump memory
54 */ 54 */
55#define DUMP_WIDTH 16 55#define DUMP_WIDTH 16
56void dump_mem(char *prompt, void *p, int l) 56void pcan_dump_mem(char *prompt, void *p, int l)
57{ 57{
58 pr_info("%s dumping %s (%d bytes):\n", 58 pr_info("%s dumping %s (%d bytes):\n",
59 PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l); 59 PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -203,9 +203,9 @@ static void peak_usb_read_bulk_callback(struct urb *urb)
203 if (dev->state & PCAN_USB_STATE_STARTED) { 203 if (dev->state & PCAN_USB_STATE_STARTED) {
204 err = dev->adapter->dev_decode_buf(dev, urb); 204 err = dev->adapter->dev_decode_buf(dev, urb);
205 if (err) 205 if (err)
206 dump_mem("received usb message", 206 pcan_dump_mem("received usb message",
207 urb->transfer_buffer, 207 urb->transfer_buffer,
208 urb->transfer_buffer_length); 208 urb->transfer_buffer_length);
209 } 209 }
210 } 210 }
211 211
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 4c775b620be2..c8e5e91d7cb5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -131,7 +131,7 @@ struct peak_usb_device {
131 struct peak_usb_device *next_siblings; 131 struct peak_usb_device *next_siblings;
132}; 132};
133 133
134void dump_mem(char *prompt, void *p, int l); 134void pcan_dump_mem(char *prompt, void *p, int l);
135 135
136/* common timestamp management */ 136/* common timestamp management */
137void peak_usb_init_time_ref(struct peak_time_ref *time_ref, 137void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 629c4ba5d49d..e1626d92511a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -292,8 +292,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
292 if (!rec_len) { 292 if (!rec_len) {
293 netdev_err(dev->netdev, 293 netdev_err(dev->netdev,
294 "got unprocessed record in msg\n"); 294 "got unprocessed record in msg\n");
295 dump_mem("rcvd rsp msg", pum->u.rec_buffer, 295 pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer,
296 actual_length); 296 actual_length);
297 break; 297 break;
298 } 298 }
299 299
@@ -756,8 +756,8 @@ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb)
756 756
757fail: 757fail:
758 if (err) 758 if (err)
759 dump_mem("received msg", 759 pcan_dump_mem("received msg",
760 urb->transfer_buffer, urb->actual_length); 760 urb->transfer_buffer, urb->actual_length);
761 761
762 return err; 762 return err;
763} 763}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index a11af5cc4844..e4ff38949112 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -89,15 +89,6 @@ source "drivers/net/ethernet/marvell/Kconfig"
89source "drivers/net/ethernet/mellanox/Kconfig" 89source "drivers/net/ethernet/mellanox/Kconfig"
90source "drivers/net/ethernet/micrel/Kconfig" 90source "drivers/net/ethernet/micrel/Kconfig"
91source "drivers/net/ethernet/microchip/Kconfig" 91source "drivers/net/ethernet/microchip/Kconfig"
92
93config MIPS_SIM_NET
94 tristate "MIPS simulator Network device"
95 depends on MIPS_SIM
96 ---help---
97 The MIPSNET device is a simple Ethernet network device which is
98 emulated by the MIPS Simulator.
99 If you are not using a MIPSsim or are unsure, say N.
100
101source "drivers/net/ethernet/myricom/Kconfig" 92source "drivers/net/ethernet/myricom/Kconfig"
102 93
103config FEALNX 94config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 878ad32b93f2..d4473072654a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
40obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ 40obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
41obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ 41obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
42obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ 42obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
43obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
44obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ 43obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
45obj-$(CONFIG_FEALNX) += fealnx.o 44obj-$(CONFIG_FEALNX) += fealnx.o
46obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ 45obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f15e72e81ac4..4bd416b72e65 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -101,6 +101,7 @@ config TIGON3
101 tristate "Broadcom Tigon3 support" 101 tristate "Broadcom Tigon3 support"
102 depends on PCI 102 depends on PCI
103 select PHYLIB 103 select PHYLIB
104 select HWMON
104 ---help--- 105 ---help---
105 This driver supports Broadcom Tigon3 based gigabit Ethernet cards. 106 This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
106 107
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index eac25236856c..72897c47b8c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.51-0" 26#define DRV_MODULE_VERSION "1.78.00-0"
27#define DRV_MODULE_RELDATE "2012/06/18" 27#define DRV_MODULE_RELDATE "2012/09/27"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e97a7d1d06..30f04a389227 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2285,7 +2285,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2285 /* Wait for all pending SP commands to complete */ 2285 /* Wait for all pending SP commands to complete */
2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { 2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n"); 2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2288 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 2288 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2289 return -EBUSY; 2289 return -EBUSY;
2290 } 2290 }
2291 2291
@@ -2333,7 +2333,7 @@ load_error0:
2333} 2333}
2334 2334
2335/* must be called with rtnl_lock */ 2335/* must be called with rtnl_lock */
2336int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 2336int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2337{ 2337{
2338 int i; 2338 int i;
2339 bool global = false; 2339 bool global = false;
@@ -2395,7 +2395,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2395 2395
2396 /* Cleanup the chip if needed */ 2396 /* Cleanup the chip if needed */
2397 if (unload_mode != UNLOAD_RECOVERY) 2397 if (unload_mode != UNLOAD_RECOVERY)
2398 bnx2x_chip_cleanup(bp, unload_mode); 2398 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2399 else { 2399 else {
2400 /* Send the UNLOAD_REQUEST to the MCP */ 2400 /* Send the UNLOAD_REQUEST to the MCP */
2401 bnx2x_send_unload_req(bp, unload_mode); 2401 bnx2x_send_unload_req(bp, unload_mode);
@@ -2419,7 +2419,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2419 bnx2x_free_irq(bp); 2419 bnx2x_free_irq(bp);
2420 2420
2421 /* Report UNLOAD_DONE to MCP */ 2421 /* Report UNLOAD_DONE to MCP */
2422 bnx2x_send_unload_done(bp); 2422 bnx2x_send_unload_done(bp, false);
2423 } 2423 }
2424 2424
2425 /* 2425 /*
@@ -3026,8 +3026,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3026 first_bd = tx_start_bd; 3026 first_bd = tx_start_bd;
3027 3027
3028 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3028 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3029 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, 3029 SET_FLAG(tx_start_bd->general_data,
3030 mac_type); 3030 ETH_TX_START_BD_PARSE_NBDS,
3031 0);
3031 3032
3032 /* header nbd */ 3033 /* header nbd */
3033 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); 3034 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
@@ -3077,13 +3078,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3077 &pbd_e2->dst_mac_addr_lo, 3078 &pbd_e2->dst_mac_addr_lo,
3078 eth->h_dest); 3079 eth->h_dest);
3079 } 3080 }
3081
3082 SET_FLAG(pbd_e2_parsing_data,
3083 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3080 } else { 3084 } else {
3085 u16 global_data = 0;
3081 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; 3086 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3082 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 3087 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3083 /* Set PBD in checksum offload case */ 3088 /* Set PBD in checksum offload case */
3084 if (xmit_type & XMIT_CSUM) 3089 if (xmit_type & XMIT_CSUM)
3085 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); 3090 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3086 3091
3092 SET_FLAG(global_data,
3093 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3094 pbd_e1x->global_data |= cpu_to_le16(global_data);
3087 } 3095 }
3088 3096
3089 /* Setup the data pointer of the first BD of the packet */ 3097 /* Setup the data pointer of the first BD of the packet */
@@ -3770,7 +3778,7 @@ int bnx2x_reload_if_running(struct net_device *dev)
3770 if (unlikely(!netif_running(dev))) 3778 if (unlikely(!netif_running(dev)))
3771 return 0; 3779 return 0;
3772 3780
3773 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 3781 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3774 return bnx2x_nic_load(bp, LOAD_NORMAL); 3782 return bnx2x_nic_load(bp, LOAD_NORMAL);
3775} 3783}
3776 3784
@@ -3967,7 +3975,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3967 3975
3968 netif_device_detach(dev); 3976 netif_device_detach(dev);
3969 3977
3970 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 3978 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
3971 3979
3972 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 3980 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3973 3981
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfd86a55f1dc..9c5ea6c5b4c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
83 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 83 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
84 * 84 *
85 * @bp: driver handle 85 * @bp: driver handle
86 * @keep_link: true iff link should be kept up
86 */ 87 */
87void bnx2x_send_unload_done(struct bnx2x *bp); 88void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
88 89
89/** 90/**
90 * bnx2x_config_rss_pf - configure RSS parameters in a PF. 91 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
@@ -153,6 +154,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
153void bnx2x_link_set(struct bnx2x *bp); 154void bnx2x_link_set(struct bnx2x *bp);
154 155
155/** 156/**
157 * bnx2x_force_link_reset - Forces link reset, and put the PHY
158 * in reset as well.
159 *
160 * @bp: driver handle
161 */
162void bnx2x_force_link_reset(struct bnx2x *bp);
163
164/**
156 * bnx2x_link_test - query link status. 165 * bnx2x_link_test - query link status.
157 * 166 *
158 * @bp: driver handle 167 * @bp: driver handle
@@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp);
312 * 321 *
313 * @bp: driver handle 322 * @bp: driver handle
314 * @unload_mode: COMMON, PORT, FUNCTION 323 * @unload_mode: COMMON, PORT, FUNCTION
324 * @keep_link: true iff link should be kept up.
315 * 325 *
316 * - Cleanup MAC configuration. 326 * - Cleanup MAC configuration.
317 * - Closes clients. 327 * - Closes clients.
318 * - etc. 328 * - etc.
319 */ 329 */
320void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); 330void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
321 331
322/** 332/**
323 * bnx2x_acquire_hw_lock - acquire HW lock. 333 * bnx2x_acquire_hw_lock - acquire HW lock.
@@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
446bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); 456bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
447 457
448/* dev_close main block */ 458/* dev_close main block */
449int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 459int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
450 460
451/* dev_open main block */ 461/* dev_open main block */
452int bnx2x_nic_load(struct bnx2x *bp, int load_mode); 462int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 8a73374e52a7..2245c3895409 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -91,25 +91,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
91 /* 91 /*
92 * Rx COS configuration 92 * Rx COS configuration
93 * Changing PFC RX configuration . 93 * Changing PFC RX configuration .
94 * In RX COS0 will always be configured to lossy and COS1 to lossless 94 * In RX COS0 will always be configured to lossless and COS1 to lossy
95 */ 95 */
96 for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { 96 for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
97 pri_bit = 1 << i; 97 pri_bit = 1 << i;
98 98
99 if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) 99 if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
100 val |= 1 << (i * 4); 100 val |= 1 << (i * 4);
101 } 101 }
102 102
103 pfc_params.pkt_priority_to_cos = val; 103 pfc_params.pkt_priority_to_cos = val;
104 104
105 /* RX COS0 */ 105 /* RX COS0 */
106 pfc_params.llfc_low_priority_classes = 0; 106 pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
107 /* RX COS1 */ 107 /* RX COS1 */
108 pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); 108 pfc_params.llfc_high_priority_classes = 0;
109
110 /* BRB configuration */
111 pfc_params.cos0_pauseable = false;
112 pfc_params.cos1_pauseable = true;
113 109
114 bnx2x_acquire_phy_lock(bp); 110 bnx2x_acquire_phy_lock(bp);
115 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; 111 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ebf40cd7aa10..c65295dded39 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev)
905 905
906 if (netif_running(dev)) { 906 if (netif_running(dev)) {
907 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 907 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
908 bnx2x_force_link_reset(bp);
908 bnx2x_link_set(bp); 909 bnx2x_link_set(bp);
909 } 910 }
910 911
@@ -1606,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1606 return 0; 1607 return 0;
1607} 1608}
1608 1609
1609static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = { 1610static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
1610 "register_test (offline) ", 1611 "register_test (offline) ",
1611 "memory_test (offline) ", 1612 "memory_test (offline) ",
1612 "int_loopback_test (offline)", 1613 "int_loopback_test (offline)",
@@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1653 return -EOPNOTSUPP; 1654 return -EOPNOTSUPP;
1654 } 1655 }
1655 1656
1656 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); 1657 eee_cfg = bp->link_vars.eee_status;
1657 1658
1658 edata->supported = 1659 edata->supported =
1659 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> 1660 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
@@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1690 return -EOPNOTSUPP; 1691 return -EOPNOTSUPP;
1691 } 1692 }
1692 1693
1693 eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); 1694 eee_cfg = bp->link_vars.eee_status;
1694 1695
1695 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) { 1696 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
1696 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n"); 1697 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
@@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1739 /* Restart link to propogate changes */ 1740 /* Restart link to propogate changes */
1740 if (netif_running(dev)) { 1741 if (netif_running(dev)) {
1741 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1742 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1743 bnx2x_force_link_reset(bp);
1742 bnx2x_link_set(bp); 1744 bnx2x_link_set(bp);
1743 } 1745 }
1744 1746
@@ -2038,8 +2040,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
2038 u16 pkt_prod, bd_prod; 2040 u16 pkt_prod, bd_prod;
2039 struct sw_tx_bd *tx_buf; 2041 struct sw_tx_bd *tx_buf;
2040 struct eth_tx_start_bd *tx_start_bd; 2042 struct eth_tx_start_bd *tx_start_bd;
2041 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2042 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2043 dma_addr_t mapping; 2043 dma_addr_t mapping;
2044 union eth_rx_cqe *cqe; 2044 union eth_rx_cqe *cqe;
2045 u8 cqe_fp_flags, cqe_fp_type; 2045 u8 cqe_fp_flags, cqe_fp_type;
@@ -2131,21 +2131,32 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
2131 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 2131 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2132 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2132 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2133 SET_FLAG(tx_start_bd->general_data, 2133 SET_FLAG(tx_start_bd->general_data,
2134 ETH_TX_START_BD_ETH_ADDR_TYPE,
2135 UNICAST_ADDRESS);
2136 SET_FLAG(tx_start_bd->general_data,
2137 ETH_TX_START_BD_HDR_NBDS, 2134 ETH_TX_START_BD_HDR_NBDS,
2138 1); 2135 1);
2136 SET_FLAG(tx_start_bd->general_data,
2137 ETH_TX_START_BD_PARSE_NBDS,
2138 0);
2139 2139
2140 /* turn on parsing and get a BD */ 2140 /* turn on parsing and get a BD */
2141 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2141 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2142 2142
2143 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; 2143 if (CHIP_IS_E1x(bp)) {
2144 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; 2144 u16 global_data = 0;
2145 2145 struct eth_tx_parse_bd_e1x *pbd_e1x =
2146 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2146 &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2147 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 2147 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2148 2148 SET_FLAG(global_data,
2149 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2150 pbd_e1x->global_data = cpu_to_le16(global_data);
2151 } else {
2152 u32 parsing_data = 0;
2153 struct eth_tx_parse_bd_e2 *pbd_e2 =
2154 &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2155 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2156 SET_FLAG(parsing_data,
2157 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2158 pbd_e2->parsing_data = cpu_to_le32(parsing_data);
2159 }
2149 wmb(); 2160 wmb();
2150 2161
2151 txdata->tx_db.data.prod += 2; 2162 txdata->tx_db.data.prod += 2;
@@ -2263,7 +2274,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2263 if (!netif_running(bp->dev)) 2274 if (!netif_running(bp->dev))
2264 return BNX2X_EXT_LOOPBACK_FAILED; 2275 return BNX2X_EXT_LOOPBACK_FAILED;
2265 2276
2266 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2277 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
2267 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); 2278 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
2268 if (rc) { 2279 if (rc) {
2269 DP(BNX2X_MSG_ETHTOOL, 2280 DP(BNX2X_MSG_ETHTOOL,
@@ -2414,7 +2425,7 @@ static void bnx2x_self_test(struct net_device *dev,
2414 2425
2415 link_up = bp->link_vars.link_up; 2426 link_up = bp->link_vars.link_up;
2416 2427
2417 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2428 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
2418 rc = bnx2x_nic_load(bp, LOAD_DIAG); 2429 rc = bnx2x_nic_load(bp, LOAD_DIAG);
2419 if (rc) { 2430 if (rc) {
2420 etest->flags |= ETH_TEST_FL_FAILED; 2431 etest->flags |= ETH_TEST_FL_FAILED;
@@ -2446,7 +2457,7 @@ static void bnx2x_self_test(struct net_device *dev,
2446 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 2457 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2447 } 2458 }
2448 2459
2449 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2460 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
2450 2461
2451 /* restore input for TX port IF */ 2462 /* restore input for TX port IF */
2452 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); 2463 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
@@ -2534,7 +2545,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2534static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 2545static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2535{ 2546{
2536 struct bnx2x *bp = netdev_priv(dev); 2547 struct bnx2x *bp = netdev_priv(dev);
2537 int i, j, k, offset, start; 2548 int i, j, k, start;
2538 char queue_name[MAX_QUEUE_NAME_LEN+1]; 2549 char queue_name[MAX_QUEUE_NAME_LEN+1];
2539 2550
2540 switch (stringset) { 2551 switch (stringset) {
@@ -2570,13 +2581,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
2570 start = 0; 2581 start = 0;
2571 else 2582 else
2572 start = 4; 2583 start = 4;
2573 for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp)); 2584 memcpy(buf, bnx2x_tests_str_arr + start,
2574 i++, j++) { 2585 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
2575 offset = sprintf(buf+32*i, "%s",
2576 bnx2x_tests_str_arr[j]);
2577 *(buf+offset) = '\0';
2578 }
2579 break;
2580 } 2586 }
2581} 2587}
2582 2588
@@ -2940,7 +2946,7 @@ static int bnx2x_set_channels(struct net_device *dev,
2940 bnx2x_change_num_queues(bp, channels->combined_count); 2946 bnx2x_change_num_queues(bp, channels->combined_count);
2941 return 0; 2947 return 0;
2942 } 2948 }
2943 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2949 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
2944 bnx2x_change_num_queues(bp, channels->combined_count); 2950 bnx2x_change_num_queues(bp, channels->combined_count);
2945 return bnx2x_nic_load(bp, LOAD_NORMAL); 2951 return bnx2x_nic_load(bp, LOAD_NORMAL);
2946} 2952}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index bbc66ced9c25..620fe939ecfd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -88,9 +88,6 @@
88#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) 88#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
89#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ 89#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
90 (IRO[101].base + ((assertListEntry) * IRO[101].m1)) 90 (IRO[101].base + ((assertListEntry) * IRO[101].m1))
91#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
92#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
93 (IRO[108].base)
94#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ 91#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
95 (IRO[201].base + ((pfId) * IRO[201].m1)) 92 (IRO[201].base + ((pfId) * IRO[201].m1))
96#define TSTORM_FUNC_EN_OFFSET(funcId) \ 93#define TSTORM_FUNC_EN_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 76b6e65790f8..18704929e642 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1286,6 +1286,9 @@ struct drv_func_mb {
1286 #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 1286 #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
1287 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 1287 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
1288 1288
1289 #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
1290
1291 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
1289 u32 fw_mb_header; 1292 u32 fw_mb_header;
1290 #define FW_MSG_CODE_MASK 0xffff0000 1293 #define FW_MSG_CODE_MASK 0xffff0000
1291 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 1294 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
@@ -1909,6 +1912,54 @@ struct lldp_local_mib {
1909}; 1912};
1910/***END OF DCBX STRUCTURES DECLARATIONS***/ 1913/***END OF DCBX STRUCTURES DECLARATIONS***/
1911 1914
1915/***********************************************************/
1916/* Elink section */
1917/***********************************************************/
1918#define SHMEM_LINK_CONFIG_SIZE 2
1919struct shmem_lfa {
1920 u32 req_duplex;
1921 #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
1922 #define REQ_DUPLEX_PHY0_SHIFT 0
1923 #define REQ_DUPLEX_PHY1_MASK 0xffff0000
1924 #define REQ_DUPLEX_PHY1_SHIFT 16
1925 u32 req_flow_ctrl;
1926 #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
1927 #define REQ_FLOW_CTRL_PHY0_SHIFT 0
1928 #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
1929 #define REQ_FLOW_CTRL_PHY1_SHIFT 16
1930 u32 req_line_speed; /* Also determine AutoNeg */
1931 #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
1932 #define REQ_LINE_SPD_PHY0_SHIFT 0
1933 #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
1934 #define REQ_LINE_SPD_PHY1_SHIFT 16
1935 u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
1936 u32 additional_config;
1937 #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
1938 #define REQ_FC_AUTO_ADV0_SHIFT 0
1939 #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
1940 u32 lfa_sts;
1941 #define LFA_LINK_FLAP_REASON_OFFSET 0
1942 #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
1943 #define LFA_LINK_DOWN 0x1
1944 #define LFA_LOOPBACK_ENABLED 0x2
1945 #define LFA_DUPLEX_MISMATCH 0x3
1946 #define LFA_MFW_IS_TOO_OLD 0x4
1947 #define LFA_LINK_SPEED_MISMATCH 0x5
1948 #define LFA_FLOW_CTRL_MISMATCH 0x6
1949 #define LFA_SPEED_CAP_MISMATCH 0x7
1950 #define LFA_DCC_LFA_DISABLED 0x8
1951 #define LFA_EEE_MISMATCH 0x9
1952
1953 #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
1954 #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
1955
1956 #define LINK_FLAP_COUNT_OFFSET 16
1957 #define LINK_FLAP_COUNT_MASK 0x00ff0000
1958
1959 #define LFA_FLAGS_MASK 0xff000000
1960 #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
1961};
1962
1912struct ncsi_oem_fcoe_features { 1963struct ncsi_oem_fcoe_features {
1913 u32 fcoe_features1; 1964 u32 fcoe_features1;
1914 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF 1965 #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2738,8 +2789,8 @@ struct afex_stats {
2738}; 2789};
2739 2790
2740#define BCM_5710_FW_MAJOR_VERSION 7 2791#define BCM_5710_FW_MAJOR_VERSION 7
2741#define BCM_5710_FW_MINOR_VERSION 2 2792#define BCM_5710_FW_MINOR_VERSION 8
2742#define BCM_5710_FW_REVISION_VERSION 51 2793#define BCM_5710_FW_REVISION_VERSION 2
2743#define BCM_5710_FW_ENGINEERING_VERSION 0 2794#define BCM_5710_FW_ENGINEERING_VERSION 0
2744#define BCM_5710_FW_COMPILE_FLAGS 1 2795#define BCM_5710_FW_COMPILE_FLAGS 1
2745 2796
@@ -3861,10 +3912,8 @@ struct eth_rss_update_ramrod_data {
3861#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 3912#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
3862#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) 3913#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
3863#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 3914#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
3864#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) 3915#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
3865#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 3916#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
3866#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
3867#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
3868 u8 rss_result_mask; 3917 u8 rss_result_mask;
3869 u8 rss_mode; 3918 u8 rss_mode;
3870 __le32 __reserved2; 3919 __le32 __reserved2;
@@ -4080,27 +4129,29 @@ struct eth_tx_start_bd {
4080#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 4129#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
4081#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) 4130#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
4082#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 4131#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
4083#define ETH_TX_START_BD_RESREVED (0x1<<5) 4132#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
4084#define ETH_TX_START_BD_RESREVED_SHIFT 5 4133#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
4085#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) 4134#define ETH_TX_START_BD_RESREVED (0x1<<7)
4086#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 4135#define ETH_TX_START_BD_RESREVED_SHIFT 7
4087}; 4136};
4088 4137
4089/* 4138/*
4090 * Tx parsing BD structure for ETH E1/E1h 4139 * Tx parsing BD structure for ETH E1/E1h
4091 */ 4140 */
4092struct eth_tx_parse_bd_e1x { 4141struct eth_tx_parse_bd_e1x {
4093 u8 global_data; 4142 __le16 global_data;
4094#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) 4143#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
4095#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 4144#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
4096#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) 4145#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
4097#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 4146#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
4098#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) 4147#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
4099#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 4148#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
4100#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) 4149#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
4101#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 4150#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
4102#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) 4151#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
4103#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 4152#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
4153#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
4154#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
4104 u8 tcp_flags; 4155 u8 tcp_flags;
4105#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) 4156#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
4106#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 4157#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
@@ -4119,7 +4170,6 @@ struct eth_tx_parse_bd_e1x {
4119#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) 4170#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
4120#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 4171#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
4121 u8 ip_hlen_w; 4172 u8 ip_hlen_w;
4122 s8 reserved;
4123 __le16 total_hlen_w; 4173 __le16 total_hlen_w;
4124 __le16 tcp_pseudo_csum; 4174 __le16 tcp_pseudo_csum;
4125 __le16 lso_mss; 4175 __le16 lso_mss;
@@ -4138,14 +4188,16 @@ struct eth_tx_parse_bd_e2 {
4138 __le16 src_mac_addr_mid; 4188 __le16 src_mac_addr_mid;
4139 __le16 src_mac_addr_hi; 4189 __le16 src_mac_addr_hi;
4140 __le32 parsing_data; 4190 __le32 parsing_data;
4141#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) 4191#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
4142#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 4192#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
4143#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) 4193#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
4144#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 4194#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
4145#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) 4195#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
4146#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 4196#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
4147#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) 4197#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
4148#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 4198#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
4199#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
4200#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
4149}; 4201};
4150 4202
4151/* 4203/*
@@ -4913,7 +4965,8 @@ struct flow_control_configuration {
4913 * 4965 *
4914 */ 4966 */
4915struct function_start_data { 4967struct function_start_data {
4916 __le16 function_mode; 4968 u8 function_mode;
4969 u8 reserved;
4917 __le16 sd_vlan_tag; 4970 __le16 sd_vlan_tag;
4918 __le16 vif_id; 4971 __le16 vif_id;
4919 u8 path_id; 4972 u8 path_id;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 559c396d45cc..c8f10f0e8a0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -566,7 +566,7 @@ static const struct {
566 u32 e2; /* 57712 */ 566 u32 e2; /* 57712 */
567 u32 e3; /* 578xx */ 567 u32 e3; /* 578xx */
568 } reg_mask; /* Register mask (all valid bits) */ 568 } reg_mask; /* Register mask (all valid bits) */
569 char name[7]; /* Block's longest name is 6 characters long 569 char name[8]; /* Block's longest name is 7 characters long
570 * (name + suffix) 570 * (name + suffix)
571 */ 571 */
572} bnx2x_blocks_parity_data[] = { 572} bnx2x_blocks_parity_data[] = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b046beb435b2..e2e45ee5df33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -161,120 +161,6 @@
161#define EDC_MODE_LIMITING 0x0044 161#define EDC_MODE_LIMITING 0x0044
162#define EDC_MODE_PASSIVE_DAC 0x0055 162#define EDC_MODE_PASSIVE_DAC 0x0055
163 163
164/* BRB default for class 0 E2 */
165#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
166#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
167#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
168#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
169
170/* BRB thresholds for E2*/
171#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
172#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
173
174#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
175#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
176
177#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
178#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
179
180#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
181#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
182
183/* BRB default for class 0 E3A0 */
184#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
185#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
186#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
187#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
188
189/* BRB thresholds for E3A0 */
190#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
191#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
192
193#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
194#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
195
196#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
197#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
198
199#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
200#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
201
202/* BRB default for E3B0 */
203#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
204#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
205#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
206#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
207
208/* BRB thresholds for E3B0 2 port mode*/
209#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
210#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
211
212#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
213#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
214
215#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
216#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
217
218#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
219#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
220
221/* only for E3B0*/
222#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
223#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
224
225/* Lossy +Lossless GUARANTIED == GUART */
226#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
227/* Lossless +Lossless*/
228#define PFC_E3B0_2P_PAUSE_LB_GUART 236
229/* Lossy +Lossy*/
230#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
231
232/* Lossy +Lossless*/
233#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
234/* Lossless +Lossless*/
235#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
236/* Lossy +Lossy*/
237#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
238#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
239
240#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
241#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
242
243/* BRB thresholds for E3B0 4 port mode */
244#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
245#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
246
247#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
248#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
249
250#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
251#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
252
253#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
254#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
255
256/* only for E3B0*/
257#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
258#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
259#define PFC_E3B0_4P_LB_GUART 120
260
261#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
262#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
263
264#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
265#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
266
267/* Pause defines*/
268#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
269#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
270#define DEFAULT_E3B0_LB_GUART 40
271
272#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
273#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
274
275#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
276#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
277
278/* ETS defines*/ 164/* ETS defines*/
279#define DCBX_INVALID_COS (0xFF) 165#define DCBX_INVALID_COS (0xFF)
280 166
@@ -321,6 +207,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
321 return val; 207 return val;
322} 208}
323 209
210/*
211 * bnx2x_check_lfa - This function checks if link reinitialization is required,
212 * or link flap can be avoided.
213 *
214 * @params: link parameters
215 * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed
216 * condition code.
217 */
218static int bnx2x_check_lfa(struct link_params *params)
219{
220 u32 link_status, cfg_idx, lfa_mask, cfg_size;
221 u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
222 u32 saved_val, req_val, eee_status;
223 struct bnx2x *bp = params->bp;
224
225 additional_config =
226 REG_RD(bp, params->lfa_base +
227 offsetof(struct shmem_lfa, additional_config));
228
229 /* NOTE: must be first condition checked -
230 * to verify DCC bit is cleared in any case!
231 */
232 if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
233 DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
234 REG_WR(bp, params->lfa_base +
235 offsetof(struct shmem_lfa, additional_config),
236 additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
237 return LFA_DCC_LFA_DISABLED;
238 }
239
240 /* Verify that link is up */
241 link_status = REG_RD(bp, params->shmem_base +
242 offsetof(struct shmem_region,
243 port_mb[params->port].link_status));
244 if (!(link_status & LINK_STATUS_LINK_UP))
245 return LFA_LINK_DOWN;
246
247 /* Verify that loopback mode is not set */
248 if (params->loopback_mode)
249 return LFA_LOOPBACK_ENABLED;
250
251 /* Verify that MFW supports LFA */
252 if (!params->lfa_base)
253 return LFA_MFW_IS_TOO_OLD;
254
255 if (params->num_phys == 3) {
256 cfg_size = 2;
257 lfa_mask = 0xffffffff;
258 } else {
259 cfg_size = 1;
260 lfa_mask = 0xffff;
261 }
262
263 /* Compare Duplex */
264 saved_val = REG_RD(bp, params->lfa_base +
265 offsetof(struct shmem_lfa, req_duplex));
266 req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
267 if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
268 DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
269 (saved_val & lfa_mask), (req_val & lfa_mask));
270 return LFA_DUPLEX_MISMATCH;
271 }
272 /* Compare Flow Control */
273 saved_val = REG_RD(bp, params->lfa_base +
274 offsetof(struct shmem_lfa, req_flow_ctrl));
275 req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
276 if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
277 DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
278 (saved_val & lfa_mask), (req_val & lfa_mask));
279 return LFA_FLOW_CTRL_MISMATCH;
280 }
281 /* Compare Link Speed */
282 saved_val = REG_RD(bp, params->lfa_base +
283 offsetof(struct shmem_lfa, req_line_speed));
284 req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
285 if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
286 DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
287 (saved_val & lfa_mask), (req_val & lfa_mask));
288 return LFA_LINK_SPEED_MISMATCH;
289 }
290
291 for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
292 cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
293 offsetof(struct shmem_lfa,
294 speed_cap_mask[cfg_idx]));
295
296 if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
297 DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
298 cur_speed_cap_mask,
299 params->speed_cap_mask[cfg_idx]);
300 return LFA_SPEED_CAP_MISMATCH;
301 }
302 }
303
304 cur_req_fc_auto_adv =
305 REG_RD(bp, params->lfa_base +
306 offsetof(struct shmem_lfa, additional_config)) &
307 REQ_FC_AUTO_ADV_MASK;
308
309 if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
310 DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
311 cur_req_fc_auto_adv, params->req_fc_auto_adv);
312 return LFA_FLOW_CTRL_MISMATCH;
313 }
314
315 eee_status = REG_RD(bp, params->shmem2_base +
316 offsetof(struct shmem2_region,
317 eee_status[params->port]));
318
319 if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
320 (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
321 ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
322 (params->eee_mode & EEE_MODE_ADV_LPI))) {
323 DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
324 eee_status);
325 return LFA_EEE_MISMATCH;
326 }
327
328 /* LFA conditions are met */
329 return 0;
330}
324/******************************************************************/ 331/******************************************************************/
325/* EPIO/GPIO section */ 332/* EPIO/GPIO section */
326/******************************************************************/ 333/******************************************************************/
@@ -1307,93 +1314,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1307} 1314}
1308 1315
1309/******************************************************************/ 1316/******************************************************************/
1310/* EEE section */
1311/******************************************************************/
1312static u8 bnx2x_eee_has_cap(struct link_params *params)
1313{
1314 struct bnx2x *bp = params->bp;
1315
1316 if (REG_RD(bp, params->shmem2_base) <=
1317 offsetof(struct shmem2_region, eee_status[params->port]))
1318 return 0;
1319
1320 return 1;
1321}
1322
1323static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
1324{
1325 switch (nvram_mode) {
1326 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
1327 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
1328 break;
1329 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
1330 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
1331 break;
1332 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
1333 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
1334 break;
1335 default:
1336 *idle_timer = 0;
1337 break;
1338 }
1339
1340 return 0;
1341}
1342
1343static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
1344{
1345 switch (idle_timer) {
1346 case EEE_MODE_NVRAM_BALANCED_TIME:
1347 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
1348 break;
1349 case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
1350 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
1351 break;
1352 case EEE_MODE_NVRAM_LATENCY_TIME:
1353 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
1354 break;
1355 default:
1356 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
1357 break;
1358 }
1359
1360 return 0;
1361}
1362
1363static u32 bnx2x_eee_calc_timer(struct link_params *params)
1364{
1365 u32 eee_mode, eee_idle;
1366 struct bnx2x *bp = params->bp;
1367
1368 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
1369 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
1370 /* time value in eee_mode --> used directly*/
1371 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
1372 } else {
1373 /* hsi value in eee_mode --> time */
1374 if (bnx2x_eee_nvram_to_time(params->eee_mode &
1375 EEE_MODE_NVRAM_MASK,
1376 &eee_idle))
1377 return 0;
1378 }
1379 } else {
1380 /* hsi values in nvram --> time*/
1381 eee_mode = ((REG_RD(bp, params->shmem_base +
1382 offsetof(struct shmem_region, dev_info.
1383 port_feature_config[params->port].
1384 eee_power_mode)) &
1385 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
1386 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
1387
1388 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
1389 return 0;
1390 }
1391
1392 return eee_idle;
1393}
1394
1395
1396/******************************************************************/
1397/* PFC section */ 1317/* PFC section */
1398/******************************************************************/ 1318/******************************************************************/
1399static void bnx2x_update_pfc_xmac(struct link_params *params, 1319static void bnx2x_update_pfc_xmac(struct link_params *params,
@@ -1606,16 +1526,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
1606 NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); 1526 NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
1607} 1527}
1608 1528
1609static void bnx2x_umac_disable(struct link_params *params) 1529static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
1610{ 1530{
1611 u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 1531 u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1532 u32 val;
1612 struct bnx2x *bp = params->bp; 1533 struct bnx2x *bp = params->bp;
1613 if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & 1534 if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
1614 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) 1535 (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
1615 return; 1536 return;
1616 1537 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
1538 if (en)
1539 val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
1540 UMAC_COMMAND_CONFIG_REG_RX_ENA);
1541 else
1542 val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
1543 UMAC_COMMAND_CONFIG_REG_RX_ENA);
1617 /* Disable RX and TX */ 1544 /* Disable RX and TX */
1618 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0); 1545 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1619} 1546}
1620 1547
1621static void bnx2x_umac_enable(struct link_params *params, 1548static void bnx2x_umac_enable(struct link_params *params,
@@ -1671,6 +1598,16 @@ static void bnx2x_umac_enable(struct link_params *params,
1671 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); 1598 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1672 udelay(50); 1599 udelay(50);
1673 1600
1601 /* Configure UMAC for EEE */
1602 if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
1603 DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
1604 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
1605 UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
1606 REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
1607 } else {
1608 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
1609 }
1610
1674 /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ 1611 /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
1675 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, 1612 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
1676 ((params->mac_addr[2] << 24) | 1613 ((params->mac_addr[2] << 24) |
@@ -1766,11 +1703,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1766 1703
1767} 1704}
1768 1705
1769static void bnx2x_xmac_disable(struct link_params *params) 1706static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
1770{ 1707{
1771 u8 port = params->port; 1708 u8 port = params->port;
1772 struct bnx2x *bp = params->bp; 1709 struct bnx2x *bp = params->bp;
1773 u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 1710 u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1711 u32 val;
1774 1712
1775 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1713 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1776 MISC_REGISTERS_RESET_REG_2_XMAC) { 1714 MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -1784,7 +1722,12 @@ static void bnx2x_xmac_disable(struct link_params *params)
1784 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, 1722 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1785 (pfc_ctrl | (1<<1))); 1723 (pfc_ctrl | (1<<1)));
1786 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); 1724 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
1787 REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); 1725 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
1726 if (en)
1727 val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
1728 else
1729 val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1788 } 1731 }
1789} 1732}
1790 1733
@@ -2087,391 +2030,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
2087 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); 2030 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
2088} 2031}
2089 2032
2090/* PFC BRB internal port configuration params */
2091struct bnx2x_pfc_brb_threshold_val {
2092 u32 pause_xoff;
2093 u32 pause_xon;
2094 u32 full_xoff;
2095 u32 full_xon;
2096};
2097
2098struct bnx2x_pfc_brb_e3b0_val {
2099 u32 per_class_guaranty_mode;
2100 u32 lb_guarantied_hyst;
2101 u32 full_lb_xoff_th;
2102 u32 full_lb_xon_threshold;
2103 u32 lb_guarantied;
2104 u32 mac_0_class_t_guarantied;
2105 u32 mac_0_class_t_guarantied_hyst;
2106 u32 mac_1_class_t_guarantied;
2107 u32 mac_1_class_t_guarantied_hyst;
2108};
2109
2110struct bnx2x_pfc_brb_th_val {
2111 struct bnx2x_pfc_brb_threshold_val pauseable_th;
2112 struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
2113 struct bnx2x_pfc_brb_threshold_val default_class0;
2114 struct bnx2x_pfc_brb_threshold_val default_class1;
2115
2116};
2117static int bnx2x_pfc_brb_get_config_params(
2118 struct link_params *params,
2119 struct bnx2x_pfc_brb_th_val *config_val)
2120{
2121 struct bnx2x *bp = params->bp;
2122 DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
2123
2124 config_val->default_class1.pause_xoff = 0;
2125 config_val->default_class1.pause_xon = 0;
2126 config_val->default_class1.full_xoff = 0;
2127 config_val->default_class1.full_xon = 0;
2128
2129 if (CHIP_IS_E2(bp)) {
2130 /* Class0 defaults */
2131 config_val->default_class0.pause_xoff =
2132 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
2133 config_val->default_class0.pause_xon =
2134 DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
2135 config_val->default_class0.full_xoff =
2136 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
2137 config_val->default_class0.full_xon =
2138 DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
2139 /* Pause able*/
2140 config_val->pauseable_th.pause_xoff =
2141 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2142 config_val->pauseable_th.pause_xon =
2143 PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
2144 config_val->pauseable_th.full_xoff =
2145 PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
2146 config_val->pauseable_th.full_xon =
2147 PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
2148 /* Non pause able*/
2149 config_val->non_pauseable_th.pause_xoff =
2150 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2151 config_val->non_pauseable_th.pause_xon =
2152 PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2153 config_val->non_pauseable_th.full_xoff =
2154 PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2155 config_val->non_pauseable_th.full_xon =
2156 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2157 } else if (CHIP_IS_E3A0(bp)) {
2158 /* Class0 defaults */
2159 config_val->default_class0.pause_xoff =
2160 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
2161 config_val->default_class0.pause_xon =
2162 DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
2163 config_val->default_class0.full_xoff =
2164 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
2165 config_val->default_class0.full_xon =
2166 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
2167 /* Pause able */
2168 config_val->pauseable_th.pause_xoff =
2169 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2170 config_val->pauseable_th.pause_xon =
2171 PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
2172 config_val->pauseable_th.full_xoff =
2173 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
2174 config_val->pauseable_th.full_xon =
2175 PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
2176 /* Non pause able*/
2177 config_val->non_pauseable_th.pause_xoff =
2178 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2179 config_val->non_pauseable_th.pause_xon =
2180 PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2181 config_val->non_pauseable_th.full_xoff =
2182 PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2183 config_val->non_pauseable_th.full_xon =
2184 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2185 } else if (CHIP_IS_E3B0(bp)) {
2186 /* Class0 defaults */
2187 config_val->default_class0.pause_xoff =
2188 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
2189 config_val->default_class0.pause_xon =
2190 DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
2191 config_val->default_class0.full_xoff =
2192 DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
2193 config_val->default_class0.full_xon =
2194 DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
2195
2196 if (params->phy[INT_PHY].flags &
2197 FLAGS_4_PORT_MODE) {
2198 config_val->pauseable_th.pause_xoff =
2199 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2200 config_val->pauseable_th.pause_xon =
2201 PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
2202 config_val->pauseable_th.full_xoff =
2203 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2204 config_val->pauseable_th.full_xon =
2205 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
2206 /* Non pause able*/
2207 config_val->non_pauseable_th.pause_xoff =
2208 PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2209 config_val->non_pauseable_th.pause_xon =
2210 PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2211 config_val->non_pauseable_th.full_xoff =
2212 PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2213 config_val->non_pauseable_th.full_xon =
2214 PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2215 } else {
2216 config_val->pauseable_th.pause_xoff =
2217 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2218 config_val->pauseable_th.pause_xon =
2219 PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
2220 config_val->pauseable_th.full_xoff =
2221 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
2222 config_val->pauseable_th.full_xon =
2223 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
2224 /* Non pause able*/
2225 config_val->non_pauseable_th.pause_xoff =
2226 PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
2227 config_val->non_pauseable_th.pause_xon =
2228 PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
2229 config_val->non_pauseable_th.full_xoff =
2230 PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
2231 config_val->non_pauseable_th.full_xon =
2232 PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2233 }
2234 } else
2235 return -EINVAL;
2236
2237 return 0;
2238}
2239
2240static void bnx2x_pfc_brb_get_e3b0_config_params(
2241 struct link_params *params,
2242 struct bnx2x_pfc_brb_e3b0_val
2243 *e3b0_val,
2244 struct bnx2x_nig_brb_pfc_port_params *pfc_params,
2245 const u8 pfc_enabled)
2246{
2247 if (pfc_enabled && pfc_params) {
2248 e3b0_val->per_class_guaranty_mode = 1;
2249 e3b0_val->lb_guarantied_hyst = 80;
2250
2251 if (params->phy[INT_PHY].flags &
2252 FLAGS_4_PORT_MODE) {
2253 e3b0_val->full_lb_xoff_th =
2254 PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
2255 e3b0_val->full_lb_xon_threshold =
2256 PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
2257 e3b0_val->lb_guarantied =
2258 PFC_E3B0_4P_LB_GUART;
2259 e3b0_val->mac_0_class_t_guarantied =
2260 PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
2261 e3b0_val->mac_0_class_t_guarantied_hyst =
2262 PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
2263 e3b0_val->mac_1_class_t_guarantied =
2264 PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
2265 e3b0_val->mac_1_class_t_guarantied_hyst =
2266 PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
2267 } else {
2268 e3b0_val->full_lb_xoff_th =
2269 PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
2270 e3b0_val->full_lb_xon_threshold =
2271 PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
2272 e3b0_val->mac_0_class_t_guarantied_hyst =
2273 PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
2274 e3b0_val->mac_1_class_t_guarantied =
2275 PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
2276 e3b0_val->mac_1_class_t_guarantied_hyst =
2277 PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
2278
2279 if (pfc_params->cos0_pauseable !=
2280 pfc_params->cos1_pauseable) {
2281 /* Nonpauseable= Lossy + pauseable = Lossless*/
2282 e3b0_val->lb_guarantied =
2283 PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
2284 e3b0_val->mac_0_class_t_guarantied =
2285 PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
2286 } else if (pfc_params->cos0_pauseable) {
2287 /* Lossless +Lossless*/
2288 e3b0_val->lb_guarantied =
2289 PFC_E3B0_2P_PAUSE_LB_GUART;
2290 e3b0_val->mac_0_class_t_guarantied =
2291 PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
2292 } else {
2293 /* Lossy +Lossy*/
2294 e3b0_val->lb_guarantied =
2295 PFC_E3B0_2P_NON_PAUSE_LB_GUART;
2296 e3b0_val->mac_0_class_t_guarantied =
2297 PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
2298 }
2299 }
2300 } else {
2301 e3b0_val->per_class_guaranty_mode = 0;
2302 e3b0_val->lb_guarantied_hyst = 0;
2303 e3b0_val->full_lb_xoff_th =
2304 DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
2305 e3b0_val->full_lb_xon_threshold =
2306 DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
2307 e3b0_val->lb_guarantied =
2308 DEFAULT_E3B0_LB_GUART;
2309 e3b0_val->mac_0_class_t_guarantied =
2310 DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
2311 e3b0_val->mac_0_class_t_guarantied_hyst =
2312 DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
2313 e3b0_val->mac_1_class_t_guarantied =
2314 DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
2315 e3b0_val->mac_1_class_t_guarantied_hyst =
2316 DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
2317 }
2318}
2319static int bnx2x_update_pfc_brb(struct link_params *params,
2320 struct link_vars *vars,
2321 struct bnx2x_nig_brb_pfc_port_params
2322 *pfc_params)
2323{
2324 struct bnx2x *bp = params->bp;
2325 struct bnx2x_pfc_brb_th_val config_val = { {0} };
2326 struct bnx2x_pfc_brb_threshold_val *reg_th_config =
2327 &config_val.pauseable_th;
2328 struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
2329 const int set_pfc = params->feature_config_flags &
2330 FEATURE_CONFIG_PFC_ENABLED;
2331 const u8 pfc_enabled = (set_pfc && pfc_params);
2332 int bnx2x_status = 0;
2333 u8 port = params->port;
2334
2335 /* default - pause configuration */
2336 reg_th_config = &config_val.pauseable_th;
2337 bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
2338 if (bnx2x_status)
2339 return bnx2x_status;
2340
2341 if (pfc_enabled) {
2342 /* First COS */
2343 if (pfc_params->cos0_pauseable)
2344 reg_th_config = &config_val.pauseable_th;
2345 else
2346 reg_th_config = &config_val.non_pauseable_th;
2347 } else
2348 reg_th_config = &config_val.default_class0;
2349 /* The number of free blocks below which the pause signal to class 0
2350 * of MAC #n is asserted. n=0,1
2351 */
2352 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
2353 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
2354 reg_th_config->pause_xoff);
2355 /* The number of free blocks above which the pause signal to class 0
2356 * of MAC #n is de-asserted. n=0,1
2357 */
2358 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
2359 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
2360 /* The number of free blocks below which the full signal to class 0
2361 * of MAC #n is asserted. n=0,1
2362 */
2363 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
2364 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
2365 /* The number of free blocks above which the full signal to class 0
2366 * of MAC #n is de-asserted. n=0,1
2367 */
2368 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
2369 BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
2370
2371 if (pfc_enabled) {
2372 /* Second COS */
2373 if (pfc_params->cos1_pauseable)
2374 reg_th_config = &config_val.pauseable_th;
2375 else
2376 reg_th_config = &config_val.non_pauseable_th;
2377 } else
2378 reg_th_config = &config_val.default_class1;
2379 /* The number of free blocks below which the pause signal to
2380 * class 1 of MAC #n is asserted. n=0,1
2381 */
2382 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
2383 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
2384 reg_th_config->pause_xoff);
2385
2386 /* The number of free blocks above which the pause signal to
2387 * class 1 of MAC #n is de-asserted. n=0,1
2388 */
2389 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
2390 BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
2391 reg_th_config->pause_xon);
2392 /* The number of free blocks below which the full signal to
2393 * class 1 of MAC #n is asserted. n=0,1
2394 */
2395 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
2396 BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
2397 reg_th_config->full_xoff);
2398 /* The number of free blocks above which the full signal to
2399 * class 1 of MAC #n is de-asserted. n=0,1
2400 */
2401 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
2402 BRB1_REG_FULL_1_XON_THRESHOLD_0,
2403 reg_th_config->full_xon);
2404
2405 if (CHIP_IS_E3B0(bp)) {
2406 bnx2x_pfc_brb_get_e3b0_config_params(
2407 params,
2408 &e3b0_val,
2409 pfc_params,
2410 pfc_enabled);
2411
2412 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
2413 e3b0_val.per_class_guaranty_mode);
2414
2415 /* The hysteresis on the guarantied buffer space for the Lb
2416 * port before signaling XON.
2417 */
2418 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
2419 e3b0_val.lb_guarantied_hyst);
2420
2421 /* The number of free blocks below which the full signal to the
2422 * LB port is asserted.
2423 */
2424 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
2425 e3b0_val.full_lb_xoff_th);
2426 /* The number of free blocks above which the full signal to the
2427 * LB port is de-asserted.
2428 */
2429 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
2430 e3b0_val.full_lb_xon_threshold);
2431 /* The number of blocks guarantied for the MAC #n port. n=0,1
2432 */
2433
2434 /* The number of blocks guarantied for the LB port. */
2435 REG_WR(bp, BRB1_REG_LB_GUARANTIED,
2436 e3b0_val.lb_guarantied);
2437
2438 /* The number of blocks guarantied for the MAC #n port. */
2439 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
2440 2 * e3b0_val.mac_0_class_t_guarantied);
2441 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
2442 2 * e3b0_val.mac_1_class_t_guarantied);
2443 /* The number of blocks guarantied for class #t in MAC0. t=0,1
2444 */
2445 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
2446 e3b0_val.mac_0_class_t_guarantied);
2447 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
2448 e3b0_val.mac_0_class_t_guarantied);
2449 /* The hysteresis on the guarantied buffer space for class in
2450 * MAC0. t=0,1
2451 */
2452 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
2453 e3b0_val.mac_0_class_t_guarantied_hyst);
2454 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
2455 e3b0_val.mac_0_class_t_guarantied_hyst);
2456
2457 /* The number of blocks guarantied for class #t in MAC1.t=0,1
2458 */
2459 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
2460 e3b0_val.mac_1_class_t_guarantied);
2461 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
2462 e3b0_val.mac_1_class_t_guarantied);
2463 /* The hysteresis on the guarantied buffer space for class #t
2464 * in MAC1. t=0,1
2465 */
2466 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
2467 e3b0_val.mac_1_class_t_guarantied_hyst);
2468 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
2469 e3b0_val.mac_1_class_t_guarantied_hyst);
2470 }
2471
2472 return bnx2x_status;
2473}
2474
2475/****************************************************************************** 2033/******************************************************************************
2476* Description: 2034* Description:
2477* This function is needed because NIG ARB_CREDIT_WEIGHT_X are 2035* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
@@ -2529,16 +2087,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2529 port_mb[params->port].link_status), link_status); 2087 port_mb[params->port].link_status), link_status);
2530} 2088}
2531 2089
2532static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2533{
2534 struct bnx2x *bp = params->bp;
2535
2536 if (bnx2x_eee_has_cap(params))
2537 REG_WR(bp, params->shmem2_base +
2538 offsetof(struct shmem2_region,
2539 eee_status[params->port]), eee_status);
2540}
2541
2542static void bnx2x_update_pfc_nig(struct link_params *params, 2090static void bnx2x_update_pfc_nig(struct link_params *params,
2543 struct link_vars *vars, 2091 struct link_vars *vars,
2544 struct bnx2x_nig_brb_pfc_port_params *nig_params) 2092 struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2658,11 +2206,6 @@ int bnx2x_update_pfc(struct link_params *params,
2658 /* Update NIG params */ 2206 /* Update NIG params */
2659 bnx2x_update_pfc_nig(params, vars, pfc_params); 2207 bnx2x_update_pfc_nig(params, vars, pfc_params);
2660 2208
2661 /* Update BRB params */
2662 bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
2663 if (bnx2x_status)
2664 return bnx2x_status;
2665
2666 if (!vars->link_up) 2209 if (!vars->link_up)
2667 return bnx2x_status; 2210 return bnx2x_status;
2668 2211
@@ -2827,16 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2827 2370
2828static int bnx2x_bmac_enable(struct link_params *params, 2371static int bnx2x_bmac_enable(struct link_params *params,
2829 struct link_vars *vars, 2372 struct link_vars *vars,
2830 u8 is_lb) 2373 u8 is_lb, u8 reset_bmac)
2831{ 2374{
2832 int rc = 0; 2375 int rc = 0;
2833 u8 port = params->port; 2376 u8 port = params->port;
2834 struct bnx2x *bp = params->bp; 2377 struct bnx2x *bp = params->bp;
2835 u32 val; 2378 u32 val;
2836 /* Reset and unreset the BigMac */ 2379 /* Reset and unreset the BigMac */
2837 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2380 if (reset_bmac) {
2838 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2381 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2839 usleep_range(1000, 2000); 2382 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2383 usleep_range(1000, 2000);
2384 }
2840 2385
2841 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 2386 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2842 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2387 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2868,37 +2413,28 @@ static int bnx2x_bmac_enable(struct link_params *params,
2868 return rc; 2413 return rc;
2869} 2414}
2870 2415
2871static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 2416static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
2872{ 2417{
2873 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 2418 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2874 NIG_REG_INGRESS_BMAC0_MEM; 2419 NIG_REG_INGRESS_BMAC0_MEM;
2875 u32 wb_data[2]; 2420 u32 wb_data[2];
2876 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 2421 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
2877 2422
2423 if (CHIP_IS_E2(bp))
2424 bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
2425 else
2426 bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
2878 /* Only if the bmac is out of reset */ 2427 /* Only if the bmac is out of reset */
2879 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 2428 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2880 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && 2429 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
2881 nig_bmac_enable) { 2430 nig_bmac_enable) {
2882 2431 /* Clear Rx Enable bit in BMAC_CONTROL register */
2883 if (CHIP_IS_E2(bp)) { 2432 REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
2884 /* Clear Rx Enable bit in BMAC_CONTROL register */ 2433 if (en)
2885 REG_RD_DMAE(bp, bmac_addr + 2434 wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
2886 BIGMAC2_REGISTER_BMAC_CONTROL, 2435 else
2887 wb_data, 2);
2888 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2889 REG_WR_DMAE(bp, bmac_addr +
2890 BIGMAC2_REGISTER_BMAC_CONTROL,
2891 wb_data, 2);
2892 } else {
2893 /* Clear Rx Enable bit in BMAC_CONTROL register */
2894 REG_RD_DMAE(bp, bmac_addr +
2895 BIGMAC_REGISTER_BMAC_CONTROL,
2896 wb_data, 2);
2897 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 2436 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
2898 REG_WR_DMAE(bp, bmac_addr + 2437 REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
2899 BIGMAC_REGISTER_BMAC_CONTROL,
2900 wb_data, 2);
2901 }
2902 usleep_range(1000, 2000); 2438 usleep_range(1000, 2000);
2903 } 2439 }
2904} 2440}
@@ -3233,6 +2769,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3233 EMAC_MDIO_STATUS_10MB); 2769 EMAC_MDIO_STATUS_10MB);
3234 return rc; 2770 return rc;
3235} 2771}
2772
2773/******************************************************************/
2774/* EEE section */
2775/******************************************************************/
2776static u8 bnx2x_eee_has_cap(struct link_params *params)
2777{
2778 struct bnx2x *bp = params->bp;
2779
2780 if (REG_RD(bp, params->shmem2_base) <=
2781 offsetof(struct shmem2_region, eee_status[params->port]))
2782 return 0;
2783
2784 return 1;
2785}
2786
2787static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
2788{
2789 switch (nvram_mode) {
2790 case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
2791 *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
2792 break;
2793 case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
2794 *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
2795 break;
2796 case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
2797 *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
2798 break;
2799 default:
2800 *idle_timer = 0;
2801 break;
2802 }
2803
2804 return 0;
2805}
2806
2807static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
2808{
2809 switch (idle_timer) {
2810 case EEE_MODE_NVRAM_BALANCED_TIME:
2811 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
2812 break;
2813 case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
2814 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
2815 break;
2816 case EEE_MODE_NVRAM_LATENCY_TIME:
2817 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
2818 break;
2819 default:
2820 *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
2821 break;
2822 }
2823
2824 return 0;
2825}
2826
2827static u32 bnx2x_eee_calc_timer(struct link_params *params)
2828{
2829 u32 eee_mode, eee_idle;
2830 struct bnx2x *bp = params->bp;
2831
2832 if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
2833 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
2834 /* time value in eee_mode --> used directly*/
2835 eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
2836 } else {
2837 /* hsi value in eee_mode --> time */
2838 if (bnx2x_eee_nvram_to_time(params->eee_mode &
2839 EEE_MODE_NVRAM_MASK,
2840 &eee_idle))
2841 return 0;
2842 }
2843 } else {
2844 /* hsi values in nvram --> time*/
2845 eee_mode = ((REG_RD(bp, params->shmem_base +
2846 offsetof(struct shmem_region, dev_info.
2847 port_feature_config[params->port].
2848 eee_power_mode)) &
2849 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
2850 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
2851
2852 if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
2853 return 0;
2854 }
2855
2856 return eee_idle;
2857}
2858
2859static int bnx2x_eee_set_timers(struct link_params *params,
2860 struct link_vars *vars)
2861{
2862 u32 eee_idle = 0, eee_mode;
2863 struct bnx2x *bp = params->bp;
2864
2865 eee_idle = bnx2x_eee_calc_timer(params);
2866
2867 if (eee_idle) {
2868 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
2869 eee_idle);
2870 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
2871 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
2872 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
2873 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
2874 return -EINVAL;
2875 }
2876
2877 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
2878 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
2879 /* eee_idle in 1u --> eee_status in 16u */
2880 eee_idle >>= 4;
2881 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
2882 SHMEM_EEE_TIME_OUTPUT_BIT;
2883 } else {
2884 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
2885 return -EINVAL;
2886 vars->eee_status |= eee_mode;
2887 }
2888
2889 return 0;
2890}
2891
2892static int bnx2x_eee_initial_config(struct link_params *params,
2893 struct link_vars *vars, u8 mode)
2894{
2895 vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
2896
2897 /* Propogate params' bits --> vars (for migration exposure) */
2898 if (params->eee_mode & EEE_MODE_ENABLE_LPI)
2899 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
2900 else
2901 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
2902
2903 if (params->eee_mode & EEE_MODE_ADV_LPI)
2904 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
2905 else
2906 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
2907
2908 return bnx2x_eee_set_timers(params, vars);
2909}
2910
2911static int bnx2x_eee_disable(struct bnx2x_phy *phy,
2912 struct link_params *params,
2913 struct link_vars *vars)
2914{
2915 struct bnx2x *bp = params->bp;
2916
2917 /* Make Certain LPI is disabled */
2918 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
2919
2920 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
2921
2922 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
2923
2924 return 0;
2925}
2926
2927static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
2928 struct link_params *params,
2929 struct link_vars *vars, u8 modes)
2930{
2931 struct bnx2x *bp = params->bp;
2932 u16 val = 0;
2933
2934 /* Mask events preventing LPI generation */
2935 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
2936
2937 if (modes & SHMEM_EEE_10G_ADV) {
2938 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
2939 val |= 0x8;
2940 }
2941 if (modes & SHMEM_EEE_1G_ADV) {
2942 DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
2943 val |= 0x4;
2944 }
2945
2946 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
2947
2948 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
2949 vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
2950
2951 return 0;
2952}
2953
2954static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
2955{
2956 struct bnx2x *bp = params->bp;
2957
2958 if (bnx2x_eee_has_cap(params))
2959 REG_WR(bp, params->shmem2_base +
2960 offsetof(struct shmem2_region,
2961 eee_status[params->port]), eee_status);
2962}
2963
2964static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
2965 struct link_params *params,
2966 struct link_vars *vars)
2967{
2968 struct bnx2x *bp = params->bp;
2969 u16 adv = 0, lp = 0;
2970 u32 lp_adv = 0;
2971 u8 neg = 0;
2972
2973 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
2974 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
2975
2976 if (lp & 0x2) {
2977 lp_adv |= SHMEM_EEE_100M_ADV;
2978 if (adv & 0x2) {
2979 if (vars->line_speed == SPEED_100)
2980 neg = 1;
2981 DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
2982 }
2983 }
2984 if (lp & 0x14) {
2985 lp_adv |= SHMEM_EEE_1G_ADV;
2986 if (adv & 0x14) {
2987 if (vars->line_speed == SPEED_1000)
2988 neg = 1;
2989 DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
2990 }
2991 }
2992 if (lp & 0x68) {
2993 lp_adv |= SHMEM_EEE_10G_ADV;
2994 if (adv & 0x68) {
2995 if (vars->line_speed == SPEED_10000)
2996 neg = 1;
2997 DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
2998 }
2999 }
3000
3001 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
3002 vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
3003
3004 if (neg) {
3005 DP(NETIF_MSG_LINK, "EEE is active\n");
3006 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
3007 }
3008
3009}
3010
3236/******************************************************************/ 3011/******************************************************************/
3237/* BSC access functions from E3 */ 3012/* BSC access functions from E3 */
3238/******************************************************************/ 3013/******************************************************************/
@@ -3754,6 +3529,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3754 * init configuration, and set/clear SGMII flag. Internal 3529 * init configuration, and set/clear SGMII flag. Internal
3755 * phy init is done purely in phy_init stage. 3530 * phy init is done purely in phy_init stage.
3756 */ 3531 */
3532
3533static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3534 struct link_params *params)
3535{
3536 struct bnx2x *bp = params->bp;
3537
3538 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
3539 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3540 MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
3541 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3542 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
3543}
3544
3757static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3545static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3758 struct link_params *params, 3546 struct link_params *params,
3759 struct link_vars *vars) { 3547 struct link_vars *vars) {
@@ -4013,13 +3801,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
4013 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3801 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4014 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); 3802 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
4015 3803
4016 /* Enable LPI pass through */ 3804 bnx2x_warpcore_set_lpi_passthrough(phy, params);
4017 DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
4018 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4019 MDIO_WC_REG_EEE_COMBO_CONTROL0,
4020 0x7c);
4021 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4022 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
4023 3805
4024 /* 10G XFI Full Duplex */ 3806 /* 10G XFI Full Duplex */
4025 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3807 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4116,6 +3898,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
4116 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3898 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4117 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); 3899 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
4118 3900
3901 bnx2x_warpcore_set_lpi_passthrough(phy, params);
3902
4119 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { 3903 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
4120 /* SGMII Autoneg */ 3904 /* SGMII Autoneg */
4121 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3905 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4409,7 +4193,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4409 "serdes_net_if = 0x%x\n", 4193 "serdes_net_if = 0x%x\n",
4410 vars->line_speed, serdes_net_if); 4194 vars->line_speed, serdes_net_if);
4411 bnx2x_set_aer_mmd(params, phy); 4195 bnx2x_set_aer_mmd(params, phy);
4412 4196 bnx2x_warpcore_reset_lane(bp, phy, 1);
4413 vars->phy_flags |= PHY_XGXS_FLAG; 4197 vars->phy_flags |= PHY_XGXS_FLAG;
4414 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || 4198 if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
4415 (phy->req_line_speed && 4199 (phy->req_line_speed &&
@@ -4718,6 +4502,10 @@ void bnx2x_link_status_update(struct link_params *params,
4718 vars->link_status = REG_RD(bp, params->shmem_base + 4502 vars->link_status = REG_RD(bp, params->shmem_base +
4719 offsetof(struct shmem_region, 4503 offsetof(struct shmem_region,
4720 port_mb[port].link_status)); 4504 port_mb[port].link_status));
4505 if (bnx2x_eee_has_cap(params))
4506 vars->eee_status = REG_RD(bp, params->shmem2_base +
4507 offsetof(struct shmem2_region,
4508 eee_status[params->port]));
4721 4509
4722 vars->phy_flags = PHY_XGXS_FLAG; 4510 vars->phy_flags = PHY_XGXS_FLAG;
4723 bnx2x_sync_link(params, vars); 4511 bnx2x_sync_link(params, vars);
@@ -6530,25 +6318,21 @@ static int bnx2x_update_link_down(struct link_params *params,
6530 usleep_range(10000, 20000); 6318 usleep_range(10000, 20000);
6531 /* Reset BigMac/Xmac */ 6319 /* Reset BigMac/Xmac */
6532 if (CHIP_IS_E1x(bp) || 6320 if (CHIP_IS_E1x(bp) ||
6533 CHIP_IS_E2(bp)) { 6321 CHIP_IS_E2(bp))
6534 bnx2x_bmac_rx_disable(bp, params->port); 6322 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
6535 REG_WR(bp, GRCBASE_MISC + 6323
6536 MISC_REGISTERS_RESET_REG_2_CLEAR,
6537 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
6538 }
6539 if (CHIP_IS_E3(bp)) { 6324 if (CHIP_IS_E3(bp)) {
6540 /* Prevent LPI Generation by chip */ 6325 /* Prevent LPI Generation by chip */
6541 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 6326 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
6542 0); 6327 0);
6543 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
6544 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), 6328 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
6545 0); 6329 0);
6546 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | 6330 vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
6547 SHMEM_EEE_ACTIVE_BIT); 6331 SHMEM_EEE_ACTIVE_BIT);
6548 6332
6549 bnx2x_update_mng_eee(params, vars->eee_status); 6333 bnx2x_update_mng_eee(params, vars->eee_status);
6550 bnx2x_xmac_disable(params); 6334 bnx2x_set_xmac_rxtx(params, 0);
6551 bnx2x_umac_disable(params); 6335 bnx2x_set_umac_rxtx(params, 0);
6552 } 6336 }
6553 6337
6554 return 0; 6338 return 0;
@@ -6600,7 +6384,7 @@ static int bnx2x_update_link_up(struct link_params *params,
6600 if ((CHIP_IS_E1x(bp) || 6384 if ((CHIP_IS_E1x(bp) ||
6601 CHIP_IS_E2(bp))) { 6385 CHIP_IS_E2(bp))) {
6602 if (link_10g) { 6386 if (link_10g) {
6603 if (bnx2x_bmac_enable(params, vars, 0) == 6387 if (bnx2x_bmac_enable(params, vars, 0, 1) ==
6604 -ESRCH) { 6388 -ESRCH) {
6605 DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); 6389 DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
6606 vars->link_up = 0; 6390 vars->link_up = 0;
@@ -7207,6 +6991,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
7207 msleep(500); 6991 msleep(500);
7208} 6992}
7209 6993
6994static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
6995 struct link_params *params,
6996 u32 action)
6997{
6998 struct bnx2x *bp = params->bp;
6999 switch (action) {
7000 case PHY_INIT:
7001 /* Enable LASI */
7002 bnx2x_cl45_write(bp, phy,
7003 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
7004 bnx2x_cl45_write(bp, phy,
7005 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
7006 break;
7007 }
7008}
7009
7210static int bnx2x_8073_config_init(struct bnx2x_phy *phy, 7010static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7211 struct link_params *params, 7011 struct link_params *params,
7212 struct link_vars *vars) 7012 struct link_vars *vars)
@@ -7227,12 +7027,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7227 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 7027 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
7228 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 7028 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
7229 7029
7230 /* Enable LASI */ 7030 bnx2x_8073_specific_func(phy, params, PHY_INIT);
7231 bnx2x_cl45_write(bp, phy,
7232 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
7233 bnx2x_cl45_write(bp, phy,
7234 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
7235
7236 bnx2x_8073_set_pause_cl37(params, phy, vars); 7031 bnx2x_8073_set_pause_cl37(params, phy, vars);
7237 7032
7238 bnx2x_cl45_read(bp, phy, 7033 bnx2x_cl45_read(bp, phy,
@@ -8267,7 +8062,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
8267 u32 action) 8062 u32 action)
8268{ 8063{
8269 struct bnx2x *bp = params->bp; 8064 struct bnx2x *bp = params->bp;
8270 8065 u16 val;
8271 switch (action) { 8066 switch (action) {
8272 case DISABLE_TX: 8067 case DISABLE_TX:
8273 bnx2x_sfp_set_transmitter(params, phy, 0); 8068 bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -8276,6 +8071,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
8276 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) 8071 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
8277 bnx2x_sfp_set_transmitter(params, phy, 1); 8072 bnx2x_sfp_set_transmitter(params, phy, 1);
8278 break; 8073 break;
8074 case PHY_INIT:
8075 bnx2x_cl45_write(bp, phy,
8076 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
8077 (1<<2) | (1<<5));
8078 bnx2x_cl45_write(bp, phy,
8079 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
8080 0);
8081 bnx2x_cl45_write(bp, phy,
8082 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
8083 /* Make MOD_ABS give interrupt on change */
8084 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
8085 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8086 &val);
8087 val |= (1<<12);
8088 if (phy->flags & FLAGS_NOC)
8089 val |= (3<<5);
8090 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
8091 * status which reflect SFP+ module over-current
8092 */
8093 if (!(phy->flags & FLAGS_NOC))
8094 val &= 0xff8f; /* Reset bits 4-6 */
8095 bnx2x_cl45_write(bp, phy,
8096 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8097 val);
8098
8099 /* Set 2-wire transfer rate of SFP+ module EEPROM
8100 * to 100Khz since some DACs(direct attached cables) do
8101 * not work at 400Khz.
8102 */
8103 bnx2x_cl45_write(bp, phy,
8104 MDIO_PMA_DEVAD,
8105 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
8106 0xa001);
8107 break;
8279 default: 8108 default:
8280 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 8109 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
8281 action); 8110 action);
@@ -9058,28 +8887,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9058 struct link_vars *vars) 8887 struct link_vars *vars)
9059{ 8888{
9060 u32 tx_en_mode; 8889 u32 tx_en_mode;
9061 u16 tmp1, val, mod_abs, tmp2; 8890 u16 tmp1, mod_abs, tmp2;
9062 u16 rx_alarm_ctrl_val;
9063 u16 lasi_ctrl_val;
9064 struct bnx2x *bp = params->bp; 8891 struct bnx2x *bp = params->bp;
9065 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ 8892 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
9066 8893
9067 bnx2x_wait_reset_complete(bp, phy, params); 8894 bnx2x_wait_reset_complete(bp, phy, params);
9068 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
9069 /* Should be 0x6 to enable XS on Tx side. */
9070 lasi_ctrl_val = 0x0006;
9071 8895
9072 DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); 8896 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
9073 /* Enable LASI */
9074 bnx2x_cl45_write(bp, phy,
9075 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
9076 rx_alarm_ctrl_val);
9077 bnx2x_cl45_write(bp, phy,
9078 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
9079 0);
9080 bnx2x_cl45_write(bp, phy,
9081 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
9082 8897
8898 bnx2x_8727_specific_func(phy, params, PHY_INIT);
9083 /* Initially configure MOD_ABS to interrupt when module is 8899 /* Initially configure MOD_ABS to interrupt when module is
9084 * presence( bit 8) 8900 * presence( bit 8)
9085 */ 8901 */
@@ -9095,25 +8911,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9095 bnx2x_cl45_write(bp, phy, 8911 bnx2x_cl45_write(bp, phy,
9096 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 8912 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9097 8913
9098
9099 /* Enable/Disable PHY transmitter output */ 8914 /* Enable/Disable PHY transmitter output */
9100 bnx2x_set_disable_pmd_transmit(params, phy, 0); 8915 bnx2x_set_disable_pmd_transmit(params, phy, 0);
9101 8916
9102 /* Make MOD_ABS give interrupt on change */
9103 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
9104 &val);
9105 val |= (1<<12);
9106 if (phy->flags & FLAGS_NOC)
9107 val |= (3<<5);
9108
9109 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9110 * status which reflect SFP+ module over-current
9111 */
9112 if (!(phy->flags & FLAGS_NOC))
9113 val &= 0xff8f; /* Reset bits 4-6 */
9114 bnx2x_cl45_write(bp, phy,
9115 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
9116
9117 bnx2x_8727_power_module(bp, phy, 1); 8917 bnx2x_8727_power_module(bp, phy, 1);
9118 8918
9119 bnx2x_cl45_read(bp, phy, 8919 bnx2x_cl45_read(bp, phy,
@@ -9123,13 +8923,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9123 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); 8923 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
9124 8924
9125 bnx2x_8727_config_speed(phy, params); 8925 bnx2x_8727_config_speed(phy, params);
9126 /* Set 2-wire transfer rate of SFP+ module EEPROM 8926
9127 * to 100Khz since some DACs(direct attached cables) do
9128 * not work at 400Khz.
9129 */
9130 bnx2x_cl45_write(bp, phy,
9131 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
9132 0xa001);
9133 8927
9134 /* Set TX PreEmphasis if needed */ 8928 /* Set TX PreEmphasis if needed */
9135 if ((params->feature_config_flags & 8929 if ((params->feature_config_flags &
@@ -9558,6 +9352,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9558 0xFFFB, 0xFFFD); 9352 0xFFFB, 0xFFFD);
9559} 9353}
9560 9354
9355static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9356 struct link_params *params,
9357 u32 action)
9358{
9359 struct bnx2x *bp = params->bp;
9360 switch (action) {
9361 case PHY_INIT:
9362 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9363 /* Save spirom version */
9364 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9365 }
9366 /* This phy uses the NIG latch mechanism since link indication
9367 * arrives through its LED4 and not via its LASI signal, so we
9368 * get steady signal instead of clear on read
9369 */
9370 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
9371 1 << NIG_LATCH_BC_ENABLE_MI_INT);
9372
9373 bnx2x_848xx_set_led(bp, phy);
9374 break;
9375 }
9376}
9377
9561static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, 9378static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9562 struct link_params *params, 9379 struct link_params *params,
9563 struct link_vars *vars) 9380 struct link_vars *vars)
@@ -9565,22 +9382,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9565 struct bnx2x *bp = params->bp; 9382 struct bnx2x *bp = params->bp;
9566 u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; 9383 u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
9567 9384
9568 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9385 bnx2x_848xx_specific_func(phy, params, PHY_INIT);
9569 /* Save spirom version */
9570 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9571 }
9572 /* This phy uses the NIG latch mechanism since link indication
9573 * arrives through its LED4 and not via its LASI signal, so we
9574 * get steady signal instead of clear on read
9575 */
9576 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
9577 1 << NIG_LATCH_BC_ENABLE_MI_INT);
9578
9579 bnx2x_cl45_write(bp, phy, 9386 bnx2x_cl45_write(bp, phy,
9580 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); 9387 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
9581 9388
9582 bnx2x_848xx_set_led(bp, phy);
9583
9584 /* set 1000 speed advertisement */ 9389 /* set 1000 speed advertisement */
9585 bnx2x_cl45_read(bp, phy, 9390 bnx2x_cl45_read(bp, phy,
9586 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9391 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
@@ -9887,39 +9692,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
9887 return 0; 9692 return 0;
9888} 9693}
9889 9694
9890static int bnx2x_8483x_eee_timers(struct link_params *params,
9891 struct link_vars *vars)
9892{
9893 u32 eee_idle = 0, eee_mode;
9894 struct bnx2x *bp = params->bp;
9895
9896 eee_idle = bnx2x_eee_calc_timer(params);
9897
9898 if (eee_idle) {
9899 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
9900 eee_idle);
9901 } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
9902 (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
9903 (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
9904 DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
9905 return -EINVAL;
9906 }
9907
9908 vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
9909 if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
9910 /* eee_idle in 1u --> eee_status in 16u */
9911 eee_idle >>= 4;
9912 vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
9913 SHMEM_EEE_TIME_OUTPUT_BIT;
9914 } else {
9915 if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
9916 return -EINVAL;
9917 vars->eee_status |= eee_mode;
9918 }
9919
9920 return 0;
9921}
9922
9923static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, 9695static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9924 struct link_params *params, 9696 struct link_params *params,
9925 struct link_vars *vars) 9697 struct link_vars *vars)
@@ -9930,10 +9702,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9930 9702
9931 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 9703 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
9932 9704
9933 /* Make Certain LPI is disabled */
9934 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
9935 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
9936
9937 /* Prevent Phy from working in EEE and advertising it */ 9705 /* Prevent Phy from working in EEE and advertising it */
9938 rc = bnx2x_84833_cmd_hdlr(phy, params, 9706 rc = bnx2x_84833_cmd_hdlr(phy, params,
9939 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); 9707 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
@@ -9942,10 +9710,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
9942 return rc; 9710 return rc;
9943 } 9711 }
9944 9712
9945 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0); 9713 return bnx2x_eee_disable(phy, params, vars);
9946 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9947
9948 return 0;
9949} 9714}
9950 9715
9951static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, 9716static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
@@ -9956,8 +9721,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
9956 struct bnx2x *bp = params->bp; 9721 struct bnx2x *bp = params->bp;
9957 u16 cmd_args = 1; 9722 u16 cmd_args = 1;
9958 9723
9959 DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
9960
9961 rc = bnx2x_84833_cmd_hdlr(phy, params, 9724 rc = bnx2x_84833_cmd_hdlr(phy, params,
9962 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); 9725 PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
9963 if (rc) { 9726 if (rc) {
@@ -9965,15 +9728,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
9965 return rc; 9728 return rc;
9966 } 9729 }
9967 9730
9968 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8); 9731 return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
9969
9970 /* Mask events preventing LPI generation */
9971 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
9972
9973 vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
9974 vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
9975
9976 return 0;
9977} 9732}
9978 9733
9979#define PHY84833_CONSTANT_LATENCY 1193 9734#define PHY84833_CONSTANT_LATENCY 1193
@@ -10105,22 +9860,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10105 MDIO_84833_TOP_CFG_FW_REV, &val); 9860 MDIO_84833_TOP_CFG_FW_REV, &val);
10106 9861
10107 /* Configure EEE support */ 9862 /* Configure EEE support */
10108 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) { 9863 if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
10109 phy->flags |= FLAGS_EEE_10GBT; 9864 (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
10110 vars->eee_status |= SHMEM_EEE_10G_ADV << 9865 bnx2x_eee_has_cap(params)) {
10111 SHMEM_EEE_SUPPORTED_SHIFT; 9866 rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
10112 /* Propogate params' bits --> vars (for migration exposure) */
10113 if (params->eee_mode & EEE_MODE_ENABLE_LPI)
10114 vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
10115 else
10116 vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
10117
10118 if (params->eee_mode & EEE_MODE_ADV_LPI)
10119 vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
10120 else
10121 vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
10122
10123 rc = bnx2x_8483x_eee_timers(params, vars);
10124 if (rc) { 9867 if (rc) {
10125 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); 9868 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
10126 bnx2x_8483x_disable_eee(phy, params, vars); 9869 bnx2x_8483x_disable_eee(phy, params, vars);
@@ -10139,7 +9882,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10139 return rc; 9882 return rc;
10140 } 9883 }
10141 } else { 9884 } else {
10142 phy->flags &= ~FLAGS_EEE_10GBT;
10143 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 9885 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10144 } 9886 }
10145 9887
@@ -10278,29 +10020,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10278 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 10020 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
10279 10021
10280 /* Determine if EEE was negotiated */ 10022 /* Determine if EEE was negotiated */
10281 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10023 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10282 u32 eee_shmem = 0; 10024 bnx2x_eee_an_resolve(phy, params, vars);
10283
10284 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10285 MDIO_AN_REG_EEE_ADV, &val1);
10286 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10287 MDIO_AN_REG_LP_EEE_ADV, &val2);
10288 if ((val1 & val2) & 0x8) {
10289 DP(NETIF_MSG_LINK, "EEE negotiated\n");
10290 vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
10291 }
10292
10293 if (val2 & 0x12)
10294 eee_shmem |= SHMEM_EEE_100M_ADV;
10295 if (val2 & 0x4)
10296 eee_shmem |= SHMEM_EEE_1G_ADV;
10297 if (val2 & 0x68)
10298 eee_shmem |= SHMEM_EEE_10G_ADV;
10299
10300 vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
10301 vars->eee_status |= (eee_shmem <<
10302 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
10303 }
10304 } 10025 }
10305 10026
10306 return link_up; 10027 return link_up;
@@ -10569,6 +10290,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10569/******************************************************************/ 10290/******************************************************************/
10570/* 54618SE PHY SECTION */ 10291/* 54618SE PHY SECTION */
10571/******************************************************************/ 10292/******************************************************************/
10293static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
10294 struct link_params *params,
10295 u32 action)
10296{
10297 struct bnx2x *bp = params->bp;
10298 u16 temp;
10299 switch (action) {
10300 case PHY_INIT:
10301 /* Configure LED4: set to INTR (0x6). */
10302 /* Accessing shadow register 0xe. */
10303 bnx2x_cl22_write(bp, phy,
10304 MDIO_REG_GPHY_SHADOW,
10305 MDIO_REG_GPHY_SHADOW_LED_SEL2);
10306 bnx2x_cl22_read(bp, phy,
10307 MDIO_REG_GPHY_SHADOW,
10308 &temp);
10309 temp &= ~(0xf << 4);
10310 temp |= (0x6 << 4);
10311 bnx2x_cl22_write(bp, phy,
10312 MDIO_REG_GPHY_SHADOW,
10313 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10314 /* Configure INTR based on link status change. */
10315 bnx2x_cl22_write(bp, phy,
10316 MDIO_REG_INTR_MASK,
10317 ~MDIO_REG_INTR_MASK_LINK_STATUS);
10318 break;
10319 }
10320}
10321
10572static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 10322static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10573 struct link_params *params, 10323 struct link_params *params,
10574 struct link_vars *vars) 10324 struct link_vars *vars)
@@ -10606,24 +10356,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10606 /* Wait for GPHY to reset */ 10356 /* Wait for GPHY to reset */
10607 msleep(50); 10357 msleep(50);
10608 10358
10609 /* Configure LED4: set to INTR (0x6). */
10610 /* Accessing shadow register 0xe. */
10611 bnx2x_cl22_write(bp, phy,
10612 MDIO_REG_GPHY_SHADOW,
10613 MDIO_REG_GPHY_SHADOW_LED_SEL2);
10614 bnx2x_cl22_read(bp, phy,
10615 MDIO_REG_GPHY_SHADOW,
10616 &temp);
10617 temp &= ~(0xf << 4);
10618 temp |= (0x6 << 4);
10619 bnx2x_cl22_write(bp, phy,
10620 MDIO_REG_GPHY_SHADOW,
10621 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10622 /* Configure INTR based on link status change. */
10623 bnx2x_cl22_write(bp, phy,
10624 MDIO_REG_INTR_MASK,
10625 ~MDIO_REG_INTR_MASK_LINK_STATUS);
10626 10359
10360 bnx2x_54618se_specific_func(phy, params, PHY_INIT);
10627 /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ 10361 /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
10628 bnx2x_cl22_write(bp, phy, 10362 bnx2x_cl22_write(bp, phy,
10629 MDIO_REG_GPHY_SHADOW, 10363 MDIO_REG_GPHY_SHADOW,
@@ -10728,28 +10462,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10728 DP(NETIF_MSG_LINK, "Setting 10M force\n"); 10462 DP(NETIF_MSG_LINK, "Setting 10M force\n");
10729 } 10463 }
10730 10464
10731 /* Check if we should turn on Auto-GrEEEn */ 10465 if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
10732 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); 10466 int rc;
10733 if (temp == MDIO_REG_GPHY_ID_54618SE) { 10467
10734 if (params->feature_config_flags & 10468 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
10735 FEATURE_CONFIG_AUTOGREEEN_ENABLED) { 10469 MDIO_REG_GPHY_EXP_ACCESS_TOP |
10736 temp = 6; 10470 MDIO_REG_GPHY_EXP_TOP_2K_BUF);
10737 DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); 10471 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
10472 temp &= 0xfffe;
10473 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
10474
10475 rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
10476 if (rc) {
10477 DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
10478 bnx2x_eee_disable(phy, params, vars);
10479 } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
10480 (phy->req_duplex == DUPLEX_FULL) &&
10481 (bnx2x_eee_calc_timer(params) ||
10482 !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
10483 /* Need to advertise EEE only when requested,
10484 * and either no LPI assertion was requested,
10485 * or it was requested and a valid timer was set.
10486 * Also notice full duplex is required for EEE.
10487 */
10488 bnx2x_eee_advertise(phy, params, vars,
10489 SHMEM_EEE_1G_ADV);
10738 } else { 10490 } else {
10739 temp = 0; 10491 DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
10740 DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); 10492 bnx2x_eee_disable(phy, params, vars);
10493 }
10494 } else {
10495 vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
10496 SHMEM_EEE_SUPPORTED_SHIFT;
10497
10498 if (phy->flags & FLAGS_EEE) {
10499 /* Handle legacy auto-grEEEn */
10500 if (params->feature_config_flags &
10501 FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
10502 temp = 6;
10503 DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
10504 } else {
10505 temp = 0;
10506 DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
10507 }
10508 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10509 MDIO_AN_REG_EEE_ADV, temp);
10741 } 10510 }
10742 bnx2x_cl22_write(bp, phy,
10743 MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
10744 bnx2x_cl22_write(bp, phy,
10745 MDIO_REG_GPHY_CL45_DATA_REG,
10746 MDIO_REG_GPHY_EEE_ADV);
10747 bnx2x_cl22_write(bp, phy,
10748 MDIO_REG_GPHY_CL45_ADDR_REG,
10749 (0x1 << 14) | MDIO_AN_DEVAD);
10750 bnx2x_cl22_write(bp, phy,
10751 MDIO_REG_GPHY_CL45_DATA_REG,
10752 temp);
10753 } 10511 }
10754 10512
10755 bnx2x_cl22_write(bp, phy, 10513 bnx2x_cl22_write(bp, phy,
@@ -10896,29 +10654,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10896 DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", 10654 DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
10897 vars->line_speed); 10655 vars->line_speed);
10898 10656
10899 /* Report whether EEE is resolved. */
10900 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
10901 if (val == MDIO_REG_GPHY_ID_54618SE) {
10902 if (vars->link_status &
10903 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
10904 val = 0;
10905 else {
10906 bnx2x_cl22_write(bp, phy,
10907 MDIO_REG_GPHY_CL45_ADDR_REG,
10908 MDIO_AN_DEVAD);
10909 bnx2x_cl22_write(bp, phy,
10910 MDIO_REG_GPHY_CL45_DATA_REG,
10911 MDIO_REG_GPHY_EEE_RESOLVED);
10912 bnx2x_cl22_write(bp, phy,
10913 MDIO_REG_GPHY_CL45_ADDR_REG,
10914 (0x1 << 14) | MDIO_AN_DEVAD);
10915 bnx2x_cl22_read(bp, phy,
10916 MDIO_REG_GPHY_CL45_DATA_REG,
10917 &val);
10918 }
10919 DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
10920 }
10921
10922 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10657 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10923 10658
10924 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 10659 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
@@ -10948,6 +10683,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10948 if (val & (1<<11)) 10683 if (val & (1<<11))
10949 vars->link_status |= 10684 vars->link_status |=
10950 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; 10685 LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
10686
10687 if ((phy->flags & FLAGS_EEE) &&
10688 bnx2x_eee_has_cap(params))
10689 bnx2x_eee_an_resolve(phy, params, vars);
10951 } 10690 }
10952 } 10691 }
10953 return link_up; 10692 return link_up;
@@ -11353,7 +11092,7 @@ static struct bnx2x_phy phy_8073 = {
11353 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, 11092 .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
11354 .hw_reset = (hw_reset_t)NULL, 11093 .hw_reset = (hw_reset_t)NULL,
11355 .set_link_led = (set_link_led_t)NULL, 11094 .set_link_led = (set_link_led_t)NULL,
11356 .phy_specific_func = (phy_specific_func_t)NULL 11095 .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
11357}; 11096};
11358static struct bnx2x_phy phy_8705 = { 11097static struct bnx2x_phy phy_8705 = {
11359 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, 11098 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11546,7 +11285,7 @@ static struct bnx2x_phy phy_84823 = {
11546 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11285 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
11547 .hw_reset = (hw_reset_t)NULL, 11286 .hw_reset = (hw_reset_t)NULL,
11548 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11287 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11549 .phy_specific_func = (phy_specific_func_t)NULL 11288 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11550}; 11289};
11551 11290
11552static struct bnx2x_phy phy_84833 = { 11291static struct bnx2x_phy phy_84833 = {
@@ -11555,8 +11294,7 @@ static struct bnx2x_phy phy_84833 = {
11555 .def_md_devad = 0, 11294 .def_md_devad = 0,
11556 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 11295 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11557 FLAGS_REARM_LATCH_SIGNAL | 11296 FLAGS_REARM_LATCH_SIGNAL |
11558 FLAGS_TX_ERROR_CHECK | 11297 FLAGS_TX_ERROR_CHECK),
11559 FLAGS_EEE_10GBT),
11560 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11298 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11561 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11299 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11562 .mdio_ctrl = 0, 11300 .mdio_ctrl = 0,
@@ -11582,7 +11320,7 @@ static struct bnx2x_phy phy_84833 = {
11582 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 11320 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
11583 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 11321 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11584 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 11322 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11585 .phy_specific_func = (phy_specific_func_t)NULL 11323 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11586}; 11324};
11587 11325
11588static struct bnx2x_phy phy_54618se = { 11326static struct bnx2x_phy phy_54618se = {
@@ -11616,7 +11354,7 @@ static struct bnx2x_phy phy_54618se = {
11616 .format_fw_ver = (format_fw_ver_t)NULL, 11354 .format_fw_ver = (format_fw_ver_t)NULL,
11617 .hw_reset = (hw_reset_t)NULL, 11355 .hw_reset = (hw_reset_t)NULL,
11618 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, 11356 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
11619 .phy_specific_func = (phy_specific_func_t)NULL 11357 .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
11620}; 11358};
11621/*****************************************************************/ 11359/*****************************************************************/
11622/* */ 11360/* */
@@ -11862,6 +11600,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11862 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: 11600 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
11863 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: 11601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
11864 *phy = phy_54618se; 11602 *phy = phy_54618se;
11603 if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
11604 phy->flags |= FLAGS_EEE;
11865 break; 11605 break;
11866 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 11606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
11867 *phy = phy_7101; 11607 *phy = phy_7101;
@@ -12141,7 +11881,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
12141 bnx2x_xgxs_deassert(params); 11881 bnx2x_xgxs_deassert(params);
12142 11882
12143 /* set bmac loopback */ 11883 /* set bmac loopback */
12144 bnx2x_bmac_enable(params, vars, 1); 11884 bnx2x_bmac_enable(params, vars, 1, 1);
12145 11885
12146 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 11886 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12147} 11887}
@@ -12233,7 +11973,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
12233 if (USES_WARPCORE(bp)) 11973 if (USES_WARPCORE(bp))
12234 bnx2x_xmac_enable(params, vars, 0); 11974 bnx2x_xmac_enable(params, vars, 0);
12235 else 11975 else
12236 bnx2x_bmac_enable(params, vars, 0); 11976 bnx2x_bmac_enable(params, vars, 0, 1);
12237 } 11977 }
12238 11978
12239 if (params->loopback_mode == LOOPBACK_XGXS) { 11979 if (params->loopback_mode == LOOPBACK_XGXS) {
@@ -12258,8 +11998,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
12258 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 11998 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
12259} 11999}
12260 12000
12001static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
12002{
12003 struct bnx2x *bp = params->bp;
12004 u8 val = en * 0x1F;
12005
12006 /* Open the gate between the NIG to the BRB */
12007 if (!CHIP_IS_E1x(bp))
12008 val |= en * 0x20;
12009 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
12010
12011 if (!CHIP_IS_E1(bp)) {
12012 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
12013 en*0x3);
12014 }
12015
12016 REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
12017 NIG_REG_LLH0_BRB1_NOT_MCP), en);
12018}
12019static int bnx2x_avoid_link_flap(struct link_params *params,
12020 struct link_vars *vars)
12021{
12022 u32 phy_idx;
12023 u32 dont_clear_stat, lfa_sts;
12024 struct bnx2x *bp = params->bp;
12025
12026 /* Sync the link parameters */
12027 bnx2x_link_status_update(params, vars);
12028
12029 /*
12030 * The module verification was already done by previous link owner,
12031 * so this call is meant only to get warning message
12032 */
12033
12034 for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
12035 struct bnx2x_phy *phy = &params->phy[phy_idx];
12036 if (phy->phy_specific_func) {
12037 DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
12038 phy->phy_specific_func(phy, params, PHY_INIT);
12039 }
12040 if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
12041 (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
12042 (phy->media_type == ETH_PHY_DA_TWINAX))
12043 bnx2x_verify_sfp_module(phy, params);
12044 }
12045 lfa_sts = REG_RD(bp, params->lfa_base +
12046 offsetof(struct shmem_lfa,
12047 lfa_sts));
12048
12049 dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
12050
12051 /* Re-enable the NIG/MAC */
12052 if (CHIP_IS_E3(bp)) {
12053 if (!dont_clear_stat) {
12054 REG_WR(bp, GRCBASE_MISC +
12055 MISC_REGISTERS_RESET_REG_2_CLEAR,
12056 (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
12057 params->port));
12058 REG_WR(bp, GRCBASE_MISC +
12059 MISC_REGISTERS_RESET_REG_2_SET,
12060 (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
12061 params->port));
12062 }
12063 if (vars->line_speed < SPEED_10000)
12064 bnx2x_umac_enable(params, vars, 0);
12065 else
12066 bnx2x_xmac_enable(params, vars, 0);
12067 } else {
12068 if (vars->line_speed < SPEED_10000)
12069 bnx2x_emac_enable(params, vars, 0);
12070 else
12071 bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
12072 }
12073
12074 /* Increment LFA count */
12075 lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
12076 (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
12077 LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
12078 << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
12079 /* Clear link flap reason */
12080 lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
12081
12082 REG_WR(bp, params->lfa_base +
12083 offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
12084
12085 /* Disable NIG DRAIN */
12086 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12087
12088 /* Enable interrupts */
12089 bnx2x_link_int_enable(params);
12090 return 0;
12091}
12092
12093static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
12094 struct link_vars *vars,
12095 int lfa_status)
12096{
12097 u32 lfa_sts, cfg_idx, tmp_val;
12098 struct bnx2x *bp = params->bp;
12099
12100 bnx2x_link_reset(params, vars, 1);
12101
12102 if (!params->lfa_base)
12103 return;
12104 /* Store the new link parameters */
12105 REG_WR(bp, params->lfa_base +
12106 offsetof(struct shmem_lfa, req_duplex),
12107 params->req_duplex[0] | (params->req_duplex[1] << 16));
12108
12109 REG_WR(bp, params->lfa_base +
12110 offsetof(struct shmem_lfa, req_flow_ctrl),
12111 params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
12112
12113 REG_WR(bp, params->lfa_base +
12114 offsetof(struct shmem_lfa, req_line_speed),
12115 params->req_line_speed[0] | (params->req_line_speed[1] << 16));
12116
12117 for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
12118 REG_WR(bp, params->lfa_base +
12119 offsetof(struct shmem_lfa,
12120 speed_cap_mask[cfg_idx]),
12121 params->speed_cap_mask[cfg_idx]);
12122 }
12123
12124 tmp_val = REG_RD(bp, params->lfa_base +
12125 offsetof(struct shmem_lfa, additional_config));
12126 tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
12127 tmp_val |= params->req_fc_auto_adv;
12128
12129 REG_WR(bp, params->lfa_base +
12130 offsetof(struct shmem_lfa, additional_config), tmp_val);
12131
12132 lfa_sts = REG_RD(bp, params->lfa_base +
12133 offsetof(struct shmem_lfa, lfa_sts));
12134
12135 /* Clear the "Don't Clear Statistics" bit, and set reason */
12136 lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
12137
12138 /* Set link flap reason */
12139 lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
12140 lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
12141 LFA_LINK_FLAP_REASON_OFFSET);
12142
12143 /* Increment link flap counter */
12144 lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
12145 (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
12146 LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
12147 << LINK_FLAP_COUNT_OFFSET));
12148 REG_WR(bp, params->lfa_base +
12149 offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
12150 /* Proceed with regular link initialization */
12151}
12152
12261int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) 12153int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12262{ 12154{
12155 int lfa_status;
12263 struct bnx2x *bp = params->bp; 12156 struct bnx2x *bp = params->bp;
12264 DP(NETIF_MSG_LINK, "Phy Initialization started\n"); 12157 DP(NETIF_MSG_LINK, "Phy Initialization started\n");
12265 DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", 12158 DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
@@ -12274,6 +12167,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12274 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 12167 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12275 vars->mac_type = MAC_TYPE_NONE; 12168 vars->mac_type = MAC_TYPE_NONE;
12276 vars->phy_flags = 0; 12169 vars->phy_flags = 0;
12170 /* Driver opens NIG-BRB filters */
12171 bnx2x_set_rx_filter(params, 1);
12172 /* Check if link flap can be avoided */
12173 lfa_status = bnx2x_check_lfa(params);
12174
12175 if (lfa_status == 0) {
12176 DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
12177 return bnx2x_avoid_link_flap(params, vars);
12178 }
12179
12180 DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
12181 lfa_status);
12182 bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
12277 12183
12278 /* Disable attentions */ 12184 /* Disable attentions */
12279 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 12185 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -12356,13 +12262,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12356 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); 12262 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
12357 } 12263 }
12358 12264
12359 /* Stop BigMac rx */ 12265 if (!CHIP_IS_E3(bp)) {
12360 if (!CHIP_IS_E3(bp)) 12266 bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
12361 bnx2x_bmac_rx_disable(bp, port); 12267 } else {
12362 else { 12268 bnx2x_set_xmac_rxtx(params, 0);
12363 bnx2x_xmac_disable(params); 12269 bnx2x_set_umac_rxtx(params, 0);
12364 bnx2x_umac_disable(params); 12270 }
12365 }
12366 /* Disable emac */ 12271 /* Disable emac */
12367 if (!CHIP_IS_E3(bp)) 12272 if (!CHIP_IS_E3(bp))
12368 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 12273 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -12420,6 +12325,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12420 vars->phy_flags = 0; 12325 vars->phy_flags = 0;
12421 return 0; 12326 return 0;
12422} 12327}
12328int bnx2x_lfa_reset(struct link_params *params,
12329 struct link_vars *vars)
12330{
12331 struct bnx2x *bp = params->bp;
12332 vars->link_up = 0;
12333 vars->phy_flags = 0;
12334 if (!params->lfa_base)
12335 return bnx2x_link_reset(params, vars, 1);
12336 /*
12337 * Activate NIG drain so that during this time the device won't send
12338 * anything while it is unable to response.
12339 */
12340 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
12341
12342 /*
12343 * Close gracefully the gate from BMAC to NIG such that no half packets
12344 * are passed.
12345 */
12346 if (!CHIP_IS_E3(bp))
12347 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
12348
12349 if (CHIP_IS_E3(bp)) {
12350 bnx2x_set_xmac_rxtx(params, 0);
12351 bnx2x_set_umac_rxtx(params, 0);
12352 }
12353 /* Wait 10ms for the pipe to clean up*/
12354 usleep_range(10000, 20000);
12355
12356 /* Clean the NIG-BRB using the network filters in a way that will
12357 * not cut a packet in the middle.
12358 */
12359 bnx2x_set_rx_filter(params, 0);
12360
12361 /*
12362 * Re-open the gate between the BMAC and the NIG, after verifying the
12363 * gate to the BRB is closed, otherwise packets may arrive to the
12364 * firmware before driver had initialized it. The target is to achieve
12365 * minimum management protocol down time.
12366 */
12367 if (!CHIP_IS_E3(bp))
12368 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
12369
12370 if (CHIP_IS_E3(bp)) {
12371 bnx2x_set_xmac_rxtx(params, 1);
12372 bnx2x_set_umac_rxtx(params, 1);
12373 }
12374 /* Disable NIG drain */
12375 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12376 return 0;
12377}
12423 12378
12424/****************************************************************************/ 12379/****************************************************************************/
12425/* Common function */ 12380/* Common function */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 51cac8130051..9165b89a4b19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -155,7 +155,7 @@ struct bnx2x_phy {
155#define FLAGS_DUMMY_READ (1<<9) 155#define FLAGS_DUMMY_READ (1<<9)
156#define FLAGS_MDC_MDIO_WA_B0 (1<<10) 156#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
157#define FLAGS_TX_ERROR_CHECK (1<<12) 157#define FLAGS_TX_ERROR_CHECK (1<<12)
158#define FLAGS_EEE_10GBT (1<<13) 158#define FLAGS_EEE (1<<13)
159 159
160 /* preemphasis values for the rx side */ 160 /* preemphasis values for the rx side */
161 u16 rx_preemphasis[4]; 161 u16 rx_preemphasis[4];
@@ -216,6 +216,7 @@ struct bnx2x_phy {
216 phy_specific_func_t phy_specific_func; 216 phy_specific_func_t phy_specific_func;
217#define DISABLE_TX 1 217#define DISABLE_TX 1
218#define ENABLE_TX 2 218#define ENABLE_TX 2
219#define PHY_INIT 3
219}; 220};
220 221
221/* Inputs parameters to the CLC */ 222/* Inputs parameters to the CLC */
@@ -304,6 +305,8 @@ struct link_params {
304 struct bnx2x *bp; 305 struct bnx2x *bp;
305 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when 306 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
306 req_flow_ctrl is set to AUTO */ 307 req_flow_ctrl is set to AUTO */
308 u16 rsrv1;
309 u32 lfa_base;
307}; 310};
308 311
309/* Output parameters */ 312/* Output parameters */
@@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
356 to 0 */ 359 to 0 */
357int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 360int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
358 u8 reset_ext_phy); 361 u8 reset_ext_phy);
359 362int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
360/* bnx2x_link_update should be called upon link interrupt */ 363/* bnx2x_link_update should be called upon link interrupt */
361int bnx2x_link_update(struct link_params *params, struct link_vars *vars); 364int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
362 365
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e11485ca037d..f7ed122f4071 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2166,7 +2166,6 @@ void bnx2x_link_set(struct bnx2x *bp)
2166{ 2166{
2167 if (!BP_NOMCP(bp)) { 2167 if (!BP_NOMCP(bp)) {
2168 bnx2x_acquire_phy_lock(bp); 2168 bnx2x_acquire_phy_lock(bp);
2169 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2170 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2169 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2171 bnx2x_release_phy_lock(bp); 2170 bnx2x_release_phy_lock(bp);
2172 2171
@@ -2179,12 +2178,19 @@ static void bnx2x__link_reset(struct bnx2x *bp)
2179{ 2178{
2180 if (!BP_NOMCP(bp)) { 2179 if (!BP_NOMCP(bp)) {
2181 bnx2x_acquire_phy_lock(bp); 2180 bnx2x_acquire_phy_lock(bp);
2182 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); 2181 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2183 bnx2x_release_phy_lock(bp); 2182 bnx2x_release_phy_lock(bp);
2184 } else 2183 } else
2185 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 2184 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2186} 2185}
2187 2186
2187void bnx2x_force_link_reset(struct bnx2x *bp)
2188{
2189 bnx2x_acquire_phy_lock(bp);
2190 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2191 bnx2x_release_phy_lock(bp);
2192}
2193
2188u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) 2194u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2189{ 2195{
2190 u8 rc = 0; 2196 u8 rc = 0;
@@ -6751,7 +6757,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6751 u32 low, high; 6757 u32 low, high;
6752 u32 val; 6758 u32 val;
6753 6759
6754 bnx2x__link_reset(bp);
6755 6760
6756 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 6761 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
6757 6762
@@ -8244,12 +8249,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8244 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. 8249 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8245 * 8250 *
8246 * @bp: driver handle 8251 * @bp: driver handle
8252 * @keep_link: true iff link should be kept up
8247 */ 8253 */
8248void bnx2x_send_unload_done(struct bnx2x *bp) 8254void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8249{ 8255{
8256 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8257
8250 /* Report UNLOAD_DONE to MCP */ 8258 /* Report UNLOAD_DONE to MCP */
8251 if (!BP_NOMCP(bp)) 8259 if (!BP_NOMCP(bp))
8252 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 8260 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8253} 8261}
8254 8262
8255static int bnx2x_func_wait_started(struct bnx2x *bp) 8263static int bnx2x_func_wait_started(struct bnx2x *bp)
@@ -8318,7 +8326,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
8318 return 0; 8326 return 0;
8319} 8327}
8320 8328
8321void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 8329void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8322{ 8330{
8323 int port = BP_PORT(bp); 8331 int port = BP_PORT(bp);
8324 int i, rc = 0; 8332 int i, rc = 0;
@@ -8440,7 +8448,7 @@ unload_error:
8440 8448
8441 8449
8442 /* Report UNLOAD_DONE to MCP */ 8450 /* Report UNLOAD_DONE to MCP */
8443 bnx2x_send_unload_done(bp); 8451 bnx2x_send_unload_done(bp, keep_link);
8444} 8452}
8445 8453
8446void bnx2x_disable_close_the_gate(struct bnx2x *bp) 8454void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -8852,7 +8860,8 @@ int bnx2x_leader_reset(struct bnx2x *bp)
8852 * driver is owner of the HW 8860 * driver is owner of the HW
8853 */ 8861 */
8854 if (!global && !BP_NOMCP(bp)) { 8862 if (!global && !BP_NOMCP(bp)) {
8855 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 8863 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
8864 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
8856 if (!load_code) { 8865 if (!load_code) {
8857 BNX2X_ERR("MCP response failure, aborting\n"); 8866 BNX2X_ERR("MCP response failure, aborting\n");
8858 rc = -EAGAIN; 8867 rc = -EAGAIN;
@@ -8958,7 +8967,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
8958 8967
8959 /* Stop the driver */ 8968 /* Stop the driver */
8960 /* If interface has been removed - break */ 8969 /* If interface has been removed - break */
8961 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) 8970 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
8962 return; 8971 return;
8963 8972
8964 bp->recovery_state = BNX2X_RECOVERY_WAIT; 8973 bp->recovery_state = BNX2X_RECOVERY_WAIT;
@@ -9124,7 +9133,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
9124 bp->sp_rtnl_state = 0; 9133 bp->sp_rtnl_state = 0;
9125 smp_mb(); 9134 smp_mb();
9126 9135
9127 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 9136 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9128 bnx2x_nic_load(bp, LOAD_NORMAL); 9137 bnx2x_nic_load(bp, LOAD_NORMAL);
9129 9138
9130 goto sp_rtnl_exit; 9139 goto sp_rtnl_exit;
@@ -9310,7 +9319,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
9310 9319
9311static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) 9320static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
9312{ 9321{
9313 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 9322 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9323 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9314 if (!rc) { 9324 if (!rc) {
9315 BNX2X_ERR("MCP response failure, aborting\n"); 9325 BNX2X_ERR("MCP response failure, aborting\n");
9316 return -EBUSY; 9326 return -EBUSY;
@@ -11000,7 +11010,7 @@ static int bnx2x_close(struct net_device *dev)
11000 struct bnx2x *bp = netdev_priv(dev); 11010 struct bnx2x *bp = netdev_priv(dev);
11001 11011
11002 /* Unload the driver, release IRQs */ 11012 /* Unload the driver, release IRQs */
11003 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 11013 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11004 11014
11005 /* Power off */ 11015 /* Power off */
11006 bnx2x_set_power_state(bp, PCI_D3hot); 11016 bnx2x_set_power_state(bp, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 28a0bcfe61ff..1b1999d34c71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -4949,6 +4949,10 @@
4949#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) 4949#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
4950#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) 4950#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
4951#define UMAC_REG_COMMAND_CONFIG 0x8 4951#define UMAC_REG_COMMAND_CONFIG 0x8
4952/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE
4953 * state from LPI state when it receives packet for transmission. The
4954 * decrement unit is 1 micro-second. */
4955#define UMAC_REG_EEE_WAKE_TIMER 0x6c
4952/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers 4956/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
4953 * to bit 17 of the MAC address etc. */ 4957 * to bit 17 of the MAC address etc. */
4954#define UMAC_REG_MAC_ADDR0 0xc 4958#define UMAC_REG_MAC_ADDR0 0xc
@@ -4958,6 +4962,8 @@
4958/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive 4962/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
4959 * logic to check frames. */ 4963 * logic to check frames. */
4960#define UMAC_REG_MAXFR 0x14 4964#define UMAC_REG_MAXFR 0x14
4965#define UMAC_REG_UMAC_EEE_CTRL 0x64
4966#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3)
4961/* [RW 8] The event id for aggregated interrupt 0 */ 4967/* [RW 8] The event id for aggregated interrupt 0 */
4962#define USDM_REG_AGG_INT_EVENT_0 0xc4038 4968#define USDM_REG_AGG_INT_EVENT_0 0xc4038
4963#define USDM_REG_AGG_INT_EVENT_1 0xc403c 4969#define USDM_REG_AGG_INT_EVENT_1 0xc403c
@@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/
6992/* BCM84833 only */ 6998/* BCM84833 only */
6993#define MDIO_84833_TOP_CFG_FW_REV 0x400f 6999#define MDIO_84833_TOP_CFG_FW_REV 0x400f
6994#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 7000#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
7001#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
6995#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a 7002#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
6996#define MDIO_84833_SUPER_ISOLATE 0x8000 7003#define MDIO_84833_SUPER_ISOLATE 0x8000
6997/* These are mailbox register set used by 84833. */ 7004/* These are mailbox register set used by 84833. */
@@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/
7160#define MDIO_REG_GPHY_ID_54618SE 0x5cd5 7167#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
7161#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd 7168#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
7162#define MDIO_REG_GPHY_CL45_DATA_REG 0xe 7169#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
7163#define MDIO_REG_GPHY_EEE_ADV 0x3c
7164#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
7165#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
7166#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e 7170#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
7171#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
7172#define MDIO_REG_GPHY_EXP_ACCESS 0x17
7173#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
7174#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
7167#define MDIO_REG_GPHY_AUX_STATUS 0x19 7175#define MDIO_REG_GPHY_AUX_STATUS 0x19
7168#define MDIO_REG_INTR_STATUS 0x1a 7176#define MDIO_REG_INTR_STATUS 0x1a
7169#define MDIO_REG_INTR_MASK 0x1b 7177#define MDIO_REG_INTR_MASK 0x1b
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 62f754bd0dfe..71971a161bd1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
229 */ 229 */
230 list_add_tail(&spacer.link, &o->pending_comp); 230 list_add_tail(&spacer.link, &o->pending_comp);
231 mb(); 231 mb();
232 list_del(&elem->link); 232 list_move_tail(&elem->link, &o->pending_comp);
233 list_add_tail(&elem->link, &o->pending_comp);
234 list_del(&spacer.link); 233 list_del(&spacer.link);
235 } else 234 } else
236 break; 235 break;
@@ -5620,7 +5619,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5620 memset(rdata, 0, sizeof(*rdata)); 5619 memset(rdata, 0, sizeof(*rdata));
5621 5620
5622 /* Fill the ramrod data with provided parameters */ 5621 /* Fill the ramrod data with provided parameters */
5623 rdata->function_mode = cpu_to_le16(start_params->mf_mode); 5622 rdata->function_mode = (u8)start_params->mf_mode;
5624 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5623 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5625 rdata->path_id = BP_PATH(bp); 5624 rdata->path_id = BP_PATH(bp);
5626 rdata->network_cos_mode = start_params->network_cos_mode; 5625 rdata->network_cos_mode = start_params->network_cos_mode;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index a1d0446b39b3..348ed02d3c69 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref)
39#endif 39#endif
40} 40}
41 41
42static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) 42static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
43{ 43{
44 u16 res = sizeof(struct host_port_stats) >> 2; 44 u16 res = 0;
45 45
46 /* if PFC stats are not supported by the MFW, don't DMA them */ 46 /* 'newest' convention - shmem2 cotains the size of the port stats */
47 if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) 47 if (SHMEM2_HAS(bp, sizeof_port_stats)) {
48 res -= (sizeof(u32)*4) >> 2; 48 u32 size = SHMEM2_RD(bp, sizeof_port_stats);
49 if (size)
50 res = size;
49 51
52 /* prevent newer BC from causing buffer overflow */
53 if (res > sizeof(struct host_port_stats))
54 res = sizeof(struct host_port_stats);
55 }
56
57 /* Older convention - all BCs support the port stats' fields up until
58 * the 'not_used' field
59 */
60 if (!res) {
61 res = offsetof(struct host_port_stats, not_used) + 4;
62
63 /* if PFC stats are supported by the MFW, DMA them as well */
64 if (bp->flags & BC_SUPPORTS_PFC_STATS) {
65 res += offsetof(struct host_port_stats,
66 pfc_frames_rx_lo) -
67 offsetof(struct host_port_stats,
68 pfc_frames_tx_hi) + 4 ;
69 }
70 }
71
72 res >>= 2;
73
74 WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
50 return res; 75 return res;
51} 76}
52 77
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3b4fc61f24cf..cc8434fd606e 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
823 } 823 }
824} 824}
825 825
826static void __cnic_free_uio(struct cnic_uio_dev *udev) 826static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
827{ 827{
828 uio_unregister_device(&udev->cnic_uinfo);
829
830 if (udev->l2_buf) { 828 if (udev->l2_buf) {
831 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, 829 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
832 udev->l2_buf, udev->l2_buf_map); 830 udev->l2_buf, udev->l2_buf_map);
@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
839 udev->l2_ring = NULL; 837 udev->l2_ring = NULL;
840 } 838 }
841 839
840}
841
842static void __cnic_free_uio(struct cnic_uio_dev *udev)
843{
844 uio_unregister_device(&udev->cnic_uinfo);
845
846 __cnic_free_uio_rings(udev);
847
842 pci_dev_put(udev->pdev); 848 pci_dev_put(udev->pdev);
843 kfree(udev); 849 kfree(udev);
844} 850}
@@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev)
862 if (udev) { 868 if (udev) {
863 udev->dev = NULL; 869 udev->dev = NULL;
864 cp->udev = NULL; 870 cp->udev = NULL;
871 if (udev->uio_dev == -1)
872 __cnic_free_uio_rings(udev);
865 } 873 }
866 874
867 cnic_free_context(dev); 875 cnic_free_context(dev);
@@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
996 return 0; 1004 return 0;
997} 1005}
998 1006
1007static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1008{
1009 struct cnic_local *cp = udev->dev->cnic_priv;
1010
1011 if (udev->l2_ring)
1012 return 0;
1013
1014 udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1015 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1016 &udev->l2_ring_map,
1017 GFP_KERNEL | __GFP_COMP);
1018 if (!udev->l2_ring)
1019 return -ENOMEM;
1020
1021 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1022 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
1023 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1024 &udev->l2_buf_map,
1025 GFP_KERNEL | __GFP_COMP);
1026 if (!udev->l2_buf) {
1027 __cnic_free_uio_rings(udev);
1028 return -ENOMEM;
1029 }
1030
1031 return 0;
1032
1033}
1034
999static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) 1035static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1000{ 1036{
1001 struct cnic_local *cp = dev->cnic_priv; 1037 struct cnic_local *cp = dev->cnic_priv;
@@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1005 list_for_each_entry(udev, &cnic_udev_list, list) { 1041 list_for_each_entry(udev, &cnic_udev_list, list) {
1006 if (udev->pdev == dev->pcidev) { 1042 if (udev->pdev == dev->pcidev) {
1007 udev->dev = dev; 1043 udev->dev = dev;
1044 if (__cnic_alloc_uio_rings(udev, pages)) {
1045 udev->dev = NULL;
1046 read_unlock(&cnic_dev_lock);
1047 return -ENOMEM;
1048 }
1008 cp->udev = udev; 1049 cp->udev = udev;
1009 read_unlock(&cnic_dev_lock); 1050 read_unlock(&cnic_dev_lock);
1010 return 0; 1051 return 0;
@@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1020 1061
1021 udev->dev = dev; 1062 udev->dev = dev;
1022 udev->pdev = dev->pcidev; 1063 udev->pdev = dev->pcidev;
1023 udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1024 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1025 &udev->l2_ring_map,
1026 GFP_KERNEL | __GFP_COMP);
1027 if (!udev->l2_ring)
1028 goto err_udev;
1029 1064
1030 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 1065 if (__cnic_alloc_uio_rings(udev, pages))
1031 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); 1066 goto err_udev;
1032 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1033 &udev->l2_buf_map,
1034 GFP_KERNEL | __GFP_COMP);
1035 if (!udev->l2_buf)
1036 goto err_dma;
1037 1067
1038 write_lock(&cnic_dev_lock); 1068 write_lock(&cnic_dev_lock);
1039 list_add(&udev->list, &cnic_udev_list); 1069 list_add(&udev->list, &cnic_udev_list);
@@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1044 cp->udev = udev; 1074 cp->udev = udev;
1045 1075
1046 return 0; 1076 return 0;
1047 err_dma: 1077
1048 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
1049 udev->l2_ring, udev->l2_ring_map);
1050 err_udev: 1078 err_udev:
1051 kfree(udev); 1079 kfree(udev);
1052 return -ENOMEM; 1080 return -ENOMEM;
@@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1260 if (ret) 1288 if (ret)
1261 goto error; 1289 goto error;
1262 1290
1263 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1291 if (CNIC_SUPPORTS_FCOE(cp)) {
1264 ret = cnic_alloc_kcq(dev, &cp->kcq2, true); 1292 ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1265 if (ret) 1293 if (ret)
1266 goto error; 1294 goto error;
@@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1275 if (ret) 1303 if (ret)
1276 goto error; 1304 goto error;
1277 1305
1306 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1307 return 0;
1308
1278 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1309 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1279 1310
1280 cp->l2_rx_ring_size = 15; 1311 cp->l2_rx_ring_size = 15;
@@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3050 IGU_INT_DISABLE, 0); 3081 IGU_INT_DISABLE, 0);
3051} 3082}
3052 3083
3084static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3085{
3086 struct cnic_local *cp = dev->cnic_priv;
3087
3088 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3089 IGU_INT_ENABLE, 1);
3090}
3091
3092static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3093{
3094 struct cnic_local *cp = dev->cnic_priv;
3095
3096 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3097 IGU_INT_ENABLE, 1);
3098}
3099
3053static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 3100static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3054{ 3101{
3055 u32 last_status = *info->status_idx_ptr; 3102 u32 last_status = *info->status_idx_ptr;
@@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data)
3086 CNIC_WR16(dev, cp->kcq1.io_addr, 3133 CNIC_WR16(dev, cp->kcq1.io_addr,
3087 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3134 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3088 3135
3089 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 3136 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
3090 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 3137 cp->arm_int(dev, status_idx);
3091 status_idx, IGU_INT_ENABLE, 1);
3092 break; 3138 break;
3093 } 3139 }
3094 3140
@@ -4845,6 +4891,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4845 buf_map = udev->l2_buf_map; 4891 buf_map = udev->l2_buf_map;
4846 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4892 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4847 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4893 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4894 struct eth_tx_parse_bd_e1x *pbd_e1x =
4895 &((txbd + 1)->parse_bd_e1x);
4896 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4848 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4897 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4849 4898
4850 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4899 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
@@ -4854,10 +4903,15 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4854 start_bd->nbytes = cpu_to_le16(0x10); 4903 start_bd->nbytes = cpu_to_le16(0x10);
4855 start_bd->nbd = cpu_to_le16(3); 4904 start_bd->nbd = cpu_to_le16(3);
4856 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4905 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4857 start_bd->general_data = (UNICAST_ADDRESS << 4906 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4858 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4859 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4907 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4860 4908
4909 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
4910 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4911 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4912 else
4913 pbd_e1x->global_data = (UNICAST_ADDRESS <<
4914 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4861 } 4915 }
4862 4916
4863 val = (u64) ring_map >> 32; 4917 val = (u64) ring_map >> 32;
@@ -5308,7 +5362,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
5308 /* Need to wait for the ring shutdown event to complete 5362 /* Need to wait for the ring shutdown event to complete
5309 * before clearing the CNIC_UP flag. 5363 * before clearing the CNIC_UP flag.
5310 */ 5364 */
5311 while (cp->udev->uio_dev != -1 && i < 15) { 5365 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5312 msleep(100); 5366 msleep(100);
5313 i++; 5367 i++;
5314 } 5368 }
@@ -5473,8 +5527,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5473 5527
5474 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5528 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5475 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5529 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5476 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 5530 if (CNIC_SUPPORTS_FCOE(cp))
5477 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5478 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5531 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5479 5532
5480 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5533 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
@@ -5492,10 +5545,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5492 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5545 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5493 cp->enable_int = cnic_enable_bnx2x_int; 5546 cp->enable_int = cnic_enable_bnx2x_int;
5494 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5547 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5495 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 5548 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5496 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5549 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5497 else 5550 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5551 } else {
5498 cp->ack_int = cnic_ack_bnx2x_msix; 5552 cp->ack_int = cnic_ack_bnx2x_msix;
5553 cp->arm_int = cnic_arm_bnx2x_msix;
5554 }
5499 cp->close_conn = cnic_close_bnx2x_conn; 5555 cp->close_conn = cnic_close_bnx2x_conn;
5500 return cdev; 5556 return cdev;
5501} 5557}
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 30328097f516..148604c3fa0c 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -334,6 +334,7 @@ struct cnic_local {
334 void (*enable_int)(struct cnic_dev *); 334 void (*enable_int)(struct cnic_dev *);
335 void (*disable_int_sync)(struct cnic_dev *); 335 void (*disable_int_sync)(struct cnic_dev *);
336 void (*ack_int)(struct cnic_dev *); 336 void (*ack_int)(struct cnic_dev *);
337 void (*arm_int)(struct cnic_dev *, u32 index);
337 void (*close_conn)(struct cnic_sock *, u32 opcode); 338 void (*close_conn)(struct cnic_sock *, u32 opcode);
338}; 339};
339 340
@@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next {
474 MAX_STAT_COUNTER_ID_E1)) 475 MAX_STAT_COUNTER_ID_E1))
475#endif 476#endif
476 477
478#define CNIC_SUPPORTS_FCOE(cp) \
479 (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
480 !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
481
477#define CNIC_RAMROD_TMO (HZ / 4) 482#define CNIC_RAMROD_TMO (HZ / 4)
478 483
479#endif 484#endif
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 382c98b0cc0c..ede3db35d757 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -896,7 +896,7 @@ struct tstorm_tcp_tcp_ag_context_section {
896 u32 snd_nxt; 896 u32 snd_nxt;
897 u32 rtt_seq; 897 u32 rtt_seq;
898 u32 rtt_time; 898 u32 rtt_time;
899 u32 __reserved66; 899 u32 wnd_right_edge_local;
900 u32 wnd_right_edge; 900 u32 wnd_right_edge;
901 u32 tcp_agg_vars1; 901 u32 tcp_agg_vars1;
902#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) 902#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 5cb88881bba1..865095aad1f6 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
14 14
15#include "bnx2x/bnx2x_mfw_req.h" 15#include "bnx2x/bnx2x_mfw_req.h"
16 16
17#define CNIC_MODULE_VERSION "2.5.12" 17#define CNIC_MODULE_VERSION "2.5.14"
18#define CNIC_MODULE_RELDATE "June 29, 2012" 18#define CNIC_MODULE_RELDATE "Sep 30, 2012"
19 19
20#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
21#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 388d32213937..46280ba4c5d4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,10 +44,8 @@
44#include <linux/prefetch.h> 44#include <linux/prefetch.h>
45#include <linux/dma-mapping.h> 45#include <linux/dma-mapping.h>
46#include <linux/firmware.h> 46#include <linux/firmware.h>
47#if IS_ENABLED(CONFIG_HWMON)
48#include <linux/hwmon.h> 47#include <linux/hwmon.h>
49#include <linux/hwmon-sysfs.h> 48#include <linux/hwmon-sysfs.h>
50#endif
51 49
52#include <net/checksum.h> 50#include <net/checksum.h>
53#include <net/ip.h> 51#include <net/ip.h>
@@ -92,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
92 90
93#define DRV_MODULE_NAME "tg3" 91#define DRV_MODULE_NAME "tg3"
94#define TG3_MAJ_NUM 3 92#define TG3_MAJ_NUM 3
95#define TG3_MIN_NUM 124 93#define TG3_MIN_NUM 125
96#define DRV_MODULE_VERSION \ 94#define DRV_MODULE_VERSION \
97 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
98#define DRV_MODULE_RELDATE "March 21, 2012" 96#define DRV_MODULE_RELDATE "September 26, 2012"
99 97
100#define RESET_KIND_SHUTDOWN 0 98#define RESET_KIND_SHUTDOWN 0
101#define RESET_KIND_INIT 1 99#define RESET_KIND_INIT 1
@@ -6263,7 +6261,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6263 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 6261 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6264 6262
6265 tp->rx_refill = false; 6263 tp->rx_refill = false;
6266 for (i = 1; i < tp->irq_cnt; i++) 6264 for (i = 1; i <= tp->rxq_cnt; i++)
6267 err |= tg3_rx_prodring_xfer(tp, dpr, 6265 err |= tg3_rx_prodring_xfer(tp, dpr,
6268 &tp->napi[i].prodring); 6266 &tp->napi[i].prodring);
6269 6267
@@ -7592,15 +7590,11 @@ static int tg3_init_rings(struct tg3 *tp)
7592 return 0; 7590 return 0;
7593} 7591}
7594 7592
7595/* 7593static void tg3_mem_tx_release(struct tg3 *tp)
7596 * Must not be invoked with interrupt sources disabled and
7597 * the hardware shutdown down.
7598 */
7599static void tg3_free_consistent(struct tg3 *tp)
7600{ 7594{
7601 int i; 7595 int i;
7602 7596
7603 for (i = 0; i < tp->irq_cnt; i++) { 7597 for (i = 0; i < tp->irq_max; i++) {
7604 struct tg3_napi *tnapi = &tp->napi[i]; 7598 struct tg3_napi *tnapi = &tp->napi[i];
7605 7599
7606 if (tnapi->tx_ring) { 7600 if (tnapi->tx_ring) {
@@ -7611,17 +7605,114 @@ static void tg3_free_consistent(struct tg3 *tp)
7611 7605
7612 kfree(tnapi->tx_buffers); 7606 kfree(tnapi->tx_buffers);
7613 tnapi->tx_buffers = NULL; 7607 tnapi->tx_buffers = NULL;
7608 }
7609}
7614 7610
7615 if (tnapi->rx_rcb) { 7611static int tg3_mem_tx_acquire(struct tg3 *tp)
7616 dma_free_coherent(&tp->pdev->dev, 7612{
7617 TG3_RX_RCB_RING_BYTES(tp), 7613 int i;
7618 tnapi->rx_rcb, 7614 struct tg3_napi *tnapi = &tp->napi[0];
7619 tnapi->rx_rcb_mapping); 7615
7620 tnapi->rx_rcb = NULL; 7616 /* If multivector TSS is enabled, vector 0 does not handle
7621 } 7617 * tx interrupts. Don't allocate any resources for it.
7618 */
7619 if (tg3_flag(tp, ENABLE_TSS))
7620 tnapi++;
7621
7622 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7623 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7624 TG3_TX_RING_SIZE, GFP_KERNEL);
7625 if (!tnapi->tx_buffers)
7626 goto err_out;
7627
7628 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7629 TG3_TX_RING_BYTES,
7630 &tnapi->tx_desc_mapping,
7631 GFP_KERNEL);
7632 if (!tnapi->tx_ring)
7633 goto err_out;
7634 }
7635
7636 return 0;
7637
7638err_out:
7639 tg3_mem_tx_release(tp);
7640 return -ENOMEM;
7641}
7642
7643static void tg3_mem_rx_release(struct tg3 *tp)
7644{
7645 int i;
7646
7647 for (i = 0; i < tp->irq_max; i++) {
7648 struct tg3_napi *tnapi = &tp->napi[i];
7622 7649
7623 tg3_rx_prodring_fini(tp, &tnapi->prodring); 7650 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7624 7651
7652 if (!tnapi->rx_rcb)
7653 continue;
7654
7655 dma_free_coherent(&tp->pdev->dev,
7656 TG3_RX_RCB_RING_BYTES(tp),
7657 tnapi->rx_rcb,
7658 tnapi->rx_rcb_mapping);
7659 tnapi->rx_rcb = NULL;
7660 }
7661}
7662
7663static int tg3_mem_rx_acquire(struct tg3 *tp)
7664{
7665 unsigned int i, limit;
7666
7667 limit = tp->rxq_cnt;
7668
7669 /* If RSS is enabled, we need a (dummy) producer ring
7670 * set on vector zero. This is the true hw prodring.
7671 */
7672 if (tg3_flag(tp, ENABLE_RSS))
7673 limit++;
7674
7675 for (i = 0; i < limit; i++) {
7676 struct tg3_napi *tnapi = &tp->napi[i];
7677
7678 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7679 goto err_out;
7680
7681 /* If multivector RSS is enabled, vector 0
7682 * does not handle rx or tx interrupts.
7683 * Don't allocate any resources for it.
7684 */
7685 if (!i && tg3_flag(tp, ENABLE_RSS))
7686 continue;
7687
7688 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7689 TG3_RX_RCB_RING_BYTES(tp),
7690 &tnapi->rx_rcb_mapping,
7691 GFP_KERNEL);
7692 if (!tnapi->rx_rcb)
7693 goto err_out;
7694
7695 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7696 }
7697
7698 return 0;
7699
7700err_out:
7701 tg3_mem_rx_release(tp);
7702 return -ENOMEM;
7703}
7704
7705/*
7706 * Must not be invoked with interrupt sources disabled and
7707 * the hardware shutdown down.
7708 */
7709static void tg3_free_consistent(struct tg3 *tp)
7710{
7711 int i;
7712
7713 for (i = 0; i < tp->irq_cnt; i++) {
7714 struct tg3_napi *tnapi = &tp->napi[i];
7715
7625 if (tnapi->hw_status) { 7716 if (tnapi->hw_status) {
7626 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, 7717 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7627 tnapi->hw_status, 7718 tnapi->hw_status,
@@ -7630,6 +7721,9 @@ static void tg3_free_consistent(struct tg3 *tp)
7630 } 7721 }
7631 } 7722 }
7632 7723
7724 tg3_mem_rx_release(tp);
7725 tg3_mem_tx_release(tp);
7726
7633 if (tp->hw_stats) { 7727 if (tp->hw_stats) {
7634 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 7728 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7635 tp->hw_stats, tp->stats_mapping); 7729 tp->hw_stats, tp->stats_mapping);
@@ -7668,72 +7762,38 @@ static int tg3_alloc_consistent(struct tg3 *tp)
7668 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 7762 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7669 sblk = tnapi->hw_status; 7763 sblk = tnapi->hw_status;
7670 7764
7671 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) 7765 if (tg3_flag(tp, ENABLE_RSS)) {
7672 goto err_out; 7766 u16 *prodptr = 0;
7673 7767
7674 /* If multivector TSS is enabled, vector 0 does not handle 7768 /*
7675 * tx interrupts. Don't allocate any resources for it. 7769 * When RSS is enabled, the status block format changes
7676 */ 7770 * slightly. The "rx_jumbo_consumer", "reserved",
7677 if ((!i && !tg3_flag(tp, ENABLE_TSS)) || 7771 * and "rx_mini_consumer" members get mapped to the
7678 (i && tg3_flag(tp, ENABLE_TSS))) { 7772 * other three rx return ring producer indexes.
7679 tnapi->tx_buffers = kzalloc( 7773 */
7680 sizeof(struct tg3_tx_ring_info) * 7774 switch (i) {
7681 TG3_TX_RING_SIZE, GFP_KERNEL); 7775 case 1:
7682 if (!tnapi->tx_buffers) 7776 prodptr = &sblk->idx[0].rx_producer;
7683 goto err_out; 7777 break;
7684 7778 case 2:
7685 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, 7779 prodptr = &sblk->rx_jumbo_consumer;
7686 TG3_TX_RING_BYTES, 7780 break;
7687 &tnapi->tx_desc_mapping, 7781 case 3:
7688 GFP_KERNEL); 7782 prodptr = &sblk->reserved;
7689 if (!tnapi->tx_ring) 7783 break;
7690 goto err_out; 7784 case 4:
7691 } 7785 prodptr = &sblk->rx_mini_consumer;
7692
7693 /*
7694 * When RSS is enabled, the status block format changes
7695 * slightly. The "rx_jumbo_consumer", "reserved",
7696 * and "rx_mini_consumer" members get mapped to the
7697 * other three rx return ring producer indexes.
7698 */
7699 switch (i) {
7700 default:
7701 if (tg3_flag(tp, ENABLE_RSS)) {
7702 tnapi->rx_rcb_prod_idx = NULL;
7703 break; 7786 break;
7704 } 7787 }
7705 /* Fall through */ 7788 tnapi->rx_rcb_prod_idx = prodptr;
7706 case 1: 7789 } else {
7707 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; 7790 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7708 break;
7709 case 2:
7710 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7711 break;
7712 case 3:
7713 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7714 break;
7715 case 4:
7716 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7717 break;
7718 } 7791 }
7719
7720 /*
7721 * If multivector RSS is enabled, vector 0 does not handle
7722 * rx or tx interrupts. Don't allocate any resources for it.
7723 */
7724 if (!i && tg3_flag(tp, ENABLE_RSS))
7725 continue;
7726
7727 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7728 TG3_RX_RCB_RING_BYTES(tp),
7729 &tnapi->rx_rcb_mapping,
7730 GFP_KERNEL);
7731 if (!tnapi->rx_rcb)
7732 goto err_out;
7733
7734 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7735 } 7792 }
7736 7793
7794 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7795 goto err_out;
7796
7737 return 0; 7797 return 0;
7738 7798
7739err_out: 7799err_out:
@@ -8247,9 +8307,10 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8247 nic_addr); 8307 nic_addr);
8248} 8308}
8249 8309
8250static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 8310
8311static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8251{ 8312{
8252 int i; 8313 int i = 0;
8253 8314
8254 if (!tg3_flag(tp, ENABLE_TSS)) { 8315 if (!tg3_flag(tp, ENABLE_TSS)) {
8255 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 8316 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
@@ -8259,31 +8320,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8259 tw32(HOSTCC_TXCOL_TICKS, 0); 8320 tw32(HOSTCC_TXCOL_TICKS, 0);
8260 tw32(HOSTCC_TXMAX_FRAMES, 0); 8321 tw32(HOSTCC_TXMAX_FRAMES, 0);
8261 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 8322 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8323
8324 for (; i < tp->txq_cnt; i++) {
8325 u32 reg;
8326
8327 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8328 tw32(reg, ec->tx_coalesce_usecs);
8329 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8330 tw32(reg, ec->tx_max_coalesced_frames);
8331 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8332 tw32(reg, ec->tx_max_coalesced_frames_irq);
8333 }
8262 } 8334 }
8263 8335
8336 for (; i < tp->irq_max - 1; i++) {
8337 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8338 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8339 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8340 }
8341}
8342
8343static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8344{
8345 int i = 0;
8346 u32 limit = tp->rxq_cnt;
8347
8264 if (!tg3_flag(tp, ENABLE_RSS)) { 8348 if (!tg3_flag(tp, ENABLE_RSS)) {
8265 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 8349 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8266 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 8350 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8267 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 8351 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8352 limit--;
8268 } else { 8353 } else {
8269 tw32(HOSTCC_RXCOL_TICKS, 0); 8354 tw32(HOSTCC_RXCOL_TICKS, 0);
8270 tw32(HOSTCC_RXMAX_FRAMES, 0); 8355 tw32(HOSTCC_RXMAX_FRAMES, 0);
8271 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 8356 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8272 } 8357 }
8273 8358
8274 if (!tg3_flag(tp, 5705_PLUS)) { 8359 for (; i < limit; i++) {
8275 u32 val = ec->stats_block_coalesce_usecs;
8276
8277 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8278 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8279
8280 if (!netif_carrier_ok(tp->dev))
8281 val = 0;
8282
8283 tw32(HOSTCC_STAT_COAL_TICKS, val);
8284 }
8285
8286 for (i = 0; i < tp->irq_cnt - 1; i++) {
8287 u32 reg; 8360 u32 reg;
8288 8361
8289 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; 8362 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
@@ -8292,27 +8365,30 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8292 tw32(reg, ec->rx_max_coalesced_frames); 8365 tw32(reg, ec->rx_max_coalesced_frames);
8293 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 8366 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8294 tw32(reg, ec->rx_max_coalesced_frames_irq); 8367 tw32(reg, ec->rx_max_coalesced_frames_irq);
8295
8296 if (tg3_flag(tp, ENABLE_TSS)) {
8297 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8298 tw32(reg, ec->tx_coalesce_usecs);
8299 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8300 tw32(reg, ec->tx_max_coalesced_frames);
8301 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8302 tw32(reg, ec->tx_max_coalesced_frames_irq);
8303 }
8304 } 8368 }
8305 8369
8306 for (; i < tp->irq_max - 1; i++) { 8370 for (; i < tp->irq_max - 1; i++) {
8307 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); 8371 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8308 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 8372 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8309 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 8373 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8374 }
8375}
8310 8376
8311 if (tg3_flag(tp, ENABLE_TSS)) { 8377static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8312 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 8378{
8313 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 8379 tg3_coal_tx_init(tp, ec);
8314 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 8380 tg3_coal_rx_init(tp, ec);
8315 } 8381
8382 if (!tg3_flag(tp, 5705_PLUS)) {
8383 u32 val = ec->stats_block_coalesce_usecs;
8384
8385 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8386 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8387
8388 if (!netif_carrier_ok(tp->dev))
8389 val = 0;
8390
8391 tw32(HOSTCC_STAT_COAL_TICKS, val);
8316 } 8392 }
8317} 8393}
8318 8394
@@ -8570,13 +8646,12 @@ static void __tg3_set_rx_mode(struct net_device *dev)
8570 } 8646 }
8571} 8647}
8572 8648
8573static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp) 8649static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8574{ 8650{
8575 int i; 8651 int i;
8576 8652
8577 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) 8653 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8578 tp->rss_ind_tbl[i] = 8654 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8579 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8580} 8655}
8581 8656
8582static void tg3_rss_check_indir_tbl(struct tg3 *tp) 8657static void tg3_rss_check_indir_tbl(struct tg3 *tp)
@@ -8598,7 +8673,7 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8598 } 8673 }
8599 8674
8600 if (i != TG3_RSS_INDIR_TBL_SIZE) 8675 if (i != TG3_RSS_INDIR_TBL_SIZE)
8601 tg3_rss_init_dflt_indir_tbl(tp); 8676 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8602} 8677}
8603 8678
8604static void tg3_rss_write_indir_tbl(struct tg3 *tp) 8679static void tg3_rss_write_indir_tbl(struct tg3 *tp)
@@ -9495,7 +9570,6 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9495 return tg3_reset_hw(tp, reset_phy); 9570 return tg3_reset_hw(tp, reset_phy);
9496} 9571}
9497 9572
9498#if IS_ENABLED(CONFIG_HWMON)
9499static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) 9573static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9500{ 9574{
9501 int i; 9575 int i;
@@ -9548,22 +9622,17 @@ static const struct attribute_group tg3_group = {
9548 .attrs = tg3_attributes, 9622 .attrs = tg3_attributes,
9549}; 9623};
9550 9624
9551#endif
9552
9553static void tg3_hwmon_close(struct tg3 *tp) 9625static void tg3_hwmon_close(struct tg3 *tp)
9554{ 9626{
9555#if IS_ENABLED(CONFIG_HWMON)
9556 if (tp->hwmon_dev) { 9627 if (tp->hwmon_dev) {
9557 hwmon_device_unregister(tp->hwmon_dev); 9628 hwmon_device_unregister(tp->hwmon_dev);
9558 tp->hwmon_dev = NULL; 9629 tp->hwmon_dev = NULL;
9559 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); 9630 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9560 } 9631 }
9561#endif
9562} 9632}
9563 9633
9564static void tg3_hwmon_open(struct tg3 *tp) 9634static void tg3_hwmon_open(struct tg3 *tp)
9565{ 9635{
9566#if IS_ENABLED(CONFIG_HWMON)
9567 int i, err; 9636 int i, err;
9568 u32 size = 0; 9637 u32 size = 0;
9569 struct pci_dev *pdev = tp->pdev; 9638 struct pci_dev *pdev = tp->pdev;
@@ -9595,7 +9664,6 @@ static void tg3_hwmon_open(struct tg3 *tp)
9595 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); 9664 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9596 sysfs_remove_group(&pdev->dev.kobj, &tg3_group); 9665 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9597 } 9666 }
9598#endif
9599} 9667}
9600 9668
9601 9669
@@ -10119,21 +10187,43 @@ static int tg3_request_firmware(struct tg3 *tp)
10119 return 0; 10187 return 0;
10120} 10188}
10121 10189
10122static bool tg3_enable_msix(struct tg3 *tp) 10190static u32 tg3_irq_count(struct tg3 *tp)
10123{ 10191{
10124 int i, rc; 10192 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10125 struct msix_entry msix_ent[tp->irq_max];
10126 10193
10127 tp->irq_cnt = netif_get_num_default_rss_queues(); 10194 if (irq_cnt > 1) {
10128 if (tp->irq_cnt > 1) {
10129 /* We want as many rx rings enabled as there are cpus. 10195 /* We want as many rx rings enabled as there are cpus.
10130 * In multiqueue MSI-X mode, the first MSI-X vector 10196 * In multiqueue MSI-X mode, the first MSI-X vector
10131 * only deals with link interrupts, etc, so we add 10197 * only deals with link interrupts, etc, so we add
10132 * one to the number of vectors we are requesting. 10198 * one to the number of vectors we are requesting.
10133 */ 10199 */
10134 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max); 10200 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10135 } 10201 }
10136 10202
10203 return irq_cnt;
10204}
10205
10206static bool tg3_enable_msix(struct tg3 *tp)
10207{
10208 int i, rc;
10209 struct msix_entry msix_ent[tp->irq_max];
10210
10211 tp->txq_cnt = tp->txq_req;
10212 tp->rxq_cnt = tp->rxq_req;
10213 if (!tp->rxq_cnt)
10214 tp->rxq_cnt = netif_get_num_default_rss_queues();
10215 if (tp->rxq_cnt > tp->rxq_max)
10216 tp->rxq_cnt = tp->rxq_max;
10217
10218 /* Disable multiple TX rings by default. Simple round-robin hardware
10219 * scheduling of the TX rings can cause starvation of rings with
10220 * small packets when other rings have TSO or jumbo packets.
10221 */
10222 if (!tp->txq_req)
10223 tp->txq_cnt = 1;
10224
10225 tp->irq_cnt = tg3_irq_count(tp);
10226
10137 for (i = 0; i < tp->irq_max; i++) { 10227 for (i = 0; i < tp->irq_max; i++) {
10138 msix_ent[i].entry = i; 10228 msix_ent[i].entry = i;
10139 msix_ent[i].vector = 0; 10229 msix_ent[i].vector = 0;
@@ -10148,27 +10238,28 @@ static bool tg3_enable_msix(struct tg3 *tp)
10148 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 10238 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10149 tp->irq_cnt, rc); 10239 tp->irq_cnt, rc);
10150 tp->irq_cnt = rc; 10240 tp->irq_cnt = rc;
10241 tp->rxq_cnt = max(rc - 1, 1);
10242 if (tp->txq_cnt)
10243 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10151 } 10244 }
10152 10245
10153 for (i = 0; i < tp->irq_max; i++) 10246 for (i = 0; i < tp->irq_max; i++)
10154 tp->napi[i].irq_vec = msix_ent[i].vector; 10247 tp->napi[i].irq_vec = msix_ent[i].vector;
10155 10248
10156 netif_set_real_num_tx_queues(tp->dev, 1); 10249 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10157 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10158 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10159 pci_disable_msix(tp->pdev); 10250 pci_disable_msix(tp->pdev);
10160 return false; 10251 return false;
10161 } 10252 }
10162 10253
10163 if (tp->irq_cnt > 1) { 10254 if (tp->irq_cnt == 1)
10164 tg3_flag_set(tp, ENABLE_RSS); 10255 return true;
10165 10256
10166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 10257 tg3_flag_set(tp, ENABLE_RSS);
10167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { 10258
10168 tg3_flag_set(tp, ENABLE_TSS); 10259 if (tp->txq_cnt > 1)
10169 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); 10260 tg3_flag_set(tp, ENABLE_TSS);
10170 } 10261
10171 } 10262 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10172 10263
10173 return true; 10264 return true;
10174} 10265}
@@ -10202,6 +10293,11 @@ defcfg:
10202 if (!tg3_flag(tp, USING_MSIX)) { 10293 if (!tg3_flag(tp, USING_MSIX)) {
10203 tp->irq_cnt = 1; 10294 tp->irq_cnt = 1;
10204 tp->napi[0].irq_vec = tp->pdev->irq; 10295 tp->napi[0].irq_vec = tp->pdev->irq;
10296 }
10297
10298 if (tp->irq_cnt == 1) {
10299 tp->txq_cnt = 1;
10300 tp->rxq_cnt = 1;
10205 netif_set_real_num_tx_queues(tp->dev, 1); 10301 netif_set_real_num_tx_queues(tp->dev, 1);
10206 netif_set_real_num_rx_queues(tp->dev, 1); 10302 netif_set_real_num_rx_queues(tp->dev, 1);
10207 } 10303 }
@@ -10219,38 +10315,11 @@ static void tg3_ints_fini(struct tg3 *tp)
10219 tg3_flag_clear(tp, ENABLE_TSS); 10315 tg3_flag_clear(tp, ENABLE_TSS);
10220} 10316}
10221 10317
10222static int tg3_open(struct net_device *dev) 10318static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10223{ 10319{
10224 struct tg3 *tp = netdev_priv(dev); 10320 struct net_device *dev = tp->dev;
10225 int i, err; 10321 int i, err;
10226 10322
10227 if (tp->fw_needed) {
10228 err = tg3_request_firmware(tp);
10229 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10230 if (err)
10231 return err;
10232 } else if (err) {
10233 netdev_warn(tp->dev, "TSO capability disabled\n");
10234 tg3_flag_clear(tp, TSO_CAPABLE);
10235 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10236 netdev_notice(tp->dev, "TSO capability restored\n");
10237 tg3_flag_set(tp, TSO_CAPABLE);
10238 }
10239 }
10240
10241 netif_carrier_off(tp->dev);
10242
10243 err = tg3_power_up(tp);
10244 if (err)
10245 return err;
10246
10247 tg3_full_lock(tp, 0);
10248
10249 tg3_disable_ints(tp);
10250 tg3_flag_clear(tp, INIT_COMPLETE);
10251
10252 tg3_full_unlock(tp);
10253
10254 /* 10323 /*
10255 * Setup interrupts first so we know how 10324 * Setup interrupts first so we know how
10256 * many NAPI resources to allocate 10325 * many NAPI resources to allocate
@@ -10284,7 +10353,7 @@ static int tg3_open(struct net_device *dev)
10284 10353
10285 tg3_full_lock(tp, 0); 10354 tg3_full_lock(tp, 0);
10286 10355
10287 err = tg3_init_hw(tp, 1); 10356 err = tg3_init_hw(tp, reset_phy);
10288 if (err) { 10357 if (err) {
10289 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10358 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10290 tg3_free_rings(tp); 10359 tg3_free_rings(tp);
@@ -10295,7 +10364,7 @@ static int tg3_open(struct net_device *dev)
10295 if (err) 10364 if (err)
10296 goto err_out3; 10365 goto err_out3;
10297 10366
10298 if (tg3_flag(tp, USING_MSI)) { 10367 if (test_irq && tg3_flag(tp, USING_MSI)) {
10299 err = tg3_test_msi(tp); 10368 err = tg3_test_msi(tp);
10300 10369
10301 if (err) { 10370 if (err) {
@@ -10351,20 +10420,18 @@ err_out2:
10351 10420
10352err_out1: 10421err_out1:
10353 tg3_ints_fini(tp); 10422 tg3_ints_fini(tp);
10354 tg3_frob_aux_power(tp, false); 10423
10355 pci_set_power_state(tp->pdev, PCI_D3hot);
10356 return err; 10424 return err;
10357} 10425}
10358 10426
10359static int tg3_close(struct net_device *dev) 10427static void tg3_stop(struct tg3 *tp)
10360{ 10428{
10361 int i; 10429 int i;
10362 struct tg3 *tp = netdev_priv(dev);
10363 10430
10364 tg3_napi_disable(tp); 10431 tg3_napi_disable(tp);
10365 tg3_reset_task_cancel(tp); 10432 tg3_reset_task_cancel(tp);
10366 10433
10367 netif_tx_stop_all_queues(dev); 10434 netif_tx_disable(tp->dev);
10368 10435
10369 tg3_timer_stop(tp); 10436 tg3_timer_stop(tp);
10370 10437
@@ -10389,13 +10456,60 @@ static int tg3_close(struct net_device *dev)
10389 10456
10390 tg3_ints_fini(tp); 10457 tg3_ints_fini(tp);
10391 10458
10392 /* Clear stats across close / open calls */
10393 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10394 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10395
10396 tg3_napi_fini(tp); 10459 tg3_napi_fini(tp);
10397 10460
10398 tg3_free_consistent(tp); 10461 tg3_free_consistent(tp);
10462}
10463
10464static int tg3_open(struct net_device *dev)
10465{
10466 struct tg3 *tp = netdev_priv(dev);
10467 int err;
10468
10469 if (tp->fw_needed) {
10470 err = tg3_request_firmware(tp);
10471 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10472 if (err)
10473 return err;
10474 } else if (err) {
10475 netdev_warn(tp->dev, "TSO capability disabled\n");
10476 tg3_flag_clear(tp, TSO_CAPABLE);
10477 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10478 netdev_notice(tp->dev, "TSO capability restored\n");
10479 tg3_flag_set(tp, TSO_CAPABLE);
10480 }
10481 }
10482
10483 netif_carrier_off(tp->dev);
10484
10485 err = tg3_power_up(tp);
10486 if (err)
10487 return err;
10488
10489 tg3_full_lock(tp, 0);
10490
10491 tg3_disable_ints(tp);
10492 tg3_flag_clear(tp, INIT_COMPLETE);
10493
10494 tg3_full_unlock(tp);
10495
10496 err = tg3_start(tp, true, true);
10497 if (err) {
10498 tg3_frob_aux_power(tp, false);
10499 pci_set_power_state(tp->pdev, PCI_D3hot);
10500 }
10501 return err;
10502}
10503
10504static int tg3_close(struct net_device *dev)
10505{
10506 struct tg3 *tp = netdev_priv(dev);
10507
10508 tg3_stop(tp);
10509
10510 /* Clear stats across close / open calls */
10511 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10512 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10399 10513
10400 tg3_power_down(tp); 10514 tg3_power_down(tp);
10401 10515
@@ -11185,11 +11299,11 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11185 switch (info->cmd) { 11299 switch (info->cmd) {
11186 case ETHTOOL_GRXRINGS: 11300 case ETHTOOL_GRXRINGS:
11187 if (netif_running(tp->dev)) 11301 if (netif_running(tp->dev))
11188 info->data = tp->irq_cnt; 11302 info->data = tp->rxq_cnt;
11189 else { 11303 else {
11190 info->data = num_online_cpus(); 11304 info->data = num_online_cpus();
11191 if (info->data > TG3_IRQ_MAX_VECS_RSS) 11305 if (info->data > TG3_RSS_MAX_NUM_QS)
11192 info->data = TG3_IRQ_MAX_VECS_RSS; 11306 info->data = TG3_RSS_MAX_NUM_QS;
11193 } 11307 }
11194 11308
11195 /* The first interrupt vector only 11309 /* The first interrupt vector only
@@ -11246,6 +11360,58 @@ static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11246 return 0; 11360 return 0;
11247} 11361}
11248 11362
11363static void tg3_get_channels(struct net_device *dev,
11364 struct ethtool_channels *channel)
11365{
11366 struct tg3 *tp = netdev_priv(dev);
11367 u32 deflt_qs = netif_get_num_default_rss_queues();
11368
11369 channel->max_rx = tp->rxq_max;
11370 channel->max_tx = tp->txq_max;
11371
11372 if (netif_running(dev)) {
11373 channel->rx_count = tp->rxq_cnt;
11374 channel->tx_count = tp->txq_cnt;
11375 } else {
11376 if (tp->rxq_req)
11377 channel->rx_count = tp->rxq_req;
11378 else
11379 channel->rx_count = min(deflt_qs, tp->rxq_max);
11380
11381 if (tp->txq_req)
11382 channel->tx_count = tp->txq_req;
11383 else
11384 channel->tx_count = min(deflt_qs, tp->txq_max);
11385 }
11386}
11387
11388static int tg3_set_channels(struct net_device *dev,
11389 struct ethtool_channels *channel)
11390{
11391 struct tg3 *tp = netdev_priv(dev);
11392
11393 if (!tg3_flag(tp, SUPPORT_MSIX))
11394 return -EOPNOTSUPP;
11395
11396 if (channel->rx_count > tp->rxq_max ||
11397 channel->tx_count > tp->txq_max)
11398 return -EINVAL;
11399
11400 tp->rxq_req = channel->rx_count;
11401 tp->txq_req = channel->tx_count;
11402
11403 if (!netif_running(dev))
11404 return 0;
11405
11406 tg3_stop(tp);
11407
11408 netif_carrier_off(dev);
11409
11410 tg3_start(tp, true, false);
11411
11412 return 0;
11413}
11414
11249static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 11415static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11250{ 11416{
11251 switch (stringset) { 11417 switch (stringset) {
@@ -12494,6 +12660,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
12494 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 12660 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12495 .get_rxfh_indir = tg3_get_rxfh_indir, 12661 .get_rxfh_indir = tg3_get_rxfh_indir,
12496 .set_rxfh_indir = tg3_set_rxfh_indir, 12662 .set_rxfh_indir = tg3_set_rxfh_indir,
12663 .get_channels = tg3_get_channels,
12664 .set_channels = tg3_set_channels,
12497 .get_ts_info = ethtool_op_get_ts_info, 12665 .get_ts_info = ethtool_op_get_ts_info,
12498}; 12666};
12499 12667
@@ -14510,10 +14678,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14510 if (tg3_flag(tp, 57765_PLUS)) { 14678 if (tg3_flag(tp, 57765_PLUS)) {
14511 tg3_flag_set(tp, SUPPORT_MSIX); 14679 tg3_flag_set(tp, SUPPORT_MSIX);
14512 tp->irq_max = TG3_IRQ_MAX_VECS; 14680 tp->irq_max = TG3_IRQ_MAX_VECS;
14513 tg3_rss_init_dflt_indir_tbl(tp);
14514 } 14681 }
14515 } 14682 }
14516 14683
14684 tp->txq_max = 1;
14685 tp->rxq_max = 1;
14686 if (tp->irq_max > 1) {
14687 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14688 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14689
14690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14692 tp->txq_max = tp->irq_max - 1;
14693 }
14694
14517 if (tg3_flag(tp, 5755_PLUS) || 14695 if (tg3_flag(tp, 5755_PLUS) ||
14518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14519 tg3_flag_set(tp, SHORT_DMA_BUG); 14697 tg3_flag_set(tp, SHORT_DMA_BUG);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 6d52cb286826..d9308c32102e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2860,7 +2860,8 @@ struct tg3_rx_prodring_set {
2860 dma_addr_t rx_jmb_mapping; 2860 dma_addr_t rx_jmb_mapping;
2861}; 2861};
2862 2862
2863#define TG3_IRQ_MAX_VECS_RSS 5 2863#define TG3_RSS_MAX_NUM_QS 4
2864#define TG3_IRQ_MAX_VECS_RSS (TG3_RSS_MAX_NUM_QS + 1)
2864#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS 2865#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
2865 2866
2866struct tg3_napi { 2867struct tg3_napi {
@@ -3037,6 +3038,9 @@ struct tg3 {
3037 void (*write32_tx_mbox) (struct tg3 *, u32, 3038 void (*write32_tx_mbox) (struct tg3 *, u32,
3038 u32); 3039 u32);
3039 u32 dma_limit; 3040 u32 dma_limit;
3041 u32 txq_req;
3042 u32 txq_cnt;
3043 u32 txq_max;
3040 3044
3041 /* begin "rx thread" cacheline section */ 3045 /* begin "rx thread" cacheline section */
3042 struct tg3_napi napi[TG3_IRQ_MAX_VECS]; 3046 struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3055,9 @@ struct tg3 {
3051 u32 rx_std_max_post; 3055 u32 rx_std_max_post;
3052 u32 rx_offset; 3056 u32 rx_offset;
3053 u32 rx_pkt_map_sz; 3057 u32 rx_pkt_map_sz;
3058 u32 rxq_req;
3059 u32 rxq_cnt;
3060 u32 rxq_max;
3054 bool rx_refill; 3061 bool rx_refill;
3055 3062
3056 3063
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b441f33258e7..ce1eac529470 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3268,6 +3268,7 @@ bnad_pci_probe(struct pci_dev *pdev,
3268 * Output : using_dac = 1 for 64 bit DMA 3268 * Output : using_dac = 1 for 64 bit DMA
3269 * = 0 for 32 bit DMA 3269 * = 0 for 32 bit DMA
3270 */ 3270 */
3271 using_dac = false;
3271 err = bnad_pci_init(bnad, pdev, &using_dac); 3272 err = bnad_pci_init(bnad, pdev, &using_dac);
3272 if (err) 3273 if (err)
3273 goto unlock_mutex; 3274 goto unlock_mutex;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ec2dafe8ae5b..745a1f53361f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -67,12 +67,12 @@ enum {
67}; 67};
68 68
69enum { 69enum {
70 MEMWIN0_APERTURE = 65536, 70 MEMWIN0_APERTURE = 2048,
71 MEMWIN0_BASE = 0x30000, 71 MEMWIN0_BASE = 0x1b800,
72 MEMWIN1_APERTURE = 32768, 72 MEMWIN1_APERTURE = 32768,
73 MEMWIN1_BASE = 0x28000, 73 MEMWIN1_BASE = 0x28000,
74 MEMWIN2_APERTURE = 2048, 74 MEMWIN2_APERTURE = 65536,
75 MEMWIN2_BASE = 0x1b800, 75 MEMWIN2_BASE = 0x30000,
76}; 76};
77 77
78enum dev_master { 78enum dev_master {
@@ -211,6 +211,9 @@ struct tp_err_stats {
211struct tp_params { 211struct tp_params {
212 unsigned int ntxchan; /* # of Tx channels */ 212 unsigned int ntxchan; /* # of Tx channels */
213 unsigned int tre; /* log2 of core clocks per TP tick */ 213 unsigned int tre; /* log2 of core clocks per TP tick */
214
215 uint32_t dack_re; /* DACK timer resolution */
216 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
214}; 217};
215 218
216struct vpd_params { 219struct vpd_params {
@@ -315,6 +318,10 @@ enum { /* adapter flags */
315 USING_MSI = (1 << 1), 318 USING_MSI = (1 << 1),
316 USING_MSIX = (1 << 2), 319 USING_MSIX = (1 << 2),
317 FW_OK = (1 << 4), 320 FW_OK = (1 << 4),
321 RSS_TNLALLLOOKUP = (1 << 5),
322 USING_SOFT_PARAMS = (1 << 6),
323 MASTER_PF = (1 << 7),
324 FW_OFLD_CONN = (1 << 9),
318}; 325};
319 326
320struct rx_sw_desc; 327struct rx_sw_desc;
@@ -467,6 +474,11 @@ struct sge {
467 u16 rdma_rxq[NCHAN]; 474 u16 rdma_rxq[NCHAN];
468 u16 timer_val[SGE_NTIMERS]; 475 u16 timer_val[SGE_NTIMERS];
469 u8 counter_val[SGE_NCOUNTERS]; 476 u8 counter_val[SGE_NCOUNTERS];
477 u32 fl_pg_order; /* large page allocation size */
478 u32 stat_len; /* length of status page at ring end */
479 u32 pktshift; /* padding between CPL & packet data */
480 u32 fl_align; /* response queue message alignment */
481 u32 fl_starve_thres; /* Free List starvation threshold */
470 unsigned int starve_thres; 482 unsigned int starve_thres;
471 u8 idma_state[2]; 483 u8 idma_state[2];
472 unsigned int egr_start; 484 unsigned int egr_start;
@@ -511,6 +523,8 @@ struct adapter {
511 struct net_device *port[MAX_NPORTS]; 523 struct net_device *port[MAX_NPORTS];
512 u8 chan_map[NCHAN]; /* channel -> port map */ 524 u8 chan_map[NCHAN]; /* channel -> port map */
513 525
526 unsigned int l2t_start;
527 unsigned int l2t_end;
514 struct l2t_data *l2t; 528 struct l2t_data *l2t;
515 void *uld_handle[CXGB4_ULD_MAX]; 529 void *uld_handle[CXGB4_ULD_MAX];
516 struct list_head list_node; 530 struct list_head list_node;
@@ -619,7 +633,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
619int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, 633int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
620 struct net_device *dev, unsigned int iqid); 634 struct net_device *dev, unsigned int iqid);
621irqreturn_t t4_sge_intr_msix(int irq, void *cookie); 635irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
622void t4_sge_init(struct adapter *adap); 636int t4_sge_init(struct adapter *adap);
623void t4_sge_start(struct adapter *adap); 637void t4_sge_start(struct adapter *adap);
624void t4_sge_stop(struct adapter *adap); 638void t4_sge_stop(struct adapter *adap);
625extern int dbfifo_int_thresh; 639extern int dbfifo_int_thresh;
@@ -638,6 +652,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
638 return (us * adap->params.vpd.cclk) / 1000; 652 return (us * adap->params.vpd.cclk) / 1000;
639} 653}
640 654
655static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
656 unsigned int ticks)
657{
658 /* add Core Clock / 2 to round ticks to nearest uS */
659 return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
660 adapter->params.vpd.cclk);
661}
662
641void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, 663void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
642 u32 val); 664 u32 val);
643 665
@@ -656,6 +678,9 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
656 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); 678 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
657} 679}
658 680
681void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
682 unsigned int data_reg, const u32 *vals,
683 unsigned int nregs, unsigned int start_idx);
659void t4_intr_enable(struct adapter *adapter); 684void t4_intr_enable(struct adapter *adapter);
660void t4_intr_disable(struct adapter *adapter); 685void t4_intr_disable(struct adapter *adapter);
661int t4_slow_intr_handler(struct adapter *adapter); 686int t4_slow_intr_handler(struct adapter *adapter);
@@ -664,8 +689,12 @@ int t4_wait_dev_ready(struct adapter *adap);
664int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, 689int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
665 struct link_config *lc); 690 struct link_config *lc);
666int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); 691int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
692int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
693 __be32 *buf);
667int t4_seeprom_wp(struct adapter *adapter, bool enable); 694int t4_seeprom_wp(struct adapter *adapter, bool enable);
695int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
668int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 696int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
697unsigned int t4_flash_cfg_addr(struct adapter *adapter);
669int t4_check_fw_version(struct adapter *adapter); 698int t4_check_fw_version(struct adapter *adapter);
670int t4_prep_adapter(struct adapter *adapter); 699int t4_prep_adapter(struct adapter *adapter);
671int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 700int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -680,6 +709,8 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
680 709
681void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 710void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
682void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 711void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
712void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
713 unsigned int mask, unsigned int val);
683void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 714void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
684 struct tp_tcp_stats *v6); 715 struct tp_tcp_stats *v6);
685void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 716void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -695,6 +726,16 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
695int t4_fw_bye(struct adapter *adap, unsigned int mbox); 726int t4_fw_bye(struct adapter *adap, unsigned int mbox);
696int t4_early_init(struct adapter *adap, unsigned int mbox); 727int t4_early_init(struct adapter *adap, unsigned int mbox);
697int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); 728int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
729int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
730int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
731int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
732 const u8 *fw_data, unsigned int size, int force);
733int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
734 unsigned int mtype, unsigned int maddr,
735 u32 *finiver, u32 *finicsum, u32 *cfcsum);
736int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
737 unsigned int cache_line_size);
738int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
698int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 739int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
699 unsigned int vf, unsigned int nparams, const u32 *params, 740 unsigned int vf, unsigned int nparams, const u32 *params,
700 u32 *val); 741 u32 *val);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 933985420acb..6b9f6bb2f7ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -78,28 +78,45 @@
78 */ 78 */
79#define MAX_SGE_TIMERVAL 200U 79#define MAX_SGE_TIMERVAL 200U
80 80
81#ifdef CONFIG_PCI_IOV
82/*
83 * Virtual Function provisioning constants. We need two extra Ingress Queues
84 * with Interrupt capability to serve as the VF's Firmware Event Queue and
85 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
86 * Lists associated with them). For each Ethernet/Control Egress Queue and
87 * for each Free List, we need an Egress Context.
88 */
89enum { 81enum {
82 /*
83 * Physical Function provisioning constants.
84 */
85 PFRES_NVI = 4, /* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
87 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
88 */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
93
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
95 PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97#ifdef CONFIG_PCI_IOV
98 /*
99 * Virtual Function provisioning constants. We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102 * neither will have Free Lists associated with them). For each
103 * Ethernet/Control Egress Queue and for each Free List, we need an
104 * Egress Context.
105 */
90 VFRES_NPORTS = 1, /* # of "ports" per VF */ 106 VFRES_NPORTS = 1, /* # of "ports" per VF */
91 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ 107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
92 108
93 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ 109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
94 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ 110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
95 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ 111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
96 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
97 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ 112 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
113 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
98 VFRES_TC = 0, /* PCI-E traffic class */ 114 VFRES_TC = 0, /* PCI-E traffic class */
99 VFRES_NEXACTF = 16, /* # of exact MPS filters */ 115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
100 116
101 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, 117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
102 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, 118 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
119#endif
103}; 120};
104 121
105/* 122/*
@@ -146,7 +163,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
146 } 163 }
147 /*NOTREACHED*/ 164 /*NOTREACHED*/
148} 165}
149#endif
150 166
151enum { 167enum {
152 MAX_TXQ_ENTRIES = 16384, 168 MAX_TXQ_ENTRIES = 16384,
@@ -193,6 +209,7 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
193}; 209};
194 210
195#define FW_FNAME "cxgb4/t4fw.bin" 211#define FW_FNAME "cxgb4/t4fw.bin"
212#define FW_CFNAME "cxgb4/t4-config.txt"
196 213
197MODULE_DESCRIPTION(DRV_DESC); 214MODULE_DESCRIPTION(DRV_DESC);
198MODULE_AUTHOR("Chelsio Communications"); 215MODULE_AUTHOR("Chelsio Communications");
@@ -201,6 +218,28 @@ MODULE_VERSION(DRV_VERSION);
201MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 218MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
202MODULE_FIRMWARE(FW_FNAME); 219MODULE_FIRMWARE(FW_FNAME);
203 220
221/*
222 * Normally we're willing to become the firmware's Master PF but will be happy
223 * if another PF has already become the Master and initialized the adapter.
224 * Setting "force_init" will cause this driver to forcibly establish itself as
225 * the Master PF and initialize the adapter.
226 */
227static uint force_init;
228
229module_param(force_init, uint, 0644);
230MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
231
232/*
233 * Normally if the firmware we connect to has Configuration File support, we
234 * use that and only fall back to the old Driver-based initialization if the
235 * Configuration File fails for some reason. If force_old_init is set, then
236 * we'll always use the old Driver-based initialization sequence.
237 */
238static uint force_old_init;
239
240module_param(force_old_init, uint, 0644);
241MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
242
204static int dflt_msg_enable = DFLT_MSG_ENABLE; 243static int dflt_msg_enable = DFLT_MSG_ENABLE;
205 244
206module_param(dflt_msg_enable, int, 0644); 245module_param(dflt_msg_enable, int, 0644);
@@ -236,6 +275,20 @@ module_param_array(intr_cnt, uint, NULL, 0644);
236MODULE_PARM_DESC(intr_cnt, 275MODULE_PARM_DESC(intr_cnt,
237 "thresholds 1..3 for queue interrupt packet counters"); 276 "thresholds 1..3 for queue interrupt packet counters");
238 277
278/*
279 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
280 * offset by 2 bytes in order to have the IP headers line up on 4-byte
281 * boundaries. This is a requirement for many architectures which will throw
282 * a machine check fault if an attempt is made to access one of the 4-byte IP
283 * header fields on a non-4-byte boundary. And it's a major performance issue
284 * even on some architectures which allow it like some implementations of the
285 * x86 ISA. However, some architectures don't mind this and for some very
286 * edge-case performance sensitive applications (like forwarding large volumes
287 * of small packets), setting this DMA offset to 0 will decrease the number of
288 * PCI-E Bus transfers enough to measurably affect performance.
289 */
290static int rx_dma_offset = 2;
291
239static bool vf_acls; 292static bool vf_acls;
240 293
241#ifdef CONFIG_PCI_IOV 294#ifdef CONFIG_PCI_IOV
@@ -248,6 +301,30 @@ module_param_array(num_vf, uint, NULL, 0644);
248MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); 301MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
249#endif 302#endif
250 303
304/*
305 * The filter TCAM has a fixed portion and a variable portion. The fixed
306 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
307 * ports. The variable portion is 36 bits which can include things like Exact
308 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
309 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
310 * far exceed the 36-bit budget for this "compressed" header portion of the
311 * filter. Thus, we have a scarce resource which must be carefully managed.
312 *
313 * By default we set this up to mostly match the set of filter matching
314 * capabilities of T3 but with accommodations for some of T4's more
315 * interesting features:
316 *
317 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
318 * [Inner] VLAN (17), Port (3), FCoE (1) }
319 */
320enum {
321 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
322 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
323 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
324};
325
326static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
327
251static struct dentry *cxgb4_debugfs_root; 328static struct dentry *cxgb4_debugfs_root;
252 329
253static LIST_HEAD(adapter_list); 330static LIST_HEAD(adapter_list);
@@ -852,11 +929,25 @@ static int upgrade_fw(struct adapter *adap)
852 */ 929 */
853 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || 930 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
854 vers > adap->params.fw_vers) { 931 vers > adap->params.fw_vers) {
855 ret = -t4_load_fw(adap, fw->data, fw->size); 932 dev_info(dev, "upgrading firmware ...\n");
933 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
934 /*force=*/false);
856 if (!ret) 935 if (!ret)
857 dev_info(dev, "firmware upgraded to version %pI4 from " 936 dev_info(dev, "firmware successfully upgraded to "
858 FW_FNAME "\n", &hdr->fw_ver); 937 FW_FNAME " (%d.%d.%d.%d)\n",
938 FW_HDR_FW_VER_MAJOR_GET(vers),
939 FW_HDR_FW_VER_MINOR_GET(vers),
940 FW_HDR_FW_VER_MICRO_GET(vers),
941 FW_HDR_FW_VER_BUILD_GET(vers));
942 else
943 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
944 } else {
945 /*
946 * Tell our caller that we didn't upgrade the firmware.
947 */
948 ret = -EINVAL;
859 } 949 }
950
860out: release_firmware(fw); 951out: release_firmware(fw);
861 return ret; 952 return ret;
862} 953}
@@ -2470,8 +2561,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2470 else 2561 else
2471 delta = size - hw_pidx + pidx; 2562 delta = size - hw_pidx + pidx;
2472 wmb(); 2563 wmb();
2473 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), 2564 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2474 V_QID(qid) | V_PIDX(delta)); 2565 QID(qid) | PIDX(delta));
2475 } 2566 }
2476out: 2567out:
2477 return ret; 2568 return ret;
@@ -2579,8 +2670,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2579 else 2670 else
2580 delta = q->size - hw_pidx + q->db_pidx; 2671 delta = q->size - hw_pidx + q->db_pidx;
2581 wmb(); 2672 wmb();
2582 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), 2673 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2583 V_QID(q->cntxt_id) | V_PIDX(delta)); 2674 QID(q->cntxt_id) | PIDX(delta));
2584 } 2675 }
2585out: 2676out:
2586 q->db_disabled = 0; 2677 q->db_disabled = 0;
@@ -2617,9 +2708,9 @@ static void process_db_full(struct work_struct *work)
2617 2708
2618 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 2709 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2619 drain_db_fifo(adap, dbfifo_drain_delay); 2710 drain_db_fifo(adap, dbfifo_drain_delay);
2620 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, 2711 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2621 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 2712 DBFIFO_HP_INT | DBFIFO_LP_INT,
2622 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); 2713 DBFIFO_HP_INT | DBFIFO_LP_INT);
2623 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 2714 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2624} 2715}
2625 2716
@@ -2639,8 +2730,8 @@ static void process_db_drop(struct work_struct *work)
2639 2730
2640void t4_db_full(struct adapter *adap) 2731void t4_db_full(struct adapter *adap)
2641{ 2732{
2642 t4_set_reg_field(adap, A_SGE_INT_ENABLE3, 2733 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2643 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); 2734 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
2644 queue_work(workq, &adap->db_full_task); 2735 queue_work(workq, &adap->db_full_task);
2645} 2736}
2646 2737
@@ -3076,6 +3167,10 @@ static void setup_memwin(struct adapter *adap)
3076 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 3167 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3077 (bar0 + MEMWIN2_BASE) | BIR(0) | 3168 (bar0 + MEMWIN2_BASE) | BIR(0) |
3078 WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 3169 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
3170}
3171
3172static void setup_memwin_rdma(struct adapter *adap)
3173{
3079 if (adap->vres.ocq.size) { 3174 if (adap->vres.ocq.size) {
3080 unsigned int start, sz_kb; 3175 unsigned int start, sz_kb;
3081 3176
@@ -3155,6 +3250,488 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3155 3250
3156/* 3251/*
3157 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 3252 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3253 *
3254 * If the firmware we're dealing with has Configuration File support, then
3255 * we use that to perform all configuration
3256 */
3257
3258/*
3259 * Tweak configuration based on module parameters, etc. Most of these have
3260 * defaults assigned to them by Firmware Configuration Files (if we're using
3261 * them) but need to be explicitly set if we're using hard-coded
3262 * initialization. But even in the case of using Firmware Configuration
3263 * Files, we'd like to expose the ability to change these via module
3264 * parameters so these are essentially common tweaks/settings for
3265 * Configuration Files and hard-coded initialization ...
3266 */
3267static int adap_init0_tweaks(struct adapter *adapter)
3268{
3269 /*
3270 * Fix up various Host-Dependent Parameters like Page Size, Cache
3271 * Line Size, etc. The firmware default is for a 4KB Page Size and
3272 * 64B Cache Line Size ...
3273 */
3274 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3275
3276 /*
3277 * Process module parameters which affect early initialization.
3278 */
3279 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3280 dev_err(&adapter->pdev->dev,
3281 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3282 rx_dma_offset);
3283 rx_dma_offset = 2;
3284 }
3285 t4_set_reg_field(adapter, SGE_CONTROL,
3286 PKTSHIFT_MASK,
3287 PKTSHIFT(rx_dma_offset));
3288
3289 /*
3290 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3291 * adds the pseudo header itself.
3292 */
3293 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
3294 CSUM_HAS_PSEUDO_HDR, 0);
3295
3296 return 0;
3297}
3298
3299/*
3300 * Attempt to initialize the adapter via a Firmware Configuration File.
3301 */
3302static int adap_init0_config(struct adapter *adapter, int reset)
3303{
3304 struct fw_caps_config_cmd caps_cmd;
3305 const struct firmware *cf;
3306 unsigned long mtype = 0, maddr = 0;
3307 u32 finiver, finicsum, cfcsum;
3308 int ret, using_flash;
3309
3310 /*
3311 * Reset device if necessary.
3312 */
3313 if (reset) {
3314 ret = t4_fw_reset(adapter, adapter->mbox,
3315 PIORSTMODE | PIORST);
3316 if (ret < 0)
3317 goto bye;
3318 }
3319
3320 /*
3321 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3322 * then use that. Otherwise, use the configuration file stored
3323 * in the adapter flash ...
3324 */
3325 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
3326 if (ret < 0) {
3327 using_flash = 1;
3328 mtype = FW_MEMTYPE_CF_FLASH;
3329 maddr = t4_flash_cfg_addr(adapter);
3330 } else {
3331 u32 params[7], val[7];
3332
3333 using_flash = 0;
3334 if (cf->size >= FLASH_CFG_MAX_SIZE)
3335 ret = -ENOMEM;
3336 else {
3337 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3338 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3339 ret = t4_query_params(adapter, adapter->mbox,
3340 adapter->fn, 0, 1, params, val);
3341 if (ret == 0) {
3342 /*
3343 * For t4_memory_write() below addresses and
3344 * sizes have to be in terms of multiples of 4
3345 * bytes. So, if the Configuration File isn't
3346 * a multiple of 4 bytes in length we'll have
3347 * to write that out separately since we can't
3348 * guarantee that the bytes following the
3349 * residual byte in the buffer returned by
3350 * request_firmware() are zeroed out ...
3351 */
3352 size_t resid = cf->size & 0x3;
3353 size_t size = cf->size & ~0x3;
3354 __be32 *data = (__be32 *)cf->data;
3355
3356 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
3357 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
3358
3359 ret = t4_memory_write(adapter, mtype, maddr,
3360 size, data);
3361 if (ret == 0 && resid != 0) {
3362 union {
3363 __be32 word;
3364 char buf[4];
3365 } last;
3366 int i;
3367
3368 last.word = data[size >> 2];
3369 for (i = resid; i < 4; i++)
3370 last.buf[i] = 0;
3371 ret = t4_memory_write(adapter, mtype,
3372 maddr + size,
3373 4, &last.word);
3374 }
3375 }
3376 }
3377
3378 release_firmware(cf);
3379 if (ret)
3380 goto bye;
3381 }
3382
3383 /*
3384 * Issue a Capability Configuration command to the firmware to get it
3385 * to parse the Configuration File. We don't use t4_fw_config_file()
3386 * because we want the ability to modify various features after we've
3387 * processed the configuration file ...
3388 */
3389 memset(&caps_cmd, 0, sizeof(caps_cmd));
3390 caps_cmd.op_to_write =
3391 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3392 FW_CMD_REQUEST |
3393 FW_CMD_READ);
3394 caps_cmd.retval_len16 =
3395 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
3396 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3397 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
3398 FW_LEN16(caps_cmd));
3399 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3400 &caps_cmd);
3401 if (ret < 0)
3402 goto bye;
3403
3404 finiver = ntohl(caps_cmd.finiver);
3405 finicsum = ntohl(caps_cmd.finicsum);
3406 cfcsum = ntohl(caps_cmd.cfcsum);
3407 if (finicsum != cfcsum)
3408 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3409 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3410 finicsum, cfcsum);
3411
3412 /*
3413 * If we're a pure NIC driver then disable all offloading facilities.
3414 * This will allow the firmware to optimize aspects of the hardware
3415 * configuration which will result in improved performance.
3416 */
3417 caps_cmd.ofldcaps = 0;
3418 caps_cmd.iscsicaps = 0;
3419 caps_cmd.rdmacaps = 0;
3420 caps_cmd.fcoecaps = 0;
3421
3422 /*
3423 * And now tell the firmware to use the configuration we just loaded.
3424 */
3425 caps_cmd.op_to_write =
3426 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3427 FW_CMD_REQUEST |
3428 FW_CMD_WRITE);
3429 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3430 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3431 NULL);
3432 if (ret < 0)
3433 goto bye;
3434
3435 /*
3436 * Tweak configuration based on system architecture, module
3437 * parameters, etc.
3438 */
3439 ret = adap_init0_tweaks(adapter);
3440 if (ret < 0)
3441 goto bye;
3442
3443 /*
3444 * And finally tell the firmware to initialize itself using the
3445 * parameters from the Configuration File.
3446 */
3447 ret = t4_fw_initialize(adapter, adapter->mbox);
3448 if (ret < 0)
3449 goto bye;
3450
3451 /*
3452 * Return successfully and note that we're operating with parameters
3453 * not supplied by the driver, rather than from hard-wired
3454 * initialization constants burried in the driver.
3455 */
3456 adapter->flags |= USING_SOFT_PARAMS;
3457 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3458 "Configuration File %s, version %#x, computed checksum %#x\n",
3459 (using_flash
3460 ? "in device FLASH"
3461 : "/lib/firmware/" FW_CFNAME),
3462 finiver, cfcsum);
3463 return 0;
3464
3465 /*
3466 * Something bad happened. Return the error ... (If the "error"
3467 * is that there's no Configuration File on the adapter we don't
3468 * want to issue a warning since this is fairly common.)
3469 */
3470bye:
3471 if (ret != -ENOENT)
3472 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
3473 -ret);
3474 return ret;
3475}
3476
3477/*
3478 * Attempt to initialize the adapter via hard-coded, driver supplied
3479 * parameters ...
3480 */
3481static int adap_init0_no_config(struct adapter *adapter, int reset)
3482{
3483 struct sge *s = &adapter->sge;
3484 struct fw_caps_config_cmd caps_cmd;
3485 u32 v;
3486 int i, ret;
3487
3488 /*
3489 * Reset device if necessary
3490 */
3491 if (reset) {
3492 ret = t4_fw_reset(adapter, adapter->mbox,
3493 PIORSTMODE | PIORST);
3494 if (ret < 0)
3495 goto bye;
3496 }
3497
3498 /*
3499 * Get device capabilities and select which we'll be using.
3500 */
3501 memset(&caps_cmd, 0, sizeof(caps_cmd));
3502 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3503 FW_CMD_REQUEST | FW_CMD_READ);
3504 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3505 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3506 &caps_cmd);
3507 if (ret < 0)
3508 goto bye;
3509
3510#ifndef CONFIG_CHELSIO_T4_OFFLOAD
3511 /*
3512 * If we're a pure NIC driver then disable all offloading facilities.
3513 * This will allow the firmware to optimize aspects of the hardware
3514 * configuration which will result in improved performance.
3515 */
3516 caps_cmd.ofldcaps = 0;
3517 caps_cmd.iscsicaps = 0;
3518 caps_cmd.rdmacaps = 0;
3519 caps_cmd.fcoecaps = 0;
3520#endif
3521
3522 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3523 if (!vf_acls)
3524 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3525 else
3526 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3527 } else if (vf_acls) {
3528 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
3529 goto bye;
3530 }
3531 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3532 FW_CMD_REQUEST | FW_CMD_WRITE);
3533 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3534 NULL);
3535 if (ret < 0)
3536 goto bye;
3537
3538 /*
3539 * Tweak configuration based on system architecture, module
3540 * parameters, etc.
3541 */
3542 ret = adap_init0_tweaks(adapter);
3543 if (ret < 0)
3544 goto bye;
3545
3546 /*
3547 * Select RSS Global Mode we want to use. We use "Basic Virtual"
3548 * mode which maps each Virtual Interface to its own section of
3549 * the RSS Table and we turn on all map and hash enables ...
3550 */
3551 adapter->flags |= RSS_TNLALLLOOKUP;
3552 ret = t4_config_glbl_rss(adapter, adapter->mbox,
3553 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3554 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3555 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
3556 ((adapter->flags & RSS_TNLALLLOOKUP) ?
3557 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
3558 if (ret < 0)
3559 goto bye;
3560
3561 /*
3562 * Set up our own fundamental resource provisioning ...
3563 */
3564 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
3565 PFRES_NEQ, PFRES_NETHCTRL,
3566 PFRES_NIQFLINT, PFRES_NIQ,
3567 PFRES_TC, PFRES_NVI,
3568 FW_PFVF_CMD_CMASK_MASK,
3569 pfvfres_pmask(adapter, adapter->fn, 0),
3570 PFRES_NEXACTF,
3571 PFRES_R_CAPS, PFRES_WX_CAPS);
3572 if (ret < 0)
3573 goto bye;
3574
3575 /*
3576 * Perform low level SGE initialization. We need to do this before we
3577 * send the firmware the INITIALIZE command because that will cause
3578 * any other PF Drivers which are waiting for the Master
3579 * Initialization to proceed forward.
3580 */
3581 for (i = 0; i < SGE_NTIMERS - 1; i++)
3582 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
3583 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3584 s->counter_val[0] = 1;
3585 for (i = 1; i < SGE_NCOUNTERS; i++)
3586 s->counter_val[i] = min(intr_cnt[i - 1],
3587 THRESHOLD_0_GET(THRESHOLD_0_MASK));
3588 t4_sge_init(adapter);
3589
3590#ifdef CONFIG_PCI_IOV
3591 /*
3592 * Provision resource limits for Virtual Functions. We currently
3593 * grant them all the same static resource limits except for the Port
3594 * Access Rights Mask which we're assigning based on the PF. All of
3595 * the static provisioning stuff for both the PF and VF really needs
3596 * to be managed in a persistent manner for each device which the
3597 * firmware controls.
3598 */
3599 {
3600 int pf, vf;
3601
3602 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3603 if (num_vf[pf] <= 0)
3604 continue;
3605
3606 /* VF numbering starts at 1! */
3607 for (vf = 1; vf <= num_vf[pf]; vf++) {
3608 ret = t4_cfg_pfvf(adapter, adapter->mbox,
3609 pf, vf,
3610 VFRES_NEQ, VFRES_NETHCTRL,
3611 VFRES_NIQFLINT, VFRES_NIQ,
3612 VFRES_TC, VFRES_NVI,
3613 FW_PFVF_CMD_CMASK_GET(
3614 FW_PFVF_CMD_CMASK_MASK),
3615 pfvfres_pmask(
3616 adapter, pf, vf),
3617 VFRES_NEXACTF,
3618 VFRES_R_CAPS, VFRES_WX_CAPS);
3619 if (ret < 0)
3620 dev_warn(adapter->pdev_dev,
3621 "failed to "\
3622 "provision pf/vf=%d/%d; "
3623 "err=%d\n", pf, vf, ret);
3624 }
3625 }
3626 }
3627#endif
3628
3629 /*
3630 * Set up the default filter mode. Later we'll want to implement this
3631 * via a firmware command, etc. ... This needs to be done before the
3632 * firmare initialization command ... If the selected set of fields
3633 * isn't equal to the default value, we'll need to make sure that the
3634 * field selections will fit in the 36-bit budget.
3635 */
3636 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
3637 int i, bits = 0;
3638
3639 for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
3640 switch (tp_vlan_pri_map & (1 << i)) {
3641 case 0:
3642 /* compressed filter field not enabled */
3643 break;
3644 case FCOE_MASK:
3645 bits += 1;
3646 break;
3647 case PORT_MASK:
3648 bits += 3;
3649 break;
3650 case VNIC_ID_MASK:
3651 bits += 17;
3652 break;
3653 case VLAN_MASK:
3654 bits += 17;
3655 break;
3656 case TOS_MASK:
3657 bits += 8;
3658 break;
3659 case PROTOCOL_MASK:
3660 bits += 8;
3661 break;
3662 case ETHERTYPE_MASK:
3663 bits += 16;
3664 break;
3665 case MACMATCH_MASK:
3666 bits += 9;
3667 break;
3668 case MPSHITTYPE_MASK:
3669 bits += 3;
3670 break;
3671 case FRAGMENTATION_MASK:
3672 bits += 1;
3673 break;
3674 }
3675
3676 if (bits > 36) {
3677 dev_err(adapter->pdev_dev,
3678 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
3679 " using %#x\n", tp_vlan_pri_map, bits,
3680 TP_VLAN_PRI_MAP_DEFAULT);
3681 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
3682 }
3683 }
3684 v = tp_vlan_pri_map;
3685 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
3686 &v, 1, TP_VLAN_PRI_MAP);
3687
3688 /*
3689 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
3690 * to support any of the compressed filter fields above. Newer
3691 * versions of the firmware do this automatically but it doesn't hurt
3692 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
3693 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
3694 * since the firmware automatically turns this on and off when we have
3695 * a non-zero number of filters active (since it does have a
3696 * performance impact).
3697 */
3698 if (tp_vlan_pri_map)
3699 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
3700 FIVETUPLELOOKUP_MASK,
3701 FIVETUPLELOOKUP_MASK);
3702
3703 /*
3704 * Tweak some settings.
3705 */
3706 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
3707 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
3708 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
3709 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
3710
3711 /*
3712 * Get basic stuff going by issuing the Firmware Initialize command.
3713 * Note that this _must_ be after all PFVF commands ...
3714 */
3715 ret = t4_fw_initialize(adapter, adapter->mbox);
3716 if (ret < 0)
3717 goto bye;
3718
3719 /*
3720 * Return successfully!
3721 */
3722 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
3723 "driver parameters\n");
3724 return 0;
3725
3726 /*
3727 * Something bad happened. Return the error ...
3728 */
3729bye:
3730 return ret;
3731}
3732
3733/*
3734 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3158 */ 3735 */
3159static int adap_init0(struct adapter *adap) 3736static int adap_init0(struct adapter *adap)
3160{ 3737{
@@ -3162,72 +3739,216 @@ static int adap_init0(struct adapter *adap)
3162 u32 v, port_vec; 3739 u32 v, port_vec;
3163 enum dev_state state; 3740 enum dev_state state;
3164 u32 params[7], val[7]; 3741 u32 params[7], val[7];
3165 struct fw_caps_config_cmd c; 3742 int reset = 1, j;
3166
3167 ret = t4_check_fw_version(adap);
3168 if (ret == -EINVAL || ret > 0) {
3169 if (upgrade_fw(adap) >= 0) /* recache FW version */
3170 ret = t4_check_fw_version(adap);
3171 }
3172 if (ret < 0)
3173 return ret;
3174 3743
3175 /* contact FW, request master */ 3744 /*
3176 ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); 3745 * Contact FW, advertising Master capability (and potentially forcing
3746 * ourselves as the Master PF if our module parameter force_init is
3747 * set).
3748 */
3749 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
3750 force_init ? MASTER_MUST : MASTER_MAY,
3751 &state);
3177 if (ret < 0) { 3752 if (ret < 0) {
3178 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 3753 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3179 ret); 3754 ret);
3180 return ret; 3755 return ret;
3181 } 3756 }
3757 if (ret == adap->mbox)
3758 adap->flags |= MASTER_PF;
3759 if (force_init && state == DEV_STATE_INIT)
3760 state = DEV_STATE_UNINIT;
3182 3761
3183 /* reset device */ 3762 /*
3184 ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); 3763 * If we're the Master PF Driver and the device is uninitialized,
3185 if (ret < 0) 3764 * then let's consider upgrading the firmware ... (We always want
3186 goto bye; 3765 * to check the firmware version number in order to A. get it for
3187 3766 * later reporting and B. to warn if the currently loaded firmware
3188 for (v = 0; v < SGE_NTIMERS - 1; v++) 3767 * is excessively mismatched relative to the driver.)
3189 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); 3768 */
3190 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; 3769 ret = t4_check_fw_version(adap);
3191 adap->sge.counter_val[0] = 1; 3770 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3192 for (v = 1; v < SGE_NCOUNTERS; v++) 3771 if (ret == -EINVAL || ret > 0) {
3193 adap->sge.counter_val[v] = min(intr_cnt[v - 1], 3772 if (upgrade_fw(adap) >= 0) {
3194 THRESHOLD_3_MASK); 3773 /*
3195#define FW_PARAM_DEV(param) \ 3774 * Note that the chip was reset as part of the
3196 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3775 * firmware upgrade so we don't reset it again
3197 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3776 * below and grab the new firmware version.
3777 */
3778 reset = 0;
3779 ret = t4_check_fw_version(adap);
3780 }
3781 }
3782 if (ret < 0)
3783 return ret;
3784 }
3198 3785
3199 params[0] = FW_PARAM_DEV(CCLK); 3786 /*
3200 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); 3787 * Grab VPD parameters. This should be done after we establish a
3788 * connection to the firmware since some of the VPD parameters
3789 * (notably the Core Clock frequency) are retrieved via requests to
3790 * the firmware. On the other hand, we need these fairly early on
3791 * so we do this right after getting ahold of the firmware.
3792 */
3793 ret = get_vpd_params(adap, &adap->params.vpd);
3201 if (ret < 0) 3794 if (ret < 0)
3202 goto bye; 3795 goto bye;
3203 adap->params.vpd.cclk = val[0];
3204 3796
3205 ret = adap_init1(adap, &c); 3797 /*
3798 * Find out what ports are available to us. Note that we need to do
3799 * this before calling adap_init0_no_config() since it needs nports
3800 * and portvec ...
3801 */
3802 v =
3803 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3804 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
3805 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
3206 if (ret < 0) 3806 if (ret < 0)
3207 goto bye; 3807 goto bye;
3208 3808
3809 adap->params.nports = hweight32(port_vec);
3810 adap->params.portvec = port_vec;
3811
3812 /*
3813 * If the firmware is initialized already (and we're not forcing a
3814 * master initialization), note that we're living with existing
3815 * adapter parameters. Otherwise, it's time to try initializing the
3816 * adapter ...
3817 */
3818 if (state == DEV_STATE_INIT) {
3819 dev_info(adap->pdev_dev, "Coming up as %s: "\
3820 "Adapter already initialized\n",
3821 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3822 adap->flags |= USING_SOFT_PARAMS;
3823 } else {
3824 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3825 "Initializing adapter\n");
3826
3827 /*
3828 * If the firmware doesn't support Configuration
3829 * Files warn user and exit,
3830 */
3831 if (ret < 0)
3832 dev_warn(adap->pdev_dev, "Firmware doesn't support "
3833 "configuration file.\n");
3834 if (force_old_init)
3835 ret = adap_init0_no_config(adap, reset);
3836 else {
3837 /*
3838 * Find out whether we're dealing with a version of
3839 * the firmware which has configuration file support.
3840 */
3841 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3842 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3843 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
3844 params, val);
3845
3846 /*
3847 * If the firmware doesn't support Configuration
3848 * Files, use the old Driver-based, hard-wired
3849 * initialization. Otherwise, try using the
3850 * Configuration File support and fall back to the
3851 * Driver-based initialization if there's no
3852 * Configuration File found.
3853 */
3854 if (ret < 0)
3855 ret = adap_init0_no_config(adap, reset);
3856 else {
3857 /*
3858 * The firmware provides us with a memory
3859 * buffer where we can load a Configuration
3860 * File from the host if we want to override
3861 * the Configuration File in flash.
3862 */
3863
3864 ret = adap_init0_config(adap, reset);
3865 if (ret == -ENOENT) {
3866 dev_info(adap->pdev_dev,
3867 "No Configuration File present "
3868 "on adapter. Using hard-wired "
3869 "configuration parameters.\n");
3870 ret = adap_init0_no_config(adap, reset);
3871 }
3872 }
3873 }
3874 if (ret < 0) {
3875 dev_err(adap->pdev_dev,
3876 "could not initialize adapter, error %d\n",
3877 -ret);
3878 goto bye;
3879 }
3880 }
3881
3882 /*
3883 * If we're living with non-hard-coded parameters (either from a
3884 * Firmware Configuration File or values programmed by a different PF
3885 * Driver), give the SGE code a chance to pull in anything that it
3886 * needs ... Note that this must be called after we retrieve our VPD
3887 * parameters in order to know how to convert core ticks to seconds.
3888 */
3889 if (adap->flags & USING_SOFT_PARAMS) {
3890 ret = t4_sge_init(adap);
3891 if (ret < 0)
3892 goto bye;
3893 }
3894
3895 /*
3896 * Grab some of our basic fundamental operating parameters.
3897 */
3898#define FW_PARAM_DEV(param) \
3899 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3900 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3901
3209#define FW_PARAM_PFVF(param) \ 3902#define FW_PARAM_PFVF(param) \
3210 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3903 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3211 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ 3904 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
3212 FW_PARAMS_PARAM_Y(adap->fn)) 3905 FW_PARAMS_PARAM_Y(0) | \
3906 FW_PARAMS_PARAM_Z(0)
3213 3907
3214 params[0] = FW_PARAM_DEV(PORTVEC); 3908 params[0] = FW_PARAM_PFVF(EQ_START);
3215 params[1] = FW_PARAM_PFVF(L2T_START); 3909 params[1] = FW_PARAM_PFVF(L2T_START);
3216 params[2] = FW_PARAM_PFVF(L2T_END); 3910 params[2] = FW_PARAM_PFVF(L2T_END);
3217 params[3] = FW_PARAM_PFVF(FILTER_START); 3911 params[3] = FW_PARAM_PFVF(FILTER_START);
3218 params[4] = FW_PARAM_PFVF(FILTER_END); 3912 params[4] = FW_PARAM_PFVF(FILTER_END);
3219 params[5] = FW_PARAM_PFVF(IQFLINT_START); 3913 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3220 params[6] = FW_PARAM_PFVF(EQ_START); 3914 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
3221 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3222 if (ret < 0) 3915 if (ret < 0)
3223 goto bye; 3916 goto bye;
3224 port_vec = val[0]; 3917 adap->sge.egr_start = val[0];
3918 adap->l2t_start = val[1];
3919 adap->l2t_end = val[2];
3225 adap->tids.ftid_base = val[3]; 3920 adap->tids.ftid_base = val[3];
3226 adap->tids.nftids = val[4] - val[3] + 1; 3921 adap->tids.nftids = val[4] - val[3] + 1;
3227 adap->sge.ingr_start = val[5]; 3922 adap->sge.ingr_start = val[5];
3228 adap->sge.egr_start = val[6];
3229 3923
3230 if (c.ofldcaps) { 3924 /* query params related to active filter region */
3925 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3926 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3927 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3928 /* If Active filter size is set we enable establishing
3929 * offload connection through firmware work request
3930 */
3931 if ((val[0] != val[1]) && (ret >= 0)) {
3932 adap->flags |= FW_OFLD_CONN;
3933 adap->tids.aftid_base = val[0];
3934 adap->tids.aftid_end = val[1];
3935 }
3936
3937#ifdef CONFIG_CHELSIO_T4_OFFLOAD
3938 /*
3939 * Get device capabilities so we can determine what resources we need
3940 * to manage.
3941 */
3942 memset(&caps_cmd, 0, sizeof(caps_cmd));
3943 caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3944 FW_CMD_REQUEST | FW_CMD_READ);
3945 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3946 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3947 &caps_cmd);
3948 if (ret < 0)
3949 goto bye;
3950
3951 if (caps_cmd.ofldcaps) {
3231 /* query offload-related parameters */ 3952 /* query offload-related parameters */
3232 params[0] = FW_PARAM_DEV(NTID); 3953 params[0] = FW_PARAM_DEV(NTID);
3233 params[1] = FW_PARAM_PFVF(SERVER_START); 3954 params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -3235,28 +3956,55 @@ static int adap_init0(struct adapter *adap)
3235 params[3] = FW_PARAM_PFVF(TDDP_START); 3956 params[3] = FW_PARAM_PFVF(TDDP_START);
3236 params[4] = FW_PARAM_PFVF(TDDP_END); 3957 params[4] = FW_PARAM_PFVF(TDDP_END);
3237 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3958 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3238 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, 3959 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3239 val); 3960 params, val);
3240 if (ret < 0) 3961 if (ret < 0)
3241 goto bye; 3962 goto bye;
3242 adap->tids.ntids = val[0]; 3963 adap->tids.ntids = val[0];
3243 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); 3964 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3244 adap->tids.stid_base = val[1]; 3965 adap->tids.stid_base = val[1];
3245 adap->tids.nstids = val[2] - val[1] + 1; 3966 adap->tids.nstids = val[2] - val[1] + 1;
3967 /*
3968 * Setup server filter region. Divide the availble filter
3969 * region into two parts. Regular filters get 1/3rd and server
3970 * filters get 2/3rd part. This is only enabled if workarond
3971 * path is enabled.
3972 * 1. For regular filters.
3973 * 2. Server filter: This are special filters which are used
3974 * to redirect SYN packets to offload queue.
3975 */
3976 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3977 adap->tids.sftid_base = adap->tids.ftid_base +
3978 DIV_ROUND_UP(adap->tids.nftids, 3);
3979 adap->tids.nsftids = adap->tids.nftids -
3980 DIV_ROUND_UP(adap->tids.nftids, 3);
3981 adap->tids.nftids = adap->tids.sftid_base -
3982 adap->tids.ftid_base;
3983 }
3246 adap->vres.ddp.start = val[3]; 3984 adap->vres.ddp.start = val[3];
3247 adap->vres.ddp.size = val[4] - val[3] + 1; 3985 adap->vres.ddp.size = val[4] - val[3] + 1;
3248 adap->params.ofldq_wr_cred = val[5]; 3986 adap->params.ofldq_wr_cred = val[5];
3987
3988 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
3989 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
3990 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3991 params, val);
3992 if ((val[0] != val[1]) && (ret >= 0)) {
3993 adap->tids.uotid_base = val[0];
3994 adap->tids.nuotids = val[1] - val[0] + 1;
3995 }
3996
3249 adap->params.offload = 1; 3997 adap->params.offload = 1;
3250 } 3998 }
3251 if (c.rdmacaps) { 3999 if (caps_cmd.rdmacaps) {
3252 params[0] = FW_PARAM_PFVF(STAG_START); 4000 params[0] = FW_PARAM_PFVF(STAG_START);
3253 params[1] = FW_PARAM_PFVF(STAG_END); 4001 params[1] = FW_PARAM_PFVF(STAG_END);
3254 params[2] = FW_PARAM_PFVF(RQ_START); 4002 params[2] = FW_PARAM_PFVF(RQ_START);
3255 params[3] = FW_PARAM_PFVF(RQ_END); 4003 params[3] = FW_PARAM_PFVF(RQ_END);
3256 params[4] = FW_PARAM_PFVF(PBL_START); 4004 params[4] = FW_PARAM_PFVF(PBL_START);
3257 params[5] = FW_PARAM_PFVF(PBL_END); 4005 params[5] = FW_PARAM_PFVF(PBL_END);
3258 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, 4006 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3259 val); 4007 params, val);
3260 if (ret < 0) 4008 if (ret < 0)
3261 goto bye; 4009 goto bye;
3262 adap->vres.stag.start = val[0]; 4010 adap->vres.stag.start = val[0];
@@ -3272,8 +4020,7 @@ static int adap_init0(struct adapter *adap)
3272 params[3] = FW_PARAM_PFVF(CQ_END); 4020 params[3] = FW_PARAM_PFVF(CQ_END);
3273 params[4] = FW_PARAM_PFVF(OCQ_START); 4021 params[4] = FW_PARAM_PFVF(OCQ_START);
3274 params[5] = FW_PARAM_PFVF(OCQ_END); 4022 params[5] = FW_PARAM_PFVF(OCQ_END);
3275 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, 4023 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
3276 val);
3277 if (ret < 0) 4024 if (ret < 0)
3278 goto bye; 4025 goto bye;
3279 adap->vres.qp.start = val[0]; 4026 adap->vres.qp.start = val[0];
@@ -3283,11 +4030,11 @@ static int adap_init0(struct adapter *adap)
3283 adap->vres.ocq.start = val[4]; 4030 adap->vres.ocq.start = val[4];
3284 adap->vres.ocq.size = val[5] - val[4] + 1; 4031 adap->vres.ocq.size = val[5] - val[4] + 1;
3285 } 4032 }
3286 if (c.iscsicaps) { 4033 if (caps_cmd.iscsicaps) {
3287 params[0] = FW_PARAM_PFVF(ISCSI_START); 4034 params[0] = FW_PARAM_PFVF(ISCSI_START);
3288 params[1] = FW_PARAM_PFVF(ISCSI_END); 4035 params[1] = FW_PARAM_PFVF(ISCSI_END);
3289 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, 4036 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3290 val); 4037 params, val);
3291 if (ret < 0) 4038 if (ret < 0)
3292 goto bye; 4039 goto bye;
3293 adap->vres.iscsi.start = val[0]; 4040 adap->vres.iscsi.start = val[0];
@@ -3295,63 +4042,33 @@ static int adap_init0(struct adapter *adap)
3295 } 4042 }
3296#undef FW_PARAM_PFVF 4043#undef FW_PARAM_PFVF
3297#undef FW_PARAM_DEV 4044#undef FW_PARAM_DEV
4045#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
3298 4046
3299 adap->params.nports = hweight32(port_vec); 4047 /*
3300 adap->params.portvec = port_vec; 4048 * These are finalized by FW initialization, load their values now.
3301 adap->flags |= FW_OK; 4049 */
3302
3303 /* These are finalized by FW initialization, load their values now */
3304 v = t4_read_reg(adap, TP_TIMER_RESOLUTION); 4050 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3305 adap->params.tp.tre = TIMERRESOLUTION_GET(v); 4051 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
4052 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3306 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 4053 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3307 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 4054 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3308 adap->params.b_wnd); 4055 adap->params.b_wnd);
3309 4056
3310#ifdef CONFIG_PCI_IOV 4057 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3311 /* 4058 for (j = 0; j < NCHAN; j++)
3312 * Provision resource limits for Virtual Functions. We currently 4059 adap->params.tp.tx_modq[j] = j;
3313 * grant them all the same static resource limits except for the Port
3314 * Access Rights Mask which we're assigning based on the PF. All of
3315 * the static provisioning stuff for both the PF and VF really needs
3316 * to be managed in a persistent manner for each device which the
3317 * firmware controls.
3318 */
3319 {
3320 int pf, vf;
3321
3322 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3323 if (num_vf[pf] <= 0)
3324 continue;
3325 4060
3326 /* VF numbering starts at 1! */ 4061 adap->flags |= FW_OK;
3327 for (vf = 1; vf <= num_vf[pf]; vf++) {
3328 ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3329 VFRES_NEQ, VFRES_NETHCTRL,
3330 VFRES_NIQFLINT, VFRES_NIQ,
3331 VFRES_TC, VFRES_NVI,
3332 FW_PFVF_CMD_CMASK_MASK,
3333 pfvfres_pmask(adap, pf, vf),
3334 VFRES_NEXACTF,
3335 VFRES_R_CAPS, VFRES_WX_CAPS);
3336 if (ret < 0)
3337 dev_warn(adap->pdev_dev, "failed to "
3338 "provision pf/vf=%d/%d; "
3339 "err=%d\n", pf, vf, ret);
3340 }
3341 }
3342 }
3343#endif
3344
3345 setup_memwin(adap);
3346 return 0; 4062 return 0;
3347 4063
3348 /* 4064 /*
3349 * If a command timed out or failed with EIO FW does not operate within 4065 * Something bad happened. If a command timed out or failed with EIO
3350 * its spec or something catastrophic happened to HW/FW, stop issuing 4066 * FW does not operate within its spec or something catastrophic
3351 * commands. 4067 * happened to HW/FW, stop issuing commands.
3352 */ 4068 */
3353bye: if (ret != -ETIMEDOUT && ret != -EIO) 4069bye:
3354 t4_fw_bye(adap, adap->fn); 4070 if (ret != -ETIMEDOUT && ret != -EIO)
4071 t4_fw_bye(adap, adap->mbox);
3355 return ret; 4072 return ret;
3356} 4073}
3357 4074
@@ -3806,7 +4523,9 @@ static int __devinit init_one(struct pci_dev *pdev,
3806 err = t4_prep_adapter(adapter); 4523 err = t4_prep_adapter(adapter);
3807 if (err) 4524 if (err)
3808 goto out_unmap_bar; 4525 goto out_unmap_bar;
4526 setup_memwin(adapter);
3809 err = adap_init0(adapter); 4527 err = adap_init0(adapter);
4528 setup_memwin_rdma(adapter);
3810 if (err) 4529 if (err)
3811 goto out_unmap_bar; 4530 goto out_unmap_bar;
3812 4531
@@ -3948,8 +4667,11 @@ static void __devexit remove_one(struct pci_dev *pdev)
3948{ 4667{
3949 struct adapter *adapter = pci_get_drvdata(pdev); 4668 struct adapter *adapter = pci_get_drvdata(pdev);
3950 4669
4670#ifdef CONFIG_PCI_IOV
3951 pci_disable_sriov(pdev); 4671 pci_disable_sriov(pdev);
3952 4672
4673#endif
4674
3953 if (adapter) { 4675 if (adapter) {
3954 int i; 4676 int i;
3955 4677
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d79980c5fc63..1b899fea1a91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -100,6 +100,8 @@ struct tid_info {
100 100
101 unsigned int nftids; 101 unsigned int nftids;
102 unsigned int ftid_base; 102 unsigned int ftid_base;
103 unsigned int aftid_base;
104 unsigned int aftid_end;
103 105
104 spinlock_t atid_lock ____cacheline_aligned_in_smp; 106 spinlock_t atid_lock ____cacheline_aligned_in_smp;
105 union aopen_entry *afree; 107 union aopen_entry *afree;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index d49933ed551f..3ecc087d732d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -68,9 +68,6 @@
68 */ 68 */
69#define RX_PKT_SKB_LEN 512 69#define RX_PKT_SKB_LEN 512
70 70
71/* Ethernet header padding prepended to RX_PKTs */
72#define RX_PKT_PAD 2
73
74/* 71/*
75 * Max number of Tx descriptors we clean up at a time. Should be modest as 72 * Max number of Tx descriptors we clean up at a time. Should be modest as
76 * freeing skbs isn't cheap and it happens while holding locks. We just need 73 * freeing skbs isn't cheap and it happens while holding locks. We just need
@@ -137,13 +134,6 @@
137 */ 134 */
138#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN 135#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
139 136
140enum {
141 /* packet alignment in FL buffers */
142 FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
143 /* egress status entry size */
144 STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
145};
146
147struct tx_sw_desc { /* SW state per Tx descriptor */ 137struct tx_sw_desc { /* SW state per Tx descriptor */
148 struct sk_buff *skb; 138 struct sk_buff *skb;
149 struct ulptx_sgl *sgl; 139 struct ulptx_sgl *sgl;
@@ -155,16 +145,57 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
155}; 145};
156 146
157/* 147/*
158 * The low bits of rx_sw_desc.dma_addr have special meaning. 148 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet pe skb
149 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
150 * We could easily support more but there doesn't seem to be much need for
151 * that ...
152 */
153#define FL_MTU_SMALL 1500
154#define FL_MTU_LARGE 9000
155
156static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
157 unsigned int mtu)
158{
159 struct sge *s = &adapter->sge;
160
161 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
162}
163
164#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
165#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
166
167/*
168 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
169 * these to specify the buffer size as an index into the SGE Free List Buffer
170 * Size register array. We also use bit 4, when the buffer has been unmapped
171 * for DMA, but this is of course never sent to the hardware and is only used
172 * to prevent double unmappings. All of the above requires that the Free List
173 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
174 * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
175 * Free List Buffer alignment is 32 bytes, this works out for us ...
159 */ 176 */
160enum { 177enum {
161 RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ 178 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
162 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ 179 RX_BUF_SIZE = 0x0f, /* bottom three bits are for buf sizes */
180 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
181
182 /*
183 * XXX We shouldn't depend on being able to use these indices.
184 * XXX Especially when some other Master PF has initialized the
185 * XXX adapter or we use the Firmware Configuration File. We
186 * XXX should really search through the Host Buffer Size register
187 * XXX array for the appropriately sized buffer indices.
188 */
189 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
190 RX_LARGE_PG_BUF = 0x1, /* buffer large (FL_PG_ORDER) page buffer */
191
192 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
193 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
163}; 194};
164 195
165static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) 196static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
166{ 197{
167 return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); 198 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
168} 199}
169 200
170static inline bool is_buf_mapped(const struct rx_sw_desc *d) 201static inline bool is_buf_mapped(const struct rx_sw_desc *d)
@@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
392 } 423 }
393} 424}
394 425
395static inline int get_buf_size(const struct rx_sw_desc *d) 426static inline int get_buf_size(struct adapter *adapter,
427 const struct rx_sw_desc *d)
396{ 428{
397#if FL_PG_ORDER > 0 429 struct sge *s = &adapter->sge;
398 return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : 430 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
399 PAGE_SIZE; 431 int buf_size;
400#else 432
401 return PAGE_SIZE; 433 switch (rx_buf_size_idx) {
402#endif 434 case RX_SMALL_PG_BUF:
435 buf_size = PAGE_SIZE;
436 break;
437
438 case RX_LARGE_PG_BUF:
439 buf_size = PAGE_SIZE << s->fl_pg_order;
440 break;
441
442 case RX_SMALL_MTU_BUF:
443 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
444 break;
445
446 case RX_LARGE_MTU_BUF:
447 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
448 break;
449
450 default:
451 BUG_ON(1);
452 }
453
454 return buf_size;
403} 455}
404 456
405/** 457/**
@@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
418 470
419 if (is_buf_mapped(d)) 471 if (is_buf_mapped(d))
420 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 472 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
421 get_buf_size(d), PCI_DMA_FROMDEVICE); 473 get_buf_size(adap, d),
474 PCI_DMA_FROMDEVICE);
422 put_page(d->page); 475 put_page(d->page);
423 d->page = NULL; 476 d->page = NULL;
424 if (++q->cidx == q->size) 477 if (++q->cidx == q->size)
@@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
444 497
445 if (is_buf_mapped(d)) 498 if (is_buf_mapped(d))
446 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), 499 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
447 get_buf_size(d), PCI_DMA_FROMDEVICE); 500 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
448 d->page = NULL; 501 d->page = NULL;
449 if (++q->cidx == q->size) 502 if (++q->cidx == q->size)
450 q->cidx = 0; 503 q->cidx = 0;
@@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
485static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, 538static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
486 gfp_t gfp) 539 gfp_t gfp)
487{ 540{
541 struct sge *s = &adap->sge;
488 struct page *pg; 542 struct page *pg;
489 dma_addr_t mapping; 543 dma_addr_t mapping;
490 unsigned int cred = q->avail; 544 unsigned int cred = q->avail;
@@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
493 547
494 gfp |= __GFP_NOWARN | __GFP_COLD; 548 gfp |= __GFP_NOWARN | __GFP_COLD;
495 549
496#if FL_PG_ORDER > 0 550 if (s->fl_pg_order == 0)
551 goto alloc_small_pages;
552
497 /* 553 /*
498 * Prefer large buffers 554 * Prefer large buffers
499 */ 555 */
500 while (n) { 556 while (n) {
501 pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); 557 pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
502 if (unlikely(!pg)) { 558 if (unlikely(!pg)) {
503 q->large_alloc_failed++; 559 q->large_alloc_failed++;
504 break; /* fall back to single pages */ 560 break; /* fall back to single pages */
505 } 561 }
506 562
507 mapping = dma_map_page(adap->pdev_dev, pg, 0, 563 mapping = dma_map_page(adap->pdev_dev, pg, 0,
508 PAGE_SIZE << FL_PG_ORDER, 564 PAGE_SIZE << s->fl_pg_order,
509 PCI_DMA_FROMDEVICE); 565 PCI_DMA_FROMDEVICE);
510 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { 566 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
511 __free_pages(pg, FL_PG_ORDER); 567 __free_pages(pg, s->fl_pg_order);
512 goto out; /* do not try small pages for this error */ 568 goto out; /* do not try small pages for this error */
513 } 569 }
514 mapping |= RX_LARGE_BUF; 570 mapping |= RX_LARGE_PG_BUF;
515 *d++ = cpu_to_be64(mapping); 571 *d++ = cpu_to_be64(mapping);
516 572
517 set_rx_sw_desc(sd, pg, mapping); 573 set_rx_sw_desc(sd, pg, mapping);
@@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
525 } 581 }
526 n--; 582 n--;
527 } 583 }
528#endif
529 584
585alloc_small_pages:
530 while (n--) { 586 while (n--) {
531 pg = __skb_alloc_page(gfp, NULL); 587 pg = __skb_alloc_page(gfp, NULL);
532 if (unlikely(!pg)) { 588 if (unlikely(!pg)) {
@@ -769,8 +825,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
769 wmb(); /* write descriptors before telling HW */ 825 wmb(); /* write descriptors before telling HW */
770 spin_lock(&q->db_lock); 826 spin_lock(&q->db_lock);
771 if (!q->db_disabled) { 827 if (!q->db_disabled) {
772 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), 828 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
773 V_QID(q->cntxt_id) | V_PIDX(n)); 829 QID(q->cntxt_id) | PIDX(n));
774 } 830 }
775 q->db_pidx = q->pidx; 831 q->db_pidx = q->pidx;
776 spin_unlock(&q->db_lock); 832 spin_unlock(&q->db_lock);
@@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1519static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 1575static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1520 const struct cpl_rx_pkt *pkt) 1576 const struct cpl_rx_pkt *pkt)
1521{ 1577{
1578 struct adapter *adapter = rxq->rspq.adap;
1579 struct sge *s = &adapter->sge;
1522 int ret; 1580 int ret;
1523 struct sk_buff *skb; 1581 struct sk_buff *skb;
1524 1582
@@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1529 return; 1587 return;
1530 } 1588 }
1531 1589
1532 copy_frags(skb, gl, RX_PKT_PAD); 1590 copy_frags(skb, gl, s->pktshift);
1533 skb->len = gl->tot_len - RX_PKT_PAD; 1591 skb->len = gl->tot_len - s->pktshift;
1534 skb->data_len = skb->len; 1592 skb->data_len = skb->len;
1535 skb->truesize += skb->data_len; 1593 skb->truesize += skb->data_len;
1536 skb->ip_summed = CHECKSUM_UNNECESSARY; 1594 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1566 struct sk_buff *skb; 1624 struct sk_buff *skb;
1567 const struct cpl_rx_pkt *pkt; 1625 const struct cpl_rx_pkt *pkt;
1568 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1626 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1627 struct sge *s = &q->adap->sge;
1569 1628
1570 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) 1629 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1571 return handle_trace_pkt(q->adap, si); 1630 return handle_trace_pkt(q->adap, si);
@@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1585 return 0; 1644 return 0;
1586 } 1645 }
1587 1646
1588 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ 1647 __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
1589 skb->protocol = eth_type_trans(skb, q->netdev); 1648 skb->protocol = eth_type_trans(skb, q->netdev);
1590 skb_record_rx_queue(skb, q->idx); 1649 skb_record_rx_queue(skb, q->idx);
1591 if (skb->dev->features & NETIF_F_RXHASH) 1650 if (skb->dev->features & NETIF_F_RXHASH)
@@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget)
1696 int budget_left = budget; 1755 int budget_left = budget;
1697 const struct rsp_ctrl *rc; 1756 const struct rsp_ctrl *rc;
1698 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1757 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1758 struct adapter *adapter = q->adap;
1759 struct sge *s = &adapter->sge;
1699 1760
1700 while (likely(budget_left)) { 1761 while (likely(budget_left)) {
1701 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 1762 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
@@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget)
1722 /* gather packet fragments */ 1783 /* gather packet fragments */
1723 for (frags = 0, fp = si.frags; ; frags++, fp++) { 1784 for (frags = 0, fp = si.frags; ; frags++, fp++) {
1724 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 1785 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1725 bufsz = get_buf_size(rsd); 1786 bufsz = get_buf_size(adapter, rsd);
1726 fp->page = rsd->page; 1787 fp->page = rsd->page;
1727 fp->offset = q->offset; 1788 fp->offset = q->offset;
1728 fp->size = min(bufsz, len); 1789 fp->size = min(bufsz, len);
@@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget)
1747 si.nfrags = frags + 1; 1808 si.nfrags = frags + 1;
1748 ret = q->handler(q, q->cur_desc, &si); 1809 ret = q->handler(q, q->cur_desc, &si);
1749 if (likely(ret == 0)) 1810 if (likely(ret == 0))
1750 q->offset += ALIGN(fp->size, FL_ALIGN); 1811 q->offset += ALIGN(fp->size, s->fl_align);
1751 else 1812 else
1752 restore_rx_bufs(&si, &rxq->fl, frags); 1813 restore_rx_bufs(&si, &rxq->fl, frags);
1753 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 1814 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1983{ 2044{
1984 int ret, flsz = 0; 2045 int ret, flsz = 0;
1985 struct fw_iq_cmd c; 2046 struct fw_iq_cmd c;
2047 struct sge *s = &adap->sge;
1986 struct port_info *pi = netdev_priv(dev); 2048 struct port_info *pi = netdev_priv(dev);
1987 2049
1988 /* Size needs to be multiple of 16, including status entry. */ 2050 /* Size needs to be multiple of 16, including status entry. */
@@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2015 fl->size = roundup(fl->size, 8); 2077 fl->size = roundup(fl->size, 8);
2016 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 2078 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2017 sizeof(struct rx_sw_desc), &fl->addr, 2079 sizeof(struct rx_sw_desc), &fl->addr,
2018 &fl->sdesc, STAT_LEN, NUMA_NO_NODE); 2080 &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2019 if (!fl->desc) 2081 if (!fl->desc)
2020 goto fl_nomem; 2082 goto fl_nomem;
2021 2083
2022 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); 2084 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2023 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | 2085 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2024 FW_IQ_CMD_FL0FETCHRO(1) | 2086 FW_IQ_CMD_FL0FETCHRO(1) |
2025 FW_IQ_CMD_FL0DATARO(1) | 2087 FW_IQ_CMD_FL0DATARO(1) |
@@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2096{ 2158{
2097 int ret, nentries; 2159 int ret, nentries;
2098 struct fw_eq_eth_cmd c; 2160 struct fw_eq_eth_cmd c;
2161 struct sge *s = &adap->sge;
2099 struct port_info *pi = netdev_priv(dev); 2162 struct port_info *pi = netdev_priv(dev);
2100 2163
2101 /* Add status entries */ 2164 /* Add status entries */
2102 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2165 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2103 2166
2104 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 2167 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2105 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 2168 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2106 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, 2169 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2107 netdev_queue_numa_node_read(netdevq)); 2170 netdev_queue_numa_node_read(netdevq));
2108 if (!txq->q.desc) 2171 if (!txq->q.desc)
2109 return -ENOMEM; 2172 return -ENOMEM;
@@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2149{ 2212{
2150 int ret, nentries; 2213 int ret, nentries;
2151 struct fw_eq_ctrl_cmd c; 2214 struct fw_eq_ctrl_cmd c;
2215 struct sge *s = &adap->sge;
2152 struct port_info *pi = netdev_priv(dev); 2216 struct port_info *pi = netdev_priv(dev);
2153 2217
2154 /* Add status entries */ 2218 /* Add status entries */
2155 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2219 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2156 2220
2157 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 2221 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2158 sizeof(struct tx_desc), 0, &txq->q.phys_addr, 2222 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
@@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2200{ 2264{
2201 int ret, nentries; 2265 int ret, nentries;
2202 struct fw_eq_ofld_cmd c; 2266 struct fw_eq_ofld_cmd c;
2267 struct sge *s = &adap->sge;
2203 struct port_info *pi = netdev_priv(dev); 2268 struct port_info *pi = netdev_priv(dev);
2204 2269
2205 /* Add status entries */ 2270 /* Add status entries */
2206 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2271 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2207 2272
2208 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 2273 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2209 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 2274 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2210 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, 2275 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2211 NUMA_NO_NODE); 2276 NUMA_NO_NODE);
2212 if (!txq->q.desc) 2277 if (!txq->q.desc)
2213 return -ENOMEM; 2278 return -ENOMEM;
@@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2251 2316
2252static void free_txq(struct adapter *adap, struct sge_txq *q) 2317static void free_txq(struct adapter *adap, struct sge_txq *q)
2253{ 2318{
2319 struct sge *s = &adap->sge;
2320
2254 dma_free_coherent(adap->pdev_dev, 2321 dma_free_coherent(adap->pdev_dev,
2255 q->size * sizeof(struct tx_desc) + STAT_LEN, 2322 q->size * sizeof(struct tx_desc) + s->stat_len,
2256 q->desc, q->phys_addr); 2323 q->desc, q->phys_addr);
2257 q->cntxt_id = 0; 2324 q->cntxt_id = 0;
2258 q->sdesc = NULL; 2325 q->sdesc = NULL;
@@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
2262static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, 2329static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2263 struct sge_fl *fl) 2330 struct sge_fl *fl)
2264{ 2331{
2332 struct sge *s = &adap->sge;
2265 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 2333 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2266 2334
2267 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; 2335 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
@@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2276 2344
2277 if (fl) { 2345 if (fl) {
2278 free_rx_bufs(adap, fl, fl->avail); 2346 free_rx_bufs(adap, fl, fl->avail);
2279 dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, 2347 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2280 fl->desc, fl->addr); 2348 fl->desc, fl->addr);
2281 kfree(fl->sdesc); 2349 kfree(fl->sdesc);
2282 fl->sdesc = NULL; 2350 fl->sdesc = NULL;
@@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap)
2408 * Performs SGE initialization needed every time after a chip reset. 2476 * Performs SGE initialization needed every time after a chip reset.
2409 * We do not initialize any of the queues here, instead the driver 2477 * We do not initialize any of the queues here, instead the driver
2410 * top-level must request them individually. 2478 * top-level must request them individually.
2479 *
2480 * Called in two different modes:
2481 *
2482 * 1. Perform actual hardware initialization and record hard-coded
2483 * parameters which were used. This gets used when we're the
2484 * Master PF and the Firmware Configuration File support didn't
2485 * work for some reason.
2486 *
2487 * 2. We're not the Master PF or initialization was performed with
2488 * a Firmware Configuration File. In this case we need to grab
2489 * any of the SGE operating parameters that we need to have in
2490 * order to do our job and make sure we can live with them ...
2411 */ 2491 */
2412void t4_sge_init(struct adapter *adap) 2492
2493static int t4_sge_init_soft(struct adapter *adap)
2413{ 2494{
2414 unsigned int i, v;
2415 struct sge *s = &adap->sge; 2495 struct sge *s = &adap->sge;
2416 unsigned int fl_align_log = ilog2(FL_ALIGN); 2496 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2497 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2498 u32 ingress_rx_threshold;
2417 2499
2418 t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | 2500 /*
2419 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, 2501 * Verify that CPL messages are going to the Ingress Queue for
2420 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | 2502 * process_responses() and that only packet data is going to the
2421 RXPKTCPLMODE | 2503 * Free Lists.
2422 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); 2504 */
2505 if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
2506 RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
2507 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2508 return -EINVAL;
2509 }
2510
2511 /*
2512 * Validate the Host Buffer Register Array indices that we want to
2513 * use ...
2514 *
2515 * XXX Note that we should really read through the Host Buffer Size
2516 * XXX register array and find the indices of the Buffer Sizes which
2517 * XXX meet our needs!
2518 */
2519 #define READ_FL_BUF(x) \
2520 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
2521
2522 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2523 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2524 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2525 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2526
2527 #undef READ_FL_BUF
2528
2529 if (fl_small_pg != PAGE_SIZE ||
2530 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
2531 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2532 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2533 fl_small_pg, fl_large_pg);
2534 return -EINVAL;
2535 }
2536 if (fl_large_pg)
2537 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2538
2539 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2540 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2541 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2542 fl_small_mtu, fl_large_mtu);
2543 return -EINVAL;
2544 }
2545
2546 /*
2547 * Retrieve our RX interrupt holdoff timer values and counter
2548 * threshold values from the SGE parameters.
2549 */
2550 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
2551 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
2552 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
2553 s->timer_val[0] = core_ticks_to_us(adap,
2554 TIMERVALUE0_GET(timer_value_0_and_1));
2555 s->timer_val[1] = core_ticks_to_us(adap,
2556 TIMERVALUE1_GET(timer_value_0_and_1));
2557 s->timer_val[2] = core_ticks_to_us(adap,
2558 TIMERVALUE2_GET(timer_value_2_and_3));
2559 s->timer_val[3] = core_ticks_to_us(adap,
2560 TIMERVALUE3_GET(timer_value_2_and_3));
2561 s->timer_val[4] = core_ticks_to_us(adap,
2562 TIMERVALUE4_GET(timer_value_4_and_5));
2563 s->timer_val[5] = core_ticks_to_us(adap,
2564 TIMERVALUE5_GET(timer_value_4_and_5));
2565
2566 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
2567 s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
2568 s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
2569 s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
2570 s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
2571
2572 return 0;
2573}
2574
2575static int t4_sge_init_hard(struct adapter *adap)
2576{
2577 struct sge *s = &adap->sge;
2578
2579 /*
2580 * Set up our basic SGE mode to deliver CPL messages to our Ingress
2581 * Queue and Packet Date to the Free List.
2582 */
2583 t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2584 RXPKTCPLMODE_MASK);
2423 2585
2424 /* 2586 /*
2425 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows 2587 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
@@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap)
2433 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, 2595 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2434 F_ENABLE_DROP); 2596 F_ENABLE_DROP);
2435 2597
2436 for (i = v = 0; i < 32; i += 4) 2598 /*
2437 v |= (PAGE_SHIFT - 10) << i; 2599 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2438 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); 2600 * t4_fixup_host_params().
2439 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); 2601 */
2440#if FL_PG_ORDER > 0 2602 s->fl_pg_order = FL_PG_ORDER;
2441 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); 2603 if (s->fl_pg_order)
2442#endif 2604 t4_write_reg(adap,
2605 SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2606 PAGE_SIZE << FL_PG_ORDER);
2607 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2608 FL_MTU_SMALL_BUFSIZE(adap));
2609 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2610 FL_MTU_LARGE_BUFSIZE(adap));
2611
2612 /*
2613 * Note that the SGE Ingress Packet Count Interrupt Threshold and
2614 * Timer Holdoff values must be supplied by our caller.
2615 */
2443 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, 2616 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2444 THRESHOLD_0(s->counter_val[0]) | 2617 THRESHOLD_0(s->counter_val[0]) |
2445 THRESHOLD_1(s->counter_val[1]) | 2618 THRESHOLD_1(s->counter_val[1]) |
@@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap)
2449 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | 2622 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2450 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); 2623 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2451 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, 2624 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2452 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | 2625 TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2453 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); 2626 TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2454 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, 2627 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2455 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | 2628 TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2456 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); 2629 TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2630
2631 return 0;
2632}
2633
2634int t4_sge_init(struct adapter *adap)
2635{
2636 struct sge *s = &adap->sge;
2637 u32 sge_control;
2638 int ret;
2639
2640 /*
2641 * Ingress Padding Boundary and Egress Status Page Size are set up by
2642 * t4_fixup_host_params().
2643 */
2644 sge_control = t4_read_reg(adap, SGE_CONTROL);
2645 s->pktshift = PKTSHIFT_GET(sge_control);
2646 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
2647 s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
2648 X_INGPADBOUNDARY_SHIFT);
2649
2650 if (adap->flags & USING_SOFT_PARAMS)
2651 ret = t4_sge_init_soft(adap);
2652 else
2653 ret = t4_sge_init_hard(adap);
2654 if (ret < 0)
2655 return ret;
2656
2657 /*
2658 * A FL with <= fl_starve_thres buffers is starving and a periodic
2659 * timer will attempt to refill it. This needs to be larger than the
2660 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2661 * stuck waiting for new packets while the SGE is waiting for us to
2662 * give it more Free List entries. (Note that the SGE's Egress
2663 * Congestion Threshold is in units of 2 Free List pointers.)
2664 */
2665 s->fl_starve_thres
2666 = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
2667
2457 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 2668 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2458 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); 2669 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2459 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ 2670 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2460 s->idma_state[0] = s->idma_state[1] = 0; 2671 s->idma_state[0] = s->idma_state[1] = 0;
2461 spin_lock_init(&s->intrq_lock); 2672 spin_lock_init(&s->intrq_lock);
2673
2674 return 0;
2462} 2675}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index af1601323173..35b81d8b59e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -120,6 +120,28 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 } 120 }
121} 121}
122 122
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
123/* 145/*
124 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
125 */ 147 */
@@ -330,6 +352,143 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330 return 0; 352 return 0;
331} 353}
332 354
355/*
356 * t4_mem_win_rw - read/write memory through PCIE memory window
357 * @adap: the adapter
358 * @addr: address of first byte requested
359 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
360 * @dir: direction of transfer 1 => read, 0 => write
361 *
362 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
363 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
364 * address @addr.
365 */
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{
368 int i;
369
370 /*
371 * Setup offset into PCIE memory window. Address must be a
372 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
373 * ensure that changes propagate before we attempt to use the new
374 * values.)
375 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1));
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379
380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
382 if (dir)
383 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
384 else
385 t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
386 }
387
388 return 0;
389}
390
391/**
392 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
393 * @adap: the adapter
394 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
395 * @addr: address within indicated memory type
396 * @len: amount of memory to transfer
397 * @buf: host memory buffer
398 * @dir: direction of transfer 1 => read, 0 => write
399 *
400 * Reads/writes an [almost] arbitrary memory region in the firmware: the
401 * firmware memory address, length and host buffer must be aligned on
402 * 32-bit boudaries. The memory is transferred as a raw byte sequence
403 * from/to the firmware's memory. If this memory contains data
404 * structures which contain multi-byte integers, it's the callers
405 * responsibility to perform appropriate byte order conversions.
406 */
407static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
408 __be32 *buf, int dir)
409{
410 u32 pos, start, end, offset, memoffset;
411 int ret;
412
413 /*
414 * Argument sanity checks ...
415 */
416 if ((addr & 0x3) || (len & 0x3))
417 return -EINVAL;
418
419 /*
420 * Offset into the region of memory which is being accessed
421 * MEM_EDC0 = 0
422 * MEM_EDC1 = 1
423 * MEM_MC = 2
424 */
425 memoffset = (mtype * (5 * 1024 * 1024));
426
427 /* Determine the PCIE_MEM_ACCESS_OFFSET */
428 addr = addr + memoffset;
429
430 /*
431 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
432 * at a time so we need to round down the start and round up the end.
433 * We'll start copying out of the first line at (addr - start) a word
434 * at a time.
435 */
436 start = addr & ~(MEMWIN0_APERTURE-1);
437 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
438 offset = (addr - start)/sizeof(__be32);
439
440 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
441 __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
442
443 /*
444 * If we're writing, copy the data from the caller's memory
445 * buffer
446 */
447 if (!dir) {
448 /*
449 * If we're doing a partial write, then we need to do
450 * a read-modify-write ...
451 */
452 if (offset || len < MEMWIN0_APERTURE) {
453 ret = t4_mem_win_rw(adap, pos, data, 1);
454 if (ret)
455 return ret;
456 }
457 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
458 len > 0) {
459 data[offset++] = *buf++;
460 len -= sizeof(__be32);
461 }
462 }
463
464 /*
465 * Transfer a block of memory and bail if there's an error.
466 */
467 ret = t4_mem_win_rw(adap, pos, data, dir);
468 if (ret)
469 return ret;
470
471 /*
472 * If we're reading, copy the data into the caller's memory
473 * buffer.
474 */
475 if (dir)
476 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
477 len > 0) {
478 *buf++ = data[offset++];
479 len -= sizeof(__be32);
480 }
481 }
482
483 return 0;
484}
485
486int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
487 __be32 *buf)
488{
489 return t4_memory_rw(adap, mtype, addr, len, buf, 0);
490}
491
333#define EEPROM_STAT_ADDR 0x7bfc 492#define EEPROM_STAT_ADDR 0x7bfc
334#define VPD_BASE 0 493#define VPD_BASE 0
335#define VPD_LEN 512 494#define VPD_LEN 512
@@ -355,8 +514,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
355 * 514 *
356 * Reads card parameters stored in VPD EEPROM. 515 * Reads card parameters stored in VPD EEPROM.
357 */ 516 */
358static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 517int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
359{ 518{
519 u32 cclk_param, cclk_val;
360 int i, ret; 520 int i, ret;
361 int ec, sn; 521 int ec, sn;
362 u8 vpd[VPD_LEN], csum; 522 u8 vpd[VPD_LEN], csum;
@@ -418,6 +578,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
418 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); 578 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
419 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 579 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
420 strim(p->sn); 580 strim(p->sn);
581
582 /*
583 * Ask firmware for the Core Clock since it knows how to translate the
584 * Reference Clock ('V2') VPD field into a Core Clock value ...
585 */
586 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
587 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
588 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
589 1, &cclk_param, &cclk_val);
590 if (ret)
591 return ret;
592 p->cclk = cclk_val;
593
421 return 0; 594 return 0;
422} 595}
423 596
@@ -718,6 +891,77 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
718} 891}
719 892
720/** 893/**
894 * t4_flash_cfg_addr - return the address of the flash configuration file
895 * @adapter: the adapter
896 *
897 * Return the address within the flash where the Firmware Configuration
898 * File is stored.
899 */
900unsigned int t4_flash_cfg_addr(struct adapter *adapter)
901{
902 if (adapter->params.sf_size == 0x100000)
903 return FLASH_FPGA_CFG_START;
904 else
905 return FLASH_CFG_START;
906}
907
908/**
909 * t4_load_cfg - download config file
910 * @adap: the adapter
911 * @cfg_data: the cfg text file to write
912 * @size: text file size
913 *
914 * Write the supplied config text file to the card's serial flash.
915 */
916int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
917{
918 int ret, i, n;
919 unsigned int addr;
920 unsigned int flash_cfg_start_sec;
921 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
922
923 addr = t4_flash_cfg_addr(adap);
924 flash_cfg_start_sec = addr / SF_SEC_SIZE;
925
926 if (size > FLASH_CFG_MAX_SIZE) {
927 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
928 FLASH_CFG_MAX_SIZE);
929 return -EFBIG;
930 }
931
932 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
933 sf_sec_size);
934 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
935 flash_cfg_start_sec + i - 1);
936 /*
937 * If size == 0 then we're simply erasing the FLASH sectors associated
938 * with the on-adapter Firmware Configuration File.
939 */
940 if (ret || size == 0)
941 goto out;
942
943 /* this will write to the flash up to SF_PAGE_SIZE at a time */
944 for (i = 0; i < size; i += SF_PAGE_SIZE) {
945 if ((size - i) < SF_PAGE_SIZE)
946 n = size - i;
947 else
948 n = SF_PAGE_SIZE;
949 ret = t4_write_flash(adap, addr, n, cfg_data);
950 if (ret)
951 goto out;
952
953 addr += SF_PAGE_SIZE;
954 cfg_data += SF_PAGE_SIZE;
955 }
956
957out:
958 if (ret)
959 dev_err(adap->pdev_dev, "config file %s failed %d\n",
960 (size == 0 ? "clear" : "download"), ret);
961 return ret;
962}
963
964/**
721 * t4_load_fw - download firmware 965 * t4_load_fw - download firmware
722 * @adap: the adapter 966 * @adap: the adapter
723 * @fw_data: the firmware image to write 967 * @fw_data: the firmware image to write
@@ -1018,9 +1262,9 @@ static void sge_intr_handler(struct adapter *adapter)
1018 { ERR_INVALID_CIDX_INC, 1262 { ERR_INVALID_CIDX_INC,
1019 "SGE GTS CIDX increment too large", -1, 0 }, 1263 "SGE GTS CIDX increment too large", -1, 0 },
1020 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 1264 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1021 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, 1265 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1022 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, 1266 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1023 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, 1267 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
1024 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 1268 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1025 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 1269 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1026 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 1270 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1520,7 +1764,7 @@ void t4_intr_enable(struct adapter *adapter)
1520 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 1764 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1521 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 1765 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1522 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | 1766 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1523 F_DBFIFO_HP_INT | F_DBFIFO_LP_INT | 1767 DBFIFO_HP_INT | DBFIFO_LP_INT |
1524 EGRESS_SIZE_ERR); 1768 EGRESS_SIZE_ERR);
1525 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); 1769 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1526 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); 1770 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
@@ -1717,6 +1961,23 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1717} 1961}
1718 1962
1719/** 1963/**
1964 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
1965 * @adap: the adapter
1966 * @addr: the indirect TP register address
1967 * @mask: specifies the field within the register to modify
1968 * @val: new value for the field
1969 *
1970 * Sets a field of an indirect TP register to the given value.
1971 */
1972void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1973 unsigned int mask, unsigned int val)
1974{
1975 t4_write_reg(adap, TP_PIO_ADDR, addr);
1976 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
1977 t4_write_reg(adap, TP_PIO_DATA, val);
1978}
1979
1980/**
1720 * init_cong_ctrl - initialize congestion control parameters 1981 * init_cong_ctrl - initialize congestion control parameters
1721 * @a: the alpha values for congestion control 1982 * @a: the alpha values for congestion control
1722 * @b: the beta values for congestion control 1983 * @b: the beta values for congestion control
@@ -2000,9 +2261,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2000 struct fw_ldst_cmd c; 2261 struct fw_ldst_cmd c;
2001 2262
2002 memset(&c, 0, sizeof(c)); 2263 memset(&c, 0, sizeof(c));
2003 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | 2264 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2004 F_FW_CMD_WRITE | 2265 FW_CMD_WRITE |
2005 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); 2266 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2006 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2267 c.cycles_to_len16 = htonl(FW_LEN16(c));
2007 c.u.addrval.addr = htonl(addr); 2268 c.u.addrval.addr = htonl(addr);
2008 c.u.addrval.val = htonl(val); 2269 c.u.addrval.val = htonl(val);
@@ -2033,8 +2294,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2033 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) 2294 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2034 return -EINVAL; 2295 return -EINVAL;
2035 2296
2036 t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15); 2297 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
2037 t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET); 2298 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2038 2299
2039 for (i = 0; i < len; i += 4) 2300 for (i = 0; i < len; i += 4)
2040 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); 2301 *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
@@ -2102,39 +2363,129 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2102} 2363}
2103 2364
2104/** 2365/**
2105 * t4_fw_hello - establish communication with FW 2366 * t4_fw_hello - establish communication with FW
2106 * @adap: the adapter 2367 * @adap: the adapter
2107 * @mbox: mailbox to use for the FW command 2368 * @mbox: mailbox to use for the FW command
2108 * @evt_mbox: mailbox to receive async FW events 2369 * @evt_mbox: mailbox to receive async FW events
2109 * @master: specifies the caller's willingness to be the device master 2370 * @master: specifies the caller's willingness to be the device master
2110 * @state: returns the current device state 2371 * @state: returns the current device state (if non-NULL)
2111 * 2372 *
2112 * Issues a command to establish communication with FW. 2373 * Issues a command to establish communication with FW. Returns either
2374 * an error (negative integer) or the mailbox of the Master PF.
2113 */ 2375 */
2114int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 2376int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2115 enum dev_master master, enum dev_state *state) 2377 enum dev_master master, enum dev_state *state)
2116{ 2378{
2117 int ret; 2379 int ret;
2118 struct fw_hello_cmd c; 2380 struct fw_hello_cmd c;
2381 u32 v;
2382 unsigned int master_mbox;
2383 int retries = FW_CMD_HELLO_RETRIES;
2119 2384
2385retry:
2386 memset(&c, 0, sizeof(c));
2120 INIT_CMD(c, HELLO, WRITE); 2387 INIT_CMD(c, HELLO, WRITE);
2121 c.err_to_mbasyncnot = htonl( 2388 c.err_to_mbasyncnot = htonl(
2122 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 2389 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2123 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 2390 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2124 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | 2391 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2125 FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); 2392 FW_HELLO_CMD_MBMASTER_MASK) |
2393 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2394 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2395 FW_HELLO_CMD_CLEARINIT);
2126 2396
2397 /*
2398 * Issue the HELLO command to the firmware. If it's not successful
2399 * but indicates that we got a "busy" or "timeout" condition, retry
2400 * the HELLO until we exhaust our retry limit.
2401 */
2127 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2402 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2128 if (ret == 0 && state) { 2403 if (ret < 0) {
2129 u32 v = ntohl(c.err_to_mbasyncnot); 2404 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2130 if (v & FW_HELLO_CMD_INIT) 2405 goto retry;
2131 *state = DEV_STATE_INIT; 2406 return ret;
2132 else if (v & FW_HELLO_CMD_ERR) 2407 }
2408
2409 v = ntohl(c.err_to_mbasyncnot);
2410 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2411 if (state) {
2412 if (v & FW_HELLO_CMD_ERR)
2133 *state = DEV_STATE_ERR; 2413 *state = DEV_STATE_ERR;
2414 else if (v & FW_HELLO_CMD_INIT)
2415 *state = DEV_STATE_INIT;
2134 else 2416 else
2135 *state = DEV_STATE_UNINIT; 2417 *state = DEV_STATE_UNINIT;
2136 } 2418 }
2137 return ret; 2419
2420 /*
2421 * If we're not the Master PF then we need to wait around for the
2422 * Master PF Driver to finish setting up the adapter.
2423 *
2424 * Note that we also do this wait if we're a non-Master-capable PF and
2425 * there is no current Master PF; a Master PF may show up momentarily
2426 * and we wouldn't want to fail pointlessly. (This can happen when an
2427 * OS loads lots of different drivers rapidly at the same time). In
2428 * this case, the Master PF returned by the firmware will be
2429 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2430 */
2431 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2432 master_mbox != mbox) {
2433 int waiting = FW_CMD_HELLO_TIMEOUT;
2434
2435 /*
2436 * Wait for the firmware to either indicate an error or
2437 * initialized state. If we see either of these we bail out
2438 * and report the issue to the caller. If we exhaust the
2439 * "hello timeout" and we haven't exhausted our retries, try
2440 * again. Otherwise bail with a timeout error.
2441 */
2442 for (;;) {
2443 u32 pcie_fw;
2444
2445 msleep(50);
2446 waiting -= 50;
2447
2448 /*
2449 * If neither Error nor Initialialized are indicated
2450 * by the firmware keep waiting till we exaust our
2451 * timeout ... and then retry if we haven't exhausted
2452 * our retries ...
2453 */
2454 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2455 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2456 if (waiting <= 0) {
2457 if (retries-- > 0)
2458 goto retry;
2459
2460 return -ETIMEDOUT;
2461 }
2462 continue;
2463 }
2464
2465 /*
2466 * We either have an Error or Initialized condition
2467 * report errors preferentially.
2468 */
2469 if (state) {
2470 if (pcie_fw & FW_PCIE_FW_ERR)
2471 *state = DEV_STATE_ERR;
2472 else if (pcie_fw & FW_PCIE_FW_INIT)
2473 *state = DEV_STATE_INIT;
2474 }
2475
2476 /*
2477 * If we arrived before a Master PF was selected and
2478 * there's not a valid Master PF, grab its identity
2479 * for our caller.
2480 */
2481 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2482 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2483 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2484 break;
2485 }
2486 }
2487
2488 return master_mbox;
2138} 2489}
2139 2490
2140/** 2491/**
@@ -2186,6 +2537,334 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2186} 2537}
2187 2538
2188/** 2539/**
2540 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2541 * @adap: the adapter
2542 * @mbox: mailbox to use for the FW RESET command (if desired)
2543 * @force: force uP into RESET even if FW RESET command fails
2544 *
2545 * Issues a RESET command to firmware (if desired) with a HALT indication
2546 * and then puts the microprocessor into RESET state. The RESET command
2547 * will only be issued if a legitimate mailbox is provided (mbox <=
2548 * FW_PCIE_FW_MASTER_MASK).
2549 *
2550 * This is generally used in order for the host to safely manipulate the
2551 * adapter without fear of conflicting with whatever the firmware might
2552 * be doing. The only way out of this state is to RESTART the firmware
2553 * ...
2554 */
2555int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2556{
2557 int ret = 0;
2558
2559 /*
2560 * If a legitimate mailbox is provided, issue a RESET command
2561 * with a HALT indication.
2562 */
2563 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2564 struct fw_reset_cmd c;
2565
2566 memset(&c, 0, sizeof(c));
2567 INIT_CMD(c, RESET, WRITE);
2568 c.val = htonl(PIORST | PIORSTMODE);
2569 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2570 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2571 }
2572
2573 /*
2574 * Normally we won't complete the operation if the firmware RESET
2575 * command fails but if our caller insists we'll go ahead and put the
2576 * uP into RESET. This can be useful if the firmware is hung or even
2577 * missing ... We'll have to take the risk of putting the uP into
2578 * RESET without the cooperation of firmware in that case.
2579 *
2580 * We also force the firmware's HALT flag to be on in case we bypassed
2581 * the firmware RESET command above or we're dealing with old firmware
2582 * which doesn't have the HALT capability. This will serve as a flag
2583 * for the incoming firmware to know that it's coming out of a HALT
2584 * rather than a RESET ... if it's new enough to understand that ...
2585 */
2586 if (ret == 0 || force) {
2587 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2588 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2589 FW_PCIE_FW_HALT);
2590 }
2591
2592 /*
2593 * And we always return the result of the firmware RESET command
2594 * even when we force the uP into RESET ...
2595 */
2596 return ret;
2597}
2598
2599/**
2600 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2601 * @adap: the adapter
2602 * @reset: if we want to do a RESET to restart things
2603 *
2604 * Restart firmware previously halted by t4_fw_halt(). On successful
2605 * return the previous PF Master remains as the new PF Master and there
2606 * is no need to issue a new HELLO command, etc.
2607 *
2608 * We do this in two ways:
2609 *
2610 * 1. If we're dealing with newer firmware we'll simply want to take
2611 * the chip's microprocessor out of RESET. This will cause the
2612 * firmware to start up from its start vector. And then we'll loop
2613 * until the firmware indicates it's started again (PCIE_FW.HALT
2614 * reset to 0) or we timeout.
2615 *
2616 * 2. If we're dealing with older firmware then we'll need to RESET
2617 * the chip since older firmware won't recognize the PCIE_FW.HALT
2618 * flag and automatically RESET itself on startup.
2619 */
2620int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2621{
2622 if (reset) {
2623 /*
2624 * Since we're directing the RESET instead of the firmware
2625 * doing it automatically, we need to clear the PCIE_FW.HALT
2626 * bit.
2627 */
2628 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2629
2630 /*
2631 * If we've been given a valid mailbox, first try to get the
2632 * firmware to do the RESET. If that works, great and we can
2633 * return success. Otherwise, if we haven't been given a
2634 * valid mailbox or the RESET command failed, fall back to
2635 * hitting the chip with a hammer.
2636 */
2637 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2638 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2639 msleep(100);
2640 if (t4_fw_reset(adap, mbox,
2641 PIORST | PIORSTMODE) == 0)
2642 return 0;
2643 }
2644
2645 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
2646 msleep(2000);
2647 } else {
2648 int ms;
2649
2650 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
2651 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
2652 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
2653 return 0;
2654 msleep(100);
2655 ms += 100;
2656 }
2657 return -ETIMEDOUT;
2658 }
2659 return 0;
2660}
2661
2662/**
2663 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
2664 * @adap: the adapter
2665 * @mbox: mailbox to use for the FW RESET command (if desired)
2666 * @fw_data: the firmware image to write
2667 * @size: image size
2668 * @force: force upgrade even if firmware doesn't cooperate
2669 *
2670 * Perform all of the steps necessary for upgrading an adapter's
2671 * firmware image. Normally this requires the cooperation of the
2672 * existing firmware in order to halt all existing activities
2673 * but if an invalid mailbox token is passed in we skip that step
2674 * (though we'll still put the adapter microprocessor into RESET in
2675 * that case).
2676 *
2677 * On successful return the new firmware will have been loaded and
2678 * the adapter will have been fully RESET losing all previous setup
2679 * state. On unsuccessful return the adapter may be completely hosed ...
2680 * positive errno indicates that the adapter is ~probably~ intact, a
2681 * negative errno indicates that things are looking bad ...
2682 */
2683int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
2684 const u8 *fw_data, unsigned int size, int force)
2685{
2686 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
2687 int reset, ret;
2688
2689 ret = t4_fw_halt(adap, mbox, force);
2690 if (ret < 0 && !force)
2691 return ret;
2692
2693 ret = t4_load_fw(adap, fw_data, size);
2694 if (ret < 0)
2695 return ret;
2696
2697 /*
2698 * Older versions of the firmware don't understand the new
2699 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
2700 * restart. So for newly loaded older firmware we'll have to do the
2701 * RESET for it so it starts up on a clean slate. We can tell if
2702 * the newly loaded firmware will handle this right by checking
2703 * its header flags to see if it advertises the capability.
2704 */
2705 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
2706 return t4_fw_restart(adap, mbox, reset);
2707}
2708
2709
2710/**
2711 * t4_fw_config_file - setup an adapter via a Configuration File
2712 * @adap: the adapter
2713 * @mbox: mailbox to use for the FW command
2714 * @mtype: the memory type where the Configuration File is located
2715 * @maddr: the memory address where the Configuration File is located
2716 * @finiver: return value for CF [fini] version
2717 * @finicsum: return value for CF [fini] checksum
2718 * @cfcsum: return value for CF computed checksum
2719 *
2720 * Issue a command to get the firmware to process the Configuration
2721 * File located at the specified mtype/maddress. If the Configuration
2722 * File is processed successfully and return value pointers are
2723 * provided, the Configuration File "[fini] section version and
2724 * checksum values will be returned along with the computed checksum.
2725 * It's up to the caller to decide how it wants to respond to the
2726 * checksums not matching but it recommended that a prominant warning
2727 * be emitted in order to help people rapidly identify changed or
2728 * corrupted Configuration Files.
2729 *
2730 * Also note that it's possible to modify things like "niccaps",
2731 * "toecaps",etc. between processing the Configuration File and telling
2732 * the firmware to use the new configuration. Callers which want to
2733 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
2734 * Configuration Files if they want to do this.
2735 */
2736int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
2737 unsigned int mtype, unsigned int maddr,
2738 u32 *finiver, u32 *finicsum, u32 *cfcsum)
2739{
2740 struct fw_caps_config_cmd caps_cmd;
2741 int ret;
2742
2743 /*
2744 * Tell the firmware to process the indicated Configuration File.
2745 * If there are no errors and the caller has provided return value
2746 * pointers for the [fini] section version, checksum and computed
2747 * checksum, pass those back to the caller.
2748 */
2749 memset(&caps_cmd, 0, sizeof(caps_cmd));
2750 caps_cmd.op_to_write =
2751 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2752 FW_CMD_REQUEST |
2753 FW_CMD_READ);
2754 caps_cmd.retval_len16 =
2755 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
2756 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2757 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
2758 FW_LEN16(caps_cmd));
2759 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
2760 if (ret < 0)
2761 return ret;
2762
2763 if (finiver)
2764 *finiver = ntohl(caps_cmd.finiver);
2765 if (finicsum)
2766 *finicsum = ntohl(caps_cmd.finicsum);
2767 if (cfcsum)
2768 *cfcsum = ntohl(caps_cmd.cfcsum);
2769
2770 /*
2771 * And now tell the firmware to use the configuration we just loaded.
2772 */
2773 caps_cmd.op_to_write =
2774 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2775 FW_CMD_REQUEST |
2776 FW_CMD_WRITE);
2777 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
2778 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
2779}
2780
2781/**
2782 * t4_fixup_host_params - fix up host-dependent parameters
2783 * @adap: the adapter
2784 * @page_size: the host's Base Page Size
2785 * @cache_line_size: the host's Cache Line Size
2786 *
2787 * Various registers in T4 contain values which are dependent on the
2788 * host's Base Page and Cache Line Sizes. This function will fix all of
2789 * those registers with the appropriate values as passed in ...
2790 */
2791int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
2792 unsigned int cache_line_size)
2793{
2794 unsigned int page_shift = fls(page_size) - 1;
2795 unsigned int sge_hps = page_shift - 10;
2796 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
2797 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
2798 unsigned int fl_align_log = fls(fl_align) - 1;
2799
2800 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
2801 HOSTPAGESIZEPF0(sge_hps) |
2802 HOSTPAGESIZEPF1(sge_hps) |
2803 HOSTPAGESIZEPF2(sge_hps) |
2804 HOSTPAGESIZEPF3(sge_hps) |
2805 HOSTPAGESIZEPF4(sge_hps) |
2806 HOSTPAGESIZEPF5(sge_hps) |
2807 HOSTPAGESIZEPF6(sge_hps) |
2808 HOSTPAGESIZEPF7(sge_hps));
2809
2810 t4_set_reg_field(adap, SGE_CONTROL,
2811 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
2812 EGRSTATUSPAGESIZE_MASK,
2813 INGPADBOUNDARY(fl_align_log - 5) |
2814 EGRSTATUSPAGESIZE(stat_len != 64));
2815
2816 /*
2817 * Adjust various SGE Free List Host Buffer Sizes.
2818 *
2819 * This is something of a crock since we're using fixed indices into
2820 * the array which are also known by the sge.c code and the T4
2821 * Firmware Configuration File. We need to come up with a much better
2822 * approach to managing this array. For now, the first four entries
2823 * are:
2824 *
2825 * 0: Host Page Size
2826 * 1: 64KB
2827 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
2828 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
2829 *
2830 * For the single-MTU buffers in unpacked mode we need to include
2831 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
2832 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
2833 * Padding boundry. All of these are accommodated in the Factory
2834 * Default Firmware Configuration File but we need to adjust it for
2835 * this host's cache line size.
2836 */
2837 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
2838 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
2839 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
2840 & ~(fl_align-1));
2841 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
2842 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
2843 & ~(fl_align-1));
2844
2845 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
2846
2847 return 0;
2848}
2849
2850/**
2851 * t4_fw_initialize - ask FW to initialize the device
2852 * @adap: the adapter
2853 * @mbox: mailbox to use for the FW command
2854 *
2855 * Issues a command to FW to partially initialize the device. This
2856 * performs initialization that generally doesn't depend on user input.
2857 */
2858int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2859{
2860 struct fw_initialize_cmd c;
2861
2862 memset(&c, 0, sizeof(c));
2863 INIT_CMD(c, INITIALIZE, WRITE);
2864 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2865}
2866
2867/**
2189 * t4_query_params - query FW or device parameters 2868 * t4_query_params - query FW or device parameters
2190 * @adap: the adapter 2869 * @adap: the adapter
2191 * @mbox: mailbox to use for the FW command 2870 * @mbox: mailbox to use for the FW command
@@ -2835,10 +3514,6 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
2835 return ret; 3514 return ret;
2836 } 3515 }
2837 3516
2838 ret = get_vpd_params(adapter, &adapter->params.vpd);
2839 if (ret < 0)
2840 return ret;
2841
2842 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 3517 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2843 3518
2844 /* 3519 /*
@@ -2846,6 +3521,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
2846 */ 3521 */
2847 adapter->params.nports = 1; 3522 adapter->params.nports = 1;
2848 adapter->params.portvec = 1; 3523 adapter->params.portvec = 1;
3524 adapter->params.vpd.cclk = 50000;
2849 return 0; 3525 return 0;
2850} 3526}
2851 3527
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c26b455f37de..f534ed7e10e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -58,6 +58,7 @@ enum {
58 58
59enum { 59enum {
60 SF_PAGE_SIZE = 256, /* serial flash page size */ 60 SF_PAGE_SIZE = 256, /* serial flash page size */
61 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
61}; 62};
62 63
63enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ 64enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -137,4 +138,83 @@ struct rsp_ctrl {
137#define QINTR_CNT_EN 0x1 138#define QINTR_CNT_EN 0x1
138#define QINTR_TIMER_IDX(x) ((x) << 1) 139#define QINTR_TIMER_IDX(x) ((x) << 1)
139#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) 140#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
141
142/*
143 * Flash layout.
144 */
145#define FLASH_START(start) ((start) * SF_SEC_SIZE)
146#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
147
148enum {
149 /*
150 * Various Expansion-ROM boot images, etc.
151 */
152 FLASH_EXP_ROM_START_SEC = 0,
153 FLASH_EXP_ROM_NSECS = 6,
154 FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
155 FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
156
157 /*
158 * iSCSI Boot Firmware Table (iBFT) and other driver-related
159 * parameters ...
160 */
161 FLASH_IBFT_START_SEC = 6,
162 FLASH_IBFT_NSECS = 1,
163 FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
164 FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
165
166 /*
167 * Boot configuration data.
168 */
169 FLASH_BOOTCFG_START_SEC = 7,
170 FLASH_BOOTCFG_NSECS = 1,
171 FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
172 FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
173
174 /*
175 * Location of firmware image in FLASH.
176 */
177 FLASH_FW_START_SEC = 8,
178 FLASH_FW_NSECS = 8,
179 FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
180 FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
181
182 /*
183 * iSCSI persistent/crash information.
184 */
185 FLASH_ISCSI_CRASH_START_SEC = 29,
186 FLASH_ISCSI_CRASH_NSECS = 1,
187 FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
188 FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
189
190 /*
191 * FCoE persistent/crash information.
192 */
193 FLASH_FCOE_CRASH_START_SEC = 30,
194 FLASH_FCOE_CRASH_NSECS = 1,
195 FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
196 FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
197
198 /*
199 * Location of Firmware Configuration File in FLASH. Since the FPGA
200 * "FLASH" is smaller we need to store the Configuration File in a
201 * different location -- which will overlap the end of the firmware
202 * image if firmware ever gets that large ...
203 */
204 FLASH_CFG_START_SEC = 31,
205 FLASH_CFG_NSECS = 1,
206 FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
207 FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
208
209 FLASH_FPGA_CFG_START_SEC = 15,
210 FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
211
212 /*
213 * Sectors 32-63 are reserved for FLASH failover.
214 */
215};
216
217#undef FLASH_START
218#undef FLASH_MAX_SIZE
219
140#endif /* __T4_HW_H */ 220#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 111fc323f155..a1a8b57200f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -86,10 +86,17 @@
86#define CIDXINC_SHIFT 0 86#define CIDXINC_SHIFT 0
87#define CIDXINC(x) ((x) << CIDXINC_SHIFT) 87#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
88 88
89#define X_RXPKTCPLMODE_SPLIT 1
90#define X_INGPADBOUNDARY_SHIFT 5
91
89#define SGE_CONTROL 0x1008 92#define SGE_CONTROL 0x1008
90#define DCASYSTYPE 0x00080000U 93#define DCASYSTYPE 0x00080000U
91#define RXPKTCPLMODE 0x00040000U 94#define RXPKTCPLMODE_MASK 0x00040000U
92#define EGRSTATUSPAGESIZE 0x00020000U 95#define RXPKTCPLMODE_SHIFT 18
96#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT)
97#define EGRSTATUSPAGESIZE_MASK 0x00020000U
98#define EGRSTATUSPAGESIZE_SHIFT 17
99#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT)
93#define PKTSHIFT_MASK 0x00001c00U 100#define PKTSHIFT_MASK 0x00001c00U
94#define PKTSHIFT_SHIFT 10 101#define PKTSHIFT_SHIFT 10
95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) 102#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
@@ -108,6 +115,35 @@
108#define GLOBALENABLE 0x00000001U 115#define GLOBALENABLE 0x00000001U
109 116
110#define SGE_HOST_PAGE_SIZE 0x100c 117#define SGE_HOST_PAGE_SIZE 0x100c
118
119#define HOSTPAGESIZEPF7_MASK 0x0000000fU
120#define HOSTPAGESIZEPF7_SHIFT 28
121#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
122
123#define HOSTPAGESIZEPF6_MASK 0x0000000fU
124#define HOSTPAGESIZEPF6_SHIFT 24
125#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT)
126
127#define HOSTPAGESIZEPF5_MASK 0x0000000fU
128#define HOSTPAGESIZEPF5_SHIFT 20
129#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT)
130
131#define HOSTPAGESIZEPF4_MASK 0x0000000fU
132#define HOSTPAGESIZEPF4_SHIFT 16
133#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT)
134
135#define HOSTPAGESIZEPF3_MASK 0x0000000fU
136#define HOSTPAGESIZEPF3_SHIFT 12
137#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT)
138
139#define HOSTPAGESIZEPF2_MASK 0x0000000fU
140#define HOSTPAGESIZEPF2_SHIFT 8
141#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
142
143#define HOSTPAGESIZEPF1_MASK 0x0000000fU
144#define HOSTPAGESIZEPF1_SHIFT 4
145#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT)
146
111#define HOSTPAGESIZEPF0_MASK 0x0000000fU 147#define HOSTPAGESIZEPF0_MASK 0x0000000fU
112#define HOSTPAGESIZEPF0_SHIFT 0 148#define HOSTPAGESIZEPF0_SHIFT 0
113#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) 149#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
@@ -155,6 +191,8 @@
155#define SGE_INT_ENABLE3 0x1040 191#define SGE_INT_ENABLE3 0x1040
156#define SGE_FL_BUFFER_SIZE0 0x1044 192#define SGE_FL_BUFFER_SIZE0 0x1044
157#define SGE_FL_BUFFER_SIZE1 0x1048 193#define SGE_FL_BUFFER_SIZE1 0x1048
194#define SGE_FL_BUFFER_SIZE2 0x104c
195#define SGE_FL_BUFFER_SIZE3 0x1050
158#define SGE_INGRESS_RX_THRESHOLD 0x10a0 196#define SGE_INGRESS_RX_THRESHOLD 0x10a0
159#define THRESHOLD_0_MASK 0x3f000000U 197#define THRESHOLD_0_MASK 0x3f000000U
160#define THRESHOLD_0_SHIFT 24 198#define THRESHOLD_0_SHIFT 24
@@ -173,6 +211,12 @@
173#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) 211#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
174#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) 212#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
175 213
214#define SGE_CONM_CTRL 0x1094
215#define EGRTHRESHOLD_MASK 0x00003f00U
216#define EGRTHRESHOLDshift 8
217#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
218#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
219
176#define SGE_TIMER_VALUE_0_AND_1 0x10b8 220#define SGE_TIMER_VALUE_0_AND_1 0x10b8
177#define TIMERVALUE0_MASK 0xffff0000U 221#define TIMERVALUE0_MASK 0xffff0000U
178#define TIMERVALUE0_SHIFT 16 222#define TIMERVALUE0_SHIFT 16
@@ -184,64 +228,54 @@
184#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) 228#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
185 229
186#define SGE_TIMER_VALUE_2_AND_3 0x10bc 230#define SGE_TIMER_VALUE_2_AND_3 0x10bc
231#define TIMERVALUE2_MASK 0xffff0000U
232#define TIMERVALUE2_SHIFT 16
233#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT)
234#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
235#define TIMERVALUE3_MASK 0x0000ffffU
236#define TIMERVALUE3_SHIFT 0
237#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT)
238#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
239
187#define SGE_TIMER_VALUE_4_AND_5 0x10c0 240#define SGE_TIMER_VALUE_4_AND_5 0x10c0
241#define TIMERVALUE4_MASK 0xffff0000U
242#define TIMERVALUE4_SHIFT 16
243#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT)
244#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
245#define TIMERVALUE5_MASK 0x0000ffffU
246#define TIMERVALUE5_SHIFT 0
247#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT)
248#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
249
188#define SGE_DEBUG_INDEX 0x10cc 250#define SGE_DEBUG_INDEX 0x10cc
189#define SGE_DEBUG_DATA_HIGH 0x10d0 251#define SGE_DEBUG_DATA_HIGH 0x10d0
190#define SGE_DEBUG_DATA_LOW 0x10d4 252#define SGE_DEBUG_DATA_LOW 0x10d4
191#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 253#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
192 254
193#define S_LP_INT_THRESH 12
194#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
195#define S_HP_INT_THRESH 28 255#define S_HP_INT_THRESH 28
256#define M_HP_INT_THRESH 0xfU
196#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) 257#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
258#define M_HP_COUNT 0x7ffU
259#define S_HP_COUNT 16
260#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
261#define S_LP_INT_THRESH 12
262#define M_LP_INT_THRESH 0xfU
263#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
264#define M_LP_COUNT 0x7ffU
265#define S_LP_COUNT 0
266#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
197#define A_SGE_DBFIFO_STATUS 0x10a4 267#define A_SGE_DBFIFO_STATUS 0x10a4
198 268
199#define S_ENABLE_DROP 13 269#define S_ENABLE_DROP 13
200#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) 270#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
201#define F_ENABLE_DROP V_ENABLE_DROP(1U) 271#define F_ENABLE_DROP V_ENABLE_DROP(1U)
202#define A_SGE_DOORBELL_CONTROL 0x10a8
203
204#define A_SGE_CTXT_CMD 0x11fc
205#define A_SGE_DBQ_CTXT_BADDR 0x1084
206
207#define A_SGE_PF_KDOORBELL 0x0
208
209#define S_QID 15
210#define V_QID(x) ((x) << S_QID)
211
212#define S_PIDX 0
213#define V_PIDX(x) ((x) << S_PIDX)
214
215#define M_LP_COUNT 0x7ffU
216#define S_LP_COUNT 0
217#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
218
219#define M_HP_COUNT 0x7ffU
220#define S_HP_COUNT 16
221#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
222
223#define A_SGE_INT_ENABLE3 0x1040
224
225#define S_DBFIFO_HP_INT 8
226#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
227#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
228
229#define S_DBFIFO_LP_INT 7
230#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
231#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
232
233#define S_DROPPED_DB 0 272#define S_DROPPED_DB 0
234#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) 273#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
235#define F_DROPPED_DB V_DROPPED_DB(1U) 274#define F_DROPPED_DB V_DROPPED_DB(1U)
275#define A_SGE_DOORBELL_CONTROL 0x10a8
236 276
237#define S_ERR_DROPPED_DB 18 277#define A_SGE_CTXT_CMD 0x11fc
238#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) 278#define A_SGE_DBQ_CTXT_BADDR 0x1084
239#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
240
241#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
242
243#define M_HP_INT_THRESH 0xfU
244#define M_LP_INT_THRESH 0xfU
245 279
246#define PCIE_PF_CLI 0x44 280#define PCIE_PF_CLI 0x44
247#define PCIE_INT_CAUSE 0x3004 281#define PCIE_INT_CAUSE 0x3004
@@ -287,6 +321,8 @@
287#define WINDOW(x) ((x) << WINDOW_SHIFT) 321#define WINDOW(x) ((x) << WINDOW_SHIFT)
288#define PCIE_MEM_ACCESS_OFFSET 0x306c 322#define PCIE_MEM_ACCESS_OFFSET 0x306c
289 323
324#define PCIE_FW 0x30b8
325
290#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 326#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
291#define RNPP 0x80000000U 327#define RNPP 0x80000000U
292#define RPCP 0x20000000U 328#define RPCP 0x20000000U
@@ -364,7 +400,7 @@
364#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU 400#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
365#define MEM_WRAP_CLIENT_NUM_SHIFT 0 401#define MEM_WRAP_CLIENT_NUM_SHIFT 0
366#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) 402#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
367 403#define MA_PCIE_FW 0x30b8
368#define MA_PARITY_ERROR_STATUS 0x77f4 404#define MA_PARITY_ERROR_STATUS 0x77f4
369 405
370#define EDC_0_BASE_ADDR 0x7900 406#define EDC_0_BASE_ADDR 0x7900
@@ -385,6 +421,7 @@
385 421
386#define CIM_BOOT_CFG 0x7b00 422#define CIM_BOOT_CFG 0x7b00
387#define BOOTADDR_MASK 0xffffff00U 423#define BOOTADDR_MASK 0xffffff00U
424#define UPCRST 0x1U
388 425
389#define CIM_PF_MAILBOX_DATA 0x240 426#define CIM_PF_MAILBOX_DATA 0x240
390#define CIM_PF_MAILBOX_CTRL 0x280 427#define CIM_PF_MAILBOX_CTRL 0x280
@@ -457,6 +494,13 @@
457#define VLANEXTENABLE_MASK 0x0000f000U 494#define VLANEXTENABLE_MASK 0x0000f000U
458#define VLANEXTENABLE_SHIFT 12 495#define VLANEXTENABLE_SHIFT 12
459 496
497#define TP_GLOBAL_CONFIG 0x7d08
498#define FIVETUPLELOOKUP_SHIFT 17
499#define FIVETUPLELOOKUP_MASK 0x00060000U
500#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT)
501#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
502 FIVETUPLELOOKUP_SHIFT)
503
460#define TP_PARA_REG2 0x7d68 504#define TP_PARA_REG2 0x7d68
461#define MAXRXDATA_MASK 0xffff0000U 505#define MAXRXDATA_MASK 0xffff0000U
462#define MAXRXDATA_SHIFT 16 506#define MAXRXDATA_SHIFT 16
@@ -466,8 +510,47 @@
466#define TIMERRESOLUTION_MASK 0x00ff0000U 510#define TIMERRESOLUTION_MASK 0x00ff0000U
467#define TIMERRESOLUTION_SHIFT 16 511#define TIMERRESOLUTION_SHIFT 16
468#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) 512#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
513#define DELAYEDACKRESOLUTION_MASK 0x000000ffU
514#define DELAYEDACKRESOLUTION_SHIFT 0
515#define DELAYEDACKRESOLUTION_GET(x) \
516 (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
469 517
470#define TP_SHIFT_CNT 0x7dc0 518#define TP_SHIFT_CNT 0x7dc0
519#define SYNSHIFTMAX_SHIFT 24
520#define SYNSHIFTMAX_MASK 0xff000000U
521#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT)
522#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \
523 SYNSHIFTMAX_SHIFT)
524#define RXTSHIFTMAXR1_SHIFT 20
525#define RXTSHIFTMAXR1_MASK 0x00f00000U
526#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT)
527#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \
528 RXTSHIFTMAXR1_SHIFT)
529#define RXTSHIFTMAXR2_SHIFT 16
530#define RXTSHIFTMAXR2_MASK 0x000f0000U
531#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT)
532#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \
533 RXTSHIFTMAXR2_SHIFT)
534#define PERSHIFTBACKOFFMAX_SHIFT 12
535#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U
536#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT)
537#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
538 PERSHIFTBACKOFFMAX_SHIFT)
539#define PERSHIFTMAX_SHIFT 8
540#define PERSHIFTMAX_MASK 0x00000f00U
541#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT)
542#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \
543 PERSHIFTMAX_SHIFT)
544#define KEEPALIVEMAXR1_SHIFT 4
545#define KEEPALIVEMAXR1_MASK 0x000000f0U
546#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT)
547#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \
548 KEEPALIVEMAXR1_SHIFT)
549#define KEEPALIVEMAXR2_SHIFT 0
550#define KEEPALIVEMAXR2_MASK 0x0000000fU
551#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT)
552#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \
553 KEEPALIVEMAXR2_SHIFT)
471 554
472#define TP_CCTRL_TABLE 0x7ddc 555#define TP_CCTRL_TABLE 0x7ddc
473#define TP_MTU_TABLE 0x7de4 556#define TP_MTU_TABLE 0x7de4
@@ -501,6 +584,20 @@
501#define TP_INT_CAUSE 0x7e74 584#define TP_INT_CAUSE 0x7e74
502#define FLMTXFLSTEMPTY 0x40000000U 585#define FLMTXFLSTEMPTY 0x40000000U
503 586
587#define TP_VLAN_PRI_MAP 0x140
588#define FRAGMENTATION_SHIFT 9
589#define FRAGMENTATION_MASK 0x00000200U
590#define MPSHITTYPE_MASK 0x00000100U
591#define MACMATCH_MASK 0x00000080U
592#define ETHERTYPE_MASK 0x00000040U
593#define PROTOCOL_MASK 0x00000020U
594#define TOS_MASK 0x00000010U
595#define VLAN_MASK 0x00000008U
596#define VNIC_ID_MASK 0x00000004U
597#define PORT_MASK 0x00000002U
598#define FCOE_SHIFT 0
599#define FCOE_MASK 0x00000001U
600
504#define TP_INGRESS_CONFIG 0x141 601#define TP_INGRESS_CONFIG 0x141
505#define VNIC 0x00000800U 602#define VNIC 0x00000800U
506#define CSUM_HAS_PSEUDO_HDR 0x00000400U 603#define CSUM_HAS_PSEUDO_HDR 0x00000400U
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ad53f796b574..a6364632b490 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -79,6 +79,8 @@ struct fw_wr_hdr {
79#define FW_WR_FLOWID(x) ((x) << 8) 79#define FW_WR_FLOWID(x) ((x) << 8)
80#define FW_WR_LEN16(x) ((x) << 0) 80#define FW_WR_LEN16(x) ((x) << 0)
81 81
82#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
83
82struct fw_ulptx_wr { 84struct fw_ulptx_wr {
83 __be32 op_to_compl; 85 __be32 op_to_compl;
84 __be32 flowid_len16; 86 __be32 flowid_len16;
@@ -155,6 +157,17 @@ struct fw_eth_tx_pkt_vm_wr {
155 157
156#define FW_CMD_MAX_TIMEOUT 3000 158#define FW_CMD_MAX_TIMEOUT 3000
157 159
160/*
161 * If a host driver does a HELLO and discovers that there's already a MASTER
162 * selected, we may have to wait for that MASTER to finish issuing RESET,
163 * configuration and INITIALIZE commands. Also, there's a possibility that
164 * our own HELLO may get lost if it happens right as the MASTER is issuing a
165 * RESET command, so we need to be willing to make a few retries of our HELLO.
166 */
167#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
168#define FW_CMD_HELLO_RETRIES 3
169
170
158enum fw_cmd_opcodes { 171enum fw_cmd_opcodes {
159 FW_LDST_CMD = 0x01, 172 FW_LDST_CMD = 0x01,
160 FW_RESET_CMD = 0x03, 173 FW_RESET_CMD = 0x03,
@@ -304,7 +317,17 @@ struct fw_reset_cmd {
304 __be32 op_to_write; 317 __be32 op_to_write;
305 __be32 retval_len16; 318 __be32 retval_len16;
306 __be32 val; 319 __be32 val;
307 __be32 r3; 320 __be32 halt_pkd;
321};
322
323#define FW_RESET_CMD_HALT_SHIFT 31
324#define FW_RESET_CMD_HALT_MASK 0x1
325#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT)
326#define FW_RESET_CMD_HALT_GET(x) \
327 (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
328
329enum fw_hellow_cmd {
330 fw_hello_cmd_stage_os = 0x0
308}; 331};
309 332
310struct fw_hello_cmd { 333struct fw_hello_cmd {
@@ -315,8 +338,14 @@ struct fw_hello_cmd {
315#define FW_HELLO_CMD_INIT (1U << 30) 338#define FW_HELLO_CMD_INIT (1U << 30)
316#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) 339#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
317#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) 340#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
318#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) 341#define FW_HELLO_CMD_MBMASTER_MASK 0xfU
342#define FW_HELLO_CMD_MBMASTER_SHIFT 24
343#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
344#define FW_HELLO_CMD_MBMASTER_GET(x) \
345 (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
319#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) 346#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
347#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
348#define FW_HELLO_CMD_CLEARINIT (1U << 16)
320 __be32 fwrev; 349 __be32 fwrev;
321}; 350};
322 351
@@ -401,6 +430,14 @@ enum fw_caps_config_fcoe {
401 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, 430 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
402}; 431};
403 432
433enum fw_memtype_cf {
434 FW_MEMTYPE_CF_EDC0 = 0x0,
435 FW_MEMTYPE_CF_EDC1 = 0x1,
436 FW_MEMTYPE_CF_EXTMEM = 0x2,
437 FW_MEMTYPE_CF_FLASH = 0x4,
438 FW_MEMTYPE_CF_INTERNAL = 0x5,
439};
440
404struct fw_caps_config_cmd { 441struct fw_caps_config_cmd {
405 __be32 op_to_write; 442 __be32 op_to_write;
406 __be32 retval_len16; 443 __be32 retval_len16;
@@ -416,10 +453,15 @@ struct fw_caps_config_cmd {
416 __be16 r4; 453 __be16 r4;
417 __be16 iscsicaps; 454 __be16 iscsicaps;
418 __be16 fcoecaps; 455 __be16 fcoecaps;
419 __be32 r5; 456 __be32 cfcsum;
420 __be64 r6; 457 __be32 finiver;
458 __be32 finicsum;
421}; 459};
422 460
461#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27)
462#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24)
463#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
464
423/* 465/*
424 * params command mnemonics 466 * params command mnemonics
425 */ 467 */
@@ -451,6 +493,7 @@ enum fw_params_param_dev {
451 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, 493 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
452 FW_PARAMS_PARAM_DEV_FWREV = 0x0B, 494 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
453 FW_PARAMS_PARAM_DEV_TPREV = 0x0C, 495 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
496 FW_PARAMS_PARAM_DEV_CF = 0x0D,
454}; 497};
455 498
456/* 499/*
@@ -492,6 +535,8 @@ enum fw_params_param_pfvf {
492 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, 535 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
493 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, 536 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
494 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, 537 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
538 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
539 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
495}; 540};
496 541
497/* 542/*
@@ -507,8 +552,16 @@ enum fw_params_param_dmaq {
507 552
508#define FW_PARAMS_MNEM(x) ((x) << 24) 553#define FW_PARAMS_MNEM(x) ((x) << 24)
509#define FW_PARAMS_PARAM_X(x) ((x) << 16) 554#define FW_PARAMS_PARAM_X(x) ((x) << 16)
510#define FW_PARAMS_PARAM_Y(x) ((x) << 8) 555#define FW_PARAMS_PARAM_Y_SHIFT 8
511#define FW_PARAMS_PARAM_Z(x) ((x) << 0) 556#define FW_PARAMS_PARAM_Y_MASK 0xffU
557#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT)
558#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
559 FW_PARAMS_PARAM_Y_MASK)
560#define FW_PARAMS_PARAM_Z_SHIFT 0
561#define FW_PARAMS_PARAM_Z_MASK 0xffu
562#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT)
563#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
564 FW_PARAMS_PARAM_Z_MASK)
512#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) 565#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
513#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) 566#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
514 567
@@ -1599,6 +1652,16 @@ struct fw_debug_cmd {
1599 } u; 1652 } u;
1600}; 1653};
1601 1654
1655#define FW_PCIE_FW_ERR (1U << 31)
1656#define FW_PCIE_FW_INIT (1U << 30)
1657#define FW_PCIE_FW_HALT (1U << 29)
1658#define FW_PCIE_FW_MASTER_VLD (1U << 15)
1659#define FW_PCIE_FW_MASTER_MASK 0x7
1660#define FW_PCIE_FW_MASTER_SHIFT 12
1661#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
1662#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
1663 FW_PCIE_FW_MASTER_MASK)
1664
1602struct fw_hdr { 1665struct fw_hdr {
1603 u8 ver; 1666 u8 ver;
1604 u8 reserved1; 1667 u8 reserved1;
@@ -1613,7 +1676,11 @@ struct fw_hdr {
1613 u8 intfver_iscsi; 1676 u8 intfver_iscsi;
1614 u8 intfver_fcoe; 1677 u8 intfver_fcoe;
1615 u8 reserved2; 1678 u8 reserved2;
1616 __be32 reserved3[27]; 1679 __u32 reserved3;
1680 __u32 reserved4;
1681 __u32 reserved5;
1682 __be32 flags;
1683 __be32 reserved6[23];
1617}; 1684};
1618 1685
1619#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) 1686#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
@@ -1621,18 +1688,8 @@ struct fw_hdr {
1621#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) 1688#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
1622#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) 1689#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
1623 1690
1624#define S_FW_CMD_OP 24 1691enum fw_hdr_flags {
1625#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) 1692 FW_HDR_FLAGS_RESET_HALT = 0x00000001,
1626 1693};
1627#define S_FW_CMD_REQUEST 23
1628#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
1629#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
1630
1631#define S_FW_CMD_WRITE 21
1632#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
1633#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
1634
1635#define S_FW_LDST_CMD_ADDRSPACE 0
1636#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
1637 1694
1638#endif /* _T4FW_INTERFACE_H_ */ 1695#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8877fbfefb63..f16745f4b36b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter)
2421 fl0, fl1); 2421 fl0, fl1);
2422 return -EINVAL; 2422 return -EINVAL;
2423 } 2423 }
2424 if ((sge_params->sge_control & RXPKTCPLMODE) == 0) { 2424 if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
2425 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); 2425 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2426 return -EINVAL; 2426 return -EINVAL;
2427 } 2427 }
@@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter)
2431 */ 2431 */
2432 if (fl1) 2432 if (fl1)
2433 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; 2433 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
2434 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64); 2434 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
2435 ? 128 : 64);
2435 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); 2436 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
2436 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2437 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
2437 SGE_INGPADBOUNDARY_SHIFT); 2438 SGE_INGPADBOUNDARY_SHIFT);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d266c86a53f7..cf4c05bdf5fe 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
110#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 110#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
111#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 111#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
112 112
113#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
113#define FW_VER_LEN 32 114#define FW_VER_LEN 32
114 115
115struct be_dma_mem { 116struct be_dma_mem {
@@ -336,7 +337,6 @@ struct phy_info {
336 u16 auto_speeds_supported; 337 u16 auto_speeds_supported;
337 u16 fixed_speeds_supported; 338 u16 fixed_speeds_supported;
338 int link_speed; 339 int link_speed;
339 int forced_port_speed;
340 u32 dac_cable_len; 340 u32 dac_cable_len;
341 u32 advertising; 341 u32 advertising;
342 u32 supported; 342 u32 supported;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8c63d06ab12b..af60bb26e330 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
120 120
121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
122 dev_warn(&adapter->pdev->dev, 122 dev_warn(&adapter->pdev->dev,
123 "opcode %d-%d is not permitted\n", 123 "VF is not privileged to issue opcode %d-%d\n",
124 opcode, subsystem); 124 opcode, subsystem);
125 } else { 125 } else {
126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -165,14 +165,13 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
165 } 165 }
166} 166}
167 167
168/* Grp5 QOS Speed evt */ 168/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
169static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 169static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
170 struct be_async_event_grp5_qos_link_speed *evt) 170 struct be_async_event_grp5_qos_link_speed *evt)
171{ 171{
172 if (evt->physical_port == adapter->port_num) { 172 if (adapter->phy.link_speed >= 0 &&
173 /* qos_link_speed is in units of 10 Mbps */ 173 evt->physical_port == adapter->port_num)
174 adapter->phy.link_speed = evt->qos_link_speed * 10; 174 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
175 }
176} 175}
177 176
178/*Grp5 PVID evt*/ 177/*Grp5 PVID evt*/
@@ -717,7 +716,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
717 716
718/* Use MCC */ 717/* Use MCC */
719int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 718int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
720 u8 type, bool permanent, u32 if_handle, u32 pmac_id) 719 bool permanent, u32 if_handle, u32 pmac_id)
721{ 720{
722 struct be_mcc_wrb *wrb; 721 struct be_mcc_wrb *wrb;
723 struct be_cmd_req_mac_query *req; 722 struct be_cmd_req_mac_query *req;
@@ -734,7 +733,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
734 733
735 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 734 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
736 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 735 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
737 req->type = type; 736 req->type = MAC_ADDRESS_TYPE_NETWORK;
738 if (permanent) { 737 if (permanent) {
739 req->permanent = 1; 738 req->permanent = 1;
740 } else { 739 } else {
@@ -1326,9 +1325,28 @@ err:
1326 return status; 1325 return status;
1327} 1326}
1328 1327
1329/* Uses synchronous mcc */ 1328static int be_mac_to_link_speed(int mac_speed)
1330int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, 1329{
1331 u16 *link_speed, u8 *link_status, u32 dom) 1330 switch (mac_speed) {
1331 case PHY_LINK_SPEED_ZERO:
1332 return 0;
1333 case PHY_LINK_SPEED_10MBPS:
1334 return 10;
1335 case PHY_LINK_SPEED_100MBPS:
1336 return 100;
1337 case PHY_LINK_SPEED_1GBPS:
1338 return 1000;
1339 case PHY_LINK_SPEED_10GBPS:
1340 return 10000;
1341 }
1342 return 0;
1343}
1344
1345/* Uses synchronous mcc
1346 * Returns link_speed in Mbps
1347 */
1348int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1349 u8 *link_status, u32 dom)
1332{ 1350{
1333 struct be_mcc_wrb *wrb; 1351 struct be_mcc_wrb *wrb;
1334 struct be_cmd_req_link_status *req; 1352 struct be_cmd_req_link_status *req;
@@ -1357,11 +1375,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1357 status = be_mcc_notify_wait(adapter); 1375 status = be_mcc_notify_wait(adapter);
1358 if (!status) { 1376 if (!status) {
1359 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1377 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1360 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { 1378 if (link_speed) {
1361 if (link_speed) 1379 *link_speed = resp->link_speed ?
1362 *link_speed = le16_to_cpu(resp->link_speed); 1380 le16_to_cpu(resp->link_speed) * 10 :
1363 if (mac_speed) 1381 be_mac_to_link_speed(resp->mac_speed);
1364 *mac_speed = resp->mac_speed; 1382
1383 if (!resp->logical_link_status)
1384 *link_speed = 0;
1365 } 1385 }
1366 if (link_status) 1386 if (link_status)
1367 *link_status = resp->logical_link_status; 1387 *link_status = resp->logical_link_status;
@@ -2405,6 +2425,9 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
2405 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 2425 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2406 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 2426 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2407 CAPABILITY_BE3_NATIVE_ERX_API; 2427 CAPABILITY_BE3_NATIVE_ERX_API;
2428 if (!adapter->be3_native)
2429 dev_warn(&adapter->pdev->dev,
2430 "adapter not in advanced mode\n");
2408 } 2431 }
2409err: 2432err:
2410 mutex_unlock(&adapter->mbox_lock); 2433 mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 250f19b5f7b6..0936e21e3cff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1687,7 +1687,7 @@ struct be_cmd_req_set_ext_fat_caps {
1687extern int be_pci_fnum_get(struct be_adapter *adapter); 1687extern int be_pci_fnum_get(struct be_adapter *adapter);
1688extern int be_fw_wait_ready(struct be_adapter *adapter); 1688extern int be_fw_wait_ready(struct be_adapter *adapter);
1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1690 u8 type, bool permanent, u32 if_handle, u32 pmac_id); 1690 bool permanent, u32 if_handle, u32 pmac_id);
1691extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1691extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1692 u32 if_id, u32 *pmac_id, u32 domain); 1692 u32 if_id, u32 *pmac_id, u32 domain);
1693extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, 1693extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
@@ -1714,8 +1714,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1714 int type); 1714 int type);
1715extern int be_cmd_rxq_destroy(struct be_adapter *adapter, 1715extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
1716 struct be_queue_info *q); 1716 struct be_queue_info *q);
1717extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, 1717extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1718 u16 *link_speed, u8 *link_status, u32 dom); 1718 u8 *link_status, u32 dom);
1719extern int be_cmd_reset(struct be_adapter *adapter); 1719extern int be_cmd_reset(struct be_adapter *adapter);
1720extern int be_cmd_get_stats(struct be_adapter *adapter, 1720extern int be_cmd_get_stats(struct be_adapter *adapter,
1721 struct be_dma_mem *nonemb_cmd); 1721 struct be_dma_mem *nonemb_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c0e700653f96..8e6fb0ba6aa9 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -512,28 +512,6 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
512 return val; 512 return val;
513} 513}
514 514
515static int convert_to_et_speed(u32 be_speed)
516{
517 int et_speed = SPEED_10000;
518
519 switch (be_speed) {
520 case PHY_LINK_SPEED_10MBPS:
521 et_speed = SPEED_10;
522 break;
523 case PHY_LINK_SPEED_100MBPS:
524 et_speed = SPEED_100;
525 break;
526 case PHY_LINK_SPEED_1GBPS:
527 et_speed = SPEED_1000;
528 break;
529 case PHY_LINK_SPEED_10GBPS:
530 et_speed = SPEED_10000;
531 break;
532 }
533
534 return et_speed;
535}
536
537bool be_pause_supported(struct be_adapter *adapter) 515bool be_pause_supported(struct be_adapter *adapter)
538{ 516{
539 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB || 517 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
@@ -544,27 +522,16 @@ bool be_pause_supported(struct be_adapter *adapter)
544static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 522static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
545{ 523{
546 struct be_adapter *adapter = netdev_priv(netdev); 524 struct be_adapter *adapter = netdev_priv(netdev);
547 u8 port_speed = 0;
548 u16 link_speed = 0;
549 u8 link_status; 525 u8 link_status;
550 u32 et_speed = 0; 526 u16 link_speed = 0;
551 int status; 527 int status;
552 528
553 if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) { 529 if (adapter->phy.link_speed < 0) {
554 if (adapter->phy.forced_port_speed < 0) { 530 status = be_cmd_link_status_query(adapter, &link_speed,
555 status = be_cmd_link_status_query(adapter, &port_speed, 531 &link_status, 0);
556 &link_speed, &link_status, 0); 532 if (!status)
557 if (!status) 533 be_link_status_update(adapter, link_status);
558 be_link_status_update(adapter, link_status); 534 ethtool_cmd_speed_set(ecmd, link_speed);
559 if (link_speed)
560 et_speed = link_speed * 10;
561 else if (link_status)
562 et_speed = convert_to_et_speed(port_speed);
563 } else {
564 et_speed = adapter->phy.forced_port_speed;
565 }
566
567 ethtool_cmd_speed_set(ecmd, et_speed);
568 535
569 status = be_cmd_get_phy_info(adapter); 536 status = be_cmd_get_phy_info(adapter);
570 if (status) 537 if (status)
@@ -773,8 +740,8 @@ static void
773be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 740be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
774{ 741{
775 struct be_adapter *adapter = netdev_priv(netdev); 742 struct be_adapter *adapter = netdev_priv(netdev);
776 u8 mac_speed = 0; 743 int status;
777 u16 qos_link_speed = 0; 744 u8 link_status = 0;
778 745
779 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 746 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
780 747
@@ -798,11 +765,11 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
798 test->flags |= ETH_TEST_FL_FAILED; 765 test->flags |= ETH_TEST_FL_FAILED;
799 } 766 }
800 767
801 if (be_cmd_link_status_query(adapter, &mac_speed, 768 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
802 &qos_link_speed, NULL, 0) != 0) { 769 if (status) {
803 test->flags |= ETH_TEST_FL_FAILED; 770 test->flags |= ETH_TEST_FL_FAILED;
804 data[4] = -1; 771 data[4] = -1;
805 } else if (!mac_speed) { 772 } else if (!link_status) {
806 test->flags |= ETH_TEST_FL_FAILED; 773 test->flags |= ETH_TEST_FL_FAILED;
807 data[4] = 1; 774 data[4] = 1;
808 } 775 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 95d10472f236..eb3f2cb3b93b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -20,6 +20,7 @@
20#include "be.h" 20#include "be.h"
21#include "be_cmds.h" 21#include "be_cmds.h"
22#include <asm/div64.h> 22#include <asm/div64.h>
23#include <linux/aer.h>
23 24
24MODULE_VERSION(DRV_VER); 25MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids); 26MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -240,9 +241,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
240 if (!is_valid_ether_addr(addr->sa_data)) 241 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL; 242 return -EADDRNOTAVAIL;
242 243
243 status = be_cmd_mac_addr_query(adapter, current_mac, 244 status = be_cmd_mac_addr_query(adapter, current_mac, false,
244 MAC_ADDRESS_TYPE_NETWORK, false, 245 adapter->if_handle, 0);
245 adapter->if_handle, 0);
246 if (status) 246 if (status)
247 goto err; 247 goto err;
248 248
@@ -1075,7 +1075,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1075static int be_find_vfs(struct be_adapter *adapter, int vf_state) 1075static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1076{ 1076{
1077 struct pci_dev *dev, *pdev = adapter->pdev; 1077 struct pci_dev *dev, *pdev = adapter->pdev;
1078 int vfs = 0, assigned_vfs = 0, pos, vf_fn; 1078 int vfs = 0, assigned_vfs = 0, pos;
1079 u16 offset, stride; 1079 u16 offset, stride;
1080 1080
1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 1081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -1086,9 +1086,7 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1086 1086
1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); 1087 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1088 while (dev) { 1088 while (dev) {
1089 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF; 1089 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1090 if (dev->is_virtfn && dev->devfn == vf_fn &&
1091 dev->bus->number == pdev->bus->number) {
1092 vfs++; 1090 vfs++;
1093 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) 1091 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1094 assigned_vfs++; 1092 assigned_vfs++;
@@ -1896,6 +1894,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
1896 return status; 1894 return status;
1897 } 1895 }
1898 1896
1897 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1898 adapter->num_tx_qs);
1899 return 0; 1899 return 0;
1900} 1900}
1901 1901
@@ -1946,10 +1946,9 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1946 return rc; 1946 return rc;
1947 } 1947 }
1948 1948
1949 if (adapter->num_rx_qs != MAX_RX_QS) 1949 dev_info(&adapter->pdev->dev,
1950 dev_info(&adapter->pdev->dev, 1950 "created %d RSS queue(s) and 1 default RX queue\n",
1951 "Created only %d receive queues\n", adapter->num_rx_qs); 1951 adapter->num_rx_qs - 1);
1952
1953 return 0; 1952 return 0;
1954} 1953}
1955 1954
@@ -2176,8 +2175,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
2176{ 2175{
2177 u32 num = 0; 2176 u32 num = 0;
2178 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2177 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2179 !sriov_want(adapter) && be_physfn(adapter) && 2178 !sriov_want(adapter) && be_physfn(adapter)) {
2180 !be_is_mc(adapter)) {
2181 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2179 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2182 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); 2180 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2183 } 2181 }
@@ -2188,6 +2186,7 @@ static void be_msix_enable(struct be_adapter *adapter)
2188{ 2186{
2189#define BE_MIN_MSIX_VECTORS 1 2187#define BE_MIN_MSIX_VECTORS 1
2190 int i, status, num_vec, num_roce_vec = 0; 2188 int i, status, num_vec, num_roce_vec = 0;
2189 struct device *dev = &adapter->pdev->dev;
2191 2190
2192 /* If RSS queues are not used, need a vec for default RX Q */ 2191 /* If RSS queues are not used, need a vec for default RX Q */
2193 num_vec = min(be_num_rss_want(adapter), num_online_cpus()); 2192 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
@@ -2212,6 +2211,8 @@ static void be_msix_enable(struct be_adapter *adapter)
2212 num_vec) == 0) 2211 num_vec) == 0)
2213 goto done; 2212 goto done;
2214 } 2213 }
2214
2215 dev_warn(dev, "MSIx enable failed\n");
2215 return; 2216 return;
2216done: 2217done:
2217 if (be_roce_supported(adapter)) { 2218 if (be_roce_supported(adapter)) {
@@ -2225,6 +2226,7 @@ done:
2225 } 2226 }
2226 } else 2227 } else
2227 adapter->num_msix_vec = num_vec; 2228 adapter->num_msix_vec = num_vec;
2229 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2228 return; 2230 return;
2229} 2231}
2230 2232
@@ -2441,8 +2443,7 @@ static int be_open(struct net_device *netdev)
2441 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2443 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2442 } 2444 }
2443 2445
2444 status = be_cmd_link_status_query(adapter, NULL, NULL, 2446 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2445 &link_status, 0);
2446 if (!status) 2447 if (!status)
2447 be_link_status_update(adapter, link_status); 2448 be_link_status_update(adapter, link_status);
2448 2449
@@ -2646,8 +2647,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2646 } 2647 }
2647 2648
2648 for_all_vfs(adapter, vf_cfg, vf) { 2649 for_all_vfs(adapter, vf_cfg, vf) {
2649 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, 2650 lnk_speed = 1000;
2650 NULL, vf + 1); 2651 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2651 if (status) 2652 if (status)
2652 goto err; 2653 goto err;
2653 vf_cfg->tx_rate = lnk_speed * 10; 2654 vf_cfg->tx_rate = lnk_speed * 10;
@@ -2671,7 +2672,6 @@ static void be_setup_init(struct be_adapter *adapter)
2671 adapter->be3_native = false; 2672 adapter->be3_native = false;
2672 adapter->promiscuous = false; 2673 adapter->promiscuous = false;
2673 adapter->eq_next_idx = 0; 2674 adapter->eq_next_idx = 0;
2674 adapter->phy.forced_port_speed = -1;
2675} 2675}
2676 2676
2677static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, 2677static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2693,21 +2693,16 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2693 status = be_cmd_get_mac_from_list(adapter, mac, 2693 status = be_cmd_get_mac_from_list(adapter, mac,
2694 active_mac, pmac_id, 0); 2694 active_mac, pmac_id, 0);
2695 if (*active_mac) { 2695 if (*active_mac) {
2696 status = be_cmd_mac_addr_query(adapter, mac, 2696 status = be_cmd_mac_addr_query(adapter, mac, false,
2697 MAC_ADDRESS_TYPE_NETWORK, 2697 if_handle, *pmac_id);
2698 false, if_handle,
2699 *pmac_id);
2700 } 2698 }
2701 } else if (be_physfn(adapter)) { 2699 } else if (be_physfn(adapter)) {
2702 /* For BE3, for PF get permanent MAC */ 2700 /* For BE3, for PF get permanent MAC */
2703 status = be_cmd_mac_addr_query(adapter, mac, 2701 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2704 MAC_ADDRESS_TYPE_NETWORK, true,
2705 0, 0);
2706 *active_mac = false; 2702 *active_mac = false;
2707 } else { 2703 } else {
2708 /* For BE3, for VF get soft MAC assigned by PF*/ 2704 /* For BE3, for VF get soft MAC assigned by PF*/
2709 status = be_cmd_mac_addr_query(adapter, mac, 2705 status = be_cmd_mac_addr_query(adapter, mac, false,
2710 MAC_ADDRESS_TYPE_NETWORK, false,
2711 if_handle, 0); 2706 if_handle, 0);
2712 *active_mac = true; 2707 *active_mac = true;
2713 } 2708 }
@@ -2724,6 +2719,8 @@ static int be_get_config(struct be_adapter *adapter)
2724 if (pos) { 2719 if (pos) {
2725 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, 2720 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2726 &dev_num_vfs); 2721 &dev_num_vfs);
2722 if (!lancer_chip(adapter))
2723 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2727 adapter->dev_num_vfs = dev_num_vfs; 2724 adapter->dev_num_vfs = dev_num_vfs;
2728 } 2725 }
2729 return 0; 2726 return 0;
@@ -3437,6 +3434,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
3437 if (mem->va) 3434 if (mem->va)
3438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, 3435 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 mem->dma); 3436 mem->dma);
3437 kfree(adapter->pmac_id);
3440} 3438}
3441 3439
3442static int be_ctrl_init(struct be_adapter *adapter) 3440static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3471,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
3473 } 3471 }
3474 memset(rx_filter->va, 0, rx_filter->size); 3472 memset(rx_filter->va, 0, rx_filter->size);
3475 3473
3474 /* primary mac needs 1 pmac entry */
3475 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3476 sizeof(*adapter->pmac_id), GFP_KERNEL);
3477 if (!adapter->pmac_id)
3478 return -ENOMEM;
3479
3476 mutex_init(&adapter->mbox_lock); 3480 mutex_init(&adapter->mbox_lock);
3477 spin_lock_init(&adapter->mcc_lock); 3481 spin_lock_init(&adapter->mcc_lock);
3478 spin_lock_init(&adapter->mcc_cq_lock); 3482 spin_lock_init(&adapter->mcc_cq_lock);
@@ -3543,6 +3547,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
3543 3547
3544 be_ctrl_cleanup(adapter); 3548 be_ctrl_cleanup(adapter);
3545 3549
3550 pci_disable_pcie_error_reporting(pdev);
3551
3546 pci_set_drvdata(pdev, NULL); 3552 pci_set_drvdata(pdev, NULL);
3547 pci_release_regions(pdev); 3553 pci_release_regions(pdev);
3548 pci_disable_device(pdev); 3554 pci_disable_device(pdev);
@@ -3609,12 +3615,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
3609 else 3615 else
3610 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT; 3616 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3611 3617
3612 /* primary mac needs 1 pmac entry */
3613 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3614 sizeof(u32), GFP_KERNEL);
3615 if (!adapter->pmac_id)
3616 return -ENOMEM;
3617
3618 status = be_cmd_get_cntl_attributes(adapter); 3618 status = be_cmd_get_cntl_attributes(adapter);
3619 if (status) 3619 if (status)
3620 return status; 3620 return status;
@@ -3800,6 +3800,23 @@ static bool be_reset_required(struct be_adapter *adapter)
3800 return be_find_vfs(adapter, ENABLED) > 0 ? false : true; 3800 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3801} 3801}
3802 3802
3803static char *mc_name(struct be_adapter *adapter)
3804{
3805 if (adapter->function_mode & FLEX10_MODE)
3806 return "FLEX10";
3807 else if (adapter->function_mode & VNIC_MODE)
3808 return "vNIC";
3809 else if (adapter->function_mode & UMC_ENABLED)
3810 return "UMC";
3811 else
3812 return "";
3813}
3814
3815static inline char *func_name(struct be_adapter *adapter)
3816{
3817 return be_physfn(adapter) ? "PF" : "VF";
3818}
3819
3803static int __devinit be_probe(struct pci_dev *pdev, 3820static int __devinit be_probe(struct pci_dev *pdev,
3804 const struct pci_device_id *pdev_id) 3821 const struct pci_device_id *pdev_id)
3805{ 3822{
@@ -3844,6 +3861,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
3844 } 3861 }
3845 } 3862 }
3846 3863
3864 status = pci_enable_pcie_error_reporting(pdev);
3865 if (status)
3866 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3867
3847 status = be_ctrl_init(adapter); 3868 status = be_ctrl_init(adapter);
3848 if (status) 3869 if (status)
3849 goto free_netdev; 3870 goto free_netdev;
@@ -3886,7 +3907,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
3886 3907
3887 status = be_setup(adapter); 3908 status = be_setup(adapter);
3888 if (status) 3909 if (status)
3889 goto msix_disable; 3910 goto stats_clean;
3890 3911
3891 be_netdev_init(netdev); 3912 be_netdev_init(netdev);
3892 status = register_netdev(netdev); 3913 status = register_netdev(netdev);
@@ -3900,15 +3921,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
3900 3921
3901 be_cmd_query_port_name(adapter, &port_name); 3922 be_cmd_query_port_name(adapter, &port_name);
3902 3923
3903 dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev), 3924 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
3904 port_name); 3925 func_name(adapter), mc_name(adapter), port_name);
3905 3926
3906 return 0; 3927 return 0;
3907 3928
3908unsetup: 3929unsetup:
3909 be_clear(adapter); 3930 be_clear(adapter);
3910msix_disable:
3911 be_msix_disable(adapter);
3912stats_clean: 3931stats_clean:
3913 be_stats_cleanup(adapter); 3932 be_stats_cleanup(adapter);
3914ctrl_clean: 3933ctrl_clean:
@@ -4066,6 +4085,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4066 if (status) 4085 if (status)
4067 return PCI_ERS_RESULT_DISCONNECT; 4086 return PCI_ERS_RESULT_DISCONNECT;
4068 4087
4088 pci_cleanup_aer_uncorrect_error_status(pdev);
4069 return PCI_ERS_RESULT_RECOVERED; 4089 return PCI_ERS_RESULT_RECOVERED;
4070} 4090}
4071 4091
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3574e1499dfc..feff51664dcf 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
62 ---help--- 62 ---help---
63 This driver supports the MDIO bus used by the gianfar and UCC drivers. 63 This driver supports the MDIO bus used by the gianfar and UCC drivers.
64 64
65config FSL_XGMAC_MDIO
66 tristate "Freescale XGMAC MDIO"
67 depends on FSL_SOC
68 select PHYLIB
69 ---help---
70 This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
71
65config UCC_GETH 72config UCC_GETH
66 tristate "Freescale QE Gigabit Ethernet" 73 tristate "Freescale QE Gigabit Ethernet"
67 depends on QUICC_ENGINE 74 depends on QUICC_ENGINE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 1752488c9ee5..3d1839afff65 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
9endif 9endif
10obj-$(CONFIG_FS_ENET) += fs_enet/ 10obj-$(CONFIG_FS_ENET) += fs_enet/
11obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o 11obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
12obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
12obj-$(CONFIG_GIANFAR) += gianfar_driver.o 13obj-$(CONFIG_GIANFAR) += gianfar_driver.o
13obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o 14obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
14gianfar_driver-objs := gianfar.o \ 15gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9527b28d70d1..c93a05654b46 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -19,54 +19,90 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/unistd.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/interrupt.h>
25#include <linux/init.h> 23#include <linux/init.h>
26#include <linux/delay.h> 24#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/spinlock.h>
31#include <linux/mm.h>
32#include <linux/module.h> 25#include <linux/module.h>
33#include <linux/platform_device.h>
34#include <linux/crc32.h>
35#include <linux/mii.h> 26#include <linux/mii.h>
36#include <linux/phy.h>
37#include <linux/of.h>
38#include <linux/of_address.h> 27#include <linux/of_address.h>
39#include <linux/of_mdio.h> 28#include <linux/of_mdio.h>
40#include <linux/of_platform.h> 29#include <linux/of_device.h>
41 30
42#include <asm/io.h> 31#include <asm/io.h>
43#include <asm/irq.h> 32#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
44#include <asm/uaccess.h>
45#include <asm/ucc.h>
46 33
47#include "gianfar.h" 34#include "gianfar.h"
48#include "fsl_pq_mdio.h" 35
36#define MIIMIND_BUSY 0x00000001
37#define MIIMIND_NOTVALID 0x00000004
38#define MIIMCFG_INIT_VALUE 0x00000007
39#define MIIMCFG_RESET 0x80000000
40
41#define MII_READ_COMMAND 0x00000001
42
43struct fsl_pq_mii {
44 u32 miimcfg; /* MII management configuration reg */
45 u32 miimcom; /* MII management command reg */
46 u32 miimadd; /* MII management address reg */
47 u32 miimcon; /* MII management control reg */
48 u32 miimstat; /* MII management status reg */
49 u32 miimind; /* MII management indication reg */
50};
51
52struct fsl_pq_mdio {
53 u8 res1[16];
54 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
55 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
56 u8 res2[4];
57 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
58 u8 res3[1280];
59 struct fsl_pq_mii mii;
60 u8 res4[28];
61 u32 utbipar; /* TBI phy address reg (only on UCC) */
62 u8 res5[2728];
63} __packed;
49 64
50/* Number of microseconds to wait for an MII register to respond */ 65/* Number of microseconds to wait for an MII register to respond */
51#define MII_TIMEOUT 1000 66#define MII_TIMEOUT 1000
52 67
53struct fsl_pq_mdio_priv { 68struct fsl_pq_mdio_priv {
54 void __iomem *map; 69 void __iomem *map;
55 struct fsl_pq_mdio __iomem *regs; 70 struct fsl_pq_mii __iomem *regs;
71 int irqs[PHY_MAX_ADDR];
72};
73
74/*
75 * Per-device-type data. Each type of device tree node that we support gets
76 * one of these.
77 *
78 * @mii_offset: the offset of the MII registers within the memory map of the
79 * node. Some nodes define only the MII registers, and some define the whole
80 * MAC (which includes the MII registers).
81 *
82 * @get_tbipa: determines the address of the TBIPA register
83 *
84 * @ucc_configure: a special function for extra QE configuration
85 */
86struct fsl_pq_mdio_data {
87 unsigned int mii_offset; /* offset of the MII registers */
88 uint32_t __iomem * (*get_tbipa)(void __iomem *p);
89 void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
56}; 90};
57 91
58/* 92/*
59 * Write value to the PHY at mii_id at register regnum, 93 * Write value to the PHY at mii_id at register regnum, on the bus attached
60 * on the bus attached to the local interface, which may be different from the 94 * to the local interface, which may be different from the generic mdio bus
61 * generic mdio bus (tied to a single interface), waiting until the write is 95 * (tied to a single interface), waiting until the write is done before
62 * done before returning. This is helpful in programming interfaces like 96 * returning. This is helpful in programming interfaces like the TBI which
63 * the TBI which control interfaces like onchip SERDES and are always tied to 97 * control interfaces like onchip SERDES and are always tied to the local
64 * the local mdio pins, which may not be the same as system mdio bus, used for 98 * mdio pins, which may not be the same as system mdio bus, used for
65 * controlling the external PHYs, for example. 99 * controlling the external PHYs, for example.
66 */ 100 */
67int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 101static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
68 int regnum, u16 value) 102 u16 value)
69{ 103{
104 struct fsl_pq_mdio_priv *priv = bus->priv;
105 struct fsl_pq_mii __iomem *regs = priv->regs;
70 u32 status; 106 u32 status;
71 107
72 /* Set the PHY address and the register address we want to write */ 108 /* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
83} 119}
84 120
85/* 121/*
86 * Read the bus for PHY at addr mii_id, register regnum, and 122 * Read the bus for PHY at addr mii_id, register regnum, and return the value.
87 * return the value. Clears miimcom first. All PHY operation 123 * Clears miimcom first.
88 * done on the bus attached to the local interface, 124 *
89 * which may be different from the generic mdio bus 125 * All PHY operation done on the bus attached to the local interface, which
90 * This is helpful in programming interfaces like 126 * may be different from the generic mdio bus. This is helpful in programming
91 * the TBI which, in turn, control interfaces like onchip SERDES 127 * interfaces like the TBI which, in turn, control interfaces like on-chip
92 * and are always tied to the local mdio pins, which may not be the 128 * SERDES and are always tied to the local mdio pins, which may not be the
93 * same as system mdio bus, used for controlling the external PHYs, for eg. 129 * same as system mdio bus, used for controlling the external PHYs, for eg.
94 */ 130 */
95int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, 131static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
96 int mii_id, int regnum)
97{ 132{
98 u16 value; 133 struct fsl_pq_mdio_priv *priv = bus->priv;
134 struct fsl_pq_mii __iomem *regs = priv->regs;
99 u32 status; 135 u32 status;
136 u16 value;
100 137
101 /* Set the PHY address and the register address we want to read */ 138 /* Set the PHY address and the register address we want to read */
102 out_be32(&regs->miimadd, (mii_id << 8) | regnum); 139 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
115 /* Grab the value of the register from miimstat */ 152 /* Grab the value of the register from miimstat */
116 value = in_be32(&regs->miimstat); 153 value = in_be32(&regs->miimstat);
117 154
155 dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
118 return value; 156 return value;
119} 157}
120 158
121static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
122{
123 struct fsl_pq_mdio_priv *priv = bus->priv;
124
125 return priv->regs;
126}
127
128/*
129 * Write value to the PHY at mii_id at register regnum,
130 * on the bus, waiting until the write is done before returning.
131 */
132int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
133{
134 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
135
136 /* Write to the local MII regs */
137 return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
138}
139
140/*
141 * Read the bus for PHY at addr mii_id, register regnum, and
142 * return the value. Clears miimcom first.
143 */
144int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
145{
146 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
147
148 /* Read the local MII regs */
149 return fsl_pq_local_mdio_read(regs, mii_id, regnum);
150}
151
152/* Reset the MIIM registers, and wait for the bus to free */ 159/* Reset the MIIM registers, and wait for the bus to free */
153static int fsl_pq_mdio_reset(struct mii_bus *bus) 160static int fsl_pq_mdio_reset(struct mii_bus *bus)
154{ 161{
155 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 162 struct fsl_pq_mdio_priv *priv = bus->priv;
163 struct fsl_pq_mii __iomem *regs = priv->regs;
156 u32 status; 164 u32 status;
157 165
158 mutex_lock(&bus->mdio_lock); 166 mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
170 mutex_unlock(&bus->mdio_lock); 178 mutex_unlock(&bus->mdio_lock);
171 179
172 if (!status) { 180 if (!status) {
173 printk(KERN_ERR "%s: The MII Bus is stuck!\n", 181 dev_err(&bus->dev, "timeout waiting for MII bus\n");
174 bus->name);
175 return -EBUSY; 182 return -EBUSY;
176 } 183 }
177 184
178 return 0; 185 return 0;
179} 186}
180 187
181void fsl_pq_mdio_bus_name(char *name, struct device_node *np) 188#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
189/*
190 * This is mildly evil, but so is our hardware for doing this.
191 * Also, we have to cast back to struct gfar because of
192 * definition weirdness done in gianfar.h.
193 */
194static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
182{ 195{
183 const u32 *addr; 196 struct gfar __iomem *enet_regs = p;
184 u64 taddr = OF_BAD_ADDR;
185
186 addr = of_get_address(np, 0, NULL, NULL);
187 if (addr)
188 taddr = of_translate_address(np, addr);
189 197
190 snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, 198 return &enet_regs->tbipa;
191 (unsigned long long)taddr);
192} 199}
193EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
194 200
201/*
202 * Return the TBIPAR address for an eTSEC2 node
203 */
204static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
205{
206 return p;
207}
208#endif
195 209
196static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 210#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
211/*
212 * Return the TBIPAR address for a QE MDIO node
213 */
214static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
197{ 215{
198#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 216 struct fsl_pq_mdio __iomem *mdio = p;
199 struct gfar __iomem *enet_regs;
200 217
201 /* 218 return &mdio->utbipar;
202 * This is mildly evil, but so is our hardware for doing this.
203 * Also, we have to cast back to struct gfar because of
204 * definition weirdness done in gianfar.h.
205 */
206 if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
207 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
208 of_device_is_compatible(np, "gianfar")) {
209 enet_regs = (struct gfar __iomem *)regs;
210 return &enet_regs->tbipa;
211 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
212 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
213 return of_iomap(np, 1);
214 }
215#endif
216 return NULL;
217} 219}
218 220
219 221/*
220static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) 222 * Find the UCC node that controls the given MDIO node
223 *
224 * For some reason, the QE MDIO nodes are not children of the UCC devices
225 * that control them. Therefore, we need to scan all UCC nodes looking for
226 * the one that encompasses the given MDIO node. We do this by comparing
227 * physical addresses. The 'start' and 'end' addresses of the MDIO node are
228 * passed, and the correct UCC node will cover the entire address range.
229 *
230 * This assumes that there is only one QE MDIO node in the entire device tree.
231 */
232static void ucc_configure(phys_addr_t start, phys_addr_t end)
221{ 233{
222#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) 234 static bool found_mii_master;
223 struct device_node *np = NULL; 235 struct device_node *np = NULL;
224 int err = 0;
225 236
226 for_each_compatible_node(np, NULL, "ucc_geth") { 237 if (found_mii_master)
227 struct resource tempres; 238 return;
228 239
229 err = of_address_to_resource(np, 0, &tempres); 240 for_each_compatible_node(np, NULL, "ucc_geth") {
230 if (err) 241 struct resource res;
242 const uint32_t *iprop;
243 uint32_t id;
244 int ret;
245
246 ret = of_address_to_resource(np, 0, &res);
247 if (ret < 0) {
248 pr_debug("fsl-pq-mdio: no address range in node %s\n",
249 np->full_name);
231 continue; 250 continue;
251 }
232 252
233 /* if our mdio regs fall within this UCC regs range */ 253 /* if our mdio regs fall within this UCC regs range */
234 if ((start >= tempres.start) && (end <= tempres.end)) { 254 if ((start < res.start) || (end > res.end))
235 /* Find the id of the UCC */ 255 continue;
236 const u32 *id; 256
237 257 iprop = of_get_property(np, "cell-index", NULL);
238 id = of_get_property(np, "cell-index", NULL); 258 if (!iprop) {
239 if (!id) { 259 iprop = of_get_property(np, "device-id", NULL);
240 id = of_get_property(np, "device-id", NULL); 260 if (!iprop) {
241 if (!id) 261 pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
242 continue; 262 np->full_name);
263 continue;
243 } 264 }
265 }
244 266
245 *ucc_id = *id; 267 id = be32_to_cpup(iprop);
246 268
247 return 0; 269 /*
270 * cell-index and device-id for QE nodes are
271 * numbered from 1, not 0.
272 */
273 if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
274 pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
275 np->full_name);
276 continue;
248 } 277 }
278
279 pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
280 found_mii_master = true;
249 } 281 }
282}
250 283
251 if (err)
252 return err;
253 else
254 return -EINVAL;
255#else
256 return -ENODEV;
257#endif 284#endif
258}
259 285
260static int fsl_pq_mdio_probe(struct platform_device *ofdev) 286static struct of_device_id fsl_pq_mdio_match[] = {
287#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
288 {
289 .compatible = "fsl,gianfar-tbi",
290 .data = &(struct fsl_pq_mdio_data) {
291 .mii_offset = 0,
292 .get_tbipa = get_gfar_tbipa,
293 },
294 },
295 {
296 .compatible = "fsl,gianfar-mdio",
297 .data = &(struct fsl_pq_mdio_data) {
298 .mii_offset = 0,
299 .get_tbipa = get_gfar_tbipa,
300 },
301 },
302 {
303 .type = "mdio",
304 .compatible = "gianfar",
305 .data = &(struct fsl_pq_mdio_data) {
306 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
307 .get_tbipa = get_gfar_tbipa,
308 },
309 },
310 {
311 .compatible = "fsl,etsec2-tbi",
312 .data = &(struct fsl_pq_mdio_data) {
313 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
314 .get_tbipa = get_etsec_tbipa,
315 },
316 },
317 {
318 .compatible = "fsl,etsec2-mdio",
319 .data = &(struct fsl_pq_mdio_data) {
320 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
321 .get_tbipa = get_etsec_tbipa,
322 },
323 },
324#endif
325#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
326 {
327 .compatible = "fsl,ucc-mdio",
328 .data = &(struct fsl_pq_mdio_data) {
329 .mii_offset = 0,
330 .get_tbipa = get_ucc_tbipa,
331 .ucc_configure = ucc_configure,
332 },
333 },
334 {
335 /* Legacy UCC MDIO node */
336 .type = "mdio",
337 .compatible = "ucc_geth_phy",
338 .data = &(struct fsl_pq_mdio_data) {
339 .mii_offset = 0,
340 .get_tbipa = get_ucc_tbipa,
341 .ucc_configure = ucc_configure,
342 },
343 },
344#endif
345 /* No Kconfig option for Fman support yet */
346 {
347 .compatible = "fsl,fman-mdio",
348 .data = &(struct fsl_pq_mdio_data) {
349 .mii_offset = 0,
350 /* Fman TBI operations are handled elsewhere */
351 },
352 },
353
354 {},
355};
356MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
357
358static int fsl_pq_mdio_probe(struct platform_device *pdev)
261{ 359{
262 struct device_node *np = ofdev->dev.of_node; 360 const struct of_device_id *id =
361 of_match_device(fsl_pq_mdio_match, &pdev->dev);
362 const struct fsl_pq_mdio_data *data = id->data;
363 struct device_node *np = pdev->dev.of_node;
364 struct resource res;
263 struct device_node *tbi; 365 struct device_node *tbi;
264 struct fsl_pq_mdio_priv *priv; 366 struct fsl_pq_mdio_priv *priv;
265 struct fsl_pq_mdio __iomem *regs = NULL;
266 void __iomem *map;
267 u32 __iomem *tbipa;
268 struct mii_bus *new_bus; 367 struct mii_bus *new_bus;
269 int tbiaddr = -1;
270 const u32 *addrp;
271 u64 addr = 0, size = 0;
272 int err; 368 int err;
273 369
274 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 370 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
275 if (!priv)
276 return -ENOMEM;
277 371
278 new_bus = mdiobus_alloc(); 372 new_bus = mdiobus_alloc_size(sizeof(*priv));
279 if (!new_bus) { 373 if (!new_bus)
280 err = -ENOMEM; 374 return -ENOMEM;
281 goto err_free_priv;
282 }
283 375
376 priv = new_bus->priv;
284 new_bus->name = "Freescale PowerQUICC MII Bus", 377 new_bus->name = "Freescale PowerQUICC MII Bus",
285 new_bus->read = &fsl_pq_mdio_read, 378 new_bus->read = &fsl_pq_mdio_read;
286 new_bus->write = &fsl_pq_mdio_write, 379 new_bus->write = &fsl_pq_mdio_write;
287 new_bus->reset = &fsl_pq_mdio_reset, 380 new_bus->reset = &fsl_pq_mdio_reset;
288 new_bus->priv = priv; 381 new_bus->irq = priv->irqs;
289 fsl_pq_mdio_bus_name(new_bus->id, np); 382
290 383 err = of_address_to_resource(np, 0, &res);
291 addrp = of_get_address(np, 0, &size, NULL); 384 if (err < 0) {
292 if (!addrp) { 385 dev_err(&pdev->dev, "could not obtain address information\n");
293 err = -EINVAL; 386 goto error;
294 goto err_free_bus;
295 } 387 }
296 388
297 /* Set the PHY base address */ 389 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
298 addr = of_translate_address(np, addrp); 390 (unsigned long long)res.start);
299 if (addr == OF_BAD_ADDR) {
300 err = -EINVAL;
301 goto err_free_bus;
302 }
303 391
304 map = ioremap(addr, size); 392 priv->map = of_iomap(np, 0);
305 if (!map) { 393 if (!priv->map) {
306 err = -ENOMEM; 394 err = -ENOMEM;
307 goto err_free_bus; 395 goto error;
308 } 396 }
309 priv->map = map;
310
311 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
312 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
313 of_device_is_compatible(np, "fsl,ucc-mdio") ||
314 of_device_is_compatible(np, "ucc_geth_phy"))
315 map -= offsetof(struct fsl_pq_mdio, miimcfg);
316 regs = map;
317 priv->regs = regs;
318
319 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
320 397
321 if (NULL == new_bus->irq) { 398 /*
322 err = -ENOMEM; 399 * Some device tree nodes represent only the MII registers, and
323 goto err_unmap_regs; 400 * others represent the MAC and MII registers. The 'mii_offset' field
401 * contains the offset of the MII registers inside the mapped register
402 * space.
403 */
404 if (data->mii_offset > resource_size(&res)) {
405 dev_err(&pdev->dev, "invalid register map\n");
406 err = -EINVAL;
407 goto error;
324 } 408 }
409 priv->regs = priv->map + data->mii_offset;
325 410
326 new_bus->parent = &ofdev->dev; 411 new_bus->parent = &pdev->dev;
327 dev_set_drvdata(&ofdev->dev, new_bus); 412 dev_set_drvdata(&pdev->dev, new_bus);
328
329 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
330 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
331 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
332 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
333 of_device_is_compatible(np, "gianfar")) {
334 tbipa = get_gfar_tbipa(regs, np);
335 if (!tbipa) {
336 err = -EINVAL;
337 goto err_free_irqs;
338 }
339 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
340 of_device_is_compatible(np, "ucc_geth_phy")) {
341 u32 id;
342 static u32 mii_mng_master;
343
344 tbipa = &regs->utbipar;
345
346 if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
347 goto err_free_irqs;
348 413
349 if (!mii_mng_master) { 414 if (data->get_tbipa) {
350 mii_mng_master = id; 415 for_each_child_of_node(np, tbi) {
351 ucc_set_qe_mux_mii_mng(id - 1); 416 if (strcmp(tbi->type, "tbi-phy") == 0) {
417 dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
418 strrchr(tbi->full_name, '/') + 1);
419 break;
420 }
352 } 421 }
353 } else {
354 err = -ENODEV;
355 goto err_free_irqs;
356 }
357 422
358 for_each_child_of_node(np, tbi) { 423 if (tbi) {
359 if (!strncmp(tbi->type, "tbi-phy", 8)) 424 const u32 *prop = of_get_property(tbi, "reg", NULL);
360 break; 425 uint32_t __iomem *tbipa;
361 }
362 426
363 if (tbi) { 427 if (!prop) {
364 const u32 *prop = of_get_property(tbi, "reg", NULL); 428 dev_err(&pdev->dev,
429 "missing 'reg' property in node %s\n",
430 tbi->full_name);
431 err = -EBUSY;
432 goto error;
433 }
365 434
366 if (prop) 435 tbipa = data->get_tbipa(priv->map);
367 tbiaddr = *prop;
368 436
369 if (tbiaddr == -1) { 437 out_be32(tbipa, be32_to_cpup(prop));
370 err = -EBUSY;
371 goto err_free_irqs;
372 } else {
373 out_be32(tbipa, tbiaddr);
374 } 438 }
375 } 439 }
376 440
441 if (data->ucc_configure)
442 data->ucc_configure(res.start, res.end);
443
377 err = of_mdiobus_register(new_bus, np); 444 err = of_mdiobus_register(new_bus, np);
378 if (err) { 445 if (err) {
379 printk (KERN_ERR "%s: Cannot register as MDIO bus\n", 446 dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
380 new_bus->name); 447 new_bus->name);
381 goto err_free_irqs; 448 goto error;
382 } 449 }
383 450
384 return 0; 451 return 0;
385 452
386err_free_irqs: 453error:
387 kfree(new_bus->irq); 454 if (priv->map)
388err_unmap_regs: 455 iounmap(priv->map);
389 iounmap(priv->map); 456
390err_free_bus:
391 kfree(new_bus); 457 kfree(new_bus);
392err_free_priv: 458
393 kfree(priv);
394 return err; 459 return err;
395} 460}
396 461
397 462
398static int fsl_pq_mdio_remove(struct platform_device *ofdev) 463static int fsl_pq_mdio_remove(struct platform_device *pdev)
399{ 464{
400 struct device *device = &ofdev->dev; 465 struct device *device = &pdev->dev;
401 struct mii_bus *bus = dev_get_drvdata(device); 466 struct mii_bus *bus = dev_get_drvdata(device);
402 struct fsl_pq_mdio_priv *priv = bus->priv; 467 struct fsl_pq_mdio_priv *priv = bus->priv;
403 468
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
406 dev_set_drvdata(device, NULL); 471 dev_set_drvdata(device, NULL);
407 472
408 iounmap(priv->map); 473 iounmap(priv->map);
409 bus->priv = NULL;
410 mdiobus_free(bus); 474 mdiobus_free(bus);
411 kfree(priv);
412 475
413 return 0; 476 return 0;
414} 477}
415 478
416static struct of_device_id fsl_pq_mdio_match[] = {
417 {
418 .type = "mdio",
419 .compatible = "ucc_geth_phy",
420 },
421 {
422 .type = "mdio",
423 .compatible = "gianfar",
424 },
425 {
426 .compatible = "fsl,ucc-mdio",
427 },
428 {
429 .compatible = "fsl,gianfar-tbi",
430 },
431 {
432 .compatible = "fsl,gianfar-mdio",
433 },
434 {
435 .compatible = "fsl,etsec2-tbi",
436 },
437 {
438 .compatible = "fsl,etsec2-mdio",
439 },
440 {},
441};
442MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
443
444static struct platform_driver fsl_pq_mdio_driver = { 479static struct platform_driver fsl_pq_mdio_driver = {
445 .driver = { 480 .driver = {
446 .name = "fsl-pq_mdio", 481 .name = "fsl-pq_mdio",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644
index bd17a2a0139b..000000000000
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors
4 *
5 * Author: Andy Fleming
6 * Modifier: Sandeep Gopalpet
7 *
8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#ifndef __FSL_PQ_MDIO_H
17#define __FSL_PQ_MDIO_H
18
19#define MIIMIND_BUSY 0x00000001
20#define MIIMIND_NOTVALID 0x00000004
21#define MIIMCFG_INIT_VALUE 0x00000007
22#define MIIMCFG_RESET 0x80000000
23
24#define MII_READ_COMMAND 0x00000001
25
26struct fsl_pq_mdio {
27 u8 res1[16];
28 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
29 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
30 u8 res2[4];
31 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
32 u8 res3[1280];
33 u32 miimcfg; /* MII management configuration reg */
34 u32 miimcom; /* MII management command reg */
35 u32 miimadd; /* MII management address reg */
36 u32 miimcon; /* MII management control reg */
37 u32 miimstat; /* MII management status reg */
38 u32 miimind; /* MII management indication reg */
39 u8 reserved[28]; /* Space holder */
40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728];
42} __packed;
43
44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
46int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
47 int regnum, u16 value);
48int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
49int __init fsl_pq_mdio_init(void);
50void fsl_pq_mdio_exit(void);
51void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
52#endif /* FSL_PQ_MDIO_H */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d3233f59a82e..a1b52ec3b930 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -100,7 +100,6 @@
100#include <linux/of_net.h> 100#include <linux/of_net.h>
101 101
102#include "gianfar.h" 102#include "gianfar.h"
103#include "fsl_pq_mdio.h"
104 103
105#define TX_TIMEOUT (1*HZ) 104#define TX_TIMEOUT (1*HZ)
106 105
@@ -395,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev)
395 if (ndev->features & NETIF_F_IP_CSUM) 394 if (ndev->features & NETIF_F_IP_CSUM)
396 tctrl |= TCTRL_INIT_CSUM; 395 tctrl |= TCTRL_INIT_CSUM;
397 396
398 tctrl |= TCTRL_TXSCHED_PRIO; 397 if (priv->prio_sched_en)
398 tctrl |= TCTRL_TXSCHED_PRIO;
399 else {
400 tctrl |= TCTRL_TXSCHED_WRRS;
401 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
402 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
403 }
399 404
400 gfar_write(&regs->tctrl, tctrl); 405 gfar_write(&regs->tctrl, tctrl);
401 406
@@ -1161,6 +1166,9 @@ static int gfar_probe(struct platform_device *ofdev)
1161 priv->rx_filer_enable = 1; 1166 priv->rx_filer_enable = 1;
1162 /* Enable most messages by default */ 1167 /* Enable most messages by default */
1163 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1168 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1169 /* use priority h/w tx queue scheduling for single queue devices */
1170 if (priv->num_tx_queues == 1)
1171 priv->prio_sched_en = 1;
1164 1172
1165 /* Carrier starts down, phylib will bring it up */ 1173 /* Carrier starts down, phylib will bring it up */
1166 netif_carrier_off(dev); 1174 netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 2136c7ff5e6d..4141ef2ddafc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
301#define TCTRL_TFCPAUSE 0x00000008 301#define TCTRL_TFCPAUSE 0x00000008
302#define TCTRL_TXSCHED_MASK 0x00000006 302#define TCTRL_TXSCHED_MASK 0x00000006
303#define TCTRL_TXSCHED_INIT 0x00000000 303#define TCTRL_TXSCHED_INIT 0x00000000
304/* priority scheduling */
304#define TCTRL_TXSCHED_PRIO 0x00000002 305#define TCTRL_TXSCHED_PRIO 0x00000002
306/* weighted round-robin scheduling (WRRS) */
305#define TCTRL_TXSCHED_WRRS 0x00000004 307#define TCTRL_TXSCHED_WRRS 0x00000004
308/* default WRRS weight and policy setting,
309 * tailored to the tr03wt and tr47wt registers:
310 * equal weight for all Tx Qs, measured in 64byte units
311 */
312#define DEFAULT_WRRS_WEIGHT 0x18181818
313
306#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) 314#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
307 315
308#define IEVENT_INIT_CLEAR 0xffffffff 316#define IEVENT_INIT_CLEAR 0xffffffff
@@ -1098,7 +1106,8 @@ struct gfar_private {
1098 extended_hash:1, 1106 extended_hash:1,
1099 bd_stash_en:1, 1107 bd_stash_en:1,
1100 rx_filer_enable:1, 1108 rx_filer_enable:1,
1101 wol_en:1; /* Wake-on-LAN enabled */ 1109 wol_en:1, /* Wake-on-LAN enabled */
1110 prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
1102 unsigned short padding; 1111 unsigned short padding;
1103 1112
1104 /* PHY stuff */ 1113 /* PHY stuff */
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 0daa66b8eca0..b9db0e040563 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -510,7 +510,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
510 510
511 spin_unlock_irqrestore(&etsects->lock, flags); 511 spin_unlock_irqrestore(&etsects->lock, flags);
512 512
513 etsects->clock = ptp_clock_register(&etsects->caps); 513 etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
514 if (IS_ERR(etsects->clock)) { 514 if (IS_ERR(etsects->clock)) {
515 err = PTR_ERR(etsects->clock); 515 err = PTR_ERR(etsects->clock);
516 goto no_clock; 516 goto no_clock;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 21c6574c5f15..164288439220 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,7 +42,6 @@
42#include <asm/machdep.h> 42#include <asm/machdep.h>
43 43
44#include "ucc_geth.h" 44#include "ucc_geth.h"
45#include "fsl_pq_mdio.h"
46 45
47#undef DEBUG 46#undef DEBUG
48 47
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 000000000000..1afb5ea2a984
--- /dev/null
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,274 @@
1/*
2 * QorIQ 10G MDIO Controller
3 *
4 * Copyright 2012 Freescale Semiconductor, Inc.
5 *
6 * Authors: Andy Fleming <afleming@freescale.com>
7 * Timur Tabi <timur@freescale.com>
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/interrupt.h>
17#include <linux/module.h>
18#include <linux/phy.h>
19#include <linux/mdio.h>
20#include <linux/of_platform.h>
21#include <linux/of_mdio.h>
22
/* Number of microseconds spin_event_timeout() polls before giving up */
#define TIMEOUT	1000
25
/*
 * Register map of the TGEC MDIO controller.
 *
 * The MDIO registers begin 0x30 bytes (12 words) into the mapped region,
 * hence the reserved prefix.  All registers are big-endian 32-bit values
 * accessed with in_be32()/out_be32(); __packed guarantees the layout
 * matches the hardware exactly.
 */
struct tgec_mdio_controller {
	__be32 reserved[12];
	__be32 mdio_stat;	/* MDIO configuration and status */
	__be32 mdio_ctl;	/* MDIO control */
	__be32 mdio_data;	/* MDIO data */
	__be32 mdio_addr;	/* MDIO address */
} __packed;
33
/*
 * MDIO_STAT: management clock divider and bus-status bits.
 *
 * Macro arguments are fully parenthesized: the original forms expanded
 * the raw argument (e.g. "x>>1", "x & 0x1f"), which silently miscomputes
 * for compound argument expressions such as conditionals.
 */
#define MDIO_STAT_CLKDIV(x)	((((x) >> 1) & 0xff) << 8)
#define MDIO_STAT_BSY		(1 << 0)
#define MDIO_STAT_RD_ER		(1 << 1)
/* MDIO_CTL: transaction addressing and control bits */
#define MDIO_CTL_DEV_ADDR(x)	((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x)	(((x) & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS	(1 << 10)
#define MDIO_CTL_SCAN_EN	(1 << 11)
#define MDIO_CTL_POST_INC	(1 << 14)
#define MDIO_CTL_READ		(1 << 15)

/* MDIO_DATA: 16-bit payload plus the transaction-busy flag */
#define MDIO_DATA(x)		((x) & 0xffff)
/* 1u: left-shifting 1 (signed int) into bit 31 is undefined behavior */
#define MDIO_DATA_BSY		(1u << 31)
46
47/*
48 * Wait untill the MDIO bus is free
49 */
50static int xgmac_wait_until_free(struct device *dev,
51 struct tgec_mdio_controller __iomem *regs)
52{
53 uint32_t status;
54
55 /* Wait till the bus is free */
56 status = spin_event_timeout(
57 !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
58 if (!status) {
59 dev_err(dev, "timeout waiting for bus to be free\n");
60 return -ETIMEDOUT;
61 }
62
63 return 0;
64}
65
66/*
67 * Wait till the MDIO read or write operation is complete
68 */
69static int xgmac_wait_until_done(struct device *dev,
70 struct tgec_mdio_controller __iomem *regs)
71{
72 uint32_t status;
73
74 /* Wait till the MDIO write is complete */
75 status = spin_event_timeout(
76 !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
77 if (!status) {
78 dev_err(dev, "timeout waiting for operation to complete\n");
79 return -ETIMEDOUT;
80 }
81
82 return 0;
83}
84
/*
 * Write value to the PHY for this device to the register at regnum, waiting
 * until the write is done before it returns.  All PHY configuration has to
 * be done through the TSEC1 MIIM regs.
 */
static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
	struct tgec_mdio_controller __iomem *regs = bus->priv;
	uint16_t dev_addr = regnum >> 16;	/* upper bits carry the MMD device address */
	int ret;

	/* Setup the MII Mgmt clock speed */
	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));

	ret = xgmac_wait_until_free(&bus->dev, regs);
	if (ret)
		return ret;

	/* Set the port and dev addr */
	out_be32(&regs->mdio_ctl,
		 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));

	/* Set the register address */
	out_be32(&regs->mdio_addr, regnum & 0xffff);

	/* Wait for the address setup to be accepted before sending data */
	ret = xgmac_wait_until_free(&bus->dev, regs);
	if (ret)
		return ret;

	/* Write the value to the register; this starts the bus transaction */
	out_be32(&regs->mdio_data, MDIO_DATA(value));

	/* Block until the controller reports the write complete */
	ret = xgmac_wait_until_done(&bus->dev, regs);
	if (ret)
		return ret;

	return 0;
}
123
/*
 * Reads from register regnum in the PHY for device dev, returning the value.
 * Clears miimcom first.  All PHY configuration has to be done through the
 * TSEC1 MIIM regs.
 */
static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
	struct tgec_mdio_controller __iomem *regs = bus->priv;
	uint16_t dev_addr = regnum >> 16;	/* upper bits carry the MMD device address */
	uint32_t mdio_ctl;
	uint16_t value;
	int ret;

	/* Setup the MII Mgmt clock speed */
	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));

	ret = xgmac_wait_until_free(&bus->dev, regs);
	if (ret)
		return ret;

	/* Set the Port and Device Addrs */
	mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
	out_be32(&regs->mdio_ctl, mdio_ctl);

	/* Set the register address */
	out_be32(&regs->mdio_addr, regnum & 0xffff);

	/* Wait for the address setup to be accepted before starting the read */
	ret = xgmac_wait_until_free(&bus->dev, regs);
	if (ret)
		return ret;

	/* Initiate the read */
	out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);

	/* Block until the controller reports the read complete */
	ret = xgmac_wait_until_done(&bus->dev, regs);
	if (ret)
		return ret;

	/* Return all Fs if nothing was there */
	if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
		dev_err(&bus->dev, "MDIO read error\n");
		return 0xffff;
	}

	value = in_be32(&regs->mdio_data) & 0xffff;
	dev_dbg(&bus->dev, "read %04x\n", value);

	return value;
}
173
174/* Reset the MIIM registers, and wait for the bus to free */
175static int xgmac_mdio_reset(struct mii_bus *bus)
176{
177 struct tgec_mdio_controller __iomem *regs = bus->priv;
178 int ret;
179
180 mutex_lock(&bus->mdio_lock);
181
182 /* Setup the MII Mgmt clock speed */
183 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
184
185 ret = xgmac_wait_until_free(&bus->dev, regs);
186
187 mutex_unlock(&bus->mdio_lock);
188
189 return ret;
190}
191
/*
 * Probe: map the controller registers from the device tree, allocate and
 * populate an mii_bus, and register it.  On failure, unwinds via the
 * goto-cleanup labels in reverse acquisition order.
 */
static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mii_bus *bus;
	struct resource res;
	int ret;

	/* The device tree supplies the register block's physical address */
	ret = of_address_to_resource(np, 0, &res);
	if (ret) {
		dev_err(&pdev->dev, "could not obtain address\n");
		return ret;
	}

	/* Extra allocation is used as the per-PHY irq table (see below) */
	bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
	if (!bus)
		return -ENOMEM;

	bus->name = "Freescale XGMAC MDIO Bus";
	bus->read = xgmac_mdio_read;
	bus->write = xgmac_mdio_write;
	bus->reset = xgmac_mdio_reset;
	/*
	 * NOTE(review): this assumes mdiobus_alloc_size() left bus->priv
	 * pointing at the extra space, which is claimed here as the irq
	 * table before bus->priv is repurposed for the register map below
	 * — confirm against mdiobus_alloc_size() semantics.
	 */
	bus->irq = bus->priv;
	bus->parent = &pdev->dev;
	/* Unique bus id derived from the register block's physical address */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);

	/* Set the PHY base address */
	bus->priv = of_iomap(np, 0);
	if (!bus->priv) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = of_mdiobus_register(bus, np);
	if (ret) {
		dev_err(&pdev->dev, "cannot register MDIO bus\n");
		goto err_registration;
	}

	dev_set_drvdata(&pdev->dev, bus);

	return 0;

err_registration:
	iounmap(bus->priv);

err_ioremap:
	mdiobus_free(bus);

	return ret;
}
242
/* Remove: tear down, in reverse order, everything probe set up. */
static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = dev_get_drvdata(&pdev->dev);

	/* Unregister first so no MDIO accesses can race with the unmap */
	mdiobus_unregister(bus);
	iounmap(bus->priv);
	mdiobus_free(bus);

	return 0;
}
253
/* Device-tree compatible strings this driver binds to */
static struct of_device_id xgmac_mdio_match[] = {
	{
		.compatible = "fsl,fman-xmdio",
	},
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
261
/* Platform-driver glue: binds probe/remove to the DT match table above */
static struct platform_driver xgmac_mdio_driver = {
	.driver = {
		.name = "fsl-fman_xmdio",
		.of_match_table = xgmac_mdio_match,
	},
	.probe = xgmac_mdio_probe,
	.remove = xgmac_mdio_remove,
};

/* Generates module init/exit that register/unregister the driver */
module_platform_driver(xgmac_mdio_driver);

MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index fed5080a6b62..959faf7388e2 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -150,7 +150,7 @@ config SUN3_82586
150 150
151config ZNET 151config ZNET
152 tristate "Zenith Z-Note support (EXPERIMENTAL)" 152 tristate "Zenith Z-Note support (EXPERIMENTAL)"
153 depends on EXPERIMENTAL && ISA_DMA_API 153 depends on EXPERIMENTAL && ISA_DMA_API && X86
154 ---help--- 154 ---help---
155 The Zenith Z-Note notebook computer has a built-in network 155 The Zenith Z-Note notebook computer has a built-in network
156 (Ethernet) card, and this is the Linux driver for it. Note that the 156 (Ethernet) card, and this is the Linux driver for it. Note that the
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
index ba4e0cea3506..c9479e081b8a 100644
--- a/drivers/net/ethernet/i825xx/znet.c
+++ b/drivers/net/ethernet/i825xx/znet.c
@@ -865,14 +865,14 @@ static void hardware_init(struct net_device *dev)
865 disable_dma(znet->rx_dma); /* reset by an interrupting task. */ 865 disable_dma(znet->rx_dma); /* reset by an interrupting task. */
866 clear_dma_ff(znet->rx_dma); 866 clear_dma_ff(znet->rx_dma);
867 set_dma_mode(znet->rx_dma, DMA_RX_MODE); 867 set_dma_mode(znet->rx_dma, DMA_RX_MODE);
868 set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start); 868 set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
869 set_dma_count(znet->rx_dma, RX_BUF_SIZE); 869 set_dma_count(znet->rx_dma, RX_BUF_SIZE);
870 enable_dma(znet->rx_dma); 870 enable_dma(znet->rx_dma);
871 /* Now set up the Tx channel. */ 871 /* Now set up the Tx channel. */
872 disable_dma(znet->tx_dma); 872 disable_dma(znet->tx_dma);
873 clear_dma_ff(znet->tx_dma); 873 clear_dma_ff(znet->tx_dma);
874 set_dma_mode(znet->tx_dma, DMA_TX_MODE); 874 set_dma_mode(znet->tx_dma, DMA_TX_MODE);
875 set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start); 875 set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
876 set_dma_count(znet->tx_dma, znet->tx_buf_len<<1); 876 set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
877 enable_dma(znet->tx_dma); 877 enable_dma(znet->tx_dma);
878 release_dma_lock(flags); 878 release_dma_lock(flags);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 736a7d987db5..9089d00f1421 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
174 174
175 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || 175 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
176 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 176 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
177
178 /* MDI-X => 1; MDI => 0 */
179 if ((hw->media_type == e1000_media_type_copper) &&
180 netif_carrier_ok(netdev))
181 ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
182 ETH_TP_MDI_X :
183 ETH_TP_MDI);
184 else
185 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
186
187 if (hw->mdix == AUTO_ALL_MODES)
188 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
189 else
190 ecmd->eth_tp_mdix_ctrl = hw->mdix;
177 return 0; 191 return 0;
178} 192}
179 193
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
183 struct e1000_adapter *adapter = netdev_priv(netdev); 197 struct e1000_adapter *adapter = netdev_priv(netdev);
184 struct e1000_hw *hw = &adapter->hw; 198 struct e1000_hw *hw = &adapter->hw;
185 199
200 /*
201 * MDI setting is only allowed when autoneg enabled because
202 * some hardware doesn't allow MDI setting when speed or
203 * duplex is forced.
204 */
205 if (ecmd->eth_tp_mdix_ctrl) {
206 if (hw->media_type != e1000_media_type_copper)
207 return -EOPNOTSUPP;
208
209 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
210 (ecmd->autoneg != AUTONEG_ENABLE)) {
211 e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
212 return -EINVAL;
213 }
214 }
215
186 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 216 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
187 msleep(1); 217 msleep(1);
188 218
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
199 ecmd->advertising = hw->autoneg_advertised; 229 ecmd->advertising = hw->autoneg_advertised;
200 } else { 230 } else {
201 u32 speed = ethtool_cmd_speed(ecmd); 231 u32 speed = ethtool_cmd_speed(ecmd);
232 /* calling this overrides forced MDI setting */
202 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { 233 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
203 clear_bit(__E1000_RESETTING, &adapter->flags); 234 clear_bit(__E1000_RESETTING, &adapter->flags);
204 return -EINVAL; 235 return -EINVAL;
205 } 236 }
206 } 237 }
207 238
239 /* MDI-X => 2; MDI => 1; Auto => 3 */
240 if (ecmd->eth_tp_mdix_ctrl) {
241 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
242 hw->mdix = AUTO_ALL_MODES;
243 else
244 hw->mdix = ecmd->eth_tp_mdix_ctrl;
245 }
246
208 /* reset the link */ 247 /* reset the link */
209 248
210 if (netif_running(adapter->netdev)) { 249 if (netif_running(adapter->netdev)) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f3f9aeb7d1e1..222bfaff4622 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2014 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2014 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2015 } 2015 }
2016 2016
2017 netdev_reset_queue(adapter->netdev);
2017 size = sizeof(struct e1000_buffer) * tx_ring->count; 2018 size = sizeof(struct e1000_buffer) * tx_ring->count;
2018 memset(tx_ring->buffer_info, 0, size); 2019 memset(tx_ring->buffer_info, 0, size);
2019 2020
@@ -3273,6 +3274,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3273 nr_frags, mss); 3274 nr_frags, mss);
3274 3275
3275 if (count) { 3276 if (count) {
3277 netdev_sent_queue(netdev, skb->len);
3276 skb_tx_timestamp(skb); 3278 skb_tx_timestamp(skb);
3277 3279
3278 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3280 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
@@ -3860,6 +3862,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3860 unsigned int i, eop; 3862 unsigned int i, eop;
3861 unsigned int count = 0; 3863 unsigned int count = 0;
3862 unsigned int total_tx_bytes=0, total_tx_packets=0; 3864 unsigned int total_tx_bytes=0, total_tx_packets=0;
3865 unsigned int bytes_compl = 0, pkts_compl = 0;
3863 3866
3864 i = tx_ring->next_to_clean; 3867 i = tx_ring->next_to_clean;
3865 eop = tx_ring->buffer_info[i].next_to_watch; 3868 eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3877,6 +3880,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3877 if (cleaned) { 3880 if (cleaned) {
3878 total_tx_packets += buffer_info->segs; 3881 total_tx_packets += buffer_info->segs;
3879 total_tx_bytes += buffer_info->bytecount; 3882 total_tx_bytes += buffer_info->bytecount;
3883 if (buffer_info->skb) {
3884 bytes_compl += buffer_info->skb->len;
3885 pkts_compl++;
3886 }
3887
3880 } 3888 }
3881 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3889 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3882 tx_desc->upper.data = 0; 3890 tx_desc->upper.data = 0;
@@ -3890,6 +3898,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3890 3898
3891 tx_ring->next_to_clean = i; 3899 tx_ring->next_to_clean = i;
3892 3900
3901 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3902
3893#define TX_WAKE_THRESHOLD 32 3903#define TX_WAKE_THRESHOLD 32
3894 if (unlikely(count && netif_carrier_ok(netdev) && 3904 if (unlikely(count && netif_carrier_ok(netdev) &&
3895 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3905 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
@@ -4950,6 +4960,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4950 default: 4960 default:
4951 goto err_inval; 4961 goto err_inval;
4952 } 4962 }
4963
4964 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4965 hw->mdix = AUTO_ALL_MODES;
4966
4953 return 0; 4967 return 0;
4954 4968
4955err_inval: 4969err_inval:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 080c89093feb..c98586408005 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
653 **/ 653 **/
654static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) 654static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
655{ 655{
656 u16 data = er32(POEMB); 656 u32 data = er32(POEMB);
657 657
658 if (active) 658 if (active)
659 data |= E1000_PHY_CTRL_D0A_LPLU; 659 data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
677 **/ 677 **/
678static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) 678static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
679{ 679{
680 u16 data = er32(POEMB); 680 u32 data = er32(POEMB);
681 681
682 if (!active) { 682 if (!active) {
683 data &= ~E1000_PHY_CTRL_NOND0A_LPLU; 683 data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 0349e2478df8..c11ac2756667 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
199 else 199 else
200 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 200 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
201 201
202 if (hw->phy.mdix == AUTO_ALL_MODES)
203 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
204 else
205 ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
206
202 return 0; 207 return 0;
203} 208}
204 209
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
241 default: 246 default:
242 goto err_inval; 247 goto err_inval;
243 } 248 }
249
250 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
251 adapter->hw.phy.mdix = AUTO_ALL_MODES;
252
244 return 0; 253 return 0;
245 254
246err_inval: 255err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
264 return -EINVAL; 273 return -EINVAL;
265 } 274 }
266 275
276 /*
277 * MDI setting is only allowed when autoneg enabled because
278 * some hardware doesn't allow MDI setting when speed or
279 * duplex is forced.
280 */
281 if (ecmd->eth_tp_mdix_ctrl) {
282 if (hw->phy.media_type != e1000_media_type_copper)
283 return -EOPNOTSUPP;
284
285 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
286 (ecmd->autoneg != AUTONEG_ENABLE)) {
287 e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
288 return -EINVAL;
289 }
290 }
291
267 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 292 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
268 usleep_range(1000, 2000); 293 usleep_range(1000, 2000);
269 294
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
282 hw->fc.requested_mode = e1000_fc_default; 307 hw->fc.requested_mode = e1000_fc_default;
283 } else { 308 } else {
284 u32 speed = ethtool_cmd_speed(ecmd); 309 u32 speed = ethtool_cmd_speed(ecmd);
310 /* calling this overrides forced MDI setting */
285 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { 311 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
286 clear_bit(__E1000_RESETTING, &adapter->state); 312 clear_bit(__E1000_RESETTING, &adapter->state);
287 return -EINVAL; 313 return -EINVAL;
288 } 314 }
289 } 315 }
290 316
317 /* MDI-X => 2; MDI => 1; Auto => 3 */
318 if (ecmd->eth_tp_mdix_ctrl) {
319 /*
320 * fix up the value for auto (3 => 0) as zero is mapped
321 * internally to auto
322 */
323 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
324 hw->phy.mdix = AUTO_ALL_MODES;
325 else
326 hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
327 }
328
291 /* reset the link */ 329 /* reset the link */
292 330
293 if (netif_running(adapter->netdev)) { 331 if (netif_running(adapter->netdev)) {
294 e1000e_down(adapter); 332 e1000e_down(adapter);
295 e1000e_up(adapter); 333 e1000e_up(adapter);
296 } else { 334 } else
297 e1000e_reset(adapter); 335 e1000e_reset(adapter);
298 }
299 336
300 clear_bit(__E1000_RESETTING, &adapter->state); 337 clear_bit(__E1000_RESETTING, &adapter->state);
301 return 0; 338 return 0;
@@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
1905 return -EINVAL; 1942 return -EINVAL;
1906 1943
1907 if (ec->rx_coalesce_usecs == 4) { 1944 if (ec->rx_coalesce_usecs == 4) {
1908 adapter->itr = adapter->itr_setting = 4; 1945 adapter->itr_setting = 4;
1946 adapter->itr = adapter->itr_setting;
1909 } else if (ec->rx_coalesce_usecs <= 3) { 1947 } else if (ec->rx_coalesce_usecs <= 3) {
1910 adapter->itr = 20000; 1948 adapter->itr = 20000;
1911 adapter->itr_setting = ec->rx_coalesce_usecs; 1949 adapter->itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3f0223ac4c7c..fb659dd8db03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION 59#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3446 3446
3447 /* 3447 /*
3448 * if short on Rx space, Rx wins and must trump Tx 3448 * if short on Rx space, Rx wins and must trump Tx
3449 * adjustment or use Early Receive if available 3449 * adjustment
3450 */ 3450 */
3451 if (pba < min_rx_space) 3451 if (pba < min_rx_space)
3452 pba = min_rx_space; 3452 pba = min_rx_space;
@@ -3755,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3755 e_dbg("icr is %08X\n", icr); 3755 e_dbg("icr is %08X\n", icr);
3756 if (icr & E1000_ICR_RXSEQ) { 3756 if (icr & E1000_ICR_RXSEQ) {
3757 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 3757 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3758 /*
3759 * Force memory writes to complete before acknowledging the
3760 * interrupt is handled.
3761 */
3758 wmb(); 3762 wmb();
3759 } 3763 }
3760 3764
@@ -3796,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3796 goto msi_test_failed; 3800 goto msi_test_failed;
3797 } 3801 }
3798 3802
3803 /*
3804 * Force memory writes to complete before enabling and firing an
3805 * interrupt.
3806 */
3799 wmb(); 3807 wmb();
3800 3808
3801 e1000_irq_enable(adapter); 3809 e1000_irq_enable(adapter);
@@ -3807,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3807 3815
3808 e1000_irq_disable(adapter); 3816 e1000_irq_disable(adapter);
3809 3817
3810 rmb(); 3818 rmb(); /* read flags after interrupt has been fired */
3811 3819
3812 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 3820 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3813 adapter->int_mode = E1000E_INT_MODE_LEGACY; 3821 adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4670,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4670 struct e1000_buffer *buffer_info; 4678 struct e1000_buffer *buffer_info;
4671 unsigned int i; 4679 unsigned int i;
4672 u32 cmd_length = 0; 4680 u32 cmd_length = 0;
4673 u16 ipcse = 0, tucse, mss; 4681 u16 ipcse = 0, mss;
4674 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4682 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4675 4683
4676 if (!skb_is_gso(skb)) 4684 if (!skb_is_gso(skb))
@@ -4704,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4704 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 4712 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4705 tucss = skb_transport_offset(skb); 4713 tucss = skb_transport_offset(skb);
4706 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 4714 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4707 tucse = 0;
4708 4715
4709 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 4716 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4710 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 4717 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4718,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4718 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 4725 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4719 context_desc->upper_setup.tcp_fields.tucss = tucss; 4726 context_desc->upper_setup.tcp_fields.tucss = tucss;
4720 context_desc->upper_setup.tcp_fields.tucso = tucso; 4727 context_desc->upper_setup.tcp_fields.tucso = tucso;
4721 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 4728 context_desc->upper_setup.tcp_fields.tucse = 0;
4722 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 4729 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4723 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 4730 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4724 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 4731 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b860d4f7ea2a..fc62a3f3a5be 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
84#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 84#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
85 85
86/* I82577 PHY Control 2 */ 86/* I82577 PHY Control 2 */
87#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 87#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
88#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 88#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
89#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
89 90
90/* I82577 PHY Diagnostics Status */ 91/* I82577 PHY Diagnostics Status */
91#define I82577_DSTATUS_CABLE_LENGTH 0x03FC 92#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
702 if (ret_val) 703 if (ret_val)
703 return ret_val; 704 return ret_val;
704 705
706 /* Set MDI/MDIX mode */
707 ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
708 if (ret_val)
709 return ret_val;
710 phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
711 /*
712 * Options:
713 * 0 - Auto (default)
714 * 1 - MDI mode
715 * 2 - MDI-X mode
716 */
717 switch (hw->phy.mdix) {
718 case 1:
719 break;
720 case 2:
721 phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
722 break;
723 case 0:
724 default:
725 phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
726 break;
727 }
728 ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
729 if (ret_val)
730 return ret_val;
731
705 return e1000_set_master_slave_mode(hw); 732 return e1000_set_master_slave_mode(hw);
706} 733}
707 734
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ba994fb4cec6..ca4641e2f748 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2223,11 +2223,10 @@ out:
2223s32 igb_set_eee_i350(struct e1000_hw *hw) 2223s32 igb_set_eee_i350(struct e1000_hw *hw)
2224{ 2224{
2225 s32 ret_val = 0; 2225 s32 ret_val = 0;
2226 u32 ipcnfg, eeer, ctrl_ext; 2226 u32 ipcnfg, eeer;
2227 2227
2228 ctrl_ext = rd32(E1000_CTRL_EXT); 2228 if ((hw->mac.type < e1000_i350) ||
2229 if ((hw->mac.type != e1000_i350) || 2229 (hw->phy.media_type != e1000_media_type_copper))
2230 (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
2231 goto out; 2230 goto out;
2232 ipcnfg = rd32(E1000_IPCNFG); 2231 ipcnfg = rd32(E1000_IPCNFG);
2233 eeer = rd32(E1000_EEER); 2232 eeer = rd32(E1000_EEER);
@@ -2240,6 +2239,14 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2240 E1000_EEER_RX_LPI_EN | 2239 E1000_EEER_RX_LPI_EN |
2241 E1000_EEER_LPI_FC); 2240 E1000_EEER_LPI_FC);
2242 2241
2242 /* keep the LPI clock running before EEE is enabled */
2243 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
2244 u32 eee_su;
2245 eee_su = rd32(E1000_EEE_SU);
2246 eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
2247 wr32(E1000_EEE_SU, eee_su);
2248 }
2249
2243 } else { 2250 } else {
2244 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2251 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2245 E1000_IPCNFG_EEE_100M_AN); 2252 E1000_IPCNFG_EEE_100M_AN);
@@ -2249,6 +2256,8 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2249 } 2256 }
2250 wr32(E1000_IPCNFG, ipcnfg); 2257 wr32(E1000_IPCNFG, ipcnfg);
2251 wr32(E1000_EEER, eeer); 2258 wr32(E1000_EEER, eeer);
2259 rd32(E1000_IPCNFG);
2260 rd32(E1000_EEER);
2252out: 2261out:
2253 2262
2254 return ret_val; 2263 return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ec7e4fe3e3ee..de4b41ec3c40 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -322,6 +322,9 @@
322#define E1000_FCRTC_RTH_COAL_SHIFT 4 322#define E1000_FCRTC_RTH_COAL_SHIFT 4
323#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ 323#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
324 324
325/* Timestamp in Rx buffer */
326#define E1000_RXPBS_CFG_TS_EN 0x80000000
327
325/* SerDes Control */ 328/* SerDes Control */
326#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 329#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
327 330
@@ -360,6 +363,7 @@
360#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 363#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
361#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 364#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
362#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ 365#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
366#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
363#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ 367#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
364/* If this bit asserted, the driver should claim the interrupt */ 368/* If this bit asserted, the driver should claim the interrupt */
365#define E1000_ICR_INT_ASSERTED 0x80000000 369#define E1000_ICR_INT_ASSERTED 0x80000000
@@ -399,6 +403,7 @@
399#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 403#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
400#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 404#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
401#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ 405#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
406#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
402#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 407#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
403#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 408#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
404#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 409#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
@@ -510,6 +515,9 @@
510 515
511#define E1000_TIMINCA_16NS_SHIFT 24 516#define E1000_TIMINCA_16NS_SHIFT 24
512 517
518#define E1000_TSICR_TXTS 0x00000002
519#define E1000_TSIM_TXTS 0x00000002
520
513#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ 521#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
514#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ 522#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
515#define E1000_MDICNFG_PHY_MASK 0x03E00000 523#define E1000_MDICNFG_PHY_MASK 0x03E00000
@@ -849,8 +857,9 @@
849#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ 857#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
850#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ 858#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
851#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ 859#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
852#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ 860#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
853#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ 861#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
862#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
854 863
855/* SerDes Control */ 864/* SerDes Control */
856#define E1000_GEN_CTL_READY 0x80000000 865#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7be98b6f1052..3404bc79f4ca 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
464 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; 464 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
465 465
466 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); 466 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
467 if (ret_val)
468 goto out;
469
470 /* Set MDI/MDIX mode */
471 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
472 if (ret_val)
473 goto out;
474 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
475 /*
476 * Options:
477 * 0 - Auto (default)
478 * 1 - MDI mode
479 * 2 - MDI-X mode
480 */
481 switch (hw->phy.mdix) {
482 case 1:
483 break;
484 case 2:
485 phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
486 break;
487 case 0:
488 default:
489 phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
490 break;
491 }
492 ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
467 493
468out: 494out:
469 return ret_val; 495 return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2246 if (ret_val) 2272 if (ret_val)
2247 goto out; 2273 goto out;
2248 2274
2249 phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; 2275 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
2250 phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
2251 2276
2252 ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); 2277 ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
2253 if (ret_val) 2278 if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 34e40619f16b..6ac3299bfcb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -111,8 +111,9 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
111#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 111#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
112 112
113/* I82580 PHY Control 2 */ 113/* I82580 PHY Control 2 */
114#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 114#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200
115#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 115#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
116#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600
116 117
117/* I82580 PHY Diagnostics Status */ 118/* I82580 PHY Diagnostics Status */
118#define I82580_DSTATUS_CABLE_LENGTH 0x03FC 119#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 28394bea5253..e5db48594e8a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -91,6 +91,8 @@
91#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ 91#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
92#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ 92#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
93#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ 93#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
94#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
95#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
94 96
95/* Filtering Registers */ 97/* Filtering Registers */
96#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) 98#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -347,6 +349,7 @@
347/* Energy Efficient Ethernet "EEE" register */ 349/* Energy Efficient Ethernet "EEE" register */
348#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ 350#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
349#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ 351#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
352#define E1000_EEE_SU 0X0E34 /* EEE Setup */
350 353
351/* Thermal Sensor Register */ 354/* Thermal Sensor Register */
352#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ 355#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9e572dd29ab2..8aad230c0592 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,9 +34,11 @@
34#include "e1000_mac.h" 34#include "e1000_mac.h"
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#ifdef CONFIG_IGB_PTP
37#include <linux/clocksource.h> 38#include <linux/clocksource.h>
38#include <linux/net_tstamp.h> 39#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h> 40#include <linux/ptp_clock_kernel.h>
41#endif /* CONFIG_IGB_PTP */
40#include <linux/bitops.h> 42#include <linux/bitops.h>
41#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
42 44
@@ -99,7 +101,6 @@ struct vf_data_storage {
99 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 101 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
100 u16 pf_qos; 102 u16 pf_qos;
101 u16 tx_rate; 103 u16 tx_rate;
102 struct pci_dev *vfdev;
103}; 104};
104 105
105#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 106#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -131,9 +132,9 @@ struct vf_data_storage {
131#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 132#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
132 133
133/* Supported Rx Buffer Sizes */ 134/* Supported Rx Buffer Sizes */
134#define IGB_RXBUFFER_512 512 135#define IGB_RXBUFFER_256 256
135#define IGB_RXBUFFER_16384 16384 136#define IGB_RXBUFFER_16384 16384
136#define IGB_RX_HDR_LEN IGB_RXBUFFER_512 137#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
137 138
138/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 139/* How many Tx Descriptors do we need to call netif_wake_queue ? */
139#define IGB_TX_QUEUE_WAKE 16 140#define IGB_TX_QUEUE_WAKE 16
@@ -167,8 +168,8 @@ struct igb_tx_buffer {
167 unsigned int bytecount; 168 unsigned int bytecount;
168 u16 gso_segs; 169 u16 gso_segs;
169 __be16 protocol; 170 __be16 protocol;
170 dma_addr_t dma; 171 DEFINE_DMA_UNMAP_ADDR(dma);
171 u32 length; 172 DEFINE_DMA_UNMAP_LEN(len);
172 u32 tx_flags; 173 u32 tx_flags;
173}; 174};
174 175
@@ -212,7 +213,6 @@ struct igb_q_vector {
212 struct igb_ring_container rx, tx; 213 struct igb_ring_container rx, tx;
213 214
214 struct napi_struct napi; 215 struct napi_struct napi;
215 int numa_node;
216 216
217 u16 itr_val; 217 u16 itr_val;
218 u8 set_itr; 218 u8 set_itr;
@@ -257,7 +257,6 @@ struct igb_ring {
257 }; 257 };
258 /* Items past this point are only used during ring alloc / free */ 258 /* Items past this point are only used during ring alloc / free */
259 dma_addr_t dma; /* phys address of the ring */ 259 dma_addr_t dma; /* phys address of the ring */
260 int numa_node; /* node to alloc ring memory on */
261}; 260};
262 261
263enum e1000_ring_flags_t { 262enum e1000_ring_flags_t {
@@ -342,7 +341,6 @@ struct igb_adapter {
342 341
343 /* OS defined structs */ 342 /* OS defined structs */
344 struct pci_dev *pdev; 343 struct pci_dev *pdev;
345 struct hwtstamp_config hwtstamp_config;
346 344
347 spinlock_t stats64_lock; 345 spinlock_t stats64_lock;
348 struct rtnl_link_stats64 stats64; 346 struct rtnl_link_stats64 stats64;
@@ -373,15 +371,19 @@ struct igb_adapter {
373 int vf_rate_link_speed; 371 int vf_rate_link_speed;
374 u32 rss_queues; 372 u32 rss_queues;
375 u32 wvbr; 373 u32 wvbr;
376 int node;
377 u32 *shadow_vfta; 374 u32 *shadow_vfta;
378 375
376#ifdef CONFIG_IGB_PTP
379 struct ptp_clock *ptp_clock; 377 struct ptp_clock *ptp_clock;
380 struct ptp_clock_info caps; 378 struct ptp_clock_info ptp_caps;
381 struct delayed_work overflow_work; 379 struct delayed_work ptp_overflow_work;
380 struct work_struct ptp_tx_work;
381 struct sk_buff *ptp_tx_skb;
382 spinlock_t tmreg_lock; 382 spinlock_t tmreg_lock;
383 struct cyclecounter cc; 383 struct cyclecounter cc;
384 struct timecounter tc; 384 struct timecounter tc;
385#endif /* CONFIG_IGB_PTP */
386
385 char fw_version[32]; 387 char fw_version[32];
386}; 388};
387 389
@@ -390,6 +392,7 @@ struct igb_adapter {
390#define IGB_FLAG_QUAD_PORT_A (1 << 2) 392#define IGB_FLAG_QUAD_PORT_A (1 << 2)
391#define IGB_FLAG_QUEUE_PAIRS (1 << 3) 393#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
392#define IGB_FLAG_DMAC (1 << 4) 394#define IGB_FLAG_DMAC (1 << 4)
395#define IGB_FLAG_PTP (1 << 5)
393 396
394/* DMA Coalescing defines */ 397/* DMA Coalescing defines */
395#define IGB_MIN_TXPBSIZE 20408 398#define IGB_MIN_TXPBSIZE 20408
@@ -435,13 +438,17 @@ extern void igb_power_up_link(struct igb_adapter *);
435extern void igb_set_fw_version(struct igb_adapter *); 438extern void igb_set_fw_version(struct igb_adapter *);
436#ifdef CONFIG_IGB_PTP 439#ifdef CONFIG_IGB_PTP
437extern void igb_ptp_init(struct igb_adapter *adapter); 440extern void igb_ptp_init(struct igb_adapter *adapter);
438extern void igb_ptp_remove(struct igb_adapter *adapter); 441extern void igb_ptp_stop(struct igb_adapter *adapter);
439 442extern void igb_ptp_reset(struct igb_adapter *adapter);
440extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter, 443extern void igb_ptp_tx_work(struct work_struct *work);
441 struct skb_shared_hwtstamps *hwtstamps, 444extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
442 u64 systim); 445extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
446 union e1000_adv_rx_desc *rx_desc,
447 struct sk_buff *skb);
448extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
449 struct ifreq *ifr, int cmd);
450#endif /* CONFIG_IGB_PTP */
443 451
444#endif
445static inline s32 igb_reset_phy(struct e1000_hw *hw) 452static inline s32 igb_reset_phy(struct e1000_hw *hw)
446{ 453{
447 if (hw->phy.ops.reset) 454 if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 70591117051b..2ea012849825 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
148 SUPPORTED_100baseT_Full | 148 SUPPORTED_100baseT_Full |
149 SUPPORTED_1000baseT_Full| 149 SUPPORTED_1000baseT_Full|
150 SUPPORTED_Autoneg | 150 SUPPORTED_Autoneg |
151 SUPPORTED_TP); 151 SUPPORTED_TP |
152 ecmd->advertising = (ADVERTISED_TP | 152 SUPPORTED_Pause);
153 ADVERTISED_Pause); 153 ecmd->advertising = ADVERTISED_TP;
154 154
155 if (hw->mac.autoneg == 1) { 155 if (hw->mac.autoneg == 1) {
156 ecmd->advertising |= ADVERTISED_Autoneg; 156 ecmd->advertising |= ADVERTISED_Autoneg;
@@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
158 ecmd->advertising |= hw->phy.autoneg_advertised; 158 ecmd->advertising |= hw->phy.autoneg_advertised;
159 } 159 }
160 160
161 if (hw->mac.autoneg != 1)
162 ecmd->advertising &= ~(ADVERTISED_Pause |
163 ADVERTISED_Asym_Pause);
164
165 if (hw->fc.requested_mode == e1000_fc_full)
166 ecmd->advertising |= ADVERTISED_Pause;
167 else if (hw->fc.requested_mode == e1000_fc_rx_pause)
168 ecmd->advertising |= (ADVERTISED_Pause |
169 ADVERTISED_Asym_Pause);
170 else if (hw->fc.requested_mode == e1000_fc_tx_pause)
171 ecmd->advertising |= ADVERTISED_Asym_Pause;
172 else
173 ecmd->advertising &= ~(ADVERTISED_Pause |
174 ADVERTISED_Asym_Pause);
175
161 ecmd->port = PORT_TP; 176 ecmd->port = PORT_TP;
162 ecmd->phy_address = hw->phy.addr; 177 ecmd->phy_address = hw->phy.addr;
163 } else { 178 } else {
@@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
198 } 213 }
199 214
200 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 215 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
216
217 /* MDI-X => 2; MDI =>1; Invalid =>0 */
218 if (hw->phy.media_type == e1000_media_type_copper)
219 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
220 ETH_TP_MDI;
221 else
222 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
223
224 if (hw->phy.mdix == AUTO_ALL_MODES)
225 ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
226 else
227 ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
228
201 return 0; 229 return 0;
202} 230}
203 231
@@ -214,6 +242,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
214 return -EINVAL; 242 return -EINVAL;
215 } 243 }
216 244
245 /*
246 * MDI setting is only allowed when autoneg enabled because
247 * some hardware doesn't allow MDI setting when speed or
248 * duplex is forced.
249 */
250 if (ecmd->eth_tp_mdix_ctrl) {
251 if (hw->phy.media_type != e1000_media_type_copper)
252 return -EOPNOTSUPP;
253
254 if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
255 (ecmd->autoneg != AUTONEG_ENABLE)) {
256 dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
257 return -EINVAL;
258 }
259 }
260
217 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 261 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
218 msleep(1); 262 msleep(1);
219 263
@@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
227 hw->fc.requested_mode = e1000_fc_default; 271 hw->fc.requested_mode = e1000_fc_default;
228 } else { 272 } else {
229 u32 speed = ethtool_cmd_speed(ecmd); 273 u32 speed = ethtool_cmd_speed(ecmd);
274 /* calling this overrides forced MDI setting */
230 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { 275 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
231 clear_bit(__IGB_RESETTING, &adapter->state); 276 clear_bit(__IGB_RESETTING, &adapter->state);
232 return -EINVAL; 277 return -EINVAL;
233 } 278 }
234 } 279 }
235 280
281 /* MDI-X => 2; MDI => 1; Auto => 3 */
282 if (ecmd->eth_tp_mdix_ctrl) {
283 /*
284 * fix up the value for auto (3 => 0) as zero is mapped
285 * internally to auto
286 */
287 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
288 hw->phy.mdix = AUTO_ALL_MODES;
289 else
290 hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
291 }
292
236 /* reset the link */ 293 /* reset the link */
237 if (netif_running(adapter->netdev)) { 294 if (netif_running(adapter->netdev)) {
238 igb_down(adapter); 295 igb_down(adapter);
@@ -1469,33 +1526,22 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1469{ 1526{
1470 struct e1000_hw *hw = &adapter->hw; 1527 struct e1000_hw *hw = &adapter->hw;
1471 u32 ctrl_reg = 0; 1528 u32 ctrl_reg = 0;
1472 u16 phy_reg = 0;
1473 1529
1474 hw->mac.autoneg = false; 1530 hw->mac.autoneg = false;
1475 1531
1476 switch (hw->phy.type) { 1532 if (hw->phy.type == e1000_phy_m88) {
1477 case e1000_phy_m88: 1533 if (hw->phy.id != I210_I_PHY_ID) {
1478 /* Auto-MDI/MDIX Off */ 1534 /* Auto-MDI/MDIX Off */
1479 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1535 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1480 /* reset to update Auto-MDI/MDIX */ 1536 /* reset to update Auto-MDI/MDIX */
1481 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1537 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1482 /* autoneg off */ 1538 /* autoneg off */
1483 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1539 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1484 break; 1540 } else {
1485 case e1000_phy_82580: 1541 /* force 1000, set loopback */
1486 /* enable MII loopback */ 1542 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1487 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); 1543 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1488 break; 1544 }
1489 case e1000_phy_i210:
1490 /* set loopback speed in PHY */
1491 igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
1492 &phy_reg);
1493 phy_reg |= GS40G_MAC_SPEED_1G;
1494 igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
1495 phy_reg);
1496 ctrl_reg = rd32(E1000_CTRL_EXT);
1497 default:
1498 break;
1499 } 1545 }
1500 1546
1501 /* add small delay to avoid loopback test failure */ 1547 /* add small delay to avoid loopback test failure */
@@ -1513,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1513 E1000_CTRL_FD | /* Force Duplex to FULL */ 1559 E1000_CTRL_FD | /* Force Duplex to FULL */
1514 E1000_CTRL_SLU); /* Set link up enable bit */ 1560 E1000_CTRL_SLU); /* Set link up enable bit */
1515 1561
1516 if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) 1562 if (hw->phy.type == e1000_phy_m88)
1517 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1563 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1518 1564
1519 wr32(E1000_CTRL, ctrl_reg); 1565 wr32(E1000_CTRL, ctrl_reg);
@@ -1521,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1521 /* Disable the receiver on the PHY so when a cable is plugged in, the 1567 /* Disable the receiver on the PHY so when a cable is plugged in, the
1522 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1568 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1523 */ 1569 */
1524 if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) 1570 if (hw->phy.type == e1000_phy_m88)
1525 igb_phy_disable_receiver(adapter); 1571 igb_phy_disable_receiver(adapter);
1526 1572
1527 udelay(500); 1573 mdelay(500);
1528
1529 return 0; 1574 return 0;
1530} 1575}
1531 1576
@@ -1785,13 +1830,6 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1785 *data = 0; 1830 *data = 0;
1786 goto out; 1831 goto out;
1787 } 1832 }
1788 if ((adapter->hw.mac.type == e1000_i210)
1789 || (adapter->hw.mac.type == e1000_i211)) {
1790 dev_err(&adapter->pdev->dev,
1791 "Loopback test not supported on this part at this time.\n");
1792 *data = 0;
1793 goto out;
1794 }
1795 *data = igb_setup_desc_rings(adapter); 1833 *data = igb_setup_desc_rings(adapter);
1796 if (*data) 1834 if (*data)
1797 goto out; 1835 goto out;
@@ -2257,6 +2295,54 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2257 } 2295 }
2258} 2296}
2259 2297
2298static int igb_get_ts_info(struct net_device *dev,
2299 struct ethtool_ts_info *info)
2300{
2301 struct igb_adapter *adapter = netdev_priv(dev);
2302
2303 switch (adapter->hw.mac.type) {
2304#ifdef CONFIG_IGB_PTP
2305 case e1000_82576:
2306 case e1000_82580:
2307 case e1000_i350:
2308 case e1000_i210:
2309 case e1000_i211:
2310 info->so_timestamping =
2311 SOF_TIMESTAMPING_TX_HARDWARE |
2312 SOF_TIMESTAMPING_RX_HARDWARE |
2313 SOF_TIMESTAMPING_RAW_HARDWARE;
2314
2315 if (adapter->ptp_clock)
2316 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2317 else
2318 info->phc_index = -1;
2319
2320 info->tx_types =
2321 (1 << HWTSTAMP_TX_OFF) |
2322 (1 << HWTSTAMP_TX_ON);
2323
2324 info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
2325
2326 /* 82576 does not support timestamping all packets. */
2327 if (adapter->hw.mac.type >= e1000_82580)
2328 info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
2329 else
2330 info->rx_filters |=
2331 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2332 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2333 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2334 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2335 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
2336 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
2337 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2338
2339 return 0;
2340#endif /* CONFIG_IGB_PTP */
2341 default:
2342 return -EOPNOTSUPP;
2343 }
2344}
2345
2260static int igb_ethtool_begin(struct net_device *netdev) 2346static int igb_ethtool_begin(struct net_device *netdev)
2261{ 2347{
2262 struct igb_adapter *adapter = netdev_priv(netdev); 2348 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2270,38 +2356,6 @@ static void igb_ethtool_complete(struct net_device *netdev)
2270 pm_runtime_put(&adapter->pdev->dev); 2356 pm_runtime_put(&adapter->pdev->dev);
2271} 2357}
2272 2358
2273#ifdef CONFIG_IGB_PTP
2274static int igb_ethtool_get_ts_info(struct net_device *dev,
2275 struct ethtool_ts_info *info)
2276{
2277 struct igb_adapter *adapter = netdev_priv(dev);
2278
2279 info->so_timestamping =
2280 SOF_TIMESTAMPING_TX_HARDWARE |
2281 SOF_TIMESTAMPING_RX_HARDWARE |
2282 SOF_TIMESTAMPING_RAW_HARDWARE;
2283
2284 if (adapter->ptp_clock)
2285 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2286 else
2287 info->phc_index = -1;
2288
2289 info->tx_types =
2290 (1 << HWTSTAMP_TX_OFF) |
2291 (1 << HWTSTAMP_TX_ON);
2292
2293 info->rx_filters =
2294 (1 << HWTSTAMP_FILTER_NONE) |
2295 (1 << HWTSTAMP_FILTER_ALL) |
2296 (1 << HWTSTAMP_FILTER_SOME) |
2297 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2298 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2299 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2300
2301 return 0;
2302}
2303
2304#endif
2305static const struct ethtool_ops igb_ethtool_ops = { 2359static const struct ethtool_ops igb_ethtool_ops = {
2306 .get_settings = igb_get_settings, 2360 .get_settings = igb_get_settings,
2307 .set_settings = igb_set_settings, 2361 .set_settings = igb_set_settings,
@@ -2328,11 +2382,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
2328 .get_ethtool_stats = igb_get_ethtool_stats, 2382 .get_ethtool_stats = igb_get_ethtool_stats,
2329 .get_coalesce = igb_get_coalesce, 2383 .get_coalesce = igb_get_coalesce,
2330 .set_coalesce = igb_set_coalesce, 2384 .set_coalesce = igb_set_coalesce,
2385 .get_ts_info = igb_get_ts_info,
2331 .begin = igb_ethtool_begin, 2386 .begin = igb_ethtool_begin,
2332 .complete = igb_ethtool_complete, 2387 .complete = igb_ethtool_complete,
2333#ifdef CONFIG_IGB_PTP
2334 .get_ts_info = igb_ethtool_get_ts_info,
2335#endif
2336}; 2388};
2337 2389
2338void igb_set_ethtool_ops(struct net_device *netdev) 2390void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f88c822e57a6..e1ceb37ef12e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
172 172
173#ifdef CONFIG_PCI_IOV 173#ifdef CONFIG_PCI_IOV
174static int igb_vf_configure(struct igb_adapter *adapter, int vf); 174static int igb_vf_configure(struct igb_adapter *adapter, int vf);
175static int igb_find_enabled_vfs(struct igb_adapter *adapter); 175static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
176static int igb_check_vf_assignment(struct igb_adapter *adapter);
177#endif 176#endif
178 177
179#ifdef CONFIG_PM 178#ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
404 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; 403 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
405 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", 404 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
406 n, tx_ring->next_to_use, tx_ring->next_to_clean, 405 n, tx_ring->next_to_use, tx_ring->next_to_clean,
407 (u64)buffer_info->dma, 406 (u64)dma_unmap_addr(buffer_info, dma),
408 buffer_info->length, 407 dma_unmap_len(buffer_info, len),
409 buffer_info->next_to_watch, 408 buffer_info->next_to_watch,
410 (u64)buffer_info->time_stamp); 409 (u64)buffer_info->time_stamp);
411 } 410 }
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
456 " %04X %p %016llX %p%s\n", i, 455 " %04X %p %016llX %p%s\n", i,
457 le64_to_cpu(u0->a), 456 le64_to_cpu(u0->a),
458 le64_to_cpu(u0->b), 457 le64_to_cpu(u0->b),
459 (u64)buffer_info->dma, 458 (u64)dma_unmap_addr(buffer_info, dma),
460 buffer_info->length, 459 dma_unmap_len(buffer_info, len),
461 buffer_info->next_to_watch, 460 buffer_info->next_to_watch,
462 (u64)buffer_info->time_stamp, 461 (u64)buffer_info->time_stamp,
463 buffer_info->skb, next_desc); 462 buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
466 print_hex_dump(KERN_INFO, "", 465 print_hex_dump(KERN_INFO, "",
467 DUMP_PREFIX_ADDRESS, 466 DUMP_PREFIX_ADDRESS,
468 16, 1, buffer_info->skb->data, 467 16, 1, buffer_info->skb->data,
469 buffer_info->length, true); 468 dma_unmap_len(buffer_info, len),
469 true);
470 } 470 }
471 } 471 }
472 472
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
683{ 683{
684 struct igb_ring *ring; 684 struct igb_ring *ring;
685 int i; 685 int i;
686 int orig_node = adapter->node;
687 686
688 for (i = 0; i < adapter->num_tx_queues; i++) { 687 for (i = 0; i < adapter->num_tx_queues; i++) {
689 if (orig_node == -1) { 688 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
690 int cur_node = next_online_node(adapter->node);
691 if (cur_node == MAX_NUMNODES)
692 cur_node = first_online_node;
693 adapter->node = cur_node;
694 }
695 ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
696 adapter->node);
697 if (!ring)
698 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
699 if (!ring) 689 if (!ring)
700 goto err; 690 goto err;
701 ring->count = adapter->tx_ring_count; 691 ring->count = adapter->tx_ring_count;
702 ring->queue_index = i; 692 ring->queue_index = i;
703 ring->dev = &adapter->pdev->dev; 693 ring->dev = &adapter->pdev->dev;
704 ring->netdev = adapter->netdev; 694 ring->netdev = adapter->netdev;
705 ring->numa_node = adapter->node;
706 /* For 82575, context index must be unique per ring. */ 695 /* For 82575, context index must be unique per ring. */
707 if (adapter->hw.mac.type == e1000_82575) 696 if (adapter->hw.mac.type == e1000_82575)
708 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); 697 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
709 adapter->tx_ring[i] = ring; 698 adapter->tx_ring[i] = ring;
710 } 699 }
711 /* Restore the adapter's original node */
712 adapter->node = orig_node;
713 700
714 for (i = 0; i < adapter->num_rx_queues; i++) { 701 for (i = 0; i < adapter->num_rx_queues; i++) {
715 if (orig_node == -1) { 702 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
716 int cur_node = next_online_node(adapter->node);
717 if (cur_node == MAX_NUMNODES)
718 cur_node = first_online_node;
719 adapter->node = cur_node;
720 }
721 ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
722 adapter->node);
723 if (!ring)
724 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
725 if (!ring) 703 if (!ring)
726 goto err; 704 goto err;
727 ring->count = adapter->rx_ring_count; 705 ring->count = adapter->rx_ring_count;
728 ring->queue_index = i; 706 ring->queue_index = i;
729 ring->dev = &adapter->pdev->dev; 707 ring->dev = &adapter->pdev->dev;
730 ring->netdev = adapter->netdev; 708 ring->netdev = adapter->netdev;
731 ring->numa_node = adapter->node;
732 /* set flag indicating ring supports SCTP checksum offload */ 709 /* set flag indicating ring supports SCTP checksum offload */
733 if (adapter->hw.mac.type >= e1000_82576) 710 if (adapter->hw.mac.type >= e1000_82576)
734 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 711 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
742 719
743 adapter->rx_ring[i] = ring; 720 adapter->rx_ring[i] = ring;
744 } 721 }
745 /* Restore the adapter's original node */
746 adapter->node = orig_node;
747 722
748 igb_cache_ring_register(adapter); 723 igb_cache_ring_register(adapter);
749 724
750 return 0; 725 return 0;
751 726
752err: 727err:
753 /* Restore the adapter's original node */
754 adapter->node = orig_node;
755 igb_free_queues(adapter); 728 igb_free_queues(adapter);
756 729
757 return -ENOMEM; 730 return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1117 struct igb_q_vector *q_vector; 1090 struct igb_q_vector *q_vector;
1118 struct e1000_hw *hw = &adapter->hw; 1091 struct e1000_hw *hw = &adapter->hw;
1119 int v_idx; 1092 int v_idx;
1120 int orig_node = adapter->node;
1121 1093
1122 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 1094 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1123 if ((adapter->num_q_vectors == (adapter->num_rx_queues + 1095 q_vector = kzalloc(sizeof(struct igb_q_vector),
1124 adapter->num_tx_queues)) && 1096 GFP_KERNEL);
1125 (adapter->num_rx_queues == v_idx))
1126 adapter->node = orig_node;
1127 if (orig_node == -1) {
1128 int cur_node = next_online_node(adapter->node);
1129 if (cur_node == MAX_NUMNODES)
1130 cur_node = first_online_node;
1131 adapter->node = cur_node;
1132 }
1133 q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
1134 adapter->node);
1135 if (!q_vector)
1136 q_vector = kzalloc(sizeof(struct igb_q_vector),
1137 GFP_KERNEL);
1138 if (!q_vector) 1097 if (!q_vector)
1139 goto err_out; 1098 goto err_out;
1140 q_vector->adapter = adapter; 1099 q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1143 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); 1102 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
1144 adapter->q_vector[v_idx] = q_vector; 1103 adapter->q_vector[v_idx] = q_vector;
1145 } 1104 }
1146 /* Restore the adapter's original node */
1147 adapter->node = orig_node;
1148 1105
1149 return 0; 1106 return 0;
1150 1107
1151err_out: 1108err_out:
1152 /* Restore the adapter's original node */
1153 adapter->node = orig_node;
1154 igb_free_q_vectors(adapter); 1109 igb_free_q_vectors(adapter);
1155 return -ENOMEM; 1110 return -ENOMEM;
1156} 1111}
@@ -1751,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter)
1751 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1706 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1752 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1707 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1753 1708
1709#ifdef CONFIG_IGB_PTP
1710 /* Re-enable PTP, where applicable. */
1711 igb_ptp_reset(adapter);
1712#endif /* CONFIG_IGB_PTP */
1713
1754 igb_get_phy_info(hw); 1714 igb_get_phy_info(hw);
1755} 1715}
1756 1716
@@ -2180,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2180 } 2140 }
2181 2141
2182#endif 2142#endif
2143
2183#ifdef CONFIG_IGB_PTP 2144#ifdef CONFIG_IGB_PTP
2184 /* do hw tstamp init after resetting */ 2145 /* do hw tstamp init after resetting */
2185 igb_ptp_init(adapter); 2146 igb_ptp_init(adapter);
2147#endif /* CONFIG_IGB_PTP */
2186 2148
2187#endif
2188 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2149 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2189 /* print bus type/speed/width info */ 2150 /* print bus type/speed/width info */
2190 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2151 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2259,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2259 2220
2260 pm_runtime_get_noresume(&pdev->dev); 2221 pm_runtime_get_noresume(&pdev->dev);
2261#ifdef CONFIG_IGB_PTP 2222#ifdef CONFIG_IGB_PTP
2262 igb_ptp_remove(adapter); 2223 igb_ptp_stop(adapter);
2224#endif /* CONFIG_IGB_PTP */
2263 2225
2264#endif
2265 /* 2226 /*
2266 * The watchdog timer may be rescheduled, so explicitly 2227 * The watchdog timer may be rescheduled, so explicitly
2267 * disable watchdog from being rescheduled. 2228 * disable watchdog from being rescheduled.
@@ -2294,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2294 /* reclaim resources allocated to VFs */ 2255 /* reclaim resources allocated to VFs */
2295 if (adapter->vf_data) { 2256 if (adapter->vf_data) {
2296 /* disable iov and allow time for transactions to clear */ 2257 /* disable iov and allow time for transactions to clear */
2297 if (!igb_check_vf_assignment(adapter)) { 2258 if (igb_vfs_are_assigned(adapter)) {
2259 dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2260 } else {
2298 pci_disable_sriov(pdev); 2261 pci_disable_sriov(pdev);
2299 msleep(500); 2262 msleep(500);
2300 } else {
2301 dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
2302 } 2263 }
2303 2264
2304 kfree(adapter->vf_data); 2265 kfree(adapter->vf_data);
@@ -2338,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2338#ifdef CONFIG_PCI_IOV 2299#ifdef CONFIG_PCI_IOV
2339 struct pci_dev *pdev = adapter->pdev; 2300 struct pci_dev *pdev = adapter->pdev;
2340 struct e1000_hw *hw = &adapter->hw; 2301 struct e1000_hw *hw = &adapter->hw;
2341 int old_vfs = igb_find_enabled_vfs(adapter); 2302 int old_vfs = pci_num_vf(adapter->pdev);
2342 int i; 2303 int i;
2343 2304
2344 /* Virtualization features not supported on i210 family. */ 2305 /* Virtualization features not supported on i210 family. */
@@ -2418,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2418 VLAN_HLEN; 2379 VLAN_HLEN;
2419 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 2380 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2420 2381
2421 adapter->node = -1;
2422
2423 spin_lock_init(&adapter->stats64_lock); 2382 spin_lock_init(&adapter->stats64_lock);
2424#ifdef CONFIG_PCI_IOV 2383#ifdef CONFIG_PCI_IOV
2425 switch (hw->mac.type) { 2384 switch (hw->mac.type) {
@@ -2666,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
2666int igb_setup_tx_resources(struct igb_ring *tx_ring) 2625int igb_setup_tx_resources(struct igb_ring *tx_ring)
2667{ 2626{
2668 struct device *dev = tx_ring->dev; 2627 struct device *dev = tx_ring->dev;
2669 int orig_node = dev_to_node(dev);
2670 int size; 2628 int size;
2671 2629
2672 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 2630 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
2673 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); 2631
2674 if (!tx_ring->tx_buffer_info) 2632 tx_ring->tx_buffer_info = vzalloc(size);
2675 tx_ring->tx_buffer_info = vzalloc(size);
2676 if (!tx_ring->tx_buffer_info) 2633 if (!tx_ring->tx_buffer_info)
2677 goto err; 2634 goto err;
2678 2635
@@ -2680,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2680 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2637 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2681 tx_ring->size = ALIGN(tx_ring->size, 4096); 2638 tx_ring->size = ALIGN(tx_ring->size, 4096);
2682 2639
2683 set_dev_node(dev, tx_ring->numa_node);
2684 tx_ring->desc = dma_alloc_coherent(dev, 2640 tx_ring->desc = dma_alloc_coherent(dev,
2685 tx_ring->size, 2641 tx_ring->size,
2686 &tx_ring->dma, 2642 &tx_ring->dma,
2687 GFP_KERNEL); 2643 GFP_KERNEL);
2688 set_dev_node(dev, orig_node);
2689 if (!tx_ring->desc)
2690 tx_ring->desc = dma_alloc_coherent(dev,
2691 tx_ring->size,
2692 &tx_ring->dma,
2693 GFP_KERNEL);
2694
2695 if (!tx_ring->desc) 2644 if (!tx_ring->desc)
2696 goto err; 2645 goto err;
2697 2646
@@ -2702,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2702 2651
2703err: 2652err:
2704 vfree(tx_ring->tx_buffer_info); 2653 vfree(tx_ring->tx_buffer_info);
2705 dev_err(dev, 2654 tx_ring->tx_buffer_info = NULL;
2706 "Unable to allocate memory for the transmit descriptor ring\n"); 2655 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
2707 return -ENOMEM; 2656 return -ENOMEM;
2708} 2657}
2709 2658
@@ -2820,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2820int igb_setup_rx_resources(struct igb_ring *rx_ring) 2769int igb_setup_rx_resources(struct igb_ring *rx_ring)
2821{ 2770{
2822 struct device *dev = rx_ring->dev; 2771 struct device *dev = rx_ring->dev;
2823 int orig_node = dev_to_node(dev); 2772 int size;
2824 int size, desc_len;
2825 2773
2826 size = sizeof(struct igb_rx_buffer) * rx_ring->count; 2774 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
2827 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); 2775
2828 if (!rx_ring->rx_buffer_info) 2776 rx_ring->rx_buffer_info = vzalloc(size);
2829 rx_ring->rx_buffer_info = vzalloc(size);
2830 if (!rx_ring->rx_buffer_info) 2777 if (!rx_ring->rx_buffer_info)
2831 goto err; 2778 goto err;
2832 2779
2833 desc_len = sizeof(union e1000_adv_rx_desc);
2834 2780
2835 /* Round up to nearest 4K */ 2781 /* Round up to nearest 4K */
2836 rx_ring->size = rx_ring->count * desc_len; 2782 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
2837 rx_ring->size = ALIGN(rx_ring->size, 4096); 2783 rx_ring->size = ALIGN(rx_ring->size, 4096);
2838 2784
2839 set_dev_node(dev, rx_ring->numa_node);
2840 rx_ring->desc = dma_alloc_coherent(dev, 2785 rx_ring->desc = dma_alloc_coherent(dev,
2841 rx_ring->size, 2786 rx_ring->size,
2842 &rx_ring->dma, 2787 &rx_ring->dma,
2843 GFP_KERNEL); 2788 GFP_KERNEL);
2844 set_dev_node(dev, orig_node);
2845 if (!rx_ring->desc)
2846 rx_ring->desc = dma_alloc_coherent(dev,
2847 rx_ring->size,
2848 &rx_ring->dma,
2849 GFP_KERNEL);
2850
2851 if (!rx_ring->desc) 2789 if (!rx_ring->desc)
2852 goto err; 2790 goto err;
2853 2791
@@ -2859,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2859err: 2797err:
2860 vfree(rx_ring->rx_buffer_info); 2798 vfree(rx_ring->rx_buffer_info);
2861 rx_ring->rx_buffer_info = NULL; 2799 rx_ring->rx_buffer_info = NULL;
2862 dev_err(dev, "Unable to allocate memory for the receive descriptor" 2800 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
2863 " ring\n");
2864 return -ENOMEM; 2801 return -ENOMEM;
2865} 2802}
2866 2803
@@ -2898,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2898{ 2835{
2899 struct e1000_hw *hw = &adapter->hw; 2836 struct e1000_hw *hw = &adapter->hw;
2900 u32 mrqc, rxcsum; 2837 u32 mrqc, rxcsum;
2901 u32 j, num_rx_queues, shift = 0, shift2 = 0; 2838 u32 j, num_rx_queues, shift = 0;
2902 union e1000_reta { 2839 static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
2903 u32 dword; 2840 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
2904 u8 bytes[4]; 2841 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
2905 } reta; 2842 0xFA01ACBE };
2906 static const u8 rsshash[40] = {
2907 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2908 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2909 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2910 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2911 2843
2912 /* Fill out hash function seeds */ 2844 /* Fill out hash function seeds */
2913 for (j = 0; j < 10; j++) { 2845 for (j = 0; j < 10; j++)
2914 u32 rsskey = rsshash[(j * 4)]; 2846 wr32(E1000_RSSRK(j), rsskey[j]);
2915 rsskey |= rsshash[(j * 4) + 1] << 8;
2916 rsskey |= rsshash[(j * 4) + 2] << 16;
2917 rsskey |= rsshash[(j * 4) + 3] << 24;
2918 array_wr32(E1000_RSSRK(0), j, rsskey);
2919 }
2920 2847
2921 num_rx_queues = adapter->rss_queues; 2848 num_rx_queues = adapter->rss_queues;
2922 2849
2923 if (adapter->vfs_allocated_count) { 2850 switch (hw->mac.type) {
2924 /* 82575 and 82576 supports 2 RSS queues for VMDq */ 2851 case e1000_82575:
2925 switch (hw->mac.type) { 2852 shift = 6;
2926 case e1000_i350: 2853 break;
2927 case e1000_82580: 2854 case e1000_82576:
2928 num_rx_queues = 1; 2855 /* 82576 supports 2 RSS queues for SR-IOV */
2929 shift = 0; 2856 if (adapter->vfs_allocated_count) {
2930 break;
2931 case e1000_82576:
2932 shift = 3; 2857 shift = 3;
2933 num_rx_queues = 2; 2858 num_rx_queues = 2;
2934 break;
2935 case e1000_82575:
2936 shift = 2;
2937 shift2 = 6;
2938 default:
2939 break;
2940 } 2859 }
2941 } else { 2860 break;
2942 if (hw->mac.type == e1000_82575) 2861 default:
2943 shift = 6; 2862 break;
2944 } 2863 }
2945 2864
2946 for (j = 0; j < (32 * 4); j++) { 2865 /*
2947 reta.bytes[j & 3] = (j % num_rx_queues) << shift; 2866 * Populate the indirection table 4 entries at a time. To do this
2948 if (shift2) 2867 * we are generating the results for n and n+2 and then interleaving
2949 reta.bytes[j & 3] |= num_rx_queues << shift2; 2868 * those with the results with n+1 and n+3.
2950 if ((j & 3) == 3) 2869 */
2951 wr32(E1000_RETA(j >> 2), reta.dword); 2870 for (j = 0; j < 32; j++) {
2871 /* first pass generates n and n+2 */
2872 u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
2873 u32 reta = (base & 0x07800780) >> (7 - shift);
2874
2875 /* second pass generates n+1 and n+3 */
2876 base += 0x00010001 * num_rx_queues;
2877 reta |= (base & 0x07800780) << (1 + shift);
2878
2879 wr32(E1000_RETA(j), reta);
2952 } 2880 }
2953 2881
2954 /* 2882 /*
@@ -3184,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3184 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT; 3112 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3185#endif 3113#endif
3186 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 3114 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3115#ifdef CONFIG_IGB_PTP
3187 if (hw->mac.type >= e1000_82580) 3116 if (hw->mac.type >= e1000_82580)
3188 srrctl |= E1000_SRRCTL_TIMESTAMP; 3117 srrctl |= E1000_SRRCTL_TIMESTAMP;
3118#endif /* CONFIG_IGB_PTP */
3189 /* Only set Drop Enable if we are supporting multiple queues */ 3119 /* Only set Drop Enable if we are supporting multiple queues */
3190 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 3120 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3191 srrctl |= E1000_SRRCTL_DROP_EN; 3121 srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3269,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3269{ 3199{
3270 if (tx_buffer->skb) { 3200 if (tx_buffer->skb) {
3271 dev_kfree_skb_any(tx_buffer->skb); 3201 dev_kfree_skb_any(tx_buffer->skb);
3272 if (tx_buffer->dma) 3202 if (dma_unmap_len(tx_buffer, len))
3273 dma_unmap_single(ring->dev, 3203 dma_unmap_single(ring->dev,
3274 tx_buffer->dma, 3204 dma_unmap_addr(tx_buffer, dma),
3275 tx_buffer->length, 3205 dma_unmap_len(tx_buffer, len),
3276 DMA_TO_DEVICE); 3206 DMA_TO_DEVICE);
3277 } else if (tx_buffer->dma) { 3207 } else if (dma_unmap_len(tx_buffer, len)) {
3278 dma_unmap_page(ring->dev, 3208 dma_unmap_page(ring->dev,
3279 tx_buffer->dma, 3209 dma_unmap_addr(tx_buffer, dma),
3280 tx_buffer->length, 3210 dma_unmap_len(tx_buffer, len),
3281 DMA_TO_DEVICE); 3211 DMA_TO_DEVICE);
3282 } 3212 }
3283 tx_buffer->next_to_watch = NULL; 3213 tx_buffer->next_to_watch = NULL;
3284 tx_buffer->skb = NULL; 3214 tx_buffer->skb = NULL;
3285 tx_buffer->dma = 0; 3215 dma_unmap_len_set(tx_buffer, len, 0);
3286 /* buffer_info must be completely set up in the transmit path */ 3216 /* buffer_info must be completely set up in the transmit path */
3287} 3217}
3288 3218
@@ -4229,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
4229 if (tx_flags & IGB_TX_FLAGS_VLAN) 4159 if (tx_flags & IGB_TX_FLAGS_VLAN)
4230 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE); 4160 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4231 4161
4162#ifdef CONFIG_IGB_PTP
4232 /* set timestamp bit if present */ 4163 /* set timestamp bit if present */
4233 if (tx_flags & IGB_TX_FLAGS_TSTAMP) 4164 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
4234 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); 4165 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4166#endif /* CONFIG_IGB_PTP */
4235 4167
4236 /* set segmentation bits for TSO */ 4168 /* set segmentation bits for TSO */
4237 if (tx_flags & IGB_TX_FLAGS_TSO) 4169 if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4275,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4275 const u8 hdr_len) 4207 const u8 hdr_len)
4276{ 4208{
4277 struct sk_buff *skb = first->skb; 4209 struct sk_buff *skb = first->skb;
4278 struct igb_tx_buffer *tx_buffer_info; 4210 struct igb_tx_buffer *tx_buffer;
4279 union e1000_adv_tx_desc *tx_desc; 4211 union e1000_adv_tx_desc *tx_desc;
4280 dma_addr_t dma; 4212 dma_addr_t dma;
4281 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 4213 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4296,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4296 goto dma_error; 4228 goto dma_error;
4297 4229
4298 /* record length, and DMA address */ 4230 /* record length, and DMA address */
4299 first->length = size; 4231 dma_unmap_len_set(first, len, size);
4300 first->dma = dma; 4232 dma_unmap_addr_set(first, dma, dma);
4301 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4233 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4302 4234
4303 for (;;) { 4235 for (;;) {
@@ -4339,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4339 if (dma_mapping_error(tx_ring->dev, dma)) 4271 if (dma_mapping_error(tx_ring->dev, dma))
4340 goto dma_error; 4272 goto dma_error;
4341 4273
4342 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4274 tx_buffer = &tx_ring->tx_buffer_info[i];
4343 tx_buffer_info->length = size; 4275 dma_unmap_len_set(tx_buffer, len, size);
4344 tx_buffer_info->dma = dma; 4276 dma_unmap_addr_set(tx_buffer, dma, dma);
4345 4277
4346 tx_desc->read.olinfo_status = 0; 4278 tx_desc->read.olinfo_status = 0;
4347 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4279 tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4392,9 +4324,9 @@ dma_error:
4392 4324
4393 /* clear dma mappings for failed tx_buffer_info map */ 4325 /* clear dma mappings for failed tx_buffer_info map */
4394 for (;;) { 4326 for (;;) {
4395 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4327 tx_buffer = &tx_ring->tx_buffer_info[i];
4396 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 4328 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
4397 if (tx_buffer_info == first) 4329 if (tx_buffer == first)
4398 break; 4330 break;
4399 if (i == 0) 4331 if (i == 0)
4400 i = tx_ring->count; 4332 i = tx_ring->count;
@@ -4440,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4440netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4372netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4441 struct igb_ring *tx_ring) 4373 struct igb_ring *tx_ring)
4442{ 4374{
4375#ifdef CONFIG_IGB_PTP
4376 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4377#endif /* CONFIG_IGB_PTP */
4443 struct igb_tx_buffer *first; 4378 struct igb_tx_buffer *first;
4444 int tso; 4379 int tso;
4445 u32 tx_flags = 0; 4380 u32 tx_flags = 0;
@@ -4462,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4462 first->bytecount = skb->len; 4397 first->bytecount = skb->len;
4463 first->gso_segs = 1; 4398 first->gso_segs = 1;
4464 4399
4465 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 4400#ifdef CONFIG_IGB_PTP
4401 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4402 !(adapter->ptp_tx_skb))) {
4466 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4403 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4467 tx_flags |= IGB_TX_FLAGS_TSTAMP; 4404 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4405
4406 adapter->ptp_tx_skb = skb_get(skb);
4407 if (adapter->hw.mac.type == e1000_82576)
4408 schedule_work(&adapter->ptp_tx_work);
4468 } 4409 }
4410#endif /* CONFIG_IGB_PTP */
4469 4411
4470 if (vlan_tx_tag_present(skb)) { 4412 if (vlan_tx_tag_present(skb)) {
4471 tx_flags |= IGB_TX_FLAGS_VLAN; 4413 tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4661,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter,
4661 bytes = 0; 4603 bytes = 0;
4662 packets = 0; 4604 packets = 0;
4663 for (i = 0; i < adapter->num_rx_queues; i++) { 4605 for (i = 0; i < adapter->num_rx_queues; i++) {
4664 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; 4606 u32 rqdpc = rd32(E1000_RQDPC(i));
4665 struct igb_ring *ring = adapter->rx_ring[i]; 4607 struct igb_ring *ring = adapter->rx_ring[i];
4666 4608
4667 ring->rx_stats.drops += rqdpc_tmp; 4609 if (rqdpc) {
4668 net_stats->rx_fifo_errors += rqdpc_tmp; 4610 ring->rx_stats.drops += rqdpc;
4611 net_stats->rx_fifo_errors += rqdpc;
4612 }
4669 4613
4670 do { 4614 do {
4671 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 4615 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
@@ -4755,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
4755 reg = rd32(E1000_CTRL_EXT); 4699 reg = rd32(E1000_CTRL_EXT);
4756 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { 4700 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4757 adapter->stats.rxerrc += rd32(E1000_RXERRC); 4701 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4758 adapter->stats.tncrs += rd32(E1000_TNCRS); 4702
4703 /* this stat has invalid values on i210/i211 */
4704 if ((hw->mac.type != e1000_i210) &&
4705 (hw->mac.type != e1000_i211))
4706 adapter->stats.tncrs += rd32(E1000_TNCRS);
4759 } 4707 }
4760 4708
4761 adapter->stats.tsctc += rd32(E1000_TSCTC); 4709 adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -4852,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4852 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4800 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4853 } 4801 }
4854 4802
4803#ifdef CONFIG_IGB_PTP
4804 if (icr & E1000_ICR_TS) {
4805 u32 tsicr = rd32(E1000_TSICR);
4806
4807 if (tsicr & E1000_TSICR_TXTS) {
4808 /* acknowledge the interrupt */
4809 wr32(E1000_TSICR, E1000_TSICR_TXTS);
4810 /* retrieve hardware timestamp */
4811 schedule_work(&adapter->ptp_tx_work);
4812 }
4813 }
4814#endif /* CONFIG_IGB_PTP */
4815
4855 wr32(E1000_EIMS, adapter->eims_other); 4816 wr32(E1000_EIMS, adapter->eims_other);
4856 4817
4857 return IRQ_HANDLED; 4818 return IRQ_HANDLED;
@@ -5002,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5002static int igb_vf_configure(struct igb_adapter *adapter, int vf) 4963static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5003{ 4964{
5004 unsigned char mac_addr[ETH_ALEN]; 4965 unsigned char mac_addr[ETH_ALEN];
5005 struct pci_dev *pdev = adapter->pdev;
5006 struct e1000_hw *hw = &adapter->hw;
5007 struct pci_dev *pvfdev;
5008 unsigned int device_id;
5009 u16 thisvf_devfn;
5010 4966
5011 eth_random_addr(mac_addr); 4967 eth_random_addr(mac_addr);
5012 igb_set_vf_mac(adapter, vf, mac_addr); 4968 igb_set_vf_mac(adapter, vf, mac_addr);
5013 4969
5014 switch (adapter->hw.mac.type) { 4970 return 0;
5015 case e1000_82576:
5016 device_id = IGB_82576_VF_DEV_ID;
5017 /* VF Stride for 82576 is 2 */
5018 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
5019 (pdev->devfn & 1);
5020 break;
5021 case e1000_i350:
5022 device_id = IGB_I350_VF_DEV_ID;
5023 /* VF Stride for I350 is 4 */
5024 thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
5025 (pdev->devfn & 3);
5026 break;
5027 default:
5028 device_id = 0;
5029 thisvf_devfn = 0;
5030 break;
5031 }
5032
5033 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5034 while (pvfdev) {
5035 if (pvfdev->devfn == thisvf_devfn)
5036 break;
5037 pvfdev = pci_get_device(hw->vendor_id,
5038 device_id, pvfdev);
5039 }
5040
5041 if (pvfdev)
5042 adapter->vf_data[vf].vfdev = pvfdev;
5043 else
5044 dev_err(&pdev->dev,
5045 "Couldn't find pci dev ptr for VF %4.4x\n",
5046 thisvf_devfn);
5047 return pvfdev != NULL;
5048} 4971}
5049 4972
5050static int igb_find_enabled_vfs(struct igb_adapter *adapter) 4973static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
5051{ 4974{
5052 struct e1000_hw *hw = &adapter->hw;
5053 struct pci_dev *pdev = adapter->pdev; 4975 struct pci_dev *pdev = adapter->pdev;
5054 struct pci_dev *pvfdev; 4976 struct pci_dev *vfdev;
5055 u16 vf_devfn = 0; 4977 int dev_id;
5056 u16 vf_stride;
5057 unsigned int device_id;
5058 int vfs_found = 0;
5059 4978
5060 switch (adapter->hw.mac.type) { 4979 switch (adapter->hw.mac.type) {
5061 case e1000_82576: 4980 case e1000_82576:
5062 device_id = IGB_82576_VF_DEV_ID; 4981 dev_id = IGB_82576_VF_DEV_ID;
5063 /* VF Stride for 82576 is 2 */
5064 vf_stride = 2;
5065 break; 4982 break;
5066 case e1000_i350: 4983 case e1000_i350:
5067 device_id = IGB_I350_VF_DEV_ID; 4984 dev_id = IGB_I350_VF_DEV_ID;
5068 /* VF Stride for I350 is 4 */
5069 vf_stride = 4;
5070 break; 4985 break;
5071 default: 4986 default:
5072 device_id = 0; 4987 return false;
5073 vf_stride = 0;
5074 break;
5075 }
5076
5077 vf_devfn = pdev->devfn + 0x80;
5078 pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
5079 while (pvfdev) {
5080 if (pvfdev->devfn == vf_devfn &&
5081 (pvfdev->bus->number >= pdev->bus->number))
5082 vfs_found++;
5083 vf_devfn += vf_stride;
5084 pvfdev = pci_get_device(hw->vendor_id,
5085 device_id, pvfdev);
5086 } 4988 }
5087 4989
5088 return vfs_found; 4990 /* loop through all the VFs to see if we own any that are assigned */
5089} 4991 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
5090 4992 while (vfdev) {
5091static int igb_check_vf_assignment(struct igb_adapter *adapter) 4993 /* if we don't own it we don't care */
5092{ 4994 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5093 int i; 4995 /* if it is assigned we cannot release it */
5094 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4996 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
5095 if (adapter->vf_data[i].vfdev) {
5096 if (adapter->vf_data[i].vfdev->dev_flags &
5097 PCI_DEV_FLAGS_ASSIGNED)
5098 return true; 4997 return true;
5099 } 4998 }
4999
5000 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
5100 } 5001 }
5002
5101 return false; 5003 return false;
5102} 5004}
5103 5005
@@ -5643,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5643 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5545 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5644 } 5546 }
5645 5547
5548#ifdef CONFIG_IGB_PTP
5549 if (icr & E1000_ICR_TS) {
5550 u32 tsicr = rd32(E1000_TSICR);
5551
5552 if (tsicr & E1000_TSICR_TXTS) {
5553 /* acknowledge the interrupt */
5554 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5555 /* retrieve hardware timestamp */
5556 schedule_work(&adapter->ptp_tx_work);
5557 }
5558 }
5559#endif /* CONFIG_IGB_PTP */
5560
5646 napi_schedule(&q_vector->napi); 5561 napi_schedule(&q_vector->napi);
5647 5562
5648 return IRQ_HANDLED; 5563 return IRQ_HANDLED;
@@ -5684,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data)
5684 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5599 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5685 } 5600 }
5686 5601
5602#ifdef CONFIG_IGB_PTP
5603 if (icr & E1000_ICR_TS) {
5604 u32 tsicr = rd32(E1000_TSICR);
5605
5606 if (tsicr & E1000_TSICR_TXTS) {
5607 /* acknowledge the interrupt */
5608 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5609 /* retrieve hardware timestamp */
5610 schedule_work(&adapter->ptp_tx_work);
5611 }
5612 }
5613#endif /* CONFIG_IGB_PTP */
5614
5687 napi_schedule(&q_vector->napi); 5615 napi_schedule(&q_vector->napi);
5688 5616
5689 return IRQ_HANDLED; 5617 return IRQ_HANDLED;
@@ -5743,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget)
5743 return 0; 5671 return 0;
5744} 5672}
5745 5673
5746#ifdef CONFIG_IGB_PTP
5747/**
5748 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5749 * @q_vector: pointer to q_vector containing needed info
5750 * @buffer: pointer to igb_tx_buffer structure
5751 *
5752 * If we were asked to do hardware stamping and such a time stamp is
5753 * available, then it must have been for this skb here because we only
5754 * allow only one such packet into the queue.
5755 */
5756static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5757 struct igb_tx_buffer *buffer_info)
5758{
5759 struct igb_adapter *adapter = q_vector->adapter;
5760 struct e1000_hw *hw = &adapter->hw;
5761 struct skb_shared_hwtstamps shhwtstamps;
5762 u64 regval;
5763
5764 /* if skb does not support hw timestamp or TX stamp not valid exit */
5765 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
5766 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5767 return;
5768
5769 regval = rd32(E1000_TXSTMPL);
5770 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5771
5772 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
5773 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5774}
5775
5776#endif
5777/** 5674/**
5778 * igb_clean_tx_irq - Reclaim resources after transmit completes 5675 * igb_clean_tx_irq - Reclaim resources after transmit completes
5779 * @q_vector: pointer to q_vector containing needed info 5676 * @q_vector: pointer to q_vector containing needed info
@@ -5785,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5785 struct igb_adapter *adapter = q_vector->adapter; 5682 struct igb_adapter *adapter = q_vector->adapter;
5786 struct igb_ring *tx_ring = q_vector->tx.ring; 5683 struct igb_ring *tx_ring = q_vector->tx.ring;
5787 struct igb_tx_buffer *tx_buffer; 5684 struct igb_tx_buffer *tx_buffer;
5788 union e1000_adv_tx_desc *tx_desc, *eop_desc; 5685 union e1000_adv_tx_desc *tx_desc;
5789 unsigned int total_bytes = 0, total_packets = 0; 5686 unsigned int total_bytes = 0, total_packets = 0;
5790 unsigned int budget = q_vector->tx.work_limit; 5687 unsigned int budget = q_vector->tx.work_limit;
5791 unsigned int i = tx_ring->next_to_clean; 5688 unsigned int i = tx_ring->next_to_clean;
@@ -5797,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5797 tx_desc = IGB_TX_DESC(tx_ring, i); 5694 tx_desc = IGB_TX_DESC(tx_ring, i);
5798 i -= tx_ring->count; 5695 i -= tx_ring->count;
5799 5696
5800 for (; budget; budget--) { 5697 do {
5801 eop_desc = tx_buffer->next_to_watch; 5698 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
5802
5803 /* prevent any other reads prior to eop_desc */
5804 rmb();
5805 5699
5806 /* if next_to_watch is not set then there is no work pending */ 5700 /* if next_to_watch is not set then there is no work pending */
5807 if (!eop_desc) 5701 if (!eop_desc)
5808 break; 5702 break;
5809 5703
5704 /* prevent any other reads prior to eop_desc */
5705 rmb();
5706
5810 /* if DD is not set pending work has not been completed */ 5707 /* if DD is not set pending work has not been completed */
5811 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) 5708 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5812 break; 5709 break;
@@ -5818,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5818 total_bytes += tx_buffer->bytecount; 5715 total_bytes += tx_buffer->bytecount;
5819 total_packets += tx_buffer->gso_segs; 5716 total_packets += tx_buffer->gso_segs;
5820 5717
5821#ifdef CONFIG_IGB_PTP
5822 /* retrieve hardware timestamp */
5823 igb_tx_hwtstamp(q_vector, tx_buffer);
5824
5825#endif
5826 /* free the skb */ 5718 /* free the skb */
5827 dev_kfree_skb_any(tx_buffer->skb); 5719 dev_kfree_skb_any(tx_buffer->skb);
5828 tx_buffer->skb = NULL;
5829 5720
5830 /* unmap skb header data */ 5721 /* unmap skb header data */
5831 dma_unmap_single(tx_ring->dev, 5722 dma_unmap_single(tx_ring->dev,
5832 tx_buffer->dma, 5723 dma_unmap_addr(tx_buffer, dma),
5833 tx_buffer->length, 5724 dma_unmap_len(tx_buffer, len),
5834 DMA_TO_DEVICE); 5725 DMA_TO_DEVICE);
5835 5726
5727 /* clear tx_buffer data */
5728 tx_buffer->skb = NULL;
5729 dma_unmap_len_set(tx_buffer, len, 0);
5730
5836 /* clear last DMA location and unmap remaining buffers */ 5731 /* clear last DMA location and unmap remaining buffers */
5837 while (tx_desc != eop_desc) { 5732 while (tx_desc != eop_desc) {
5838 tx_buffer->dma = 0;
5839
5840 tx_buffer++; 5733 tx_buffer++;
5841 tx_desc++; 5734 tx_desc++;
5842 i++; 5735 i++;
@@ -5847,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5847 } 5740 }
5848 5741
5849 /* unmap any remaining paged data */ 5742 /* unmap any remaining paged data */
5850 if (tx_buffer->dma) { 5743 if (dma_unmap_len(tx_buffer, len)) {
5851 dma_unmap_page(tx_ring->dev, 5744 dma_unmap_page(tx_ring->dev,
5852 tx_buffer->dma, 5745 dma_unmap_addr(tx_buffer, dma),
5853 tx_buffer->length, 5746 dma_unmap_len(tx_buffer, len),
5854 DMA_TO_DEVICE); 5747 DMA_TO_DEVICE);
5748 dma_unmap_len_set(tx_buffer, len, 0);
5855 } 5749 }
5856 } 5750 }
5857 5751
5858 /* clear last DMA location */
5859 tx_buffer->dma = 0;
5860
5861 /* move us one more past the eop_desc for start of next pkt */ 5752 /* move us one more past the eop_desc for start of next pkt */
5862 tx_buffer++; 5753 tx_buffer++;
5863 tx_desc++; 5754 tx_desc++;
@@ -5867,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5867 tx_buffer = tx_ring->tx_buffer_info; 5758 tx_buffer = tx_ring->tx_buffer_info;
5868 tx_desc = IGB_TX_DESC(tx_ring, 0); 5759 tx_desc = IGB_TX_DESC(tx_ring, 0);
5869 } 5760 }
5870 } 5761
5762 /* issue prefetch for next Tx descriptor */
5763 prefetch(tx_desc);
5764
5765 /* update budget accounting */
5766 budget--;
5767 } while (likely(budget));
5871 5768
5872 netdev_tx_completed_queue(txring_txq(tx_ring), 5769 netdev_tx_completed_queue(txring_txq(tx_ring),
5873 total_packets, total_bytes); 5770 total_packets, total_bytes);
@@ -5883,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5883 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { 5780 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
5884 struct e1000_hw *hw = &adapter->hw; 5781 struct e1000_hw *hw = &adapter->hw;
5885 5782
5886 eop_desc = tx_buffer->next_to_watch;
5887
5888 /* Detect a transmit hang in hardware, this serializes the 5783 /* Detect a transmit hang in hardware, this serializes the
5889 * check with the clearing of time_stamp and movement of i */ 5784 * check with the clearing of time_stamp and movement of i */
5890 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 5785 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5891 if (eop_desc && 5786 if (tx_buffer->next_to_watch &&
5892 time_after(jiffies, tx_buffer->time_stamp + 5787 time_after(jiffies, tx_buffer->time_stamp +
5893 (adapter->tx_timeout_factor * HZ)) && 5788 (adapter->tx_timeout_factor * HZ)) &&
5894 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { 5789 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5912,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5912 tx_ring->next_to_use, 5807 tx_ring->next_to_use,
5913 tx_ring->next_to_clean, 5808 tx_ring->next_to_clean,
5914 tx_buffer->time_stamp, 5809 tx_buffer->time_stamp,
5915 eop_desc, 5810 tx_buffer->next_to_watch,
5916 jiffies, 5811 jiffies,
5917 eop_desc->wb.status); 5812 tx_buffer->next_to_watch->wb.status);
5918 netif_stop_subqueue(tx_ring->netdev, 5813 netif_stop_subqueue(tx_ring->netdev,
5919 tx_ring->queue_index); 5814 tx_ring->queue_index);
5920 5815
@@ -5994,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5994 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 5889 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5995} 5890}
5996 5891
5997#ifdef CONFIG_IGB_PTP
5998static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5999 union e1000_adv_rx_desc *rx_desc,
6000 struct sk_buff *skb)
6001{
6002 struct igb_adapter *adapter = q_vector->adapter;
6003 struct e1000_hw *hw = &adapter->hw;
6004 u64 regval;
6005
6006 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
6007 E1000_RXDADV_STAT_TS))
6008 return;
6009
6010 /*
6011 * If this bit is set, then the RX registers contain the time stamp. No
6012 * other packet will be time stamped until we read these registers, so
6013 * read the registers to make them available again. Because only one
6014 * packet can be time stamped at a time, we know that the register
6015 * values must belong to this one here and therefore we don't need to
6016 * compare any of the additional attributes stored for it.
6017 *
6018 * If nothing went wrong, then it should have a shared tx_flags that we
6019 * can turn into a skb_shared_hwtstamps.
6020 */
6021 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6022 u32 *stamp = (u32 *)skb->data;
6023 regval = le32_to_cpu(*(stamp + 2));
6024 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
6025 skb_pull(skb, IGB_TS_HDR_LEN);
6026 } else {
6027 if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
6028 return;
6029
6030 regval = rd32(E1000_RXSTMPL);
6031 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
6032 }
6033
6034 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6035}
6036
6037#endif
6038static void igb_rx_vlan(struct igb_ring *ring, 5892static void igb_rx_vlan(struct igb_ring *ring,
6039 union e1000_adv_rx_desc *rx_desc, 5893 union e1000_adv_rx_desc *rx_desc,
6040 struct sk_buff *skb) 5894 struct sk_buff *skb)
@@ -6146,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
6146 } 6000 }
6147 6001
6148#ifdef CONFIG_IGB_PTP 6002#ifdef CONFIG_IGB_PTP
6149 igb_rx_hwtstamp(q_vector, rx_desc, skb); 6003 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
6150#endif 6004#endif /* CONFIG_IGB_PTP */
6151 igb_rx_hash(rx_ring, rx_desc, skb); 6005 igb_rx_hash(rx_ring, rx_desc, skb);
6152 igb_rx_checksum(rx_ring, rx_desc, skb); 6006 igb_rx_checksum(rx_ring, rx_desc, skb);
6153 igb_rx_vlan(rx_ring, rx_desc, skb); 6007 igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6341,181 +6195,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6341} 6195}
6342 6196
6343/** 6197/**
6344 * igb_hwtstamp_ioctl - control hardware time stamping
6345 * @netdev:
6346 * @ifreq:
6347 * @cmd:
6348 *
6349 * Outgoing time stamping can be enabled and disabled. Play nice and
6350 * disable it when requested, although it shouldn't case any overhead
6351 * when no packet needs it. At most one packet in the queue may be
6352 * marked for time stamping, otherwise it would be impossible to tell
6353 * for sure to which packet the hardware time stamp belongs.
6354 *
6355 * Incoming time stamping has to be configured via the hardware
6356 * filters. Not all combinations are supported, in particular event
6357 * type has to be specified. Matching the kind of event packet is
6358 * not supported, with the exception of "all V2 events regardless of
6359 * level 2 or 4".
6360 *
6361 **/
6362static int igb_hwtstamp_ioctl(struct net_device *netdev,
6363 struct ifreq *ifr, int cmd)
6364{
6365 struct igb_adapter *adapter = netdev_priv(netdev);
6366 struct e1000_hw *hw = &adapter->hw;
6367 struct hwtstamp_config config;
6368 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6369 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6370 u32 tsync_rx_cfg = 0;
6371 bool is_l4 = false;
6372 bool is_l2 = false;
6373 u32 regval;
6374
6375 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6376 return -EFAULT;
6377
6378 /* reserved for future extensions */
6379 if (config.flags)
6380 return -EINVAL;
6381
6382 switch (config.tx_type) {
6383 case HWTSTAMP_TX_OFF:
6384 tsync_tx_ctl = 0;
6385 case HWTSTAMP_TX_ON:
6386 break;
6387 default:
6388 return -ERANGE;
6389 }
6390
6391 switch (config.rx_filter) {
6392 case HWTSTAMP_FILTER_NONE:
6393 tsync_rx_ctl = 0;
6394 break;
6395 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6396 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6397 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6398 case HWTSTAMP_FILTER_ALL:
6399 /*
6400 * register TSYNCRXCFG must be set, therefore it is not
6401 * possible to time stamp both Sync and Delay_Req messages
6402 * => fall back to time stamping all packets
6403 */
6404 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6405 config.rx_filter = HWTSTAMP_FILTER_ALL;
6406 break;
6407 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6408 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
6409 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
6410 is_l4 = true;
6411 break;
6412 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6413 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
6414 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
6415 is_l4 = true;
6416 break;
6417 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6418 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6419 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
6420 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
6421 is_l2 = true;
6422 is_l4 = true;
6423 config.rx_filter = HWTSTAMP_FILTER_SOME;
6424 break;
6425 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6426 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6427 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
6428 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
6429 is_l2 = true;
6430 is_l4 = true;
6431 config.rx_filter = HWTSTAMP_FILTER_SOME;
6432 break;
6433 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6434 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6435 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6436 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
6437 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
6438 is_l2 = true;
6439 is_l4 = true;
6440 break;
6441 default:
6442 return -ERANGE;
6443 }
6444
6445 if (hw->mac.type == e1000_82575) {
6446 if (tsync_rx_ctl | tsync_tx_ctl)
6447 return -EINVAL;
6448 return 0;
6449 }
6450
6451 /*
6452 * Per-packet timestamping only works if all packets are
6453 * timestamped, so enable timestamping in all packets as
6454 * long as one rx filter was configured.
6455 */
6456 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
6457 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6458 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6459 }
6460
6461 /* enable/disable TX */
6462 regval = rd32(E1000_TSYNCTXCTL);
6463 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6464 regval |= tsync_tx_ctl;
6465 wr32(E1000_TSYNCTXCTL, regval);
6466
6467 /* enable/disable RX */
6468 regval = rd32(E1000_TSYNCRXCTL);
6469 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6470 regval |= tsync_rx_ctl;
6471 wr32(E1000_TSYNCRXCTL, regval);
6472
6473 /* define which PTP packets are time stamped */
6474 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
6475
6476 /* define ethertype filter for timestamped packets */
6477 if (is_l2)
6478 wr32(E1000_ETQF(3),
6479 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6480 E1000_ETQF_1588 | /* enable timestamping */
6481 ETH_P_1588)); /* 1588 eth protocol type */
6482 else
6483 wr32(E1000_ETQF(3), 0);
6484
6485#define PTP_PORT 319
6486 /* L4 Queue Filter[3]: filter by destination port and protocol */
6487 if (is_l4) {
6488 u32 ftqf = (IPPROTO_UDP /* UDP */
6489 | E1000_FTQF_VF_BP /* VF not compared */
6490 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6491 | E1000_FTQF_MASK); /* mask all inputs */
6492 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
6493
6494 wr32(E1000_IMIR(3), htons(PTP_PORT));
6495 wr32(E1000_IMIREXT(3),
6496 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6497 if (hw->mac.type == e1000_82576) {
6498 /* enable source port check */
6499 wr32(E1000_SPQF(3), htons(PTP_PORT));
6500 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6501 }
6502 wr32(E1000_FTQF(3), ftqf);
6503 } else {
6504 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6505 }
6506 wrfl();
6507
6508 adapter->hwtstamp_config = config;
6509
6510 /* clear TX/RX time stamp registers, just to be sure */
6511 regval = rd32(E1000_TXSTMPH);
6512 regval = rd32(E1000_RXSTMPH);
6513
6514 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6515 -EFAULT : 0;
6516}
6517
6518/**
6519 * igb_ioctl - 6198 * igb_ioctl -
6520 * @netdev: 6199 * @netdev:
6521 * @ifreq: 6200 * @ifreq:
@@ -6528,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6528 case SIOCGMIIREG: 6207 case SIOCGMIIREG:
6529 case SIOCSMIIREG: 6208 case SIOCSMIIREG:
6530 return igb_mii_ioctl(netdev, ifr, cmd); 6209 return igb_mii_ioctl(netdev, ifr, cmd);
6210#ifdef CONFIG_IGB_PTP
6531 case SIOCSHWTSTAMP: 6211 case SIOCSHWTSTAMP:
6532 return igb_hwtstamp_ioctl(netdev, ifr, cmd); 6212 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
6213#endif /* CONFIG_IGB_PTP */
6533 default: 6214 default:
6534 return -EOPNOTSUPP; 6215 return -EOPNOTSUPP;
6535 } 6216 }
@@ -6667,6 +6348,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
6667 default: 6348 default:
6668 goto err_inval; 6349 goto err_inval;
6669 } 6350 }
6351
6352 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6353 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6354
6670 return 0; 6355 return 0;
6671 6356
6672err_inval: 6357err_inval:
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c846ea9131a3..ee21445157a3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,22 +69,22 @@
69 * 2^40 * 10^-9 / 60 = 18.3 minutes. 69 * 2^40 * 10^-9 / 60 = 18.3 minutes.
70 */ 70 */
71 71
72#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9) 72#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) 73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) 74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40 76#define IGB_NBITS_82580 40
77 77
78/* 78/*
79 * SYSTIM read access for the 82576 79 * SYSTIM read access for the 82576
80 */ 80 */
81 81
82static cycle_t igb_82576_systim_read(const struct cyclecounter *cc) 82static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
83{ 83{
84 u64 val;
85 u32 lo, hi;
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 84 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
87 struct e1000_hw *hw = &igb->hw; 85 struct e1000_hw *hw = &igb->hw;
86 u64 val;
87 u32 lo, hi;
88 88
89 lo = rd32(E1000_SYSTIML); 89 lo = rd32(E1000_SYSTIML);
90 hi = rd32(E1000_SYSTIMH); 90 hi = rd32(E1000_SYSTIMH);
@@ -99,12 +99,12 @@ static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
99 * SYSTIM read access for the 82580 99 * SYSTIM read access for the 82580
100 */ 100 */
101 101
102static cycle_t igb_82580_systim_read(const struct cyclecounter *cc) 102static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
103{ 103{
104 u64 val;
105 u32 lo, hi, jk;
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 104 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
107 struct e1000_hw *hw = &igb->hw; 105 struct e1000_hw *hw = &igb->hw;
106 u64 val;
107 u32 lo, hi, jk;
108 108
109 /* 109 /*
110 * The timestamp latches on lowest register read. For the 82580 110 * The timestamp latches on lowest register read. For the 82580
@@ -122,16 +122,101 @@ static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
122} 122}
123 123
124/* 124/*
125 * SYSTIM read access for I210/I211
126 */
127
128static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
129{
130 struct e1000_hw *hw = &adapter->hw;
131 u32 sec, nsec, jk;
132
133 /*
134 * The timestamp latches on lowest register read. For I210/I211, the
135 * lowest register is SYSTIMR. Since we only need to provide nanosecond
136 * resolution, we can ignore it.
137 */
138 jk = rd32(E1000_SYSTIMR);
139 nsec = rd32(E1000_SYSTIML);
140 sec = rd32(E1000_SYSTIMH);
141
142 ts->tv_sec = sec;
143 ts->tv_nsec = nsec;
144}
145
146static void igb_ptp_write_i210(struct igb_adapter *adapter,
147 const struct timespec *ts)
148{
149 struct e1000_hw *hw = &adapter->hw;
150
151 /*
152 * Writing the SYSTIMR register is not necessary as it only provides
153 * sub-nanosecond resolution.
154 */
155 wr32(E1000_SYSTIML, ts->tv_nsec);
156 wr32(E1000_SYSTIMH, ts->tv_sec);
157}
158
159/**
160 * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
161 * @adapter: board private structure
162 * @hwtstamps: timestamp structure to update
163 * @systim: unsigned 64bit system time value.
164 *
165 * We need to convert the system time value stored in the RX/TXSTMP registers
166 * into a hwtstamp which can be used by the upper level timestamping functions.
167 *
168 * The 'tmreg_lock' spinlock is used to protect the consistency of the
169 * system time value. This is needed because reading the 64 bit time
170 * value involves reading two (or three) 32 bit registers. The first
171 * read latches the value. Ditto for writing.
172 *
173 * In addition, here have extended the system time with an overflow
174 * counter in software.
175 **/
176static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
177 struct skb_shared_hwtstamps *hwtstamps,
178 u64 systim)
179{
180 unsigned long flags;
181 u64 ns;
182
183 switch (adapter->hw.mac.type) {
184 case e1000_82576:
185 case e1000_82580:
186 case e1000_i350:
187 spin_lock_irqsave(&adapter->tmreg_lock, flags);
188
189 ns = timecounter_cyc2time(&adapter->tc, systim);
190
191 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
192
193 memset(hwtstamps, 0, sizeof(*hwtstamps));
194 hwtstamps->hwtstamp = ns_to_ktime(ns);
195 break;
196 case e1000_i210:
197 case e1000_i211:
198 memset(hwtstamps, 0, sizeof(*hwtstamps));
199 /* Upper 32 bits contain s, lower 32 bits contain ns. */
200 hwtstamps->hwtstamp = ktime_set(systim >> 32,
201 systim & 0xFFFFFFFF);
202 break;
203 default:
204 break;
205 }
206}
207
208/*
125 * PTP clock operations 209 * PTP clock operations
126 */ 210 */
127 211
128static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 212static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
129{ 213{
214 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
215 ptp_caps);
216 struct e1000_hw *hw = &igb->hw;
217 int neg_adj = 0;
130 u64 rate; 218 u64 rate;
131 u32 incvalue; 219 u32 incvalue;
132 int neg_adj = 0;
133 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
134 struct e1000_hw *hw = &igb->hw;
135 220
136 if (ppb < 0) { 221 if (ppb < 0) {
137 neg_adj = 1; 222 neg_adj = 1;
@@ -153,13 +238,14 @@ static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
153 return 0; 238 return 0;
154} 239}
155 240
156static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 241static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
157{ 242{
243 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
244 ptp_caps);
245 struct e1000_hw *hw = &igb->hw;
246 int neg_adj = 0;
158 u64 rate; 247 u64 rate;
159 u32 inca; 248 u32 inca;
160 int neg_adj = 0;
161 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
162 struct e1000_hw *hw = &igb->hw;
163 249
164 if (ppb < 0) { 250 if (ppb < 0) {
165 neg_adj = 1; 251 neg_adj = 1;
@@ -178,11 +264,12 @@ static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
178 return 0; 264 return 0;
179} 265}
180 266
181static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta) 267static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
182{ 268{
183 s64 now; 269 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
270 ptp_caps);
184 unsigned long flags; 271 unsigned long flags;
185 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); 272 s64 now;
186 273
187 spin_lock_irqsave(&igb->tmreg_lock, flags); 274 spin_lock_irqsave(&igb->tmreg_lock, flags);
188 275
@@ -195,12 +282,32 @@ static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
195 return 0; 282 return 0;
196} 283}
197 284
198static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts) 285static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
199{ 286{
287 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
288 ptp_caps);
289 unsigned long flags;
290 struct timespec now, then = ns_to_timespec(delta);
291
292 spin_lock_irqsave(&igb->tmreg_lock, flags);
293
294 igb_ptp_read_i210(igb, &now);
295 now = timespec_add(now, then);
296 igb_ptp_write_i210(igb, (const struct timespec *)&now);
297
298 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
299
300 return 0;
301}
302
303static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
304 struct timespec *ts)
305{
306 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
307 ptp_caps);
308 unsigned long flags;
200 u64 ns; 309 u64 ns;
201 u32 remainder; 310 u32 remainder;
202 unsigned long flags;
203 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
204 311
205 spin_lock_irqsave(&igb->tmreg_lock, flags); 312 spin_lock_irqsave(&igb->tmreg_lock, flags);
206 313
@@ -214,11 +321,29 @@ static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
214 return 0; 321 return 0;
215} 322}
216 323
217static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts) 324static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
325 struct timespec *ts)
218{ 326{
219 u64 ns; 327 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
328 ptp_caps);
220 unsigned long flags; 329 unsigned long flags;
221 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); 330
331 spin_lock_irqsave(&igb->tmreg_lock, flags);
332
333 igb_ptp_read_i210(igb, ts);
334
335 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
336
337 return 0;
338}
339
340static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
341 const struct timespec *ts)
342{
343 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
344 ptp_caps);
345 unsigned long flags;
346 u64 ns;
222 347
223 ns = ts->tv_sec * 1000000000ULL; 348 ns = ts->tv_sec * 1000000000ULL;
224 ns += ts->tv_nsec; 349 ns += ts->tv_nsec;
@@ -232,77 +357,369 @@ static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
232 return 0; 357 return 0;
233} 358}
234 359
235static int ptp_82576_enable(struct ptp_clock_info *ptp, 360static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
236 struct ptp_clock_request *rq, int on) 361 const struct timespec *ts)
237{ 362{
238 return -EOPNOTSUPP; 363 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
364 ptp_caps);
365 unsigned long flags;
366
367 spin_lock_irqsave(&igb->tmreg_lock, flags);
368
369 igb_ptp_write_i210(igb, ts);
370
371 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
372
373 return 0;
239} 374}
240 375
241static int ptp_82580_enable(struct ptp_clock_info *ptp, 376static int igb_ptp_enable(struct ptp_clock_info *ptp,
242 struct ptp_clock_request *rq, int on) 377 struct ptp_clock_request *rq, int on)
243{ 378{
244 return -EOPNOTSUPP; 379 return -EOPNOTSUPP;
245} 380}
246 381
247static void igb_overflow_check(struct work_struct *work) 382/**
383 * igb_ptp_tx_work
384 * @work: pointer to work struct
385 *
386 * This work function polls the TSYNCTXCTL valid bit to determine when a
387 * timestamp has been taken for the current stored skb.
388 */
389void igb_ptp_tx_work(struct work_struct *work)
390{
391 struct igb_adapter *adapter = container_of(work, struct igb_adapter,
392 ptp_tx_work);
393 struct e1000_hw *hw = &adapter->hw;
394 u32 tsynctxctl;
395
396 if (!adapter->ptp_tx_skb)
397 return;
398
399 tsynctxctl = rd32(E1000_TSYNCTXCTL);
400 if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
401 igb_ptp_tx_hwtstamp(adapter);
402 else
403 /* reschedule to check later */
404 schedule_work(&adapter->ptp_tx_work);
405}
406
407static void igb_ptp_overflow_check(struct work_struct *work)
248{ 408{
249 struct timespec ts;
250 struct igb_adapter *igb = 409 struct igb_adapter *igb =
251 container_of(work, struct igb_adapter, overflow_work.work); 410 container_of(work, struct igb_adapter, ptp_overflow_work.work);
411 struct timespec ts;
252 412
253 igb_gettime(&igb->caps, &ts); 413 igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
254 414
255 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); 415 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
256 416
257 schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD); 417 schedule_delayed_work(&igb->ptp_overflow_work,
418 IGB_SYSTIM_OVERFLOW_PERIOD);
419}
420
421/**
422 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
423 * @adapter: Board private structure.
424 *
425 * If we were asked to do hardware stamping and such a time stamp is
426 * available, then it must have been for this skb here because we only
427 * allow only one such packet into the queue.
428 */
429void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
430{
431 struct e1000_hw *hw = &adapter->hw;
432 struct skb_shared_hwtstamps shhwtstamps;
433 u64 regval;
434
435 regval = rd32(E1000_TXSTMPL);
436 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
437
438 igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
439 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
440 dev_kfree_skb_any(adapter->ptp_tx_skb);
441 adapter->ptp_tx_skb = NULL;
442}
443
444void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
445 union e1000_adv_rx_desc *rx_desc,
446 struct sk_buff *skb)
447{
448 struct igb_adapter *adapter = q_vector->adapter;
449 struct e1000_hw *hw = &adapter->hw;
450 u64 regval;
451
452 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
453 E1000_RXDADV_STAT_TS))
454 return;
455
456 /*
457 * If this bit is set, then the RX registers contain the time stamp. No
458 * other packet will be time stamped until we read these registers, so
459 * read the registers to make them available again. Because only one
460 * packet can be time stamped at a time, we know that the register
461 * values must belong to this one here and therefore we don't need to
462 * compare any of the additional attributes stored for it.
463 *
464 * If nothing went wrong, then it should have a shared tx_flags that we
465 * can turn into a skb_shared_hwtstamps.
466 */
467 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
468 u32 *stamp = (u32 *)skb->data;
469 regval = le32_to_cpu(*(stamp + 2));
470 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
471 skb_pull(skb, IGB_TS_HDR_LEN);
472 } else {
473 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
474 return;
475
476 regval = rd32(E1000_RXSTMPL);
477 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
478 }
479
480 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
481}
482
483/**
484 * igb_ptp_hwtstamp_ioctl - control hardware time stamping
485 * @netdev:
486 * @ifreq:
487 * @cmd:
488 *
489 * Outgoing time stamping can be enabled and disabled. Play nice and
490 * disable it when requested, although it shouldn't case any overhead
491 * when no packet needs it. At most one packet in the queue may be
492 * marked for time stamping, otherwise it would be impossible to tell
493 * for sure to which packet the hardware time stamp belongs.
494 *
495 * Incoming time stamping has to be configured via the hardware
496 * filters. Not all combinations are supported, in particular event
497 * type has to be specified. Matching the kind of event packet is
498 * not supported, with the exception of "all V2 events regardless of
499 * level 2 or 4".
500 *
501 **/
502int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
503 struct ifreq *ifr, int cmd)
504{
505 struct igb_adapter *adapter = netdev_priv(netdev);
506 struct e1000_hw *hw = &adapter->hw;
507 struct hwtstamp_config config;
508 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
509 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
510 u32 tsync_rx_cfg = 0;
511 bool is_l4 = false;
512 bool is_l2 = false;
513 u32 regval;
514
515 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
516 return -EFAULT;
517
518 /* reserved for future extensions */
519 if (config.flags)
520 return -EINVAL;
521
522 switch (config.tx_type) {
523 case HWTSTAMP_TX_OFF:
524 tsync_tx_ctl = 0;
525 case HWTSTAMP_TX_ON:
526 break;
527 default:
528 return -ERANGE;
529 }
530
531 switch (config.rx_filter) {
532 case HWTSTAMP_FILTER_NONE:
533 tsync_rx_ctl = 0;
534 break;
535 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
536 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
537 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
538 case HWTSTAMP_FILTER_ALL:
539 /*
540 * register TSYNCRXCFG must be set, therefore it is not
541 * possible to time stamp both Sync and Delay_Req messages
542 * => fall back to time stamping all packets
543 */
544 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
545 config.rx_filter = HWTSTAMP_FILTER_ALL;
546 break;
547 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
548 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
549 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
550 is_l4 = true;
551 break;
552 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
553 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
554 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
555 is_l4 = true;
556 break;
557 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
558 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
559 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
560 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
561 is_l2 = true;
562 is_l4 = true;
563 config.rx_filter = HWTSTAMP_FILTER_SOME;
564 break;
565 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
566 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
567 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
568 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
569 is_l2 = true;
570 is_l4 = true;
571 config.rx_filter = HWTSTAMP_FILTER_SOME;
572 break;
573 case HWTSTAMP_FILTER_PTP_V2_EVENT:
574 case HWTSTAMP_FILTER_PTP_V2_SYNC:
575 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
576 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
577 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
578 is_l2 = true;
579 is_l4 = true;
580 break;
581 default:
582 return -ERANGE;
583 }
584
585 if (hw->mac.type == e1000_82575) {
586 if (tsync_rx_ctl | tsync_tx_ctl)
587 return -EINVAL;
588 return 0;
589 }
590
591 /*
592 * Per-packet timestamping only works if all packets are
593 * timestamped, so enable timestamping in all packets as
594 * long as one rx filter was configured.
595 */
596 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
597 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
598 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
599
600 if ((hw->mac.type == e1000_i210) ||
601 (hw->mac.type == e1000_i211)) {
602 regval = rd32(E1000_RXPBS);
603 regval |= E1000_RXPBS_CFG_TS_EN;
604 wr32(E1000_RXPBS, regval);
605 }
606 }
607
608 /* enable/disable TX */
609 regval = rd32(E1000_TSYNCTXCTL);
610 regval &= ~E1000_TSYNCTXCTL_ENABLED;
611 regval |= tsync_tx_ctl;
612 wr32(E1000_TSYNCTXCTL, regval);
613
614 /* enable/disable RX */
615 regval = rd32(E1000_TSYNCRXCTL);
616 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
617 regval |= tsync_rx_ctl;
618 wr32(E1000_TSYNCRXCTL, regval);
619
620 /* define which PTP packets are time stamped */
621 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
622
623 /* define ethertype filter for timestamped packets */
624 if (is_l2)
625 wr32(E1000_ETQF(3),
626 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
627 E1000_ETQF_1588 | /* enable timestamping */
628 ETH_P_1588)); /* 1588 eth protocol type */
629 else
630 wr32(E1000_ETQF(3), 0);
631
632#define PTP_PORT 319
633 /* L4 Queue Filter[3]: filter by destination port and protocol */
634 if (is_l4) {
635 u32 ftqf = (IPPROTO_UDP /* UDP */
636 | E1000_FTQF_VF_BP /* VF not compared */
637 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
638 | E1000_FTQF_MASK); /* mask all inputs */
639 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
640
641 wr32(E1000_IMIR(3), htons(PTP_PORT));
642 wr32(E1000_IMIREXT(3),
643 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
644 if (hw->mac.type == e1000_82576) {
645 /* enable source port check */
646 wr32(E1000_SPQF(3), htons(PTP_PORT));
647 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
648 }
649 wr32(E1000_FTQF(3), ftqf);
650 } else {
651 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
652 }
653 wrfl();
654
655 /* clear TX/RX time stamp registers, just to be sure */
656 regval = rd32(E1000_TXSTMPL);
657 regval = rd32(E1000_TXSTMPH);
658 regval = rd32(E1000_RXSTMPL);
659 regval = rd32(E1000_RXSTMPH);
660
661 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
662 -EFAULT : 0;
258} 663}
259 664
260void igb_ptp_init(struct igb_adapter *adapter) 665void igb_ptp_init(struct igb_adapter *adapter)
261{ 666{
262 struct e1000_hw *hw = &adapter->hw; 667 struct e1000_hw *hw = &adapter->hw;
668 struct net_device *netdev = adapter->netdev;
263 669
264 switch (hw->mac.type) { 670 switch (hw->mac.type) {
265 case e1000_i210: 671 case e1000_82576:
266 case e1000_i211: 672 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
267 case e1000_i350: 673 adapter->ptp_caps.owner = THIS_MODULE;
674 adapter->ptp_caps.max_adj = 1000000000;
675 adapter->ptp_caps.n_ext_ts = 0;
676 adapter->ptp_caps.pps = 0;
677 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
678 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
679 adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
680 adapter->ptp_caps.settime = igb_ptp_settime_82576;
681 adapter->ptp_caps.enable = igb_ptp_enable;
682 adapter->cc.read = igb_ptp_read_82576;
683 adapter->cc.mask = CLOCKSOURCE_MASK(64);
684 adapter->cc.mult = 1;
685 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
686 /* Dial the nominal frequency. */
687 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
688 break;
268 case e1000_82580: 689 case e1000_82580:
269 adapter->caps.owner = THIS_MODULE; 690 case e1000_i350:
270 strcpy(adapter->caps.name, "igb-82580"); 691 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
271 adapter->caps.max_adj = 62499999; 692 adapter->ptp_caps.owner = THIS_MODULE;
272 adapter->caps.n_ext_ts = 0; 693 adapter->ptp_caps.max_adj = 62499999;
273 adapter->caps.pps = 0; 694 adapter->ptp_caps.n_ext_ts = 0;
274 adapter->caps.adjfreq = ptp_82580_adjfreq; 695 adapter->ptp_caps.pps = 0;
275 adapter->caps.adjtime = igb_adjtime; 696 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
276 adapter->caps.gettime = igb_gettime; 697 adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
277 adapter->caps.settime = igb_settime; 698 adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
278 adapter->caps.enable = ptp_82580_enable; 699 adapter->ptp_caps.settime = igb_ptp_settime_82576;
279 adapter->cc.read = igb_82580_systim_read; 700 adapter->ptp_caps.enable = igb_ptp_enable;
280 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); 701 adapter->cc.read = igb_ptp_read_82580;
281 adapter->cc.mult = 1; 702 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
282 adapter->cc.shift = 0; 703 adapter->cc.mult = 1;
704 adapter->cc.shift = 0;
283 /* Enable the timer functions by clearing bit 31. */ 705 /* Enable the timer functions by clearing bit 31. */
284 wr32(E1000_TSAUXC, 0x0); 706 wr32(E1000_TSAUXC, 0x0);
285 break; 707 break;
286 708 case e1000_i210:
287 case e1000_82576: 709 case e1000_i211:
288 adapter->caps.owner = THIS_MODULE; 710 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
289 strcpy(adapter->caps.name, "igb-82576"); 711 adapter->ptp_caps.owner = THIS_MODULE;
290 adapter->caps.max_adj = 1000000000; 712 adapter->ptp_caps.max_adj = 62499999;
291 adapter->caps.n_ext_ts = 0; 713 adapter->ptp_caps.n_ext_ts = 0;
292 adapter->caps.pps = 0; 714 adapter->ptp_caps.pps = 0;
293 adapter->caps.adjfreq = ptp_82576_adjfreq; 715 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
294 adapter->caps.adjtime = igb_adjtime; 716 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
295 adapter->caps.gettime = igb_gettime; 717 adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
296 adapter->caps.settime = igb_settime; 718 adapter->ptp_caps.settime = igb_ptp_settime_i210;
297 adapter->caps.enable = ptp_82576_enable; 719 adapter->ptp_caps.enable = igb_ptp_enable;
298 adapter->cc.read = igb_82576_systim_read; 720 /* Enable the timer functions by clearing bit 31. */
299 adapter->cc.mask = CLOCKSOURCE_MASK(64); 721 wr32(E1000_TSAUXC, 0x0);
300 adapter->cc.mult = 1;
301 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
302 /* Dial the nominal frequency. */
303 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
304 break; 722 break;
305
306 default: 723 default:
307 adapter->ptp_clock = NULL; 724 adapter->ptp_clock = NULL;
308 return; 725 return;
@@ -310,86 +727,114 @@ void igb_ptp_init(struct igb_adapter *adapter)
310 727
311 wrfl(); 728 wrfl();
312 729
313 timecounter_init(&adapter->tc, &adapter->cc, 730 spin_lock_init(&adapter->tmreg_lock);
314 ktime_to_ns(ktime_get_real())); 731 INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
732
733 /* Initialize the clock and overflow work for devices that need it. */
734 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
735 struct timespec ts = ktime_to_timespec(ktime_get_real());
315 736
316 INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check); 737 igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
738 } else {
739 timecounter_init(&adapter->tc, &adapter->cc,
740 ktime_to_ns(ktime_get_real()));
317 741
318 spin_lock_init(&adapter->tmreg_lock); 742 INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
743 igb_ptp_overflow_check);
319 744
320 schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD); 745 schedule_delayed_work(&adapter->ptp_overflow_work,
746 IGB_SYSTIM_OVERFLOW_PERIOD);
747 }
748
749 /* Initialize the time sync interrupts for devices that support it. */
750 if (hw->mac.type >= e1000_82580) {
751 wr32(E1000_TSIM, E1000_TSIM_TXTS);
752 wr32(E1000_IMS, E1000_IMS_TS);
753 }
321 754
322 adapter->ptp_clock = ptp_clock_register(&adapter->caps); 755 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
756 &adapter->pdev->dev);
323 if (IS_ERR(adapter->ptp_clock)) { 757 if (IS_ERR(adapter->ptp_clock)) {
324 adapter->ptp_clock = NULL; 758 adapter->ptp_clock = NULL;
325 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); 759 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
326 } else 760 } else {
327 dev_info(&adapter->pdev->dev, "added PHC on %s\n", 761 dev_info(&adapter->pdev->dev, "added PHC on %s\n",
328 adapter->netdev->name); 762 adapter->netdev->name);
763 adapter->flags |= IGB_FLAG_PTP;
764 }
329} 765}
330 766
331void igb_ptp_remove(struct igb_adapter *adapter) 767/**
768 * igb_ptp_stop - Disable PTP device and stop the overflow check.
769 * @adapter: Board private structure.
770 *
771 * This function stops the PTP support and cancels the delayed work.
772 **/
773void igb_ptp_stop(struct igb_adapter *adapter)
332{ 774{
333 switch (adapter->hw.mac.type) { 775 switch (adapter->hw.mac.type) {
334 case e1000_i211:
335 case e1000_i210:
336 case e1000_i350:
337 case e1000_82580:
338 case e1000_82576: 776 case e1000_82576:
339 cancel_delayed_work_sync(&adapter->overflow_work); 777 case e1000_82580:
778 case e1000_i350:
779 cancel_delayed_work_sync(&adapter->ptp_overflow_work);
780 break;
781 case e1000_i210:
782 case e1000_i211:
783 /* No delayed work to cancel. */
340 break; 784 break;
341 default: 785 default:
342 return; 786 return;
343 } 787 }
344 788
789 cancel_work_sync(&adapter->ptp_tx_work);
790
345 if (adapter->ptp_clock) { 791 if (adapter->ptp_clock) {
346 ptp_clock_unregister(adapter->ptp_clock); 792 ptp_clock_unregister(adapter->ptp_clock);
347 dev_info(&adapter->pdev->dev, "removed PHC on %s\n", 793 dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
348 adapter->netdev->name); 794 adapter->netdev->name);
795 adapter->flags &= ~IGB_FLAG_PTP;
349 } 796 }
350} 797}
351 798
352/** 799/**
353 * igb_systim_to_hwtstamp - convert system time value to hw timestamp 800 * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
354 * @adapter: board private structure 801 * @adapter: Board private structure.
355 * @hwtstamps: timestamp structure to update
356 * @systim: unsigned 64bit system time value.
357 *
358 * We need to convert the system time value stored in the RX/TXSTMP registers
359 * into a hwtstamp which can be used by the upper level timestamping functions.
360 * 802 *
361 * The 'tmreg_lock' spinlock is used to protect the consistency of the 803 * This function handles the reset work required to re-enable the PTP device.
362 * system time value. This is needed because reading the 64 bit time
363 * value involves reading two (or three) 32 bit registers. The first
364 * read latches the value. Ditto for writing.
365 *
366 * In addition, here have extended the system time with an overflow
367 * counter in software.
368 **/ 804 **/
369void igb_systim_to_hwtstamp(struct igb_adapter *adapter, 805void igb_ptp_reset(struct igb_adapter *adapter)
370 struct skb_shared_hwtstamps *hwtstamps,
371 u64 systim)
372{ 806{
373 u64 ns; 807 struct e1000_hw *hw = &adapter->hw;
374 unsigned long flags; 808
809 if (!(adapter->flags & IGB_FLAG_PTP))
810 return;
375 811
376 switch (adapter->hw.mac.type) { 812 switch (adapter->hw.mac.type) {
813 case e1000_82576:
814 /* Dial the nominal frequency. */
815 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
816 break;
817 case e1000_82580:
818 case e1000_i350:
377 case e1000_i210: 819 case e1000_i210:
378 case e1000_i211: 820 case e1000_i211:
379 case e1000_i350: 821 /* Enable the timer functions and interrupts. */
380 case e1000_82580: 822 wr32(E1000_TSAUXC, 0x0);
381 case e1000_82576: 823 wr32(E1000_TSIM, E1000_TSIM_TXTS);
824 wr32(E1000_IMS, E1000_IMS_TS);
382 break; 825 break;
383 default: 826 default:
827 /* No work to do. */
384 return; 828 return;
385 } 829 }
386 830
387 spin_lock_irqsave(&adapter->tmreg_lock, flags); 831 /* Re-initialize the timer. */
832 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
833 struct timespec ts = ktime_to_timespec(ktime_get_real());
388 834
389 ns = timecounter_cyc2time(&adapter->tc, systim); 835 igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
390 836 } else {
391 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 837 timecounter_init(&adapter->tc, &adapter->cc,
392 838 ktime_to_ns(ktime_get_real()));
393 memset(hwtstamps, 0, sizeof(*hwtstamps)); 839 }
394 hwtstamps->hwtstamp = ns_to_ktime(ns);
395} 840}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 5fd5d04c26c9..89f40e51fc13 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -32,7 +32,7 @@
32 32
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
38 38
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b9623e9ea895..5bd26763554c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -78,6 +78,9 @@
78 78
79/* Supported Rx Buffer Sizes */ 79/* Supported Rx Buffer Sizes */
80#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ 80#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
81#define IXGBE_RXBUFFER_2K 2048
82#define IXGBE_RXBUFFER_3K 3072
83#define IXGBE_RXBUFFER_4K 4096
81#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ 84#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
82 85
83/* 86/*
@@ -104,6 +107,7 @@
104#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) 107#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
105#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) 108#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
106#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) 109#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
110#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
107#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 111#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
108#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 112#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
109#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 113#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
293 * this is twice the size of a half page we need to double the page order 297 * this is twice the size of a half page we need to double the page order
294 * for FCoE enabled Rx queues. 298 * for FCoE enabled Rx queues.
295 */ 299 */
296#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) 300static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
297static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
298{ 301{
299 return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0; 302#ifdef IXGBE_FCOE
303 if (test_bit(__IXGBE_RX_FCOE, &ring->state))
304 return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
305 IXGBE_RXBUFFER_3K;
306#endif
307 return IXGBE_RXBUFFER_2K;
300} 308}
301#else 309
302#define ixgbe_rx_pg_order(_ring) 0 310static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
311{
312#ifdef IXGBE_FCOE
313 if (test_bit(__IXGBE_RX_FCOE, &ring->state))
314 return (PAGE_SIZE < 8192) ? 1 : 0;
303#endif 315#endif
316 return 0;
317}
304#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) 318#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
305#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
306 319
307struct ixgbe_ring_container { 320struct ixgbe_ring_container {
308 struct ixgbe_ring *ring; /* pointer to linked list of rings */ 321 struct ixgbe_ring *ring; /* pointer to linked list of rings */
@@ -584,6 +597,9 @@ struct ixgbe_adapter {
584#ifdef CONFIG_IXGBE_HWMON 597#ifdef CONFIG_IXGBE_HWMON
585 struct hwmon_buff ixgbe_hwmon_buff; 598 struct hwmon_buff ixgbe_hwmon_buff;
586#endif /* CONFIG_IXGBE_HWMON */ 599#endif /* CONFIG_IXGBE_HWMON */
600#ifdef CONFIG_DEBUG_FS
601 struct dentry *ixgbe_dbg_adapter;
602#endif /*CONFIG_DEBUG_FS*/
587}; 603};
588 604
589struct ixgbe_fdir_filter { 605struct ixgbe_fdir_filter {
@@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
712 struct netdev_fcoe_hbainfo *info); 728 struct netdev_fcoe_hbainfo *info);
713extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); 729extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
714#endif /* IXGBE_FCOE */ 730#endif /* IXGBE_FCOE */
715 731#ifdef CONFIG_DEBUG_FS
732extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
733extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
734extern void ixgbe_dbg_init(void);
735extern void ixgbe_dbg_exit(void);
736#endif /* CONFIG_DEBUG_FS */
716static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) 737static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
717{ 738{
718 return netdev_get_tx_queue(ring->netdev, ring->queue_index); 739 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
new file mode 100644
index 000000000000..8d3a21889099
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -0,0 +1,300 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifdef CONFIG_DEBUG_FS
29
30#include <linux/debugfs.h>
31#include <linux/module.h>
32
33#include "ixgbe.h"
34
35static struct dentry *ixgbe_dbg_root;
36
37static char ixgbe_dbg_reg_ops_buf[256] = "";
38
39/**
40 * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
41 * @inode: inode that was opened
42 * @filp: file info
43 *
44 * Stash the adapter pointer hiding in the inode into the file pointer where
45 * we can find it later in the read and write calls
46 **/
47static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
48{
49 filp->private_data = inode->i_private;
50 return 0;
51}
52
53/**
54 * ixgbe_dbg_reg_ops_read - read for reg_ops datum
55 * @filp: the opened file
56 * @buffer: where to write the data for the user to read
57 * @count: the size of the user's buffer
58 * @ppos: file position offset
59 **/
60static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
61 size_t count, loff_t *ppos)
62{
63 struct ixgbe_adapter *adapter = filp->private_data;
64 char buf[256];
65 int bytes_not_copied;
66 int len;
67
68 /* don't allow partial reads */
69 if (*ppos != 0)
70 return 0;
71
72 len = snprintf(buf, sizeof(buf), "%s: %s\n",
73 adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
74 if (count < len)
75 return -ENOSPC;
76 bytes_not_copied = copy_to_user(buffer, buf, len);
77 if (bytes_not_copied < 0)
78 return bytes_not_copied;
79
80 *ppos = len;
81 return len;
82}
83
84/**
85 * ixgbe_dbg_reg_ops_write - write into reg_ops datum
86 * @filp: the opened file
87 * @buffer: where to find the user's data
88 * @count: the length of the user's data
89 * @ppos: file position offset
90 **/
91static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
92 const char __user *buffer,
93 size_t count, loff_t *ppos)
94{
95 struct ixgbe_adapter *adapter = filp->private_data;
96 int bytes_not_copied;
97
98 /* don't allow partial writes */
99 if (*ppos != 0)
100 return 0;
101 if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
102 return -ENOSPC;
103
104 bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
105 if (bytes_not_copied < 0)
106 return bytes_not_copied;
107 else if (bytes_not_copied < count)
108 count -= bytes_not_copied;
109 else
110 return -ENOSPC;
111 ixgbe_dbg_reg_ops_buf[count] = '\0';
112
113 if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
114 u32 reg, value;
115 int cnt;
116 cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
117 if (cnt == 2) {
118 IXGBE_WRITE_REG(&adapter->hw, reg, value);
119 value = IXGBE_READ_REG(&adapter->hw, reg);
120 e_dev_info("write: 0x%08x = 0x%08x\n", reg, value);
121 } else {
122 e_dev_info("write <reg> <value>\n");
123 }
124 } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) {
125 u32 reg, value;
126 int cnt;
127 cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg);
128 if (cnt == 1) {
129 value = IXGBE_READ_REG(&adapter->hw, reg);
130 e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
131 } else {
132 e_dev_info("read <reg>\n");
133 }
134 } else {
135 e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf);
136 e_dev_info("Available commands:\n");
137 e_dev_info(" read <reg>\n");
138 e_dev_info(" write <reg> <value>\n");
139 }
140 return count;
141}
142
143static const struct file_operations ixgbe_dbg_reg_ops_fops = {
144 .owner = THIS_MODULE,
145 .open = ixgbe_dbg_reg_ops_open,
146 .read = ixgbe_dbg_reg_ops_read,
147 .write = ixgbe_dbg_reg_ops_write,
148};
149
150static char ixgbe_dbg_netdev_ops_buf[256] = "";
151
152/**
153 * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
154 * @inode: inode that was opened
155 * @filp: file info
156 *
157 * Stash the adapter pointer hiding in the inode into the file pointer
158 * where we can find it later in the read and write calls
159 **/
160static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
161{
162 filp->private_data = inode->i_private;
163 return 0;
164}
165
166/**
167 * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
168 * @filp: the opened file
169 * @buffer: where to write the data for the user to read
170 * @count: the size of the user's buffer
171 * @ppos: file position offset
172 **/
173static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
174 char __user *buffer,
175 size_t count, loff_t *ppos)
176{
177 struct ixgbe_adapter *adapter = filp->private_data;
178 char buf[256];
179 int bytes_not_copied;
180 int len;
181
182 /* don't allow partial reads */
183 if (*ppos != 0)
184 return 0;
185
186 len = snprintf(buf, sizeof(buf), "%s: %s\n",
187 adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
188 if (count < len)
189 return -ENOSPC;
190 bytes_not_copied = copy_to_user(buffer, buf, len);
191 if (bytes_not_copied < 0)
192 return bytes_not_copied;
193
194 *ppos = len;
195 return len;
196}
197
198/**
199 * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum
200 * @filp: the opened file
201 * @buffer: where to find the user's data
202 * @count: the length of the user's data
203 * @ppos: file position offset
204 **/
205static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
206 const char __user *buffer,
207 size_t count, loff_t *ppos)
208{
209 struct ixgbe_adapter *adapter = filp->private_data;
210 int bytes_not_copied;
211
212 /* don't allow partial writes */
213 if (*ppos != 0)
214 return 0;
215 if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
216 return -ENOSPC;
217
218 bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
219 buffer, count);
220 if (bytes_not_copied < 0)
221 return bytes_not_copied;
222 else if (bytes_not_copied < count)
223 count -= bytes_not_copied;
224 else
225 return -ENOSPC;
226 ixgbe_dbg_netdev_ops_buf[count] = '\0';
227
228 if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
229 adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
230 e_dev_info("tx_timeout called\n");
231 } else {
232 e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
233 e_dev_info("Available commands:\n");
234 e_dev_info(" tx_timeout\n");
235 }
236 return count;
237}
238
239static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
240 .owner = THIS_MODULE,
241 .open = ixgbe_dbg_netdev_ops_open,
242 .read = ixgbe_dbg_netdev_ops_read,
243 .write = ixgbe_dbg_netdev_ops_write,
244};
245
246/**
247 * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter
248 * @adapter: the adapter that is starting up
249 **/
250void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
251{
252 const char *name = pci_name(adapter->pdev);
253 struct dentry *pfile;
254 adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
255 if (adapter->ixgbe_dbg_adapter) {
256 pfile = debugfs_create_file("reg_ops", 0600,
257 adapter->ixgbe_dbg_adapter, adapter,
258 &ixgbe_dbg_reg_ops_fops);
259 if (!pfile)
260 e_dev_err("debugfs reg_ops for %s failed\n", name);
261 pfile = debugfs_create_file("netdev_ops", 0600,
262 adapter->ixgbe_dbg_adapter, adapter,
263 &ixgbe_dbg_netdev_ops_fops);
264 if (!pfile)
265 e_dev_err("debugfs netdev_ops for %s failed\n", name);
266 } else {
267 e_dev_err("debugfs entry for %s failed\n", name);
268 }
269}
270
271/**
272 * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
273 * @pf: the pf that is stopping
274 **/
275void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
276{
277 if (adapter->ixgbe_dbg_adapter)
278 debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
279 adapter->ixgbe_dbg_adapter = NULL;
280}
281
282/**
283 * ixgbe_dbg_init - start up debugfs for the driver
284 **/
285void ixgbe_dbg_init(void)
286{
287 ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
288 if (ixgbe_dbg_root == NULL)
289 pr_err("init of debugfs failed\n");
290}
291
292/**
293 * ixgbe_dbg_exit - clean out the driver's debugfs entries
294 **/
295void ixgbe_dbg_exit(void)
296{
297 debugfs_remove_recursive(ixgbe_dbg_root);
298}
299
300#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ee61819d6088..868af6938219 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1167 } 1167 }
1168 1168
1169 bi->dma = dma; 1169 bi->dma = dma;
1170 bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); 1170 bi->page_offset = 0;
1171 1171
1172 return true; 1172 return true;
1173} 1173}
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1320 return max_len; 1320 return max_len;
1321} 1321}
1322 1322
1323static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
1324 union ixgbe_adv_rx_desc *rx_desc,
1325 struct sk_buff *skb)
1326{
1327 __le32 rsc_enabled;
1328 u32 rsc_cnt;
1329
1330 if (!ring_is_rsc_enabled(rx_ring))
1331 return;
1332
1333 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1334 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1335
1336 /* If this is an RSC frame rsc_cnt should be non-zero */
1337 if (!rsc_enabled)
1338 return;
1339
1340 rsc_cnt = le32_to_cpu(rsc_enabled);
1341 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1342
1343 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1344}
1345
1346static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, 1323static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1347 struct sk_buff *skb) 1324 struct sk_buff *skb)
1348{ 1325{
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1440 1417
1441 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); 1418 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1442 1419
1443 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) 1420 /* update RSC append count if present */
1444 return false; 1421 if (ring_is_rsc_enabled(rx_ring)) {
1422 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1423 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1424
1425 if (unlikely(rsc_enabled)) {
1426 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1427
1428 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1429 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1445 1430
1446 /* append_cnt indicates packet is RSC, if so fetch nextp */ 1431 /* update ntc based on RSC value */
1447 if (IXGBE_CB(skb)->append_cnt) { 1432 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1448 ntc = le32_to_cpu(rx_desc->wb.upper.status_error); 1433 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1449 ntc &= IXGBE_RXDADV_NEXTP_MASK; 1434 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1450 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; 1435 }
1451 } 1436 }
1452 1437
1438 /* if we are the last buffer then there is nothing else to do */
1439 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1440 return false;
1441
1453 /* place skb in next buffer to be received */ 1442 /* place skb in next buffer to be received */
1454 rx_ring->rx_buffer_info[ntc].skb = skb; 1443 rx_ring->rx_buffer_info[ntc].skb = skb;
1455 rx_ring->rx_stats.non_eop_descs++; 1444 rx_ring->rx_stats.non_eop_descs++;
@@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1458} 1447}
1459 1448
1460/** 1449/**
1450 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1451 * @rx_ring: rx descriptor ring packet is being transacted on
1452 * @skb: pointer to current skb being adjusted
1453 *
1454 * This function is an ixgbe specific version of __pskb_pull_tail. The
1455 * main difference between this version and the original function is that
1456 * this function can make several assumptions about the state of things
1457 * that allow for significant optimizations versus the standard function.
1458 * As a result we can do things like drop a frag and maintain an accurate
1459 * truesize for the skb.
1460 */
1461static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1462 struct sk_buff *skb)
1463{
1464 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1465 unsigned char *va;
1466 unsigned int pull_len;
1467
1468 /*
1469 * it is valid to use page_address instead of kmap since we are
1470 * working with pages allocated out of the lomem pool per
1471 * alloc_page(GFP_ATOMIC)
1472 */
1473 va = skb_frag_address(frag);
1474
1475 /*
1476 * we need the header to contain the greater of either ETH_HLEN or
1477 * 60 bytes if the skb->len is less than 60 for skb_pad.
1478 */
1479 pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1480
1481 /* align pull length to size of long to optimize memcpy performance */
1482 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1483
1484 /* update all of the pointers */
1485 skb_frag_size_sub(frag, pull_len);
1486 frag->page_offset += pull_len;
1487 skb->data_len -= pull_len;
1488 skb->tail += pull_len;
1489}
1490
1491/**
1492 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1493 * @rx_ring: rx descriptor ring packet is being transacted on
1494 * @skb: pointer to current skb being updated
1495 *
1496 * This function provides a basic DMA sync up for the first fragment of an
1497 * skb. The reason for doing this is that the first fragment cannot be
1498 * unmapped until we have reached the end of packet descriptor for a buffer
1499 * chain.
1500 */
1501static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1502 struct sk_buff *skb)
1503{
1504 /* if the page was released unmap it, else just sync our portion */
1505 if (unlikely(IXGBE_CB(skb)->page_released)) {
1506 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1507 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1508 IXGBE_CB(skb)->page_released = false;
1509 } else {
1510 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1511
1512 dma_sync_single_range_for_cpu(rx_ring->dev,
1513 IXGBE_CB(skb)->dma,
1514 frag->page_offset,
1515 ixgbe_rx_bufsz(rx_ring),
1516 DMA_FROM_DEVICE);
1517 }
1518 IXGBE_CB(skb)->dma = 0;
1519}
1520
1521/**
1461 * ixgbe_cleanup_headers - Correct corrupted or empty headers 1522 * ixgbe_cleanup_headers - Correct corrupted or empty headers
1462 * @rx_ring: rx descriptor ring packet is being transacted on 1523 * @rx_ring: rx descriptor ring packet is being transacted on
1463 * @rx_desc: pointer to the EOP Rx descriptor 1524 * @rx_desc: pointer to the EOP Rx descriptor
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1479 union ixgbe_adv_rx_desc *rx_desc, 1540 union ixgbe_adv_rx_desc *rx_desc,
1480 struct sk_buff *skb) 1541 struct sk_buff *skb)
1481{ 1542{
1482 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1483 struct net_device *netdev = rx_ring->netdev; 1543 struct net_device *netdev = rx_ring->netdev;
1484 unsigned char *va;
1485 unsigned int pull_len;
1486
1487 /* if the page was released unmap it, else just sync our portion */
1488 if (unlikely(IXGBE_CB(skb)->page_released)) {
1489 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1490 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1491 IXGBE_CB(skb)->page_released = false;
1492 } else {
1493 dma_sync_single_range_for_cpu(rx_ring->dev,
1494 IXGBE_CB(skb)->dma,
1495 frag->page_offset,
1496 ixgbe_rx_bufsz(rx_ring),
1497 DMA_FROM_DEVICE);
1498 }
1499 IXGBE_CB(skb)->dma = 0;
1500 1544
1501 /* verify that the packet does not have any known errors */ 1545 /* verify that the packet does not have any known errors */
1502 if (unlikely(ixgbe_test_staterr(rx_desc, 1546 if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1506 return true; 1550 return true;
1507 } 1551 }
1508 1552
1509 /* 1553 /* place header in linear portion of buffer */
1510 * it is valid to use page_address instead of kmap since we are 1554 if (skb_is_nonlinear(skb))
1511 * working with pages allocated out of the lomem pool per 1555 ixgbe_pull_tail(rx_ring, skb);
1512 * alloc_page(GFP_ATOMIC)
1513 */
1514 va = skb_frag_address(frag);
1515
1516 /*
1517 * we need the header to contain the greater of either ETH_HLEN or
1518 * 60 bytes if the skb->len is less than 60 for skb_pad.
1519 */
1520 pull_len = skb_frag_size(frag);
1521 if (pull_len > IXGBE_RX_HDR_SIZE)
1522 pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1523
1524 /* align pull length to size of long to optimize memcpy performance */
1525 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1526
1527 /* update all of the pointers */
1528 skb_frag_size_sub(frag, pull_len);
1529 frag->page_offset += pull_len;
1530 skb->data_len -= pull_len;
1531 skb->tail += pull_len;
1532
1533 /*
1534 * if we sucked the frag empty then we should free it,
1535 * if there are other frags here something is screwed up in hardware
1536 */
1537 if (skb_frag_size(frag) == 0) {
1538 BUG_ON(skb_shinfo(skb)->nr_frags != 1);
1539 skb_shinfo(skb)->nr_frags = 0;
1540 __skb_frag_unref(frag);
1541 skb->truesize -= ixgbe_rx_bufsz(rx_ring);
1542 }
1543 1556
1544#ifdef IXGBE_FCOE 1557#ifdef IXGBE_FCOE
1545 /* do not attempt to pad FCoE Frames as this will disrupt DDP */ 1558 /* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1560} 1573}
1561 1574
1562/** 1575/**
1563 * ixgbe_can_reuse_page - determine if we can reuse a page
1564 * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
1565 *
1566 * Returns true if page can be reused in another Rx buffer
1567 **/
1568static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
1569{
1570 struct page *page = rx_buffer->page;
1571
1572 /* if we are only owner of page and it is local we can reuse it */
1573 return likely(page_count(page) == 1) &&
1574 likely(page_to_nid(page) == numa_node_id());
1575}
1576
1577/**
1578 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring 1576 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1579 * @rx_ring: rx descriptor ring to store buffers on 1577 * @rx_ring: rx descriptor ring to store buffers on
1580 * @old_buff: donor buffer to have page reused 1578 * @old_buff: donor buffer to have page reused
1581 * 1579 *
1582 * Syncronizes page for reuse by the adapter 1580 * Synchronizes page for reuse by the adapter
1583 **/ 1581 **/
1584static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, 1582static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1585 struct ixgbe_rx_buffer *old_buff) 1583 struct ixgbe_rx_buffer *old_buff)
1586{ 1584{
1587 struct ixgbe_rx_buffer *new_buff; 1585 struct ixgbe_rx_buffer *new_buff;
1588 u16 nta = rx_ring->next_to_alloc; 1586 u16 nta = rx_ring->next_to_alloc;
1589 u16 bufsz = ixgbe_rx_bufsz(rx_ring);
1590 1587
1591 new_buff = &rx_ring->rx_buffer_info[nta]; 1588 new_buff = &rx_ring->rx_buffer_info[nta];
1592 1589
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1597 /* transfer page from old buffer to new buffer */ 1594 /* transfer page from old buffer to new buffer */
1598 new_buff->page = old_buff->page; 1595 new_buff->page = old_buff->page;
1599 new_buff->dma = old_buff->dma; 1596 new_buff->dma = old_buff->dma;
1600 1597 new_buff->page_offset = old_buff->page_offset;
1601 /* flip page offset to other buffer and store to new_buff */
1602 new_buff->page_offset = old_buff->page_offset ^ bufsz;
1603 1598
1604 /* sync the buffer for use by the device */ 1599 /* sync the buffer for use by the device */
1605 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, 1600 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1606 new_buff->page_offset, bufsz, 1601 new_buff->page_offset,
1602 ixgbe_rx_bufsz(rx_ring),
1607 DMA_FROM_DEVICE); 1603 DMA_FROM_DEVICE);
1608
1609 /* bump ref count on page before it is given to the stack */
1610 get_page(new_buff->page);
1611} 1604}
1612 1605
1613/** 1606/**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1617 * @rx_desc: descriptor containing length of buffer written by hardware 1610 * @rx_desc: descriptor containing length of buffer written by hardware
1618 * @skb: sk_buff to place the data into 1611 * @skb: sk_buff to place the data into
1619 * 1612 *
1620 * This function is based on skb_add_rx_frag. I would have used that 1613 * This function will add the data contained in rx_buffer->page to the skb.
1621 * function however it doesn't handle the truesize case correctly since we 1614 * This is done either through a direct copy if the data in the buffer is
1622 * are allocating more memory than might be used for a single receive. 1615 * less than the skb header size, otherwise it will just attach the page as
1616 * a frag to the skb.
1617 *
1618 * The function will then update the page offset if necessary and return
1619 * true if the buffer can be reused by the adapter.
1623 **/ 1620 **/
1624static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, 1621static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1625 struct ixgbe_rx_buffer *rx_buffer, 1622 struct ixgbe_rx_buffer *rx_buffer,
1626 struct sk_buff *skb, int size) 1623 union ixgbe_adv_rx_desc *rx_desc,
1624 struct sk_buff *skb)
1627{ 1625{
1628 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1626 struct page *page = rx_buffer->page;
1629 rx_buffer->page, rx_buffer->page_offset, 1627 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1630 size); 1628#if (PAGE_SIZE < 8192)
1631 skb->len += size; 1629 unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1632 skb->data_len += size; 1630#else
1633 skb->truesize += ixgbe_rx_bufsz(rx_ring); 1631 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1632 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1633 ixgbe_rx_bufsz(rx_ring);
1634#endif
1635
1636 if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1637 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1638
1639 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1640
1641 /* we can reuse buffer as-is, just make sure it is local */
1642 if (likely(page_to_nid(page) == numa_node_id()))
1643 return true;
1644
1645 /* this page cannot be reused so discard it */
1646 put_page(page);
1647 return false;
1648 }
1649
1650 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1651 rx_buffer->page_offset, size, truesize);
1652
1653 /* avoid re-using remote pages */
1654 if (unlikely(page_to_nid(page) != numa_node_id()))
1655 return false;
1656
1657#if (PAGE_SIZE < 8192)
1658 /* if we are only owner of page we can reuse it */
1659 if (unlikely(page_count(page) != 1))
1660 return false;
1661
1662 /* flip page offset to other buffer */
1663 rx_buffer->page_offset ^= truesize;
1664
1665 /*
1666 * since we are the only owner of the page and we need to
1667 * increment it, just set the value to 2 in order to avoid
1668 * an unecessary locked operation
1669 */
1670 atomic_set(&page->_count, 2);
1671#else
1672 /* move offset up to the next cache line */
1673 rx_buffer->page_offset += truesize;
1674
1675 if (rx_buffer->page_offset > last_offset)
1676 return false;
1677
1678 /* bump ref count on page before it is given to the stack */
1679 get_page(page);
1680#endif
1681
1682 return true;
1683}
1684
1685static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1686 union ixgbe_adv_rx_desc *rx_desc)
1687{
1688 struct ixgbe_rx_buffer *rx_buffer;
1689 struct sk_buff *skb;
1690 struct page *page;
1691
1692 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1693 page = rx_buffer->page;
1694 prefetchw(page);
1695
1696 skb = rx_buffer->skb;
1697
1698 if (likely(!skb)) {
1699 void *page_addr = page_address(page) +
1700 rx_buffer->page_offset;
1701
1702 /* prefetch first cache line of first page */
1703 prefetch(page_addr);
1704#if L1_CACHE_BYTES < 128
1705 prefetch(page_addr + L1_CACHE_BYTES);
1706#endif
1707
1708 /* allocate a skb to store the frags */
1709 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1710 IXGBE_RX_HDR_SIZE);
1711 if (unlikely(!skb)) {
1712 rx_ring->rx_stats.alloc_rx_buff_failed++;
1713 return NULL;
1714 }
1715
1716 /*
1717 * we will be copying header into skb->data in
1718 * pskb_may_pull so it is in our interest to prefetch
1719 * it now to avoid a possible cache miss
1720 */
1721 prefetchw(skb->data);
1722
1723 /*
1724 * Delay unmapping of the first packet. It carries the
1725 * header information, HW may still access the header
1726 * after the writeback. Only unmap it when EOP is
1727 * reached
1728 */
1729 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1730 goto dma_sync;
1731
1732 IXGBE_CB(skb)->dma = rx_buffer->dma;
1733 } else {
1734 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
1735 ixgbe_dma_sync_frag(rx_ring, skb);
1736
1737dma_sync:
1738 /* we are reusing so sync this buffer for CPU use */
1739 dma_sync_single_range_for_cpu(rx_ring->dev,
1740 rx_buffer->dma,
1741 rx_buffer->page_offset,
1742 ixgbe_rx_bufsz(rx_ring),
1743 DMA_FROM_DEVICE);
1744 }
1745
1746 /* pull page into skb */
1747 if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1748 /* hand second half of page back to the ring */
1749 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1750 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1751 /* the page has been released from the ring */
1752 IXGBE_CB(skb)->page_released = true;
1753 } else {
1754 /* we are not reusing the buffer so unmap it */
1755 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1756 ixgbe_rx_pg_size(rx_ring),
1757 DMA_FROM_DEVICE);
1758 }
1759
1760 /* clear contents of buffer_info */
1761 rx_buffer->skb = NULL;
1762 rx_buffer->dma = 0;
1763 rx_buffer->page = NULL;
1764
1765 return skb;
1634} 1766}
1635 1767
1636/** 1768/**
@@ -1653,16 +1785,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1653 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1785 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1654#ifdef IXGBE_FCOE 1786#ifdef IXGBE_FCOE
1655 struct ixgbe_adapter *adapter = q_vector->adapter; 1787 struct ixgbe_adapter *adapter = q_vector->adapter;
1656 int ddp_bytes = 0; 1788 int ddp_bytes;
1789 unsigned int mss = 0;
1657#endif /* IXGBE_FCOE */ 1790#endif /* IXGBE_FCOE */
1658 u16 cleaned_count = ixgbe_desc_unused(rx_ring); 1791 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
1659 1792
1660 do { 1793 do {
1661 struct ixgbe_rx_buffer *rx_buffer;
1662 union ixgbe_adv_rx_desc *rx_desc; 1794 union ixgbe_adv_rx_desc *rx_desc;
1663 struct sk_buff *skb; 1795 struct sk_buff *skb;
1664 struct page *page;
1665 u16 ntc;
1666 1796
1667 /* return some buffers to hardware, one at a time is too slow */ 1797 /* return some buffers to hardware, one at a time is too slow */
1668 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 1798 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1800,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1670 cleaned_count = 0; 1800 cleaned_count = 0;
1671 } 1801 }
1672 1802
1673 ntc = rx_ring->next_to_clean; 1803 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1674 rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
1675 rx_buffer = &rx_ring->rx_buffer_info[ntc];
1676 1804
1677 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) 1805 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
1678 break; 1806 break;
@@ -1684,75 +1812,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1684 */ 1812 */
1685 rmb(); 1813 rmb();
1686 1814
1687 page = rx_buffer->page; 1815 /* retrieve a buffer from the ring */
1688 prefetchw(page); 1816 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
1689
1690 skb = rx_buffer->skb;
1691
1692 if (likely(!skb)) {
1693 void *page_addr = page_address(page) +
1694 rx_buffer->page_offset;
1695
1696 /* prefetch first cache line of first page */
1697 prefetch(page_addr);
1698#if L1_CACHE_BYTES < 128
1699 prefetch(page_addr + L1_CACHE_BYTES);
1700#endif
1701 1817
1702 /* allocate a skb to store the frags */ 1818 /* exit if we failed to retrieve a buffer */
1703 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 1819 if (!skb)
1704 IXGBE_RX_HDR_SIZE); 1820 break;
1705 if (unlikely(!skb)) {
1706 rx_ring->rx_stats.alloc_rx_buff_failed++;
1707 break;
1708 }
1709
1710 /*
1711 * we will be copying header into skb->data in
1712 * pskb_may_pull so it is in our interest to prefetch
1713 * it now to avoid a possible cache miss
1714 */
1715 prefetchw(skb->data);
1716
1717 /*
1718 * Delay unmapping of the first packet. It carries the
1719 * header information, HW may still access the header
1720 * after the writeback. Only unmap it when EOP is
1721 * reached
1722 */
1723 IXGBE_CB(skb)->dma = rx_buffer->dma;
1724 } else {
1725 /* we are reusing so sync this buffer for CPU use */
1726 dma_sync_single_range_for_cpu(rx_ring->dev,
1727 rx_buffer->dma,
1728 rx_buffer->page_offset,
1729 ixgbe_rx_bufsz(rx_ring),
1730 DMA_FROM_DEVICE);
1731 }
1732
1733 /* pull page into skb */
1734 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
1735 le16_to_cpu(rx_desc->wb.upper.length));
1736
1737 if (ixgbe_can_reuse_page(rx_buffer)) {
1738 /* hand second half of page back to the ring */
1739 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1740 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1741 /* the page has been released from the ring */
1742 IXGBE_CB(skb)->page_released = true;
1743 } else {
1744 /* we are not reusing the buffer so unmap it */
1745 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1746 ixgbe_rx_pg_size(rx_ring),
1747 DMA_FROM_DEVICE);
1748 }
1749
1750 /* clear contents of buffer_info */
1751 rx_buffer->skb = NULL;
1752 rx_buffer->dma = 0;
1753 rx_buffer->page = NULL;
1754
1755 ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
1756 1821
1757 cleaned_count++; 1822 cleaned_count++;
1758 1823
@@ -1775,6 +1840,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1775 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1840 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1776 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { 1841 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
1777 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); 1842 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1843 /* include DDPed FCoE data */
1844 if (ddp_bytes > 0) {
1845 if (!mss) {
1846 mss = rx_ring->netdev->mtu -
1847 sizeof(struct fcoe_hdr) -
1848 sizeof(struct fc_frame_header) -
1849 sizeof(struct fcoe_crc_eof);
1850 if (mss > 512)
1851 mss &= ~511;
1852 }
1853 total_rx_bytes += ddp_bytes;
1854 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
1855 mss);
1856 }
1778 if (!ddp_bytes) { 1857 if (!ddp_bytes) {
1779 dev_kfree_skb_any(skb); 1858 dev_kfree_skb_any(skb);
1780 continue; 1859 continue;
@@ -1788,21 +1867,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1788 budget--; 1867 budget--;
1789 } while (likely(budget)); 1868 } while (likely(budget));
1790 1869
1791#ifdef IXGBE_FCOE
1792 /* include DDPed FCoE data */
1793 if (ddp_bytes > 0) {
1794 unsigned int mss;
1795
1796 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1797 sizeof(struct fc_frame_header) -
1798 sizeof(struct fcoe_crc_eof);
1799 if (mss > 512)
1800 mss &= ~511;
1801 total_rx_bytes += ddp_bytes;
1802 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1803 }
1804
1805#endif /* IXGBE_FCOE */
1806 u64_stats_update_begin(&rx_ring->syncp); 1870 u64_stats_update_begin(&rx_ring->syncp);
1807 rx_ring->stats.packets += total_rx_packets; 1871 rx_ring->stats.packets += total_rx_packets;
1808 rx_ring->stats.bytes += total_rx_bytes; 1872 rx_ring->stats.bytes += total_rx_bytes;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2868 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 2932 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
2869 2933
2870 /* configure the packet buffer length */ 2934 /* configure the packet buffer length */
2871#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
2872 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2873#else
2874 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2935 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2875#endif
2876 2936
2877 /* configure descriptor type */ 2937 /* configure descriptor type */
2878 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2938 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2980 * total size of max desc * buf_len is not greater 3040 * total size of max desc * buf_len is not greater
2981 * than 65536 3041 * than 65536
2982 */ 3042 */
2983#if (PAGE_SIZE <= 8192)
2984 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 3043 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2985#elif (PAGE_SIZE <= 16384)
2986 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2987#else
2988 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2989#endif
2990 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 3044 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2991} 3045}
2992 3046
@@ -3606,8 +3660,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3606 if (hw->mac.type == ixgbe_mac_82598EB) 3660 if (hw->mac.type == ixgbe_mac_82598EB)
3607 netif_set_gso_max_size(adapter->netdev, 32768); 3661 netif_set_gso_max_size(adapter->netdev, 32768);
3608 3662
3609 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3610
3611#ifdef IXGBE_FCOE 3663#ifdef IXGBE_FCOE
3612 if (adapter->netdev->features & NETIF_F_FCOE_MTU) 3664 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3613 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 3665 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3807,6 +3859,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3807#ifdef CONFIG_IXGBE_DCB 3859#ifdef CONFIG_IXGBE_DCB
3808 ixgbe_configure_dcb(adapter); 3860 ixgbe_configure_dcb(adapter);
3809#endif 3861#endif
3862 /*
3863 * We must restore virtualization before VLANs or else
3864 * the VLVF registers will not be populated
3865 */
3866 ixgbe_configure_virtualization(adapter);
3810 3867
3811 ixgbe_set_rx_mode(adapter->netdev); 3868 ixgbe_set_rx_mode(adapter->netdev);
3812 ixgbe_restore_vlan(adapter); 3869 ixgbe_restore_vlan(adapter);
@@ -3838,8 +3895,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3838 break; 3895 break;
3839 } 3896 }
3840 3897
3841 ixgbe_configure_virtualization(adapter);
3842
3843#ifdef IXGBE_FCOE 3898#ifdef IXGBE_FCOE
3844 /* configure FCoE L2 filters, redirection table, and Rx control */ 3899 /* configure FCoE L2 filters, redirection table, and Rx control */
3845 ixgbe_configure_fcoe(adapter); 3900 ixgbe_configure_fcoe(adapter);
@@ -4130,27 +4185,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4130} 4185}
4131 4186
4132/** 4187/**
4133 * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
4134 * @rx_ring: ring to setup
4135 *
4136 * On many IA platforms the L1 cache has a critical stride of 4K, this
4137 * results in each receive buffer starting in the same cache set. To help
4138 * reduce the pressure on this cache set we can interleave the offsets so
4139 * that only every other buffer will be in the same cache set.
4140 **/
4141static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
4142{
4143 struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
4144 u16 i;
4145
4146 for (i = 0; i < rx_ring->count; i += 2) {
4147 rx_buffer[0].page_offset = 0;
4148 rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
4149 rx_buffer = &rx_buffer[2];
4150 }
4151}
4152
4153/**
4154 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 4188 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4155 * @rx_ring: ring to free buffers from 4189 * @rx_ring: ring to free buffers from
4156 **/ 4190 **/
@@ -4195,8 +4229,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4195 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4229 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4196 memset(rx_ring->rx_buffer_info, 0, size); 4230 memset(rx_ring->rx_buffer_info, 0, size);
4197 4231
4198 ixgbe_init_rx_page_offset(rx_ring);
4199
4200 /* Zero out the descriptor ring */ 4232 /* Zero out the descriptor ring */
4201 memset(rx_ring->desc, 0, rx_ring->size); 4233 memset(rx_ring->desc, 0, rx_ring->size);
4202 4234
@@ -4646,8 +4678,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
4646 rx_ring->next_to_clean = 0; 4678 rx_ring->next_to_clean = 0;
4647 rx_ring->next_to_use = 0; 4679 rx_ring->next_to_use = 0;
4648 4680
4649 ixgbe_init_rx_page_offset(rx_ring);
4650
4651 return 0; 4681 return 0;
4652err: 4682err:
4653 vfree(rx_ring->rx_buffer_info); 4683 vfree(rx_ring->rx_buffer_info);
@@ -5530,8 +5560,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5530{ 5560{
5531 u32 ssvpc; 5561 u32 ssvpc;
5532 5562
5533 /* Do not perform spoof check for 82598 */ 5563 /* Do not perform spoof check for 82598 or if not in IOV mode */
5534 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 5564 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
5565 adapter->num_vfs == 0)
5535 return; 5566 return;
5536 5567
5537 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); 5568 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
@@ -5543,7 +5574,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5543 if (!ssvpc) 5574 if (!ssvpc)
5544 return; 5575 return;
5545 5576
5546 e_warn(drv, "%d Spoofed packets detected\n", ssvpc); 5577 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
5547} 5578}
5548 5579
5549/** 5580/**
@@ -5874,9 +5905,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
5874 u32 type_tucmd = 0; 5905 u32 type_tucmd = 0;
5875 5906
5876 if (skb->ip_summed != CHECKSUM_PARTIAL) { 5907 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5877 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && 5908 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
5878 !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) 5909 if (unlikely(skb->no_fcs))
5879 return; 5910 first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
5911 if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
5912 return;
5913 }
5880 } else { 5914 } else {
5881 u8 l4_hdr = 0; 5915 u8 l4_hdr = 0;
5882 switch (first->protocol) { 5916 switch (first->protocol) {
@@ -5938,7 +5972,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5938{ 5972{
5939 /* set type for advanced descriptor with frame checksum insertion */ 5973 /* set type for advanced descriptor with frame checksum insertion */
5940 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | 5974 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
5941 IXGBE_ADVTXD_DCMD_IFCS |
5942 IXGBE_ADVTXD_DCMD_DEXT); 5975 IXGBE_ADVTXD_DCMD_DEXT);
5943 5976
5944 /* set HW vlan bit if vlan is present */ 5977 /* set HW vlan bit if vlan is present */
@@ -5958,6 +5991,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5958#endif 5991#endif
5959 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); 5992 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
5960 5993
5994 /* insert frame checksum */
5995 if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
5996 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
5997
5961 return cmd_type; 5998 return cmd_type;
5962} 5999}
5963 6000
@@ -6063,8 +6100,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6063 if (likely(!data_len)) 6100 if (likely(!data_len))
6064 break; 6101 break;
6065 6102
6066 if (unlikely(skb->no_fcs))
6067 cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
6068 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 6103 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
6069 6104
6070 i++; 6105 i++;
@@ -6854,9 +6889,9 @@ static int ixgbe_set_features(struct net_device *netdev,
6854 return 0; 6889 return 0;
6855} 6890}
6856 6891
6857static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, 6892static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
6858 struct net_device *dev, 6893 struct net_device *dev,
6859 unsigned char *addr, 6894 const unsigned char *addr,
6860 u16 flags) 6895 u16 flags)
6861{ 6896{
6862 struct ixgbe_adapter *adapter = netdev_priv(dev); 6897 struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -6893,7 +6928,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
6893 6928
6894static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, 6929static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6895 struct net_device *dev, 6930 struct net_device *dev,
6896 unsigned char *addr) 6931 const unsigned char *addr)
6897{ 6932{
6898 struct ixgbe_adapter *adapter = netdev_priv(dev); 6933 struct ixgbe_adapter *adapter = netdev_priv(dev);
6899 int err = -EOPNOTSUPP; 6934 int err = -EOPNOTSUPP;
@@ -7136,11 +7171,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7136 goto err_ioremap; 7171 goto err_ioremap;
7137 } 7172 }
7138 7173
7139 for (i = 1; i <= 5; i++) {
7140 if (pci_resource_len(pdev, i) == 0)
7141 continue;
7142 }
7143
7144 netdev->netdev_ops = &ixgbe_netdev_ops; 7174 netdev->netdev_ops = &ixgbe_netdev_ops;
7145 ixgbe_set_ethtool_ops(netdev); 7175 ixgbe_set_ethtool_ops(netdev);
7146 netdev->watchdog_timeo = 5 * HZ; 7176 netdev->watchdog_timeo = 5 * HZ;
@@ -7419,6 +7449,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7419 e_err(probe, "failed to allocate sysfs resources\n"); 7449 e_err(probe, "failed to allocate sysfs resources\n");
7420#endif /* CONFIG_IXGBE_HWMON */ 7450#endif /* CONFIG_IXGBE_HWMON */
7421 7451
7452#ifdef CONFIG_DEBUG_FS
7453 ixgbe_dbg_adapter_init(adapter);
7454#endif /* CONFIG_DEBUG_FS */
7455
7422 return 0; 7456 return 0;
7423 7457
7424err_register: 7458err_register:
@@ -7453,6 +7487,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7453 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 7487 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7454 struct net_device *netdev = adapter->netdev; 7488 struct net_device *netdev = adapter->netdev;
7455 7489
7490#ifdef CONFIG_DEBUG_FS
7491 ixgbe_dbg_adapter_exit(adapter);
7492#endif /*CONFIG_DEBUG_FS */
7493
7456 set_bit(__IXGBE_DOWN, &adapter->state); 7494 set_bit(__IXGBE_DOWN, &adapter->state);
7457 cancel_work_sync(&adapter->service_task); 7495 cancel_work_sync(&adapter->service_task);
7458 7496
@@ -7708,6 +7746,10 @@ static int __init ixgbe_init_module(void)
7708 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); 7746 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7709 pr_info("%s\n", ixgbe_copyright); 7747 pr_info("%s\n", ixgbe_copyright);
7710 7748
7749#ifdef CONFIG_DEBUG_FS
7750 ixgbe_dbg_init();
7751#endif /* CONFIG_DEBUG_FS */
7752
7711#ifdef CONFIG_IXGBE_DCA 7753#ifdef CONFIG_IXGBE_DCA
7712 dca_register_notify(&dca_notifier); 7754 dca_register_notify(&dca_notifier);
7713#endif 7755#endif
@@ -7730,6 +7772,11 @@ static void __exit ixgbe_exit_module(void)
7730 dca_unregister_notify(&dca_notifier); 7772 dca_unregister_notify(&dca_notifier);
7731#endif 7773#endif
7732 pci_unregister_driver(&ixgbe_driver); 7774 pci_unregister_driver(&ixgbe_driver);
7775
7776#ifdef CONFIG_DEBUG_FS
7777 ixgbe_dbg_exit();
7778#endif /* CONFIG_DEBUG_FS */
7779
7733 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 7780 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7734} 7781}
7735 7782
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 3456d5617143..39881cb17a4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -960,7 +960,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
960 /* (Re)start the overflow check */ 960 /* (Re)start the overflow check */
961 adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; 961 adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
962 962
963 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps); 963 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
964 &adapter->pdev->dev);
964 if (IS_ERR(adapter->ptp_clock)) { 965 if (IS_ERR(adapter->ptp_clock)) {
965 adapter->ptp_clock = NULL; 966 adapter->ptp_clock = NULL;
966 e_dev_err("ptp_clock_register failed\n"); 967 e_dev_err("ptp_clock_register failed\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 4fea8716ab64..dce48bf64d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
346static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, 346static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
347 u32 vf) 347 u32 vf)
348{ 348{
349 /* VLAN 0 is a special case, don't allow it to be removed */
350 if (!vid && !add)
351 return 0;
352
349 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 353 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
350} 354}
351 355
@@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
414 VLAN_PRIO_SHIFT)), vf); 418 VLAN_PRIO_SHIFT)), vf);
415 ixgbe_set_vmolr(hw, vf, false); 419 ixgbe_set_vmolr(hw, vf, false);
416 } else { 420 } else {
421 ixgbe_set_vf_vlan(adapter, true, 0, vf);
417 ixgbe_set_vmvir(adapter, 0, vf); 422 ixgbe_set_vmvir(adapter, 0, vf);
418 ixgbe_set_vmolr(hw, vf, true); 423 ixgbe_set_vmolr(hw, vf, true);
419 } 424 }
@@ -810,9 +815,9 @@ out:
810 return err; 815 return err;
811} 816}
812 817
813static int ixgbe_link_mbps(int internal_link_speed) 818static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
814{ 819{
815 switch (internal_link_speed) { 820 switch (adapter->link_speed) {
816 case IXGBE_LINK_SPEED_100_FULL: 821 case IXGBE_LINK_SPEED_100_FULL:
817 return 100; 822 return 100;
818 case IXGBE_LINK_SPEED_1GB_FULL: 823 case IXGBE_LINK_SPEED_1GB_FULL:
@@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed)
824 } 829 }
825} 830}
826 831
827static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, 832static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
828 int link_speed)
829{ 833{
830 int rf_dec, rf_int; 834 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
831 u32 bcnrc_val; 835 struct ixgbe_hw *hw = &adapter->hw;
836 u32 bcnrc_val = 0;
837 u16 queue, queues_per_pool;
838 u16 tx_rate = adapter->vfinfo[vf].tx_rate;
839
840 if (tx_rate) {
841 /* start with base link speed value */
842 bcnrc_val = adapter->vf_rate_link_speed;
832 843
833 if (tx_rate != 0) {
834 /* Calculate the rate factor values to set */ 844 /* Calculate the rate factor values to set */
835 rf_int = link_speed / tx_rate; 845 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
836 rf_dec = (link_speed - (rf_int * tx_rate)); 846 bcnrc_val /= tx_rate;
837 rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; 847
838 848 /* clear everything but the rate factor */
839 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 849 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
840 bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) & 850 IXGBE_RTTBCNRC_RF_DEC_MASK;
841 IXGBE_RTTBCNRC_RF_INT_MASK); 851
842 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 852 /* enable the rate scheduler */
843 } else { 853 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
844 bcnrc_val = 0;
845 } 854 }
846 855
847 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
848 /* 856 /*
849 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 857 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
850 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported 858 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
@@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
861 break; 869 break;
862 } 870 }
863 871
864 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 872 /* determine how many queues per pool based on VMDq mask */
873 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
874
875 /* write value for all Tx queues belonging to VF */
876 for (queue = 0; queue < queues_per_pool; queue++) {
877 unsigned int reg_idx = (vf * queues_per_pool) + queue;
878
879 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
880 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
881 }
865} 882}
866 883
867void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) 884void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
868{ 885{
869 int actual_link_speed, i; 886 int i;
870 bool reset_rate = false;
871 887
872 /* VF Tx rate limit was not set */ 888 /* VF Tx rate limit was not set */
873 if (adapter->vf_rate_link_speed == 0) 889 if (!adapter->vf_rate_link_speed)
874 return; 890 return;
875 891
876 actual_link_speed = ixgbe_link_mbps(adapter->link_speed); 892 if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
877 if (actual_link_speed != adapter->vf_rate_link_speed) {
878 reset_rate = true;
879 adapter->vf_rate_link_speed = 0; 893 adapter->vf_rate_link_speed = 0;
880 dev_info(&adapter->pdev->dev, 894 dev_info(&adapter->pdev->dev,
881 "Link speed has been changed. VF Transmit rate " 895 "Link speed has been changed. VF Transmit rate is disabled\n");
882 "is disabled\n");
883 } 896 }
884 897
885 for (i = 0; i < adapter->num_vfs; i++) { 898 for (i = 0; i < adapter->num_vfs; i++) {
886 if (reset_rate) 899 if (!adapter->vf_rate_link_speed)
887 adapter->vfinfo[i].tx_rate = 0; 900 adapter->vfinfo[i].tx_rate = 0;
888 901
889 ixgbe_set_vf_rate_limit(&adapter->hw, i, 902 ixgbe_set_vf_rate_limit(adapter, i);
890 adapter->vfinfo[i].tx_rate,
891 actual_link_speed);
892 } 903 }
893} 904}
894 905
895int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 906int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
896{ 907{
897 struct ixgbe_adapter *adapter = netdev_priv(netdev); 908 struct ixgbe_adapter *adapter = netdev_priv(netdev);
898 struct ixgbe_hw *hw = &adapter->hw; 909 int link_speed;
899 int actual_link_speed; 910
911 /* verify VF is active */
912 if (vf >= adapter->num_vfs)
913 return -EINVAL;
900 914
901 actual_link_speed = ixgbe_link_mbps(adapter->link_speed); 915 /* verify link is up */
902 if ((vf >= adapter->num_vfs) || (!adapter->link_up) || 916 if (!adapter->link_up)
903 (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
904 ((tx_rate != 0) && (tx_rate <= 10)))
905 /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
906 return -EINVAL; 917 return -EINVAL;
907 918
908 adapter->vf_rate_link_speed = actual_link_speed; 919 /* verify we are linked at 10Gbps */
909 adapter->vfinfo[vf].tx_rate = (u16)tx_rate; 920 link_speed = ixgbe_link_mbps(adapter);
910 ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); 921 if (link_speed != 10000)
922 return -EINVAL;
923
924 /* rate limit cannot be less than 10Mbs or greater than link speed */
925 if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
926 return -EINVAL;
927
928 /* store values */
929 adapter->vf_rate_link_speed = link_speed;
930 adapter->vfinfo[vf].tx_rate = tx_rate;
931
932 /* update hardware configuration */
933 ixgbe_set_vf_rate_limit(adapter, vf);
911 934
912 return 0; 935 return 0;
913} 936}
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 418af827b230..da17ccf5c09d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -272,5 +272,6 @@ struct ixgbe_adv_tx_context_desc {
272/* Error Codes */ 272/* Error Codes */
273#define IXGBE_ERR_INVALID_MAC_ADDR -1 273#define IXGBE_ERR_INVALID_MAC_ADDR -1
274#define IXGBE_ERR_RESET_FAILED -2 274#define IXGBE_ERR_RESET_FAILED -2
275#define IXGBE_ERR_INVALID_ARGUMENT -3
275 276
276#endif /* _IXGBEVF_DEFINES_H_ */ 277#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 98cadb0c4dab..383b4e1cd175 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
101 101
102/* Supported Rx Buffer Sizes */ 102/* Supported Rx Buffer Sizes */
103#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ 103#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
104#define IXGBEVF_RXBUFFER_2048 2048 104#define IXGBEVF_RXBUFFER_3K 3072
105#define IXGBEVF_RXBUFFER_7K 7168
106#define IXGBEVF_RXBUFFER_15K 15360
105#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ 107#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
106 108
107#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 109#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -259,6 +261,11 @@ enum ixbgevf_state_t {
259 __IXGBEVF_DOWN 261 __IXGBEVF_DOWN
260}; 262};
261 263
264struct ixgbevf_cb {
265 struct sk_buff *prev;
266};
267#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
268
262enum ixgbevf_boards { 269enum ixgbevf_boards {
263 board_82599_vf, 270 board_82599_vf,
264 board_X540_vf, 271 board_X540_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 6647383c4ddc..0ee9bd4819f4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -263,6 +263,8 @@ cont_loop:
263 tx_ring->total_bytes += total_bytes; 263 tx_ring->total_bytes += total_bytes;
264 tx_ring->total_packets += total_packets; 264 tx_ring->total_packets += total_packets;
265 u64_stats_update_end(&tx_ring->syncp); 265 u64_stats_update_end(&tx_ring->syncp);
266 q_vector->tx.total_bytes += total_bytes;
267 q_vector->tx.total_packets += total_packets;
266 268
267 return count < tx_ring->count; 269 return count < tx_ring->count;
268} 270}
@@ -272,12 +274,10 @@ cont_loop:
272 * @q_vector: structure containing interrupt and ring information 274 * @q_vector: structure containing interrupt and ring information
273 * @skb: packet to send up 275 * @skb: packet to send up
274 * @status: hardware indication of status of receive 276 * @status: hardware indication of status of receive
275 * @rx_ring: rx descriptor ring (for a specific queue) to setup
276 * @rx_desc: rx descriptor 277 * @rx_desc: rx descriptor
277 **/ 278 **/
278static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 279static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
279 struct sk_buff *skb, u8 status, 280 struct sk_buff *skb, u8 status,
280 struct ixgbevf_ring *ring,
281 union ixgbe_adv_rx_desc *rx_desc) 281 union ixgbe_adv_rx_desc *rx_desc)
282{ 282{
283 struct ixgbevf_adapter *adapter = q_vector->adapter; 283 struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -433,11 +433,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
433 433
434 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 434 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
435 skb->next = next_buffer->skb; 435 skb->next = next_buffer->skb;
436 skb->next->prev = skb; 436 IXGBE_CB(skb->next)->prev = skb;
437 adapter->non_eop_descs++; 437 adapter->non_eop_descs++;
438 goto next_desc; 438 goto next_desc;
439 } 439 }
440 440
441 /* we should not be chaining buffers, if we did drop the skb */
442 if (IXGBE_CB(skb)->prev) {
443 do {
444 struct sk_buff *this = skb;
445 skb = IXGBE_CB(skb)->prev;
446 dev_kfree_skb(this);
447 } while (skb);
448 goto next_desc;
449 }
450
441 /* ERR_MASK will only have valid bits if EOP set */ 451 /* ERR_MASK will only have valid bits if EOP set */
442 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 452 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
443 dev_kfree_skb_irq(skb); 453 dev_kfree_skb_irq(skb);
@@ -461,7 +471,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
461 } 471 }
462 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 472 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
463 473
464 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 474 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
465 475
466next_desc: 476next_desc:
467 rx_desc->wb.upper.status_error = 0; 477 rx_desc->wb.upper.status_error = 0;
@@ -490,6 +500,8 @@ next_desc:
490 rx_ring->total_packets += total_rx_packets; 500 rx_ring->total_packets += total_rx_packets;
491 rx_ring->total_bytes += total_rx_bytes; 501 rx_ring->total_bytes += total_rx_bytes;
492 u64_stats_update_end(&rx_ring->syncp); 502 u64_stats_update_end(&rx_ring->syncp);
503 q_vector->rx.total_packets += total_rx_packets;
504 q_vector->rx.total_bytes += total_rx_bytes;
493 505
494 return !!budget; 506 return !!budget;
495} 507}
@@ -716,40 +728,15 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
716 } 728 }
717} 729}
718 730
719static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) 731static irqreturn_t ixgbevf_msix_other(int irq, void *data)
720{ 732{
721 struct ixgbevf_adapter *adapter = data; 733 struct ixgbevf_adapter *adapter = data;
722 struct ixgbe_hw *hw = &adapter->hw; 734 struct ixgbe_hw *hw = &adapter->hw;
723 u32 msg;
724 bool got_ack = false;
725
726 if (!hw->mbx.ops.check_for_ack(hw))
727 got_ack = true;
728 735
729 if (!hw->mbx.ops.check_for_msg(hw)) { 736 hw->mac.get_link_status = 1;
730 hw->mbx.ops.read(hw, &msg, 1);
731 737
732 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 738 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
733 mod_timer(&adapter->watchdog_timer, 739 mod_timer(&adapter->watchdog_timer, jiffies);
734 round_jiffies(jiffies + 1));
735
736 if (msg & IXGBE_VT_MSGTYPE_NACK)
737 pr_warn("Last Request of type %2.2x to PF Nacked\n",
738 msg & 0xFF);
739 /*
740 * Restore the PFSTS bit in case someone is polling for a
741 * return message from the PF
742 */
743 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
744 }
745
746 /*
747 * checking for the ack clears the PFACK bit. Place
748 * it back in the v2p_mailbox cache so that anyone
749 * polling for an ack will not miss it
750 */
751 if (got_ack)
752 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
753 740
754 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 741 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
755 742
@@ -899,10 +886,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
899 } 886 }
900 887
901 err = request_irq(adapter->msix_entries[vector].vector, 888 err = request_irq(adapter->msix_entries[vector].vector,
902 &ixgbevf_msix_mbx, 0, netdev->name, adapter); 889 &ixgbevf_msix_other, 0, netdev->name, adapter);
903 if (err) { 890 if (err) {
904 hw_dbg(&adapter->hw, 891 hw_dbg(&adapter->hw,
905 "request_irq for msix_mbx failed: %d\n", err); 892 "request_irq for msix_other failed: %d\n", err);
906 goto free_queue_irqs; 893 goto free_queue_irqs;
907 } 894 }
908 895
@@ -1057,15 +1044,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1057 1044
1058 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1045 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1059 1046
1060 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1047 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1061 srrctl |= IXGBEVF_RXBUFFER_2048 >> 1048 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1062 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1049
1063 else
1064 srrctl |= rx_ring->rx_buf_len >>
1065 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1066 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1050 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1067} 1051}
1068 1052
1053static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1054{
1055 struct ixgbe_hw *hw = &adapter->hw;
1056 struct net_device *netdev = adapter->netdev;
1057 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1058 int i;
1059 u16 rx_buf_len;
1060
1061 /* notify the PF of our intent to use this size of frame */
1062 ixgbevf_rlpml_set_vf(hw, max_frame);
1063
1064 /* PF will allow an extra 4 bytes past for vlan tagged frames */
1065 max_frame += VLAN_HLEN;
1066
1067 /*
1068 * Make best use of allocation by using all but 1K of a
1069 * power of 2 allocation that will be used for skb->head.
1070 */
1071 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1072 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1073 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1074 else if (max_frame <= IXGBEVF_RXBUFFER_3K)
1075 rx_buf_len = IXGBEVF_RXBUFFER_3K;
1076 else if (max_frame <= IXGBEVF_RXBUFFER_7K)
1077 rx_buf_len = IXGBEVF_RXBUFFER_7K;
1078 else if (max_frame <= IXGBEVF_RXBUFFER_15K)
1079 rx_buf_len = IXGBEVF_RXBUFFER_15K;
1080 else
1081 rx_buf_len = IXGBEVF_MAX_RXBUFFER;
1082
1083 for (i = 0; i < adapter->num_rx_queues; i++)
1084 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1085}
1086
1069/** 1087/**
1070 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1088 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1071 * @adapter: board private structure 1089 * @adapter: board private structure
@@ -1076,18 +1094,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1076{ 1094{
1077 u64 rdba; 1095 u64 rdba;
1078 struct ixgbe_hw *hw = &adapter->hw; 1096 struct ixgbe_hw *hw = &adapter->hw;
1079 struct net_device *netdev = adapter->netdev;
1080 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1081 int i, j; 1097 int i, j;
1082 u32 rdlen; 1098 u32 rdlen;
1083 int rx_buf_len;
1084 1099
1085 /* PSRTYPE must be initialized in 82599 */ 1100 /* PSRTYPE must be initialized in 82599 */
1086 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1101 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1087 if (netdev->mtu <= ETH_DATA_LEN) 1102
1088 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1103 /* set_rx_buffer_len must be called before ring initialization */
1089 else 1104 ixgbevf_set_rx_buffer_len(adapter);
1090 rx_buf_len = ALIGN(max_frame, 1024);
1091 1105
1092 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1106 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1093 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1107 /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1117,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1103 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1117 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1104 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1118 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1105 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1119 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1106 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1107 1120
1108 ixgbevf_configure_srrctl(adapter, j); 1121 ixgbevf_configure_srrctl(adapter, j);
1109 } 1122 }
@@ -1113,36 +1126,47 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1113{ 1126{
1114 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1127 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1115 struct ixgbe_hw *hw = &adapter->hw; 1128 struct ixgbe_hw *hw = &adapter->hw;
1129 int err;
1130
1131 if (!hw->mac.ops.set_vfta)
1132 return -EOPNOTSUPP;
1116 1133
1117 spin_lock(&adapter->mbx_lock); 1134 spin_lock(&adapter->mbx_lock);
1118 1135
1119 /* add VID to filter table */ 1136 /* add VID to filter table */
1120 if (hw->mac.ops.set_vfta) 1137 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1121 hw->mac.ops.set_vfta(hw, vid, 0, true);
1122 1138
1123 spin_unlock(&adapter->mbx_lock); 1139 spin_unlock(&adapter->mbx_lock);
1124 1140
1141 /* translate error return types so error makes sense */
1142 if (err == IXGBE_ERR_MBX)
1143 return -EIO;
1144
1145 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1146 return -EACCES;
1147
1125 set_bit(vid, adapter->active_vlans); 1148 set_bit(vid, adapter->active_vlans);
1126 1149
1127 return 0; 1150 return err;
1128} 1151}
1129 1152
1130static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1153static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1131{ 1154{
1132 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1155 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1133 struct ixgbe_hw *hw = &adapter->hw; 1156 struct ixgbe_hw *hw = &adapter->hw;
1157 int err = -EOPNOTSUPP;
1134 1158
1135 spin_lock(&adapter->mbx_lock); 1159 spin_lock(&adapter->mbx_lock);
1136 1160
1137 /* remove VID from filter table */ 1161 /* remove VID from filter table */
1138 if (hw->mac.ops.set_vfta) 1162 if (hw->mac.ops.set_vfta)
1139 hw->mac.ops.set_vfta(hw, vid, 0, false); 1163 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1140 1164
1141 spin_unlock(&adapter->mbx_lock); 1165 spin_unlock(&adapter->mbx_lock);
1142 1166
1143 clear_bit(vid, adapter->active_vlans); 1167 clear_bit(vid, adapter->active_vlans);
1144 1168
1145 return 0; 1169 return err;
1146} 1170}
1147 1171
1148static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1172static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1308,6 +1332,25 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1308 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1332 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1309} 1333}
1310 1334
1335static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1336{
1337 struct ixgbe_hw *hw = &adapter->hw;
1338 int api[] = { ixgbe_mbox_api_10,
1339 ixgbe_mbox_api_unknown };
1340 int err = 0, idx = 0;
1341
1342 spin_lock(&adapter->mbx_lock);
1343
1344 while (api[idx] != ixgbe_mbox_api_unknown) {
1345 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1346 if (!err)
1347 break;
1348 idx++;
1349 }
1350
1351 spin_unlock(&adapter->mbx_lock);
1352}
1353
1311static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1354static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1312{ 1355{
1313 struct net_device *netdev = adapter->netdev; 1356 struct net_device *netdev = adapter->netdev;
@@ -1315,7 +1358,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1315 int i, j = 0; 1358 int i, j = 0;
1316 int num_rx_rings = adapter->num_rx_queues; 1359 int num_rx_rings = adapter->num_rx_queues;
1317 u32 txdctl, rxdctl; 1360 u32 txdctl, rxdctl;
1318 u32 msg[2];
1319 1361
1320 for (i = 0; i < adapter->num_tx_queues; i++) { 1362 for (i = 0; i < adapter->num_tx_queues; i++) {
1321 j = adapter->tx_ring[i].reg_idx; 1363 j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1398,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1356 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1398 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1357 } 1399 }
1358 1400
1359 msg[0] = IXGBE_VF_SET_LPE;
1360 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1361 hw->mbx.ops.write_posted(hw, msg, 2);
1362
1363 spin_unlock(&adapter->mbx_lock); 1401 spin_unlock(&adapter->mbx_lock);
1364 1402
1365 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1403 clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1371,6 +1409,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1371 ixgbevf_save_reset_stats(adapter); 1409 ixgbevf_save_reset_stats(adapter);
1372 ixgbevf_init_last_counter_stats(adapter); 1410 ixgbevf_init_last_counter_stats(adapter);
1373 1411
1412 hw->mac.get_link_status = 1;
1374 mod_timer(&adapter->watchdog_timer, jiffies); 1413 mod_timer(&adapter->watchdog_timer, jiffies);
1375} 1414}
1376 1415
@@ -1378,6 +1417,8 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
1378{ 1417{
1379 struct ixgbe_hw *hw = &adapter->hw; 1418 struct ixgbe_hw *hw = &adapter->hw;
1380 1419
1420 ixgbevf_negotiate_api(adapter);
1421
1381 ixgbevf_configure(adapter); 1422 ixgbevf_configure(adapter);
1382 1423
1383 ixgbevf_up_complete(adapter); 1424 ixgbevf_up_complete(adapter);
@@ -1419,7 +1460,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1419 rx_buffer_info->skb = NULL; 1460 rx_buffer_info->skb = NULL;
1420 do { 1461 do {
1421 struct sk_buff *this = skb; 1462 struct sk_buff *this = skb;
1422 skb = skb->prev; 1463 skb = IXGBE_CB(skb)->prev;
1423 dev_kfree_skb(this); 1464 dev_kfree_skb(this);
1424 } while (skb); 1465 } while (skb);
1425 } 1466 }
@@ -1547,8 +1588,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
1547 1588
1548void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1589void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1549{ 1590{
1550 struct ixgbe_hw *hw = &adapter->hw;
1551
1552 WARN_ON(in_interrupt()); 1591 WARN_ON(in_interrupt());
1553 1592
1554 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1593 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -1561,10 +1600,8 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1561 * watchdog task will continue to schedule reset tasks until 1600 * watchdog task will continue to schedule reset tasks until
1562 * the PF is up and running. 1601 * the PF is up and running.
1563 */ 1602 */
1564 if (!hw->mac.ops.reset_hw(hw)) { 1603 ixgbevf_down(adapter);
1565 ixgbevf_down(adapter); 1604 ixgbevf_up(adapter);
1566 ixgbevf_up(adapter);
1567 }
1568 1605
1569 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1606 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1570} 1607}
@@ -1867,6 +1904,22 @@ err_set_interrupt:
1867} 1904}
1868 1905
1869/** 1906/**
1907 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1908 * @adapter: board private structure to clear interrupt scheme on
1909 *
1910 * We go through and clear interrupt specific resources and reset the structure
1911 * to pre-load conditions
1912 **/
1913static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1914{
1915 adapter->num_tx_queues = 0;
1916 adapter->num_rx_queues = 0;
1917
1918 ixgbevf_free_q_vectors(adapter);
1919 ixgbevf_reset_interrupt_capability(adapter);
1920}
1921
1922/**
1870 * ixgbevf_sw_init - Initialize general software structures 1923 * ixgbevf_sw_init - Initialize general software structures
1871 * (struct ixgbevf_adapter) 1924 * (struct ixgbevf_adapter)
1872 * @adapter: board private structure to initialize 1925 * @adapter: board private structure to initialize
@@ -2351,6 +2404,8 @@ static int ixgbevf_open(struct net_device *netdev)
2351 } 2404 }
2352 } 2405 }
2353 2406
2407 ixgbevf_negotiate_api(adapter);
2408
2354 /* allocate transmit descriptors */ 2409 /* allocate transmit descriptors */
2355 err = ixgbevf_setup_all_tx_resources(adapter); 2410 err = ixgbevf_setup_all_tx_resources(adapter);
2356 if (err) 2411 if (err)
@@ -2860,10 +2915,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2860static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 2915static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2861{ 2916{
2862 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2917 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2863 struct ixgbe_hw *hw = &adapter->hw;
2864 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2918 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2865 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 2919 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2866 u32 msg[2];
2867 2920
2868 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 2921 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2869 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 2922 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2930,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2877 /* must set new MTU before calling down or up */ 2930 /* must set new MTU before calling down or up */
2878 netdev->mtu = new_mtu; 2931 netdev->mtu = new_mtu;
2879 2932
2880 if (!netif_running(netdev)) {
2881 msg[0] = IXGBE_VF_SET_LPE;
2882 msg[1] = max_frame;
2883 hw->mbx.ops.write_posted(hw, msg, 2);
2884 }
2885
2886 if (netif_running(netdev)) 2933 if (netif_running(netdev))
2887 ixgbevf_reinit_locked(adapter); 2934 ixgbevf_reinit_locked(adapter);
2888 2935
2889 return 0; 2936 return 0;
2890} 2937}
2891 2938
2892static void ixgbevf_shutdown(struct pci_dev *pdev) 2939static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
2893{ 2940{
2894 struct net_device *netdev = pci_get_drvdata(pdev); 2941 struct net_device *netdev = pci_get_drvdata(pdev);
2895 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2942 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2943#ifdef CONFIG_PM
2944 int retval = 0;
2945#endif
2896 2946
2897 netif_device_detach(netdev); 2947 netif_device_detach(netdev);
2898 2948
2899 if (netif_running(netdev)) { 2949 if (netif_running(netdev)) {
2950 rtnl_lock();
2900 ixgbevf_down(adapter); 2951 ixgbevf_down(adapter);
2901 ixgbevf_free_irq(adapter); 2952 ixgbevf_free_irq(adapter);
2902 ixgbevf_free_all_tx_resources(adapter); 2953 ixgbevf_free_all_tx_resources(adapter);
2903 ixgbevf_free_all_rx_resources(adapter); 2954 ixgbevf_free_all_rx_resources(adapter);
2955 rtnl_unlock();
2904 } 2956 }
2905 2957
2906 pci_save_state(pdev); 2958 ixgbevf_clear_interrupt_scheme(adapter);
2907 2959
2960#ifdef CONFIG_PM
2961 retval = pci_save_state(pdev);
2962 if (retval)
2963 return retval;
2964
2965#endif
2908 pci_disable_device(pdev); 2966 pci_disable_device(pdev);
2967
2968 return 0;
2969}
2970
2971#ifdef CONFIG_PM
2972static int ixgbevf_resume(struct pci_dev *pdev)
2973{
2974 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
2975 struct net_device *netdev = adapter->netdev;
2976 u32 err;
2977
2978 pci_set_power_state(pdev, PCI_D0);
2979 pci_restore_state(pdev);
2980 /*
2981 * pci_restore_state clears dev->state_saved so call
2982 * pci_save_state to restore it.
2983 */
2984 pci_save_state(pdev);
2985
2986 err = pci_enable_device_mem(pdev);
2987 if (err) {
2988 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2989 return err;
2990 }
2991 pci_set_master(pdev);
2992
2993 rtnl_lock();
2994 err = ixgbevf_init_interrupt_scheme(adapter);
2995 rtnl_unlock();
2996 if (err) {
2997 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
2998 return err;
2999 }
3000
3001 ixgbevf_reset(adapter);
3002
3003 if (netif_running(netdev)) {
3004 err = ixgbevf_open(netdev);
3005 if (err)
3006 return err;
3007 }
3008
3009 netif_device_attach(netdev);
3010
3011 return err;
3012}
3013
3014#endif /* CONFIG_PM */
3015static void ixgbevf_shutdown(struct pci_dev *pdev)
3016{
3017 ixgbevf_suspend(pdev, PMSG_SUSPEND);
2909} 3018}
2910 3019
2911static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3020static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3055,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
2946 return stats; 3055 return stats;
2947} 3056}
2948 3057
2949static const struct net_device_ops ixgbe_netdev_ops = { 3058static const struct net_device_ops ixgbevf_netdev_ops = {
2950 .ndo_open = ixgbevf_open, 3059 .ndo_open = ixgbevf_open,
2951 .ndo_stop = ixgbevf_close, 3060 .ndo_stop = ixgbevf_close,
2952 .ndo_start_xmit = ixgbevf_xmit_frame, 3061 .ndo_start_xmit = ixgbevf_xmit_frame,
@@ -2962,7 +3071,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
2962 3071
2963static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3072static void ixgbevf_assign_netdev_ops(struct net_device *dev)
2964{ 3073{
2965 dev->netdev_ops = &ixgbe_netdev_ops; 3074 dev->netdev_ops = &ixgbevf_netdev_ops;
2966 ixgbevf_set_ethtool_ops(dev); 3075 ixgbevf_set_ethtool_ops(dev);
2967 dev->watchdog_timeo = 5 * HZ; 3076 dev->watchdog_timeo = 5 * HZ;
2968} 3077}
@@ -3131,6 +3240,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3131 return 0; 3240 return 0;
3132 3241
3133err_register: 3242err_register:
3243 ixgbevf_clear_interrupt_scheme(adapter);
3134err_sw_init: 3244err_sw_init:
3135 ixgbevf_reset_interrupt_capability(adapter); 3245 ixgbevf_reset_interrupt_capability(adapter);
3136 iounmap(hw->hw_addr); 3246 iounmap(hw->hw_addr);
@@ -3168,6 +3278,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3168 if (netdev->reg_state == NETREG_REGISTERED) 3278 if (netdev->reg_state == NETREG_REGISTERED)
3169 unregister_netdev(netdev); 3279 unregister_netdev(netdev);
3170 3280
3281 ixgbevf_clear_interrupt_scheme(adapter);
3171 ixgbevf_reset_interrupt_capability(adapter); 3282 ixgbevf_reset_interrupt_capability(adapter);
3172 3283
3173 iounmap(adapter->hw.hw_addr); 3284 iounmap(adapter->hw.hw_addr);
@@ -3267,6 +3378,11 @@ static struct pci_driver ixgbevf_driver = {
3267 .id_table = ixgbevf_pci_tbl, 3378 .id_table = ixgbevf_pci_tbl,
3268 .probe = ixgbevf_probe, 3379 .probe = ixgbevf_probe,
3269 .remove = __devexit_p(ixgbevf_remove), 3380 .remove = __devexit_p(ixgbevf_remove),
3381#ifdef CONFIG_PM
3382 /* Power Management Hooks */
3383 .suspend = ixgbevf_suspend,
3384 .resume = ixgbevf_resume,
3385#endif
3270 .shutdown = ixgbevf_shutdown, 3386 .shutdown = ixgbevf_shutdown,
3271 .err_handler = &ixgbevf_err_handler 3387 .err_handler = &ixgbevf_err_handler
3272}; 3388};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 9c955900fe64..d5028ddf4b31 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -86,14 +86,17 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
86static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 86static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
87{ 87{
88 struct ixgbe_mbx_info *mbx = &hw->mbx; 88 struct ixgbe_mbx_info *mbx = &hw->mbx;
89 s32 ret_val = IXGBE_ERR_MBX; 89 s32 ret_val = -IXGBE_ERR_MBX;
90
91 if (!mbx->ops.read)
92 goto out;
90 93
91 ret_val = ixgbevf_poll_for_msg(hw); 94 ret_val = ixgbevf_poll_for_msg(hw);
92 95
93 /* if ack received read message, otherwise we timed out */ 96 /* if ack received read message, otherwise we timed out */
94 if (!ret_val) 97 if (!ret_val)
95 ret_val = mbx->ops.read(hw, msg, size); 98 ret_val = mbx->ops.read(hw, msg, size);
96 99out:
97 return ret_val; 100 return ret_val;
98} 101}
99 102
@@ -109,7 +112,11 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
109static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 112static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
110{ 113{
111 struct ixgbe_mbx_info *mbx = &hw->mbx; 114 struct ixgbe_mbx_info *mbx = &hw->mbx;
112 s32 ret_val; 115 s32 ret_val = -IXGBE_ERR_MBX;
116
117 /* exit if either we can't write or there isn't a defined timeout */
118 if (!mbx->ops.write || !mbx->timeout)
119 goto out;
113 120
114 /* send msg */ 121 /* send msg */
115 ret_val = mbx->ops.write(hw, msg, size); 122 ret_val = mbx->ops.write(hw, msg, size);
@@ -117,7 +124,7 @@ static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
117 /* if msg sent wait until we receive an ack */ 124 /* if msg sent wait until we receive an ack */
118 if (!ret_val) 125 if (!ret_val)
119 ret_val = ixgbevf_poll_for_ack(hw); 126 ret_val = ixgbevf_poll_for_ack(hw);
120 127out:
121 return ret_val; 128 return ret_val;
122} 129}
123 130
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index cf9131c5c115..946ce86f337f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -76,12 +76,29 @@
76/* bits 23:16 are used for exra info for certain messages */ 76/* bits 23:16 are used for exra info for certain messages */
77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) 77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
78 78
79/* definitions to support mailbox API version negotiation */
80
81/*
82 * each element denotes a version of the API; existing numbers may not
83 * change; any additions must go at the end
84 */
85enum ixgbe_pfvf_api_rev {
86 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
87 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
88 /* This value should always be last */
89 ixgbe_mbox_api_unknown, /* indicates that API version is not known */
90};
91
92/* mailbox API, legacy requests */
79#define IXGBE_VF_RESET 0x01 /* VF requests reset */ 93#define IXGBE_VF_RESET 0x01 /* VF requests reset */
80#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 94#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 95#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 96#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 97
84#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ 98/* mailbox API, version 1.0 VF requests */
99#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
100#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
101#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
85 102
86/* length of permanent address message returned from PF */ 103/* length of permanent address message returned from PF */
87#define IXGBE_VF_PERMADDR_MSG_LEN 4 104#define IXGBE_VF_PERMADDR_MSG_LEN 4
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index ec89b86f7ca4..0c7447e6fcc8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -79,6 +79,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
79 /* Call adapter stop to disable tx/rx and clear interrupts */ 79 /* Call adapter stop to disable tx/rx and clear interrupts */
80 hw->mac.ops.stop_adapter(hw); 80 hw->mac.ops.stop_adapter(hw);
81 81
82 /* reset the api version */
83 hw->api_version = ixgbe_mbox_api_10;
84
82 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); 85 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
83 IXGBE_WRITE_FLUSH(hw); 86 IXGBE_WRITE_FLUSH(hw);
84 87
@@ -97,7 +100,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
97 msgbuf[0] = IXGBE_VF_RESET; 100 msgbuf[0] = IXGBE_VF_RESET;
98 mbx->ops.write_posted(hw, msgbuf, 1); 101 mbx->ops.write_posted(hw, msgbuf, 1);
99 102
100 msleep(10); 103 mdelay(10);
101 104
102 /* set our "perm_addr" based on info provided by PF */ 105 /* set our "perm_addr" based on info provided by PF */
103 /* also set up the mc_filter_type which is piggy backed 106 /* also set up the mc_filter_type which is piggy backed
@@ -346,16 +349,32 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
346static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, 349static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
347 bool vlan_on) 350 bool vlan_on)
348{ 351{
352 struct ixgbe_mbx_info *mbx = &hw->mbx;
349 u32 msgbuf[2]; 353 u32 msgbuf[2];
354 s32 err;
350 355
351 msgbuf[0] = IXGBE_VF_SET_VLAN; 356 msgbuf[0] = IXGBE_VF_SET_VLAN;
352 msgbuf[1] = vlan; 357 msgbuf[1] = vlan;
353 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 358 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
354 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; 359 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
355 360
356 ixgbevf_write_msg_read_ack(hw, msgbuf, 2); 361 err = mbx->ops.write_posted(hw, msgbuf, 2);
362 if (err)
363 goto mbx_err;
357 364
358 return 0; 365 err = mbx->ops.read_posted(hw, msgbuf, 2);
366 if (err)
367 goto mbx_err;
368
369 /* remove extra bits from the message */
370 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
371 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
372
373 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
374 err = IXGBE_ERR_INVALID_ARGUMENT;
375
376mbx_err:
377 return err;
359} 378}
360 379
361/** 380/**
@@ -389,20 +408,23 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
389 bool *link_up, 408 bool *link_up,
390 bool autoneg_wait_to_complete) 409 bool autoneg_wait_to_complete)
391{ 410{
411 struct ixgbe_mbx_info *mbx = &hw->mbx;
412 struct ixgbe_mac_info *mac = &hw->mac;
413 s32 ret_val = 0;
392 u32 links_reg; 414 u32 links_reg;
415 u32 in_msg = 0;
393 416
394 if (!(hw->mbx.ops.check_for_rst(hw))) { 417 /* If we were hit with a reset drop the link */
395 *link_up = false; 418 if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
396 *speed = 0; 419 mac->get_link_status = true;
397 return -1;
398 }
399 420
400 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 421 if (!mac->get_link_status)
422 goto out;
401 423
402 if (links_reg & IXGBE_LINKS_UP) 424 /* if link status is down no point in checking to see if pf is up */
403 *link_up = true; 425 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
404 else 426 if (!(links_reg & IXGBE_LINKS_UP))
405 *link_up = false; 427 goto out;
406 428
407 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 429 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
408 case IXGBE_LINKS_SPEED_10G_82599: 430 case IXGBE_LINKS_SPEED_10G_82599:
@@ -416,7 +438,79 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
416 break; 438 break;
417 } 439 }
418 440
419 return 0; 441 /* if the read failed it could just be a mailbox collision, best wait
442 * until we are called again and don't report an error */
443 if (mbx->ops.read(hw, &in_msg, 1))
444 goto out;
445
446 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
447 /* msg is not CTS and is NACK we must have lost CTS status */
448 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
449 ret_val = -1;
450 goto out;
451 }
452
453 /* the pf is talking, if we timed out in the past we reinit */
454 if (!mbx->timeout) {
455 ret_val = -1;
456 goto out;
457 }
458
459 /* if we passed all the tests above then the link is up and we no
460 * longer need to check for link */
461 mac->get_link_status = false;
462
463out:
464 *link_up = !mac->get_link_status;
465 return ret_val;
466}
467
468/**
469 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
470 * @hw: pointer to the HW structure
471 * @max_size: value to assign to max frame size
472 **/
473void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
474{
475 u32 msgbuf[2];
476
477 msgbuf[0] = IXGBE_VF_SET_LPE;
478 msgbuf[1] = max_size;
479 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
480}
481
482/**
483 * ixgbevf_negotiate_api_version - Negotiate supported API version
484 * @hw: pointer to the HW structure
485 * @api: integer containing requested API version
486 **/
487int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
488{
489 int err;
490 u32 msg[3];
491
492 /* Negotiate the mailbox API version */
493 msg[0] = IXGBE_VF_API_NEGOTIATE;
494 msg[1] = api;
495 msg[2] = 0;
496 err = hw->mbx.ops.write_posted(hw, msg, 3);
497
498 if (!err)
499 err = hw->mbx.ops.read_posted(hw, msg, 3);
500
501 if (!err) {
502 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
503
504 /* Store value and return 0 on success */
505 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
506 hw->api_version = api;
507 return 0;
508 }
509
510 err = IXGBE_ERR_INVALID_ARGUMENT;
511 }
512
513 return err;
420} 514}
421 515
422static const struct ixgbe_mac_operations ixgbevf_mac_ops = { 516static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 25c951daee5d..47f11a584d8c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -137,6 +137,8 @@ struct ixgbe_hw {
137 137
138 u8 revision_id; 138 u8 revision_id;
139 bool adapter_stopped; 139 bool adapter_stopped;
140
141 int api_version;
140}; 142};
141 143
142struct ixgbevf_hw_stats { 144struct ixgbevf_hw_stats {
@@ -170,5 +172,7 @@ struct ixgbevf_info {
170 const struct ixgbe_mac_operations *mac_ops; 172 const struct ixgbe_mac_operations *mac_ops;
171}; 173};
172 174
175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
173#endif /* __IXGBE_VF_H__ */ 177#endif /* __IXGBE_VF_H__ */
174 178
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 10bba09c44ea..c10e3a6de09f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -712,10 +712,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
712 if (bounce) 712 if (bounce)
713 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 713 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
714 714
715 /* Run destructor before passing skb to HW */
716 if (likely(!skb_shared(skb)))
717 skb_orphan(skb);
718
719 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { 715 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
720 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); 716 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
721 op_own |= htonl((bf_index & 0xffff) << 8); 717 op_own |= htonl((bf_index & 0xffff) << 8);
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
deleted file mode 100644
index db5285befe2a..000000000000
--- a/drivers/net/ethernet/mipsnet.c
+++ /dev/null
@@ -1,345 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 */
6
7#include <linux/init.h>
8#include <linux/interrupt.h>
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <linux/etherdevice.h>
14#include <linux/platform_device.h>
15#include <asm/mips-boards/simint.h>
16
17#define MIPSNET_VERSION "2007-11-17"
18
19/*
20 * Net status/control block as seen by sw in the core.
21 */
22struct mipsnet_regs {
23 /*
24 * Device info for probing, reads as MIPSNET%d where %d is some
25 * form of version.
26 */
27 u64 devId; /*0x00 */
28
29 /*
30 * read only busy flag.
31 * Set and cleared by the Net Device to indicate that an rx or a tx
32 * is in progress.
33 */
34 u32 busy; /*0x08 */
35
36 /*
37 * Set by the Net Device.
38 * The device will set it once data has been received.
39 * The value is the number of bytes that should be read from
40 * rxDataBuffer. The value will decrease till 0 until all the data
41 * from rxDataBuffer has been read.
42 */
43 u32 rxDataCount; /*0x0c */
44#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
45
46 /*
47 * Settable from the MIPS core, cleared by the Net Device.
48 * The core should set the number of bytes it wants to send,
49 * then it should write those bytes of data to txDataBuffer.
50 * The device will clear txDataCount has been processed (not
51 * necessarily sent).
52 */
53 u32 txDataCount; /*0x10 */
54
55 /*
56 * Interrupt control
57 *
58 * Used to clear the interrupted generated by this dev.
59 * Write a 1 to clear the interrupt. (except bit31).
60 *
61 * Bit0 is set if it was a tx-done interrupt.
62 * Bit1 is set when new rx-data is available.
63 * Until this bit is cleared there will be no other RXs.
64 *
65 * Bit31 is used for testing, it clears after a read.
66 * Writing 1 to this bit will cause an interrupt to be generated.
67 * To clear the test interrupt, write 0 to this register.
68 */
69 u32 interruptControl; /*0x14 */
70#define MIPSNET_INTCTL_TXDONE (1u << 0)
71#define MIPSNET_INTCTL_RXDONE (1u << 1)
72#define MIPSNET_INTCTL_TESTBIT (1u << 31)
73
74 /*
75 * Readonly core-specific interrupt info for the device to signal
76 * the core. The meaning of the contents of this field might change.
77 */
78 /* XXX: the whole memIntf interrupt scheme is messy: the device
79 * should have no control what so ever of what VPE/register set is
80 * being used.
81 * The MemIntf should only expose interrupt lines, and something in
82 * the config should be responsible for the line<->core/vpe bindings.
83 */
84 u32 interruptInfo; /*0x18 */
85
86 /*
87 * This is where the received data is read out.
88 * There is more data to read until rxDataReady is 0.
89 * Only 1 byte at this regs offset is used.
90 */
91 u32 rxDataBuffer; /*0x1c */
92
93 /*
94 * This is where the data to transmit is written.
95 * Data should be written for the amount specified in the
96 * txDataCount register.
97 * Only 1 byte at this regs offset is used.
98 */
99 u32 txDataBuffer; /*0x20 */
100};
101
102#define regaddr(dev, field) \
103 (dev->base_addr + offsetof(struct mipsnet_regs, field))
104
105static char mipsnet_string[] = "mipsnet";
106
107/*
108 * Copy data from the MIPSNET rx data port
109 */
110static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
111 int len)
112{
113 for (; len > 0; len--, kdata++)
114 *kdata = inb(regaddr(dev, rxDataBuffer));
115
116 return inl(regaddr(dev, rxDataCount));
117}
118
119static inline void mipsnet_put_todevice(struct net_device *dev,
120 struct sk_buff *skb)
121{
122 int count_to_go = skb->len;
123 char *buf_ptr = skb->data;
124
125 outl(skb->len, regaddr(dev, txDataCount));
126
127 for (; count_to_go; buf_ptr++, count_to_go--)
128 outb(*buf_ptr, regaddr(dev, txDataBuffer));
129
130 dev->stats.tx_packets++;
131 dev->stats.tx_bytes += skb->len;
132
133 dev_kfree_skb(skb);
134}
135
136static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
137{
138 /*
139 * Only one packet at a time. Once TXDONE interrupt is serviced, the
140 * queue will be restarted.
141 */
142 netif_stop_queue(dev);
143 mipsnet_put_todevice(dev, skb);
144
145 return NETDEV_TX_OK;
146}
147
148static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
149{
150 struct sk_buff *skb;
151
152 if (!len)
153 return len;
154
155 skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
156 if (!skb) {
157 dev->stats.rx_dropped++;
158 return -ENOMEM;
159 }
160
161 skb_reserve(skb, NET_IP_ALIGN);
162 if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
163 return -EFAULT;
164
165 skb->protocol = eth_type_trans(skb, dev);
166 skb->ip_summed = CHECKSUM_UNNECESSARY;
167
168 netif_rx(skb);
169
170 dev->stats.rx_packets++;
171 dev->stats.rx_bytes += len;
172
173 return len;
174}
175
176static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
177{
178 struct net_device *dev = dev_id;
179 u32 int_flags;
180 irqreturn_t ret = IRQ_NONE;
181
182 if (irq != dev->irq)
183 goto out_badirq;
184
185 /* TESTBIT is cleared on read. */
186 int_flags = inl(regaddr(dev, interruptControl));
187 if (int_flags & MIPSNET_INTCTL_TESTBIT) {
188 /* TESTBIT takes effect after a write with 0. */
189 outl(0, regaddr(dev, interruptControl));
190 ret = IRQ_HANDLED;
191 } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
192 /* Only one packet at a time, we are done. */
193 dev->stats.tx_packets++;
194 netif_wake_queue(dev);
195 outl(MIPSNET_INTCTL_TXDONE,
196 regaddr(dev, interruptControl));
197 ret = IRQ_HANDLED;
198 } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
199 mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
200 outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
201 ret = IRQ_HANDLED;
202 }
203 return ret;
204
205out_badirq:
206 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
207 dev->name, __func__, irq);
208 return ret;
209}
210
211static int mipsnet_open(struct net_device *dev)
212{
213 int err;
214
215 err = request_irq(dev->irq, mipsnet_interrupt,
216 IRQF_SHARED, dev->name, (void *) dev);
217 if (err) {
218 release_region(dev->base_addr, sizeof(struct mipsnet_regs));
219 return err;
220 }
221
222 netif_start_queue(dev);
223
224 /* test interrupt handler */
225 outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
226
227 return 0;
228}
229
230static int mipsnet_close(struct net_device *dev)
231{
232 netif_stop_queue(dev);
233 free_irq(dev->irq, dev);
234 return 0;
235}
236
237static void mipsnet_set_mclist(struct net_device *dev)
238{
239}
240
241static const struct net_device_ops mipsnet_netdev_ops = {
242 .ndo_open = mipsnet_open,
243 .ndo_stop = mipsnet_close,
244 .ndo_start_xmit = mipsnet_xmit,
245 .ndo_set_rx_mode = mipsnet_set_mclist,
246 .ndo_change_mtu = eth_change_mtu,
247 .ndo_validate_addr = eth_validate_addr,
248 .ndo_set_mac_address = eth_mac_addr,
249};
250
251static int __devinit mipsnet_probe(struct platform_device *dev)
252{
253 struct net_device *netdev;
254 int err;
255
256 netdev = alloc_etherdev(0);
257 if (!netdev) {
258 err = -ENOMEM;
259 goto out;
260 }
261
262 platform_set_drvdata(dev, netdev);
263
264 netdev->netdev_ops = &mipsnet_netdev_ops;
265
266 /*
267 * TODO: probe for these or load them from PARAM
268 */
269 netdev->base_addr = 0x4200;
270 netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
271 inl(regaddr(netdev, interruptInfo));
272
273 /* Get the io region now, get irq on open() */
274 if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
275 "mipsnet")) {
276 err = -EBUSY;
277 goto out_free_netdev;
278 }
279
280 /*
281 * Lacking any better mechanism to allocate a MAC address we use a
282 * random one ...
283 */
284 eth_hw_addr_random(netdev);
285
286 err = register_netdev(netdev);
287 if (err) {
288 printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
289 goto out_free_region;
290 }
291
292 return 0;
293
294out_free_region:
295 release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
296
297out_free_netdev:
298 free_netdev(netdev);
299
300out:
301 return err;
302}
303
304static int __devexit mipsnet_device_remove(struct platform_device *device)
305{
306 struct net_device *dev = platform_get_drvdata(device);
307
308 unregister_netdev(dev);
309 release_region(dev->base_addr, sizeof(struct mipsnet_regs));
310 free_netdev(dev);
311 platform_set_drvdata(device, NULL);
312
313 return 0;
314}
315
316static struct platform_driver mipsnet_driver = {
317 .driver = {
318 .name = mipsnet_string,
319 .owner = THIS_MODULE,
320 },
321 .probe = mipsnet_probe,
322 .remove = __devexit_p(mipsnet_device_remove),
323};
324
325static int __init mipsnet_init_module(void)
326{
327 int err;
328
329 printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
330 "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
331
332 err = platform_driver_register(&mipsnet_driver);
333 if (err)
334 printk(KERN_ERR "Driver registration failed\n");
335
336 return err;
337}
338
339static void __exit mipsnet_exit_module(void)
340{
341 platform_driver_unregister(&mipsnet_driver);
342}
343
344module_init(mipsnet_init_module);
345module_exit(mipsnet_exit_module);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f45def01a98e..876beceaf2d7 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3409,7 +3409,7 @@ set_speed:
3409 3409
3410 pause_flags = 0; 3410 pause_flags = 0;
3411 /* setup pause frame */ 3411 /* setup pause frame */
3412 if (np->duplex != 0) { 3412 if (netif_running(dev) && (np->duplex != 0)) {
3413 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3413 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3414 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3414 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3415 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 3415 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4435 4435
4436 regs->version = FORCEDETH_REGS_VER; 4436 regs->version = FORCEDETH_REGS_VER;
4437 spin_lock_irq(&np->lock); 4437 spin_lock_irq(&np->lock);
4438 for (i = 0; i <= np->register_size/sizeof(u32); i++) 4438 for (i = 0; i < np->register_size/sizeof(u32); i++)
4439 rbuf[i] = readl(base + i*sizeof(u32)); 4439 rbuf[i] = readl(base + i*sizeof(u32));
4440 spin_unlock_irq(&np->lock); 4440 spin_unlock_irq(&np->lock);
4441} 4441}
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
5455 5455
5456 netif_stop_queue(dev); 5456 netif_stop_queue(dev);
5457 spin_lock_irq(&np->lock); 5457 spin_lock_irq(&np->lock);
5458 nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5458 nv_stop_rxtx(dev); 5459 nv_stop_rxtx(dev);
5459 nv_txrx_reset(dev); 5460 nv_txrx_reset(dev);
5460 5461
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5904 goto out_error; 5905 goto out_error;
5905 } 5906 }
5906 5907
5908 netif_carrier_off(dev);
5909
5910 /* Some NICs freeze when TX pause is enabled while NIC is
5911 * down, and this stays across warm reboots. The sequence
5912 * below should be enough to recover from that state.
5913 */
5914 nv_update_pause(dev, 0);
5915 nv_start_tx(dev);
5916 nv_stop_tx(dev);
5917
5907 if (id->driver_data & DEV_HAS_VLAN) 5918 if (id->driver_data & DEV_HAS_VLAN)
5908 nv_vlan_mode(dev, dev->features); 5919 nv_vlan_mode(dev, dev->features);
5909 5920
5910 netif_carrier_off(dev);
5911
5912 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5921 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5913 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 5922 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5914 5923
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a7cc56007b33..e7ff886e8047 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -77,7 +77,7 @@
77static const int multicast_filter_limit = 32; 77static const int multicast_filter_limit = 32;
78 78
79#define MAX_READ_REQUEST_SHIFT 12 79#define MAX_READ_REQUEST_SHIFT 12
80#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 80#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ 81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 83
@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_DLINK, 0x4300,
291 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
290 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 292 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
291 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, 293 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
292 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, 294 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index fb3cbc27063c..25906c1d1b15 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -34,3 +34,10 @@ config SFC_SRIOV
34 This enables support for the SFC9000 I/O Virtualization 34 This enables support for the SFC9000 I/O Virtualization
35 features, allowing accelerated network performance in 35 features, allowing accelerated network performance in
36 virtualized environments. 36 virtualized environments.
37config SFC_PTP
38 bool "Solarflare SFC9000-family PTP support"
39 depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
40 default y
41 ---help---
42 This enables support for the Precision Time Protocol (PTP)
43 on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ea1f8db57318..e11f2ecf69d9 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -5,5 +5,6 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
5 mcdi.o mcdi_phy.o mcdi_mon.o 5 mcdi.o mcdi_phy.o mcdi_mon.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o 7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
8sfc-$(CONFIG_SFC_PTP) += ptp.o
8 9
9obj-$(CONFIG_SFC) += sfc.o 10obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index b26a954c27fc..5400a33f254f 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -120,10 +120,10 @@ typedef union efx_oword {
120 * [0,high-low), with garbage in bits [high-low+1,...). 120 * [0,high-low), with garbage in bits [high-low+1,...).
121 */ 121 */
122#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ 122#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
123 (((low > max) || (high < min)) ? 0 : \ 123 ((low) > (max) || (high) < (min) ? 0 : \
124 ((low > min) ? \ 124 (low) > (min) ? \
125 ((native_element) >> (low - min)) : \ 125 (native_element) >> ((low) - (min)) : \
126 ((native_element) << (min - low)))) 126 (native_element) << ((min) - (low)))
127 127
128/* 128/*
129 * Extract bit field portion [low,high) from the 64-bit little-endian 129 * Extract bit field portion [low,high) from the 64-bit little-endian
@@ -142,27 +142,27 @@ typedef union efx_oword {
142#define EFX_EXTRACT_OWORD64(oword, low, high) \ 142#define EFX_EXTRACT_OWORD64(oword, low, high) \
143 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ 143 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
144 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ 144 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
145 EFX_MASK64(high + 1 - low)) 145 EFX_MASK64((high) + 1 - (low)))
146 146
147#define EFX_EXTRACT_QWORD64(qword, low, high) \ 147#define EFX_EXTRACT_QWORD64(qword, low, high) \
148 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ 148 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
149 EFX_MASK64(high + 1 - low)) 149 EFX_MASK64((high) + 1 - (low)))
150 150
151#define EFX_EXTRACT_OWORD32(oword, low, high) \ 151#define EFX_EXTRACT_OWORD32(oword, low, high) \
152 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ 152 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
153 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ 153 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
154 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ 154 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
155 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ 155 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
156 EFX_MASK32(high + 1 - low)) 156 EFX_MASK32((high) + 1 - (low)))
157 157
158#define EFX_EXTRACT_QWORD32(qword, low, high) \ 158#define EFX_EXTRACT_QWORD32(qword, low, high) \
159 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ 159 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
160 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ 160 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
161 EFX_MASK32(high + 1 - low)) 161 EFX_MASK32((high) + 1 - (low)))
162 162
163#define EFX_EXTRACT_DWORD(dword, low, high) \ 163#define EFX_EXTRACT_DWORD(dword, low, high) \
164 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ 164 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
165 EFX_MASK32(high + 1 - low)) 165 EFX_MASK32((high) + 1 - (low)))
166 166
167#define EFX_OWORD_FIELD64(oword, field) \ 167#define EFX_OWORD_FIELD64(oword, field) \
168 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ 168 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
@@ -442,10 +442,10 @@ typedef union efx_oword {
442 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) 442 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
443 443
444#define EFX_INPLACE_MASK64(min, max, low, high) \ 444#define EFX_INPLACE_MASK64(min, max, low, high) \
445 EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low)) 445 EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
446 446
447#define EFX_INPLACE_MASK32(min, max, low, high) \ 447#define EFX_INPLACE_MASK32(min, max, low, high) \
448 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low)) 448 EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
449 449
450#define EFX_SET_OWORD64(oword, low, high, value) do { \ 450#define EFX_SET_OWORD64(oword, low, high, value) do { \
451 (oword).u64[0] = (((oword).u64[0] \ 451 (oword).u64[0] = (((oword).u64[0] \
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65a8d49106a4..96bd980e828d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
202 202
203#define EFX_ASSERT_RESET_SERIALISED(efx) \ 203#define EFX_ASSERT_RESET_SERIALISED(efx) \
204 do { \ 204 do { \
205 if ((efx->state == STATE_RUNNING) || \ 205 if ((efx->state == STATE_READY) || \
206 (efx->state == STATE_DISABLED)) \ 206 (efx->state == STATE_DISABLED)) \
207 ASSERT_RTNL(); \ 207 ASSERT_RTNL(); \
208 } while (0) 208 } while (0)
209 209
210static int efx_check_disabled(struct efx_nic *efx)
211{
212 if (efx->state == STATE_DISABLED) {
213 netif_err(efx, drv, efx->net_dev,
214 "device is disabled due to earlier errors\n");
215 return -EIO;
216 }
217 return 0;
218}
219
210/************************************************************************** 220/**************************************************************************
211 * 221 *
212 * Event queue processing 222 * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
630 efx->rx_buffer_order = get_order(efx->rx_buffer_len + 640 efx->rx_buffer_order = get_order(efx->rx_buffer_len +
631 sizeof(struct efx_rx_page_state)); 641 sizeof(struct efx_rx_page_state));
632 642
643 /* We must keep at least one descriptor in a TX ring empty.
644 * We could avoid this when the queue size does not exactly
645 * match the hardware ring size, but it's not that important.
646 * Therefore we stop the queue when one more skb might fill
647 * the ring completely. We wake it when half way back to
648 * empty.
649 */
650 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
651 efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
652
633 /* Initialise the channels */ 653 /* Initialise the channels */
634 efx_for_each_channel(channel, efx) { 654 efx_for_each_channel(channel, efx) {
635 efx_for_each_channel_tx_queue(tx_queue, channel) 655 efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -714,6 +734,7 @@ static void efx_remove_channel(struct efx_channel *channel)
714 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 734 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
715 efx_remove_tx_queue(tx_queue); 735 efx_remove_tx_queue(tx_queue);
716 efx_remove_eventq(channel); 736 efx_remove_eventq(channel);
737 channel->type->post_remove(channel);
717} 738}
718 739
719static void efx_remove_channels(struct efx_nic *efx) 740static void efx_remove_channels(struct efx_nic *efx)
@@ -730,7 +751,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
730 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 751 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
731 u32 old_rxq_entries, old_txq_entries; 752 u32 old_rxq_entries, old_txq_entries;
732 unsigned i, next_buffer_table = 0; 753 unsigned i, next_buffer_table = 0;
733 int rc = 0; 754 int rc;
755
756 rc = efx_check_disabled(efx);
757 if (rc)
758 return rc;
734 759
735 /* Not all channels should be reallocated. We must avoid 760 /* Not all channels should be reallocated. We must avoid
736 * reallocating their buffer table entries. 761 * reallocating their buffer table entries.
@@ -828,6 +853,7 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
828 853
829static const struct efx_channel_type efx_default_channel_type = { 854static const struct efx_channel_type efx_default_channel_type = {
830 .pre_probe = efx_channel_dummy_op_int, 855 .pre_probe = efx_channel_dummy_op_int,
856 .post_remove = efx_channel_dummy_op_void,
831 .get_name = efx_get_channel_name, 857 .get_name = efx_get_channel_name,
832 .copy = efx_copy_channel, 858 .copy = efx_copy_channel,
833 .keep_eventq = false, 859 .keep_eventq = false,
@@ -838,6 +864,10 @@ int efx_channel_dummy_op_int(struct efx_channel *channel)
838 return 0; 864 return 0;
839} 865}
840 866
867void efx_channel_dummy_op_void(struct efx_channel *channel)
868{
869}
870
841/************************************************************************** 871/**************************************************************************
842 * 872 *
843 * Port handling 873 * Port handling
@@ -1365,6 +1395,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1365{ 1395{
1366 struct efx_channel *channel; 1396 struct efx_channel *channel;
1367 1397
1398 BUG_ON(efx->state == STATE_DISABLED);
1399
1368 if (efx->legacy_irq) 1400 if (efx->legacy_irq)
1369 efx->legacy_irq_enabled = true; 1401 efx->legacy_irq_enabled = true;
1370 efx_nic_enable_interrupts(efx); 1402 efx_nic_enable_interrupts(efx);
@@ -1382,6 +1414,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1382{ 1414{
1383 struct efx_channel *channel; 1415 struct efx_channel *channel;
1384 1416
1417 if (efx->state == STATE_DISABLED)
1418 return;
1419
1385 efx_mcdi_mode_poll(efx); 1420 efx_mcdi_mode_poll(efx);
1386 1421
1387 efx_nic_disable_interrupts(efx); 1422 efx_nic_disable_interrupts(efx);
@@ -1422,10 +1457,16 @@ static void efx_set_channels(struct efx_nic *efx)
1422 efx->tx_channel_offset = 1457 efx->tx_channel_offset =
1423 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1458 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1424 1459
1425 /* We need to adjust the TX queue numbers if we have separate 1460 /* We need to mark which channels really have RX and TX
1461 * queues, and adjust the TX queue numbers if we have separate
1426 * RX-only and TX-only channels. 1462 * RX-only and TX-only channels.
1427 */ 1463 */
1428 efx_for_each_channel(channel, efx) { 1464 efx_for_each_channel(channel, efx) {
1465 if (channel->channel < efx->n_rx_channels)
1466 channel->rx_queue.core_index = channel->channel;
1467 else
1468 channel->rx_queue.core_index = -1;
1469
1429 efx_for_each_channel_tx_queue(tx_queue, channel) 1470 efx_for_each_channel_tx_queue(tx_queue, channel)
1430 tx_queue->queue -= (efx->tx_channel_offset * 1471 tx_queue->queue -= (efx->tx_channel_offset *
1431 EFX_TXQ_TYPES); 1472 EFX_TXQ_TYPES);
@@ -1533,22 +1574,21 @@ static int efx_probe_all(struct efx_nic *efx)
1533 return rc; 1574 return rc;
1534} 1575}
1535 1576
1536/* Called after previous invocation(s) of efx_stop_all, restarts the port, 1577/* If the interface is supposed to be running but is not, start
1537 * kernel transmit queues and NAPI processing, and ensures that the port is 1578 * the hardware and software data path, regular activity for the port
1538 * scheduled to be reconfigured. This function is safe to call multiple 1579 * (MAC statistics, link polling, etc.) and schedule the port to be
1539 * times when the NIC is in any state. 1580 * reconfigured. Interrupts must already be enabled. This function
1581 * is safe to call multiple times, so long as the NIC is not disabled.
1582 * Requires the RTNL lock.
1540 */ 1583 */
1541static void efx_start_all(struct efx_nic *efx) 1584static void efx_start_all(struct efx_nic *efx)
1542{ 1585{
1543 EFX_ASSERT_RESET_SERIALISED(efx); 1586 EFX_ASSERT_RESET_SERIALISED(efx);
1587 BUG_ON(efx->state == STATE_DISABLED);
1544 1588
1545 /* Check that it is appropriate to restart the interface. All 1589 /* Check that it is appropriate to restart the interface. All
1546 * of these flags are safe to read under just the rtnl lock */ 1590 * of these flags are safe to read under just the rtnl lock */
1547 if (efx->port_enabled) 1591 if (efx->port_enabled || !netif_running(efx->net_dev))
1548 return;
1549 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1550 return;
1551 if (!netif_running(efx->net_dev))
1552 return; 1592 return;
1553 1593
1554 efx_start_port(efx); 1594 efx_start_port(efx);
@@ -1582,11 +1622,11 @@ static void efx_flush_all(struct efx_nic *efx)
1582 cancel_work_sync(&efx->mac_work); 1622 cancel_work_sync(&efx->mac_work);
1583} 1623}
1584 1624
1585/* Quiesce hardware and software without bringing the link down. 1625/* Quiesce the hardware and software data path, and regular activity
1586 * Safe to call multiple times, when the nic and interface is in any 1626 * for the port without bringing the link down. Safe to call multiple
1587 * state. The caller is guaranteed to subsequently be in a position 1627 * times with the NIC in almost any state, but interrupts should be
1588 * to modify any hardware and software state they see fit without 1628 * enabled. Requires the RTNL lock.
1589 * taking locks. */ 1629 */
1590static void efx_stop_all(struct efx_nic *efx) 1630static void efx_stop_all(struct efx_nic *efx)
1591{ 1631{
1592 EFX_ASSERT_RESET_SERIALISED(efx); 1632 EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,7 +1779,8 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1739 struct efx_nic *efx = netdev_priv(net_dev); 1779 struct efx_nic *efx = netdev_priv(net_dev);
1740 struct mii_ioctl_data *data = if_mii(ifr); 1780 struct mii_ioctl_data *data = if_mii(ifr);
1741 1781
1742 EFX_ASSERT_RESET_SERIALISED(efx); 1782 if (cmd == SIOCSHWTSTAMP)
1783 return efx_ptp_ioctl(efx, ifr, cmd);
1743 1784
1744 /* Convert phy_id from older PRTAD/DEVAD format */ 1785 /* Convert phy_id from older PRTAD/DEVAD format */
1745 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 1786 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -1820,13 +1861,14 @@ static void efx_netpoll(struct net_device *net_dev)
1820static int efx_net_open(struct net_device *net_dev) 1861static int efx_net_open(struct net_device *net_dev)
1821{ 1862{
1822 struct efx_nic *efx = netdev_priv(net_dev); 1863 struct efx_nic *efx = netdev_priv(net_dev);
1823 EFX_ASSERT_RESET_SERIALISED(efx); 1864 int rc;
1824 1865
1825 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 1866 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
1826 raw_smp_processor_id()); 1867 raw_smp_processor_id());
1827 1868
1828 if (efx->state == STATE_DISABLED) 1869 rc = efx_check_disabled(efx);
1829 return -EIO; 1870 if (rc)
1871 return rc;
1830 if (efx->phy_mode & PHY_MODE_SPECIAL) 1872 if (efx->phy_mode & PHY_MODE_SPECIAL)
1831 return -EBUSY; 1873 return -EBUSY;
1832 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 1874 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1894,8 @@ static int efx_net_stop(struct net_device *net_dev)
1852 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 1894 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
1853 raw_smp_processor_id()); 1895 raw_smp_processor_id());
1854 1896
1855 if (efx->state != STATE_DISABLED) { 1897 /* Stop the device and flush all the channels */
1856 /* Stop the device and flush all the channels */ 1898 efx_stop_all(efx);
1857 efx_stop_all(efx);
1858 }
1859 1899
1860 return 0; 1900 return 0;
1861} 1901}
@@ -1915,9 +1955,11 @@ static void efx_watchdog(struct net_device *net_dev)
1915static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1955static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1916{ 1956{
1917 struct efx_nic *efx = netdev_priv(net_dev); 1957 struct efx_nic *efx = netdev_priv(net_dev);
1958 int rc;
1918 1959
1919 EFX_ASSERT_RESET_SERIALISED(efx); 1960 rc = efx_check_disabled(efx);
1920 1961 if (rc)
1962 return rc;
1921 if (new_mtu > EFX_MAX_MTU) 1963 if (new_mtu > EFX_MAX_MTU)
1922 return -EINVAL; 1964 return -EINVAL;
1923 1965
@@ -1926,8 +1968,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1926 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 1968 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1927 1969
1928 mutex_lock(&efx->mac_lock); 1970 mutex_lock(&efx->mac_lock);
1929 /* Reconfigure the MAC before enabling the dma queues so that
1930 * the RX buffers don't overflow */
1931 net_dev->mtu = new_mtu; 1971 net_dev->mtu = new_mtu;
1932 efx->type->reconfigure_mac(efx); 1972 efx->type->reconfigure_mac(efx);
1933 mutex_unlock(&efx->mac_lock); 1973 mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1982,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1942 struct sockaddr *addr = data; 1982 struct sockaddr *addr = data;
1943 char *new_addr = addr->sa_data; 1983 char *new_addr = addr->sa_data;
1944 1984
1945 EFX_ASSERT_RESET_SERIALISED(efx);
1946
1947 if (!is_valid_ether_addr(new_addr)) { 1985 if (!is_valid_ether_addr(new_addr)) {
1948 netif_err(efx, drv, efx->net_dev, 1986 netif_err(efx, drv, efx->net_dev,
1949 "invalid ethernet MAC address requested: %pM\n", 1987 "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2117,27 @@ static int efx_register_netdev(struct efx_nic *efx)
2079 2117
2080 rtnl_lock(); 2118 rtnl_lock();
2081 2119
2120 /* Enable resets to be scheduled and check whether any were
2121 * already requested. If so, the NIC is probably hosed so we
2122 * abort.
2123 */
2124 efx->state = STATE_READY;
2125 smp_mb(); /* ensure we change state before checking reset_pending */
2126 if (efx->reset_pending) {
2127 netif_err(efx, probe, efx->net_dev,
2128 "aborting probe due to scheduled reset\n");
2129 rc = -EIO;
2130 goto fail_locked;
2131 }
2132
2082 rc = dev_alloc_name(net_dev, net_dev->name); 2133 rc = dev_alloc_name(net_dev, net_dev->name);
2083 if (rc < 0) 2134 if (rc < 0)
2084 goto fail_locked; 2135 goto fail_locked;
2085 efx_update_name(efx); 2136 efx_update_name(efx);
2086 2137
2138 /* Always start with carrier off; PHY events will detect the link */
2139 netif_carrier_off(net_dev);
2140
2087 rc = register_netdevice(net_dev); 2141 rc = register_netdevice(net_dev);
2088 if (rc) 2142 if (rc)
2089 goto fail_locked; 2143 goto fail_locked;
@@ -2094,9 +2148,6 @@ static int efx_register_netdev(struct efx_nic *efx)
2094 efx_init_tx_queue_core_txq(tx_queue); 2148 efx_init_tx_queue_core_txq(tx_queue);
2095 } 2149 }
2096 2150
2097 /* Always start with carrier off; PHY events will detect the link */
2098 netif_carrier_off(net_dev);
2099
2100 rtnl_unlock(); 2151 rtnl_unlock();
2101 2152
2102 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2153 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2159,14 @@ static int efx_register_netdev(struct efx_nic *efx)
2108 2159
2109 return 0; 2160 return 0;
2110 2161
2162fail_registered:
2163 rtnl_lock();
2164 unregister_netdevice(net_dev);
2111fail_locked: 2165fail_locked:
2166 efx->state = STATE_UNINIT;
2112 rtnl_unlock(); 2167 rtnl_unlock();
2113 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2168 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2114 return rc; 2169 return rc;
2115
2116fail_registered:
2117 unregister_netdev(net_dev);
2118 return rc;
2119} 2170}
2120 2171
2121static void efx_unregister_netdev(struct efx_nic *efx) 2172static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2189,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
2138 2189
2139 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2190 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2140 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2191 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2141 unregister_netdev(efx->net_dev); 2192
2193 rtnl_lock();
2194 unregister_netdevice(efx->net_dev);
2195 efx->state = STATE_UNINIT;
2196 rtnl_unlock();
2142} 2197}
2143 2198
2144/************************************************************************** 2199/**************************************************************************
@@ -2154,9 +2209,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2154 EFX_ASSERT_RESET_SERIALISED(efx); 2209 EFX_ASSERT_RESET_SERIALISED(efx);
2155 2210
2156 efx_stop_all(efx); 2211 efx_stop_all(efx);
2157 mutex_lock(&efx->mac_lock);
2158
2159 efx_stop_interrupts(efx, false); 2212 efx_stop_interrupts(efx, false);
2213
2214 mutex_lock(&efx->mac_lock);
2160 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2215 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
2161 efx->phy_op->fini(efx); 2216 efx->phy_op->fini(efx);
2162 efx->type->fini(efx); 2217 efx->type->fini(efx);
@@ -2276,16 +2331,15 @@ static void efx_reset_work(struct work_struct *data)
2276 if (!pending) 2331 if (!pending)
2277 return; 2332 return;
2278 2333
2279 /* If we're not RUNNING then don't reset. Leave the reset_pending
2280 * flags set so that efx_pci_probe_main will be retried */
2281 if (efx->state != STATE_RUNNING) {
2282 netif_info(efx, drv, efx->net_dev,
2283 "scheduled reset quenched. NIC not RUNNING\n");
2284 return;
2285 }
2286
2287 rtnl_lock(); 2334 rtnl_lock();
2288 (void)efx_reset(efx, fls(pending) - 1); 2335
2336 /* We checked the state in efx_schedule_reset() but it may
2337 * have changed by now. Now that we have the RTNL lock,
2338 * it cannot change again.
2339 */
2340 if (efx->state == STATE_READY)
2341 (void)efx_reset(efx, fls(pending) - 1);
2342
2289 rtnl_unlock(); 2343 rtnl_unlock();
2290} 2344}
2291 2345
@@ -2311,6 +2365,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2311 } 2365 }
2312 2366
2313 set_bit(method, &efx->reset_pending); 2367 set_bit(method, &efx->reset_pending);
2368 smp_mb(); /* ensure we change reset_pending before checking state */
2369
2370 /* If we're not READY then just leave the flags set as the cue
2371 * to abort probing or reschedule the reset later.
2372 */
2373 if (ACCESS_ONCE(efx->state) != STATE_READY)
2374 return;
2314 2375
2315 /* efx_process_channel() will no longer read events once a 2376 /* efx_process_channel() will no longer read events once a
2316 * reset is scheduled. So switch back to poll'd MCDI completions. */ 2377 * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2437,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
2376/* This zeroes out and then fills in the invariants in a struct 2437/* This zeroes out and then fills in the invariants in a struct
2377 * efx_nic (including all sub-structures). 2438 * efx_nic (including all sub-structures).
2378 */ 2439 */
2379static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, 2440static int efx_init_struct(struct efx_nic *efx,
2380 struct pci_dev *pci_dev, struct net_device *net_dev) 2441 struct pci_dev *pci_dev, struct net_device *net_dev)
2381{ 2442{
2382 int i; 2443 int i;
2383 2444
2384 /* Initialise common structures */ 2445 /* Initialise common structures */
2385 memset(efx, 0, sizeof(*efx));
2386 spin_lock_init(&efx->biu_lock); 2446 spin_lock_init(&efx->biu_lock);
2387#ifdef CONFIG_SFC_MTD 2447#ifdef CONFIG_SFC_MTD
2388 INIT_LIST_HEAD(&efx->mtd_list); 2448 INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2452,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2392 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); 2452 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
2393 efx->pci_dev = pci_dev; 2453 efx->pci_dev = pci_dev;
2394 efx->msg_enable = debug; 2454 efx->msg_enable = debug;
2395 efx->state = STATE_INIT; 2455 efx->state = STATE_UNINIT;
2396 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2456 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2397 2457
2398 efx->net_dev = net_dev; 2458 efx->net_dev = net_dev;
@@ -2409,8 +2469,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2409 goto fail; 2469 goto fail;
2410 } 2470 }
2411 2471
2412 efx->type = type;
2413
2414 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2472 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2415 2473
2416 /* Higher numbered interrupt modes are less capable! */ 2474 /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2513,12 @@ static void efx_fini_struct(struct efx_nic *efx)
2455 */ 2513 */
2456static void efx_pci_remove_main(struct efx_nic *efx) 2514static void efx_pci_remove_main(struct efx_nic *efx)
2457{ 2515{
2516 /* Flush reset_work. It can no longer be scheduled since we
2517 * are not READY.
2518 */
2519 BUG_ON(efx->state == STATE_READY);
2520 cancel_work_sync(&efx->reset_work);
2521
2458#ifdef CONFIG_RFS_ACCEL 2522#ifdef CONFIG_RFS_ACCEL
2459 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); 2523 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2460 efx->net_dev->rx_cpu_rmap = NULL; 2524 efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2544,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2480 2544
2481 /* Mark the NIC as fini, then stop the interface */ 2545 /* Mark the NIC as fini, then stop the interface */
2482 rtnl_lock(); 2546 rtnl_lock();
2483 efx->state = STATE_FINI;
2484 dev_close(efx->net_dev); 2547 dev_close(efx->net_dev);
2485 2548 efx_stop_interrupts(efx, false);
2486 /* Allow any queued efx_resets() to complete */
2487 rtnl_unlock(); 2549 rtnl_unlock();
2488 2550
2489 efx_stop_interrupts(efx, false);
2490 efx_sriov_fini(efx); 2551 efx_sriov_fini(efx);
2491 efx_unregister_netdev(efx); 2552 efx_unregister_netdev(efx);
2492 2553
2493 efx_mtd_remove(efx); 2554 efx_mtd_remove(efx);
2494 2555
2495 /* Wait for any scheduled resets to complete. No more will be
2496 * scheduled from this point because efx_stop_all() has been
2497 * called, we are no longer registered with driverlink, and
2498 * the net_device's have been removed. */
2499 cancel_work_sync(&efx->reset_work);
2500
2501 efx_pci_remove_main(efx); 2556 efx_pci_remove_main(efx);
2502 2557
2503 efx_fini_io(efx); 2558 efx_fini_io(efx);
@@ -2617,7 +2672,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2617static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2672static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2618 const struct pci_device_id *entry) 2673 const struct pci_device_id *entry)
2619{ 2674{
2620 const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
2621 struct net_device *net_dev; 2675 struct net_device *net_dev;
2622 struct efx_nic *efx; 2676 struct efx_nic *efx;
2623 int rc; 2677 int rc;
@@ -2627,10 +2681,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2627 EFX_MAX_RX_QUEUES); 2681 EFX_MAX_RX_QUEUES);
2628 if (!net_dev) 2682 if (!net_dev)
2629 return -ENOMEM; 2683 return -ENOMEM;
2630 net_dev->features |= (type->offload_features | NETIF_F_SG | 2684 efx = netdev_priv(net_dev);
2685 efx->type = (const struct efx_nic_type *) entry->driver_data;
2686 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2631 NETIF_F_HIGHDMA | NETIF_F_TSO | 2687 NETIF_F_HIGHDMA | NETIF_F_TSO |
2632 NETIF_F_RXCSUM); 2688 NETIF_F_RXCSUM);
2633 if (type->offload_features & NETIF_F_V6_CSUM) 2689 if (efx->type->offload_features & NETIF_F_V6_CSUM)
2634 net_dev->features |= NETIF_F_TSO6; 2690 net_dev->features |= NETIF_F_TSO6;
2635 /* Mask for features that also apply to VLAN devices */ 2691 /* Mask for features that also apply to VLAN devices */
2636 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2692 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2694,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2638 NETIF_F_RXCSUM); 2694 NETIF_F_RXCSUM);
2639 /* All offloads can be toggled */ 2695 /* All offloads can be toggled */
2640 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; 2696 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
2641 efx = netdev_priv(net_dev);
2642 pci_set_drvdata(pci_dev, efx); 2697 pci_set_drvdata(pci_dev, efx);
2643 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2698 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2644 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2699 rc = efx_init_struct(efx, pci_dev, net_dev);
2645 if (rc) 2700 if (rc)
2646 goto fail1; 2701 goto fail1;
2647 2702
@@ -2656,28 +2711,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2656 goto fail2; 2711 goto fail2;
2657 2712
2658 rc = efx_pci_probe_main(efx); 2713 rc = efx_pci_probe_main(efx);
2659
2660 /* Serialise against efx_reset(). No more resets will be
2661 * scheduled since efx_stop_all() has been called, and we have
2662 * not and never have been registered.
2663 */
2664 cancel_work_sync(&efx->reset_work);
2665
2666 if (rc) 2714 if (rc)
2667 goto fail3; 2715 goto fail3;
2668 2716
2669 /* If there was a scheduled reset during probe, the NIC is
2670 * probably hosed anyway.
2671 */
2672 if (efx->reset_pending) {
2673 rc = -EIO;
2674 goto fail4;
2675 }
2676
2677 /* Switch to the running state before we expose the device to the OS,
2678 * so that dev_open()|efx_start_all() will actually start the device */
2679 efx->state = STATE_RUNNING;
2680
2681 rc = efx_register_netdev(efx); 2717 rc = efx_register_netdev(efx);
2682 if (rc) 2718 if (rc)
2683 goto fail4; 2719 goto fail4;
@@ -2717,12 +2753,18 @@ static int efx_pm_freeze(struct device *dev)
2717{ 2753{
2718 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2754 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2719 2755
2720 efx->state = STATE_FINI; 2756 rtnl_lock();
2721 2757
2722 netif_device_detach(efx->net_dev); 2758 if (efx->state != STATE_DISABLED) {
2759 efx->state = STATE_UNINIT;
2723 2760
2724 efx_stop_all(efx); 2761 netif_device_detach(efx->net_dev);
2725 efx_stop_interrupts(efx, false); 2762
2763 efx_stop_all(efx);
2764 efx_stop_interrupts(efx, false);
2765 }
2766
2767 rtnl_unlock();
2726 2768
2727 return 0; 2769 return 0;
2728} 2770}
@@ -2731,21 +2773,25 @@ static int efx_pm_thaw(struct device *dev)
2731{ 2773{
2732 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2774 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2733 2775
2734 efx->state = STATE_INIT; 2776 rtnl_lock();
2735 2777
2736 efx_start_interrupts(efx, false); 2778 if (efx->state != STATE_DISABLED) {
2779 efx_start_interrupts(efx, false);
2737 2780
2738 mutex_lock(&efx->mac_lock); 2781 mutex_lock(&efx->mac_lock);
2739 efx->phy_op->reconfigure(efx); 2782 efx->phy_op->reconfigure(efx);
2740 mutex_unlock(&efx->mac_lock); 2783 mutex_unlock(&efx->mac_lock);
2741 2784
2742 efx_start_all(efx); 2785 efx_start_all(efx);
2743 2786
2744 netif_device_attach(efx->net_dev); 2787 netif_device_attach(efx->net_dev);
2745 2788
2746 efx->state = STATE_RUNNING; 2789 efx->state = STATE_READY;
2747 2790
2748 efx->type->resume_wol(efx); 2791 efx->type->resume_wol(efx);
2792 }
2793
2794 rtnl_unlock();
2749 2795
2750 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 2796 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
2751 queue_work(reset_workqueue, &efx->reset_work); 2797 queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 70755c97251a..f11170bc48bf 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -102,6 +102,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
102 102
103/* Channels */ 103/* Channels */
104extern int efx_channel_dummy_op_int(struct efx_channel *channel); 104extern int efx_channel_dummy_op_int(struct efx_channel *channel);
105extern void efx_channel_dummy_op_void(struct efx_channel *channel);
105extern void efx_process_channel_now(struct efx_channel *channel); 106extern void efx_process_channel_now(struct efx_channel *channel);
106extern int 107extern int
107efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); 108efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5faedd855b77..90f078eff8e6 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -337,7 +337,8 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
337 unsigned int test_index, 337 unsigned int test_index,
338 struct ethtool_string *strings, u64 *data) 338 struct ethtool_string *strings, u64 *data)
339{ 339{
340 struct efx_channel *channel = efx_get_channel(efx, 0); 340 struct efx_channel *channel =
341 efx_get_channel(efx, efx->tx_channel_offset);
341 struct efx_tx_queue *tx_queue; 342 struct efx_tx_queue *tx_queue;
342 343
343 efx_for_each_channel_tx_queue(tx_queue, channel) { 344 efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -529,9 +530,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
529 if (!efx_tests) 530 if (!efx_tests)
530 goto fail; 531 goto fail;
531 532
532 533 if (efx->state != STATE_READY) {
533 ASSERT_RTNL();
534 if (efx->state != STATE_RUNNING) {
535 rc = -EIO; 534 rc = -EIO;
536 goto fail1; 535 goto fail1;
537 } 536 }
@@ -962,9 +961,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
962 int rc; 961 int rc;
963 962
964 /* Check that user wants us to choose the location */ 963 /* Check that user wants us to choose the location */
965 if (rule->location != RX_CLS_LOC_ANY && 964 if (rule->location != RX_CLS_LOC_ANY)
966 rule->location != RX_CLS_LOC_FIRST &&
967 rule->location != RX_CLS_LOC_LAST)
968 return -EINVAL; 965 return -EINVAL;
969 966
970 /* Range-check ring_cookie */ 967 /* Range-check ring_cookie */
@@ -978,9 +975,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
978 rule->m_ext.data[1])) 975 rule->m_ext.data[1]))
979 return -EINVAL; 976 return -EINVAL;
980 977
981 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 978 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
982 (rule->location == RX_CLS_LOC_FIRST) ?
983 EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
984 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 979 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
985 0xfff : rule->ring_cookie); 980 0xfff : rule->ring_cookie);
986 981
@@ -1176,6 +1171,7 @@ const struct ethtool_ops efx_ethtool_ops = {
1176 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, 1171 .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
1177 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1172 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
1178 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1173 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
1174 .get_ts_info = efx_ptp_get_ts_info,
1179 .get_module_info = efx_ethtool_get_module_info, 1175 .get_module_info = efx_ethtool_get_module_info,
1180 .get_module_eeprom = efx_ethtool_get_module_eeprom, 1176 .get_module_eeprom = efx_ethtool_get_module_eeprom,
1181}; 1177};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 8687a6c3db0d..ec1e99d0dcad 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
380 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
382 err = 0; 382 err = 0;
383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
384 err = -EBUSY; 384 err = -EBUSY;
385 } else { 385 } else {
386 /* Reset the PHY, reconfigure the MAC and enable/disable 386 /* Reset the PHY, reconfigure the MAC and enable/disable
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index c3fd61f0a95c..8af42cd1feda 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -162,20 +162,12 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags & 162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
163 EFX_FILTER_FLAG_RX_RSS)); 163 EFX_FILTER_FLAG_RX_RSS));
164 EFX_SET_OWORD_FIELD( 164 EFX_SET_OWORD_FIELD(
165 filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
166 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
167 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
168 EFX_SET_OWORD_FIELD(
169 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, 165 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
170 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id); 166 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
171 EFX_SET_OWORD_FIELD( 167 EFX_SET_OWORD_FIELD(
172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, 168 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & 169 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
174 EFX_FILTER_FLAG_RX_RSS)); 170 EFX_FILTER_FLAG_RX_RSS));
175 EFX_SET_OWORD_FIELD(
176 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
177 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
178 EFX_FILTER_FLAG_RX_OVERRIDE_IP));
179 } 171 }
180 172
181 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 173 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -480,14 +472,12 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
480 472
481 case EFX_FILTER_TABLE_RX_MAC: { 473 case EFX_FILTER_TABLE_RX_MAC: {
482 bool is_wild = spec->type == EFX_FILTER_MAC_WILD; 474 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
483 EFX_POPULATE_OWORD_8( 475 EFX_POPULATE_OWORD_7(
484 *filter, 476 *filter,
485 FRF_CZ_RMFT_RSS_EN, 477 FRF_CZ_RMFT_RSS_EN,
486 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), 478 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
487 FRF_CZ_RMFT_SCATTER_EN, 479 FRF_CZ_RMFT_SCATTER_EN,
488 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), 480 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
489 FRF_CZ_RMFT_IP_OVERRIDE,
490 !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
491 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, 481 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
492 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, 482 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
493 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], 483 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
@@ -567,49 +557,62 @@ static int efx_filter_search(struct efx_filter_table *table,
567} 557}
568 558
569/* 559/*
570 * Construct/deconstruct external filter IDs. These must be ordered 560 * Construct/deconstruct external filter IDs. At least the RX filter
571 * by matching priority, for RX NFC semantics. 561 * IDs must be ordered by matching priority, for RX NFC semantics.
572 * 562 *
573 * Each RX MAC filter entry has a flag for whether it can override an 563 * Deconstruction needs to be robust against invalid IDs so that
574 * RX IP filter that also matches. So we assign locations for MAC 564 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
575 * filters with overriding behaviour, then for IP filters, then for 565 * accept user-provided IDs.
576 * MAC filters without overriding behaviour.
577 */ 566 */
578 567
579#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP 0 568#define EFX_FILTER_MATCH_PRI_COUNT 5
580#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP 1 569
581#define EFX_FILTER_MATCH_PRI_NORMAL_BASE 2 570static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
571 [EFX_FILTER_TCP_FULL] = 0,
572 [EFX_FILTER_UDP_FULL] = 0,
573 [EFX_FILTER_TCP_WILD] = 1,
574 [EFX_FILTER_UDP_WILD] = 1,
575 [EFX_FILTER_MAC_FULL] = 2,
576 [EFX_FILTER_MAC_WILD] = 3,
577 [EFX_FILTER_UC_DEF] = 4,
578 [EFX_FILTER_MC_DEF] = 4,
579};
580
581static const enum efx_filter_table_id efx_filter_range_table[] = {
582 EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
583 EFX_FILTER_TABLE_RX_IP,
584 EFX_FILTER_TABLE_RX_MAC,
585 EFX_FILTER_TABLE_RX_MAC,
586 EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
587 EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
588 EFX_FILTER_TABLE_COUNT, /* invalid */
589 EFX_FILTER_TABLE_TX_MAC,
590 EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
591};
582 592
583#define EFX_FILTER_INDEX_WIDTH 13 593#define EFX_FILTER_INDEX_WIDTH 13
584#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1) 594#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
585 595
586static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id, 596static inline u32
587 unsigned int index, u8 flags) 597efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
588{ 598{
589 unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id; 599 unsigned int range;
590 600
591 if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) { 601 range = efx_filter_type_match_pri[spec->type];
592 if (table_id == EFX_FILTER_TABLE_RX_MAC) 602 if (!(spec->flags & EFX_FILTER_FLAG_RX))
593 match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP; 603 range += EFX_FILTER_MATCH_PRI_COUNT;
594 else if (table_id == EFX_FILTER_TABLE_RX_DEF)
595 match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
596 }
597 604
598 return match_pri << EFX_FILTER_INDEX_WIDTH | index; 605 return range << EFX_FILTER_INDEX_WIDTH | index;
599} 606}
600 607
601static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id) 608static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
602{ 609{
603 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH; 610 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
604 611
605 switch (match_pri) { 612 if (range < ARRAY_SIZE(efx_filter_range_table))
606 case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP: 613 return efx_filter_range_table[range];
607 return EFX_FILTER_TABLE_RX_MAC; 614 else
608 case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP: 615 return EFX_FILTER_TABLE_COUNT; /* invalid */
609 return EFX_FILTER_TABLE_RX_DEF;
610 default:
611 return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
612 }
613} 616}
614 617
615static inline unsigned int efx_filter_id_index(u32 id) 618static inline unsigned int efx_filter_id_index(u32 id)
@@ -619,12 +622,9 @@ static inline unsigned int efx_filter_id_index(u32 id)
619 622
620static inline u8 efx_filter_id_flags(u32 id) 623static inline u8 efx_filter_id_flags(u32 id)
621{ 624{
622 unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH; 625 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
623 626
624 if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE) 627 if (range < EFX_FILTER_MATCH_PRI_COUNT)
625 return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
626 else if (match_pri <=
627 EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
628 return EFX_FILTER_FLAG_RX; 628 return EFX_FILTER_FLAG_RX;
629 else 629 else
630 return EFX_FILTER_FLAG_TX; 630 return EFX_FILTER_FLAG_TX;
@@ -633,14 +633,15 @@ static inline u8 efx_filter_id_flags(u32 id)
633u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) 633u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
634{ 634{
635 struct efx_filter_state *state = efx->filter_state; 635 struct efx_filter_state *state = efx->filter_state;
636 unsigned int table_id = EFX_FILTER_TABLE_RX_DEF; 636 unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
637 enum efx_filter_table_id table_id;
637 638
638 do { 639 do {
640 table_id = efx_filter_range_table[range];
639 if (state->table[table_id].size != 0) 641 if (state->table[table_id].size != 0)
640 return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id) 642 return range << EFX_FILTER_INDEX_WIDTH |
641 << EFX_FILTER_INDEX_WIDTH) +
642 state->table[table_id].size; 643 state->table[table_id].size;
643 } while (table_id--); 644 } while (range--);
644 645
645 return 0; 646 return 0;
646} 647}
@@ -718,7 +719,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
718 netif_vdbg(efx, hw, efx->net_dev, 719 netif_vdbg(efx, hw, efx->net_dev,
719 "%s: filter type %d index %d rxq %u set", 720 "%s: filter type %d index %d rxq %u set",
720 __func__, spec->type, filter_idx, spec->dmaq_id); 721 __func__, spec->type, filter_idx, spec->dmaq_id);
721 rc = efx_filter_make_id(table->id, filter_idx, spec->flags); 722 rc = efx_filter_make_id(spec, filter_idx);
722 723
723out: 724out:
724 spin_unlock_bh(&state->lock); 725 spin_unlock_bh(&state->lock);
@@ -781,8 +782,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
781 spin_lock_bh(&state->lock); 782 spin_lock_bh(&state->lock);
782 783
783 if (test_bit(filter_idx, table->used_bitmap) && 784 if (test_bit(filter_idx, table->used_bitmap) &&
784 spec->priority == priority && 785 spec->priority == priority) {
785 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
786 efx_filter_table_clear_entry(efx, table, filter_idx); 786 efx_filter_table_clear_entry(efx, table, filter_idx);
787 if (table->used == 0) 787 if (table->used == 0)
788 efx_filter_table_reset_search_depth(table); 788 efx_filter_table_reset_search_depth(table);
@@ -833,8 +833,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
833 spin_lock_bh(&state->lock); 833 spin_lock_bh(&state->lock);
834 834
835 if (test_bit(filter_idx, table->used_bitmap) && 835 if (test_bit(filter_idx, table->used_bitmap) &&
836 spec->priority == priority && 836 spec->priority == priority) {
837 !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
838 *spec_buf = *spec; 837 *spec_buf = *spec;
839 rc = 0; 838 rc = 0;
840 } else { 839 } else {
@@ -927,8 +926,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
927 goto out; 926 goto out;
928 } 927 }
929 buf[count++] = efx_filter_make_id( 928 buf[count++] = efx_filter_make_id(
930 table_id, filter_idx, 929 &table->spec[filter_idx], filter_idx);
931 table->spec[filter_idx].flags);
932 } 930 }
933 } 931 }
934 } 932 }
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3c77802aed6c..5cb54723b824 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -61,16 +61,12 @@ enum efx_filter_priority {
61 * according to the indirection table. 61 * according to the indirection table.
62 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving 62 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
63 * queue. 63 * queue.
64 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
65 * any IP filter that matches the same packet. By default, IP
66 * filters take precedence.
67 * @EFX_FILTER_FLAG_RX: Filter is for RX 64 * @EFX_FILTER_FLAG_RX: Filter is for RX
68 * @EFX_FILTER_FLAG_TX: Filter is for TX 65 * @EFX_FILTER_FLAG_TX: Filter is for TX
69 */ 66 */
70enum efx_filter_flags { 67enum efx_filter_flags {
71 EFX_FILTER_FLAG_RX_RSS = 0x01, 68 EFX_FILTER_FLAG_RX_RSS = 0x01,
72 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 69 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
73 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
74 EFX_FILTER_FLAG_RX = 0x08, 70 EFX_FILTER_FLAG_RX = 0x08,
75 EFX_FILTER_FLAG_TX = 0x10, 71 EFX_FILTER_FLAG_TX = 0x10,
76}; 72};
@@ -88,8 +84,7 @@ enum efx_filter_flags {
88 * 84 *
89 * The @priority field is used by software to determine whether a new 85 * The @priority field is used by software to determine whether a new
90 * filter may replace an old one. The hardware priority of a filter 86 * filter may replace an old one. The hardware priority of a filter
91 * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP 87 * depends on the filter type.
92 * flag.
93 */ 88 */
94struct efx_filter_spec { 89struct efx_filter_spec {
95 u8 type:4; 90 u8 type:4;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index fc5e7bbcbc9e..aea43cbd0520 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -320,14 +320,20 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
320 efx_mcdi_complete(mcdi); 320 efx_mcdi_complete(mcdi);
321} 321}
322 322
323/* Issue the given command by writing the data into the shared memory PDU,
324 * ring the doorbell and wait for completion. Copyout the result. */
325int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 323int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
326 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, 324 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
327 size_t *outlen_actual) 325 size_t *outlen_actual)
328{ 326{
327 efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
328 return efx_mcdi_rpc_finish(efx, cmd, inlen,
329 outbuf, outlen, outlen_actual);
330}
331
332void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
333 size_t inlen)
334{
329 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 335 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
330 int rc; 336
331 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 337 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
332 338
333 efx_mcdi_acquire(mcdi); 339 efx_mcdi_acquire(mcdi);
@@ -338,6 +344,15 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
338 spin_unlock_bh(&mcdi->iface_lock); 344 spin_unlock_bh(&mcdi->iface_lock);
339 345
340 efx_mcdi_copyin(efx, cmd, inbuf, inlen); 346 efx_mcdi_copyin(efx, cmd, inbuf, inlen);
347}
348
349int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
350 u8 *outbuf, size_t outlen, size_t *outlen_actual)
351{
352 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
353 int rc;
354
355 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
341 356
342 if (mcdi->mode == MCDI_MODE_POLL) 357 if (mcdi->mode == MCDI_MODE_POLL)
343 rc = efx_mcdi_poll(efx); 358 rc = efx_mcdi_poll(efx);
@@ -563,6 +578,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
563 case MCDI_EVENT_CODE_FLR: 578 case MCDI_EVENT_CODE_FLR:
564 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); 579 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
565 break; 580 break;
581 case MCDI_EVENT_CODE_PTP_RX:
582 case MCDI_EVENT_CODE_PTP_FAULT:
583 case MCDI_EVENT_CODE_PTP_PPS:
584 efx_ptp_event(efx, event);
585 break;
566 586
567 default: 587 default:
568 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 588 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -641,9 +661,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
641 u16 *fw_subtype_list, u32 *capabilities) 661 u16 *fw_subtype_list, u32 *capabilities)
642{ 662{
643 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN]; 663 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
644 size_t outlen; 664 size_t outlen, offset, i;
645 int port_num = efx_port_num(efx); 665 int port_num = efx_port_num(efx);
646 int offset;
647 int rc; 666 int rc;
648 667
649 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 668 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
@@ -663,11 +682,18 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
663 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; 682 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
664 if (mac_address) 683 if (mac_address)
665 memcpy(mac_address, outbuf + offset, ETH_ALEN); 684 memcpy(mac_address, outbuf + offset, ETH_ALEN);
666 if (fw_subtype_list) 685 if (fw_subtype_list) {
667 memcpy(fw_subtype_list, 686 /* Byte-swap and truncate or zero-pad as necessary */
668 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, 687 offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
669 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM * 688 for (i = 0;
670 sizeof(fw_subtype_list[0])); 689 i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
690 i++) {
691 fw_subtype_list[i] =
692 (offset + 2 <= outlen) ?
693 le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
694 offset += 2;
695 }
696 }
671 if (capabilities) { 697 if (capabilities) {
672 if (port_num) 698 if (port_num)
673 *capabilities = MCDI_DWORD(outbuf, 699 *capabilities = MCDI_DWORD(outbuf,
@@ -1169,6 +1195,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1169 __le32 *qid; 1195 __le32 *qid;
1170 int rc, count; 1196 int rc, count;
1171 1197
1198 BUILD_BUG_ON(EFX_MAX_CHANNELS >
1199 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
1200
1172 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); 1201 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
1173 if (qid == NULL) 1202 if (qid == NULL)
1174 return -ENOMEM; 1203 return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 0bdf3e331832..3ba2e5b5a9cc 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -71,6 +71,12 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
71 size_t inlen, u8 *outbuf, size_t outlen, 71 size_t inlen, u8 *outbuf, size_t outlen,
72 size_t *outlen_actual); 72 size_t *outlen_actual);
73 73
74extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
75 const u8 *inbuf, size_t inlen);
76extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
77 u8 *outbuf, size_t outlen,
78 size_t *outlen_actual);
79
74extern int efx_mcdi_poll_reboot(struct efx_nic *efx); 80extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
75extern void efx_mcdi_mode_poll(struct efx_nic *efx); 81extern void efx_mcdi_mode_poll(struct efx_nic *efx);
76extern void efx_mcdi_mode_event(struct efx_nic *efx); 82extern void efx_mcdi_mode_event(struct efx_nic *efx);
@@ -107,11 +113,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
107#define MCDI_EVENT_FIELD(_ev, _field) \ 113#define MCDI_EVENT_FIELD(_ev, _field) \
108 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 114 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
109#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ 115#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
110 EFX_DWORD_FIELD( \ 116 EFX_EXTRACT_DWORD( \
111 *((efx_dword_t *) \ 117 *((efx_dword_t *) \
112 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \ 118 (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
113 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \ 119 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
114 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2) 120 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
121 (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
122 MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
115 123
116extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); 124extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
117extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 125extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index db4beed97669..9d426d0457bd 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -289,6 +289,7 @@
289#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */ 289#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
290#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */ 290#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
291#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */ 291#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
292#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
292#define MCDI_EVENT_CMDDONE_DATA_OFST 0 293#define MCDI_EVENT_CMDDONE_DATA_OFST 0
293#define MCDI_EVENT_CMDDONE_DATA_LBN 0 294#define MCDI_EVENT_CMDDONE_DATA_LBN 0
294#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 295#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -491,12 +492,12 @@
491 492
492/* MC_CMD_GET_FPGAREG_OUT msgresponse */ 493/* MC_CMD_GET_FPGAREG_OUT msgresponse */
493#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1 494#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
494#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255 495#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
495#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num)) 496#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
496#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0 497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
497#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1 498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
498#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1 499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
499#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255 500#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
500 501
501 502
502/***********************************/ 503/***********************************/
@@ -507,13 +508,13 @@
507 508
508/* MC_CMD_PUT_FPGAREG_IN msgrequest */ 509/* MC_CMD_PUT_FPGAREG_IN msgrequest */
509#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5 510#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
510#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255 511#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
511#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num)) 512#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
512#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0 513#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
513#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4 514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
514#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1 515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
515#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1 516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
516#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251 517#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
517 518
518/* MC_CMD_PUT_FPGAREG_OUT msgresponse */ 519/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
519#define MC_CMD_PUT_FPGAREG_OUT_LEN 0 520#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
@@ -560,7 +561,7 @@
560 561
561/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ 562/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
562#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 563#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
563#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255 564#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
564#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) 565#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
565/* MC_CMD_PTP_IN_CMD_OFST 0 */ 566/* MC_CMD_PTP_IN_CMD_OFST 0 */
566/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ 567/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
@@ -568,7 +569,7 @@
568#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 569#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
569#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
570#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
571#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243 572#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
572 573
573/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ 574/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
574#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 575#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
@@ -1145,7 +1146,7 @@
1145 1146
1146/* MC_CMD_PUTS_IN msgrequest */ 1147/* MC_CMD_PUTS_IN msgrequest */
1147#define MC_CMD_PUTS_IN_LENMIN 13 1148#define MC_CMD_PUTS_IN_LENMIN 13
1148#define MC_CMD_PUTS_IN_LENMAX 255 1149#define MC_CMD_PUTS_IN_LENMAX 252
1149#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) 1150#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
1150#define MC_CMD_PUTS_IN_DEST_OFST 0 1151#define MC_CMD_PUTS_IN_DEST_OFST 0
1151#define MC_CMD_PUTS_IN_UART_LBN 0 1152#define MC_CMD_PUTS_IN_UART_LBN 0
@@ -1157,7 +1158,7 @@
1157#define MC_CMD_PUTS_IN_STRING_OFST 12 1158#define MC_CMD_PUTS_IN_STRING_OFST 12
1158#define MC_CMD_PUTS_IN_STRING_LEN 1 1159#define MC_CMD_PUTS_IN_STRING_LEN 1
1159#define MC_CMD_PUTS_IN_STRING_MINNUM 1 1160#define MC_CMD_PUTS_IN_STRING_MINNUM 1
1160#define MC_CMD_PUTS_IN_STRING_MAXNUM 243 1161#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
1161 1162
1162/* MC_CMD_PUTS_OUT msgresponse */ 1163/* MC_CMD_PUTS_OUT msgresponse */
1163#define MC_CMD_PUTS_OUT_LEN 0 1164#define MC_CMD_PUTS_OUT_LEN 0
@@ -1947,12 +1948,12 @@
1947 1948
1948/* MC_CMD_NVRAM_READ_OUT msgresponse */ 1949/* MC_CMD_NVRAM_READ_OUT msgresponse */
1949#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 1950#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
1950#define MC_CMD_NVRAM_READ_OUT_LENMAX 255 1951#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
1951#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) 1952#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
1952#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 1953#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
1953#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 1954#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
1954#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 1955#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
1955#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255 1956#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
1956 1957
1957 1958
1958/***********************************/ 1959/***********************************/
@@ -1963,7 +1964,7 @@
1963 1964
1964/* MC_CMD_NVRAM_WRITE_IN msgrequest */ 1965/* MC_CMD_NVRAM_WRITE_IN msgrequest */
1965#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 1966#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
1966#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255 1967#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
1967#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) 1968#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
1968#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 1969#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
1969/* Enum values, see field(s): */ 1970/* Enum values, see field(s): */
@@ -1973,7 +1974,7 @@
1973#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 1974#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
1974#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 1975#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
1975#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 1976#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
1976#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243 1977#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
1977 1978
1978/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ 1979/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
1979#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 1980#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
@@ -2305,13 +2306,13 @@
2305 2306
2306/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ 2307/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
2307#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 2308#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
2308#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255 2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
2309#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) 2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
2310#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
2311#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
2312#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
2313#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
2314#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251 2315#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
2315 2316
2316 2317
2317/***********************************/ 2318/***********************************/
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 758148379b0e..08f825b71ac8 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -585,6 +585,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
585 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, 585 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
586 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, 586 [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
587 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, 587 [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
588 [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
588}; 589};
589 590
590static int siena_mtd_probe_partition(struct efx_nic *efx, 591static int siena_mtd_probe_partition(struct efx_nic *efx,
@@ -598,7 +599,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
598 bool protected; 599 bool protected;
599 int rc; 600 int rc;
600 601
601 if (type >= ARRAY_SIZE(siena_nvram_types)) 602 if (type >= ARRAY_SIZE(siena_nvram_types) ||
603 siena_nvram_types[type].name == NULL)
602 return -ENODEV; 604 return -ENODEV;
603 605
604 info = &siena_nvram_types[type]; 606 info = &siena_nvram_types[type];
@@ -627,7 +629,8 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
627 struct efx_mtd *efx_mtd) 629 struct efx_mtd *efx_mtd)
628{ 630{
629 struct efx_mtd_partition *part; 631 struct efx_mtd_partition *part;
630 uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM]; 632 uint16_t fw_subtype_list[
633 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
631 int rc; 634 int rc;
632 635
633 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); 636 rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cd9c0a989692..c1a010cda89b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -37,7 +37,7 @@
37 * 37 *
38 **************************************************************************/ 38 **************************************************************************/
39 39
40#define EFX_DRIVER_VERSION "3.1" 40#define EFX_DRIVER_VERSION "3.2"
41 41
42#ifdef DEBUG 42#ifdef DEBUG
43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 43#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -56,7 +56,8 @@
56#define EFX_MAX_CHANNELS 32U 56#define EFX_MAX_CHANNELS 32U
57#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 57#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
58#define EFX_EXTRA_CHANNEL_IOV 0 58#define EFX_EXTRA_CHANNEL_IOV 0
59#define EFX_MAX_EXTRA_CHANNELS 1U 59#define EFX_EXTRA_CHANNEL_PTP 1
60#define EFX_MAX_EXTRA_CHANNELS 2U
60 61
61/* Checksum generation is a per-queue option in hardware, so each 62/* Checksum generation is a per-queue option in hardware, so each
62 * queue visible to the networking core is backed by two hardware TX 63 * queue visible to the networking core is backed by two hardware TX
@@ -68,6 +69,9 @@
68#define EFX_TXQ_TYPES 4 69#define EFX_TXQ_TYPES 4
69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) 70#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
70 71
72/* Forward declare Precision Time Protocol (PTP) support structure. */
73struct efx_ptp_data;
74
71struct efx_self_tests; 75struct efx_self_tests;
72 76
73/** 77/**
@@ -91,29 +95,31 @@ struct efx_special_buffer {
91}; 95};
92 96
93/** 97/**
94 * struct efx_tx_buffer - An Efx TX buffer 98 * struct efx_tx_buffer - buffer state for a TX descriptor
95 * @skb: The associated socket buffer. 99 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
96 * Set only on the final fragment of a packet; %NULL for all other 100 * freed when descriptor completes
97 * fragments. When this fragment completes, then we can free this 101 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
98 * skb. 102 * freed when descriptor completes.
99 * @tsoh: The associated TSO header structure, or %NULL if this
100 * buffer is not a TSO header.
101 * @dma_addr: DMA address of the fragment. 103 * @dma_addr: DMA address of the fragment.
104 * @flags: Flags for allocation and DMA mapping type
102 * @len: Length of this fragment. 105 * @len: Length of this fragment.
103 * This field is zero when the queue slot is empty. 106 * This field is zero when the queue slot is empty.
104 * @continuation: True if this fragment is not the end of a packet.
105 * @unmap_single: True if dma_unmap_single should be used.
106 * @unmap_len: Length of this fragment to unmap 107 * @unmap_len: Length of this fragment to unmap
107 */ 108 */
108struct efx_tx_buffer { 109struct efx_tx_buffer {
109 const struct sk_buff *skb; 110 union {
110 struct efx_tso_header *tsoh; 111 const struct sk_buff *skb;
112 void *heap_buf;
113 };
111 dma_addr_t dma_addr; 114 dma_addr_t dma_addr;
115 unsigned short flags;
112 unsigned short len; 116 unsigned short len;
113 bool continuation;
114 bool unmap_single;
115 unsigned short unmap_len; 117 unsigned short unmap_len;
116}; 118};
119#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
120#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
121#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
122#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
117 123
118/** 124/**
119 * struct efx_tx_queue - An Efx TX queue 125 * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +139,7 @@ struct efx_tx_buffer {
133 * @channel: The associated channel 139 * @channel: The associated channel
134 * @core_txq: The networking core TX queue structure 140 * @core_txq: The networking core TX queue structure
135 * @buffer: The software buffer ring 141 * @buffer: The software buffer ring
142 * @tsoh_page: Array of pages of TSO header buffers
136 * @txd: The hardware descriptor ring 143 * @txd: The hardware descriptor ring
137 * @ptr_mask: The size of the ring minus 1. 144 * @ptr_mask: The size of the ring minus 1.
138 * @initialised: Has hardware queue been initialised? 145 * @initialised: Has hardware queue been initialised?
@@ -156,9 +163,6 @@ struct efx_tx_buffer {
156 * variable indicates that the queue is full. This is to 163 * variable indicates that the queue is full. This is to
157 * avoid cache-line ping-pong between the xmit path and the 164 * avoid cache-line ping-pong between the xmit path and the
158 * completion path. 165 * completion path.
159 * @tso_headers_free: A list of TSO headers allocated for this TX queue
160 * that are not in use, and so available for new TSO sends. The list
161 * is protected by the TX queue lock.
162 * @tso_bursts: Number of times TSO xmit invoked by kernel 166 * @tso_bursts: Number of times TSO xmit invoked by kernel
163 * @tso_long_headers: Number of packets with headers too long for standard 167 * @tso_long_headers: Number of packets with headers too long for standard
164 * blocks 168 * blocks
@@ -175,6 +179,7 @@ struct efx_tx_queue {
175 struct efx_channel *channel; 179 struct efx_channel *channel;
176 struct netdev_queue *core_txq; 180 struct netdev_queue *core_txq;
177 struct efx_tx_buffer *buffer; 181 struct efx_tx_buffer *buffer;
182 struct efx_buffer *tsoh_page;
178 struct efx_special_buffer txd; 183 struct efx_special_buffer txd;
179 unsigned int ptr_mask; 184 unsigned int ptr_mask;
180 bool initialised; 185 bool initialised;
@@ -187,7 +192,6 @@ struct efx_tx_queue {
187 unsigned int insert_count ____cacheline_aligned_in_smp; 192 unsigned int insert_count ____cacheline_aligned_in_smp;
188 unsigned int write_count; 193 unsigned int write_count;
189 unsigned int old_read_count; 194 unsigned int old_read_count;
190 struct efx_tso_header *tso_headers_free;
191 unsigned int tso_bursts; 195 unsigned int tso_bursts;
192 unsigned int tso_long_headers; 196 unsigned int tso_long_headers;
193 unsigned int tso_packets; 197 unsigned int tso_packets;
@@ -242,6 +246,8 @@ struct efx_rx_page_state {
242/** 246/**
243 * struct efx_rx_queue - An Efx RX queue 247 * struct efx_rx_queue - An Efx RX queue
244 * @efx: The associated Efx NIC 248 * @efx: The associated Efx NIC
249 * @core_index: Index of network core RX queue. Will be >= 0 iff this
250 * is associated with a real RX queue.
245 * @buffer: The software buffer ring 251 * @buffer: The software buffer ring
246 * @rxd: The hardware descriptor ring 252 * @rxd: The hardware descriptor ring
247 * @ptr_mask: The size of the ring minus 1. 253 * @ptr_mask: The size of the ring minus 1.
@@ -263,6 +269,7 @@ struct efx_rx_page_state {
263 */ 269 */
264struct efx_rx_queue { 270struct efx_rx_queue {
265 struct efx_nic *efx; 271 struct efx_nic *efx;
272 int core_index;
266 struct efx_rx_buffer *buffer; 273 struct efx_rx_buffer *buffer;
267 struct efx_special_buffer rxd; 274 struct efx_special_buffer rxd;
268 unsigned int ptr_mask; 275 unsigned int ptr_mask;
@@ -390,14 +397,17 @@ struct efx_channel {
390 * @get_name: Generate the channel's name (used for its IRQ handler) 397 * @get_name: Generate the channel's name (used for its IRQ handler)
391 * @copy: Copy the channel state prior to reallocation. May be %NULL if 398 * @copy: Copy the channel state prior to reallocation. May be %NULL if
392 * reallocation is not supported. 399 * reallocation is not supported.
400 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
393 * @keep_eventq: Flag for whether event queue should be kept initialised 401 * @keep_eventq: Flag for whether event queue should be kept initialised
394 * while the device is stopped 402 * while the device is stopped
395 */ 403 */
396struct efx_channel_type { 404struct efx_channel_type {
397 void (*handle_no_channel)(struct efx_nic *); 405 void (*handle_no_channel)(struct efx_nic *);
398 int (*pre_probe)(struct efx_channel *); 406 int (*pre_probe)(struct efx_channel *);
407 void (*post_remove)(struct efx_channel *);
399 void (*get_name)(struct efx_channel *, char *buf, size_t len); 408 void (*get_name)(struct efx_channel *, char *buf, size_t len);
400 struct efx_channel *(*copy)(const struct efx_channel *); 409 struct efx_channel *(*copy)(const struct efx_channel *);
410 void (*receive_skb)(struct efx_channel *, struct sk_buff *);
401 bool keep_eventq; 411 bool keep_eventq;
402}; 412};
403 413
@@ -430,11 +440,9 @@ enum efx_int_mode {
430#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 440#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
431 441
432enum nic_state { 442enum nic_state {
433 STATE_INIT = 0, 443 STATE_UNINIT = 0, /* device being probed/removed or is frozen */
434 STATE_RUNNING = 1, 444 STATE_READY = 1, /* hardware ready and netdev registered */
435 STATE_FINI = 2, 445 STATE_DISABLED = 2, /* device disabled due to hardware errors */
436 STATE_DISABLED = 3,
437 STATE_MAX,
438}; 446};
439 447
440/* 448/*
@@ -654,7 +662,7 @@ struct vfdi_status;
654 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 662 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
655 * @irq_rx_moderation: IRQ moderation time for RX event queues 663 * @irq_rx_moderation: IRQ moderation time for RX event queues
656 * @msg_enable: Log message enable flags 664 * @msg_enable: Log message enable flags
657 * @state: Device state flag. Serialised by the rtnl_lock. 665 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
658 * @reset_pending: Bitmask for pending resets 666 * @reset_pending: Bitmask for pending resets
659 * @tx_queue: TX DMA queues 667 * @tx_queue: TX DMA queues
660 * @rx_queue: RX DMA queues 668 * @rx_queue: RX DMA queues
@@ -664,6 +672,8 @@ struct vfdi_status;
664 * should be allocated for this NIC 672 * should be allocated for this NIC
665 * @rxq_entries: Size of receive queues requested by user. 673 * @rxq_entries: Size of receive queues requested by user.
666 * @txq_entries: Size of transmit queues requested by user. 674 * @txq_entries: Size of transmit queues requested by user.
675 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
676 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
667 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches 677 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
668 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches 678 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
669 * @sram_lim_qw: Qword address limit of SRAM 679 * @sram_lim_qw: Qword address limit of SRAM
@@ -730,6 +740,7 @@ struct vfdi_status;
730 * %local_addr_list. Protected by %local_lock. 740 * %local_addr_list. Protected by %local_lock.
731 * @local_lock: Mutex protecting %local_addr_list and %local_page_list. 741 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
732 * @peer_work: Work item to broadcast peer addresses to VMs. 742 * @peer_work: Work item to broadcast peer addresses to VMs.
743 * @ptp_data: PTP state data
733 * @monitor_work: Hardware monitor workitem 744 * @monitor_work: Hardware monitor workitem
734 * @biu_lock: BIU (bus interface unit) lock 745 * @biu_lock: BIU (bus interface unit) lock
735 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 746 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -774,6 +785,9 @@ struct efx_nic {
774 785
775 unsigned rxq_entries; 786 unsigned rxq_entries;
776 unsigned txq_entries; 787 unsigned txq_entries;
788 unsigned int txq_stop_thresh;
789 unsigned int txq_wake_thresh;
790
777 unsigned tx_dc_base; 791 unsigned tx_dc_base;
778 unsigned rx_dc_base; 792 unsigned rx_dc_base;
779 unsigned sram_lim_qw; 793 unsigned sram_lim_qw;
@@ -854,6 +868,10 @@ struct efx_nic {
854 struct work_struct peer_work; 868 struct work_struct peer_work;
855#endif 869#endif
856 870
871#ifdef CONFIG_SFC_PTP
872 struct efx_ptp_data *ptp_data;
873#endif
874
857 /* The following fields may be written more often */ 875 /* The following fields may be written more often */
858 876
859 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 877 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -1044,7 +1062,7 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
1044 1062
1045static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) 1063static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
1046{ 1064{
1047 return channel->channel < channel->efx->n_rx_channels; 1065 return channel->rx_queue.core_index >= 0;
1048} 1066}
1049 1067
1050static inline struct efx_rx_queue * 1068static inline struct efx_rx_queue *
@@ -1116,5 +1134,13 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
1116#define EFX_MAX_FRAME_LEN(mtu) \ 1134#define EFX_MAX_FRAME_LEN(mtu) \
1117 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) 1135 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
1118 1136
1137static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
1138{
1139 return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
1140}
1141static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
1142{
1143 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1144}
1119 1145
1120#endif /* EFX_NET_DRIVER_H */ 1146#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 326d799762d6..cdff40b65729 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
298/************************************************************************** 298/**************************************************************************
299 * 299 *
300 * Generic buffer handling 300 * Generic buffer handling
301 * These buffers are used for interrupt status and MAC stats 301 * These buffers are used for interrupt status, MAC stats, etc.
302 * 302 *
303 **************************************************************************/ 303 **************************************************************************/
304 304
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
401 ++tx_queue->write_count; 401 ++tx_queue->write_count;
402 402
403 /* Create TX descriptor ring entry */ 403 /* Create TX descriptor ring entry */
404 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
404 EFX_POPULATE_QWORD_4(*txd, 405 EFX_POPULATE_QWORD_4(*txd,
405 FSF_AZ_TX_KER_CONT, buffer->continuation, 406 FSF_AZ_TX_KER_CONT,
407 buffer->flags & EFX_TX_BUF_CONT,
406 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 408 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
407 FSF_AZ_TX_KER_BUF_REGION, 0, 409 FSF_AZ_TX_KER_BUF_REGION, 0,
408 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 410 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index bab5cd9f5740..438cef11f727 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -11,6 +11,7 @@
11#ifndef EFX_NIC_H 11#ifndef EFX_NIC_H
12#define EFX_NIC_H 12#define EFX_NIC_H
13 13
14#include <linux/net_tstamp.h>
14#include <linux/i2c-algo-bit.h> 15#include <linux/i2c-algo-bit.h>
15#include "net_driver.h" 16#include "net_driver.h"
16#include "efx.h" 17#include "efx.h"
@@ -250,6 +251,41 @@ extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
250extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, 251extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
251 bool spoofchk); 252 bool spoofchk);
252 253
254struct ethtool_ts_info;
255#ifdef CONFIG_SFC_PTP
256extern void efx_ptp_probe(struct efx_nic *efx);
257extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
258extern int efx_ptp_get_ts_info(struct net_device *net_dev,
259 struct ethtool_ts_info *ts_info);
260extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
261extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
262extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
263#else
264static inline void efx_ptp_probe(struct efx_nic *efx) {}
265static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
266{
267 return -EOPNOTSUPP;
268}
269static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
270 struct ethtool_ts_info *ts_info)
271{
272 ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
273 SOF_TIMESTAMPING_RX_SOFTWARE);
274 ts_info->phc_index = -1;
275
276 return 0;
277}
278static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
279{
280 return false;
281}
282static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
283{
284 return NETDEV_TX_OK;
285}
286static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
287#endif
288
253extern const struct efx_nic_type falcon_a1_nic_type; 289extern const struct efx_nic_type falcon_a1_nic_type;
254extern const struct efx_nic_type falcon_b0_nic_type; 290extern const struct efx_nic_type falcon_b0_nic_type;
255extern const struct efx_nic_type siena_a0_nic_type; 291extern const struct efx_nic_type siena_a0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
new file mode 100644
index 000000000000..5b3dd028ce85
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -0,0 +1,1484 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/* Theory of operation:
11 *
12 * PTP support is assisted by firmware running on the MC, which provides
13 * the hardware timestamping capabilities. Both transmitted and received
14 * PTP event packets are queued onto internal queues for subsequent processing;
15 * this is because the MC operations are relatively long and would block
16 * block NAPI/interrupt operation.
17 *
18 * Receive event processing:
19 * The event contains the packet's UUID and sequence number, together
20 * with the hardware timestamp. The PTP receive packet queue is searched
21 * for this UUID/sequence number and, if found, put on a pending queue.
22 * Packets not matching are delivered without timestamps (MCDI events will
23 * always arrive after the actual packet).
24 * It is important for the operation of the PTP protocol that the ordering
25 * of packets between the event and general port is maintained.
26 *
27 * Work queue processing:
28 * If work waiting, synchronise host/hardware time
29 *
30 * Transmit: send packet through MC, which returns the transmission time
31 * that is converted to an appropriate timestamp.
32 *
33 * Receive: the packet's reception time is converted to an appropriate
34 * timestamp.
35 */
36#include <linux/ip.h>
37#include <linux/udp.h>
38#include <linux/time.h>
39#include <linux/ktime.h>
40#include <linux/module.h>
41#include <linux/net_tstamp.h>
42#include <linux/pps_kernel.h>
43#include <linux/ptp_clock_kernel.h>
44#include "net_driver.h"
45#include "efx.h"
46#include "mcdi.h"
47#include "mcdi_pcol.h"
48#include "io.h"
49#include "regs.h"
50#include "nic.h"
51
52/* Maximum number of events expected to make up a PTP event */
53#define MAX_EVENT_FRAGS 3
54
55/* Maximum delay, ms, to begin synchronisation */
56#define MAX_SYNCHRONISE_WAIT_MS 2
57
58/* How long, at most, to spend synchronising */
59#define SYNCHRONISE_PERIOD_NS 250000
60
61/* How often to update the shared memory time */
62#define SYNCHRONISATION_GRANULARITY_NS 200
63
64/* Minimum permitted length of a (corrected) synchronisation time */
65#define MIN_SYNCHRONISATION_NS 120
66
67/* Maximum permitted length of a (corrected) synchronisation time */
68#define MAX_SYNCHRONISATION_NS 1000
69
70/* How many (MC) receive events that can be queued */
71#define MAX_RECEIVE_EVENTS 8
72
73/* Length of (modified) moving average. */
74#define AVERAGE_LENGTH 16
75
76/* How long an unmatched event or packet can be held */
77#define PKT_EVENT_LIFETIME_MS 10
78
79/* Offsets into PTP packet for identification. These offsets are from the
80 * start of the IP header, not the MAC header. Note that neither PTP V1 nor
81 * PTP V2 permit the use of IPV4 options.
82 */
83#define PTP_DPORT_OFFSET 22
84
85#define PTP_V1_VERSION_LENGTH 2
86#define PTP_V1_VERSION_OFFSET 28
87
88#define PTP_V1_UUID_LENGTH 6
89#define PTP_V1_UUID_OFFSET 50
90
91#define PTP_V1_SEQUENCE_LENGTH 2
92#define PTP_V1_SEQUENCE_OFFSET 58
93
94/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
95 * includes IP header.
96 */
97#define PTP_V1_MIN_LENGTH 64
98
99#define PTP_V2_VERSION_LENGTH 1
100#define PTP_V2_VERSION_OFFSET 29
101
102/* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2),
103 * the MC only captures the last six bytes of the clock identity. These values
104 * reflect those, not the ones used in the standard. The standard permits
105 * mapping of V1 UUIDs to V2 UUIDs with these same values.
106 */
107#define PTP_V2_MC_UUID_LENGTH 6
108#define PTP_V2_MC_UUID_OFFSET 50
109
110#define PTP_V2_SEQUENCE_LENGTH 2
111#define PTP_V2_SEQUENCE_OFFSET 58
112
113/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
114 * includes IP header.
115 */
116#define PTP_V2_MIN_LENGTH 63
117
118#define PTP_MIN_LENGTH 63
119
120#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
121#define PTP_EVENT_PORT 319
122#define PTP_GENERAL_PORT 320
123
124/* Annoyingly the format of the version numbers are different between
125 * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
126 */
127#define PTP_VERSION_V1 1
128
129#define PTP_VERSION_V2 2
130#define PTP_VERSION_V2_MASK 0x0f
131
132enum ptp_packet_state {
133 PTP_PACKET_STATE_UNMATCHED = 0,
134 PTP_PACKET_STATE_MATCHED,
135 PTP_PACKET_STATE_TIMED_OUT,
136 PTP_PACKET_STATE_MATCH_UNWANTED
137};
138
139/* NIC synchronised with single word of time only comprising
140 * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
141 */
142#define MC_NANOSECOND_BITS 30
143#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
144#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
145
146/* Maximum parts-per-billion adjustment that is acceptable */
147#define MAX_PPB 1000000
148
149/* Number of bits required to hold the above */
150#define MAX_PPB_BITS 20
151
152/* Number of extra bits allowed when calculating fractional ns.
153 * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
154 * be less than 63.
155 */
156#define PPB_EXTRA_BITS 2
157
158/* Precalculate scale word to avoid long long division at runtime */
159#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
160 MAX_PPB_BITS)) / 1000000000LL)
161
162#define PTP_SYNC_ATTEMPTS 4
163
164/**
165 * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
166 * @words: UUID and (partial) sequence number
167 * @expiry: Time after which the packet should be delivered irrespective of
168 * event arrival.
169 * @state: The state of the packet - whether it is ready for processing or
170 * whether that is of no interest.
171 */
172struct efx_ptp_match {
173 u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
174 unsigned long expiry;
175 enum ptp_packet_state state;
176};
177
178/**
179 * struct efx_ptp_event_rx - A PTP receive event (from MC)
180 * @seq0: First part of (PTP) UUID
181 * @seq1: Second part of (PTP) UUID and sequence number
182 * @hwtimestamp: Event timestamp
183 */
184struct efx_ptp_event_rx {
185 struct list_head link;
186 u32 seq0;
187 u32 seq1;
188 ktime_t hwtimestamp;
189 unsigned long expiry;
190};
191
192/**
193 * struct efx_ptp_timeset - Synchronisation between host and MC
194 * @host_start: Host time immediately before hardware timestamp taken
195 * @seconds: Hardware timestamp, seconds
196 * @nanoseconds: Hardware timestamp, nanoseconds
197 * @host_end: Host time immediately after hardware timestamp taken
198 * @waitns: Number of nanoseconds between hardware timestamp being read and
199 * host end time being seen
200 * @window: Difference of host_end and host_start
201 * @valid: Whether this timeset is valid
202 */
203struct efx_ptp_timeset {
204 u32 host_start;
205 u32 seconds;
206 u32 nanoseconds;
207 u32 host_end;
208 u32 waitns;
209 u32 window; /* Derived: end - start, allowing for wrap */
210};
211
212/**
213 * struct efx_ptp_data - Precision Time Protocol (PTP) state
214 * @channel: The PTP channel
215 * @rxq: Receive queue (awaiting timestamps)
216 * @txq: Transmit queue
217 * @evt_list: List of MC receive events awaiting packets
218 * @evt_free_list: List of free events
219 * @evt_lock: Lock for manipulating evt_list and evt_free_list
220 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
221 * @workwq: Work queue for processing pending PTP operations
222 * @work: Work task
223 * @reset_required: A serious error has occurred and the PTP task needs to be
224 * reset (disable, enable).
225 * @rxfilter_event: Receive filter when operating
226 * @rxfilter_general: Receive filter when operating
227 * @config: Current timestamp configuration
228 * @enabled: PTP operation enabled
229 * @mode: Mode in which PTP operating (PTP version)
230 * @evt_frags: Partly assembled PTP events
231 * @evt_frag_idx: Current fragment number
232 * @evt_code: Last event code
233 * @start: Address at which MC indicates ready for synchronisation
234 * @host_time_pps: Host time at last PPS
235 * @last_sync_ns: Last number of nanoseconds between readings when synchronising
236 * @base_sync_ns: Number of nanoseconds for last synchronisation.
237 * @base_sync_valid: Whether base_sync_time is valid.
238 * @current_adjfreq: Current ppb adjustment.
239 * @phc_clock: Pointer to registered phc device
240 * @phc_clock_info: Registration structure for phc device
241 * @pps_work: pps work task for handling pps events
242 * @pps_workwq: pps work queue
243 * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
244 * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
245 * allocations in main data path).
246 * @debug_ptp_dir: PTP debugfs directory
247 * @missed_rx_sync: Number of packets received without syncrhonisation.
248 * @good_syncs: Number of successful synchronisations.
249 * @no_time_syncs: Number of synchronisations with no good times.
250 * @bad_sync_durations: Number of synchronisations with bad durations.
251 * @bad_syncs: Number of failed synchronisations.
252 * @last_sync_time: Number of nanoseconds for last synchronisation.
253 * @sync_timeouts: Number of synchronisation timeouts
254 * @fast_syncs: Number of synchronisations requiring short delay
255 * @min_sync_delta: Minimum time between event and synchronisation
256 * @max_sync_delta: Maximum time between event and synchronisation
257 * @average_sync_delta: Average time between event and synchronisation.
258 * Modified moving average.
259 * @last_sync_delta: Last time between event and synchronisation
260 * @mc_stats: Context value for MC statistics
261 * @timeset: Last set of synchronisation statistics.
262 */
263struct efx_ptp_data {
264 struct efx_channel *channel;
265 struct sk_buff_head rxq;
266 struct sk_buff_head txq;
267 struct list_head evt_list;
268 struct list_head evt_free_list;
269 spinlock_t evt_lock;
270 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
271 struct workqueue_struct *workwq;
272 struct work_struct work;
273 bool reset_required;
274 u32 rxfilter_event;
275 u32 rxfilter_general;
276 bool rxfilter_installed;
277 struct hwtstamp_config config;
278 bool enabled;
279 unsigned int mode;
280 efx_qword_t evt_frags[MAX_EVENT_FRAGS];
281 int evt_frag_idx;
282 int evt_code;
283 struct efx_buffer start;
284 struct pps_event_time host_time_pps;
285 unsigned last_sync_ns;
286 unsigned base_sync_ns;
287 bool base_sync_valid;
288 s64 current_adjfreq;
289 struct ptp_clock *phc_clock;
290 struct ptp_clock_info phc_clock_info;
291 struct work_struct pps_work;
292 struct workqueue_struct *pps_workwq;
293 bool nic_ts_enabled;
294 u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
295 MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
296 struct efx_ptp_timeset
297 timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
298};
299
300static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
301static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
302static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
303static int efx_phc_settime(struct ptp_clock_info *ptp,
304 const struct timespec *e_ts);
305static int efx_phc_enable(struct ptp_clock_info *ptp,
306 struct ptp_clock_request *request, int on);
307
308/* Enable MCDI PTP support. */
309static int efx_ptp_enable(struct efx_nic *efx)
310{
311 u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
312
313 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
314 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
315 efx->ptp_data->channel->channel);
316 MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
317
318 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
319 NULL, 0, NULL);
320}
321
322/* Disable MCDI PTP support.
323 *
324 * Note that this function should never rely on the presence of ptp_data -
325 * may be called before that exists.
326 */
327static int efx_ptp_disable(struct efx_nic *efx)
328{
329 u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
330
331 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
332 return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
333 NULL, 0, NULL);
334}
335
336static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
337{
338 struct sk_buff *skb;
339
340 while ((skb = skb_dequeue(q))) {
341 local_bh_disable();
342 netif_receive_skb(skb);
343 local_bh_enable();
344 }
345}
346
347static void efx_ptp_handle_no_channel(struct efx_nic *efx)
348{
349 netif_err(efx, drv, efx->net_dev,
350 "ERROR: PTP requires MSI-X and 1 additional interrupt"
351 "vector. PTP disabled\n");
352}
353
354/* Repeatedly send the host time to the MC which will capture the hardware
355 * time.
356 */
357static void efx_ptp_send_times(struct efx_nic *efx,
358 struct pps_event_time *last_time)
359{
360 struct pps_event_time now;
361 struct timespec limit;
362 struct efx_ptp_data *ptp = efx->ptp_data;
363 struct timespec start;
364 int *mc_running = ptp->start.addr;
365
366 pps_get_ts(&now);
367 start = now.ts_real;
368 limit = now.ts_real;
369 timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
370
371 /* Write host time for specified period or until MC is done */
372 while ((timespec_compare(&now.ts_real, &limit) < 0) &&
373 ACCESS_ONCE(*mc_running)) {
374 struct timespec update_time;
375 unsigned int host_time;
376
377 /* Don't update continuously to avoid saturating the PCIe bus */
378 update_time = now.ts_real;
379 timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
380 do {
381 pps_get_ts(&now);
382 } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
383 ACCESS_ONCE(*mc_running));
384
385 /* Synchronise NIC with single word of time only */
386 host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
387 now.ts_real.tv_nsec);
388 /* Update host time in NIC memory */
389 _efx_writed(efx, cpu_to_le32(host_time),
390 FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
391 }
392 *last_time = now;
393}
394
395/* Read a timeset from the MC's results and partial process. */
396static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
397{
398 unsigned start_ns, end_ns;
399
400 timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
401 timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
402 timeset->nanoseconds = MCDI_DWORD(data,
403 PTP_OUT_SYNCHRONIZE_NANOSECONDS);
404 timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
405 timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
406
407 /* Ignore seconds */
408 start_ns = timeset->host_start & MC_NANOSECOND_MASK;
409 end_ns = timeset->host_end & MC_NANOSECOND_MASK;
410 /* Allow for rollover */
411 if (end_ns < start_ns)
412 end_ns += NSEC_PER_SEC;
413 /* Determine duration of operation */
414 timeset->window = end_ns - start_ns;
415}
416
417/* Process times received from MC.
418 *
419 * Extract times from returned results, and establish the minimum value
420 * seen. The minimum value represents the "best" possible time and events
421 * too much greater than this are rejected - the machine is, perhaps, too
422 * busy. A number of readings are taken so that, hopefully, at least one good
423 * synchronisation will be seen in the results.
424 */
425static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
426 size_t response_length,
427 const struct pps_event_time *last_time)
428{
429 unsigned number_readings = (response_length /
430 MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
431 unsigned i;
432 unsigned min;
433 unsigned min_set = 0;
434 unsigned total;
435 unsigned ngood = 0;
436 unsigned last_good = 0;
437 struct efx_ptp_data *ptp = efx->ptp_data;
438 bool min_valid = false;
439 u32 last_sec;
440 u32 start_sec;
441 struct timespec delta;
442
443 if (number_readings == 0)
444 return -EAGAIN;
445
446 /* Find minimum value in this set of results, discarding clearly
447 * erroneous results.
448 */
449 for (i = 0; i < number_readings; i++) {
450 efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
451 synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
452 if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
453 if (min_valid) {
454 if (ptp->timeset[i].window < min_set)
455 min_set = ptp->timeset[i].window;
456 } else {
457 min_valid = true;
458 min_set = ptp->timeset[i].window;
459 }
460 }
461 }
462
463 if (min_valid) {
464 if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
465 min = ptp->base_sync_ns;
466 else
467 min = min_set;
468 } else {
469 min = SYNCHRONISATION_GRANULARITY_NS;
470 }
471
472 /* Discard excessively long synchronise durations. The MC times
473 * when it finishes reading the host time so the corrected window
474 * time should be fairly constant for a given platform.
475 */
476 total = 0;
477 for (i = 0; i < number_readings; i++)
478 if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
479 unsigned win;
480
481 win = ptp->timeset[i].window - ptp->timeset[i].waitns;
482 if (win >= MIN_SYNCHRONISATION_NS &&
483 win < MAX_SYNCHRONISATION_NS) {
484 total += ptp->timeset[i].window;
485 ngood++;
486 last_good = i;
487 }
488 }
489
490 if (ngood == 0) {
491 netif_warn(efx, drv, efx->net_dev,
492 "PTP no suitable synchronisations %dns %dns\n",
493 ptp->base_sync_ns, min_set);
494 return -EAGAIN;
495 }
496
497 /* Average minimum this synchronisation */
498 ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
499 if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
500 ptp->base_sync_valid = true;
501 ptp->base_sync_ns = ptp->last_sync_ns;
502 }
503
504 /* Calculate delay from actual PPS to last_time */
505 delta.tv_nsec =
506 ptp->timeset[last_good].nanoseconds +
507 last_time->ts_real.tv_nsec -
508 (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
509
510 /* It is possible that the seconds rolled over between taking
511 * the start reading and the last value written by the host. The
512 * timescales are such that a gap of more than one second is never
513 * expected.
514 */
515 start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
516 last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
517 if (start_sec != last_sec) {
518 if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
519 netif_warn(efx, hw, efx->net_dev,
520 "PTP bad synchronisation seconds\n");
521 return -EAGAIN;
522 } else {
523 delta.tv_sec = 1;
524 }
525 } else {
526 delta.tv_sec = 0;
527 }
528
529 ptp->host_time_pps = *last_time;
530 pps_sub_ts(&ptp->host_time_pps, delta);
531
532 return 0;
533}
534
535/* Synchronize times between the host and the MC */
536static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
537{
538 struct efx_ptp_data *ptp = efx->ptp_data;
539 u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
540 size_t response_length;
541 int rc;
542 unsigned long timeout;
543 struct pps_event_time last_time = {};
544 unsigned int loops = 0;
545 int *start = ptp->start.addr;
546
547 MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
548 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
549 num_readings);
550 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
551 (u32)ptp->start.dma_addr);
552 MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
553 (u32)((u64)ptp->start.dma_addr >> 32));
554
555 /* Clear flag that signals MC ready */
556 ACCESS_ONCE(*start) = 0;
557 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
558 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
559
560 /* Wait for start from MCDI (or timeout) */
561 timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
562 while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
563 udelay(20); /* Usually start MCDI execution quickly */
564 loops++;
565 }
566
567 if (ACCESS_ONCE(*start))
568 efx_ptp_send_times(efx, &last_time);
569
570 /* Collect results */
571 rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
572 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
573 synch_buf, sizeof(synch_buf),
574 &response_length);
575 if (rc == 0)
576 rc = efx_ptp_process_times(efx, synch_buf, response_length,
577 &last_time);
578
579 return rc;
580}
581
582/* Transmit a PTP packet, via the MCDI interface, to the wire. */
583static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
584{
585 u8 *txbuf = efx->ptp_data->txbuf;
586 struct skb_shared_hwtstamps timestamps;
587 int rc = -EIO;
588 /* MCDI driver requires word aligned lengths */
589 size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
590 u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
591
592 MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
593 MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
594 if (skb_shinfo(skb)->nr_frags != 0) {
595 rc = skb_linearize(skb);
596 if (rc != 0)
597 goto fail;
598 }
599
600 if (skb->ip_summed == CHECKSUM_PARTIAL) {
601 rc = skb_checksum_help(skb);
602 if (rc != 0)
603 goto fail;
604 }
605 skb_copy_from_linear_data(skb,
606 &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
607 len);
608 rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
609 sizeof(txtime), &len);
610 if (rc != 0)
611 goto fail;
612
613 memset(&timestamps, 0, sizeof(timestamps));
614 timestamps.hwtstamp = ktime_set(
615 MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
616 MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
617
618 skb_tstamp_tx(skb, &timestamps);
619
620 rc = 0;
621
622fail:
623 dev_kfree_skb(skb);
624
625 return rc;
626}
627
628static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
629{
630 struct efx_ptp_data *ptp = efx->ptp_data;
631 struct list_head *cursor;
632 struct list_head *next;
633
634 /* Drop time-expired events */
635 spin_lock_bh(&ptp->evt_lock);
636 if (!list_empty(&ptp->evt_list)) {
637 list_for_each_safe(cursor, next, &ptp->evt_list) {
638 struct efx_ptp_event_rx *evt;
639
640 evt = list_entry(cursor, struct efx_ptp_event_rx,
641 link);
642 if (time_after(jiffies, evt->expiry)) {
643 list_del(&evt->link);
644 list_add(&evt->link, &ptp->evt_free_list);
645 netif_warn(efx, hw, efx->net_dev,
646 "PTP rx event dropped\n");
647 }
648 }
649 }
650 spin_unlock_bh(&ptp->evt_lock);
651}
652
653static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
654 struct sk_buff *skb)
655{
656 struct efx_ptp_data *ptp = efx->ptp_data;
657 bool evts_waiting;
658 struct list_head *cursor;
659 struct list_head *next;
660 struct efx_ptp_match *match;
661 enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
662
663 spin_lock_bh(&ptp->evt_lock);
664 evts_waiting = !list_empty(&ptp->evt_list);
665 spin_unlock_bh(&ptp->evt_lock);
666
667 if (!evts_waiting)
668 return PTP_PACKET_STATE_UNMATCHED;
669
670 match = (struct efx_ptp_match *)skb->cb;
671 /* Look for a matching timestamp in the event queue */
672 spin_lock_bh(&ptp->evt_lock);
673 list_for_each_safe(cursor, next, &ptp->evt_list) {
674 struct efx_ptp_event_rx *evt;
675
676 evt = list_entry(cursor, struct efx_ptp_event_rx, link);
677 if ((evt->seq0 == match->words[0]) &&
678 (evt->seq1 == match->words[1])) {
679 struct skb_shared_hwtstamps *timestamps;
680
681 /* Match - add in hardware timestamp */
682 timestamps = skb_hwtstamps(skb);
683 timestamps->hwtstamp = evt->hwtimestamp;
684
685 match->state = PTP_PACKET_STATE_MATCHED;
686 rc = PTP_PACKET_STATE_MATCHED;
687 list_del(&evt->link);
688 list_add(&evt->link, &ptp->evt_free_list);
689 break;
690 }
691 }
692 spin_unlock_bh(&ptp->evt_lock);
693
694 return rc;
695}
696
697/* Process any queued receive events and corresponding packets
698 *
699 * q is returned with all the packets that are ready for delivery.
700 * true is returned if at least one of those packets requires
701 * synchronisation.
702 */
703static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
704{
705 struct efx_ptp_data *ptp = efx->ptp_data;
706 bool rc = false;
707 struct sk_buff *skb;
708
709 while ((skb = skb_dequeue(&ptp->rxq))) {
710 struct efx_ptp_match *match;
711
712 match = (struct efx_ptp_match *)skb->cb;
713 if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
714 __skb_queue_tail(q, skb);
715 } else if (efx_ptp_match_rx(efx, skb) ==
716 PTP_PACKET_STATE_MATCHED) {
717 rc = true;
718 __skb_queue_tail(q, skb);
719 } else if (time_after(jiffies, match->expiry)) {
720 match->state = PTP_PACKET_STATE_TIMED_OUT;
721 netif_warn(efx, rx_err, efx->net_dev,
722 "PTP packet - no timestamp seen\n");
723 __skb_queue_tail(q, skb);
724 } else {
725 /* Replace unprocessed entry and stop */
726 skb_queue_head(&ptp->rxq, skb);
727 break;
728 }
729 }
730
731 return rc;
732}
733
734/* Complete processing of a received packet */
735static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
736{
737 local_bh_disable();
738 netif_receive_skb(skb);
739 local_bh_enable();
740}
741
/* Install the PTP RX filters and enable timestamping on the MC.
 * Returns 0 on success; on failure any filter already inserted is
 * removed again and a negative error code is returned.
 */
static int efx_ptp_start(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_filter_spec rxfilter;
	int rc;

	ptp->reset_required = false;

	/* Must filter on both event and general ports to ensure
	 * that there is no packet re-ordering.
	 */
	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_EVENT_PORT));
	if (rc != 0)
		return rc;

	/* On success efx_filter_insert_filter() returns a non-negative
	 * filter ID, stored so the filter can be removed later.
	 */
	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		return rc;
	ptp->rxfilter_event = rc;

	/* Second filter: the PTP general-message port */
	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_GENERAL_PORT));
	if (rc != 0)
		goto fail;

	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		goto fail;
	ptp->rxfilter_general = rc;

	/* Tell the MC to start timestamping */
	rc = efx_ptp_enable(efx);
	if (rc != 0)
		goto fail2;

	ptp->evt_frag_idx = 0;
	ptp->current_adjfreq = 0;
	ptp->rxfilter_installed = true;

	return 0;

fail2:
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_general);
fail:
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_event);

	return rc;
}
800
/* Disable MC timestamping, remove the RX filters and flush all
 * queued packets and pending receive events.  Returns the result
 * of efx_ptp_disable().
 */
static int efx_ptp_stop(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	int rc = efx_ptp_disable(efx);
	struct list_head *cursor;
	struct list_head *next;

	if (ptp->rxfilter_installed) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  ptp->rxfilter_general);
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  ptp->rxfilter_event);
		ptp->rxfilter_installed = false;
	}

	/* Make sure RX packets are really delivered */
	efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
	skb_queue_purge(&efx->ptp_data->txq);

	/* Drop any pending receive events: move every entry back onto
	 * the free list under the event lock.
	 */
	spin_lock_bh(&efx->ptp_data->evt_lock);
	list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
		list_del(cursor);
		list_add(cursor, &efx->ptp_data->evt_free_list);
	}
	spin_unlock_bh(&efx->ptp_data->evt_lock);

	return rc;
}
830
/* PPS work item: resynchronise with the NIC clock and report a
 * PTP_CLOCK_PPSUSR event (carrying the captured host times) to the
 * PHC subsystem.  Gives up silently if synchronisation fails.
 */
static void efx_ptp_pps_worker(struct work_struct *work)
{
	struct efx_ptp_data *ptp =
		container_of(work, struct efx_ptp_data, pps_work);
	struct efx_nic *efx = ptp->channel->efx;
	struct ptp_clock_event ptp_evt;

	if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
		return;

	ptp_evt.type = PTP_CLOCK_PPSUSR;
	ptp_evt.pps_times = ptp->host_time_pps;
	ptp_clock_event(ptp->phc_clock, &ptp_evt);
}
845
/* Process any pending transmissions and timestamp any received packets.
 */
static void efx_ptp_worker(struct work_struct *work)
{
	struct efx_ptp_data *ptp_data =
		container_of(work, struct efx_ptp_data, work);
	struct efx_nic *efx = ptp_data->channel->efx;
	struct sk_buff *skb;
	struct sk_buff_head tempq;

	/* A fatal event (see ptp_event_failure) requested a restart */
	if (ptp_data->reset_required) {
		efx_ptp_stop(efx);
		efx_ptp_start(efx);
		return;
	}

	efx_ptp_drop_time_expired_events(efx);

	/* Collect RX packets ready for delivery into tempq, then drain
	 * the TX queue through the MC.
	 */
	__skb_queue_head_init(&tempq);
	if (efx_ptp_process_events(efx, &tempq) ||
	    !skb_queue_empty(&ptp_data->txq)) {

		while ((skb = skb_dequeue(&ptp_data->txq)))
			efx_ptp_xmit_skb(efx, skb);
	}

	/* Deliver the matched/expired RX packets up the stack */
	while ((skb = __skb_dequeue(&tempq)))
		efx_ptp_process_rx(efx, skb);
}
875
876/* Initialise PTP channel and state.
877 *
878 * Setting core_index to zero causes the queue to be initialised and doesn't
879 * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
880 */
881static int efx_ptp_probe_channel(struct efx_channel *channel)
882{
883 struct efx_nic *efx = channel->efx;
884 struct efx_ptp_data *ptp;
885 int rc = 0;
886 unsigned int pos;
887
888 channel->irq_moderation = 0;
889 channel->rx_queue.core_index = 0;
890
891 ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
892 efx->ptp_data = ptp;
893 if (!efx->ptp_data)
894 return -ENOMEM;
895
896 rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
897 if (rc != 0)
898 goto fail1;
899
900 ptp->channel = channel;
901 skb_queue_head_init(&ptp->rxq);
902 skb_queue_head_init(&ptp->txq);
903 ptp->workwq = create_singlethread_workqueue("sfc_ptp");
904 if (!ptp->workwq) {
905 rc = -ENOMEM;
906 goto fail2;
907 }
908
909 INIT_WORK(&ptp->work, efx_ptp_worker);
910 ptp->config.flags = 0;
911 ptp->config.tx_type = HWTSTAMP_TX_OFF;
912 ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
913 INIT_LIST_HEAD(&ptp->evt_list);
914 INIT_LIST_HEAD(&ptp->evt_free_list);
915 spin_lock_init(&ptp->evt_lock);
916 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
917 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
918
919 ptp->phc_clock_info.owner = THIS_MODULE;
920 snprintf(ptp->phc_clock_info.name,
921 sizeof(ptp->phc_clock_info.name),
922 "%pm", efx->net_dev->perm_addr);
923 ptp->phc_clock_info.max_adj = MAX_PPB;
924 ptp->phc_clock_info.n_alarm = 0;
925 ptp->phc_clock_info.n_ext_ts = 0;
926 ptp->phc_clock_info.n_per_out = 0;
927 ptp->phc_clock_info.pps = 1;
928 ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
929 ptp->phc_clock_info.adjtime = efx_phc_adjtime;
930 ptp->phc_clock_info.gettime = efx_phc_gettime;
931 ptp->phc_clock_info.settime = efx_phc_settime;
932 ptp->phc_clock_info.enable = efx_phc_enable;
933
934 ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
935 &efx->pci_dev->dev);
936 if (!ptp->phc_clock)
937 goto fail3;
938
939 INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
940 ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
941 if (!ptp->pps_workwq) {
942 rc = -ENOMEM;
943 goto fail4;
944 }
945 ptp->nic_ts_enabled = false;
946
947 return 0;
948fail4:
949 ptp_clock_unregister(efx->ptp_data->phc_clock);
950
951fail3:
952 destroy_workqueue(efx->ptp_data->workwq);
953
954fail2:
955 efx_nic_free_buffer(efx, &ptp->start);
956
957fail1:
958 kfree(efx->ptp_data);
959 efx->ptp_data = NULL;
960
961 return rc;
962}
963
964static void efx_ptp_remove_channel(struct efx_channel *channel)
965{
966 struct efx_nic *efx = channel->efx;
967
968 if (!efx->ptp_data)
969 return;
970
971 (void)efx_ptp_disable(channel->efx);
972
973 cancel_work_sync(&efx->ptp_data->work);
974 cancel_work_sync(&efx->ptp_data->pps_work);
975
976 skb_queue_purge(&efx->ptp_data->rxq);
977 skb_queue_purge(&efx->ptp_data->txq);
978
979 ptp_clock_unregister(efx->ptp_data->phc_clock);
980
981 destroy_workqueue(efx->ptp_data->workwq);
982 destroy_workqueue(efx->ptp_data->pps_workwq);
983
984 efx_nic_free_buffer(efx, &efx->ptp_data->start);
985 kfree(efx->ptp_data);
986}
987
/* Name the PTP channel "<nic-name>-ptp" for IRQ/debug reporting */
static void efx_ptp_get_channel_name(struct efx_channel *channel,
				     char *buf, size_t len)
{
	snprintf(buf, len, "%s-ptp", channel->efx->name);
}
993
994/* Determine whether this packet should be processed by the PTP module
995 * or transmitted conventionally.
996 */
997bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
998{
999 return efx->ptp_data &&
1000 efx->ptp_data->enabled &&
1001 skb->len >= PTP_MIN_LENGTH &&
1002 skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
1003 likely(skb->protocol == htons(ETH_P_IP)) &&
1004 ip_hdr(skb)->protocol == IPPROTO_UDP &&
1005 udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
1006}
1007
/* Receive a PTP packet. Packets are queued until the arrival of
 * the receive timestamp from the MC - this will probably occur after the
 * packet arrival because of the processing in the MC.
 */
static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
	struct efx_nic *efx = channel->efx;
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
	u8 *data;
	unsigned int version;

	/* If no timestamp event arrives before this deadline the packet
	 * is delivered un-timestamped (see efx_ptp_process_events).
	 */
	match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);

	/* Correct version?  Packets of the wrong PTP version are handed
	 * straight to the stack without timestamp matching.
	 */
	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
		if (skb->len < PTP_V1_MIN_LENGTH) {
			netif_receive_skb(skb);
			return;
		}
		version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
		if (version != PTP_VERSION_V1) {
			netif_receive_skb(skb);
			return;
		}
	} else {
		if (skb->len < PTP_V2_MIN_LENGTH) {
			netif_receive_skb(skb);
			return;
		}
		version = skb->data[PTP_V2_VERSION_OFFSET];

		BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
		/* The V1 offsets are reused below for V2 packets; these
		 * compile-time checks prove the layouts coincide.
		 */
		BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
		BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
		BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
		BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);

		if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
			netif_receive_skb(skb);
			return;
		}
	}

	/* Does this packet require timestamping? Only event-port packets
	 * are matched against MC timestamp events.
	 */
	if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
		struct skb_shared_hwtstamps *timestamps;

		match->state = PTP_PACKET_STATE_UNMATCHED;

		/* Clear all timestamps held: filled in later */
		timestamps = skb_hwtstamps(skb);
		memset(timestamps, 0, sizeof(*timestamps));

		/* Extract UUID/Sequence information into two packed
		 * words used as the match key against receive events.
		 */
		data = skb->data + PTP_V1_UUID_OFFSET;
		match->words[0] = (data[0]         |
				   (data[1] << 8)  |
				   (data[2] << 16) |
				   (data[3] << 24));
		match->words[1] = (data[4]         |
				   (data[5] << 8)  |
				   (skb->data[PTP_V1_SEQUENCE_OFFSET +
					      PTP_V1_SEQUENCE_LENGTH - 1] <<
				    16));
	} else {
		match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
	}

	/* Queue for the worker, which pairs packets with timestamps */
	skb_queue_tail(&ptp->rxq, skb);
	queue_work(ptp->workwq, &ptp->work);
}
1080
/* Transmit a PTP packet. This has to be transmitted by the MC
 * itself, through an MCDI call. MCDI calls aren't permitted
 * in the transmit path so defer the actual transmission to a suitable worker.
 */
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
{
	struct efx_ptp_data *ptp = efx->ptp_data;

	skb_queue_tail(&ptp->txq, skb);

	/* Event-port packets small enough for the MC to transmit will
	 * get a hardware TX timestamp; flag them as pending now.
	 */
	if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
	    (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
		efx_xmit_hwtstamp_pending(skb);
	queue_work(ptp->workwq, &ptp->work);

	return NETDEV_TX_OK;
}
1098
/* Enable/disable PTP or switch its operating mode (V1/V2).
 * A no-op if the requested enabled-state and mode already apply.
 * On any failure the enabled flag is left unchanged.
 */
static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
			       unsigned int new_mode)
{
	if ((enable_wanted != efx->ptp_data->enabled) ||
	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
		int rc;

		if (enable_wanted) {
			/* Change of mode requires disable */
			if (efx->ptp_data->enabled &&
			    (efx->ptp_data->mode != new_mode)) {
				efx->ptp_data->enabled = false;
				rc = efx_ptp_stop(efx);
				if (rc != 0)
					return rc;
			}

			/* Set new operating mode and establish
			 * baseline synchronisation, which must
			 * succeed.
			 */
			efx->ptp_data->mode = new_mode;
			rc = efx_ptp_start(efx);
			if (rc == 0) {
				rc = efx_ptp_synchronize(efx,
							 PTP_SYNC_ATTEMPTS * 2);
				if (rc != 0)
					efx_ptp_stop(efx);
			}
		} else {
			rc = efx_ptp_stop(efx);
		}

		if (rc != 0)
			return rc;

		efx->ptp_data->enabled = enable_wanted;
	}

	return 0;
}
1140
1141static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
1142{
1143 bool enable_wanted = false;
1144 unsigned int new_mode;
1145 int rc;
1146
1147 if (init->flags)
1148 return -EINVAL;
1149
1150 if ((init->tx_type != HWTSTAMP_TX_OFF) &&
1151 (init->tx_type != HWTSTAMP_TX_ON))
1152 return -ERANGE;
1153
1154 new_mode = efx->ptp_data->mode;
1155 /* Determine whether any PTP HW operations are required */
1156 switch (init->rx_filter) {
1157 case HWTSTAMP_FILTER_NONE:
1158 break;
1159 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1160 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1161 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1162 init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1163 new_mode = MC_CMD_PTP_MODE_V1;
1164 enable_wanted = true;
1165 break;
1166 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1167 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1168 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1169 /* Although these three are accepted only IPV4 packets will be
1170 * timestamped
1171 */
1172 init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
1173 new_mode = MC_CMD_PTP_MODE_V2;
1174 enable_wanted = true;
1175 break;
1176 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1177 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1178 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1179 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1180 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1181 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1182 /* Non-IP + IPv6 timestamping not supported */
1183 return -ERANGE;
1184 break;
1185 default:
1186 return -ERANGE;
1187 }
1188
1189 if (init->tx_type != HWTSTAMP_TX_OFF)
1190 enable_wanted = true;
1191
1192 rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
1193 if (rc != 0)
1194 return rc;
1195
1196 efx->ptp_data->config = *init;
1197
1198 return 0;
1199}
1200
/* ethtool get_ts_info handler: advertise exactly the timestamping
 * capabilities that efx_ptp_ts_init() accepts (IPv4/UDP PTP v1 and v2
 * event filters) plus the PHC device index.
 */
int
efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_ptp_data *ptp = efx->ptp_data;

	/* No PTP support on this port */
	if (!ptp)
		return -EOPNOTSUPP;

	ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE |
				    SOF_TIMESTAMPING_RX_HARDWARE |
				    SOF_TIMESTAMPING_RAW_HARDWARE);
	ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
	ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
			       1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
			       1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
			       1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
			       1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
			       1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
			       1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
	return 0;
}
1224
/* SIOCSHWTSTAMP ioctl handler: copy the user's hwtstamp_config in,
 * apply it via efx_ptp_ts_init(), and copy the (possibly adjusted)
 * config back to userspace.
 */
int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	int rc;

	/* Not a PTP enabled port */
	if (!efx->ptp_data)
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	rc = efx_ptp_ts_init(efx, &config);
	if (rc != 0)
		return rc;

	/* ts_init may have rewritten rx_filter; report what was applied */
	return copy_to_user(ifr->ifr_data, &config, sizeof(config))
		? -EFAULT : 0;
}
1244
/* Report a malformed MC event (wrong fragment count) and schedule the
 * worker, which will perform a full PTP stop/start (reset_required).
 */
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
	struct efx_ptp_data *ptp = efx->ptp_data;

	netif_err(efx, hw, efx->net_dev,
		"PTP unexpected event length: got %d expected %d\n",
		ptp->evt_frag_idx, expected_frag_len);
	ptp->reset_required = true;
	queue_work(ptp->workwq, &ptp->work);
}
1255
/* Process a completed receive event. Put it on the event queue and
 * start worker thread. This is required because event and their
 * correspoding packets may come in either order.
 */
static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
	struct efx_ptp_event_rx *evt = NULL;

	/* A receive event is always delivered as exactly 3 fragments */
	if (ptp->evt_frag_idx != 3) {
		ptp_event_failure(efx, 3);
		return;
	}

	spin_lock_bh(&ptp->evt_lock);
	if (!list_empty(&ptp->evt_free_list)) {
		evt = list_first_entry(&ptp->evt_free_list,
				       struct efx_ptp_event_rx, link);
		list_del(&evt->link);

		/* Assemble the packet-match key from fields spread
		 * across the three event fragments.
		 */
		evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
		evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
					     MCDI_EVENT_SRC) |
			     (EFX_QWORD_FIELD(ptp->evt_frags[1],
					      MCDI_EVENT_SRC) << 8) |
			     (EFX_QWORD_FIELD(ptp->evt_frags[0],
					      MCDI_EVENT_SRC) << 16));
		evt->hwtimestamp = ktime_set(
			EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
		evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
		list_add_tail(&evt->link, &ptp->evt_list);

		queue_work(ptp->workwq, &ptp->work);
	} else {
		/* All event slots in use; the event is dropped */
		netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
	}
	spin_unlock_bh(&ptp->evt_lock);
}
1294
1295static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
1296{
1297 int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
1298 if (ptp->evt_frag_idx != 1) {
1299 ptp_event_failure(efx, 1);
1300 return;
1301 }
1302
1303 netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
1304}
1305
1306static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
1307{
1308 if (ptp->nic_ts_enabled)
1309 queue_work(ptp->pps_workwq, &ptp->pps_work);
1310}
1311
/* Collect MCDI PTP event fragments and dispatch the completed event.
 * Events may span multiple fragments (CONT flag set on all but the
 * last); fragments with mismatched codes restart the sequence.
 */
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);

	if (!ptp->enabled)
		return;

	if (ptp->evt_frag_idx == 0) {
		/* First fragment: remember which event this is */
		ptp->evt_code = code;
	} else if (ptp->evt_code != code) {
		netif_err(efx, hw, efx->net_dev,
			  "PTP out of sequence event %d\n", code);
		ptp->evt_frag_idx = 0;
	}

	ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
	if (!MCDI_EVENT_FIELD(*ev, CONT)) {
		/* Process resulting event */
		switch (code) {
		case MCDI_EVENT_CODE_PTP_RX:
			ptp_event_rx(efx, ptp);
			break;
		case MCDI_EVENT_CODE_PTP_FAULT:
			ptp_event_fault(efx, ptp);
			break;
		case MCDI_EVENT_CODE_PTP_PPS:
			ptp_event_pps(efx, ptp);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "PTP unknown event %d\n", code);
			break;
		}
		ptp->evt_frag_idx = 0;
	} else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
		/* Too many fragments without completion: drop and resync */
		netif_err(efx, hw, efx->net_dev,
			  "PTP too many event fragments\n");
		ptp->evt_frag_idx = 0;
	}
}
1353
/* PHC adjfreq callback: clamp the requested frequency offset (ppb) to
 * +/-MAX_PPB, convert it to the MC's fixed-point ns representation and
 * issue an MC_CMD_PTP_OP_ADJUST with zero time offset.
 */
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->channel->efx;
	u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
	s64 adjustment_ns;
	int rc;

	if (delta > MAX_PPB)
		delta = MAX_PPB;
	else if (delta < -MAX_PPB)
		delta = -MAX_PPB;

	/* Convert ppb to fixed point ns. */
	adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
			 (PPB_EXTRA_BITS + MAX_PPB_BITS));

	MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
		       (u32)(adjustment_ns >> 32));
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
			  NULL, 0, NULL);
	if (rc != 0)
		return rc;

	/* Remember the applied offset for later read-back */
	ptp_data->current_adjfreq = delta;
	return 0;
}
1387
/* PHC adjtime callback: step the NIC clock by delta nanoseconds via
 * MC_CMD_PTP_OP_ADJUST with a zero frequency component.
 */
static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->channel->efx;
	struct timespec delta_ts = ns_to_timespec(delta);
	u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
1405
/* PHC gettime callback: read the current NIC time from the MC via
 * MC_CMD_PTP_OP_READ_NIC_TIME and return it in *ts.
 */
static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->channel->efx;
	u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
	u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);

	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc != 0)
		return rc;

	ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
	ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
	return 0;
}
1427
1428static int efx_phc_settime(struct ptp_clock_info *ptp,
1429 const struct timespec *e_ts)
1430{
1431 /* Get the current NIC time, efx_phc_gettime.
1432 * Subtract from the desired time to get the offset
1433 * call efx_phc_adjtime with the offset
1434 */
1435 int rc;
1436 struct timespec time_now;
1437 struct timespec delta;
1438
1439 rc = efx_phc_gettime(ptp, &time_now);
1440 if (rc != 0)
1441 return rc;
1442
1443 delta = timespec_sub(*e_ts, time_now);
1444
1445 efx_phc_adjtime(ptp, timespec_to_ns(&delta));
1446 if (rc != 0)
1447 return rc;
1448
1449 return 0;
1450}
1451
1452static int efx_phc_enable(struct ptp_clock_info *ptp,
1453 struct ptp_clock_request *request,
1454 int enable)
1455{
1456 struct efx_ptp_data *ptp_data = container_of(ptp,
1457 struct efx_ptp_data,
1458 phc_clock_info);
1459 if (request->type != PTP_CLK_REQ_PPS)
1460 return -EOPNOTSUPP;
1461
1462 ptp_data->nic_ts_enabled = !!enable;
1463 return 0;
1464}
1465
/* Channel operations for the dedicated PTP channel.  receive_skb
 * diverts all RX traffic on this channel into efx_ptp_rx() for
 * timestamp matching instead of the normal delivery path.
 */
static const struct efx_channel_type efx_ptp_channel_type = {
	.handle_no_channel	= efx_ptp_handle_no_channel,
	.pre_probe		= efx_ptp_probe_channel,
	.post_remove		= efx_ptp_remove_channel,
	.get_name		= efx_ptp_get_channel_name,
	/* no copy operation; there is no need to reallocate this channel */
	.receive_skb		= efx_ptp_rx,
	.keep_eventq		= false,
};
1475
/* Probe for PTP support and, if present, claim the extra PTP channel. */
void efx_ptp_probe(struct efx_nic *efx)
{
	/* Check whether PTP is implemented on this NIC.  The DISABLE
	 * operation will succeed if and only if it is implemented.
	 */
	if (efx_ptp_disable(efx) == 0)
		efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
			&efx_ptp_channel_type;
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 719319b89d7a..9e0ad1b75c33 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
479 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 479 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
480 CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 480 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
481 481
482 skb_record_rx_queue(skb, channel->channel); 482 skb_record_rx_queue(skb, channel->rx_queue.core_index);
483 483
484 gro_result = napi_gro_frags(napi); 484 gro_result = napi_gro_frags(napi);
485 } else { 485 } else {
@@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel,
571 /* Set the SKB flags */ 571 /* Set the SKB flags */
572 skb_checksum_none_assert(skb); 572 skb_checksum_none_assert(skb);
573 573
574 /* Record the rx_queue */
575 skb_record_rx_queue(skb, channel->rx_queue.core_index);
576
574 /* Pass the packet up */ 577 /* Pass the packet up */
575 netif_receive_skb(skb); 578 if (channel->type->receive_skb)
579 channel->type->receive_skb(channel, skb);
580 else
581 netif_receive_skb(skb);
576 582
577 /* Update allocation strategy method */ 583 /* Update allocation strategy method */
578 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 584 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
@@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
608 * at the ethernet header */ 614 * at the ethernet header */
609 skb->protocol = eth_type_trans(skb, efx->net_dev); 615 skb->protocol = eth_type_trans(skb, efx->net_dev);
610 616
611 skb_record_rx_queue(skb, channel->channel); 617 skb_record_rx_queue(skb, channel->rx_queue.core_index);
612 } 618 }
613 619
614 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 620 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
615 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; 621 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
616 622
617 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED))) 623 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
624 !channel->type->receive_skb)
618 efx_rx_packet_gro(channel, rx_buf, eh); 625 efx_rx_packet_gro(channel, rx_buf, eh);
619 else 626 else
620 efx_rx_deliver(channel, rx_buf); 627 efx_rx_deliver(channel, rx_buf);
@@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
624{ 631{
625 enum efx_rx_alloc_method method = rx_alloc_method; 632 enum efx_rx_alloc_method method = rx_alloc_method;
626 633
634 if (channel->type->receive_skb) {
635 channel->rx_alloc_push_pages = false;
636 return;
637 }
638
627 /* Only makes sense to use page based allocation if GRO is enabled */ 639 /* Only makes sense to use page based allocation if GRO is enabled */
628 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { 640 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
629 method = RX_ALLOC_METHOD_SKB; 641 method = RX_ALLOC_METHOD_SKB;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 96068d15b601..ce72ae4f399f 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -614,7 +614,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
614{ 614{
615 enum efx_loopback_mode mode; 615 enum efx_loopback_mode mode;
616 struct efx_loopback_state *state; 616 struct efx_loopback_state *state;
617 struct efx_channel *channel = efx_get_channel(efx, 0); 617 struct efx_channel *channel =
618 efx_get_channel(efx, efx->tx_channel_offset);
618 struct efx_tx_queue *tx_queue; 619 struct efx_tx_queue *tx_queue;
619 int rc = 0; 620 int rc = 0;
620 621
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 6bafd216e55e..84b41bf08a38 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -335,6 +335,7 @@ static int siena_probe_nic(struct efx_nic *efx)
335 goto fail5; 335 goto fail5;
336 336
337 efx_sriov_probe(efx); 337 efx_sriov_probe(efx);
338 efx_ptp_probe(efx);
338 339
339 return 0; 340 return 0;
340 341
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9cb3b84ecae9..d49b53dc2a50 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -21,6 +21,9 @@
21/* Number of longs required to track all the VIs in a VF */ 21/* Number of longs required to track all the VIs in a VF */
22#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) 22#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
23 23
24/* Maximum number of RX queues supported */
25#define VF_MAX_RX_QUEUES 63
26
24/** 27/**
25 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour 28 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
26 * @VF_TX_FILTER_OFF: Disabled 29 * @VF_TX_FILTER_OFF: Disabled
@@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
578 efx_oword_t reg; 581 efx_oword_t reg;
579 582
580 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || 583 if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
584 vf_rxq >= VF_MAX_RX_QUEUES ||
581 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { 585 bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
582 if (net_ratelimit()) 586 if (net_ratelimit())
583 netif_err(efx, hw, efx->net_dev, 587 netif_err(efx, hw, efx->net_dev,
@@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
683 __le32 *rxqs; 687 __le32 *rxqs;
684 int rc; 688 int rc;
685 689
690 BUILD_BUG_ON(VF_MAX_RX_QUEUES >
691 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
692
686 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL); 693 rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
687 if (rxqs == NULL) 694 if (rxqs == NULL)
688 return VFDI_RC_ENOMEM; 695 return VFDI_RC_ENOMEM;
@@ -1028,6 +1035,7 @@ efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
1028static const struct efx_channel_type efx_sriov_channel_type = { 1035static const struct efx_channel_type efx_sriov_channel_type = {
1029 .handle_no_channel = efx_sriov_handle_no_channel, 1036 .handle_no_channel = efx_sriov_handle_no_channel,
1030 .pre_probe = efx_sriov_probe_channel, 1037 .pre_probe = efx_sriov_probe_channel,
1038 .post_remove = efx_channel_dummy_op_void,
1031 .get_name = efx_sriov_get_channel_name, 1039 .get_name = efx_sriov_get_channel_name,
1032 /* no copy operation; channel must not be reallocated */ 1040 /* no copy operation; channel must not be reallocated */
1033 .keep_eventq = true, 1041 .keep_eventq = true,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 18713436b443..5e090e54298e 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,14 +22,6 @@
22#include "nic.h" 22#include "nic.h"
23#include "workarounds.h" 23#include "workarounds.h"
24 24
25/*
26 * TX descriptor ring full threshold
27 *
28 * The tx_queue descriptor ring fill-level must fall below this value
29 * before we restart the netif queue
30 */
31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32
33static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 25static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
34 struct efx_tx_buffer *buffer, 26 struct efx_tx_buffer *buffer,
35 unsigned int *pkts_compl, 27 unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
39 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 31 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
40 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - 32 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
41 buffer->unmap_len); 33 buffer->unmap_len);
42 if (buffer->unmap_single) 34 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
43 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, 35 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
44 DMA_TO_DEVICE); 36 DMA_TO_DEVICE);
45 else 37 else
46 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, 38 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
47 DMA_TO_DEVICE); 39 DMA_TO_DEVICE);
48 buffer->unmap_len = 0; 40 buffer->unmap_len = 0;
49 buffer->unmap_single = false;
50 } 41 }
51 42
52 if (buffer->skb) { 43 if (buffer->flags & EFX_TX_BUF_SKB) {
53 (*pkts_compl)++; 44 (*pkts_compl)++;
54 (*bytes_compl) += buffer->skb->len; 45 (*bytes_compl) += buffer->skb->len;
55 dev_kfree_skb_any((struct sk_buff *) buffer->skb); 46 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
56 buffer->skb = NULL;
57 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, 47 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
58 "TX queue %d transmission id %x complete\n", 48 "TX queue %d transmission id %x complete\n",
59 tx_queue->queue, tx_queue->read_count); 49 tx_queue->queue, tx_queue->read_count);
50 } else if (buffer->flags & EFX_TX_BUF_HEAP) {
51 kfree(buffer->heap_buf);
60 } 52 }
61}
62 53
63/** 54 buffer->len = 0;
64 * struct efx_tso_header - a DMA mapped buffer for packet headers 55 buffer->flags = 0;
65 * @next: Linked list of free ones. 56}
66 * The list is protected by the TX queue lock.
67 * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
68 * @dma_addr: The DMA address of the header below.
69 *
70 * This controls the memory used for a TSO header. Use TSOH_DATA()
71 * to find the packet header data. Use TSOH_SIZE() to calculate the
72 * total size required for a given packet header length. TSO headers
73 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
74 */
75struct efx_tso_header {
76 union {
77 struct efx_tso_header *next;
78 size_t unmap_len;
79 };
80 dma_addr_t dma_addr;
81};
82 57
83static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 58static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
84 struct sk_buff *skb); 59 struct sk_buff *skb);
85static void efx_fini_tso(struct efx_tx_queue *tx_queue);
86static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
87 struct efx_tso_header *tsoh);
88
89static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
90 struct efx_tx_buffer *buffer)
91{
92 if (buffer->tsoh) {
93 if (likely(!buffer->tsoh->unmap_len)) {
94 buffer->tsoh->next = tx_queue->tso_headers_free;
95 tx_queue->tso_headers_free = buffer->tsoh;
96 } else {
97 efx_tsoh_heap_free(tx_queue, buffer->tsoh);
98 }
99 buffer->tsoh = NULL;
100 }
101}
102
103 60
104static inline unsigned 61static inline unsigned
105efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) 62efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
138 return max_descs; 95 return max_descs;
139} 96}
140 97
98/* Get partner of a TX queue, seen as part of the same net core queue */
99static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
100{
101 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
102 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
103 else
104 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
105}
106
107static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
108{
109 /* We need to consider both queues that the net core sees as one */
110 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
111 struct efx_nic *efx = txq1->efx;
112 unsigned int fill_level;
113
114 fill_level = max(txq1->insert_count - txq1->old_read_count,
115 txq2->insert_count - txq2->old_read_count);
116 if (likely(fill_level < efx->txq_stop_thresh))
117 return;
118
119 /* We used the stale old_read_count above, which gives us a
120 * pessimistic estimate of the fill level (which may even
121 * validly be >= efx->txq_entries). Now try again using
122 * read_count (more likely to be a cache miss).
123 *
124 * If we read read_count and then conditionally stop the
125 * queue, it is possible for the completion path to race with
126 * us and complete all outstanding descriptors in the middle,
127 * after which there will be no more completions to wake it.
128 * Therefore we stop the queue first, then read read_count
129 * (with a memory barrier to ensure the ordering), then
130 * restart the queue if the fill level turns out to be low
131 * enough.
132 */
133 netif_tx_stop_queue(txq1->core_txq);
134 smp_mb();
135 txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
136 txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
137
138 fill_level = max(txq1->insert_count - txq1->old_read_count,
139 txq2->insert_count - txq2->old_read_count);
140 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
141 if (likely(fill_level < efx->txq_stop_thresh)) {
142 smp_mb();
143 if (likely(!efx->loopback_selftest))
144 netif_tx_start_queue(txq1->core_txq);
145 }
146}
147
141/* 148/*
142 * Add a socket buffer to a TX queue 149 * Add a socket buffer to a TX queue
143 * 150 *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
151 * This function is split out from efx_hard_start_xmit to allow the 158 * This function is split out from efx_hard_start_xmit to allow the
152 * loopback test to direct packets via specific TX queues. 159 * loopback test to direct packets via specific TX queues.
153 * 160 *
154 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 161 * Returns NETDEV_TX_OK.
155 * You must hold netif_tx_lock() to call this function. 162 * You must hold netif_tx_lock() to call this function.
156 */ 163 */
157netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 164netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
160 struct device *dma_dev = &efx->pci_dev->dev; 167 struct device *dma_dev = &efx->pci_dev->dev;
161 struct efx_tx_buffer *buffer; 168 struct efx_tx_buffer *buffer;
162 skb_frag_t *fragment; 169 skb_frag_t *fragment;
163 unsigned int len, unmap_len = 0, fill_level, insert_ptr; 170 unsigned int len, unmap_len = 0, insert_ptr;
164 dma_addr_t dma_addr, unmap_addr = 0; 171 dma_addr_t dma_addr, unmap_addr = 0;
165 unsigned int dma_len; 172 unsigned int dma_len;
166 bool unmap_single; 173 unsigned short dma_flags;
167 int q_space, i = 0; 174 int i = 0;
168 netdev_tx_t rc = NETDEV_TX_OK;
169 175
170 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 176 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
171 177
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
183 return NETDEV_TX_OK; 189 return NETDEV_TX_OK;
184 } 190 }
185 191
186 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
187 q_space = efx->txq_entries - 1 - fill_level;
188
189 /* Map for DMA. Use dma_map_single rather than dma_map_page 192 /* Map for DMA. Use dma_map_single rather than dma_map_page
190 * since this is more efficient on machines with sparse 193 * since this is more efficient on machines with sparse
191 * memory. 194 * memory.
192 */ 195 */
193 unmap_single = true; 196 dma_flags = EFX_TX_BUF_MAP_SINGLE;
194 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); 197 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
195 198
196 /* Process all fragments */ 199 /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
205 208
206 /* Add to TX queue, splitting across DMA boundaries */ 209 /* Add to TX queue, splitting across DMA boundaries */
207 do { 210 do {
208 if (unlikely(q_space-- <= 0)) {
209 /* It might be that completions have
210 * happened since the xmit path last
211 * checked. Update the xmit path's
212 * copy of read_count.
213 */
214 netif_tx_stop_queue(tx_queue->core_txq);
215 /* This memory barrier protects the
216 * change of queue state from the access
217 * of read_count. */
218 smp_mb();
219 tx_queue->old_read_count =
220 ACCESS_ONCE(tx_queue->read_count);
221 fill_level = (tx_queue->insert_count
222 - tx_queue->old_read_count);
223 q_space = efx->txq_entries - 1 - fill_level;
224 if (unlikely(q_space-- <= 0)) {
225 rc = NETDEV_TX_BUSY;
226 goto unwind;
227 }
228 smp_mb();
229 if (likely(!efx->loopback_selftest))
230 netif_tx_start_queue(
231 tx_queue->core_txq);
232 }
233
234 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
235 buffer = &tx_queue->buffer[insert_ptr]; 212 buffer = &tx_queue->buffer[insert_ptr];
236 efx_tsoh_free(tx_queue, buffer); 213 EFX_BUG_ON_PARANOID(buffer->flags);
237 EFX_BUG_ON_PARANOID(buffer->tsoh);
238 EFX_BUG_ON_PARANOID(buffer->skb);
239 EFX_BUG_ON_PARANOID(buffer->len); 214 EFX_BUG_ON_PARANOID(buffer->len);
240 EFX_BUG_ON_PARANOID(!buffer->continuation);
241 EFX_BUG_ON_PARANOID(buffer->unmap_len); 215 EFX_BUG_ON_PARANOID(buffer->unmap_len);
242 216
243 dma_len = efx_max_tx_len(efx, dma_addr); 217 dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
247 /* Fill out per descriptor fields */ 221 /* Fill out per descriptor fields */
248 buffer->len = dma_len; 222 buffer->len = dma_len;
249 buffer->dma_addr = dma_addr; 223 buffer->dma_addr = dma_addr;
224 buffer->flags = EFX_TX_BUF_CONT;
250 len -= dma_len; 225 len -= dma_len;
251 dma_addr += dma_len; 226 dma_addr += dma_len;
252 ++tx_queue->insert_count; 227 ++tx_queue->insert_count;
253 } while (len); 228 } while (len);
254 229
255 /* Transfer ownership of the unmapping to the final buffer */ 230 /* Transfer ownership of the unmapping to the final buffer */
256 buffer->unmap_single = unmap_single; 231 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
257 buffer->unmap_len = unmap_len; 232 buffer->unmap_len = unmap_len;
258 unmap_len = 0; 233 unmap_len = 0;
259 234
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
264 len = skb_frag_size(fragment); 239 len = skb_frag_size(fragment);
265 i++; 240 i++;
266 /* Map for DMA */ 241 /* Map for DMA */
267 unmap_single = false; 242 dma_flags = 0;
268 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, 243 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
269 DMA_TO_DEVICE); 244 DMA_TO_DEVICE);
270 } 245 }
271 246
272 /* Transfer ownership of the skb to the final buffer */ 247 /* Transfer ownership of the skb to the final buffer */
273 buffer->skb = skb; 248 buffer->skb = skb;
274 buffer->continuation = false; 249 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
275 250
276 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 251 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
277 252
278 /* Pass off to hardware */ 253 /* Pass off to hardware */
279 efx_nic_push_buffers(tx_queue); 254 efx_nic_push_buffers(tx_queue);
280 255
256 efx_tx_maybe_stop_queue(tx_queue);
257
281 return NETDEV_TX_OK; 258 return NETDEV_TX_OK;
282 259
283 dma_err: 260 dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
289 /* Mark the packet as transmitted, and free the SKB ourselves */ 266 /* Mark the packet as transmitted, and free the SKB ourselves */
290 dev_kfree_skb_any(skb); 267 dev_kfree_skb_any(skb);
291 268
292 unwind:
293 /* Work backwards until we hit the original insert pointer value */ 269 /* Work backwards until we hit the original insert pointer value */
294 while (tx_queue->insert_count != tx_queue->write_count) { 270 while (tx_queue->insert_count != tx_queue->write_count) {
295 unsigned int pkts_compl = 0, bytes_compl = 0; 271 unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
297 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 273 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
298 buffer = &tx_queue->buffer[insert_ptr]; 274 buffer = &tx_queue->buffer[insert_ptr];
299 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 275 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
300 buffer->len = 0;
301 } 276 }
302 277
303 /* Free the fragment we were mid-way through pushing */ 278 /* Free the fragment we were mid-way through pushing */
304 if (unmap_len) { 279 if (unmap_len) {
305 if (unmap_single) 280 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
306 dma_unmap_single(dma_dev, unmap_addr, unmap_len, 281 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
307 DMA_TO_DEVICE); 282 DMA_TO_DEVICE);
308 else 283 else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
310 DMA_TO_DEVICE); 285 DMA_TO_DEVICE);
311 } 286 }
312 287
313 return rc; 288 return NETDEV_TX_OK;
314} 289}
315 290
316/* Remove packets from the TX queue 291/* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
340 } 315 }
341 316
342 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); 317 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
343 buffer->continuation = true;
344 buffer->len = 0;
345 318
346 ++tx_queue->read_count; 319 ++tx_queue->read_count;
347 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 320 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -366,6 +339,12 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
366 339
367 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); 340 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
368 341
342 /* PTP "event" packet */
343 if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
344 unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
345 return efx_ptp_tx(efx, skb);
346 }
347
369 index = skb_get_queue_mapping(skb); 348 index = skb_get_queue_mapping(skb);
370 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; 349 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
371 if (index >= efx->n_tx_channels) { 350 if (index >= efx->n_tx_channels) {
@@ -450,6 +429,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
450{ 429{
451 unsigned fill_level; 430 unsigned fill_level;
452 struct efx_nic *efx = tx_queue->efx; 431 struct efx_nic *efx = tx_queue->efx;
432 struct efx_tx_queue *txq2;
453 unsigned int pkts_compl = 0, bytes_compl = 0; 433 unsigned int pkts_compl = 0, bytes_compl = 0;
454 434
455 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 435 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +437,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
457 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 437 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
458 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 438 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
459 439
460 /* See if we need to restart the netif queue. This barrier 440 /* See if we need to restart the netif queue. This memory
461 * separates the update of read_count from the test of the 441 * barrier ensures that we write read_count (inside
462 * queue state. */ 442 * efx_dequeue_buffers()) before reading the queue status.
443 */
463 smp_mb(); 444 smp_mb();
464 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 445 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
465 likely(efx->port_enabled) && 446 likely(efx->port_enabled) &&
466 likely(netif_device_present(efx->net_dev))) { 447 likely(netif_device_present(efx->net_dev))) {
467 fill_level = tx_queue->insert_count - tx_queue->read_count; 448 txq2 = efx_tx_queue_partner(tx_queue);
468 if (fill_level < EFX_TXQ_THRESHOLD(efx)) 449 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
450 txq2->insert_count - txq2->read_count);
451 if (fill_level <= efx->txq_wake_thresh)
469 netif_tx_wake_queue(tx_queue->core_txq); 452 netif_tx_wake_queue(tx_queue->core_txq);
470 } 453 }
471 454
@@ -480,11 +463,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
480 } 463 }
481} 464}
482 465
466/* Size of page-based TSO header buffers. Larger blocks must be
467 * allocated from the heap.
468 */
469#define TSOH_STD_SIZE 128
470#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
471
472/* At most half the descriptors in the queue at any time will refer to
473 * a TSO header buffer, since they must always be followed by a
474 * payload descriptor referring to an skb.
475 */
476static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
477{
478 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
479}
480
483int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 481int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
484{ 482{
485 struct efx_nic *efx = tx_queue->efx; 483 struct efx_nic *efx = tx_queue->efx;
486 unsigned int entries; 484 unsigned int entries;
487 int i, rc; 485 int rc;
488 486
489 /* Create the smallest power-of-two aligned ring */ 487 /* Create the smallest power-of-two aligned ring */
490 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); 488 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +498,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
500 GFP_KERNEL); 498 GFP_KERNEL);
501 if (!tx_queue->buffer) 499 if (!tx_queue->buffer)
502 return -ENOMEM; 500 return -ENOMEM;
503 for (i = 0; i <= tx_queue->ptr_mask; ++i) 501
504 tx_queue->buffer[i].continuation = true; 502 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
503 tx_queue->tsoh_page =
504 kcalloc(efx_tsoh_page_count(tx_queue),
505 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
506 if (!tx_queue->tsoh_page) {
507 rc = -ENOMEM;
508 goto fail1;
509 }
510 }
505 511
506 /* Allocate hardware ring */ 512 /* Allocate hardware ring */
507 rc = efx_nic_probe_tx(tx_queue); 513 rc = efx_nic_probe_tx(tx_queue);
508 if (rc) 514 if (rc)
509 goto fail; 515 goto fail2;
510 516
511 return 0; 517 return 0;
512 518
513 fail: 519fail2:
520 kfree(tx_queue->tsoh_page);
521 tx_queue->tsoh_page = NULL;
522fail1:
514 kfree(tx_queue->buffer); 523 kfree(tx_queue->buffer);
515 tx_queue->buffer = NULL; 524 tx_queue->buffer = NULL;
516 return rc; 525 return rc;
@@ -546,8 +555,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
546 unsigned int pkts_compl = 0, bytes_compl = 0; 555 unsigned int pkts_compl = 0, bytes_compl = 0;
547 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; 556 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
548 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 557 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
549 buffer->continuation = true;
550 buffer->len = 0;
551 558
552 ++tx_queue->read_count; 559 ++tx_queue->read_count;
553 } 560 }
@@ -568,13 +575,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
568 efx_nic_fini_tx(tx_queue); 575 efx_nic_fini_tx(tx_queue);
569 576
570 efx_release_tx_buffers(tx_queue); 577 efx_release_tx_buffers(tx_queue);
571
572 /* Free up TSO header cache */
573 efx_fini_tso(tx_queue);
574} 578}
575 579
576void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 580void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
577{ 581{
582 int i;
583
578 if (!tx_queue->buffer) 584 if (!tx_queue->buffer)
579 return; 585 return;
580 586
@@ -582,6 +588,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
582 "destroying TX queue %d\n", tx_queue->queue); 588 "destroying TX queue %d\n", tx_queue->queue);
583 efx_nic_remove_tx(tx_queue); 589 efx_nic_remove_tx(tx_queue);
584 590
591 if (tx_queue->tsoh_page) {
592 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
593 efx_nic_free_buffer(tx_queue->efx,
594 &tx_queue->tsoh_page[i]);
595 kfree(tx_queue->tsoh_page);
596 tx_queue->tsoh_page = NULL;
597 }
598
585 kfree(tx_queue->buffer); 599 kfree(tx_queue->buffer);
586 tx_queue->buffer = NULL; 600 tx_queue->buffer = NULL;
587} 601}
@@ -604,22 +618,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
604#define TSOH_OFFSET NET_IP_ALIGN 618#define TSOH_OFFSET NET_IP_ALIGN
605#endif 619#endif
606 620
607#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
608
609/* Total size of struct efx_tso_header, buffer and padding */
610#define TSOH_SIZE(hdr_len) \
611 (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
612
613/* Size of blocks on free list. Larger blocks must be allocated from
614 * the heap.
615 */
616#define TSOH_STD_SIZE 128
617
618#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 621#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
619#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
620#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
621#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
622#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
623 622
624/** 623/**
625 * struct tso_state - TSO state for an SKB 624 * struct tso_state - TSO state for an SKB
@@ -631,10 +630,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
631 * @in_len: Remaining length in current SKB fragment 630 * @in_len: Remaining length in current SKB fragment
632 * @unmap_len: Length of SKB fragment 631 * @unmap_len: Length of SKB fragment
633 * @unmap_addr: DMA address of SKB fragment 632 * @unmap_addr: DMA address of SKB fragment
634 * @unmap_single: DMA single vs page mapping flag 633 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
635 * @protocol: Network protocol (after any VLAN header) 634 * @protocol: Network protocol (after any VLAN header)
635 * @ip_off: Offset of IP header
636 * @tcp_off: Offset of TCP header
636 * @header_len: Number of bytes of header 637 * @header_len: Number of bytes of header
637 * @full_packet_size: Number of bytes to put in each outgoing segment 638 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
638 * 639 *
639 * The state used during segmentation. It is put into this data structure 640 * The state used during segmentation. It is put into this data structure
640 * just to make it easy to pass into inline functions. 641 * just to make it easy to pass into inline functions.
@@ -651,11 +652,13 @@ struct tso_state {
651 unsigned in_len; 652 unsigned in_len;
652 unsigned unmap_len; 653 unsigned unmap_len;
653 dma_addr_t unmap_addr; 654 dma_addr_t unmap_addr;
654 bool unmap_single; 655 unsigned short dma_flags;
655 656
656 __be16 protocol; 657 __be16 protocol;
658 unsigned int ip_off;
659 unsigned int tcp_off;
657 unsigned header_len; 660 unsigned header_len;
658 int full_packet_size; 661 unsigned int ip_base_len;
659}; 662};
660 663
661 664
@@ -687,91 +690,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
687 return protocol; 690 return protocol;
688} 691}
689 692
690 693static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
691/* 694 struct efx_tx_buffer *buffer, unsigned int len)
692 * Allocate a page worth of efx_tso_header structures, and string them
693 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
694 */
695static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
696{ 695{
697 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 696 u8 *result;
698 struct efx_tso_header *tsoh;
699 dma_addr_t dma_addr;
700 u8 *base_kva, *kva;
701 697
702 base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC); 698 EFX_BUG_ON_PARANOID(buffer->len);
703 if (base_kva == NULL) { 699 EFX_BUG_ON_PARANOID(buffer->flags);
704 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, 700 EFX_BUG_ON_PARANOID(buffer->unmap_len);
705 "Unable to allocate page for TSO headers\n");
706 return -ENOMEM;
707 }
708
709 /* dma_alloc_coherent() allocates pages. */
710 EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
711
712 for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
713 tsoh = (struct efx_tso_header *)kva;
714 tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
715 tsoh->next = tx_queue->tso_headers_free;
716 tx_queue->tso_headers_free = tsoh;
717 }
718
719 return 0;
720}
721
722
723/* Free up a TSO header, and all others in the same page. */
724static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
725 struct efx_tso_header *tsoh,
726 struct device *dma_dev)
727{
728 struct efx_tso_header **p;
729 unsigned long base_kva;
730 dma_addr_t base_dma;
731
732 base_kva = (unsigned long)tsoh & PAGE_MASK;
733 base_dma = tsoh->dma_addr & PAGE_MASK;
734
735 p = &tx_queue->tso_headers_free;
736 while (*p != NULL) {
737 if (((unsigned long)*p & PAGE_MASK) == base_kva)
738 *p = (*p)->next;
739 else
740 p = &(*p)->next;
741 }
742 701
743 dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma); 702 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
744} 703 unsigned index =
704 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
705 struct efx_buffer *page_buf =
706 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
707 unsigned offset =
708 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
709
710 if (unlikely(!page_buf->addr) &&
711 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
712 return NULL;
713
714 result = (u8 *)page_buf->addr + offset;
715 buffer->dma_addr = page_buf->dma_addr + offset;
716 buffer->flags = EFX_TX_BUF_CONT;
717 } else {
718 tx_queue->tso_long_headers++;
745 719
746static struct efx_tso_header * 720 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
747efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) 721 if (unlikely(!buffer->heap_buf))
748{ 722 return NULL;
749 struct efx_tso_header *tsoh; 723 result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
750 724 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
751 tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
752 if (unlikely(!tsoh))
753 return NULL;
754
755 tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
756 TSOH_BUFFER(tsoh), header_len,
757 DMA_TO_DEVICE);
758 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
759 tsoh->dma_addr))) {
760 kfree(tsoh);
761 return NULL;
762 } 725 }
763 726
764 tsoh->unmap_len = header_len; 727 buffer->len = len;
765 return tsoh;
766}
767 728
768static void 729 return result;
769efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
770{
771 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
772 tsoh->dma_addr, tsoh->unmap_len,
773 DMA_TO_DEVICE);
774 kfree(tsoh);
775} 730}
776 731
777/** 732/**
@@ -781,47 +736,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
781 * @len: Length of fragment 736 * @len: Length of fragment
782 * @final_buffer: The final buffer inserted into the queue 737 * @final_buffer: The final buffer inserted into the queue
783 * 738 *
784 * Push descriptors onto the TX queue. Return 0 on success or 1 if 739 * Push descriptors onto the TX queue.
785 * @tx_queue full.
786 */ 740 */
787static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 741static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
788 dma_addr_t dma_addr, unsigned len, 742 dma_addr_t dma_addr, unsigned len,
789 struct efx_tx_buffer **final_buffer) 743 struct efx_tx_buffer **final_buffer)
790{ 744{
791 struct efx_tx_buffer *buffer; 745 struct efx_tx_buffer *buffer;
792 struct efx_nic *efx = tx_queue->efx; 746 struct efx_nic *efx = tx_queue->efx;
793 unsigned dma_len, fill_level, insert_ptr; 747 unsigned dma_len, insert_ptr;
794 int q_space;
795 748
796 EFX_BUG_ON_PARANOID(len <= 0); 749 EFX_BUG_ON_PARANOID(len <= 0);
797 750
798 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
799 /* -1 as there is no way to represent all descriptors used */
800 q_space = efx->txq_entries - 1 - fill_level;
801
802 while (1) { 751 while (1) {
803 if (unlikely(q_space-- <= 0)) {
804 /* It might be that completions have happened
805 * since the xmit path last checked. Update
806 * the xmit path's copy of read_count.
807 */
808 netif_tx_stop_queue(tx_queue->core_txq);
809 /* This memory barrier protects the change of
810 * queue state from the access of read_count. */
811 smp_mb();
812 tx_queue->old_read_count =
813 ACCESS_ONCE(tx_queue->read_count);
814 fill_level = (tx_queue->insert_count
815 - tx_queue->old_read_count);
816 q_space = efx->txq_entries - 1 - fill_level;
817 if (unlikely(q_space-- <= 0)) {
818 *final_buffer = NULL;
819 return 1;
820 }
821 smp_mb();
822 netif_tx_start_queue(tx_queue->core_txq);
823 }
824
825 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 752 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
826 buffer = &tx_queue->buffer[insert_ptr]; 753 buffer = &tx_queue->buffer[insert_ptr];
827 ++tx_queue->insert_count; 754 ++tx_queue->insert_count;
@@ -830,12 +757,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
830 tx_queue->read_count >= 757 tx_queue->read_count >=
831 efx->txq_entries); 758 efx->txq_entries);
832 759
833 efx_tsoh_free(tx_queue, buffer);
834 EFX_BUG_ON_PARANOID(buffer->len); 760 EFX_BUG_ON_PARANOID(buffer->len);
835 EFX_BUG_ON_PARANOID(buffer->unmap_len); 761 EFX_BUG_ON_PARANOID(buffer->unmap_len);
836 EFX_BUG_ON_PARANOID(buffer->skb); 762 EFX_BUG_ON_PARANOID(buffer->flags);
837 EFX_BUG_ON_PARANOID(!buffer->continuation);
838 EFX_BUG_ON_PARANOID(buffer->tsoh);
839 763
840 buffer->dma_addr = dma_addr; 764 buffer->dma_addr = dma_addr;
841 765
@@ -845,7 +769,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
845 if (dma_len >= len) 769 if (dma_len >= len)
846 break; 770 break;
847 771
848 buffer->len = dma_len; /* Don't set the other members */ 772 buffer->len = dma_len;
773 buffer->flags = EFX_TX_BUF_CONT;
849 dma_addr += dma_len; 774 dma_addr += dma_len;
850 len -= dma_len; 775 len -= dma_len;
851 } 776 }
@@ -853,7 +778,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
853 EFX_BUG_ON_PARANOID(!len); 778 EFX_BUG_ON_PARANOID(!len);
854 buffer->len = len; 779 buffer->len = len;
855 *final_buffer = buffer; 780 *final_buffer = buffer;
856 return 0;
857} 781}
858 782
859 783
@@ -864,54 +788,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
864 * a single fragment, and we know it doesn't cross a page boundary. It 788 * a single fragment, and we know it doesn't cross a page boundary. It
865 * also allows us to not worry about end-of-packet etc. 789 * also allows us to not worry about end-of-packet etc.
866 */ 790 */
867static void efx_tso_put_header(struct efx_tx_queue *tx_queue, 791static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
868 struct efx_tso_header *tsoh, unsigned len) 792 struct efx_tx_buffer *buffer, u8 *header)
869{ 793{
870 struct efx_tx_buffer *buffer; 794 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
871 795 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
872 buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; 796 header, buffer->len,
873 efx_tsoh_free(tx_queue, buffer); 797 DMA_TO_DEVICE);
874 EFX_BUG_ON_PARANOID(buffer->len); 798 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
875 EFX_BUG_ON_PARANOID(buffer->unmap_len); 799 buffer->dma_addr))) {
876 EFX_BUG_ON_PARANOID(buffer->skb); 800 kfree(buffer->heap_buf);
877 EFX_BUG_ON_PARANOID(!buffer->continuation); 801 buffer->len = 0;
878 EFX_BUG_ON_PARANOID(buffer->tsoh); 802 buffer->flags = 0;
879 buffer->len = len; 803 return -ENOMEM;
880 buffer->dma_addr = tsoh->dma_addr; 804 }
881 buffer->tsoh = tsoh; 805 buffer->unmap_len = buffer->len;
806 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
807 }
882 808
883 ++tx_queue->insert_count; 809 ++tx_queue->insert_count;
810 return 0;
884} 811}
885 812
886 813
887/* Remove descriptors put into a tx_queue. */ 814/* Remove buffers put into a tx_queue. None of the buffers must have
815 * an skb attached.
816 */
888static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 817static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
889{ 818{
890 struct efx_tx_buffer *buffer; 819 struct efx_tx_buffer *buffer;
891 dma_addr_t unmap_addr;
892 820
893 /* Work backwards until we hit the original insert pointer value */ 821 /* Work backwards until we hit the original insert pointer value */
894 while (tx_queue->insert_count != tx_queue->write_count) { 822 while (tx_queue->insert_count != tx_queue->write_count) {
895 --tx_queue->insert_count; 823 --tx_queue->insert_count;
896 buffer = &tx_queue->buffer[tx_queue->insert_count & 824 buffer = &tx_queue->buffer[tx_queue->insert_count &
897 tx_queue->ptr_mask]; 825 tx_queue->ptr_mask];
898 efx_tsoh_free(tx_queue, buffer); 826 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
899 EFX_BUG_ON_PARANOID(buffer->skb);
900 if (buffer->unmap_len) {
901 unmap_addr = (buffer->dma_addr + buffer->len -
902 buffer->unmap_len);
903 if (buffer->unmap_single)
904 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
905 unmap_addr, buffer->unmap_len,
906 DMA_TO_DEVICE);
907 else
908 dma_unmap_page(&tx_queue->efx->pci_dev->dev,
909 unmap_addr, buffer->unmap_len,
910 DMA_TO_DEVICE);
911 buffer->unmap_len = 0;
912 }
913 buffer->len = 0;
914 buffer->continuation = true;
915 } 827 }
916} 828}
917 829
@@ -919,17 +831,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
919/* Parse the SKB header and initialise state. */ 831/* Parse the SKB header and initialise state. */
920static void tso_start(struct tso_state *st, const struct sk_buff *skb) 832static void tso_start(struct tso_state *st, const struct sk_buff *skb)
921{ 833{
922 /* All ethernet/IP/TCP headers combined size is TCP header size 834 st->ip_off = skb_network_header(skb) - skb->data;
923 * plus offset of TCP header relative to start of packet. 835 st->tcp_off = skb_transport_header(skb) - skb->data;
924 */ 836 st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
925 st->header_len = ((tcp_hdr(skb)->doff << 2u) 837 if (st->protocol == htons(ETH_P_IP)) {
926 + PTR_DIFF(tcp_hdr(skb), skb->data)); 838 st->ip_base_len = st->header_len - st->ip_off;
927 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
928
929 if (st->protocol == htons(ETH_P_IP))
930 st->ipv4_id = ntohs(ip_hdr(skb)->id); 839 st->ipv4_id = ntohs(ip_hdr(skb)->id);
931 else 840 } else {
841 st->ip_base_len = st->header_len - st->tcp_off;
932 st->ipv4_id = 0; 842 st->ipv4_id = 0;
843 }
933 st->seqnum = ntohl(tcp_hdr(skb)->seq); 844 st->seqnum = ntohl(tcp_hdr(skb)->seq);
934 845
935 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 846 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +849,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
938 849
939 st->out_len = skb->len - st->header_len; 850 st->out_len = skb->len - st->header_len;
940 st->unmap_len = 0; 851 st->unmap_len = 0;
941 st->unmap_single = false; 852 st->dma_flags = 0;
942} 853}
943 854
944static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 855static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +858,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
947 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, 858 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
948 skb_frag_size(frag), DMA_TO_DEVICE); 859 skb_frag_size(frag), DMA_TO_DEVICE);
949 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 860 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
950 st->unmap_single = false; 861 st->dma_flags = 0;
951 st->unmap_len = skb_frag_size(frag); 862 st->unmap_len = skb_frag_size(frag);
952 st->in_len = skb_frag_size(frag); 863 st->in_len = skb_frag_size(frag);
953 st->dma_addr = st->unmap_addr; 864 st->dma_addr = st->unmap_addr;
@@ -965,7 +876,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
965 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, 876 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
966 len, DMA_TO_DEVICE); 877 len, DMA_TO_DEVICE);
967 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 878 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
968 st->unmap_single = true; 879 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
969 st->unmap_len = len; 880 st->unmap_len = len;
970 st->in_len = len; 881 st->in_len = len;
971 st->dma_addr = st->unmap_addr; 882 st->dma_addr = st->unmap_addr;
@@ -982,20 +893,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
982 * @st: TSO state 893 * @st: TSO state
983 * 894 *
984 * Form descriptors for the current fragment, until we reach the end 895 * Form descriptors for the current fragment, until we reach the end
985 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 896 * of fragment or end-of-packet.
986 * space in @tx_queue.
987 */ 897 */
988static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 898static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
989 const struct sk_buff *skb, 899 const struct sk_buff *skb,
990 struct tso_state *st) 900 struct tso_state *st)
991{ 901{
992 struct efx_tx_buffer *buffer; 902 struct efx_tx_buffer *buffer;
993 int n, end_of_packet, rc; 903 int n;
994 904
995 if (st->in_len == 0) 905 if (st->in_len == 0)
996 return 0; 906 return;
997 if (st->packet_space == 0) 907 if (st->packet_space == 0)
998 return 0; 908 return;
999 909
1000 EFX_BUG_ON_PARANOID(st->in_len <= 0); 910 EFX_BUG_ON_PARANOID(st->in_len <= 0);
1001 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 911 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +916,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1006 st->out_len -= n; 916 st->out_len -= n;
1007 st->in_len -= n; 917 st->in_len -= n;
1008 918
1009 rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); 919 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1010 if (likely(rc == 0)) {
1011 if (st->out_len == 0)
1012 /* Transfer ownership of the skb */
1013 buffer->skb = skb;
1014 920
1015 end_of_packet = st->out_len == 0 || st->packet_space == 0; 921 if (st->out_len == 0) {
1016 buffer->continuation = !end_of_packet; 922 /* Transfer ownership of the skb */
923 buffer->skb = skb;
924 buffer->flags = EFX_TX_BUF_SKB;
925 } else if (st->packet_space != 0) {
926 buffer->flags = EFX_TX_BUF_CONT;
927 }
1017 928
1018 if (st->in_len == 0) { 929 if (st->in_len == 0) {
1019 /* Transfer ownership of the DMA mapping */ 930 /* Transfer ownership of the DMA mapping */
1020 buffer->unmap_len = st->unmap_len; 931 buffer->unmap_len = st->unmap_len;
1021 buffer->unmap_single = st->unmap_single; 932 buffer->flags |= st->dma_flags;
1022 st->unmap_len = 0; 933 st->unmap_len = 0;
1023 }
1024 } 934 }
1025 935
1026 st->dma_addr += n; 936 st->dma_addr += n;
1027 return rc;
1028} 937}
1029 938
1030 939
@@ -1035,36 +944,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1035 * @st: TSO state 944 * @st: TSO state
1036 * 945 *
1037 * Generate a new header and prepare for the new packet. Return 0 on 946 * Generate a new header and prepare for the new packet. Return 0 on
1038 * success, or -1 if failed to alloc header. 947 * success, or -%ENOMEM if failed to alloc header.
1039 */ 948 */
1040static int tso_start_new_packet(struct efx_tx_queue *tx_queue, 949static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1041 const struct sk_buff *skb, 950 const struct sk_buff *skb,
1042 struct tso_state *st) 951 struct tso_state *st)
1043{ 952{
1044 struct efx_tso_header *tsoh; 953 struct efx_tx_buffer *buffer =
954 &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
1045 struct tcphdr *tsoh_th; 955 struct tcphdr *tsoh_th;
1046 unsigned ip_length; 956 unsigned ip_length;
1047 u8 *header; 957 u8 *header;
958 int rc;
1048 959
1049 /* Allocate a DMA-mapped header buffer. */ 960 /* Allocate and insert a DMA-mapped header buffer. */
1050 if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { 961 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1051 if (tx_queue->tso_headers_free == NULL) { 962 if (!header)
1052 if (efx_tsoh_block_alloc(tx_queue)) 963 return -ENOMEM;
1053 return -1;
1054 }
1055 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1056 tsoh = tx_queue->tso_headers_free;
1057 tx_queue->tso_headers_free = tsoh->next;
1058 tsoh->unmap_len = 0;
1059 } else {
1060 tx_queue->tso_long_headers++;
1061 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1062 if (unlikely(!tsoh))
1063 return -1;
1064 }
1065 964
1066 header = TSOH_BUFFER(tsoh); 965 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1067 tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
1068 966
1069 /* Copy and update the headers. */ 967 /* Copy and update the headers. */
1070 memcpy(header, skb->data, st->header_len); 968 memcpy(header, skb->data, st->header_len);
@@ -1073,19 +971,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1073 st->seqnum += skb_shinfo(skb)->gso_size; 971 st->seqnum += skb_shinfo(skb)->gso_size;
1074 if (st->out_len > skb_shinfo(skb)->gso_size) { 972 if (st->out_len > skb_shinfo(skb)->gso_size) {
1075 /* This packet will not finish the TSO burst. */ 973 /* This packet will not finish the TSO burst. */
1076 ip_length = st->full_packet_size - ETH_HDR_LEN(skb); 974 st->packet_space = skb_shinfo(skb)->gso_size;
1077 tsoh_th->fin = 0; 975 tsoh_th->fin = 0;
1078 tsoh_th->psh = 0; 976 tsoh_th->psh = 0;
1079 } else { 977 } else {
1080 /* This packet will be the last in the TSO burst. */ 978 /* This packet will be the last in the TSO burst. */
1081 ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; 979 st->packet_space = st->out_len;
1082 tsoh_th->fin = tcp_hdr(skb)->fin; 980 tsoh_th->fin = tcp_hdr(skb)->fin;
1083 tsoh_th->psh = tcp_hdr(skb)->psh; 981 tsoh_th->psh = tcp_hdr(skb)->psh;
1084 } 982 }
983 ip_length = st->ip_base_len + st->packet_space;
1085 984
1086 if (st->protocol == htons(ETH_P_IP)) { 985 if (st->protocol == htons(ETH_P_IP)) {
1087 struct iphdr *tsoh_iph = 986 struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
1088 (struct iphdr *)(header + SKB_IPV4_OFF(skb));
1089 987
1090 tsoh_iph->tot_len = htons(ip_length); 988 tsoh_iph->tot_len = htons(ip_length);
1091 989
@@ -1094,16 +992,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1094 st->ipv4_id++; 992 st->ipv4_id++;
1095 } else { 993 } else {
1096 struct ipv6hdr *tsoh_iph = 994 struct ipv6hdr *tsoh_iph =
1097 (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); 995 (struct ipv6hdr *)(header + st->ip_off);
1098 996
1099 tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); 997 tsoh_iph->payload_len = htons(ip_length);
1100 } 998 }
1101 999
1102 st->packet_space = skb_shinfo(skb)->gso_size; 1000 rc = efx_tso_put_header(tx_queue, buffer, header);
1103 ++tx_queue->tso_packets; 1001 if (unlikely(rc))
1002 return rc;
1104 1003
1105 /* Form a descriptor for this header. */ 1004 ++tx_queue->tso_packets;
1106 efx_tso_put_header(tx_queue, tsoh, st->header_len);
1107 1005
1108 return 0; 1006 return 0;
1109} 1007}
@@ -1118,13 +1016,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1118 * 1016 *
1119 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 1017 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
1120 * @skb was not enqueued. In all cases @skb is consumed. Return 1018 * @skb was not enqueued. In all cases @skb is consumed. Return
1121 * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 1019 * %NETDEV_TX_OK.
1122 */ 1020 */
1123static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1021static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1124 struct sk_buff *skb) 1022 struct sk_buff *skb)
1125{ 1023{
1126 struct efx_nic *efx = tx_queue->efx; 1024 struct efx_nic *efx = tx_queue->efx;
1127 int frag_i, rc, rc2 = NETDEV_TX_OK; 1025 int frag_i, rc;
1128 struct tso_state state; 1026 struct tso_state state;
1129 1027
1130 /* Find the packet protocol and sanity-check it */ 1028 /* Find the packet protocol and sanity-check it */
@@ -1156,11 +1054,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1156 goto mem_err; 1054 goto mem_err;
1157 1055
1158 while (1) { 1056 while (1) {
1159 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1057 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1160 if (unlikely(rc)) {
1161 rc2 = NETDEV_TX_BUSY;
1162 goto unwind;
1163 }
1164 1058
1165 /* Move onto the next fragment? */ 1059 /* Move onto the next fragment? */
1166 if (state.in_len == 0) { 1060 if (state.in_len == 0) {
@@ -1184,6 +1078,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1184 /* Pass off to hardware */ 1078 /* Pass off to hardware */
1185 efx_nic_push_buffers(tx_queue); 1079 efx_nic_push_buffers(tx_queue);
1186 1080
1081 efx_tx_maybe_stop_queue(tx_queue);
1082
1187 tx_queue->tso_bursts++; 1083 tx_queue->tso_bursts++;
1188 return NETDEV_TX_OK; 1084 return NETDEV_TX_OK;
1189 1085
@@ -1192,10 +1088,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1192 "Out of memory for TSO headers, or DMA mapping error\n"); 1088 "Out of memory for TSO headers, or DMA mapping error\n");
1193 dev_kfree_skb_any(skb); 1089 dev_kfree_skb_any(skb);
1194 1090
1195 unwind:
1196 /* Free the DMA mapping we were in the process of writing out */ 1091 /* Free the DMA mapping we were in the process of writing out */
1197 if (state.unmap_len) { 1092 if (state.unmap_len) {
1198 if (state.unmap_single) 1093 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
1199 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, 1094 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1200 state.unmap_len, DMA_TO_DEVICE); 1095 state.unmap_len, DMA_TO_DEVICE);
1201 else 1096 else
@@ -1204,25 +1099,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1204 } 1099 }
1205 1100
1206 efx_enqueue_unwind(tx_queue); 1101 efx_enqueue_unwind(tx_queue);
1207 return rc2; 1102 return NETDEV_TX_OK;
1208}
1209
1210
1211/*
1212 * Free up all TSO datastructures associated with tx_queue. This
1213 * routine should be called only once the tx_queue is both empty and
1214 * will no longer be used.
1215 */
1216static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1217{
1218 unsigned i;
1219
1220 if (tx_queue->buffer) {
1221 for (i = 0; i <= tx_queue->ptr_mask; ++i)
1222 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1223 }
1224
1225 while (tx_queue->tso_headers_free != NULL)
1226 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1227 &tx_queue->efx->pci_dev->dev);
1228} 1103}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ade108232048..0376a5e6b2bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
177 new_bus->write = &stmmac_mdio_write; 177 new_bus->write = &stmmac_mdio_write;
178 new_bus->reset = &stmmac_mdio_reset; 178 new_bus->reset = &stmmac_mdio_reset;
179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", 179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
180 new_bus->name, mdio_bus_data->bus_id); 180 new_bus->name, priv->plat->bus_id);
181 new_bus->priv = ndev; 181 new_bus->priv = ndev;
182 new_bus->irq = irqlist; 182 new_bus->irq = irqlist;
183 new_bus->phy_mask = mdio_bus_data->phy_mask; 183 new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
213 * and no PHY number was provided to the MAC, 213 * and no PHY number was provided to the MAC,
214 * use the one probed here. 214 * use the one probed here.
215 */ 215 */
216 if ((priv->plat->bus_id == mdio_bus_data->bus_id) && 216 if (priv->plat->phy_addr == -1)
217 (priv->plat->phy_addr == -1))
218 priv->plat->phy_addr = addr; 217 priv->plat->phy_addr = addr;
219 218
220 act = (priv->plat->bus_id == mdio_bus_data->bus_id) && 219 act = (priv->plat->phy_addr == addr);
221 (priv->plat->phy_addr == addr);
222 switch (phydev->irq) { 220 switch (phydev->irq) {
223 case PHY_POLL: 221 case PHY_POLL:
224 irq_str = "POLL"; 222 irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
258{ 256{
259 struct stmmac_priv *priv = netdev_priv(ndev); 257 struct stmmac_priv *priv = netdev_priv(ndev);
260 258
259 if (!priv->mii)
260 return 0;
261
261 mdiobus_unregister(priv->mii); 262 mdiobus_unregister(priv->mii);
262 priv->mii->priv = NULL; 263 priv->mii->priv = NULL;
263 mdiobus_free(priv->mii); 264 mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 13afb8edfadc..1f069b0f6af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
40 plat_dat.has_gmac = 1; 40 plat_dat.has_gmac = 1;
41 plat_dat.force_sf_dma_mode = 1; 41 plat_dat.force_sf_dma_mode = 1;
42 42
43 mdio_data.bus_id = 1;
44 mdio_data.phy_reset = NULL; 43 mdio_data.phy_reset = NULL;
45 mdio_data.phy_mask = 0; 44 mdio_data.phy_mask = 0;
46 plat_dat.mdio_bus_data = &mdio_data; 45 plat_dat.mdio_bus_data = &mdio_data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b93245c11995..ed112b55ae7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
78{ 78{
79 int ret = 0; 79 int ret = 0;
80 struct resource *res; 80 struct resource *res;
81 struct device *dev = &pdev->dev;
81 void __iomem *addr = NULL; 82 void __iomem *addr = NULL;
82 struct stmmac_priv *priv = NULL; 83 struct stmmac_priv *priv = NULL;
83 struct plat_stmmacenet_data *plat_dat = NULL; 84 struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
87 if (!res) 88 if (!res)
88 return -ENODEV; 89 return -ENODEV;
89 90
90 if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 91 addr = devm_request_and_ioremap(dev, res);
91 pr_err("%s: ERROR: memory allocation failed"
92 "cannot get the I/O addr 0x%x\n",
93 __func__, (unsigned int)res->start);
94 return -EBUSY;
95 }
96
97 addr = ioremap(res->start, resource_size(res));
98 if (!addr) { 92 if (!addr) {
99 pr_err("%s: ERROR: memory mapping failed", __func__); 93 pr_err("%s: ERROR: memory mapping failed", __func__);
100 ret = -ENOMEM; 94 return -ENOMEM;
101 goto out_release_region;
102 } 95 }
103 96
104 if (pdev->dev.of_node) { 97 if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
107 GFP_KERNEL); 100 GFP_KERNEL);
108 if (!plat_dat) { 101 if (!plat_dat) {
109 pr_err("%s: ERROR: no memory", __func__); 102 pr_err("%s: ERROR: no memory", __func__);
110 ret = -ENOMEM; 103 return -ENOMEM;
111 goto out_unmap;
112 } 104 }
113 105
114 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); 106 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
115 if (ret) { 107 if (ret) {
116 pr_err("%s: main dt probe failed", __func__); 108 pr_err("%s: main dt probe failed", __func__);
117 goto out_unmap; 109 return ret;
118 } 110 }
119 } else { 111 } else {
120 plat_dat = pdev->dev.platform_data; 112 plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
124 if (plat_dat->init) { 116 if (plat_dat->init) {
125 ret = plat_dat->init(pdev); 117 ret = plat_dat->init(pdev);
126 if (unlikely(ret)) 118 if (unlikely(ret))
127 goto out_unmap; 119 return ret;
128 } 120 }
129 121
130 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); 122 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
131 if (!priv) { 123 if (!priv) {
132 pr_err("%s: main driver probe failed", __func__); 124 pr_err("%s: main driver probe failed", __func__);
133 goto out_unmap; 125 return -ENODEV;
134 } 126 }
135 127
136 /* Get MAC address if available (DT) */ 128 /* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
142 if (priv->dev->irq == -ENXIO) { 134 if (priv->dev->irq == -ENXIO) {
143 pr_err("%s: ERROR: MAC IRQ configuration " 135 pr_err("%s: ERROR: MAC IRQ configuration "
144 "information not found\n", __func__); 136 "information not found\n", __func__);
145 ret = -ENXIO; 137 return -ENXIO;
146 goto out_unmap;
147 } 138 }
148 139
149 /* 140 /*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
165 pr_debug("STMMAC platform driver registration completed"); 156 pr_debug("STMMAC platform driver registration completed");
166 157
167 return 0; 158 return 0;
168
169out_unmap:
170 iounmap(addr);
171 platform_set_drvdata(pdev, NULL);
172
173out_release_region:
174 release_mem_region(res->start, resource_size(res));
175
176 return ret;
177} 159}
178 160
179/** 161/**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
186{ 168{
187 struct net_device *ndev = platform_get_drvdata(pdev); 169 struct net_device *ndev = platform_get_drvdata(pdev);
188 struct stmmac_priv *priv = netdev_priv(ndev); 170 struct stmmac_priv *priv = netdev_priv(ndev);
189 struct resource *res;
190 int ret = stmmac_dvr_remove(ndev); 171 int ret = stmmac_dvr_remove(ndev);
191 172
192 if (priv->plat->exit) 173 if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
194 175
195 platform_set_drvdata(pdev, NULL); 176 platform_set_drvdata(pdev, NULL);
196 177
197 iounmap((void __force __iomem *)priv->ioaddr);
198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
199 release_mem_region(res->start, resource_size(res));
200
201 return ret; 178 return ret;
202} 179}
203 180
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 967fe8cb476e..c9c977bf02ac 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -212,7 +212,6 @@ static void bigmac_clean_rings(struct bigmac *bp)
212static void bigmac_init_rings(struct bigmac *bp, int from_irq) 212static void bigmac_init_rings(struct bigmac *bp, int from_irq)
213{ 213{
214 struct bmac_init_block *bb = bp->bmac_block; 214 struct bmac_init_block *bb = bp->bmac_block;
215 struct net_device *dev = bp->dev;
216 int i; 215 int i;
217 gfp_t gfp_flags = GFP_KERNEL; 216 gfp_t gfp_flags = GFP_KERNEL;
218 217
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1b173a6145d6..b26cbda5efa9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
32 32
33config TI_DAVINCI_MDIO 33config TI_DAVINCI_MDIO
34 tristate "TI DaVinci MDIO Support" 34 tristate "TI DaVinci MDIO Support"
35 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) 35 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
36 select PHYLIB 36 select PHYLIB
37 ---help--- 37 ---help---
38 This driver supports TI's DaVinci MDIO module. 38 This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
42 42
43config TI_DAVINCI_CPDMA 43config TI_DAVINCI_CPDMA
44 tristate "TI DaVinci CPDMA Support" 44 tristate "TI DaVinci CPDMA Support"
45 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) 45 depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
46 ---help--- 46 ---help---
47 This driver supports TI's DaVinci CPDMA dma engine. 47 This driver supports TI's DaVinci CPDMA dma engine.
48 48
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1e5d85b06e71..df55e2403746 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -28,6 +28,9 @@
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/pm_runtime.h> 30#include <linux/pm_runtime.h>
31#include <linux/of.h>
32#include <linux/of_net.h>
33#include <linux/of_device.h>
31 34
32#include <linux/platform_data/cpsw.h> 35#include <linux/platform_data/cpsw.h>
33 36
@@ -383,6 +386,11 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
383 mac_control |= BIT(7); /* GIGABITEN */ 386 mac_control |= BIT(7); /* GIGABITEN */
384 if (phy->duplex) 387 if (phy->duplex)
385 mac_control |= BIT(0); /* FULLDUPLEXEN */ 388 mac_control |= BIT(0); /* FULLDUPLEXEN */
389
390 /* set speed_in input in case RMII mode is used in 100Mbps */
391 if (phy->speed == 100)
392 mac_control |= BIT(15);
393
386 *link = true; 394 *link = true;
387 } else { 395 } else {
388 mac_control = 0; 396 mac_control = 0;
@@ -709,6 +717,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
709 slave->sliver = regs + data->sliver_reg_ofs; 717 slave->sliver = regs + data->sliver_reg_ofs;
710} 718}
711 719
720static int cpsw_probe_dt(struct cpsw_platform_data *data,
721 struct platform_device *pdev)
722{
723 struct device_node *node = pdev->dev.of_node;
724 struct device_node *slave_node;
725 int i = 0, ret;
726 u32 prop;
727
728 if (!node)
729 return -EINVAL;
730
731 if (of_property_read_u32(node, "slaves", &prop)) {
732 pr_err("Missing slaves property in the DT.\n");
733 return -EINVAL;
734 }
735 data->slaves = prop;
736
737 data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
738 data->slaves, GFP_KERNEL);
739 if (!data->slave_data) {
740 pr_err("Could not allocate slave memory.\n");
741 return -EINVAL;
742 }
743
744 data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
745
746 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
747 pr_err("Missing cpdma_channels property in the DT.\n");
748 ret = -EINVAL;
749 goto error_ret;
750 }
751 data->channels = prop;
752
753 if (of_property_read_u32(node, "host_port_no", &prop)) {
754 pr_err("Missing host_port_no property in the DT.\n");
755 ret = -EINVAL;
756 goto error_ret;
757 }
758 data->host_port_num = prop;
759
760 if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
761 pr_err("Missing cpdma_reg_ofs property in the DT.\n");
762 ret = -EINVAL;
763 goto error_ret;
764 }
765 data->cpdma_reg_ofs = prop;
766
767 if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
768 pr_err("Missing cpdma_sram_ofs property in the DT.\n");
769 ret = -EINVAL;
770 goto error_ret;
771 }
772 data->cpdma_sram_ofs = prop;
773
774 if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
775 pr_err("Missing ale_reg_ofs property in the DT.\n");
776 ret = -EINVAL;
777 goto error_ret;
778 }
779 data->ale_reg_ofs = prop;
780
781 if (of_property_read_u32(node, "ale_entries", &prop)) {
782 pr_err("Missing ale_entries property in the DT.\n");
783 ret = -EINVAL;
784 goto error_ret;
785 }
786 data->ale_entries = prop;
787
788 if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
789 pr_err("Missing host_port_reg_ofs property in the DT.\n");
790 ret = -EINVAL;
791 goto error_ret;
792 }
793 data->host_port_reg_ofs = prop;
794
795 if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
796 pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
797 ret = -EINVAL;
798 goto error_ret;
799 }
800 data->hw_stats_reg_ofs = prop;
801
802 if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
803 pr_err("Missing bd_ram_ofs property in the DT.\n");
804 ret = -EINVAL;
805 goto error_ret;
806 }
807 data->bd_ram_ofs = prop;
808
809 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
810 pr_err("Missing bd_ram_size property in the DT.\n");
811 ret = -EINVAL;
812 goto error_ret;
813 }
814 data->bd_ram_size = prop;
815
816 if (of_property_read_u32(node, "rx_descs", &prop)) {
817 pr_err("Missing rx_descs property in the DT.\n");
818 ret = -EINVAL;
819 goto error_ret;
820 }
821 data->rx_descs = prop;
822
823 if (of_property_read_u32(node, "mac_control", &prop)) {
824 pr_err("Missing mac_control property in the DT.\n");
825 ret = -EINVAL;
826 goto error_ret;
827 }
828 data->mac_control = prop;
829
830 for_each_child_of_node(node, slave_node) {
831 struct cpsw_slave_data *slave_data = data->slave_data + i;
832 const char *phy_id = NULL;
833 const void *mac_addr = NULL;
834
835 if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
836 pr_err("Missing slave[%d] phy_id property\n", i);
837 ret = -EINVAL;
838 goto error_ret;
839 }
840 slave_data->phy_id = phy_id;
841
842 if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
843 pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
844 ret = -EINVAL;
845 goto error_ret;
846 }
847 slave_data->slave_reg_ofs = prop;
848
849 if (of_property_read_u32(slave_node, "sliver_reg_ofs",
850 &prop)) {
851 pr_err("Missing slave[%d] sliver_reg_ofs property\n",
852 i);
853 ret = -EINVAL;
854 goto error_ret;
855 }
856 slave_data->sliver_reg_ofs = prop;
857
858 mac_addr = of_get_mac_address(slave_node);
859 if (mac_addr)
860 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
861
862 i++;
863 }
864
865 return 0;
866
867error_ret:
868 kfree(data->slave_data);
869 return ret;
870}
871
712static int __devinit cpsw_probe(struct platform_device *pdev) 872static int __devinit cpsw_probe(struct platform_device *pdev)
713{ 873{
714 struct cpsw_platform_data *data = pdev->dev.platform_data; 874 struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -720,11 +880,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
720 struct resource *res; 880 struct resource *res;
721 int ret = 0, i, k = 0; 881 int ret = 0, i, k = 0;
722 882
723 if (!data) {
724 pr_err("platform data missing\n");
725 return -ENODEV;
726 }
727
728 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 883 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
729 if (!ndev) { 884 if (!ndev) {
730 pr_err("error allocating net_device\n"); 885 pr_err("error allocating net_device\n");
@@ -734,13 +889,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
734 platform_set_drvdata(pdev, ndev); 889 platform_set_drvdata(pdev, ndev);
735 priv = netdev_priv(ndev); 890 priv = netdev_priv(ndev);
736 spin_lock_init(&priv->lock); 891 spin_lock_init(&priv->lock);
737 priv->data = *data;
738 priv->pdev = pdev; 892 priv->pdev = pdev;
739 priv->ndev = ndev; 893 priv->ndev = ndev;
740 priv->dev = &ndev->dev; 894 priv->dev = &ndev->dev;
741 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 895 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
742 priv->rx_packet_max = max(rx_packet_max, 128); 896 priv->rx_packet_max = max(rx_packet_max, 128);
743 897
898 if (cpsw_probe_dt(&priv->data, pdev)) {
899 pr_err("cpsw: platform data missing\n");
900 ret = -ENODEV;
901 goto clean_ndev_ret;
902 }
903 data = &priv->data;
904
744 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 905 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
745 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 906 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
746 pr_info("Detected MACID = %pM", priv->mac_addr); 907 pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1157,17 @@ static const struct dev_pm_ops cpsw_pm_ops = {
996 .resume = cpsw_resume, 1157 .resume = cpsw_resume,
997}; 1158};
998 1159
1160static const struct of_device_id cpsw_of_mtable[] = {
1161 { .compatible = "ti,cpsw", },
1162 { /* sentinel */ },
1163};
1164
999static struct platform_driver cpsw_driver = { 1165static struct platform_driver cpsw_driver = {
1000 .driver = { 1166 .driver = {
1001 .name = "cpsw", 1167 .name = "cpsw",
1002 .owner = THIS_MODULE, 1168 .owner = THIS_MODULE,
1003 .pm = &cpsw_pm_ops, 1169 .pm = &cpsw_pm_ops,
1170 .of_match_table = of_match_ptr(cpsw_of_mtable),
1004 }, 1171 },
1005 .probe = cpsw_probe, 1172 .probe = cpsw_probe,
1006 .remove = __devexit_p(cpsw_remove), 1173 .remove = __devexit_p(cpsw_remove),
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a9ca4a03d31b..51a96dbee9ac 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -36,6 +36,8 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/pm_runtime.h> 37#include <linux/pm_runtime.h>
38#include <linux/davinci_emac.h> 38#include <linux/davinci_emac.h>
39#include <linux/of.h>
40#include <linux/of_device.h>
39 41
40/* 42/*
41 * This timeout definition is a worst-case ultra defensive measure against 43 * This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
289 return 0; 291 return 0;
290} 292}
291 293
294static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
295 struct platform_device *pdev)
296{
297 struct device_node *node = pdev->dev.of_node;
298 u32 prop;
299
300 if (!node)
301 return -EINVAL;
302
303 if (of_property_read_u32(node, "bus_freq", &prop)) {
304 pr_err("Missing bus_freq property in the DT.\n");
305 return -EINVAL;
306 }
307 data->bus_freq = prop;
308
309 return 0;
310}
311
312
292static int __devinit davinci_mdio_probe(struct platform_device *pdev) 313static int __devinit davinci_mdio_probe(struct platform_device *pdev)
293{ 314{
294 struct mdio_platform_data *pdata = pdev->dev.platform_data; 315 struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
304 return -ENOMEM; 325 return -ENOMEM;
305 } 326 }
306 327
307 data->pdata = pdata ? (*pdata) : default_pdata;
308
309 data->bus = mdiobus_alloc(); 328 data->bus = mdiobus_alloc();
310 if (!data->bus) { 329 if (!data->bus) {
311 dev_err(dev, "failed to alloc mii bus\n"); 330 dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
313 goto bail_out; 332 goto bail_out;
314 } 333 }
315 334
335 if (dev->of_node) {
336 if (davinci_mdio_probe_dt(&data->pdata, pdev))
337 data->pdata = default_pdata;
338 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
339 } else {
340 data->pdata = pdata ? (*pdata) : default_pdata;
341 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
342 pdev->name, pdev->id);
343 }
344
316 data->bus->name = dev_name(dev); 345 data->bus->name = dev_name(dev);
317 data->bus->read = davinci_mdio_read, 346 data->bus->read = davinci_mdio_read,
318 data->bus->write = davinci_mdio_write, 347 data->bus->write = davinci_mdio_write,
319 data->bus->reset = davinci_mdio_reset, 348 data->bus->reset = davinci_mdio_reset,
320 data->bus->parent = dev; 349 data->bus->parent = dev;
321 data->bus->priv = data; 350 data->bus->priv = data;
322 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
323 pdev->name, pdev->id);
324 351
325 pm_runtime_enable(&pdev->dev); 352 pm_runtime_enable(&pdev->dev);
326 pm_runtime_get_sync(&pdev->dev); 353 pm_runtime_get_sync(&pdev->dev);
@@ -456,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
456 .resume = davinci_mdio_resume, 483 .resume = davinci_mdio_resume,
457}; 484};
458 485
486static const struct of_device_id davinci_mdio_of_mtable[] = {
487 { .compatible = "ti,davinci_mdio", },
488 { /* sentinel */ },
489};
490
459static struct platform_driver davinci_mdio_driver = { 491static struct platform_driver davinci_mdio_driver = {
460 .driver = { 492 .driver = {
461 .name = "davinci_mdio", 493 .name = "davinci_mdio",
462 .owner = THIS_MODULE, 494 .owner = THIS_MODULE,
463 .pm = &davinci_mdio_pm_ops, 495 .pm = &davinci_mdio_pm_ops,
496 .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
464 }, 497 },
465 .probe = davinci_mdio_probe, 498 .probe = davinci_mdio_probe,
466 .remove = __devexit_p(davinci_mdio_remove), 499 .remove = __devexit_p(davinci_mdio_remove),
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 277c93e9ff4d..8fa947a2d929 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev)
1359 } 1359 }
1360 1360
1361 data->rxskbs[i] = skb; 1361 data->rxskbs[i] = skb;
1362 data->rxskbs[i] = skb;
1363 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); 1362 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1364 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; 1363 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1365 } 1364 }
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a5826a3111a6..2c08bf6e7bf3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
637 if (data && is_valid_ether_addr(data->mac_addr)) { 637 if (data && is_valid_ether_addr(data->mac_addr)) {
638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
639 } else { 639 } else {
640 eth_random_addr(ndev->dev_addr); 640 eth_hw_addr_random(ndev);
641 ndev->addr_assign_type |= NET_ADDR_RANDOM;
642 } 641 }
643 642
644 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 643 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index bdd8891c215a..88943d90c765 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
557 if (data && is_valid_ether_addr(data->mac_addr)) { 557 if (data && is_valid_ether_addr(data->mac_addr)) {
558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
559 } else { 559 } else {
560 eth_random_addr(ndev->dev_addr); 560 eth_hw_addr_random(ndev);
561 ndev->addr_assign_type |= NET_ADDR_RANDOM;
562 } 561 }
563 562
564 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 563 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 95ceb3593043..5fd6f4674326 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -35,6 +35,7 @@ struct hv_netvsc_packet;
35/* Represent the xfer page packet which contains 1 or more netvsc packet */ 35/* Represent the xfer page packet which contains 1 or more netvsc packet */
36struct xferpage_packet { 36struct xferpage_packet {
37 struct list_head list_ent; 37 struct list_head list_ent;
38 u32 status;
38 39
39 /* # of netvsc packets this xfer packet contains */ 40 /* # of netvsc packets this xfer packet contains */
40 u32 count; 41 u32 count;
@@ -47,6 +48,7 @@ struct xferpage_packet {
47struct hv_netvsc_packet { 48struct hv_netvsc_packet {
48 /* Bookkeeping stuff */ 49 /* Bookkeeping stuff */
49 struct list_head list_ent; 50 struct list_head list_ent;
51 u32 status;
50 52
51 struct hv_device *device; 53 struct hv_device *device;
52 bool is_data_pkt; 54 bool is_data_pkt;
@@ -465,8 +467,6 @@ struct nvsp_message {
465 467
466#define NETVSC_RECEIVE_BUFFER_ID 0xcafe 468#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
467 469
468#define NETVSC_RECEIVE_SG_COUNT 1
469
470/* Preallocated receive packets */ 470/* Preallocated receive packets */
471#define NETVSC_RECEIVE_PACKETLIST_COUNT 256 471#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
472 472
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4a1a5f58fa73..1cd77483da50 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -558,7 +558,7 @@ int netvsc_send(struct hv_device *device,
558} 558}
559 559
560static void netvsc_send_recv_completion(struct hv_device *device, 560static void netvsc_send_recv_completion(struct hv_device *device,
561 u64 transaction_id) 561 u64 transaction_id, u32 status)
562{ 562{
563 struct nvsp_message recvcompMessage; 563 struct nvsp_message recvcompMessage;
564 int retries = 0; 564 int retries = 0;
@@ -571,9 +571,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
571 recvcompMessage.hdr.msg_type = 571 recvcompMessage.hdr.msg_type =
572 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; 572 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
573 573
574 /* FIXME: Pass in the status */ 574 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
575 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
576 NVSP_STAT_SUCCESS;
577 575
578retry_send_cmplt: 576retry_send_cmplt:
579 /* Send the completion */ 577 /* Send the completion */
@@ -613,6 +611,7 @@ static void netvsc_receive_completion(void *context)
613 bool fsend_receive_comp = false; 611 bool fsend_receive_comp = false;
614 unsigned long flags; 612 unsigned long flags;
615 struct net_device *ndev; 613 struct net_device *ndev;
614 u32 status = NVSP_STAT_NONE;
616 615
617 /* 616 /*
618 * Even though it seems logical to do a GetOutboundNetDevice() here to 617 * Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -627,6 +626,9 @@ static void netvsc_receive_completion(void *context)
627 /* Overloading use of the lock. */ 626 /* Overloading use of the lock. */
628 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags); 627 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
629 628
629 if (packet->status != NVSP_STAT_SUCCESS)
630 packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
631
630 packet->xfer_page_pkt->count--; 632 packet->xfer_page_pkt->count--;
631 633
632 /* 634 /*
@@ -636,6 +638,7 @@ static void netvsc_receive_completion(void *context)
636 if (packet->xfer_page_pkt->count == 0) { 638 if (packet->xfer_page_pkt->count == 0) {
637 fsend_receive_comp = true; 639 fsend_receive_comp = true;
638 transaction_id = packet->completion.recv.recv_completion_tid; 640 transaction_id = packet->completion.recv.recv_completion_tid;
641 status = packet->xfer_page_pkt->status;
639 list_add_tail(&packet->xfer_page_pkt->list_ent, 642 list_add_tail(&packet->xfer_page_pkt->list_ent,
640 &net_device->recv_pkt_list); 643 &net_device->recv_pkt_list);
641 644
@@ -647,7 +650,7 @@ static void netvsc_receive_completion(void *context)
647 650
648 /* Send a receive completion for the xfer page packet */ 651 /* Send a receive completion for the xfer page packet */
649 if (fsend_receive_comp) 652 if (fsend_receive_comp)
650 netvsc_send_recv_completion(device, transaction_id); 653 netvsc_send_recv_completion(device, transaction_id, status);
651 654
652} 655}
653 656
@@ -736,7 +739,8 @@ static void netvsc_receive(struct hv_device *device,
736 flags); 739 flags);
737 740
738 netvsc_send_recv_completion(device, 741 netvsc_send_recv_completion(device,
739 vmxferpage_packet->d.trans_id); 742 vmxferpage_packet->d.trans_id,
743 NVSP_STAT_FAIL);
740 744
741 return; 745 return;
742 } 746 }
@@ -744,6 +748,7 @@ static void netvsc_receive(struct hv_device *device,
744 /* Remove the 1st packet to represent the xfer page packet itself */ 748 /* Remove the 1st packet to represent the xfer page packet itself */
745 xferpage_packet = (struct xferpage_packet *)listHead.next; 749 xferpage_packet = (struct xferpage_packet *)listHead.next;
746 list_del(&xferpage_packet->list_ent); 750 list_del(&xferpage_packet->list_ent);
751 xferpage_packet->status = NVSP_STAT_SUCCESS;
747 752
748 /* This is how much we can satisfy */ 753 /* This is how much we can satisfy */
749 xferpage_packet->count = count - 1; 754 xferpage_packet->count = count - 1;
@@ -760,6 +765,7 @@ static void netvsc_receive(struct hv_device *device,
760 list_del(&netvsc_packet->list_ent); 765 list_del(&netvsc_packet->list_ent);
761 766
762 /* Initialize the netvsc packet */ 767 /* Initialize the netvsc packet */
768 netvsc_packet->status = NVSP_STAT_SUCCESS;
763 netvsc_packet->xfer_page_pkt = xferpage_packet; 769 netvsc_packet->xfer_page_pkt = xferpage_packet;
764 netvsc_packet->completion.recv.recv_completion = 770 netvsc_packet->completion.recv.recv_completion =
765 netvsc_receive_completion; 771 netvsc_receive_completion;
@@ -904,9 +910,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
904 INIT_LIST_HEAD(&net_device->recv_pkt_list); 910 INIT_LIST_HEAD(&net_device->recv_pkt_list);
905 911
906 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) { 912 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
907 packet = kzalloc(sizeof(struct hv_netvsc_packet) + 913 packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
908 (NETVSC_RECEIVE_SG_COUNT *
909 sizeof(struct hv_page_buffer)), GFP_KERNEL);
910 if (!packet) 914 if (!packet)
911 break; 915 break;
912 916
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8c5a1c43c81d..f825a629a699 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -265,6 +265,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
265 if (!net) { 265 if (!net) {
266 netdev_err(net, "got receive callback but net device" 266 netdev_err(net, "got receive callback but net device"
267 " not initialized yet\n"); 267 " not initialized yet\n");
268 packet->status = NVSP_STAT_FAIL;
268 return 0; 269 return 0;
269 } 270 }
270 271
@@ -272,6 +273,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
272 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen); 273 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
273 if (unlikely(!skb)) { 274 if (unlikely(!skb)) {
274 ++net->stats.rx_dropped; 275 ++net->stats.rx_dropped;
276 packet->status = NVSP_STAT_FAIL;
275 return 0; 277 return 0;
276 } 278 }
277 279
@@ -400,7 +402,7 @@ static void netvsc_send_garp(struct work_struct *w)
400 ndev_ctx = container_of(w, struct net_device_context, dwork.work); 402 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
401 net_device = hv_get_drvdata(ndev_ctx->device_ctx); 403 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
402 net = net_device->ndev; 404 net = net_device->ndev;
403 netif_notify_peers(net); 405 netdev_notify_peers(net);
404} 406}
405 407
406 408
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1e88a1095934..928148cc3220 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -32,23 +32,31 @@
32#include "hyperv_net.h" 32#include "hyperv_net.h"
33 33
34 34
35#define RNDIS_EXT_LEN 100
35struct rndis_request { 36struct rndis_request {
36 struct list_head list_ent; 37 struct list_head list_ent;
37 struct completion wait_event; 38 struct completion wait_event;
38 39
40 struct rndis_message response_msg;
39 /* 41 /*
40 * FIXME: We assumed a fixed size response here. If we do ever need to 42 * The buffer for extended info after the RNDIS response message. It's
41 * handle a bigger response, we can either define a max response 43 * referenced based on the data offset in the RNDIS message. Its size
42 * message or add a response buffer variable above this field 44 * is enough for current needs, and should be sufficient for the near
45 * future.
43 */ 46 */
44 struct rndis_message response_msg; 47 u8 response_ext[RNDIS_EXT_LEN];
45 48
46 /* Simplify allocation by having a netvsc packet inline */ 49 /* Simplify allocation by having a netvsc packet inline */
47 struct hv_netvsc_packet pkt; 50 struct hv_netvsc_packet pkt;
48 struct hv_page_buffer buf; 51 /* Set 2 pages for rndis requests crossing page boundary */
49 /* FIXME: We assumed a fixed size request here. */ 52 struct hv_page_buffer buf[2];
53
50 struct rndis_message request_msg; 54 struct rndis_message request_msg;
51 u8 ext[100]; 55 /*
56 * The buffer for the extended info after the RNDIS request message.
57 * It is referenced and sized in a similar way as response_ext.
58 */
59 u8 request_ext[RNDIS_EXT_LEN];
52}; 60};
53 61
54static void rndis_filter_send_completion(void *ctx); 62static void rndis_filter_send_completion(void *ctx);
@@ -221,6 +229,18 @@ static int rndis_filter_send_request(struct rndis_device *dev,
221 packet->page_buf[0].offset = 229 packet->page_buf[0].offset =
222 (unsigned long)&req->request_msg & (PAGE_SIZE - 1); 230 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
223 231
232 /* Add one page_buf when request_msg crossing page boundary */
233 if (packet->page_buf[0].offset + packet->page_buf[0].len > PAGE_SIZE) {
234 packet->page_buf_cnt++;
235 packet->page_buf[0].len = PAGE_SIZE -
236 packet->page_buf[0].offset;
237 packet->page_buf[1].pfn = virt_to_phys((void *)&req->request_msg
238 + packet->page_buf[0].len) >> PAGE_SHIFT;
239 packet->page_buf[1].offset = 0;
240 packet->page_buf[1].len = req->request_msg.msg_len -
241 packet->page_buf[0].len;
242 }
243
224 packet->completion.send.send_completion_ctx = req;/* packet; */ 244 packet->completion.send.send_completion_ctx = req;/* packet; */
225 packet->completion.send.send_completion = 245 packet->completion.send.send_completion =
226 rndis_filter_send_request_completion; 246 rndis_filter_send_request_completion;
@@ -255,7 +275,8 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
255 spin_unlock_irqrestore(&dev->request_lock, flags); 275 spin_unlock_irqrestore(&dev->request_lock, flags);
256 276
257 if (found) { 277 if (found) {
258 if (resp->msg_len <= sizeof(struct rndis_message)) { 278 if (resp->msg_len <=
279 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
259 memcpy(&request->response_msg, resp, 280 memcpy(&request->response_msg, resp,
260 resp->msg_len); 281 resp->msg_len);
261 } else { 282 } else {
@@ -392,9 +413,12 @@ int rndis_filter_receive(struct hv_device *dev,
392 struct rndis_device *rndis_dev; 413 struct rndis_device *rndis_dev;
393 struct rndis_message *rndis_msg; 414 struct rndis_message *rndis_msg;
394 struct net_device *ndev; 415 struct net_device *ndev;
416 int ret = 0;
395 417
396 if (!net_dev) 418 if (!net_dev) {
397 return -EINVAL; 419 ret = -EINVAL;
420 goto exit;
421 }
398 422
399 ndev = net_dev->ndev; 423 ndev = net_dev->ndev;
400 424
@@ -402,14 +426,16 @@ int rndis_filter_receive(struct hv_device *dev,
402 if (!net_dev->extension) { 426 if (!net_dev->extension) {
403 netdev_err(ndev, "got rndis message but no rndis device - " 427 netdev_err(ndev, "got rndis message but no rndis device - "
404 "dropping this message!\n"); 428 "dropping this message!\n");
405 return -ENODEV; 429 ret = -ENODEV;
430 goto exit;
406 } 431 }
407 432
408 rndis_dev = (struct rndis_device *)net_dev->extension; 433 rndis_dev = (struct rndis_device *)net_dev->extension;
409 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) { 434 if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
410 netdev_err(ndev, "got rndis message but rndis device " 435 netdev_err(ndev, "got rndis message but rndis device "
411 "uninitialized...dropping this message!\n"); 436 "uninitialized...dropping this message!\n");
412 return -ENODEV; 437 ret = -ENODEV;
438 goto exit;
413 } 439 }
414 440
415 rndis_msg = pkt->data; 441 rndis_msg = pkt->data;
@@ -441,7 +467,11 @@ int rndis_filter_receive(struct hv_device *dev,
441 break; 467 break;
442 } 468 }
443 469
444 return 0; 470exit:
471 if (ret != 0)
472 pkt->status = NVSP_STAT_FAIL;
473
474 return ret;
445} 475}
446 476
447static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, 477static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -641,6 +671,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
641 if (t == 0) { 671 if (t == 0) {
642 netdev_err(ndev, 672 netdev_err(ndev,
643 "timeout before we got a set response...\n"); 673 "timeout before we got a set response...\n");
674 ret = -ETIMEDOUT;
644 /* 675 /*
645 * We can't deallocate the request since we may still receive a 676 * We can't deallocate the request since we may still receive a
646 * send completion for it. 677 * send completion for it.
@@ -678,8 +709,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
678 init = &request->request_msg.msg.init_req; 709 init = &request->request_msg.msg.init_req;
679 init->major_ver = RNDIS_MAJOR_VERSION; 710 init->major_ver = RNDIS_MAJOR_VERSION;
680 init->minor_ver = RNDIS_MINOR_VERSION; 711 init->minor_ver = RNDIS_MINOR_VERSION;
681 /* FIXME: Use 1536 - rounded ethernet frame size */ 712 init->max_xfer_size = 0x4000;
682 init->max_xfer_size = 2048;
683 713
684 dev->state = RNDIS_DEV_INITIALIZING; 714 dev->state = RNDIS_DEV_INITIALIZING;
685 715
diff --git a/drivers/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1fc4eefc20ed..08ae4655423a 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -34,3 +34,14 @@ config IEEE802154_AT86RF230
34 depends on IEEE802154_DRIVERS && MAC802154 34 depends on IEEE802154_DRIVERS && MAC802154
35 tristate "AT86RF230/231 transceiver driver" 35 tristate "AT86RF230/231 transceiver driver"
36 depends on SPI 36 depends on SPI
37
38config IEEE802154_MRF24J40
39 tristate "Microchip MRF24J40 transceiver driver"
40 depends on IEEE802154_DRIVERS && MAC802154
41 depends on SPI
42 ---help---
43 Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
44 controller.
45
46 This driver can also be built as a module. To do so, say M here.
47 the module will be called 'mrf24j40'.
diff --git a/drivers/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 4f4371d3aa7d..abb0c08decb0 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -1,3 +1,4 @@
1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o 1obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o 2obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
3obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o 3obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
4obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 5d309408395d..ba753d87a32f 100644
--- a/drivers/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
952 .resume = at86rf230_resume, 952 .resume = at86rf230_resume,
953}; 953};
954 954
955static int __init at86rf230_init(void) 955module_spi_driver(at86rf230_driver);
956{
957 return spi_register_driver(&at86rf230_driver);
958}
959module_init(at86rf230_init);
960
961static void __exit at86rf230_exit(void)
962{
963 spi_unregister_driver(&at86rf230_driver);
964}
965module_exit(at86rf230_exit);
966 956
967MODULE_DESCRIPTION("AT86RF230 Transceiver Driver"); 957MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
968MODULE_LICENSE("GPL v2"); 958MODULE_LICENSE("GPL v2");
diff --git a/drivers/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 73d453159408..7d39add7d467 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -446,4 +446,3 @@ static __exit void fake_exit(void)
446module_init(fake_init); 446module_init(fake_init);
447module_exit(fake_exit); 447module_exit(fake_exit);
448MODULE_LICENSE("GPL"); 448MODULE_LICENSE("GPL");
449
diff --git a/drivers/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index e7456fcd0913..e7456fcd0913 100644
--- a/drivers/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
new file mode 100644
index 000000000000..0e53d4f431d2
--- /dev/null
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -0,0 +1,767 @@
1/*
2 * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller
3 *
4 * Copyright (C) 2012 Alan Ott <alan@signal11.us>
5 * Signal 11 Software
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/spi/spi.h>
23#include <linux/interrupt.h>
24#include <linux/module.h>
25#include <net/wpan-phy.h>
26#include <net/mac802154.h>
27
28/* MRF24J40 Short Address Registers */
29#define REG_RXMCR 0x00 /* Receive MAC control */
30#define REG_PANIDL 0x01 /* PAN ID (low) */
31#define REG_PANIDH 0x02 /* PAN ID (high) */
32#define REG_SADRL 0x03 /* Short address (low) */
33#define REG_SADRH 0x04 /* Short address (high) */
34#define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */
35#define REG_TXMCR 0x11 /* Transmit MAC control */
36#define REG_PACON0 0x16 /* Power Amplifier Control */
37#define REG_PACON1 0x17 /* Power Amplifier Control */
38#define REG_PACON2 0x18 /* Power Amplifier Control */
39#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */
40#define REG_TXSTAT 0x24 /* TX MAC Status Register */
41#define REG_SOFTRST 0x2A /* Soft Reset */
42#define REG_TXSTBL 0x2E /* TX Stabilization */
43#define REG_INTSTAT 0x31 /* Interrupt Status */
44#define REG_INTCON 0x32 /* Interrupt Control */
45#define REG_RFCTL 0x36 /* RF Control Mode Register */
46#define REG_BBREG1 0x39 /* Baseband Registers */
47#define REG_BBREG2 0x3A /* */
48#define REG_BBREG6 0x3E /* */
49#define REG_CCAEDTH 0x3F /* Energy Detection Threshold */
50
51/* MRF24J40 Long Address Registers */
52#define REG_RFCON0 0x200 /* RF Control Registers */
53#define REG_RFCON1 0x201
54#define REG_RFCON2 0x202
55#define REG_RFCON3 0x203
56#define REG_RFCON5 0x205
57#define REG_RFCON6 0x206
58#define REG_RFCON7 0x207
59#define REG_RFCON8 0x208
60#define REG_RSSI 0x210
61#define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */
62#define REG_SLPCON1 0x220
63#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */
64#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */
65#define REG_RX_FIFO 0x300 /* Receive FIFO */
66
67/* Device configuration: Only channels 11-26 on page 0 are supported. */
68#define MRF24J40_CHAN_MIN 11
69#define MRF24J40_CHAN_MAX 26
70#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \
71 - ((u32)1 << MRF24J40_CHAN_MIN))
72
73#define TX_FIFO_SIZE 128 /* From datasheet */
74#define RX_FIFO_SIZE 144 /* From datasheet */
75#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
76
77/* Device Private Data */
78struct mrf24j40 {
79 struct spi_device *spi;
80 struct ieee802154_dev *dev;
81
82 struct mutex buffer_mutex; /* only used to protect buf */
83 struct completion tx_complete;
84 struct work_struct irqwork;
85 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
86};
87
88/* Read/Write SPI Commands for Short and Long Address registers. */
89#define MRF24J40_READSHORT(reg) ((reg) << 1)
90#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
91#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
92#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
93
94/* Maximum speed to run the device at. TODO: Get the real max value from
95 * someone at Microchip since it isn't in the datasheet. */
96#define MAX_SPI_SPEED_HZ 1000000
97
98#define printdev(X) (&X->spi->dev)
99
100static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
101{
102 int ret;
103 struct spi_message msg;
104 struct spi_transfer xfer = {
105 .len = 2,
106 .tx_buf = devrec->buf,
107 .rx_buf = devrec->buf,
108 };
109
110 spi_message_init(&msg);
111 spi_message_add_tail(&xfer, &msg);
112
113 mutex_lock(&devrec->buffer_mutex);
114 devrec->buf[0] = MRF24J40_WRITESHORT(reg);
115 devrec->buf[1] = value;
116
117 ret = spi_sync(devrec->spi, &msg);
118 if (ret)
119 dev_err(printdev(devrec),
120 "SPI write Failed for short register 0x%hhx\n", reg);
121
122 mutex_unlock(&devrec->buffer_mutex);
123 return ret;
124}
125
126static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
127{
128 int ret = -1;
129 struct spi_message msg;
130 struct spi_transfer xfer = {
131 .len = 2,
132 .tx_buf = devrec->buf,
133 .rx_buf = devrec->buf,
134 };
135
136 spi_message_init(&msg);
137 spi_message_add_tail(&xfer, &msg);
138
139 mutex_lock(&devrec->buffer_mutex);
140 devrec->buf[0] = MRF24J40_READSHORT(reg);
141 devrec->buf[1] = 0;
142
143 ret = spi_sync(devrec->spi, &msg);
144 if (ret)
145 dev_err(printdev(devrec),
146 "SPI read Failed for short register 0x%hhx\n", reg);
147 else
148 *val = devrec->buf[1];
149
150 mutex_unlock(&devrec->buffer_mutex);
151 return ret;
152}
153
154static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
155{
156 int ret;
157 u16 cmd;
158 struct spi_message msg;
159 struct spi_transfer xfer = {
160 .len = 3,
161 .tx_buf = devrec->buf,
162 .rx_buf = devrec->buf,
163 };
164
165 spi_message_init(&msg);
166 spi_message_add_tail(&xfer, &msg);
167
168 cmd = MRF24J40_READLONG(reg);
169 mutex_lock(&devrec->buffer_mutex);
170 devrec->buf[0] = cmd >> 8 & 0xff;
171 devrec->buf[1] = cmd & 0xff;
172 devrec->buf[2] = 0;
173
174 ret = spi_sync(devrec->spi, &msg);
175 if (ret)
176 dev_err(printdev(devrec),
177 "SPI read Failed for long register 0x%hx\n", reg);
178 else
179 *value = devrec->buf[2];
180
181 mutex_unlock(&devrec->buffer_mutex);
182 return ret;
183}
184
185static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
186{
187 int ret;
188 u16 cmd;
189 struct spi_message msg;
190 struct spi_transfer xfer = {
191 .len = 3,
192 .tx_buf = devrec->buf,
193 .rx_buf = devrec->buf,
194 };
195
196 spi_message_init(&msg);
197 spi_message_add_tail(&xfer, &msg);
198
199 cmd = MRF24J40_WRITELONG(reg);
200 mutex_lock(&devrec->buffer_mutex);
201 devrec->buf[0] = cmd >> 8 & 0xff;
202 devrec->buf[1] = cmd & 0xff;
203 devrec->buf[2] = val;
204
205 ret = spi_sync(devrec->spi, &msg);
206 if (ret)
207 dev_err(printdev(devrec),
208 "SPI write Failed for long register 0x%hx\n", reg);
209
210 mutex_unlock(&devrec->buffer_mutex);
211 return ret;
212}
213
214/* This function relies on an undocumented write method. Once a write command
215 and address is set, as many bytes of data as desired can be clocked into
216 the device. The datasheet only shows setting one byte at a time. */
217static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
218 const u8 *data, size_t length)
219{
220 int ret;
221 u16 cmd;
222 u8 lengths[2];
223 struct spi_message msg;
224 struct spi_transfer addr_xfer = {
225 .len = 2,
226 .tx_buf = devrec->buf,
227 };
228 struct spi_transfer lengths_xfer = {
229 .len = 2,
230 .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
231 };
232 struct spi_transfer data_xfer = {
233 .len = length,
234 .tx_buf = data,
235 };
236
237 /* Range check the length. 2 bytes are used for the length fields.*/
238 if (length > TX_FIFO_SIZE-2) {
239 dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n");
240 length = TX_FIFO_SIZE-2;
241 }
242
243 spi_message_init(&msg);
244 spi_message_add_tail(&addr_xfer, &msg);
245 spi_message_add_tail(&lengths_xfer, &msg);
246 spi_message_add_tail(&data_xfer, &msg);
247
248 cmd = MRF24J40_WRITELONG(reg);
249 mutex_lock(&devrec->buffer_mutex);
250 devrec->buf[0] = cmd >> 8 & 0xff;
251 devrec->buf[1] = cmd & 0xff;
252 lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
253 lengths[1] = length; /* Total length */
254
255 ret = spi_sync(devrec->spi, &msg);
256 if (ret)
257 dev_err(printdev(devrec), "SPI write Failed for TX buf\n");
258
259 mutex_unlock(&devrec->buffer_mutex);
260 return ret;
261}
262
263static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
264 u8 *data, u8 *len, u8 *lqi)
265{
266 u8 rx_len;
267 u8 addr[2];
268 u8 lqi_rssi[2];
269 u16 cmd;
270 int ret;
271 struct spi_message msg;
272 struct spi_transfer addr_xfer = {
273 .len = 2,
274 .tx_buf = &addr,
275 };
276 struct spi_transfer data_xfer = {
277 .len = 0x0, /* set below */
278 .rx_buf = data,
279 };
280 struct spi_transfer status_xfer = {
281 .len = 2,
282 .rx_buf = &lqi_rssi,
283 };
284
285 /* Get the length of the data in the RX FIFO. The length in this
286 * register exclues the 1-byte length field at the beginning. */
287 ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
288 if (ret)
289 goto out;
290
291 /* Range check the RX FIFO length, accounting for the one-byte
292 * length field at the begining. */
293 if (rx_len > RX_FIFO_SIZE-1) {
294 dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
295 rx_len = RX_FIFO_SIZE-1;
296 }
297
298 if (rx_len > *len) {
299 /* Passed in buffer wasn't big enough. Should never happen. */
300 dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
301 rx_len = *len;
302 }
303
304 /* Set up the commands to read the data. */
305 cmd = MRF24J40_READLONG(REG_RX_FIFO+1);
306 addr[0] = cmd >> 8 & 0xff;
307 addr[1] = cmd & 0xff;
308 data_xfer.len = rx_len;
309
310 spi_message_init(&msg);
311 spi_message_add_tail(&addr_xfer, &msg);
312 spi_message_add_tail(&data_xfer, &msg);
313 spi_message_add_tail(&status_xfer, &msg);
314
315 ret = spi_sync(devrec->spi, &msg);
316 if (ret) {
317 dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
318 goto out;
319 }
320
321 *lqi = lqi_rssi[0];
322 *len = rx_len;
323
324#ifdef DEBUG
325 print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
326 DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
327 printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
328 lqi_rssi[0], lqi_rssi[1]);
329#endif
330
331out:
332 return ret;
333}
334
335static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
336{
337 struct mrf24j40 *devrec = dev->priv;
338 u8 val;
339 int ret = 0;
340
341 dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
342
343 ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
344 if (ret)
345 goto err;
346
347 /* Set TXNTRIG bit of TXNCON to send packet */
348 ret = read_short_reg(devrec, REG_TXNCON, &val);
349 if (ret)
350 goto err;
351 val |= 0x1;
352 val &= ~0x4;
353 write_short_reg(devrec, REG_TXNCON, val);
354
355 INIT_COMPLETION(devrec->tx_complete);
356
357 /* Wait for the device to send the TX complete interrupt. */
358 ret = wait_for_completion_interruptible_timeout(
359 &devrec->tx_complete,
360 5 * HZ);
361 if (ret == -ERESTARTSYS)
362 goto err;
363 if (ret == 0) {
364 ret = -ETIMEDOUT;
365 goto err;
366 }
367
368 /* Check for send error from the device. */
369 ret = read_short_reg(devrec, REG_TXSTAT, &val);
370 if (ret)
371 goto err;
372 if (val & 0x1) {
373 dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n");
374 ret = -ECOMM; /* TODO: Better error code ? */
375 } else
376 dev_dbg(printdev(devrec), "Packet Sent\n");
377
378err:
379
380 return ret;
381}
382
383static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
384{
385 /* TODO: */
386 printk(KERN_WARNING "mrf24j40: ed not implemented\n");
387 *level = 0;
388 return 0;
389}
390
391static int mrf24j40_start(struct ieee802154_dev *dev)
392{
393 struct mrf24j40 *devrec = dev->priv;
394 u8 val;
395 int ret;
396
397 dev_dbg(printdev(devrec), "start\n");
398
399 ret = read_short_reg(devrec, REG_INTCON, &val);
400 if (ret)
401 return ret;
402 val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
403 write_short_reg(devrec, REG_INTCON, val);
404
405 return 0;
406}
407
408static void mrf24j40_stop(struct ieee802154_dev *dev)
409{
410 struct mrf24j40 *devrec = dev->priv;
411 u8 val;
412 int ret;
413 dev_dbg(printdev(devrec), "stop\n");
414
415 ret = read_short_reg(devrec, REG_INTCON, &val);
416 if (ret)
417 return;
418 val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
419 write_short_reg(devrec, REG_INTCON, val);
420
421 return;
422}
423
424static int mrf24j40_set_channel(struct ieee802154_dev *dev,
425 int page, int channel)
426{
427 struct mrf24j40 *devrec = dev->priv;
428 u8 val;
429 int ret;
430
431 dev_dbg(printdev(devrec), "Set Channel %d\n", channel);
432
433 WARN_ON(page != 0);
434 WARN_ON(channel < MRF24J40_CHAN_MIN);
435 WARN_ON(channel > MRF24J40_CHAN_MAX);
436
437 /* Set Channel TODO */
438 val = (channel-11) << 4 | 0x03;
439 write_long_reg(devrec, REG_RFCON0, val);
440
441 /* RF Reset */
442 ret = read_short_reg(devrec, REG_RFCTL, &val);
443 if (ret)
444 return ret;
445 val |= 0x04;
446 write_short_reg(devrec, REG_RFCTL, val);
447 val &= ~0x04;
448 write_short_reg(devrec, REG_RFCTL, val);
449
450 udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
451
452 return 0;
453}
454
455static int mrf24j40_filter(struct ieee802154_dev *dev,
456 struct ieee802154_hw_addr_filt *filt,
457 unsigned long changed)
458{
459 struct mrf24j40 *devrec = dev->priv;
460
461 dev_dbg(printdev(devrec), "filter\n");
462
463 if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
464 /* Short Addr */
465 u8 addrh, addrl;
466 addrh = filt->short_addr >> 8 & 0xff;
467 addrl = filt->short_addr & 0xff;
468
469 write_short_reg(devrec, REG_SADRH, addrh);
470 write_short_reg(devrec, REG_SADRL, addrl);
471 dev_dbg(printdev(devrec),
472 "Set short addr to %04hx\n", filt->short_addr);
473 }
474
475 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
476 /* Device Address */
477 int i;
478 for (i = 0; i < 8; i++)
479 write_short_reg(devrec, REG_EADR0+i,
480 filt->ieee_addr[i]);
481
482#ifdef DEBUG
483 printk(KERN_DEBUG "Set long addr to: ");
484 for (i = 0; i < 8; i++)
485 printk("%02hhx ", filt->ieee_addr[i]);
486 printk(KERN_DEBUG "\n");
487#endif
488 }
489
490 if (changed & IEEE802515_AFILT_PANID_CHANGED) {
491 /* PAN ID */
492 u8 panidl, panidh;
493 panidh = filt->pan_id >> 8 & 0xff;
494 panidl = filt->pan_id & 0xff;
495 write_short_reg(devrec, REG_PANIDH, panidh);
496 write_short_reg(devrec, REG_PANIDL, panidl);
497
498 dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
499 }
500
501 if (changed & IEEE802515_AFILT_PANC_CHANGED) {
502 /* Pan Coordinator */
503 u8 val;
504 int ret;
505
506 ret = read_short_reg(devrec, REG_RXMCR, &val);
507 if (ret)
508 return ret;
509 if (filt->pan_coord)
510 val |= 0x8;
511 else
512 val &= ~0x8;
513 write_short_reg(devrec, REG_RXMCR, val);
514
515 /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
516 * REG_ORDER is maintained as default (no beacon/superframe).
517 */
518
519 dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
520 filt->pan_coord ? "on" : "off");
521 }
522
523 return 0;
524}
525
526static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
527{
528 u8 len = RX_FIFO_SIZE;
529 u8 lqi = 0;
530 u8 val;
531 int ret = 0;
532 struct sk_buff *skb;
533
534 /* Turn off reception of packets off the air. This prevents the
535 * device from overwriting the buffer while we're reading it. */
536 ret = read_short_reg(devrec, REG_BBREG1, &val);
537 if (ret)
538 goto out;
539 val |= 4; /* SET RXDECINV */
540 write_short_reg(devrec, REG_BBREG1, val);
541
542 skb = alloc_skb(len, GFP_KERNEL);
543 if (!skb) {
544 ret = -ENOMEM;
545 goto out;
546 }
547
548 ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
549 if (ret < 0) {
550 dev_err(printdev(devrec), "Failure reading RX FIFO\n");
551 kfree_skb(skb);
552 ret = -EINVAL;
553 goto out;
554 }
555
556 /* Cut off the checksum */
557 skb_trim(skb, len-2);
558
559 /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
560 * also from a workqueue). I think irqsafe is not necessary here.
561 * Can someone confirm? */
562 ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
563
564 dev_dbg(printdev(devrec), "RX Handled\n");
565
566out:
567 /* Turn back on reception of packets off the air. */
568 ret = read_short_reg(devrec, REG_BBREG1, &val);
569 if (ret)
570 return ret;
571 val &= ~0x4; /* Clear RXDECINV */
572 write_short_reg(devrec, REG_BBREG1, val);
573
574 return ret;
575}
576
/* Callbacks handed to the ieee802154 core at registration time. */
577static struct ieee802154_ops mrf24j40_ops = {
578	.owner = THIS_MODULE,
579	.xmit = mrf24j40_tx,
580	.ed = mrf24j40_ed,
581	.start = mrf24j40_start,
582	.stop = mrf24j40_stop,
583	.set_channel = mrf24j40_set_channel,
584	.set_hw_addr_filt = mrf24j40_filter,
585};
586
587static irqreturn_t mrf24j40_isr(int irq, void *data)
588{
589 struct mrf24j40 *devrec = data;
590
591 disable_irq_nosync(irq);
592
593 schedule_work(&devrec->irqwork);
594
595 return IRQ_HANDLED;
596}
597
598static void mrf24j40_isrwork(struct work_struct *work)
599{
600 struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
601 u8 intstat;
602 int ret;
603
604 /* Read the interrupt status */
605 ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
606 if (ret)
607 goto out;
608
609 /* Check for TX complete */
610 if (intstat & 0x1)
611 complete(&devrec->tx_complete);
612
613 /* Check for Rx */
614 if (intstat & 0x8)
615 mrf24j40_handle_rx(devrec);
616
617out:
618 enable_irq(devrec->spi->irq);
619}
620
/*
 * SPI probe: allocate driver state, register with the ieee802154
 * stack, initialize the chip per the datasheet sequence, then request
 * the interrupt.  Unwinds in reverse order via the goto ladder at the
 * bottom on any failure.
 *
 * NOTE(review): the device is registered with the stack *before* the
 * hardware init sequence below and before the IRQ is requested —
 * confirm the core cannot invoke the ops callbacks in that window.
 */
621static int __devinit mrf24j40_probe(struct spi_device *spi)
622{
623	int ret = -ENOMEM;
624	u8 val;
625	struct mrf24j40 *devrec;
626
627	printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
628
629	devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
630	if (!devrec)
631		goto err_devrec;
	/* 3-byte SPI scratch buffer — presumably register address plus
	 * data, shared under buffer_mutex; verify against the register
	 * accessors earlier in this file. */
632	devrec->buf = kzalloc(3, GFP_KERNEL);
633	if (!devrec->buf)
634		goto err_buf;
635
636	spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
637	if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
638		spi->max_speed_hz = MAX_SPI_SPEED_HZ;
639
640	mutex_init(&devrec->buffer_mutex);
641	init_completion(&devrec->tx_complete);
642	INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
643	devrec->spi = spi;
644	dev_set_drvdata(&spi->dev, devrec);
645
646	/* Register with the 802154 subsystem */
647
648	devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
649	if (!devrec->dev)
650		goto err_alloc_dev;
651
652	devrec->dev->priv = devrec;
653	devrec->dev->parent = &devrec->spi->dev;
654	devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
655	devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;
656
657	dev_dbg(printdev(devrec), "registered mrf24j40\n");
658	ret = ieee802154_register_device(devrec->dev);
659	if (ret)
660		goto err_register_device;
661
	/* Fixed power-up register values; return codes of these writes
	 * are deliberately not checked (best-effort init). */
662	/* Initialize the device.
663		From datasheet section 3.2: Initialization. */
664	write_short_reg(devrec, REG_SOFTRST, 0x07);
665	write_short_reg(devrec, REG_PACON2, 0x98);
666	write_short_reg(devrec, REG_TXSTBL, 0x95);
667	write_long_reg(devrec, REG_RFCON0, 0x03);
668	write_long_reg(devrec, REG_RFCON1, 0x01);
669	write_long_reg(devrec, REG_RFCON2, 0x80);
670	write_long_reg(devrec, REG_RFCON6, 0x90);
671	write_long_reg(devrec, REG_RFCON7, 0x80);
672	write_long_reg(devrec, REG_RFCON8, 0x10);
673	write_long_reg(devrec, REG_SLPCON1, 0x21);
674	write_short_reg(devrec, REG_BBREG2, 0x80);
675	write_short_reg(devrec, REG_CCAEDTH, 0x60);
676	write_short_reg(devrec, REG_BBREG6, 0x40);
677	write_short_reg(devrec, REG_RFCTL, 0x04);
678	write_short_reg(devrec, REG_RFCTL, 0x0);
679	udelay(192);
680
681	/* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
682	ret = read_short_reg(devrec, REG_RXMCR, &val);
683	if (ret)
684		goto err_read_reg;
685	val &= ~0x3; /* Clear RX mode (normal) */
686	write_short_reg(devrec, REG_RXMCR, val);
687
	/* Requested last so the handler never sees half-built state. */
688	ret = request_irq(spi->irq,
689			  mrf24j40_isr,
690			  IRQF_TRIGGER_FALLING,
691			  dev_name(&spi->dev),
692			  devrec);
693
694	if (ret) {
695		dev_err(printdev(devrec), "Unable to get IRQ");
696		goto err_irq;
697	}
698
699	return 0;
700
	/* Error unwind: reverse order of the setup steps above. */
701err_irq:
702err_read_reg:
703	ieee802154_unregister_device(devrec->dev);
704err_register_device:
705	ieee802154_free_device(devrec->dev);
706err_alloc_dev:
707	kfree(devrec->buf);
708err_buf:
709	kfree(devrec);
710err_devrec:
711	return ret;
712}
713
/*
 * SPI remove callback: tear down in reverse order of probe.
 */
714static int __devexit mrf24j40_remove(struct spi_device *spi)
715{
716	struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);
717
718	dev_dbg(printdev(devrec), "remove\n");
719
	/* Silence the interrupt source first and wait for any in-flight
	 * work item so nothing touches devrec after it is freed. */
720	free_irq(spi->irq, devrec);
721	flush_work_sync(&devrec->irqwork); /* TODO: Is this the right call? */
722	ieee802154_unregister_device(devrec->dev);
723	ieee802154_free_device(devrec->dev);
724	/* TODO: Will ieee802154_free_device() wait until ->xmit() is
725	 * complete? */
726
727	/* Clean up the SPI stuff. */
728	dev_set_drvdata(&spi->dev, NULL);
729	kfree(devrec->buf);
730	kfree(devrec);
731	return 0;
732}
733
/* SPI device-ID table: both the bare chip and the "ma" module variant
 * bind to this driver. */
734static const struct spi_device_id mrf24j40_ids[] = {
735	{ "mrf24j40", 0 },
736	{ "mrf24j40ma", 0 },
737	{ },
738};
739MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
740
741static struct spi_driver mrf24j40_driver = {
742 .driver = {
743 .name = "mrf24j40",
744 .bus = &spi_bus_type,
745 .owner = THIS_MODULE,
746 },
747 .id_table = mrf24j40_ids,
748 .probe = mrf24j40_probe,
749 .remove = __devexit_p(mrf24j40_remove),
750};
751
752static int __init mrf24j40_init(void)
753{
754 return spi_register_driver(&mrf24j40_driver);
755}
756
757static void __exit mrf24j40_exit(void)
758{
759 spi_unregister_driver(&mrf24j40_driver);
760}
761
762module_init(mrf24j40_init);
763module_exit(mrf24j40_exit);
764
765MODULE_LICENSE("GPL");
766MODULE_AUTHOR("Alan Ott");
767MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index e2a06fd996d5..81f8f9e31db5 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -157,7 +157,7 @@ static const struct net_device_ops loopback_ops = {
157 */ 157 */
158static void loopback_setup(struct net_device *dev) 158static void loopback_setup(struct net_device *dev)
159{ 159{
160 dev->mtu = (16 * 1024) + 20 + 20 + 12; 160 dev->mtu = 64 * 1024;
161 dev->hard_header_len = ETH_HLEN; /* 14 */ 161 dev->hard_header_len = ETH_HLEN; /* 14 */
162 dev->addr_len = ETH_ALEN; /* 6 */ 162 dev->addr_len = ETH_ALEN; /* 6 */
163 dev->tx_queue_len = 0; 163 dev->tx_queue_len = 0;
@@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net)
197 if (err) 197 if (err)
198 goto out_free_netdev; 198 goto out_free_netdev;
199 199
200 BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
200 net->loopback_dev = dev; 201 net->loopback_dev = dev;
201 return 0; 202 return 0;
202 203
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 66a9bfe7b1c8..68a43fe602e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -546,9 +546,9 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
546 return 0; 546 return 0;
547} 547}
548 548
549static int macvlan_fdb_add(struct ndmsg *ndm, 549static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
550 struct net_device *dev, 550 struct net_device *dev,
551 unsigned char *addr, 551 const unsigned char *addr,
552 u16 flags) 552 u16 flags)
553{ 553{
554 struct macvlan_dev *vlan = netdev_priv(dev); 554 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,7 +567,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm,
567 567
568static int macvlan_fdb_del(struct ndmsg *ndm, 568static int macvlan_fdb_del(struct ndmsg *ndm,
569 struct net_device *dev, 569 struct net_device *dev,
570 unsigned char *addr) 570 const unsigned char *addr)
571{ 571{
572 struct macvlan_dev *vlan = netdev_priv(dev); 572 struct macvlan_dev *vlan = netdev_priv(dev);
573 int err = -EINVAL; 573 int err = -EINVAL;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3090dc65a6f1..983bbf4d5ef6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
159 several child MDIO busses to a parent bus. Child bus 159 several child MDIO busses to a parent bus. Child bus
160 selection is under the control of GPIO lines. 160 selection is under the control of GPIO lines.
161 161
162config MDIO_BUS_MUX_MMIOREG
163 tristate "Support for MMIO device-controlled MDIO bus multiplexers"
164 depends on OF_MDIO
165 select MDIO_BUS_MUX
166 help
167 This module provides a driver for MDIO bus multiplexers that
168 are controlled via a simple memory-mapped device, like an FPGA.
169 The multiplexer connects one of several child MDIO busses to a
170 parent bus. Child bus selection is under the control of one of
171 the FPGA's registers.
172
173 Currently, only 8-bit registers are supported.
174
162endif # PHYLIB 175endif # PHYLIB
163 176
164config MICREL_KS8995MA 177config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6d2dc6c94f2e..426674debae4 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
28obj-$(CONFIG_AMD_PHY) += amd.o 28obj-$(CONFIG_AMD_PHY) += amd.o
29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o 29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o 30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
31obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0da0226661f..24e05c43bff8 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -980,7 +980,7 @@ static int dp83640_probe(struct phy_device *phydev)
980 980
981 if (choose_this_phy(clock, phydev)) { 981 if (choose_this_phy(clock, phydev)) {
982 clock->chosen = dp83640; 982 clock->chosen = dp83640;
983 clock->ptp_clock = ptp_clock_register(&clock->caps); 983 clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
984 if (IS_ERR(clock->ptp_clock)) { 984 if (IS_ERR(clock->ptp_clock)) {
985 err = PTR_ERR(clock->ptp_clock); 985 err = PTR_ERR(clock->ptp_clock);
986 goto no_register; 986 goto no_register;
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6d1e3fcc43e2..ec40ba882f61 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -122,6 +122,123 @@ static int lxt971_config_intr(struct phy_device *phydev)
122 return err; 122 return err;
123} 123}
124 124
125/*
126 * The A2 version of the LXT973 chip has an erratum: it randomly returns
127 * the contents of the previous even register when an odd register is read.
128 */
129
130static int lxt973a2_update_link(struct phy_device *phydev)
131{
132 int status;
133 int control;
134 int retry = 8; /* we try 8 times */
135
136 /* Do a fake read */
137 status = phy_read(phydev, MII_BMSR);
138
139 if (status < 0)
140 return status;
141
142 control = phy_read(phydev, MII_BMCR);
143 if (control < 0)
144 return control;
145
146 do {
147 /* Read link and autonegotiation status */
148 status = phy_read(phydev, MII_BMSR);
149 } while (status >= 0 && retry-- && status == control);
150
151 if (status < 0)
152 return status;
153
154 if ((status & BMSR_LSTATUS) == 0)
155 phydev->link = 0;
156 else
157 phydev->link = 1;
158
159 return 0;
160}
161
162int lxt973a2_read_status(struct phy_device *phydev)
163{
164 int adv;
165 int err;
166 int lpa;
167 int lpagb = 0;
168
169 /* Update the link, but return if there was an error */
170 err = lxt973a2_update_link(phydev);
171 if (err)
172 return err;
173
174 if (AUTONEG_ENABLE == phydev->autoneg) {
175 int retry = 1;
176
177 adv = phy_read(phydev, MII_ADVERTISE);
178
179 if (adv < 0)
180 return adv;
181
182 do {
183 lpa = phy_read(phydev, MII_LPA);
184
185 if (lpa < 0)
186 return lpa;
187
188 /* If both registers are equal, it is suspect but not
189 * impossible, hence a new try
190 */
191 } while (lpa == adv && retry--);
192
193 lpa &= adv;
194
195 phydev->speed = SPEED_10;
196 phydev->duplex = DUPLEX_HALF;
197 phydev->pause = phydev->asym_pause = 0;
198
199 if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
200 phydev->speed = SPEED_1000;
201
202 if (lpagb & LPA_1000FULL)
203 phydev->duplex = DUPLEX_FULL;
204 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
205 phydev->speed = SPEED_100;
206
207 if (lpa & LPA_100FULL)
208 phydev->duplex = DUPLEX_FULL;
209 } else {
210 if (lpa & LPA_10FULL)
211 phydev->duplex = DUPLEX_FULL;
212 }
213
214 if (phydev->duplex == DUPLEX_FULL) {
215 phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
216 phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
217 }
218 } else {
219 int bmcr = phy_read(phydev, MII_BMCR);
220
221 if (bmcr < 0)
222 return bmcr;
223
224 if (bmcr & BMCR_FULLDPLX)
225 phydev->duplex = DUPLEX_FULL;
226 else
227 phydev->duplex = DUPLEX_HALF;
228
229 if (bmcr & BMCR_SPEED1000)
230 phydev->speed = SPEED_1000;
231 else if (bmcr & BMCR_SPEED100)
232 phydev->speed = SPEED_100;
233 else
234 phydev->speed = SPEED_10;
235
236 phydev->pause = phydev->asym_pause = 0;
237 }
238
239 return 0;
240}
241
125static int lxt973_probe(struct phy_device *phydev) 242static int lxt973_probe(struct phy_device *phydev)
126{ 243{
127 int val = phy_read(phydev, MII_LXT973_PCR); 244 int val = phy_read(phydev, MII_LXT973_PCR);
@@ -175,6 +292,16 @@ static struct phy_driver lxt97x_driver[] = {
175 .driver = { .owner = THIS_MODULE,}, 292 .driver = { .owner = THIS_MODULE,},
176}, { 293}, {
177 .phy_id = 0x00137a10, 294 .phy_id = 0x00137a10,
295 .name = "LXT973-A2",
296 .phy_id_mask = 0xffffffff,
297 .features = PHY_BASIC_FEATURES,
298 .flags = 0,
299 .probe = lxt973_probe,
300 .config_aneg = lxt973_config_aneg,
301 .read_status = lxt973a2_read_status,
302 .driver = { .owner = THIS_MODULE,},
303}, {
304 .phy_id = 0x00137a10,
178 .name = "LXT973", 305 .name = "LXT973",
179 .phy_id_mask = 0xfffffff0, 306 .phy_id_mask = 0xfffffff0,
180 .features = PHY_BASIC_FEATURES, 307 .features = PHY_BASIC_FEATURES,
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7189adf54bd1..899274f2f9b1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -28,17 +28,38 @@
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/mdio-gpio.h> 29#include <linux/mdio-gpio.h>
30 30
31#ifdef CONFIG_OF_GPIO
32#include <linux/of_gpio.h> 31#include <linux/of_gpio.h>
33#include <linux/of_mdio.h> 32#include <linux/of_mdio.h>
34#include <linux/of_platform.h>
35#endif
36 33
37struct mdio_gpio_info { 34struct mdio_gpio_info {
38 struct mdiobb_ctrl ctrl; 35 struct mdiobb_ctrl ctrl;
39 int mdc, mdio; 36 int mdc, mdio;
40}; 37};
41 38
39static void *mdio_gpio_of_get_data(struct platform_device *pdev)
40{
41 struct device_node *np = pdev->dev.of_node;
42 struct mdio_gpio_platform_data *pdata;
43 int ret;
44
45 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
46 if (!pdata)
47 return NULL;
48
49 ret = of_get_gpio(np, 0);
50 if (ret < 0)
51 return NULL;
52
53 pdata->mdc = ret;
54
55 ret = of_get_gpio(np, 1);
56 if (ret < 0)
57 return NULL;
58 pdata->mdio = ret;
59
60 return pdata;
61}
62
42static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) 63static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
43{ 64{
44 struct mdio_gpio_info *bitbang = 65 struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
162 183
163static int __devinit mdio_gpio_probe(struct platform_device *pdev) 184static int __devinit mdio_gpio_probe(struct platform_device *pdev)
164{ 185{
165 struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; 186 struct mdio_gpio_platform_data *pdata;
166 struct mii_bus *new_bus; 187 struct mii_bus *new_bus;
167 int ret; 188 int ret;
168 189
190 if (pdev->dev.of_node)
191 pdata = mdio_gpio_of_get_data(pdev);
192 else
193 pdata = pdev->dev.platform_data;
194
169 if (!pdata) 195 if (!pdata)
170 return -ENODEV; 196 return -ENODEV;
171 197
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
173 if (!new_bus) 199 if (!new_bus)
174 return -ENODEV; 200 return -ENODEV;
175 201
176 ret = mdiobus_register(new_bus); 202 if (pdev->dev.of_node)
203 ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
204 else
205 ret = mdiobus_register(new_bus);
206
177 if (ret) 207 if (ret)
178 mdio_gpio_bus_deinit(&pdev->dev); 208 mdio_gpio_bus_deinit(&pdev->dev);
179 209
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
187 return 0; 217 return 0;
188} 218}
189 219
190#ifdef CONFIG_OF_GPIO 220static struct of_device_id mdio_gpio_of_match[] = {
191 221 { .compatible = "virtual,mdio-gpio", },
192static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev) 222 { /* sentinel */ }
193{
194 struct mdio_gpio_platform_data *pdata;
195 struct mii_bus *new_bus;
196 int ret;
197
198 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
199 if (!pdata)
200 return -ENOMEM;
201
202 ret = of_get_gpio(ofdev->dev.of_node, 0);
203 if (ret < 0)
204 goto out_free;
205 pdata->mdc = ret;
206
207 ret = of_get_gpio(ofdev->dev.of_node, 1);
208 if (ret < 0)
209 goto out_free;
210 pdata->mdio = ret;
211
212 new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
213 if (!new_bus)
214 goto out_free;
215
216 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
217 if (ret)
218 mdio_gpio_bus_deinit(&ofdev->dev);
219
220 return ret;
221
222out_free:
223 kfree(pdata);
224 return -ENODEV;
225}
226
227static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
228{
229 mdio_gpio_bus_destroy(&ofdev->dev);
230 kfree(ofdev->dev.platform_data);
231
232 return 0;
233}
234
235static struct of_device_id mdio_ofgpio_match[] = {
236 {
237 .compatible = "virtual,mdio-gpio",
238 },
239 {},
240};
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
242
243static struct platform_driver mdio_ofgpio_driver = {
244 .driver = {
245 .name = "mdio-ofgpio",
246 .owner = THIS_MODULE,
247 .of_match_table = mdio_ofgpio_match,
248 },
249 .probe = mdio_ofgpio_probe,
250 .remove = __devexit_p(mdio_ofgpio_remove),
251}; 223};
252 224
253static inline int __init mdio_ofgpio_init(void)
254{
255 return platform_driver_register(&mdio_ofgpio_driver);
256}
257
258static inline void mdio_ofgpio_exit(void)
259{
260 platform_driver_unregister(&mdio_ofgpio_driver);
261}
262#else
263static inline int __init mdio_ofgpio_init(void) { return 0; }
264static inline void mdio_ofgpio_exit(void) { }
265#endif /* CONFIG_OF_GPIO */
266
267static struct platform_driver mdio_gpio_driver = { 225static struct platform_driver mdio_gpio_driver = {
268 .probe = mdio_gpio_probe, 226 .probe = mdio_gpio_probe,
269 .remove = __devexit_p(mdio_gpio_remove), 227 .remove = __devexit_p(mdio_gpio_remove),
270 .driver = { 228 .driver = {
271 .name = "mdio-gpio", 229 .name = "mdio-gpio",
272 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
231 .of_match_table = mdio_gpio_of_match,
273 }, 232 },
274}; 233};
275 234
276static int __init mdio_gpio_init(void) 235static int __init mdio_gpio_init(void)
277{ 236{
278 int ret; 237 return platform_driver_register(&mdio_gpio_driver);
279
280 ret = mdio_ofgpio_init();
281 if (ret)
282 return ret;
283
284 ret = platform_driver_register(&mdio_gpio_driver);
285 if (ret)
286 mdio_ofgpio_exit();
287
288 return ret;
289} 238}
290module_init(mdio_gpio_init); 239module_init(mdio_gpio_init);
291 240
292static void __exit mdio_gpio_exit(void) 241static void __exit mdio_gpio_exit(void)
293{ 242{
294 platform_driver_unregister(&mdio_gpio_driver); 243 platform_driver_unregister(&mdio_gpio_driver);
295 mdio_ofgpio_exit();
296} 244}
297module_exit(mdio_gpio_exit); 245module_exit(mdio_gpio_exit);
298 246
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644
index 000000000000..9061ba622ac4
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -0,0 +1,171 @@
1/*
2 * Simple memory-mapped device MDIO MUX driver
3 *
4 * Author: Timur Tabi <timur@freescale.com>
5 *
6 * Copyright 2012 Freescale Semiconductor, Inc.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/platform_device.h>
14#include <linux/device.h>
15#include <linux/of_address.h>
16#include <linux/of_mdio.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/phy.h>
20#include <linux/mdio-mux.h>
21
22struct mdio_mux_mmioreg_state {
23 void *mux_handle;
24 phys_addr_t phys;
25 uint8_t mask;
26};
27
28/*
29 * MDIO multiplexing switch function
30 *
31 * This function is called by the mdio-mux layer when it thinks the mdio bus
32 * multiplexer needs to switch.
33 *
34 * 'current_child' is the current value of the mux register (masked via
35 * s->mask).
36 *
37 * 'desired_child' is the value of the 'reg' property of the target child MDIO
38 * node.
39 *
40 * The first time this function is called, current_child == -1.
41 *
42 * If current_child == desired_child, then the mux is already set to the
43 * correct bus.
44 */
45static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
46 void *data)
47{
48 struct mdio_mux_mmioreg_state *s = data;
49
50 if (current_child ^ desired_child) {
51 void *p = ioremap(s->phys, 1);
52 uint8_t x, y;
53
54 if (!p)
55 return -ENOMEM;
56
57 x = ioread8(p);
58 y = (x & ~s->mask) | desired_child;
59 if (x != y) {
60 iowrite8((x & ~s->mask) | desired_child, p);
61 pr_debug("%s: %02x -> %02x\n", __func__, x, y);
62 }
63
64 iounmap(p);
65 }
66
67 return 0;
68}
69
70static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
71{
72 struct device_node *np2, *np = pdev->dev.of_node;
73 struct mdio_mux_mmioreg_state *s;
74 struct resource res;
75 const __be32 *iprop;
76 int len, ret;
77
78 dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
79
80 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
81 if (!s)
82 return -ENOMEM;
83
84 ret = of_address_to_resource(np, 0, &res);
85 if (ret) {
86 dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
87 np->full_name);
88 return ret;
89 }
90 s->phys = res.start;
91
92 if (resource_size(&res) != sizeof(uint8_t)) {
93 dev_err(&pdev->dev, "only 8-bit registers are supported\n");
94 return -EINVAL;
95 }
96
97 iprop = of_get_property(np, "mux-mask", &len);
98 if (!iprop || len != sizeof(uint32_t)) {
99 dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
100 return -ENODEV;
101 }
102 if (be32_to_cpup(iprop) > 255) {
103 dev_err(&pdev->dev, "only 8-bit registers are supported\n");
104 return -EINVAL;
105 }
106 s->mask = be32_to_cpup(iprop);
107
108 /*
109 * Verify that the 'reg' property of each child MDIO bus does not
110 * set any bits outside of the 'mask'.
111 */
112 for_each_available_child_of_node(np, np2) {
113 iprop = of_get_property(np2, "reg", &len);
114 if (!iprop || len != sizeof(uint32_t)) {
115 dev_err(&pdev->dev, "mdio-mux child node %s is "
116 "missing a 'reg' property\n", np2->full_name);
117 return -ENODEV;
118 }
119 if (be32_to_cpup(iprop) & ~s->mask) {
120 dev_err(&pdev->dev, "mdio-mux child node %s has "
121 "a 'reg' value with unmasked bits\n",
122 np2->full_name);
123 return -ENODEV;
124 }
125 }
126
127 ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
128 &s->mux_handle, s);
129 if (ret) {
130 dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
131 np->full_name);
132 return ret;
133 }
134
135 pdev->dev.platform_data = s;
136
137 return 0;
138}
139
140static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
141{
142 struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
143
144 mdio_mux_uninit(s->mux_handle);
145
146 return 0;
147}
148
149static struct of_device_id mdio_mux_mmioreg_match[] = {
150 {
151 .compatible = "mdio-mux-mmioreg",
152 },
153 {},
154};
155MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
156
157static struct platform_driver mdio_mux_mmioreg_driver = {
158 .driver = {
159 .name = "mdio-mux-mmioreg",
160 .owner = THIS_MODULE,
161 .of_match_table = mdio_mux_mmioreg_match,
162 },
163 .probe = mdio_mux_mmioreg_probe,
164 .remove = __devexit_p(mdio_mux_mmioreg_remove),
165};
166
167module_platform_driver(mdio_mux_mmioreg_driver);
168
169MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
170MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
171MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7ca2ff97c368..ef9ea9248223 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1035 bus->write(bus, addr, MII_MMD_DATA, data); 1035 bus->write(bus, addr, MII_MMD_DATA, data);
1036} 1036}
1037 1037
1038static u32 phy_eee_to_adv(u16 eee_adv)
1039{
1040 u32 adv = 0;
1041
1042 if (eee_adv & MDIO_EEE_100TX)
1043 adv |= ADVERTISED_100baseT_Full;
1044 if (eee_adv & MDIO_EEE_1000T)
1045 adv |= ADVERTISED_1000baseT_Full;
1046 if (eee_adv & MDIO_EEE_10GT)
1047 adv |= ADVERTISED_10000baseT_Full;
1048 if (eee_adv & MDIO_EEE_1000KX)
1049 adv |= ADVERTISED_1000baseKX_Full;
1050 if (eee_adv & MDIO_EEE_10GKX4)
1051 adv |= ADVERTISED_10000baseKX4_Full;
1052 if (eee_adv & MDIO_EEE_10GKR)
1053 adv |= ADVERTISED_10000baseKR_Full;
1054
1055 return adv;
1056}
1057
1058static u32 phy_eee_to_supported(u16 eee_caported)
1059{
1060 u32 supported = 0;
1061
1062 if (eee_caported & MDIO_EEE_100TX)
1063 supported |= SUPPORTED_100baseT_Full;
1064 if (eee_caported & MDIO_EEE_1000T)
1065 supported |= SUPPORTED_1000baseT_Full;
1066 if (eee_caported & MDIO_EEE_10GT)
1067 supported |= SUPPORTED_10000baseT_Full;
1068 if (eee_caported & MDIO_EEE_1000KX)
1069 supported |= SUPPORTED_1000baseKX_Full;
1070 if (eee_caported & MDIO_EEE_10GKX4)
1071 supported |= SUPPORTED_10000baseKX4_Full;
1072 if (eee_caported & MDIO_EEE_10GKR)
1073 supported |= SUPPORTED_10000baseKR_Full;
1074
1075 return supported;
1076}
1077
1078static u16 phy_adv_to_eee(u32 adv)
1079{
1080 u16 reg = 0;
1081
1082 if (adv & ADVERTISED_100baseT_Full)
1083 reg |= MDIO_EEE_100TX;
1084 if (adv & ADVERTISED_1000baseT_Full)
1085 reg |= MDIO_EEE_1000T;
1086 if (adv & ADVERTISED_10000baseT_Full)
1087 reg |= MDIO_EEE_10GT;
1088 if (adv & ADVERTISED_1000baseKX_Full)
1089 reg |= MDIO_EEE_1000KX;
1090 if (adv & ADVERTISED_10000baseKX4_Full)
1091 reg |= MDIO_EEE_10GKX4;
1092 if (adv & ADVERTISED_10000baseKR_Full)
1093 reg |= MDIO_EEE_10GKR;
1094
1095 return reg;
1096}
1097
1098/** 1038/**
1099 * phy_init_eee - init and check the EEE feature 1039 * phy_init_eee - init and check the EEE feature
1100 * @phydev: target phy_device struct 1040 * @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1132 if (eee_cap < 0) 1072 if (eee_cap < 0)
1133 return eee_cap; 1073 return eee_cap;
1134 1074
1135 cap = phy_eee_to_supported(eee_cap); 1075 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1136 if (!cap) 1076 if (!cap)
1137 goto eee_exit; 1077 goto eee_exit;
1138 1078
@@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1149 if (eee_adv < 0) 1089 if (eee_adv < 0)
1150 return eee_adv; 1090 return eee_adv;
1151 1091
1152 adv = phy_eee_to_adv(eee_adv); 1092 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1153 lp = phy_eee_to_adv(eee_lp); 1093 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1154 idx = phy_find_setting(phydev->speed, phydev->duplex); 1094 idx = phy_find_setting(phydev->speed, phydev->duplex);
1155 if ((lp & adv & settings[idx].setting)) 1095 if ((lp & adv & settings[idx].setting))
1156 goto eee_exit; 1096 goto eee_exit;
@@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1210 MDIO_MMD_PCS, phydev->addr); 1150 MDIO_MMD_PCS, phydev->addr);
1211 if (val < 0) 1151 if (val < 0)
1212 return val; 1152 return val;
1213 data->supported = phy_eee_to_supported(val); 1153 data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1214 1154
1215 /* Get advertisement EEE */ 1155 /* Get advertisement EEE */
1216 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, 1156 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1217 MDIO_MMD_AN, phydev->addr); 1157 MDIO_MMD_AN, phydev->addr);
1218 if (val < 0) 1158 if (val < 0)
1219 return val; 1159 return val;
1220 data->advertised = phy_eee_to_adv(val); 1160 data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1221 1161
1222 /* Get LP advertisement EEE */ 1162 /* Get LP advertisement EEE */
1223 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE, 1163 val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1224 MDIO_MMD_AN, phydev->addr); 1164 MDIO_MMD_AN, phydev->addr);
1225 if (val < 0) 1165 if (val < 0)
1226 return val; 1166 return val;
1227 data->lp_advertised = phy_eee_to_adv(val); 1167 data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1228 1168
1229 return 0; 1169 return 0;
1230} 1170}
@@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1241{ 1181{
1242 int val; 1182 int val;
1243 1183
1244 val = phy_adv_to_eee(data->advertised); 1184 val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1245 phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN, 1185 phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1246 phydev->addr, val); 1186 phydev->addr, val);
1247 1187
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5c0557222f20..eb3f5cefeba3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -94,6 +94,18 @@ struct ppp_file {
94#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) 94#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
95 95
96/* 96/*
97 * Data structure to hold primary network stats for which
98 * we want to use 64 bit storage. Other network stats
99 * are stored in dev->stats of the ppp strucute.
100 */
101struct ppp_link_stats {
102 u64 rx_packets;
103 u64 tx_packets;
104 u64 rx_bytes;
105 u64 tx_bytes;
106};
107
108/*
97 * Data structure describing one ppp unit. 109 * Data structure describing one ppp unit.
98 * A ppp unit corresponds to a ppp network interface device 110 * A ppp unit corresponds to a ppp network interface device
99 * and represents a multilink bundle. 111 * and represents a multilink bundle.
@@ -136,6 +148,7 @@ struct ppp {
136 unsigned pass_len, active_len; 148 unsigned pass_len, active_len;
137#endif /* CONFIG_PPP_FILTER */ 149#endif /* CONFIG_PPP_FILTER */
138 struct net *ppp_net; /* the net we belong to */ 150 struct net *ppp_net; /* the net we belong to */
151 struct ppp_link_stats stats64; /* 64 bit network stats */
139}; 152};
140 153
141/* 154/*
@@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1021 return err; 1034 return err;
1022} 1035}
1023 1036
1037struct rtnl_link_stats64*
1038ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1039{
1040 struct ppp *ppp = netdev_priv(dev);
1041
1042 ppp_recv_lock(ppp);
1043 stats64->rx_packets = ppp->stats64.rx_packets;
1044 stats64->rx_bytes = ppp->stats64.rx_bytes;
1045 ppp_recv_unlock(ppp);
1046
1047 ppp_xmit_lock(ppp);
1048 stats64->tx_packets = ppp->stats64.tx_packets;
1049 stats64->tx_bytes = ppp->stats64.tx_bytes;
1050 ppp_xmit_unlock(ppp);
1051
1052 stats64->rx_errors = dev->stats.rx_errors;
1053 stats64->tx_errors = dev->stats.tx_errors;
1054 stats64->rx_dropped = dev->stats.rx_dropped;
1055 stats64->tx_dropped = dev->stats.tx_dropped;
1056 stats64->rx_length_errors = dev->stats.rx_length_errors;
1057
1058 return stats64;
1059}
1060
1024static const struct net_device_ops ppp_netdev_ops = { 1061static const struct net_device_ops ppp_netdev_ops = {
1025 .ndo_start_xmit = ppp_start_xmit, 1062 .ndo_start_xmit = ppp_start_xmit,
1026 .ndo_do_ioctl = ppp_net_ioctl, 1063 .ndo_do_ioctl = ppp_net_ioctl,
1064 .ndo_get_stats64 = ppp_get_stats64,
1027}; 1065};
1028 1066
1029static void ppp_setup(struct net_device *dev) 1067static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1157#endif /* CONFIG_PPP_FILTER */ 1195#endif /* CONFIG_PPP_FILTER */
1158 } 1196 }
1159 1197
1160 ++ppp->dev->stats.tx_packets; 1198 ++ppp->stats64.tx_packets;
1161 ppp->dev->stats.tx_bytes += skb->len - 2; 1199 ppp->stats64.tx_bytes += skb->len - 2;
1162 1200
1163 switch (proto) { 1201 switch (proto) {
1164 case PPP_IP: 1202 case PPP_IP:
@@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1745 break; 1783 break;
1746 } 1784 }
1747 1785
1748 ++ppp->dev->stats.rx_packets; 1786 ++ppp->stats64.rx_packets;
1749 ppp->dev->stats.rx_bytes += skb->len - 2; 1787 ppp->stats64.rx_bytes += skb->len - 2;
1750 1788
1751 npi = proto_to_npindex(proto); 1789 npi = proto_to_npindex(proto);
1752 if (npi < 0) { 1790 if (npi < 0) {
@@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2570 struct slcompress *vj = ppp->vj; 2608 struct slcompress *vj = ppp->vj;
2571 2609
2572 memset(st, 0, sizeof(*st)); 2610 memset(st, 0, sizeof(*st));
2573 st->p.ppp_ipackets = ppp->dev->stats.rx_packets; 2611 st->p.ppp_ipackets = ppp->stats64.rx_packets;
2574 st->p.ppp_ierrors = ppp->dev->stats.rx_errors; 2612 st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
2575 st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; 2613 st->p.ppp_ibytes = ppp->stats64.rx_bytes;
2576 st->p.ppp_opackets = ppp->dev->stats.tx_packets; 2614 st->p.ppp_opackets = ppp->stats64.tx_packets;
2577 st->p.ppp_oerrors = ppp->dev->stats.tx_errors; 2615 st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
2578 st->p.ppp_obytes = ppp->dev->stats.tx_bytes; 2616 st->p.ppp_obytes = ppp->stats64.tx_bytes;
2579 if (!vj) 2617 if (!vj)
2580 return; 2618 return;
2581 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; 2619 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 6a7260b03a1e..6b08bd419fba 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST
21 ---help--- 21 ---help---
22 Basic mode where packets are transmitted always by all suitable ports. 22 Basic mode where packets are transmitted always by all suitable ports.
23 23
24 All added ports are setup to have team's mac address. 24 All added ports are setup to have team's device address.
25 25
26 To compile this team mode as a module, choose M here: the module 26 To compile this team mode as a module, choose M here: the module
27 will be called team_mode_broadcast. 27 will be called team_mode_broadcast.
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
33 Basic mode where port used for transmitting packets is selected in 33 Basic mode where port used for transmitting packets is selected in
34 round-robin fashion using packet counter. 34 round-robin fashion using packet counter.
35 35
36 All added ports are setup to have team's mac address. 36 All added ports are setup to have team's device address.
37 37
38 To compile this team mode as a module, choose M here: the module 38 To compile this team mode as a module, choose M here: the module
39 will be called team_mode_roundrobin. 39 will be called team_mode_roundrobin.
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f8cd61f449a4..5c7547c4f802 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
54} 54}
55 55
56/* 56/*
57 * Since the ability to change mac address for open port device is tested in 57 * Since the ability to change device address for open port device is tested in
58 * team_port_add, this function can be called without control of return value 58 * team_port_add, this function can be called without control of return value
59 */ 59 */
60static int __set_port_mac(struct net_device *port_dev, 60static int __set_port_dev_addr(struct net_device *port_dev,
61 const unsigned char *dev_addr) 61 const unsigned char *dev_addr)
62{ 62{
63 struct sockaddr addr; 63 struct sockaddr addr;
64 64
65 memcpy(addr.sa_data, dev_addr, ETH_ALEN); 65 memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
66 addr.sa_family = ARPHRD_ETHER; 66 addr.sa_family = port_dev->type;
67 return dev_set_mac_address(port_dev, &addr); 67 return dev_set_mac_address(port_dev, &addr);
68} 68}
69 69
70static int team_port_set_orig_mac(struct team_port *port) 70static int team_port_set_orig_dev_addr(struct team_port *port)
71{ 71{
72 return __set_port_mac(port->dev, port->orig.dev_addr); 72 return __set_port_dev_addr(port->dev, port->orig.dev_addr);
73} 73}
74 74
75int team_port_set_team_mac(struct team_port *port) 75int team_port_set_team_dev_addr(struct team_port *port)
76{ 76{
77 return __set_port_mac(port->dev, port->team->dev->dev_addr); 77 return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
78} 78}
79EXPORT_SYMBOL(team_port_set_team_mac); 79EXPORT_SYMBOL(team_port_set_team_dev_addr);
80 80
81static void team_refresh_port_linkup(struct team_port *port) 81static void team_refresh_port_linkup(struct team_port *port)
82{ 82{
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
658} 658}
659 659
660 660
661/*************************************
662 * Multiqueue Tx port select override
663 *************************************/
664
665static int team_queue_override_init(struct team *team)
666{
667 struct list_head *listarr;
668 unsigned int queue_cnt = team->dev->num_tx_queues - 1;
669 unsigned int i;
670
671 if (!queue_cnt)
672 return 0;
673 listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
674 if (!listarr)
675 return -ENOMEM;
676 team->qom_lists = listarr;
677 for (i = 0; i < queue_cnt; i++)
678 INIT_LIST_HEAD(listarr++);
679 return 0;
680}
681
682static void team_queue_override_fini(struct team *team)
683{
684 kfree(team->qom_lists);
685}
686
687static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
688{
689 return &team->qom_lists[queue_id - 1];
690}
691
692/*
693 * note: already called with rcu_read_lock
694 */
695static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
696{
697 struct list_head *qom_list;
698 struct team_port *port;
699
700 if (!team->queue_override_enabled || !skb->queue_mapping)
701 return false;
702 qom_list = __team_get_qom_list(team, skb->queue_mapping);
703 list_for_each_entry_rcu(port, qom_list, qom_list) {
704 if (!team_dev_queue_xmit(team, port, skb))
705 return true;
706 }
707 return false;
708}
709
710static void __team_queue_override_port_del(struct team *team,
711 struct team_port *port)
712{
713 list_del_rcu(&port->qom_list);
714 synchronize_rcu();
715 INIT_LIST_HEAD(&port->qom_list);
716}
717
718static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
719 struct team_port *cur)
720{
721 if (port->priority < cur->priority)
722 return true;
723 if (port->priority > cur->priority)
724 return false;
725 if (port->index < cur->index)
726 return true;
727 return false;
728}
729
730static void __team_queue_override_port_add(struct team *team,
731 struct team_port *port)
732{
733 struct team_port *cur;
734 struct list_head *qom_list;
735 struct list_head *node;
736
737 if (!port->queue_id || !team_port_enabled(port))
738 return;
739
740 qom_list = __team_get_qom_list(team, port->queue_id);
741 node = qom_list;
742 list_for_each_entry(cur, qom_list, qom_list) {
743 if (team_queue_override_port_has_gt_prio_than(port, cur))
744 break;
745 node = &cur->qom_list;
746 }
747 list_add_tail_rcu(&port->qom_list, node);
748}
749
750static void __team_queue_override_enabled_check(struct team *team)
751{
752 struct team_port *port;
753 bool enabled = false;
754
755 list_for_each_entry(port, &team->port_list, list) {
756 if (!list_empty(&port->qom_list)) {
757 enabled = true;
758 break;
759 }
760 }
761 if (enabled == team->queue_override_enabled)
762 return;
763 netdev_dbg(team->dev, "%s queue override\n",
764 enabled ? "Enabling" : "Disabling");
765 team->queue_override_enabled = enabled;
766}
767
768static void team_queue_override_port_refresh(struct team *team,
769 struct team_port *port)
770{
771 __team_queue_override_port_del(team, port);
772 __team_queue_override_port_add(team, port);
773 __team_queue_override_enabled_check(team);
774}
775
776
661/**************** 777/****************
662 * Port handling 778 * Port handling
663 ****************/ 779 ****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
688 hlist_add_head_rcu(&port->hlist, 804 hlist_add_head_rcu(&port->hlist,
689 team_port_index_hash(team, port->index)); 805 team_port_index_hash(team, port->index));
690 team_adjust_ops(team); 806 team_adjust_ops(team);
807 team_queue_override_port_refresh(team, port);
691 if (team->ops.port_enabled) 808 if (team->ops.port_enabled)
692 team->ops.port_enabled(team, port); 809 team->ops.port_enabled(team, port);
693} 810}
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
716 hlist_del_rcu(&port->hlist); 833 hlist_del_rcu(&port->hlist);
717 __reconstruct_port_hlist(team, port->index); 834 __reconstruct_port_hlist(team, port->index);
718 port->index = -1; 835 port->index = -1;
836 team_queue_override_port_refresh(team, port);
719 __team_adjust_ops(team, team->en_port_count - 1); 837 __team_adjust_ops(team, team->en_port_count - 1);
720 /* 838 /*
721 * Wait until readers see adjusted ops. This ensures that 839 * Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
849#endif 967#endif
850 968
851static void __team_port_change_port_added(struct team_port *port, bool linkup); 969static void __team_port_change_port_added(struct team_port *port, bool linkup);
970static int team_dev_type_check_change(struct net_device *dev,
971 struct net_device *port_dev);
852 972
853static int team_port_add(struct team *team, struct net_device *port_dev) 973static int team_port_add(struct team *team, struct net_device *port_dev)
854{ 974{
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
857 char *portname = port_dev->name; 977 char *portname = port_dev->name;
858 int err; 978 int err;
859 979
860 if (port_dev->flags & IFF_LOOPBACK || 980 if (port_dev->flags & IFF_LOOPBACK) {
861 port_dev->type != ARPHRD_ETHER) { 981 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
862 netdev_err(dev, "Device %s is of an unsupported type\n",
863 portname); 982 portname);
864 return -EINVAL; 983 return -EINVAL;
865 } 984 }
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
870 return -EBUSY; 989 return -EBUSY;
871 } 990 }
872 991
992 if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
993 vlan_uses_dev(dev)) {
994 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
995 portname);
996 return -EPERM;
997 }
998
999 err = team_dev_type_check_change(dev, port_dev);
1000 if (err)
1001 return err;
1002
873 if (port_dev->flags & IFF_UP) { 1003 if (port_dev->flags & IFF_UP) {
874 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", 1004 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
875 portname); 1005 portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
883 1013
884 port->dev = port_dev; 1014 port->dev = port_dev;
885 port->team = team; 1015 port->team = team;
1016 INIT_LIST_HEAD(&port->qom_list);
886 1017
887 port->orig.mtu = port_dev->mtu; 1018 port->orig.mtu = port_dev->mtu;
888 err = dev_set_mtu(port_dev, dev->mtu); 1019 err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
891 goto err_set_mtu; 1022 goto err_set_mtu;
892 } 1023 }
893 1024
894 memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); 1025 memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
895 1026
896 err = team_port_enter(team, port); 1027 err = team_port_enter(team, port);
897 if (err) { 1028 if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
972 1103
973err_dev_open: 1104err_dev_open:
974 team_port_leave(team, port); 1105 team_port_leave(team, port);
975 team_port_set_orig_mac(port); 1106 team_port_set_orig_dev_addr(port);
976 1107
977err_port_enter: 1108err_port_enter:
978 dev_set_mtu(port_dev, port->orig.mtu); 1109 dev_set_mtu(port_dev, port->orig.mtu);
@@ -1010,7 +1141,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1010 vlan_vids_del_by_dev(port_dev, dev); 1141 vlan_vids_del_by_dev(port_dev, dev);
1011 dev_close(port_dev); 1142 dev_close(port_dev);
1012 team_port_leave(team, port); 1143 team_port_leave(team, port);
1013 team_port_set_orig_mac(port); 1144 team_port_set_orig_dev_addr(port);
1014 dev_set_mtu(port_dev, port->orig.mtu); 1145 dev_set_mtu(port_dev, port->orig.mtu);
1015 synchronize_rcu(); 1146 synchronize_rcu();
1016 kfree(port); 1147 kfree(port);
@@ -1095,6 +1226,49 @@ static int team_user_linkup_en_option_set(struct team *team,
1095 return 0; 1226 return 0;
1096} 1227}
1097 1228
1229static int team_priority_option_get(struct team *team,
1230 struct team_gsetter_ctx *ctx)
1231{
1232 struct team_port *port = ctx->info->port;
1233
1234 ctx->data.s32_val = port->priority;
1235 return 0;
1236}
1237
1238static int team_priority_option_set(struct team *team,
1239 struct team_gsetter_ctx *ctx)
1240{
1241 struct team_port *port = ctx->info->port;
1242
1243 port->priority = ctx->data.s32_val;
1244 team_queue_override_port_refresh(team, port);
1245 return 0;
1246}
1247
1248static int team_queue_id_option_get(struct team *team,
1249 struct team_gsetter_ctx *ctx)
1250{
1251 struct team_port *port = ctx->info->port;
1252
1253 ctx->data.u32_val = port->queue_id;
1254 return 0;
1255}
1256
1257static int team_queue_id_option_set(struct team *team,
1258 struct team_gsetter_ctx *ctx)
1259{
1260 struct team_port *port = ctx->info->port;
1261
1262 if (port->queue_id == ctx->data.u32_val)
1263 return 0;
1264 if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
1265 return -EINVAL;
1266 port->queue_id = ctx->data.u32_val;
1267 team_queue_override_port_refresh(team, port);
1268 return 0;
1269}
1270
1271
1098static const struct team_option team_options[] = { 1272static const struct team_option team_options[] = {
1099 { 1273 {
1100 .name = "mode", 1274 .name = "mode",
@@ -1123,6 +1297,20 @@ static const struct team_option team_options[] = {
1123 .getter = team_user_linkup_en_option_get, 1297 .getter = team_user_linkup_en_option_get,
1124 .setter = team_user_linkup_en_option_set, 1298 .setter = team_user_linkup_en_option_set,
1125 }, 1299 },
1300 {
1301 .name = "priority",
1302 .type = TEAM_OPTION_TYPE_S32,
1303 .per_port = true,
1304 .getter = team_priority_option_get,
1305 .setter = team_priority_option_set,
1306 },
1307 {
1308 .name = "queue_id",
1309 .type = TEAM_OPTION_TYPE_U32,
1310 .per_port = true,
1311 .getter = team_queue_id_option_get,
1312 .setter = team_queue_id_option_set,
1313 },
1126}; 1314};
1127 1315
1128static struct lock_class_key team_netdev_xmit_lock_key; 1316static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1158,6 +1346,9 @@ static int team_init(struct net_device *dev)
1158 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1346 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1159 INIT_HLIST_HEAD(&team->en_port_hlist[i]); 1347 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1160 INIT_LIST_HEAD(&team->port_list); 1348 INIT_LIST_HEAD(&team->port_list);
1349 err = team_queue_override_init(team);
1350 if (err)
1351 goto err_team_queue_override_init;
1161 1352
1162 team_adjust_ops(team); 1353 team_adjust_ops(team);
1163 1354
@@ -1173,6 +1364,8 @@ static int team_init(struct net_device *dev)
1173 return 0; 1364 return 0;
1174 1365
1175err_options_register: 1366err_options_register:
1367 team_queue_override_fini(team);
1368err_team_queue_override_init:
1176 free_percpu(team->pcpu_stats); 1369 free_percpu(team->pcpu_stats);
1177 1370
1178 return err; 1371 return err;
@@ -1190,6 +1383,7 @@ static void team_uninit(struct net_device *dev)
1190 1383
1191 __team_change_mode(team, NULL); /* cleanup */ 1384 __team_change_mode(team, NULL); /* cleanup */
1192 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); 1385 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1386 team_queue_override_fini(team);
1193 mutex_unlock(&team->lock); 1387 mutex_unlock(&team->lock);
1194} 1388}
1195 1389
@@ -1219,10 +1413,12 @@ static int team_close(struct net_device *dev)
1219static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) 1413static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1220{ 1414{
1221 struct team *team = netdev_priv(dev); 1415 struct team *team = netdev_priv(dev);
1222 bool tx_success = false; 1416 bool tx_success;
1223 unsigned int len = skb->len; 1417 unsigned int len = skb->len;
1224 1418
1225 tx_success = team->ops.transmit(team, skb); 1419 tx_success = team_queue_override_transmit(team, skb);
1420 if (!tx_success)
1421 tx_success = team->ops.transmit(team, skb);
1226 if (tx_success) { 1422 if (tx_success) {
1227 struct team_pcpu_stats *pcpu_stats; 1423 struct team_pcpu_stats *pcpu_stats;
1228 1424
@@ -1296,17 +1492,18 @@ static void team_set_rx_mode(struct net_device *dev)
1296 1492
1297static int team_set_mac_address(struct net_device *dev, void *p) 1493static int team_set_mac_address(struct net_device *dev, void *p)
1298{ 1494{
1495 struct sockaddr *addr = p;
1299 struct team *team = netdev_priv(dev); 1496 struct team *team = netdev_priv(dev);
1300 struct team_port *port; 1497 struct team_port *port;
1301 int err;
1302 1498
1303 err = eth_mac_addr(dev, p); 1499 if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1304 if (err) 1500 return -EADDRNOTAVAIL;
1305 return err; 1501 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1502 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1306 rcu_read_lock(); 1503 rcu_read_lock();
1307 list_for_each_entry_rcu(port, &team->port_list, list) 1504 list_for_each_entry_rcu(port, &team->port_list, list)
1308 if (team->ops.port_change_mac) 1505 if (team->ops.port_change_dev_addr)
1309 team->ops.port_change_mac(team, port); 1506 team->ops.port_change_dev_addr(team, port);
1310 rcu_read_unlock(); 1507 rcu_read_unlock();
1311 return 0; 1508 return 0;
1312} 1509}
@@ -1537,6 +1734,45 @@ static const struct net_device_ops team_netdev_ops = {
1537 * rt netlink interface 1734 * rt netlink interface
1538 ***********************/ 1735 ***********************/
1539 1736
1737static void team_setup_by_port(struct net_device *dev,
1738 struct net_device *port_dev)
1739{
1740 dev->header_ops = port_dev->header_ops;
1741 dev->type = port_dev->type;
1742 dev->hard_header_len = port_dev->hard_header_len;
1743 dev->addr_len = port_dev->addr_len;
1744 dev->mtu = port_dev->mtu;
1745 memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
1746 memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
1747 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1748}
1749
1750static int team_dev_type_check_change(struct net_device *dev,
1751 struct net_device *port_dev)
1752{
1753 struct team *team = netdev_priv(dev);
1754 char *portname = port_dev->name;
1755 int err;
1756
1757 if (dev->type == port_dev->type)
1758 return 0;
1759 if (!list_empty(&team->port_list)) {
1760 netdev_err(dev, "Device %s is of different type\n", portname);
1761 return -EBUSY;
1762 }
1763 err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
1764 err = notifier_to_errno(err);
1765 if (err) {
1766 netdev_err(dev, "Refused to change device type\n");
1767 return err;
1768 }
1769 dev_uc_flush(dev);
1770 dev_mc_flush(dev);
1771 team_setup_by_port(dev, port_dev);
1772 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
1773 return 0;
1774}
1775
1540static void team_setup(struct net_device *dev) 1776static void team_setup(struct net_device *dev)
1541{ 1777{
1542 ether_setup(dev); 1778 ether_setup(dev);
@@ -1651,7 +1887,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1651 if (!msg) 1887 if (!msg)
1652 return -ENOMEM; 1888 return -ENOMEM;
1653 1889
1654 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 1890 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
1655 &team_nl_family, 0, TEAM_CMD_NOOP); 1891 &team_nl_family, 0, TEAM_CMD_NOOP);
1656 if (!hdr) { 1892 if (!hdr) {
1657 err = -EMSGSIZE; 1893 err = -EMSGSIZE;
@@ -1660,7 +1896,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1660 1896
1661 genlmsg_end(msg, hdr); 1897 genlmsg_end(msg, hdr);
1662 1898
1663 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); 1899 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
1664 1900
1665err_msg_put: 1901err_msg_put:
1666 nlmsg_free(msg); 1902 nlmsg_free(msg);
@@ -1717,7 +1953,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
1717 if (err < 0) 1953 if (err < 0)
1718 goto err_fill; 1954 goto err_fill;
1719 1955
1720 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); 1956 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
1721 return err; 1957 return err;
1722 1958
1723err_fill: 1959err_fill:
@@ -1726,11 +1962,11 @@ err_fill:
1726} 1962}
1727 1963
1728typedef int team_nl_send_func_t(struct sk_buff *skb, 1964typedef int team_nl_send_func_t(struct sk_buff *skb,
1729 struct team *team, u32 pid); 1965 struct team *team, u32 portid);
1730 1966
1731static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid) 1967static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
1732{ 1968{
1733 return genlmsg_unicast(dev_net(team->dev), skb, pid); 1969 return genlmsg_unicast(dev_net(team->dev), skb, portid);
1734} 1970}
1735 1971
1736static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, 1972static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
@@ -1790,6 +2026,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1790 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) 2026 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1791 goto nest_cancel; 2027 goto nest_cancel;
1792 break; 2028 break;
2029 case TEAM_OPTION_TYPE_S32:
2030 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2031 goto nest_cancel;
2032 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2033 goto nest_cancel;
2034 break;
1793 default: 2035 default:
1794 BUG(); 2036 BUG();
1795 } 2037 }
@@ -1809,13 +2051,13 @@ nest_cancel:
1809} 2051}
1810 2052
1811static int __send_and_alloc_skb(struct sk_buff **pskb, 2053static int __send_and_alloc_skb(struct sk_buff **pskb,
1812 struct team *team, u32 pid, 2054 struct team *team, u32 portid,
1813 team_nl_send_func_t *send_func) 2055 team_nl_send_func_t *send_func)
1814{ 2056{
1815 int err; 2057 int err;
1816 2058
1817 if (*pskb) { 2059 if (*pskb) {
1818 err = send_func(*pskb, team, pid); 2060 err = send_func(*pskb, team, portid);
1819 if (err) 2061 if (err)
1820 return err; 2062 return err;
1821 } 2063 }
@@ -1825,7 +2067,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb,
1825 return 0; 2067 return 0;
1826} 2068}
1827 2069
1828static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, 2070static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
1829 int flags, team_nl_send_func_t *send_func, 2071 int flags, team_nl_send_func_t *send_func,
1830 struct list_head *sel_opt_inst_list) 2072 struct list_head *sel_opt_inst_list)
1831{ 2073{
@@ -1842,11 +2084,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
1842 struct team_option_inst, tmp_list); 2084 struct team_option_inst, tmp_list);
1843 2085
1844start_again: 2086start_again:
1845 err = __send_and_alloc_skb(&skb, team, pid, send_func); 2087 err = __send_and_alloc_skb(&skb, team, portid, send_func);
1846 if (err) 2088 if (err)
1847 return err; 2089 return err;
1848 2090
1849 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, 2091 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
1850 TEAM_CMD_OPTIONS_GET); 2092 TEAM_CMD_OPTIONS_GET);
1851 if (!hdr) 2093 if (!hdr)
1852 return -EMSGSIZE; 2094 return -EMSGSIZE;
@@ -1879,15 +2121,15 @@ start_again:
1879 goto start_again; 2121 goto start_again;
1880 2122
1881send_done: 2123send_done:
1882 nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); 2124 nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
1883 if (!nlh) { 2125 if (!nlh) {
1884 err = __send_and_alloc_skb(&skb, team, pid, send_func); 2126 err = __send_and_alloc_skb(&skb, team, portid, send_func);
1885 if (err) 2127 if (err)
1886 goto errout; 2128 goto errout;
1887 goto send_done; 2129 goto send_done;
1888 } 2130 }
1889 2131
1890 return send_func(skb, team, pid); 2132 return send_func(skb, team, portid);
1891 2133
1892nla_put_failure: 2134nla_put_failure:
1893 err = -EMSGSIZE; 2135 err = -EMSGSIZE;
@@ -1910,7 +2152,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1910 2152
1911 list_for_each_entry(opt_inst, &team->option_inst_list, list) 2153 list_for_each_entry(opt_inst, &team->option_inst_list, list)
1912 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); 2154 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1913 err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq, 2155 err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
1914 NLM_F_ACK, team_nl_send_unicast, 2156 NLM_F_ACK, team_nl_send_unicast,
1915 &sel_opt_inst_list); 2157 &sel_opt_inst_list);
1916 2158
@@ -1978,6 +2220,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1978 case NLA_FLAG: 2220 case NLA_FLAG:
1979 opt_type = TEAM_OPTION_TYPE_BOOL; 2221 opt_type = TEAM_OPTION_TYPE_BOOL;
1980 break; 2222 break;
2223 case NLA_S32:
2224 opt_type = TEAM_OPTION_TYPE_S32;
2225 break;
1981 default: 2226 default:
1982 goto team_put; 2227 goto team_put;
1983 } 2228 }
@@ -2034,6 +2279,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2034 case TEAM_OPTION_TYPE_BOOL: 2279 case TEAM_OPTION_TYPE_BOOL:
2035 ctx.data.bool_val = attr_data ? true : false; 2280 ctx.data.bool_val = attr_data ? true : false;
2036 break; 2281 break;
2282 case TEAM_OPTION_TYPE_S32:
2283 ctx.data.s32_val = nla_get_s32(attr_data);
2284 break;
2037 default: 2285 default:
2038 BUG(); 2286 BUG();
2039 } 2287 }
@@ -2058,7 +2306,7 @@ team_put:
2058} 2306}
2059 2307
2060static int team_nl_fill_port_list_get(struct sk_buff *skb, 2308static int team_nl_fill_port_list_get(struct sk_buff *skb,
2061 u32 pid, u32 seq, int flags, 2309 u32 portid, u32 seq, int flags,
2062 struct team *team, 2310 struct team *team,
2063 bool fillall) 2311 bool fillall)
2064{ 2312{
@@ -2066,7 +2314,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
2066 void *hdr; 2314 void *hdr;
2067 struct team_port *port; 2315 struct team_port *port;
2068 2316
2069 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 2317 hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
2070 TEAM_CMD_PORT_LIST_GET); 2318 TEAM_CMD_PORT_LIST_GET);
2071 if (!hdr) 2319 if (!hdr)
2072 return -EMSGSIZE; 2320 return -EMSGSIZE;
@@ -2115,7 +2363,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
2115 struct genl_info *info, int flags, 2363 struct genl_info *info, int flags,
2116 struct team *team) 2364 struct team *team)
2117{ 2365{
2118 return team_nl_fill_port_list_get(skb, info->snd_pid, 2366 return team_nl_fill_port_list_get(skb, info->snd_portid,
2119 info->snd_seq, NLM_F_ACK, 2367 info->snd_seq, NLM_F_ACK,
2120 team, true); 2368 team, true);
2121} 2369}
@@ -2168,7 +2416,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
2168}; 2416};
2169 2417
2170static int team_nl_send_multicast(struct sk_buff *skb, 2418static int team_nl_send_multicast(struct sk_buff *skb,
2171 struct team *team, u32 pid) 2419 struct team *team, u32 portid)
2172{ 2420{
2173 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, 2421 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
2174 team_change_event_mcgrp.id, GFP_KERNEL); 2422 team_change_event_mcgrp.id, GFP_KERNEL);
@@ -2246,7 +2494,7 @@ static void __team_options_change_check(struct team *team)
2246 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); 2494 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2247 } 2495 }
2248 err = team_nl_send_event_options_get(team, &sel_opt_inst_list); 2496 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2249 if (err) 2497 if (err && err != -ESRCH)
2250 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", 2498 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2251 err); 2499 err);
2252} 2500}
@@ -2275,9 +2523,9 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
2275 2523
2276send_event: 2524send_event:
2277 err = team_nl_send_event_port_list_get(port->team); 2525 err = team_nl_send_event_port_list_get(port->team);
2278 if (err) 2526 if (err && err != -ESRCH)
2279 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", 2527 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2280 port->dev->name); 2528 port->dev->name, err);
2281 2529
2282} 2530}
2283 2531
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c96e4d2967f0..9db0171e9366 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
48 48
49static int bc_port_enter(struct team *team, struct team_port *port) 49static int bc_port_enter(struct team *team, struct team_port *port)
50{ 50{
51 return team_port_set_team_mac(port); 51 return team_port_set_team_dev_addr(port);
52} 52}
53 53
54static void bc_port_change_mac(struct team *team, struct team_port *port) 54static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
55{ 55{
56 team_port_set_team_mac(port); 56 team_port_set_team_dev_addr(port);
57} 57}
58 58
59static const struct team_mode_ops bc_mode_ops = { 59static const struct team_mode_ops bc_mode_ops = {
60 .transmit = bc_transmit, 60 .transmit = bc_transmit,
61 .port_enter = bc_port_enter, 61 .port_enter = bc_port_enter,
62 .port_change_mac = bc_port_change_mac, 62 .port_change_dev_addr = bc_port_change_dev_addr,
63}; 63};
64 64
65static const struct team_mode bc_mode = { 65static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index ad7ed0ec544c..105135aa8f05 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -66,18 +66,18 @@ drop:
66 66
67static int rr_port_enter(struct team *team, struct team_port *port) 67static int rr_port_enter(struct team *team, struct team_port *port)
68{ 68{
69 return team_port_set_team_mac(port); 69 return team_port_set_team_dev_addr(port);
70} 70}
71 71
72static void rr_port_change_mac(struct team *team, struct team_port *port) 72static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
73{ 73{
74 team_port_set_team_mac(port); 74 team_port_set_team_dev_addr(port);
75} 75}
76 76
77static const struct team_mode_ops rr_mode_ops = { 77static const struct team_mode_ops rr_mode_ops = {
78 .transmit = rr_transmit, 78 .transmit = rr_transmit,
79 .port_enter = rr_port_enter, 79 .port_enter = rr_port_enter,
80 .port_change_mac = rr_port_change_mac, 80 .port_change_dev_addr = rr_port_change_dev_addr,
81}; 81};
82 82
83static const struct team_mode rr_mode = { 83static const struct team_mode rr_mode = {
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 32e31c5c5dc6..33ab824773c5 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -221,7 +221,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
221 /* Get the MAC address */ 221 /* Get the MAC address */
222 ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); 222 ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
223 if (ret < 0) { 223 if (ret < 0) {
224 dbg("read AX_CMD_READ_NODE_ID failed: %d", ret); 224 netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
225 ret);
225 goto out; 226 goto out;
226 } 227 }
227 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 228 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -303,7 +304,7 @@ static int ax88772_reset(struct usbnet *dev)
303 304
304 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); 305 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
305 if (ret < 0) { 306 if (ret < 0) {
306 dbg("Select PHY #1 failed: %d", ret); 307 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
307 goto out; 308 goto out;
308 } 309 }
309 310
@@ -331,13 +332,13 @@ static int ax88772_reset(struct usbnet *dev)
331 332
332 msleep(150); 333 msleep(150);
333 rx_ctl = asix_read_rx_ctl(dev); 334 rx_ctl = asix_read_rx_ctl(dev);
334 dbg("RX_CTL is 0x%04x after software reset", rx_ctl); 335 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
335 ret = asix_write_rx_ctl(dev, 0x0000); 336 ret = asix_write_rx_ctl(dev, 0x0000);
336 if (ret < 0) 337 if (ret < 0)
337 goto out; 338 goto out;
338 339
339 rx_ctl = asix_read_rx_ctl(dev); 340 rx_ctl = asix_read_rx_ctl(dev);
340 dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl); 341 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
341 342
342 ret = asix_sw_reset(dev, AX_SWRESET_PRL); 343 ret = asix_sw_reset(dev, AX_SWRESET_PRL);
343 if (ret < 0) 344 if (ret < 0)
@@ -364,7 +365,7 @@ static int ax88772_reset(struct usbnet *dev)
364 AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, 365 AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
365 AX88772_IPG2_DEFAULT, 0, NULL); 366 AX88772_IPG2_DEFAULT, 0, NULL);
366 if (ret < 0) { 367 if (ret < 0) {
367 dbg("Write IPG,IPG1,IPG2 failed: %d", ret); 368 netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
368 goto out; 369 goto out;
369 } 370 }
370 371
@@ -381,10 +382,13 @@ static int ax88772_reset(struct usbnet *dev)
381 goto out; 382 goto out;
382 383
383 rx_ctl = asix_read_rx_ctl(dev); 384 rx_ctl = asix_read_rx_ctl(dev);
384 dbg("RX_CTL is 0x%04x after all initializations", rx_ctl); 385 netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
386 rx_ctl);
385 387
386 rx_ctl = asix_read_medium_status(dev); 388 rx_ctl = asix_read_medium_status(dev);
387 dbg("Medium Status is 0x%04x after all initializations", rx_ctl); 389 netdev_dbg(dev->net,
390 "Medium Status is 0x%04x after all initializations\n",
391 rx_ctl);
388 392
389 return 0; 393 return 0;
390 394
@@ -416,7 +420,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
416 /* Get the MAC address */ 420 /* Get the MAC address */
417 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); 421 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
418 if (ret < 0) { 422 if (ret < 0) {
419 dbg("Failed to read MAC address: %d", ret); 423 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
420 return ret; 424 return ret;
421 } 425 }
422 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 426 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -439,7 +443,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
439 /* Reset the PHY to normal operation mode */ 443 /* Reset the PHY to normal operation mode */
440 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); 444 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
441 if (ret < 0) { 445 if (ret < 0) {
442 dbg("Select PHY #1 failed: %d", ret); 446 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
443 return ret; 447 return ret;
444 } 448 }
445 449
@@ -459,7 +463,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
459 463
460 /* Read PHYID register *AFTER* the PHY was reset properly */ 464 /* Read PHYID register *AFTER* the PHY was reset properly */
461 phyid = asix_get_phyid(dev); 465 phyid = asix_get_phyid(dev);
462 dbg("PHYID=0x%08x", phyid); 466 netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
463 467
464 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 468 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
465 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 469 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -575,13 +579,13 @@ static int ax88178_reset(struct usbnet *dev)
575 u32 phyid; 579 u32 phyid;
576 580
577 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); 581 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
578 dbg("GPIO Status: 0x%04x", status); 582 netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
579 583
580 asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL); 584 asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
581 asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom); 585 asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
582 asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL); 586 asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
583 587
584 dbg("EEPROM index 0x17 is 0x%04x", eeprom); 588 netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
585 589
586 if (eeprom == cpu_to_le16(0xffff)) { 590 if (eeprom == cpu_to_le16(0xffff)) {
587 data->phymode = PHY_MODE_MARVELL; 591 data->phymode = PHY_MODE_MARVELL;
@@ -592,7 +596,7 @@ static int ax88178_reset(struct usbnet *dev)
592 data->ledmode = le16_to_cpu(eeprom) >> 8; 596 data->ledmode = le16_to_cpu(eeprom) >> 8;
593 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; 597 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
594 } 598 }
595 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); 599 netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
596 600
597 /* Power up external GigaPHY through AX88178 GPIO pin */ 601 /* Power up external GigaPHY through AX88178 GPIO pin */
598 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); 602 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
@@ -601,14 +605,14 @@ static int ax88178_reset(struct usbnet *dev)
601 asix_write_gpio(dev, 0x001c, 300); 605 asix_write_gpio(dev, 0x001c, 300);
602 asix_write_gpio(dev, 0x003c, 30); 606 asix_write_gpio(dev, 0x003c, 30);
603 } else { 607 } else {
604 dbg("gpio phymode == 1 path"); 608 netdev_dbg(dev->net, "gpio phymode == 1 path\n");
605 asix_write_gpio(dev, AX_GPIO_GPO1EN, 30); 609 asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
606 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); 610 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
607 } 611 }
608 612
609 /* Read PHYID register *AFTER* powering up PHY */ 613 /* Read PHYID register *AFTER* powering up PHY */
610 phyid = asix_get_phyid(dev); 614 phyid = asix_get_phyid(dev);
611 dbg("PHYID=0x%08x", phyid); 615 netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
612 616
613 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */ 617 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
614 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL); 618 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
@@ -770,7 +774,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
770 /* Get the MAC address */ 774 /* Get the MAC address */
771 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); 775 ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
772 if (ret < 0) { 776 if (ret < 0) {
773 dbg("Failed to read MAC address: %d", ret); 777 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
774 return ret; 778 return ret;
775 } 779 }
776 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 780 memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -930,6 +934,10 @@ static const struct usb_device_id products [] = {
930 USB_DEVICE (0x04f1, 0x3008), 934 USB_DEVICE (0x04f1, 0x3008),
931 .driver_info = (unsigned long) &ax8817x_info, 935 .driver_info = (unsigned long) &ax8817x_info,
932}, { 936}, {
937 // Lenovo U2L100P 10/100
938 USB_DEVICE (0x17ef, 0x7203),
939 .driver_info = (unsigned long) &ax88772_info,
940}, {
933 // ASIX AX88772B 10/100 941 // ASIX AX88772B 10/100
934 USB_DEVICE (0x0b95, 0x772b), 942 USB_DEVICE (0x0b95, 0x772b),
935 .driver_info = (unsigned long) &ax88772_info, 943 .driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 26c5bebd9eca..18d9579123ea 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -236,7 +236,8 @@ static void catc_rx_done(struct urb *urb)
236 } 236 }
237 237
238 if (status) { 238 if (status) {
239 dbg("rx_done, status %d, length %d", status, urb->actual_length); 239 dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
240 status, urb->actual_length);
240 return; 241 return;
241 } 242 }
242 243
@@ -275,10 +276,11 @@ static void catc_rx_done(struct urb *urb)
275 if (atomic_read(&catc->recq_sz)) { 276 if (atomic_read(&catc->recq_sz)) {
276 int state; 277 int state;
277 atomic_dec(&catc->recq_sz); 278 atomic_dec(&catc->recq_sz);
278 dbg("getting extra packet"); 279 netdev_dbg(catc->netdev, "getting extra packet\n");
279 urb->dev = catc->usbdev; 280 urb->dev = catc->usbdev;
280 if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { 281 if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
281 dbg("submit(rx_urb) status %d", state); 282 netdev_dbg(catc->netdev,
283 "submit(rx_urb) status %d\n", state);
282 } 284 }
283 } else { 285 } else {
284 clear_bit(RX_RUNNING, &catc->flags); 286 clear_bit(RX_RUNNING, &catc->flags);
@@ -317,18 +319,20 @@ static void catc_irq_done(struct urb *urb)
317 return; 319 return;
318 /* -EPIPE: should clear the halt */ 320 /* -EPIPE: should clear the halt */
319 default: /* error */ 321 default: /* error */
320 dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]); 322 dev_dbg(&urb->dev->dev,
323 "irq_done, status %d, data %02x %02x.\n",
324 status, data[0], data[1]);
321 goto resubmit; 325 goto resubmit;
322 } 326 }
323 327
324 if (linksts == LinkGood) { 328 if (linksts == LinkGood) {
325 netif_carrier_on(catc->netdev); 329 netif_carrier_on(catc->netdev);
326 dbg("link ok"); 330 netdev_dbg(catc->netdev, "link ok\n");
327 } 331 }
328 332
329 if (linksts == LinkBad) { 333 if (linksts == LinkBad) {
330 netif_carrier_off(catc->netdev); 334 netif_carrier_off(catc->netdev);
331 dbg("link bad"); 335 netdev_dbg(catc->netdev, "link bad\n");
332 } 336 }
333 337
334 if (hasdata) { 338 if (hasdata) {
@@ -385,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
385 int r, status = urb->status; 389 int r, status = urb->status;
386 390
387 if (status == -ECONNRESET) { 391 if (status == -ECONNRESET) {
388 dbg("Tx Reset."); 392 dev_dbg(&urb->dev->dev, "Tx Reset.\n");
389 urb->status = 0; 393 urb->status = 0;
390 catc->netdev->trans_start = jiffies; 394 catc->netdev->trans_start = jiffies;
391 catc->netdev->stats.tx_errors++; 395 catc->netdev->stats.tx_errors++;
@@ -395,7 +399,8 @@ static void catc_tx_done(struct urb *urb)
395 } 399 }
396 400
397 if (status) { 401 if (status) {
398 dbg("tx_done, status %d, length %d", status, urb->actual_length); 402 dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
403 status, urb->actual_length);
399 return; 404 return;
400 } 405 }
401 406
@@ -511,7 +516,8 @@ static void catc_ctrl_done(struct urb *urb)
511 int status = urb->status; 516 int status = urb->status;
512 517
513 if (status) 518 if (status)
514 dbg("ctrl_done, status %d, len %d.", status, urb->actual_length); 519 dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
520 status, urb->actual_length);
515 521
516 spin_lock_irqsave(&catc->ctrl_lock, flags); 522 spin_lock_irqsave(&catc->ctrl_lock, flags);
517 523
@@ -667,7 +673,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
667 f5u011_mchash_async(catc, catc->multicast); 673 f5u011_mchash_async(catc, catc->multicast);
668 if (catc->rxmode[0] != rx) { 674 if (catc->rxmode[0] != rx) {
669 catc->rxmode[0] = rx; 675 catc->rxmode[0] = rx;
670 dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]); 676 netdev_dbg(catc->netdev,
677 "Setting RX mode to %2.2X %2.2X\n",
678 catc->rxmode[0], catc->rxmode[1]);
671 f5u011_rxmode_async(catc, catc->rxmode); 679 f5u011_rxmode_async(catc, catc->rxmode);
672 } 680 }
673 } 681 }
@@ -766,6 +774,7 @@ static const struct net_device_ops catc_netdev_ops = {
766 774
767static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) 775static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
768{ 776{
777 struct device *dev = &intf->dev;
769 struct usb_device *usbdev = interface_to_usbdev(intf); 778 struct usb_device *usbdev = interface_to_usbdev(intf);
770 struct net_device *netdev; 779 struct net_device *netdev;
771 struct catc *catc; 780 struct catc *catc;
@@ -774,7 +783,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
774 783
775 if (usb_set_interface(usbdev, 784 if (usb_set_interface(usbdev,
776 intf->altsetting->desc.bInterfaceNumber, 1)) { 785 intf->altsetting->desc.bInterfaceNumber, 1)) {
777 dev_err(&intf->dev, "Can't set altsetting 1.\n"); 786 dev_err(dev, "Can't set altsetting 1.\n");
778 return -EIO; 787 return -EIO;
779 } 788 }
780 789
@@ -817,7 +826,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
817 if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && 826 if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
818 le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && 827 le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
819 le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { 828 le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
820 dbg("Testing for f5u011"); 829 dev_dbg(dev, "Testing for f5u011\n");
821 catc->is_f5u011 = 1; 830 catc->is_f5u011 = 1;
822 atomic_set(&catc->recq_sz, 0); 831 atomic_set(&catc->recq_sz, 0);
823 pktsz = RX_PKT_SZ; 832 pktsz = RX_PKT_SZ;
@@ -838,7 +847,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
838 catc->irq_buf, 2, catc_irq_done, catc, 1); 847 catc->irq_buf, 2, catc_irq_done, catc, 1);
839 848
840 if (!catc->is_f5u011) { 849 if (!catc->is_f5u011) {
841 dbg("Checking memory size\n"); 850 dev_dbg(dev, "Checking memory size\n");
842 851
843 i = 0x12345678; 852 i = 0x12345678;
844 catc_write_mem(catc, 0x7a80, &i, 4); 853 catc_write_mem(catc, 0x7a80, &i, 4);
@@ -850,7 +859,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
850 case 0x12345678: 859 case 0x12345678:
851 catc_set_reg(catc, TxBufCount, 8); 860 catc_set_reg(catc, TxBufCount, 8);
852 catc_set_reg(catc, RxBufCount, 32); 861 catc_set_reg(catc, RxBufCount, 32);
853 dbg("64k Memory\n"); 862 dev_dbg(dev, "64k Memory\n");
854 break; 863 break;
855 default: 864 default:
856 dev_warn(&intf->dev, 865 dev_warn(&intf->dev,
@@ -858,49 +867,49 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
858 case 0x87654321: 867 case 0x87654321:
859 catc_set_reg(catc, TxBufCount, 4); 868 catc_set_reg(catc, TxBufCount, 4);
860 catc_set_reg(catc, RxBufCount, 16); 869 catc_set_reg(catc, RxBufCount, 16);
861 dbg("32k Memory\n"); 870 dev_dbg(dev, "32k Memory\n");
862 break; 871 break;
863 } 872 }
864 873
865 dbg("Getting MAC from SEEROM."); 874 dev_dbg(dev, "Getting MAC from SEEROM.\n");
866 875
867 catc_get_mac(catc, netdev->dev_addr); 876 catc_get_mac(catc, netdev->dev_addr);
868 877
869 dbg("Setting MAC into registers."); 878 dev_dbg(dev, "Setting MAC into registers.\n");
870 879
871 for (i = 0; i < 6; i++) 880 for (i = 0; i < 6; i++)
872 catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); 881 catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
873 882
874 dbg("Filling the multicast list."); 883 dev_dbg(dev, "Filling the multicast list.\n");
875 884
876 memset(broadcast, 0xff, 6); 885 memset(broadcast, 0xff, 6);
877 catc_multicast(broadcast, catc->multicast); 886 catc_multicast(broadcast, catc->multicast);
878 catc_multicast(netdev->dev_addr, catc->multicast); 887 catc_multicast(netdev->dev_addr, catc->multicast);
879 catc_write_mem(catc, 0xfa80, catc->multicast, 64); 888 catc_write_mem(catc, 0xfa80, catc->multicast, 64);
880 889
881 dbg("Clearing error counters."); 890 dev_dbg(dev, "Clearing error counters.\n");
882 891
883 for (i = 0; i < 8; i++) 892 for (i = 0; i < 8; i++)
884 catc_set_reg(catc, EthStats + i, 0); 893 catc_set_reg(catc, EthStats + i, 0);
885 catc->last_stats = jiffies; 894 catc->last_stats = jiffies;
886 895
887 dbg("Enabling."); 896 dev_dbg(dev, "Enabling.\n");
888 897
889 catc_set_reg(catc, MaxBurst, RX_MAX_BURST); 898 catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
890 catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); 899 catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
891 catc_set_reg(catc, LEDCtrl, LEDLink); 900 catc_set_reg(catc, LEDCtrl, LEDLink);
892 catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); 901 catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
893 } else { 902 } else {
894 dbg("Performing reset\n"); 903 dev_dbg(dev, "Performing reset\n");
895 catc_reset(catc); 904 catc_reset(catc);
896 catc_get_mac(catc, netdev->dev_addr); 905 catc_get_mac(catc, netdev->dev_addr);
897 906
898 dbg("Setting RX Mode"); 907 dev_dbg(dev, "Setting RX Mode\n");
899 catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; 908 catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
900 catc->rxmode[1] = 0; 909 catc->rxmode[1] = 0;
901 f5u011_rxmode(catc, catc->rxmode); 910 f5u011_rxmode(catc, catc->rxmode);
902 } 911 }
903 dbg("Init done."); 912 dev_dbg(dev, "Init done.\n");
904 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", 913 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
905 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", 914 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
906 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); 915 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 49ab45e17fe8..1e207f086b75 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -302,18 +302,9 @@ static const struct driver_info cx82310_info = {
302 .tx_fixup = cx82310_tx_fixup, 302 .tx_fixup = cx82310_tx_fixup,
303}; 303};
304 304
305#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
306 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
307 USB_DEVICE_ID_MATCH_DEV_INFO, \
308 .idVendor = (vend), \
309 .idProduct = (prod), \
310 .bDeviceClass = (cl), \
311 .bDeviceSubClass = (sc), \
312 .bDeviceProtocol = (pr)
313
314static const struct usb_device_id products[] = { 305static const struct usb_device_id products[] = {
315 { 306 {
316 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), 307 USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
317 .driver_info = (unsigned long) &cx82310_info 308 .driver_info = (unsigned long) &cx82310_info
318 }, 309 },
319 { }, 310 { },
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index db3c8021f2a3..a7e3f4e55bf3 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -91,7 +91,9 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
91 // get the packet count of the received skb 91 // get the packet count of the received skb
92 count = le32_to_cpu(header->packet_count); 92 count = le32_to_cpu(header->packet_count);
93 if (count > GL_MAX_TRANSMIT_PACKETS) { 93 if (count > GL_MAX_TRANSMIT_PACKETS) {
94 dbg("genelink: invalid received packet count %u", count); 94 netdev_dbg(dev->net,
95 "genelink: invalid received packet count %u\n",
96 count);
95 return 0; 97 return 0;
96 } 98 }
97 99
@@ -107,7 +109,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
107 109
108 // this may be a broken packet 110 // this may be a broken packet
109 if (size > GL_MAX_PACKET_LEN) { 111 if (size > GL_MAX_PACKET_LEN) {
110 dbg("genelink: invalid rx length %d", size); 112 netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
113 size);
111 return 0; 114 return 0;
112 } 115 }
113 116
@@ -133,7 +136,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
133 skb_pull(skb, 4); 136 skb_pull(skb, 4);
134 137
135 if (skb->len > GL_MAX_PACKET_LEN) { 138 if (skb->len > GL_MAX_PACKET_LEN) {
136 dbg("genelink: invalid rx length %d", skb->len); 139 netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
140 skb->len);
137 return 0; 141 return 0;
138 } 142 }
139 return 1; 143 return 1;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c3d03490c97d..c75e11e1b385 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -267,19 +267,16 @@ static int kaweth_control(struct kaweth_device *kaweth,
267 struct usb_ctrlrequest *dr; 267 struct usb_ctrlrequest *dr;
268 int retval; 268 int retval;
269 269
270 dbg("kaweth_control()"); 270 netdev_dbg(kaweth->net, "kaweth_control()\n");
271 271
272 if(in_interrupt()) { 272 if(in_interrupt()) {
273 dbg("in_interrupt()"); 273 netdev_dbg(kaweth->net, "in_interrupt()\n");
274 return -EBUSY; 274 return -EBUSY;
275 } 275 }
276 276
277 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); 277 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
278 278 if (!dr)
279 if (!dr) {
280 dbg("kmalloc() failed");
281 return -ENOMEM; 279 return -ENOMEM;
282 }
283 280
284 dr->bRequestType = requesttype; 281 dr->bRequestType = requesttype;
285 dr->bRequest = request; 282 dr->bRequest = request;
@@ -305,7 +302,7 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
305{ 302{
306 int retval; 303 int retval;
307 304
308 dbg("Reading kaweth configuration"); 305 netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
309 306
310 retval = kaweth_control(kaweth, 307 retval = kaweth_control(kaweth,
311 usb_rcvctrlpipe(kaweth->dev, 0), 308 usb_rcvctrlpipe(kaweth->dev, 0),
@@ -327,7 +324,7 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
327{ 324{
328 int retval; 325 int retval;
329 326
330 dbg("Setting URB size to %d", (unsigned)urb_size); 327 netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
331 328
332 retval = kaweth_control(kaweth, 329 retval = kaweth_control(kaweth,
333 usb_sndctrlpipe(kaweth->dev, 0), 330 usb_sndctrlpipe(kaweth->dev, 0),
@@ -349,7 +346,7 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
349{ 346{
350 int retval; 347 int retval;
351 348
352 dbg("Set SOFS wait to %d", (unsigned)sofs_wait); 349 netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
353 350
354 retval = kaweth_control(kaweth, 351 retval = kaweth_control(kaweth,
355 usb_sndctrlpipe(kaweth->dev, 0), 352 usb_sndctrlpipe(kaweth->dev, 0),
@@ -372,7 +369,8 @@ static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
372{ 369{
373 int retval; 370 int retval;
374 371
375 dbg("Set receive filter to %d", (unsigned)receive_filter); 372 netdev_dbg(kaweth->net, "Set receive filter to %d\n",
373 (unsigned)receive_filter);
376 374
377 retval = kaweth_control(kaweth, 375 retval = kaweth_control(kaweth,
378 usb_sndctrlpipe(kaweth->dev, 0), 376 usb_sndctrlpipe(kaweth->dev, 0),
@@ -421,12 +419,13 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
421 kaweth->firmware_buf[4] = type; 419 kaweth->firmware_buf[4] = type;
422 kaweth->firmware_buf[5] = interrupt; 420 kaweth->firmware_buf[5] = interrupt;
423 421
424 dbg("High: %i, Low:%i", kaweth->firmware_buf[3], 422 netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3],
425 kaweth->firmware_buf[2]); 423 kaweth->firmware_buf[2]);
426 424
427 dbg("Downloading firmware at %p to kaweth device at %p", 425 netdev_dbg(kaweth->net,
428 fw->data, kaweth); 426 "Downloading firmware at %p to kaweth device at %p\n",
429 dbg("Firmware length: %d", data_len); 427 fw->data, kaweth);
428 netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
430 429
431 return kaweth_control(kaweth, 430 return kaweth_control(kaweth,
432 usb_sndctrlpipe(kaweth->dev, 0), 431 usb_sndctrlpipe(kaweth->dev, 0),
@@ -454,7 +453,7 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
454 kaweth->firmware_buf[6] = 0x00; 453 kaweth->firmware_buf[6] = 0x00;
455 kaweth->firmware_buf[7] = 0x00; 454 kaweth->firmware_buf[7] = 0x00;
456 455
457 dbg("Triggering firmware"); 456 netdev_dbg(kaweth->net, "Triggering firmware\n");
458 457
459 return kaweth_control(kaweth, 458 return kaweth_control(kaweth,
460 usb_sndctrlpipe(kaweth->dev, 0), 459 usb_sndctrlpipe(kaweth->dev, 0),
@@ -474,11 +473,11 @@ static int kaweth_reset(struct kaweth_device *kaweth)
474{ 473{
475 int result; 474 int result;
476 475
477 dbg("kaweth_reset(%p)", kaweth); 476 netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
478 result = usb_reset_configuration(kaweth->dev); 477 result = usb_reset_configuration(kaweth->dev);
479 mdelay(10); 478 mdelay(10);
480 479
481 dbg("kaweth_reset() returns %d.",result); 480 netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result);
482 481
483 return result; 482 return result;
484} 483}
@@ -595,6 +594,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
595 ****************************************************************/ 594 ****************************************************************/
596static void kaweth_usb_receive(struct urb *urb) 595static void kaweth_usb_receive(struct urb *urb)
597{ 596{
597 struct device *dev = &urb->dev->dev;
598 struct kaweth_device *kaweth = urb->context; 598 struct kaweth_device *kaweth = urb->context;
599 struct net_device *net = kaweth->net; 599 struct net_device *net = kaweth->net;
600 int status = urb->status; 600 int status = urb->status;
@@ -610,25 +610,25 @@ static void kaweth_usb_receive(struct urb *urb)
610 kaweth->stats.rx_errors++; 610 kaweth->stats.rx_errors++;
611 kaweth->end = 1; 611 kaweth->end = 1;
612 wake_up(&kaweth->term_wait); 612 wake_up(&kaweth->term_wait);
613 dbg("Status was -EPIPE."); 613 dev_dbg(dev, "Status was -EPIPE.\n");
614 return; 614 return;
615 } 615 }
616 if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { 616 if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
617 /* we are killed - set a flag and wake the disconnect handler */ 617 /* we are killed - set a flag and wake the disconnect handler */
618 kaweth->end = 1; 618 kaweth->end = 1;
619 wake_up(&kaweth->term_wait); 619 wake_up(&kaweth->term_wait);
620 dbg("Status was -ECONNRESET or -ESHUTDOWN."); 620 dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n");
621 return; 621 return;
622 } 622 }
623 if (unlikely(status == -EPROTO || status == -ETIME || 623 if (unlikely(status == -EPROTO || status == -ETIME ||
624 status == -EILSEQ)) { 624 status == -EILSEQ)) {
625 kaweth->stats.rx_errors++; 625 kaweth->stats.rx_errors++;
626 dbg("Status was -EPROTO, -ETIME, or -EILSEQ."); 626 dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
627 return; 627 return;
628 } 628 }
629 if (unlikely(status == -EOVERFLOW)) { 629 if (unlikely(status == -EOVERFLOW)) {
630 kaweth->stats.rx_errors++; 630 kaweth->stats.rx_errors++;
631 dbg("Status was -EOVERFLOW."); 631 dev_dbg(dev, "Status was -EOVERFLOW.\n");
632 } 632 }
633 spin_lock(&kaweth->device_lock); 633 spin_lock(&kaweth->device_lock);
634 if (IS_BLOCKED(kaweth->status)) { 634 if (IS_BLOCKED(kaweth->status)) {
@@ -687,7 +687,7 @@ static int kaweth_open(struct net_device *net)
687 struct kaweth_device *kaweth = netdev_priv(net); 687 struct kaweth_device *kaweth = netdev_priv(net);
688 int res; 688 int res;
689 689
690 dbg("Opening network device."); 690 netdev_dbg(kaweth->net, "Opening network device.\n");
691 691
692 res = usb_autopm_get_interface(kaweth->intf); 692 res = usb_autopm_get_interface(kaweth->intf);
693 if (res) { 693 if (res) {
@@ -787,7 +787,8 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
787 787
788 if (unlikely(status != 0)) 788 if (unlikely(status != 0))
789 if (status != -ENOENT) 789 if (status != -ENOENT)
790 dbg("%s: TX status %d.", kaweth->net->name, status); 790 dev_dbg(&urb->dev->dev, "%s: TX status %d.\n",
791 kaweth->net->name, status);
791 792
792 netif_wake_queue(kaweth->net); 793 netif_wake_queue(kaweth->net);
793 dev_kfree_skb_irq(skb); 794 dev_kfree_skb_irq(skb);
@@ -871,7 +872,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
871 KAWETH_PACKET_FILTER_BROADCAST | 872 KAWETH_PACKET_FILTER_BROADCAST |
872 KAWETH_PACKET_FILTER_MULTICAST; 873 KAWETH_PACKET_FILTER_MULTICAST;
873 874
874 dbg("Setting Rx mode to %d", packet_filter_bitmap); 875 netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap);
875 876
876 netif_stop_queue(net); 877 netif_stop_queue(net);
877 878
@@ -916,7 +917,8 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
916 result); 917 result);
917 } 918 }
918 else { 919 else {
919 dbg("Set Rx mode to %d", packet_filter_bitmap); 920 netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
921 packet_filter_bitmap);
920 } 922 }
921} 923}
922 924
@@ -951,7 +953,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
951 struct kaweth_device *kaweth = usb_get_intfdata(intf); 953 struct kaweth_device *kaweth = usb_get_intfdata(intf);
952 unsigned long flags; 954 unsigned long flags;
953 955
954 dbg("Suspending device"); 956 dev_dbg(&intf->dev, "Suspending device\n");
955 spin_lock_irqsave(&kaweth->device_lock, flags); 957 spin_lock_irqsave(&kaweth->device_lock, flags);
956 kaweth->status |= KAWETH_STATUS_SUSPENDING; 958 kaweth->status |= KAWETH_STATUS_SUSPENDING;
957 spin_unlock_irqrestore(&kaweth->device_lock, flags); 959 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +970,7 @@ static int kaweth_resume(struct usb_interface *intf)
968 struct kaweth_device *kaweth = usb_get_intfdata(intf); 970 struct kaweth_device *kaweth = usb_get_intfdata(intf);
969 unsigned long flags; 971 unsigned long flags;
970 972
971 dbg("Resuming device"); 973 dev_dbg(&intf->dev, "Resuming device\n");
972 spin_lock_irqsave(&kaweth->device_lock, flags); 974 spin_lock_irqsave(&kaweth->device_lock, flags);
973 kaweth->status &= ~KAWETH_STATUS_SUSPENDING; 975 kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
974 spin_unlock_irqrestore(&kaweth->device_lock, flags); 976 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1003,36 +1005,37 @@ static int kaweth_probe(
1003 const struct usb_device_id *id /* from id_table */ 1005 const struct usb_device_id *id /* from id_table */
1004 ) 1006 )
1005{ 1007{
1006 struct usb_device *dev = interface_to_usbdev(intf); 1008 struct device *dev = &intf->dev;
1009 struct usb_device *udev = interface_to_usbdev(intf);
1007 struct kaweth_device *kaweth; 1010 struct kaweth_device *kaweth;
1008 struct net_device *netdev; 1011 struct net_device *netdev;
1009 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1012 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1010 int result = 0; 1013 int result = 0;
1011 1014
1012 dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x", 1015 dev_dbg(dev,
1013 dev->devnum, 1016 "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
1014 le16_to_cpu(dev->descriptor.idVendor), 1017 udev->devnum, le16_to_cpu(udev->descriptor.idVendor),
1015 le16_to_cpu(dev->descriptor.idProduct), 1018 le16_to_cpu(udev->descriptor.idProduct),
1016 le16_to_cpu(dev->descriptor.bcdDevice)); 1019 le16_to_cpu(udev->descriptor.bcdDevice));
1017 1020
1018 dbg("Device at %p", dev); 1021 dev_dbg(dev, "Device at %p\n", udev);
1019 1022
1020 dbg("Descriptor length: %x type: %x", 1023 dev_dbg(dev, "Descriptor length: %x type: %x\n",
1021 (int)dev->descriptor.bLength, 1024 (int)udev->descriptor.bLength,
1022 (int)dev->descriptor.bDescriptorType); 1025 (int)udev->descriptor.bDescriptorType);
1023 1026
1024 netdev = alloc_etherdev(sizeof(*kaweth)); 1027 netdev = alloc_etherdev(sizeof(*kaweth));
1025 if (!netdev) 1028 if (!netdev)
1026 return -ENOMEM; 1029 return -ENOMEM;
1027 1030
1028 kaweth = netdev_priv(netdev); 1031 kaweth = netdev_priv(netdev);
1029 kaweth->dev = dev; 1032 kaweth->dev = udev;
1030 kaweth->net = netdev; 1033 kaweth->net = netdev;
1031 1034
1032 spin_lock_init(&kaweth->device_lock); 1035 spin_lock_init(&kaweth->device_lock);
1033 init_waitqueue_head(&kaweth->term_wait); 1036 init_waitqueue_head(&kaweth->term_wait);
1034 1037
1035 dbg("Resetting."); 1038 dev_dbg(dev, "Resetting.\n");
1036 1039
1037 kaweth_reset(kaweth); 1040 kaweth_reset(kaweth);
1038 1041
@@ -1041,17 +1044,17 @@ static int kaweth_probe(
1041 * downloaded. Don't try to do it again, or we'll hang the device. 1044 * downloaded. Don't try to do it again, or we'll hang the device.
1042 */ 1045 */
1043 1046
1044 if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) { 1047 if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) {
1045 dev_info(&intf->dev, "Firmware present in device.\n"); 1048 dev_info(dev, "Firmware present in device.\n");
1046 } else { 1049 } else {
1047 /* Download the firmware */ 1050 /* Download the firmware */
1048 dev_info(&intf->dev, "Downloading firmware...\n"); 1051 dev_info(dev, "Downloading firmware...\n");
1049 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); 1052 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
1050 if ((result = kaweth_download_firmware(kaweth, 1053 if ((result = kaweth_download_firmware(kaweth,
1051 "kaweth/new_code.bin", 1054 "kaweth/new_code.bin",
1052 100, 1055 100,
1053 2)) < 0) { 1056 2)) < 0) {
1054 dev_err(&intf->dev, "Error downloading firmware (%d)\n", 1057 dev_err(dev, "Error downloading firmware (%d)\n",
1055 result); 1058 result);
1056 goto err_fw; 1059 goto err_fw;
1057 } 1060 }
@@ -1060,8 +1063,7 @@ static int kaweth_probe(
1060 "kaweth/new_code_fix.bin", 1063 "kaweth/new_code_fix.bin",
1061 100, 1064 100,
1062 3)) < 0) { 1065 3)) < 0) {
1063 dev_err(&intf->dev, 1066 dev_err(dev, "Error downloading firmware fix (%d)\n",
1064 "Error downloading firmware fix (%d)\n",
1065 result); 1067 result);
1066 goto err_fw; 1068 goto err_fw;
1067 } 1069 }
@@ -1070,8 +1072,7 @@ static int kaweth_probe(
1070 "kaweth/trigger_code.bin", 1072 "kaweth/trigger_code.bin",
1071 126, 1073 126,
1072 2)) < 0) { 1074 2)) < 0) {
1073 dev_err(&intf->dev, 1075 dev_err(dev, "Error downloading trigger code (%d)\n",
1074 "Error downloading trigger code (%d)\n",
1075 result); 1076 result);
1076 goto err_fw; 1077 goto err_fw;
1077 1078
@@ -1081,19 +1082,18 @@ static int kaweth_probe(
1081 "kaweth/trigger_code_fix.bin", 1082 "kaweth/trigger_code_fix.bin",
1082 126, 1083 126,
1083 3)) < 0) { 1084 3)) < 0) {
1084 dev_err(&intf->dev, "Error downloading trigger code fix (%d)\n", result); 1085 dev_err(dev, "Error downloading trigger code fix (%d)\n", result);
1085 goto err_fw; 1086 goto err_fw;
1086 } 1087 }
1087 1088
1088 1089
1089 if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) { 1090 if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
1090 dev_err(&intf->dev, "Error triggering firmware (%d)\n", 1091 dev_err(dev, "Error triggering firmware (%d)\n", result);
1091 result);
1092 goto err_fw; 1092 goto err_fw;
1093 } 1093 }
1094 1094
1095 /* Device will now disappear for a moment... */ 1095 /* Device will now disappear for a moment... */
1096 dev_info(&intf->dev, "Firmware loaded. I'll be back...\n"); 1096 dev_info(dev, "Firmware loaded. I'll be back...\n");
1097err_fw: 1097err_fw:
1098 free_page((unsigned long)kaweth->firmware_buf); 1098 free_page((unsigned long)kaweth->firmware_buf);
1099 free_netdev(netdev); 1099 free_netdev(netdev);
@@ -1103,29 +1103,29 @@ err_fw:
1103 result = kaweth_read_configuration(kaweth); 1103 result = kaweth_read_configuration(kaweth);
1104 1104
1105 if(result < 0) { 1105 if(result < 0) {
1106 dev_err(&intf->dev, "Error reading configuration (%d), no net device created\n", result); 1106 dev_err(dev, "Error reading configuration (%d), no net device created\n", result);
1107 goto err_free_netdev; 1107 goto err_free_netdev;
1108 } 1108 }
1109 1109
1110 dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); 1110 dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
1111 dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); 1111 dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
1112 dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); 1112 dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
1113 dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); 1113 dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
1114 1114
1115 if(!memcmp(&kaweth->configuration.hw_addr, 1115 if(!memcmp(&kaweth->configuration.hw_addr,
1116 &bcast_addr, 1116 &bcast_addr,
1117 sizeof(bcast_addr))) { 1117 sizeof(bcast_addr))) {
1118 dev_err(&intf->dev, "Firmware not functioning properly, no net device created\n"); 1118 dev_err(dev, "Firmware not functioning properly, no net device created\n");
1119 goto err_free_netdev; 1119 goto err_free_netdev;
1120 } 1120 }
1121 1121
1122 if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) { 1122 if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
1123 dbg("Error setting URB size"); 1123 dev_dbg(dev, "Error setting URB size\n");
1124 goto err_free_netdev; 1124 goto err_free_netdev;
1125 } 1125 }
1126 1126
1127 if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) { 1127 if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
1128 dev_err(&intf->dev, "Error setting SOFS wait\n"); 1128 dev_err(dev, "Error setting SOFS wait\n");
1129 goto err_free_netdev; 1129 goto err_free_netdev;
1130 } 1130 }
1131 1131
@@ -1135,11 +1135,11 @@ err_fw:
1135 KAWETH_PACKET_FILTER_MULTICAST); 1135 KAWETH_PACKET_FILTER_MULTICAST);
1136 1136
1137 if(result < 0) { 1137 if(result < 0) {
1138 dev_err(&intf->dev, "Error setting receive filter\n"); 1138 dev_err(dev, "Error setting receive filter\n");
1139 goto err_free_netdev; 1139 goto err_free_netdev;
1140 } 1140 }
1141 1141
1142 dbg("Initializing net device."); 1142 dev_dbg(dev, "Initializing net device.\n");
1143 1143
1144 kaweth->intf = intf; 1144 kaweth->intf = intf;
1145 1145
@@ -1181,20 +1181,20 @@ err_fw:
1181 1181
1182#if 0 1182#if 0
1183// dma_supported() is deeply broken on almost all architectures 1183// dma_supported() is deeply broken on almost all architectures
1184 if (dma_supported (&intf->dev, 0xffffffffffffffffULL)) 1184 if (dma_supported (dev, 0xffffffffffffffffULL))
1185 kaweth->net->features |= NETIF_F_HIGHDMA; 1185 kaweth->net->features |= NETIF_F_HIGHDMA;
1186#endif 1186#endif
1187 1187
1188 SET_NETDEV_DEV(netdev, &intf->dev); 1188 SET_NETDEV_DEV(netdev, dev);
1189 if (register_netdev(netdev) != 0) { 1189 if (register_netdev(netdev) != 0) {
1190 dev_err(&intf->dev, "Error registering netdev.\n"); 1190 dev_err(dev, "Error registering netdev.\n");
1191 goto err_intfdata; 1191 goto err_intfdata;
1192 } 1192 }
1193 1193
1194 dev_info(&intf->dev, "kaweth interface created at %s\n", 1194 dev_info(dev, "kaweth interface created at %s\n",
1195 kaweth->net->name); 1195 kaweth->net->name);
1196 1196
1197 dbg("Kaweth probe returning."); 1197 dev_dbg(dev, "Kaweth probe returning.\n");
1198 1198
1199 return 0; 1199 return 0;
1200 1200
@@ -1232,7 +1232,7 @@ static void kaweth_disconnect(struct usb_interface *intf)
1232 } 1232 }
1233 netdev = kaweth->net; 1233 netdev = kaweth->net;
1234 1234
1235 dbg("Unregistering net device"); 1235 netdev_dbg(kaweth->net, "Unregistering net device\n");
1236 unregister_netdev(netdev); 1236 unregister_netdev(netdev);
1237 1237
1238 usb_free_urb(kaweth->rx_urb); 1238 usb_free_urb(kaweth->rx_urb);
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 28c4d513ba85..c062a3e8295c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -155,12 +155,10 @@ static void nc_dump_registers(struct usbnet *dev)
155 u8 reg; 155 u8 reg;
156 u16 *vp = kmalloc(sizeof (u16)); 156 u16 *vp = kmalloc(sizeof (u16));
157 157
158 if (!vp) { 158 if (!vp)
159 dbg("no memory?");
160 return; 159 return;
161 }
162 160
163 dbg("%s registers:", dev->net->name); 161 netdev_dbg(dev->net, "registers:\n");
164 for (reg = 0; reg < 0x20; reg++) { 162 for (reg = 0; reg < 0x20; reg++) {
165 int retval; 163 int retval;
166 164
@@ -172,11 +170,10 @@ static void nc_dump_registers(struct usbnet *dev)
172 170
173 retval = nc_register_read(dev, reg, vp); 171 retval = nc_register_read(dev, reg, vp);
174 if (retval < 0) 172 if (retval < 0)
175 dbg("%s reg [0x%x] ==> error %d", 173 netdev_dbg(dev->net, "reg [0x%x] ==> error %d\n",
176 dev->net->name, reg, retval); 174 reg, retval);
177 else 175 else
178 dbg("%s reg [0x%x] = 0x%x", 176 netdev_dbg(dev->net, "reg [0x%x] = 0x%x\n", reg, *vp);
179 dev->net->name, reg, *vp);
180 } 177 }
181 kfree(vp); 178 kfree(vp);
182} 179}
@@ -300,15 +297,15 @@ static int net1080_reset(struct usbnet *dev)
300 // nc_dump_registers(dev); 297 // nc_dump_registers(dev);
301 298
302 if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) { 299 if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
303 dbg("can't read %s-%s status: %d", 300 netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
304 dev->udev->bus->bus_name, dev->udev->devpath, retval); 301 dev->udev->bus->bus_name, dev->udev->devpath, retval);
305 goto done; 302 goto done;
306 } 303 }
307 status = *vp; 304 status = *vp;
308 nc_dump_status(dev, status); 305 nc_dump_status(dev, status);
309 306
310 if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) { 307 if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
311 dbg("can't read USBCTL, %d", retval); 308 netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
312 goto done; 309 goto done;
313 } 310 }
314 usbctl = *vp; 311 usbctl = *vp;
@@ -318,7 +315,7 @@ static int net1080_reset(struct usbnet *dev)
318 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER); 315 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
319 316
320 if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) { 317 if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
321 dbg("can't read TTL, %d", retval); 318 netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
322 goto done; 319 goto done;
323 } 320 }
324 ttl = *vp; 321 ttl = *vp;
@@ -326,7 +323,7 @@ static int net1080_reset(struct usbnet *dev)
326 323
327 nc_register_write(dev, REG_TTL, 324 nc_register_write(dev, REG_TTL,
328 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) ); 325 MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
329 dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS); 326 netdev_dbg(dev->net, "assigned TTL, %d ms\n", NC_READ_TTL_MS);
330 327
331 netif_info(dev, link, dev->net, "port %c, peer %sconnected\n", 328 netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
332 (status & STATUS_PORT_A) ? 'A' : 'B', 329 (status & STATUS_PORT_A) ? 'A' : 'B',
@@ -350,7 +347,7 @@ static int net1080_check_connect(struct usbnet *dev)
350 status = *vp; 347 status = *vp;
351 kfree(vp); 348 kfree(vp);
352 if (retval != 0) { 349 if (retval != 0) {
353 dbg("%s net1080_check_conn read - %d", dev->net->name, retval); 350 netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
354 return retval; 351 return retval;
355 } 352 }
356 if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER) 353 if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
@@ -420,11 +417,9 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
420 u16 hdr_len, packet_len; 417 u16 hdr_len, packet_len;
421 418
422 if (!(skb->len & 0x01)) { 419 if (!(skb->len & 0x01)) {
423#ifdef DEBUG 420 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
424 struct net_device *net = dev->net; 421 skb->len, dev->net->hard_header_len, dev->hard_mtu,
425 dbg("rx framesize %d range %d..%d mtu %d", skb->len, 422 dev->net->mtu);
426 net->hard_header_len, dev->hard_mtu, net->mtu);
427#endif
428 dev->net->stats.rx_frame_errors++; 423 dev->net->stats.rx_frame_errors++;
429 nc_ensure_sync(dev); 424 nc_ensure_sync(dev);
430 return 0; 425 return 0;
@@ -435,17 +430,17 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
435 packet_len = le16_to_cpup(&header->packet_len); 430 packet_len = le16_to_cpup(&header->packet_len);
436 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { 431 if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
437 dev->net->stats.rx_frame_errors++; 432 dev->net->stats.rx_frame_errors++;
438 dbg("packet too big, %d", packet_len); 433 netdev_dbg(dev->net, "packet too big, %d\n", packet_len);
439 nc_ensure_sync(dev); 434 nc_ensure_sync(dev);
440 return 0; 435 return 0;
441 } else if (hdr_len < MIN_HEADER) { 436 } else if (hdr_len < MIN_HEADER) {
442 dev->net->stats.rx_frame_errors++; 437 dev->net->stats.rx_frame_errors++;
443 dbg("header too short, %d", hdr_len); 438 netdev_dbg(dev->net, "header too short, %d\n", hdr_len);
444 nc_ensure_sync(dev); 439 nc_ensure_sync(dev);
445 return 0; 440 return 0;
446 } else if (hdr_len > MIN_HEADER) { 441 } else if (hdr_len > MIN_HEADER) {
447 // out of band data for us? 442 // out of band data for us?
448 dbg("header OOB, %d bytes", hdr_len - MIN_HEADER); 443 netdev_dbg(dev->net, "header OOB, %d bytes\n", hdr_len - MIN_HEADER);
449 nc_ensure_sync(dev); 444 nc_ensure_sync(dev);
450 // switch (vendor/product ids) { ... } 445 // switch (vendor/product ids) { ... }
451 } 446 }
@@ -458,23 +453,23 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
458 if ((packet_len & 0x01) == 0) { 453 if ((packet_len & 0x01) == 0) {
459 if (skb->data [packet_len] != PAD_BYTE) { 454 if (skb->data [packet_len] != PAD_BYTE) {
460 dev->net->stats.rx_frame_errors++; 455 dev->net->stats.rx_frame_errors++;
461 dbg("bad pad"); 456 netdev_dbg(dev->net, "bad pad\n");
462 return 0; 457 return 0;
463 } 458 }
464 skb_trim(skb, skb->len - 1); 459 skb_trim(skb, skb->len - 1);
465 } 460 }
466 if (skb->len != packet_len) { 461 if (skb->len != packet_len) {
467 dev->net->stats.rx_frame_errors++; 462 dev->net->stats.rx_frame_errors++;
468 dbg("bad packet len %d (expected %d)", 463 netdev_dbg(dev->net, "bad packet len %d (expected %d)\n",
469 skb->len, packet_len); 464 skb->len, packet_len);
470 nc_ensure_sync(dev); 465 nc_ensure_sync(dev);
471 return 0; 466 return 0;
472 } 467 }
473 if (header->packet_id != get_unaligned(&trailer->packet_id)) { 468 if (header->packet_id != get_unaligned(&trailer->packet_id)) {
474 dev->net->stats.rx_fifo_errors++; 469 dev->net->stats.rx_fifo_errors++;
475 dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", 470 netdev_dbg(dev->net, "(2+ dropped) rx packet_id mismatch 0x%x 0x%x\n",
476 le16_to_cpu(header->packet_id), 471 le16_to_cpu(header->packet_id),
477 le16_to_cpu(trailer->packet_id)); 472 le16_to_cpu(trailer->packet_id));
478 return 0; 473 return 0;
479 } 474 }
480#if 0 475#if 0
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3543c9e57824..6883c371c59f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
108 atomic_set(&info->pmcount, 0); 108 atomic_set(&info->pmcount, 0);
109 109
110 /* register subdriver */ 110 /* register subdriver */
111 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); 111 subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
112 if (IS_ERR(subdriver)) { 112 if (IS_ERR(subdriver)) {
113 dev_err(&info->control->dev, "subdriver registration failed\n"); 113 dev_err(&info->control->dev, "subdriver registration failed\n");
114 rv = PTR_ERR(subdriver); 114 rv = PTR_ERR(subdriver);
@@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
139 139
140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
141 141
142 /* require a single interrupt status endpoint for subdriver */ 142 /* control and data is shared? */
143 if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
144 info->control = intf;
145 info->data = intf;
146 goto shared;
147 }
148
149 /* else require a single interrupt status endpoint on control intf */
143 if (intf->cur_altsetting->desc.bNumEndpoints != 1) 150 if (intf->cur_altsetting->desc.bNumEndpoints != 1)
144 goto err; 151 goto err;
145 152
153 /* and a number of CDC descriptors */
146 while (len > 3) { 154 while (len > 3) {
147 struct usb_descriptor_header *h = (void *)buf; 155 struct usb_descriptor_header *h = (void *)buf;
148 156
@@ -231,8 +239,9 @@ next_desc:
231 if (status < 0) 239 if (status < 0)
232 goto err; 240 goto err;
233 241
242shared:
234 status = qmi_wwan_register_subdriver(dev); 243 status = qmi_wwan_register_subdriver(dev);
235 if (status < 0) { 244 if (status < 0 && info->control != info->data) {
236 usb_set_intfdata(info->data, NULL); 245 usb_set_intfdata(info->data, NULL);
237 usb_driver_release_interface(driver, info->data); 246 usb_driver_release_interface(driver, info->data);
238 } 247 }
@@ -241,20 +250,6 @@ err:
241 return status; 250 return status;
242} 251}
243 252
244/* Some devices combine the "control" and "data" functions into a
245 * single interface with all three endpoints: interrupt + bulk in and
246 * out
247 */
248static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
249{
250 struct qmi_wwan_state *info = (void *)&dev->data;
251
252 /* control and data is shared */
253 info->control = intf;
254 info->data = intf;
255 return qmi_wwan_register_subdriver(dev);
256}
257
258static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) 253static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
259{ 254{
260 struct qmi_wwan_state *info = (void *)&dev->data; 255 struct qmi_wwan_state *info = (void *)&dev->data;
@@ -331,20 +326,12 @@ static const struct driver_info qmi_wwan_info = {
331 .manage_power = qmi_wwan_manage_power, 326 .manage_power = qmi_wwan_manage_power,
332}; 327};
333 328
334static const struct driver_info qmi_wwan_shared = {
335 .description = "WWAN/QMI device",
336 .flags = FLAG_WWAN,
337 .bind = qmi_wwan_bind_shared,
338 .unbind = qmi_wwan_unbind,
339 .manage_power = qmi_wwan_manage_power,
340};
341
342#define HUAWEI_VENDOR_ID 0x12D1 329#define HUAWEI_VENDOR_ID 0x12D1
343 330
344/* map QMI/wwan function by a fixed interface number */ 331/* map QMI/wwan function by a fixed interface number */
345#define QMI_FIXED_INTF(vend, prod, num) \ 332#define QMI_FIXED_INTF(vend, prod, num) \
346 USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ 333 USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
347 .driver_info = (unsigned long)&qmi_wwan_shared 334 .driver_info = (unsigned long)&qmi_wwan_info
348 335
349/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ 336/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
350#define QMI_GOBI1K_DEVICE(vend, prod) \ 337#define QMI_GOBI1K_DEVICE(vend, prod) \
@@ -372,15 +359,15 @@ static const struct usb_device_id products[] = {
372 }, 359 },
373 { /* Huawei E392, E398 and possibly others in "Windows mode" */ 360 { /* Huawei E392, E398 and possibly others in "Windows mode" */
374 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), 361 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
375 .driver_info = (unsigned long)&qmi_wwan_shared, 362 .driver_info = (unsigned long)&qmi_wwan_info,
376 }, 363 },
377 { /* Pantech UML290, P4200 and more */ 364 { /* Pantech UML290, P4200 and more */
378 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), 365 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
379 .driver_info = (unsigned long)&qmi_wwan_shared, 366 .driver_info = (unsigned long)&qmi_wwan_info,
380 }, 367 },
381 { /* Pantech UML290 - newer firmware */ 368 { /* Pantech UML290 - newer firmware */
382 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), 369 USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
383 .driver_info = (unsigned long)&qmi_wwan_shared, 370 .driver_info = (unsigned long)&qmi_wwan_info,
384 }, 371 },
385 372
386 /* 3. Combined interface devices matching on interface number */ 373 /* 3. Combined interface devices matching on interface number */
@@ -467,7 +454,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id
467 */ 454 */
468 if (!id->driver_info) { 455 if (!id->driver_info) {
469 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); 456 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
470 id->driver_info = (unsigned long)&qmi_wwan_shared; 457 id->driver_info = (unsigned long)&qmi_wwan_info;
471 } 458 }
472 459
473 return usbnet_probe(intf, id); 460 return usbnet_probe(intf, id);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 0e2c92e0e532..5f39a3b225ef 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -275,7 +275,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
275 return -EBUSY; 275 return -EBUSY;
276 276
277 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 277 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
278 dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); 278 netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
279 /* Set the IDR registers. */ 279 /* Set the IDR registers. */
280 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); 280 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
281#ifdef EEPROM_WRITE 281#ifdef EEPROM_WRITE
@@ -503,12 +503,12 @@ static void intr_callback(struct urb *urb)
503 if ((d[INT_MSR] & MSR_LINK) == 0) { 503 if ((d[INT_MSR] & MSR_LINK) == 0) {
504 if (netif_carrier_ok(dev->netdev)) { 504 if (netif_carrier_ok(dev->netdev)) {
505 netif_carrier_off(dev->netdev); 505 netif_carrier_off(dev->netdev);
506 dbg("%s: LINK LOST\n", __func__); 506 netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__);
507 } 507 }
508 } else { 508 } else {
509 if (!netif_carrier_ok(dev->netdev)) { 509 if (!netif_carrier_ok(dev->netdev)) {
510 netif_carrier_on(dev->netdev); 510 netif_carrier_on(dev->netdev);
511 dbg("%s: LINK CAME BACK\n", __func__); 511 netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__);
512 } 512 }
513 } 513 }
514 514
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 8e22417fa6c1..c27d27701aee 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -68,9 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
68 */ 68 */
69#define SIERRA_NET_USBCTL_BUF_LEN 1024 69#define SIERRA_NET_USBCTL_BUF_LEN 1024
70 70
71struct sierra_net_info_data { 71/* Overriding the default usbnet rx_urb_size */
72 u16 rx_urb_size; 72#define SIERRA_NET_RX_URB_SIZE (8 * 1024)
73};
74 73
75/* Private data structure */ 74/* Private data structure */
76struct sierra_net_data { 75struct sierra_net_data {
@@ -560,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
560/* 559/*
561 * Sync Retransmit Timer Handler. On expiry, kick the work queue 560 * Sync Retransmit Timer Handler. On expiry, kick the work queue
562 */ 561 */
563void sierra_sync_timer(unsigned long syncdata) 562static void sierra_sync_timer(unsigned long syncdata)
564{ 563{
565 struct usbnet *dev = (struct usbnet *)syncdata; 564 struct usbnet *dev = (struct usbnet *)syncdata;
566 565
@@ -678,9 +677,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
678 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 677 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
679 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; 678 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
680 679
681 struct sierra_net_info_data *data =
682 (struct sierra_net_info_data *)dev->driver_info->data;
683
684 dev_dbg(&dev->udev->dev, "%s", __func__); 680 dev_dbg(&dev->udev->dev, "%s", __func__);
685 681
686 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; 682 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -725,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
725 sierra_net_set_ctx_index(priv, 0); 721 sierra_net_set_ctx_index(priv, 0);
726 722
727 /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ 723 /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
728 dev->rx_urb_size = data->rx_urb_size; 724 dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE;
729 if (dev->udev->speed != USB_SPEED_HIGH) 725 if (dev->udev->speed != USB_SPEED_HIGH)
730 dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); 726 dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE);
731 727
732 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; 728 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
733 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 729 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
@@ -842,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
842 netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); 838 netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
843 839
844 dev->net->stats.rx_frame_errors++; 840 dev->net->stats.rx_frame_errors++;
845 /* dev->net->stats.rx_errors incremented by caller */; 841 /* dev->net->stats.rx_errors incremented by caller */
846 return 0; 842 return 0;
847 } 843 }
848 844
@@ -866,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
866} 862}
867 863
868/* ---------------------------- Transmit data path ----------------------*/ 864/* ---------------------------- Transmit data path ----------------------*/
869struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 865static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
870 gfp_t flags) 866 struct sk_buff *skb, gfp_t flags)
871{ 867{
872 struct sierra_net_data *priv = sierra_net_get_private(dev); 868 struct sierra_net_data *priv = sierra_net_get_private(dev);
873 u16 len; 869 u16 len;
@@ -918,10 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
918 return NULL; 914 return NULL;
919} 915}
920 916
921static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
922 .rx_urb_size = 8 * 1024,
923};
924
925static const struct driver_info sierra_net_info_direct_ip = { 917static const struct driver_info sierra_net_info_direct_ip = {
926 .description = "Sierra Wireless USB-to-WWAN Modem", 918 .description = "Sierra Wireless USB-to-WWAN Modem",
927 .flags = FLAG_WWAN | FLAG_SEND_ZLP, 919 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
@@ -930,7 +922,6 @@ static const struct driver_info sierra_net_info_direct_ip = {
930 .status = sierra_net_status, 922 .status = sierra_net_status,
931 .rx_fixup = sierra_net_rx_fixup, 923 .rx_fixup = sierra_net_rx_fixup,
932 .tx_fixup = sierra_net_tx_fixup, 924 .tx_fixup = sierra_net_tx_fixup,
933 .data = (unsigned long)&sierra_net_info_data_direct_ip,
934}; 925};
935 926
936#define DIRECT_IP_DEVICE(vend, prod) \ 927#define DIRECT_IP_DEVICE(vend, prod) \
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 376143e8a1aa..b77ae76f4aa8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -52,6 +52,7 @@
52#define USB_PRODUCT_ID_LAN7500 (0x7500) 52#define USB_PRODUCT_ID_LAN7500 (0x7500)
53#define USB_PRODUCT_ID_LAN7505 (0x7505) 53#define USB_PRODUCT_ID_LAN7505 (0x7505)
54#define RXW_PADDING 2 54#define RXW_PADDING 2
55#define SUPPORTED_WAKE (WAKE_MAGIC)
55 56
56#define check_warn(ret, fmt, args...) \ 57#define check_warn(ret, fmt, args...) \
57 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 58 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -65,6 +66,7 @@
65struct smsc75xx_priv { 66struct smsc75xx_priv {
66 struct usbnet *dev; 67 struct usbnet *dev;
67 u32 rfe_ctl; 68 u32 rfe_ctl;
69 u32 wolopts;
68 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; 70 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
69 struct mutex dataport_mutex; 71 struct mutex dataport_mutex;
70 spinlock_t rfe_ctl_lock; 72 spinlock_t rfe_ctl_lock;
@@ -135,6 +137,30 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
135 return ret; 137 return ret;
136} 138}
137 139
140static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
141{
142 if (WARN_ON_ONCE(!dev))
143 return -EINVAL;
144
145 cpu_to_le32s(&feature);
146
147 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
148 USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
149 USB_CTRL_SET_TIMEOUT);
150}
151
152static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
153{
154 if (WARN_ON_ONCE(!dev))
155 return -EINVAL;
156
157 cpu_to_le32s(&feature);
158
159 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
160 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
161 USB_CTRL_SET_TIMEOUT);
162}
163
138/* Loop until the read is completed with timeout 164/* Loop until the read is completed with timeout
139 * called with phy_mutex held */ 165 * called with phy_mutex held */
140static int smsc75xx_phy_wait_not_busy(struct usbnet *dev) 166static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
@@ -578,6 +604,26 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
578 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); 604 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
579} 605}
580 606
607static void smsc75xx_ethtool_get_wol(struct net_device *net,
608 struct ethtool_wolinfo *wolinfo)
609{
610 struct usbnet *dev = netdev_priv(net);
611 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
612
613 wolinfo->supported = SUPPORTED_WAKE;
614 wolinfo->wolopts = pdata->wolopts;
615}
616
617static int smsc75xx_ethtool_set_wol(struct net_device *net,
618 struct ethtool_wolinfo *wolinfo)
619{
620 struct usbnet *dev = netdev_priv(net);
621 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
622
623 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
624 return 0;
625}
626
581static const struct ethtool_ops smsc75xx_ethtool_ops = { 627static const struct ethtool_ops smsc75xx_ethtool_ops = {
582 .get_link = usbnet_get_link, 628 .get_link = usbnet_get_link,
583 .nway_reset = usbnet_nway_reset, 629 .nway_reset = usbnet_nway_reset,
@@ -589,6 +635,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
589 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, 635 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
590 .get_eeprom = smsc75xx_ethtool_get_eeprom, 636 .get_eeprom = smsc75xx_ethtool_get_eeprom,
591 .set_eeprom = smsc75xx_ethtool_set_eeprom, 637 .set_eeprom = smsc75xx_ethtool_set_eeprom,
638 .get_wol = smsc75xx_ethtool_get_wol,
639 .set_wol = smsc75xx_ethtool_set_wol,
592}; 640};
593 641
594static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 642static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -756,6 +804,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
756 return 0; 804 return 0;
757} 805}
758 806
807static int smsc75xx_wait_ready(struct usbnet *dev)
808{
809 int timeout = 0;
810
811 do {
812 u32 buf;
813 int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
814 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
815
816 if (buf & PMT_CTL_DEV_RDY)
817 return 0;
818
819 msleep(10);
820 timeout++;
821 } while (timeout < 100);
822
823 netdev_warn(dev->net, "timeout waiting for device ready");
824 return -EIO;
825}
826
759static int smsc75xx_reset(struct usbnet *dev) 827static int smsc75xx_reset(struct usbnet *dev)
760{ 828{
761 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 829 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -764,6 +832,9 @@ static int smsc75xx_reset(struct usbnet *dev)
764 832
765 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset"); 833 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
766 834
835 ret = smsc75xx_wait_ready(dev);
836 check_warn_return(ret, "device not ready in smsc75xx_reset");
837
767 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 838 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
768 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 839 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
769 840
@@ -1083,6 +1154,169 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1083 } 1154 }
1084} 1155}
1085 1156
1157static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
1158{
1159 struct usbnet *dev = usb_get_intfdata(intf);
1160 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1161 int ret;
1162 u32 val;
1163
1164 ret = usbnet_suspend(intf, message);
1165 check_warn_return(ret, "usbnet_suspend error");
1166
1167 /* if no wol options set, enter lowest power SUSPEND2 mode */
1168 if (!(pdata->wolopts & SUPPORTED_WAKE)) {
1169 netdev_info(dev->net, "entering SUSPEND2 mode");
1170
1171 /* disable energy detect (link up) & wake up events */
1172 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1173 check_warn_return(ret, "Error reading WUCSR");
1174
1175 val &= ~(WUCSR_MPEN | WUCSR_WUEN);
1176
1177 ret = smsc75xx_write_reg(dev, WUCSR, val);
1178 check_warn_return(ret, "Error writing WUCSR");
1179
1180 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1181 check_warn_return(ret, "Error reading PMT_CTL");
1182
1183 val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
1184
1185 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1186 check_warn_return(ret, "Error writing PMT_CTL");
1187
1188 /* enter suspend2 mode */
1189 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1190 check_warn_return(ret, "Error reading PMT_CTL");
1191
1192 val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
1193 val |= PMT_CTL_SUS_MODE_2;
1194
1195 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1196 check_warn_return(ret, "Error writing PMT_CTL");
1197
1198 return 0;
1199 }
1200
1201 if (pdata->wolopts & WAKE_MAGIC) {
1202 /* clear any pending magic packet status */
1203 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1204 check_warn_return(ret, "Error reading WUCSR");
1205
1206 val |= WUCSR_MPR;
1207
1208 ret = smsc75xx_write_reg(dev, WUCSR, val);
1209 check_warn_return(ret, "Error writing WUCSR");
1210 }
1211
1212 /* enable/disable magic packup wake */
1213 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1214 check_warn_return(ret, "Error reading WUCSR");
1215
1216 if (pdata->wolopts & WAKE_MAGIC) {
1217 netdev_info(dev->net, "enabling magic packet wakeup");
1218 val |= WUCSR_MPEN;
1219 } else {
1220 netdev_info(dev->net, "disabling magic packet wakeup");
1221 val &= ~WUCSR_MPEN;
1222 }
1223
1224 ret = smsc75xx_write_reg(dev, WUCSR, val);
1225 check_warn_return(ret, "Error writing WUCSR");
1226
1227 /* enable wol wakeup source */
1228 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1229 check_warn_return(ret, "Error reading PMT_CTL");
1230
1231 val |= PMT_CTL_WOL_EN;
1232
1233 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1234 check_warn_return(ret, "Error writing PMT_CTL");
1235
1236 /* enable receiver */
1237 ret = smsc75xx_read_reg(dev, MAC_RX, &val);
1238 check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
1239
1240 val |= MAC_RX_RXEN;
1241
1242 ret = smsc75xx_write_reg(dev, MAC_RX, val);
1243 check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
1244
1245 /* some wol options are enabled, so enter SUSPEND0 */
1246 netdev_info(dev->net, "entering SUSPEND0 mode");
1247
1248 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1249 check_warn_return(ret, "Error reading PMT_CTL");
1250
1251 val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
1252 val |= PMT_CTL_SUS_MODE_0;
1253
1254 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1255 check_warn_return(ret, "Error writing PMT_CTL");
1256
1257 /* clear wol status */
1258 val &= ~PMT_CTL_WUPS;
1259 val |= PMT_CTL_WUPS_WOL;
1260 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1261 check_warn_return(ret, "Error writing PMT_CTL");
1262
1263 /* read back PMT_CTL */
1264 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1265 check_warn_return(ret, "Error reading PMT_CTL");
1266
1267 smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1268
1269 return 0;
1270}
1271
1272static int smsc75xx_resume(struct usb_interface *intf)
1273{
1274 struct usbnet *dev = usb_get_intfdata(intf);
1275 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1276 int ret;
1277 u32 val;
1278
1279 if (pdata->wolopts & WAKE_MAGIC) {
1280 netdev_info(dev->net, "resuming from SUSPEND0");
1281
1282 smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1283
1284 /* Disable magic packup wake */
1285 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1286 check_warn_return(ret, "Error reading WUCSR");
1287
1288 val &= ~WUCSR_MPEN;
1289
1290 ret = smsc75xx_write_reg(dev, WUCSR, val);
1291 check_warn_return(ret, "Error writing WUCSR");
1292
1293 /* clear wake-up status */
1294 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1295 check_warn_return(ret, "Error reading PMT_CTL");
1296
1297 val &= ~PMT_CTL_WOL_EN;
1298 val |= PMT_CTL_WUPS;
1299
1300 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1301 check_warn_return(ret, "Error writing PMT_CTL");
1302 } else {
1303 netdev_info(dev->net, "resuming from SUSPEND2");
1304
1305 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1306 check_warn_return(ret, "Error reading PMT_CTL");
1307
1308 val |= PMT_CTL_PHY_PWRUP;
1309
1310 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1311 check_warn_return(ret, "Error writing PMT_CTL");
1312 }
1313
1314 ret = smsc75xx_wait_ready(dev);
1315 check_warn_return(ret, "device not ready in smsc75xx_resume");
1316
1317 return usbnet_resume(intf);
1318}
1319
1086static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, 1320static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
1087 u32 rx_cmd_a, u32 rx_cmd_b) 1321 u32 rx_cmd_a, u32 rx_cmd_b)
1088{ 1322{
@@ -1251,9 +1485,9 @@ static struct usb_driver smsc75xx_driver = {
1251 .name = SMSC_CHIPNAME, 1485 .name = SMSC_CHIPNAME,
1252 .id_table = products, 1486 .id_table = products,
1253 .probe = usbnet_probe, 1487 .probe = usbnet_probe,
1254 .suspend = usbnet_suspend, 1488 .suspend = smsc75xx_suspend,
1255 .resume = usbnet_resume, 1489 .resume = smsc75xx_resume,
1256 .reset_resume = usbnet_resume, 1490 .reset_resume = smsc75xx_resume,
1257 .disconnect = usbnet_disconnect, 1491 .disconnect = usbnet_disconnect,
1258 .disable_hub_initiated_lpm = 1, 1492 .disable_hub_initiated_lpm = 1,
1259}; 1493};
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d45e539a84b7..7479a5761d0d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -46,11 +46,22 @@
46#define SMSC95XX_INTERNAL_PHY_ID (1) 46#define SMSC95XX_INTERNAL_PHY_ID (1)
47#define SMSC95XX_TX_OVERHEAD (8) 47#define SMSC95XX_TX_OVERHEAD (8)
48#define SMSC95XX_TX_OVERHEAD_CSUM (12) 48#define SMSC95XX_TX_OVERHEAD_CSUM (12)
49#define SUPPORTED_WAKE (WAKE_MAGIC)
50
51#define check_warn(ret, fmt, args...) \
52 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
53
54#define check_warn_return(ret, fmt, args...) \
55 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
56
57#define check_warn_goto_done(ret, fmt, args...) \
58 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
49 59
50struct smsc95xx_priv { 60struct smsc95xx_priv {
51 u32 mac_cr; 61 u32 mac_cr;
52 u32 hash_hi; 62 u32 hash_hi;
53 u32 hash_lo; 63 u32 hash_lo;
64 u32 wolopts;
54 spinlock_t mac_cr_lock; 65 spinlock_t mac_cr_lock;
55}; 66};
56 67
@@ -63,7 +74,8 @@ static bool turbo_mode = true;
63module_param(turbo_mode, bool, 0644); 74module_param(turbo_mode, bool, 0644);
64MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 75MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
65 76
66static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data) 77static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
78 u32 *data)
67{ 79{
68 u32 *buf = kmalloc(4, GFP_KERNEL); 80 u32 *buf = kmalloc(4, GFP_KERNEL);
69 int ret; 81 int ret;
@@ -88,7 +100,8 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
88 return ret; 100 return ret;
89} 101}
90 102
91static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data) 103static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
104 u32 data)
92{ 105{
93 u32 *buf = kmalloc(4, GFP_KERNEL); 106 u32 *buf = kmalloc(4, GFP_KERNEL);
94 int ret; 107 int ret;
@@ -114,15 +127,41 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
114 return ret; 127 return ret;
115} 128}
116 129
130static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
131{
132 if (WARN_ON_ONCE(!dev))
133 return -EINVAL;
134
135 cpu_to_le32s(&feature);
136
137 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
138 USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
139 USB_CTRL_SET_TIMEOUT);
140}
141
142static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
143{
144 if (WARN_ON_ONCE(!dev))
145 return -EINVAL;
146
147 cpu_to_le32s(&feature);
148
149 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
150 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
151 USB_CTRL_SET_TIMEOUT);
152}
153
117/* Loop until the read is completed with timeout 154/* Loop until the read is completed with timeout
118 * called with phy_mutex held */ 155 * called with phy_mutex held */
119static int smsc95xx_phy_wait_not_busy(struct usbnet *dev) 156static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
120{ 157{
121 unsigned long start_time = jiffies; 158 unsigned long start_time = jiffies;
122 u32 val; 159 u32 val;
160 int ret;
123 161
124 do { 162 do {
125 smsc95xx_read_reg(dev, MII_ADDR, &val); 163 ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
164 check_warn_return(ret, "Error reading MII_ACCESS");
126 if (!(val & MII_BUSY_)) 165 if (!(val & MII_BUSY_))
127 return 0; 166 return 0;
128 } while (!time_after(jiffies, start_time + HZ)); 167 } while (!time_after(jiffies, start_time + HZ));
@@ -134,33 +173,32 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
134{ 173{
135 struct usbnet *dev = netdev_priv(netdev); 174 struct usbnet *dev = netdev_priv(netdev);
136 u32 val, addr; 175 u32 val, addr;
176 int ret;
137 177
138 mutex_lock(&dev->phy_mutex); 178 mutex_lock(&dev->phy_mutex);
139 179
140 /* confirm MII not busy */ 180 /* confirm MII not busy */
141 if (smsc95xx_phy_wait_not_busy(dev)) { 181 ret = smsc95xx_phy_wait_not_busy(dev);
142 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n"); 182 check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
143 mutex_unlock(&dev->phy_mutex);
144 return -EIO;
145 }
146 183
147 /* set the address, index & direction (read from PHY) */ 184 /* set the address, index & direction (read from PHY) */
148 phy_id &= dev->mii.phy_id_mask; 185 phy_id &= dev->mii.phy_id_mask;
149 idx &= dev->mii.reg_num_mask; 186 idx &= dev->mii.reg_num_mask;
150 addr = (phy_id << 11) | (idx << 6) | MII_READ_; 187 addr = (phy_id << 11) | (idx << 6) | MII_READ_;
151 smsc95xx_write_reg(dev, MII_ADDR, addr); 188 ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
189 check_warn_goto_done(ret, "Error writing MII_ADDR");
152 190
153 if (smsc95xx_phy_wait_not_busy(dev)) { 191 ret = smsc95xx_phy_wait_not_busy(dev);
154 netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); 192 check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
155 mutex_unlock(&dev->phy_mutex);
156 return -EIO;
157 }
158 193
159 smsc95xx_read_reg(dev, MII_DATA, &val); 194 ret = smsc95xx_read_reg(dev, MII_DATA, &val);
195 check_warn_goto_done(ret, "Error reading MII_DATA");
160 196
161 mutex_unlock(&dev->phy_mutex); 197 ret = (u16)(val & 0xFFFF);
162 198
163 return (u16)(val & 0xFFFF); 199done:
200 mutex_unlock(&dev->phy_mutex);
201 return ret;
164} 202}
165 203
166static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, 204static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
@@ -168,38 +206,41 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
168{ 206{
169 struct usbnet *dev = netdev_priv(netdev); 207 struct usbnet *dev = netdev_priv(netdev);
170 u32 val, addr; 208 u32 val, addr;
209 int ret;
171 210
172 mutex_lock(&dev->phy_mutex); 211 mutex_lock(&dev->phy_mutex);
173 212
174 /* confirm MII not busy */ 213 /* confirm MII not busy */
175 if (smsc95xx_phy_wait_not_busy(dev)) { 214 ret = smsc95xx_phy_wait_not_busy(dev);
176 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n"); 215 check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
177 mutex_unlock(&dev->phy_mutex);
178 return;
179 }
180 216
181 val = regval; 217 val = regval;
182 smsc95xx_write_reg(dev, MII_DATA, val); 218 ret = smsc95xx_write_reg(dev, MII_DATA, val);
219 check_warn_goto_done(ret, "Error writing MII_DATA");
183 220
184 /* set the address, index & direction (write to PHY) */ 221 /* set the address, index & direction (write to PHY) */
185 phy_id &= dev->mii.phy_id_mask; 222 phy_id &= dev->mii.phy_id_mask;
186 idx &= dev->mii.reg_num_mask; 223 idx &= dev->mii.reg_num_mask;
187 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; 224 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
188 smsc95xx_write_reg(dev, MII_ADDR, addr); 225 ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
226 check_warn_goto_done(ret, "Error writing MII_ADDR");
189 227
190 if (smsc95xx_phy_wait_not_busy(dev)) 228 ret = smsc95xx_phy_wait_not_busy(dev);
191 netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); 229 check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
192 230
231done:
193 mutex_unlock(&dev->phy_mutex); 232 mutex_unlock(&dev->phy_mutex);
194} 233}
195 234
196static int smsc95xx_wait_eeprom(struct usbnet *dev) 235static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
197{ 236{
198 unsigned long start_time = jiffies; 237 unsigned long start_time = jiffies;
199 u32 val; 238 u32 val;
239 int ret;
200 240
201 do { 241 do {
202 smsc95xx_read_reg(dev, E2P_CMD, &val); 242 ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
243 check_warn_return(ret, "Error reading E2P_CMD");
203 if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_)) 244 if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
204 break; 245 break;
205 udelay(40); 246 udelay(40);
@@ -213,13 +254,15 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
213 return 0; 254 return 0;
214} 255}
215 256
216static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev) 257static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
217{ 258{
218 unsigned long start_time = jiffies; 259 unsigned long start_time = jiffies;
219 u32 val; 260 u32 val;
261 int ret;
220 262
221 do { 263 do {
222 smsc95xx_read_reg(dev, E2P_CMD, &val); 264 ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
265 check_warn_return(ret, "Error reading E2P_CMD");
223 266
224 if (!(val & E2P_CMD_BUSY_)) 267 if (!(val & E2P_CMD_BUSY_))
225 return 0; 268 return 0;
@@ -246,13 +289,15 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
246 289
247 for (i = 0; i < length; i++) { 290 for (i = 0; i < length; i++) {
248 val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_); 291 val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
249 smsc95xx_write_reg(dev, E2P_CMD, val); 292 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
293 check_warn_return(ret, "Error writing E2P_CMD");
250 294
251 ret = smsc95xx_wait_eeprom(dev); 295 ret = smsc95xx_wait_eeprom(dev);
252 if (ret < 0) 296 if (ret < 0)
253 return ret; 297 return ret;
254 298
255 smsc95xx_read_reg(dev, E2P_DATA, &val); 299 ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
300 check_warn_return(ret, "Error reading E2P_DATA");
256 301
257 data[i] = val & 0xFF; 302 data[i] = val & 0xFF;
258 offset++; 303 offset++;
@@ -276,7 +321,8 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
276 321
277 /* Issue write/erase enable command */ 322 /* Issue write/erase enable command */
278 val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_; 323 val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
279 smsc95xx_write_reg(dev, E2P_CMD, val); 324 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
325 check_warn_return(ret, "Error writing E2P_DATA");
280 326
281 ret = smsc95xx_wait_eeprom(dev); 327 ret = smsc95xx_wait_eeprom(dev);
282 if (ret < 0) 328 if (ret < 0)
@@ -286,11 +332,13 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
286 332
287 /* Fill data register */ 333 /* Fill data register */
288 val = data[i]; 334 val = data[i];
289 smsc95xx_write_reg(dev, E2P_DATA, val); 335 ret = smsc95xx_write_reg(dev, E2P_DATA, val);
336 check_warn_return(ret, "Error writing E2P_DATA");
290 337
291 /* Send "write" command */ 338 /* Send "write" command */
292 val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_); 339 val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
293 smsc95xx_write_reg(dev, E2P_CMD, val); 340 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
341 check_warn_return(ret, "Error writing E2P_CMD");
294 342
295 ret = smsc95xx_wait_eeprom(dev); 343 ret = smsc95xx_wait_eeprom(dev);
296 if (ret < 0) 344 if (ret < 0)
@@ -308,14 +356,14 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
308 struct usbnet *dev = usb_context->dev; 356 struct usbnet *dev = usb_context->dev;
309 int status = urb->status; 357 int status = urb->status;
310 358
311 if (status < 0) 359 check_warn(status, "async callback failed with %d\n", status);
312 netdev_warn(dev->net, "async callback failed with %d\n", status);
313 360
314 kfree(usb_context); 361 kfree(usb_context);
315 usb_free_urb(urb); 362 usb_free_urb(urb);
316} 363}
317 364
318static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data) 365static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
366 u32 *data)
319{ 367{
320 struct usb_context *usb_context; 368 struct usb_context *usb_context;
321 int status; 369 int status;
@@ -371,6 +419,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
371 struct usbnet *dev = netdev_priv(netdev); 419 struct usbnet *dev = netdev_priv(netdev);
372 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 420 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
373 unsigned long flags; 421 unsigned long flags;
422 int ret;
374 423
375 pdata->hash_hi = 0; 424 pdata->hash_hi = 0;
376 pdata->hash_lo = 0; 425 pdata->hash_lo = 0;
@@ -411,21 +460,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
411 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 460 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
412 461
413 /* Initiate async writes, as we can't wait for completion here */ 462 /* Initiate async writes, as we can't wait for completion here */
414 smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); 463 ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
415 smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); 464 check_warn(ret, "failed to initiate async write to HASHH");
416 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 465
466 ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
467 check_warn(ret, "failed to initiate async write to HASHL");
468
469 ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
470 check_warn(ret, "failed to initiate async write to MAC_CR");
417} 471}
418 472
419static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, 473static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
420 u16 lcladv, u16 rmtadv) 474 u16 lcladv, u16 rmtadv)
421{ 475{
422 u32 flow, afc_cfg = 0; 476 u32 flow, afc_cfg = 0;
423 477
424 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); 478 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
425 if (ret < 0) { 479 check_warn_return(ret, "Error reading AFC_CFG");
426 netdev_warn(dev->net, "error reading AFC_CFG\n");
427 return;
428 }
429 480
430 if (duplex == DUPLEX_FULL) { 481 if (duplex == DUPLEX_FULL) {
431 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 482 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -449,8 +500,13 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
449 afc_cfg |= 0xF; 500 afc_cfg |= 0xF;
450 } 501 }
451 502
452 smsc95xx_write_reg(dev, FLOW, flow); 503 ret = smsc95xx_write_reg(dev, FLOW, flow);
453 smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); 504 check_warn_return(ret, "Error writing FLOW");
505
506 ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
507 check_warn_return(ret, "Error writing AFC_CFG");
508
509 return 0;
454} 510}
455 511
456static int smsc95xx_link_reset(struct usbnet *dev) 512static int smsc95xx_link_reset(struct usbnet *dev)
@@ -460,12 +516,14 @@ static int smsc95xx_link_reset(struct usbnet *dev)
460 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; 516 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
461 unsigned long flags; 517 unsigned long flags;
462 u16 lcladv, rmtadv; 518 u16 lcladv, rmtadv;
463 u32 intdata; 519 int ret;
464 520
465 /* clear interrupt status */ 521 /* clear interrupt status */
466 smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); 522 ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
467 intdata = 0xFFFFFFFF; 523 check_warn_return(ret, "Error reading PHY_INT_SRC");
468 smsc95xx_write_reg(dev, INT_STS, intdata); 524
525 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
526 check_warn_return(ret, "Error writing INT_STS");
469 527
470 mii_check_media(mii, 1, 1); 528 mii_check_media(mii, 1, 1);
471 mii_ethtool_gset(&dev->mii, &ecmd); 529 mii_ethtool_gset(&dev->mii, &ecmd);
@@ -486,9 +544,11 @@ static int smsc95xx_link_reset(struct usbnet *dev)
486 } 544 }
487 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 545 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
488 546
489 smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 547 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
548 check_warn_return(ret, "Error writing MAC_CR");
490 549
491 smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); 550 ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
551 check_warn_return(ret, "Error updating PHY flow control");
492 552
493 return 0; 553 return 0;
494} 554}
@@ -524,10 +584,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
524 int ret; 584 int ret;
525 585
526 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 586 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
527 if (ret < 0) { 587 check_warn_return(ret, "Failed to read COE_CR: %d\n", ret);
528 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
529 return ret;
530 }
531 588
532 if (features & NETIF_F_HW_CSUM) 589 if (features & NETIF_F_HW_CSUM)
533 read_buf |= Tx_COE_EN_; 590 read_buf |= Tx_COE_EN_;
@@ -540,10 +597,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
540 read_buf &= ~Rx_COE_EN_; 597 read_buf &= ~Rx_COE_EN_;
541 598
542 ret = smsc95xx_write_reg(dev, COE_CR, read_buf); 599 ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
543 if (ret < 0) { 600 check_warn_return(ret, "Failed to write COE_CR: %d\n", ret);
544 netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
545 return ret;
546 }
547 601
548 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf); 602 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
549 return 0; 603 return 0;
@@ -608,6 +662,26 @@ smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
608 } 662 }
609} 663}
610 664
665static void smsc95xx_ethtool_get_wol(struct net_device *net,
666 struct ethtool_wolinfo *wolinfo)
667{
668 struct usbnet *dev = netdev_priv(net);
669 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
670
671 wolinfo->supported = SUPPORTED_WAKE;
672 wolinfo->wolopts = pdata->wolopts;
673}
674
675static int smsc95xx_ethtool_set_wol(struct net_device *net,
676 struct ethtool_wolinfo *wolinfo)
677{
678 struct usbnet *dev = netdev_priv(net);
679 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
680
681 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
682 return 0;
683}
684
611static const struct ethtool_ops smsc95xx_ethtool_ops = { 685static const struct ethtool_ops smsc95xx_ethtool_ops = {
612 .get_link = usbnet_get_link, 686 .get_link = usbnet_get_link,
613 .nway_reset = usbnet_nway_reset, 687 .nway_reset = usbnet_nway_reset,
@@ -621,6 +695,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
621 .set_eeprom = smsc95xx_ethtool_set_eeprom, 695 .set_eeprom = smsc95xx_ethtool_set_eeprom,
622 .get_regs_len = smsc95xx_ethtool_getregslen, 696 .get_regs_len = smsc95xx_ethtool_getregslen,
623 .get_regs = smsc95xx_ethtool_getregs, 697 .get_regs = smsc95xx_ethtool_getregs,
698 .get_wol = smsc95xx_ethtool_get_wol,
699 .set_wol = smsc95xx_ethtool_set_wol,
624}; 700};
625 701
626static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 702static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -658,55 +734,56 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
658 int ret; 734 int ret;
659 735
660 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo); 736 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
661 if (ret < 0) { 737 check_warn_return(ret, "Failed to write ADDRL: %d\n", ret);
662 netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
663 return ret;
664 }
665 738
666 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi); 739 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
667 if (ret < 0) { 740 check_warn_return(ret, "Failed to write ADDRH: %d\n", ret);
668 netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
669 return ret;
670 }
671 741
672 return 0; 742 return 0;
673} 743}
674 744
675/* starts the TX path */ 745/* starts the TX path */
676static void smsc95xx_start_tx_path(struct usbnet *dev) 746static int smsc95xx_start_tx_path(struct usbnet *dev)
677{ 747{
678 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 748 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
679 unsigned long flags; 749 unsigned long flags;
680 u32 reg_val; 750 int ret;
681 751
682 /* Enable Tx at MAC */ 752 /* Enable Tx at MAC */
683 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 753 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
684 pdata->mac_cr |= MAC_CR_TXEN_; 754 pdata->mac_cr |= MAC_CR_TXEN_;
685 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 755 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
686 756
687 smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 757 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
758 check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
688 759
689 /* Enable Tx at SCSRs */ 760 /* Enable Tx at SCSRs */
690 reg_val = TX_CFG_ON_; 761 ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
691 smsc95xx_write_reg(dev, TX_CFG, reg_val); 762 check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret);
763
764 return 0;
692} 765}
693 766
694/* Starts the Receive path */ 767/* Starts the Receive path */
695static void smsc95xx_start_rx_path(struct usbnet *dev) 768static int smsc95xx_start_rx_path(struct usbnet *dev)
696{ 769{
697 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 770 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
698 unsigned long flags; 771 unsigned long flags;
772 int ret;
699 773
700 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 774 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
701 pdata->mac_cr |= MAC_CR_RXEN_; 775 pdata->mac_cr |= MAC_CR_RXEN_;
702 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 776 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
703 777
704 smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 778 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
779 check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
780
781 return 0;
705} 782}
706 783
707static int smsc95xx_phy_initialize(struct usbnet *dev) 784static int smsc95xx_phy_initialize(struct usbnet *dev)
708{ 785{
709 int bmcr, timeout = 0; 786 int bmcr, ret, timeout = 0;
710 787
711 /* Initialize MII structure */ 788 /* Initialize MII structure */
712 dev->mii.dev = dev->net; 789 dev->mii.dev = dev->net;
@@ -735,7 +812,8 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
735 ADVERTISE_PAUSE_ASYM); 812 ADVERTISE_PAUSE_ASYM);
736 813
737 /* read to clear */ 814 /* read to clear */
738 smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 815 ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
816 check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
739 817
740 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 818 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
741 PHY_INT_MASK_DEFAULT_); 819 PHY_INT_MASK_DEFAULT_);
@@ -753,22 +831,14 @@ static int smsc95xx_reset(struct usbnet *dev)
753 831
754 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n"); 832 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
755 833
756 write_buf = HW_CFG_LRST_; 834 ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
757 ret = smsc95xx_write_reg(dev, HW_CFG, write_buf); 835 check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
758 if (ret < 0) {
759 netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
760 ret);
761 return ret;
762 }
763 836
764 timeout = 0; 837 timeout = 0;
765 do { 838 do {
766 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
767 if (ret < 0) {
768 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
769 return ret;
770 }
771 msleep(10); 839 msleep(10);
840 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
841 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
772 timeout++; 842 timeout++;
773 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100)); 843 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
774 844
@@ -777,21 +847,14 @@ static int smsc95xx_reset(struct usbnet *dev)
777 return ret; 847 return ret;
778 } 848 }
779 849
780 write_buf = PM_CTL_PHY_RST_; 850 ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
781 ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf); 851 check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret);
782 if (ret < 0) {
783 netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
784 return ret;
785 }
786 852
787 timeout = 0; 853 timeout = 0;
788 do { 854 do {
789 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
790 if (ret < 0) {
791 netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
792 return ret;
793 }
794 msleep(10); 855 msleep(10);
856 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
857 check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret);
795 timeout++; 858 timeout++;
796 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); 859 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
797 860
@@ -808,10 +871,7 @@ static int smsc95xx_reset(struct usbnet *dev)
808 "MAC Address: %pM\n", dev->net->dev_addr); 871 "MAC Address: %pM\n", dev->net->dev_addr);
809 872
810 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 873 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
811 if (ret < 0) { 874 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
812 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
813 return ret;
814 }
815 875
816 netif_dbg(dev, ifup, dev->net, 876 netif_dbg(dev, ifup, dev->net,
817 "Read Value from HW_CFG : 0x%08x\n", read_buf); 877 "Read Value from HW_CFG : 0x%08x\n", read_buf);
@@ -819,17 +879,10 @@ static int smsc95xx_reset(struct usbnet *dev)
819 read_buf |= HW_CFG_BIR_; 879 read_buf |= HW_CFG_BIR_;
820 880
821 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 881 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
822 if (ret < 0) { 882 check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
823 netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
824 ret);
825 return ret;
826 }
827 883
828 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 884 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
829 if (ret < 0) { 885 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
830 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
831 return ret;
832 }
833 netif_dbg(dev, ifup, dev->net, 886 netif_dbg(dev, ifup, dev->net,
834 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n", 887 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
835 read_buf); 888 read_buf);
@@ -849,41 +902,28 @@ static int smsc95xx_reset(struct usbnet *dev)
849 "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size); 902 "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
850 903
851 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap); 904 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
852 if (ret < 0) { 905 check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
853 netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
854 return ret;
855 }
856 906
857 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf); 907 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
858 if (ret < 0) { 908 check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
859 netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); 909
860 return ret;
861 }
862 netif_dbg(dev, ifup, dev->net, 910 netif_dbg(dev, ifup, dev->net,
863 "Read Value from BURST_CAP after writing: 0x%08x\n", 911 "Read Value from BURST_CAP after writing: 0x%08x\n",
864 read_buf); 912 read_buf);
865 913
866 read_buf = DEFAULT_BULK_IN_DELAY; 914 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
867 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf); 915 check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
868 if (ret < 0) {
869 netdev_warn(dev->net, "ret = %d\n", ret);
870 return ret;
871 }
872 916
873 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf); 917 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
874 if (ret < 0) { 918 check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
875 netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); 919
876 return ret;
877 }
878 netif_dbg(dev, ifup, dev->net, 920 netif_dbg(dev, ifup, dev->net,
879 "Read Value from BULK_IN_DLY after writing: 0x%08x\n", 921 "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
880 read_buf); 922 read_buf);
881 923
882 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 924 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
883 if (ret < 0) { 925 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
884 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); 926
885 return ret;
886 }
887 netif_dbg(dev, ifup, dev->net, 927 netif_dbg(dev, ifup, dev->net,
888 "Read Value from HW_CFG: 0x%08x\n", read_buf); 928 "Read Value from HW_CFG: 0x%08x\n", read_buf);
889 929
@@ -896,101 +936,66 @@ static int smsc95xx_reset(struct usbnet *dev)
896 read_buf |= NET_IP_ALIGN << 9; 936 read_buf |= NET_IP_ALIGN << 9;
897 937
898 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 938 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
899 if (ret < 0) { 939 check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
900 netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
901 ret);
902 return ret;
903 }
904 940
905 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 941 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
906 if (ret < 0) { 942 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
907 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); 943
908 return ret;
909 }
910 netif_dbg(dev, ifup, dev->net, 944 netif_dbg(dev, ifup, dev->net,
911 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf); 945 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
912 946
913 write_buf = 0xFFFFFFFF; 947 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
914 ret = smsc95xx_write_reg(dev, INT_STS, write_buf); 948 check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
915 if (ret < 0) {
916 netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
917 ret);
918 return ret;
919 }
920 949
921 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf); 950 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
922 if (ret < 0) { 951 check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
923 netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
924 return ret;
925 }
926 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf); 952 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
927 953
928 /* Configure GPIO pins as LED outputs */ 954 /* Configure GPIO pins as LED outputs */
929 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | 955 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
930 LED_GPIO_CFG_FDX_LED; 956 LED_GPIO_CFG_FDX_LED;
931 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); 957 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
932 if (ret < 0) { 958 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret);
933 netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
934 ret);
935 return ret;
936 }
937 959
938 /* Init Tx */ 960 /* Init Tx */
939 write_buf = 0; 961 ret = smsc95xx_write_reg(dev, FLOW, 0);
940 ret = smsc95xx_write_reg(dev, FLOW, write_buf); 962 check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
941 if (ret < 0) {
942 netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
943 return ret;
944 }
945 963
946 read_buf = AFC_CFG_DEFAULT; 964 ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
947 ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf); 965 check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret);
948 if (ret < 0) {
949 netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
950 return ret;
951 }
952 966
953 /* Don't need mac_cr_lock during initialisation */ 967 /* Don't need mac_cr_lock during initialisation */
954 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr); 968 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
955 if (ret < 0) { 969 check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
956 netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
957 return ret;
958 }
959 970
960 /* Init Rx */ 971 /* Init Rx */
961 /* Set Vlan */ 972 /* Set Vlan */
962 write_buf = (u32)ETH_P_8021Q; 973 ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
963 ret = smsc95xx_write_reg(dev, VLAN1, write_buf); 974 check_warn_return(ret, "Failed to write VLAN1: %d\n", ret);
964 if (ret < 0) {
965 netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
966 return ret;
967 }
968 975
969 /* Enable or disable checksum offload engines */ 976 /* Enable or disable checksum offload engines */
970 smsc95xx_set_features(dev->net, dev->net->features); 977 ret = smsc95xx_set_features(dev->net, dev->net->features);
978 check_warn_return(ret, "Failed to set checksum offload features");
971 979
972 smsc95xx_set_multicast(dev->net); 980 smsc95xx_set_multicast(dev->net);
973 981
974 if (smsc95xx_phy_initialize(dev) < 0) 982 ret = smsc95xx_phy_initialize(dev);
975 return -EIO; 983 check_warn_return(ret, "Failed to init PHY");
976 984
977 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf); 985 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
978 if (ret < 0) { 986 check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
979 netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
980 return ret;
981 }
982 987
983 /* enable PHY interrupts */ 988 /* enable PHY interrupts */
984 read_buf |= INT_EP_CTL_PHY_INT_; 989 read_buf |= INT_EP_CTL_PHY_INT_;
985 990
986 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf); 991 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
987 if (ret < 0) { 992 check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
988 netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
989 return ret;
990 }
991 993
992 smsc95xx_start_tx_path(dev); 994 ret = smsc95xx_start_tx_path(dev);
993 smsc95xx_start_rx_path(dev); 995 check_warn_return(ret, "Failed to start TX path");
996
997 ret = smsc95xx_start_rx_path(dev);
998 check_warn_return(ret, "Failed to start RX path");
994 999
995 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n"); 1000 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
996 return 0; 1001 return 0;
@@ -1017,10 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1017 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); 1022 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
1018 1023
1019 ret = usbnet_get_endpoints(dev, intf); 1024 ret = usbnet_get_endpoints(dev, intf);
1020 if (ret < 0) { 1025 check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
1021 netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
1022 return ret;
1023 }
1024 1026
1025 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv), 1027 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
1026 GFP_KERNEL); 1028 GFP_KERNEL);
@@ -1064,6 +1066,153 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1064 } 1066 }
1065} 1067}
1066 1068
1069static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
1070{
1071 struct usbnet *dev = usb_get_intfdata(intf);
1072 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1073 int ret;
1074 u32 val;
1075
1076 ret = usbnet_suspend(intf, message);
1077 check_warn_return(ret, "usbnet_suspend error");
1078
1079 /* if no wol options set, enter lowest power SUSPEND2 mode */
1080 if (!(pdata->wolopts & SUPPORTED_WAKE)) {
1081 netdev_info(dev->net, "entering SUSPEND2 mode");
1082
1083 /* disable energy detect (link up) & wake up events */
1084 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1085 check_warn_return(ret, "Error reading WUCSR");
1086
1087 val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
1088
1089 ret = smsc95xx_write_reg(dev, WUCSR, val);
1090 check_warn_return(ret, "Error writing WUCSR");
1091
1092 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1093 check_warn_return(ret, "Error reading PM_CTRL");
1094
1095 val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
1096
1097 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1098 check_warn_return(ret, "Error writing PM_CTRL");
1099
1100 /* enter suspend2 mode */
1101 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1102 check_warn_return(ret, "Error reading PM_CTRL");
1103
1104 val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
1105 val |= PM_CTL_SUS_MODE_2;
1106
1107 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1108 check_warn_return(ret, "Error writing PM_CTRL");
1109
1110 return 0;
1111 }
1112
1113 if (pdata->wolopts & WAKE_MAGIC) {
1114 /* clear any pending magic packet status */
1115 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1116 check_warn_return(ret, "Error reading WUCSR");
1117
1118 val |= WUCSR_MPR_;
1119
1120 ret = smsc95xx_write_reg(dev, WUCSR, val);
1121 check_warn_return(ret, "Error writing WUCSR");
1122 }
1123
1124 /* enable/disable magic packup wake */
1125 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1126 check_warn_return(ret, "Error reading WUCSR");
1127
1128 if (pdata->wolopts & WAKE_MAGIC) {
1129 netdev_info(dev->net, "enabling magic packet wakeup");
1130 val |= WUCSR_MPEN_;
1131 } else {
1132 netdev_info(dev->net, "disabling magic packet wakeup");
1133 val &= ~WUCSR_MPEN_;
1134 }
1135
1136 ret = smsc95xx_write_reg(dev, WUCSR, val);
1137 check_warn_return(ret, "Error writing WUCSR");
1138
1139 /* enable wol wakeup source */
1140 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1141 check_warn_return(ret, "Error reading PM_CTRL");
1142
1143 val |= PM_CTL_WOL_EN_;
1144
1145 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1146 check_warn_return(ret, "Error writing PM_CTRL");
1147
1148 /* enable receiver */
1149 smsc95xx_start_rx_path(dev);
1150
1151 /* some wol options are enabled, so enter SUSPEND0 */
1152 netdev_info(dev->net, "entering SUSPEND0 mode");
1153
1154 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1155 check_warn_return(ret, "Error reading PM_CTRL");
1156
1157 val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
1158 val |= PM_CTL_SUS_MODE_0;
1159
1160 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1161 check_warn_return(ret, "Error writing PM_CTRL");
1162
1163 /* clear wol status */
1164 val &= ~PM_CTL_WUPS_;
1165 val |= PM_CTL_WUPS_WOL_;
1166 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1167 check_warn_return(ret, "Error writing PM_CTRL");
1168
1169 /* read back PM_CTRL */
1170 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1171 check_warn_return(ret, "Error reading PM_CTRL");
1172
1173 smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1174
1175 return 0;
1176}
1177
1178static int smsc95xx_resume(struct usb_interface *intf)
1179{
1180 struct usbnet *dev = usb_get_intfdata(intf);
1181 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1182 int ret;
1183 u32 val;
1184
1185 BUG_ON(!dev);
1186
1187 if (pdata->wolopts & WAKE_MAGIC) {
1188 smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
1189
1190 /* Disable magic packup wake */
1191 ret = smsc95xx_read_reg(dev, WUCSR, &val);
1192 check_warn_return(ret, "Error reading WUCSR");
1193
1194 val &= ~WUCSR_MPEN_;
1195
1196 ret = smsc95xx_write_reg(dev, WUCSR, val);
1197 check_warn_return(ret, "Error writing WUCSR");
1198
1199 /* clear wake-up status */
1200 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1201 check_warn_return(ret, "Error reading PM_CTRL");
1202
1203 val &= ~PM_CTL_WOL_EN_;
1204 val |= PM_CTL_WUPS_;
1205
1206 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1207 check_warn_return(ret, "Error writing PM_CTRL");
1208 }
1209
1210 return usbnet_resume(intf);
1211 check_warn_return(ret, "usbnet_resume error");
1212
1213 return 0;
1214}
1215
1067static void smsc95xx_rx_csum_offload(struct sk_buff *skb) 1216static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1068{ 1217{
1069 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); 1218 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -1326,8 +1475,9 @@ static struct usb_driver smsc95xx_driver = {
1326 .name = "smsc95xx", 1475 .name = "smsc95xx",
1327 .id_table = products, 1476 .id_table = products,
1328 .probe = usbnet_probe, 1477 .probe = usbnet_probe,
1329 .suspend = usbnet_suspend, 1478 .suspend = smsc95xx_suspend,
1330 .resume = usbnet_resume, 1479 .resume = smsc95xx_resume,
1480 .reset_resume = smsc95xx_resume,
1331 .disconnect = usbnet_disconnect, 1481 .disconnect = usbnet_disconnect,
1332 .disable_hub_initiated_lpm = 1, 1482 .disable_hub_initiated_lpm = 1,
1333}; 1483};
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 86bc44977fbd..2ff9815aa27c 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -63,6 +63,7 @@
63#define INT_STS_TDFO_ (0x00001000) 63#define INT_STS_TDFO_ (0x00001000)
64#define INT_STS_RXDF_ (0x00000800) 64#define INT_STS_RXDF_ (0x00000800)
65#define INT_STS_GPIOS_ (0x000007FF) 65#define INT_STS_GPIOS_ (0x000007FF)
66#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
66 67
67#define RX_CFG (0x0C) 68#define RX_CFG (0x0C)
68#define RX_FIFO_FLUSH_ (0x00000001) 69#define RX_FIFO_FLUSH_ (0x00000001)
@@ -83,12 +84,16 @@
83#define HW_CFG_BCE_ (0x00000002) 84#define HW_CFG_BCE_ (0x00000002)
84#define HW_CFG_SRST_ (0x00000001) 85#define HW_CFG_SRST_ (0x00000001)
85 86
87#define RX_FIFO_INF (0x18)
88
86#define PM_CTRL (0x20) 89#define PM_CTRL (0x20)
90#define PM_CTL_RES_CLR_WKP_STS (0x00000200)
87#define PM_CTL_DEV_RDY_ (0x00000080) 91#define PM_CTL_DEV_RDY_ (0x00000080)
88#define PM_CTL_SUS_MODE_ (0x00000060) 92#define PM_CTL_SUS_MODE_ (0x00000060)
89#define PM_CTL_SUS_MODE_0 (0x00000000) 93#define PM_CTL_SUS_MODE_0 (0x00000000)
90#define PM_CTL_SUS_MODE_1 (0x00000020) 94#define PM_CTL_SUS_MODE_1 (0x00000020)
91#define PM_CTL_SUS_MODE_2 (0x00000060) 95#define PM_CTL_SUS_MODE_2 (0x00000040)
96#define PM_CTL_SUS_MODE_3 (0x00000060)
92#define PM_CTL_PHY_RST_ (0x00000010) 97#define PM_CTL_PHY_RST_ (0x00000010)
93#define PM_CTL_WOL_EN_ (0x00000008) 98#define PM_CTL_WOL_EN_ (0x00000008)
94#define PM_CTL_ED_EN_ (0x00000004) 99#define PM_CTL_ED_EN_ (0x00000004)
@@ -200,6 +205,11 @@
200#define WUFF (0x128) 205#define WUFF (0x128)
201 206
202#define WUCSR (0x12C) 207#define WUCSR (0x12C)
208#define WUCSR_GUE_ (0x00000200)
209#define WUCSR_WUFR_ (0x00000040)
210#define WUCSR_MPR_ (0x00000020)
211#define WUCSR_WAKE_EN_ (0x00000004)
212#define WUCSR_MPEN_ (0x00000002)
203 213
204#define COE_CR (0x130) 214#define COE_CR (0x130)
205#define Tx_COE_EN_ (0x00010000) 215#define Tx_COE_EN_ (0x00010000)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5852361032c4..e522ff70444c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
348 if (tbp[IFLA_ADDRESS] == NULL) 348 if (tbp[IFLA_ADDRESS] == NULL)
349 eth_hw_addr_random(peer); 349 eth_hw_addr_random(peer);
350 350
351 if (ifmp && (dev->ifindex != 0))
352 peer->ifindex = ifmp->ifi_index;
353
351 err = register_netdevice(peer); 354 err = register_netdevice(peer);
352 put_net(net); 355 put_net(net);
353 net = NULL; 356 net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9650c413e11f..cbf8b0625352 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
993 goto done; 993 goto done;
994 994
995 if (v & VIRTIO_NET_S_ANNOUNCE) { 995 if (v & VIRTIO_NET_S_ANNOUNCE) {
996 netif_notify_peers(vi->dev); 996 netdev_notify_peers(vi->dev);
997 virtnet_ack_link_announce(vi); 997 virtnet_ack_link_announce(vi);
998 } 998 }
999 999
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
new file mode 100644
index 000000000000..51de9edb55f5
--- /dev/null
+++ b/drivers/net/vxlan.c
@@ -0,0 +1,1219 @@
1/*
 2 * VXLAN: Virtual eXtensible Local Area Network
3 *
4 * Copyright (c) 2012 Vyatta Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * TODO
11 * - use IANA UDP port number (when defined)
12 * - IPv6 (not in RFC)
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/skbuff.h>
23#include <linux/rculist.h>
24#include <linux/netdevice.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/udp.h>
28#include <linux/igmp.h>
29#include <linux/etherdevice.h>
30#include <linux/if_ether.h>
31#include <linux/version.h>
32#include <linux/hash.h>
33#include <net/ip.h>
34#include <net/icmp.h>
35#include <net/udp.h>
36#include <net/rtnetlink.h>
37#include <net/route.h>
38#include <net/dsfield.h>
39#include <net/inet_ecn.h>
40#include <net/net_namespace.h>
41#include <net/netns/generic.h>
42
43#define VXLAN_VERSION "0.1"
44
45#define VNI_HASH_BITS 10
46#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
47#define FDB_HASH_BITS 8
48#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
49#define FDB_AGE_DEFAULT 300 /* 5 min */
50#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
51
52#define VXLAN_N_VID (1u << 24)
53#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
54/* VLAN + IP header + UDP + VXLAN */
55#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
56
57#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
58
59/* VXLAN protocol header */
60struct vxlanhdr {
61 __be32 vx_flags;
62 __be32 vx_vni;
63};
64
65/* UDP port for VXLAN traffic. */
66static unsigned int vxlan_port __read_mostly = 8472;
67module_param_named(udp_port, vxlan_port, uint, 0444);
68MODULE_PARM_DESC(udp_port, "Destination UDP port");
69
70static bool log_ecn_error = true;
71module_param(log_ecn_error, bool, 0644);
72MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
73
74/* per-net private data for this module */
75static unsigned int vxlan_net_id;
76struct vxlan_net {
77 struct socket *sock; /* UDP encap socket */
78 struct hlist_head vni_list[VNI_HASH_SIZE];
79};
80
81/* Forwarding table entry */
82struct vxlan_fdb {
83 struct hlist_node hlist; /* linked list of entries */
84 struct rcu_head rcu;
85 unsigned long updated; /* jiffies */
86 unsigned long used;
87 __be32 remote_ip;
88 u16 state; /* see ndm_state */
89 u8 eth_addr[ETH_ALEN];
90};
91
92/* Per-cpu network traffic stats */
93struct vxlan_stats {
94 u64 rx_packets;
95 u64 rx_bytes;
96 u64 tx_packets;
97 u64 tx_bytes;
98 struct u64_stats_sync syncp;
99};
100
101/* Pseudo network device */
102struct vxlan_dev {
103 struct hlist_node hlist;
104 struct net_device *dev;
105 struct vxlan_stats __percpu *stats;
106 __u32 vni; /* virtual network id */
107 __be32 gaddr; /* multicast group */
108 __be32 saddr; /* source address */
109 unsigned int link; /* link to multicast over */
110 __u8 tos; /* TOS override */
111 __u8 ttl;
112 bool learn;
113
114 unsigned long age_interval;
115 struct timer_list age_timer;
116 spinlock_t hash_lock;
117 unsigned int addrcnt;
118 unsigned int addrmax;
119 unsigned int addrexceeded;
120
121 struct hlist_head fdb_head[FDB_HASH_SIZE];
122};
123
124/* salt for hash table */
125static u32 vxlan_salt __read_mostly;
126
127static inline struct hlist_head *vni_head(struct net *net, u32 id)
128{
129 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
130
131 return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
132}
133
134/* Look up VNI in a per net namespace table */
135static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
136{
137 struct vxlan_dev *vxlan;
138 struct hlist_node *node;
139
140 hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
141 if (vxlan->vni == id)
142 return vxlan;
143 }
144
145 return NULL;
146}
147
148/* Fill in neighbour message in skbuff. */
149static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
150 const struct vxlan_fdb *fdb,
151 u32 portid, u32 seq, int type, unsigned int flags)
152{
153 unsigned long now = jiffies;
154 struct nda_cacheinfo ci;
155 struct nlmsghdr *nlh;
156 struct ndmsg *ndm;
157
158 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
159 if (nlh == NULL)
160 return -EMSGSIZE;
161
162 ndm = nlmsg_data(nlh);
163 memset(ndm, 0, sizeof(*ndm));
164 ndm->ndm_family = AF_BRIDGE;
165 ndm->ndm_state = fdb->state;
166 ndm->ndm_ifindex = vxlan->dev->ifindex;
167 ndm->ndm_flags = NTF_SELF;
168 ndm->ndm_type = NDA_DST;
169
170 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
171 goto nla_put_failure;
172
173 if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
174 goto nla_put_failure;
175
176 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
177 ci.ndm_confirmed = 0;
178 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
179 ci.ndm_refcnt = 0;
180
181 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
182 goto nla_put_failure;
183
184 return nlmsg_end(skb, nlh);
185
186nla_put_failure:
187 nlmsg_cancel(skb, nlh);
188 return -EMSGSIZE;
189}
190
191static inline size_t vxlan_nlmsg_size(void)
192{
193 return NLMSG_ALIGN(sizeof(struct ndmsg))
194 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
195 + nla_total_size(sizeof(__be32)) /* NDA_DST */
196 + nla_total_size(sizeof(struct nda_cacheinfo));
197}
198
199static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
200 const struct vxlan_fdb *fdb, int type)
201{
202 struct net *net = dev_net(vxlan->dev);
203 struct sk_buff *skb;
204 int err = -ENOBUFS;
205
206 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
207 if (skb == NULL)
208 goto errout;
209
210 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
211 if (err < 0) {
212 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
213 WARN_ON(err == -EMSGSIZE);
214 kfree_skb(skb);
215 goto errout;
216 }
217
218 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
219 return;
220errout:
221 if (err < 0)
222 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
223}
224
225/* Hash Ethernet address */
226static u32 eth_hash(const unsigned char *addr)
227{
228 u64 value = get_unaligned((u64 *)addr);
229
230 /* only want 6 bytes */
231#ifdef __BIG_ENDIAN
232 value <<= 16;
233#else
234 value >>= 16;
235#endif
236 return hash_64(value, FDB_HASH_BITS);
237}
238
239/* Hash chain to use given mac address */
240static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
241 const u8 *mac)
242{
243 return &vxlan->fdb_head[eth_hash(mac)];
244}
245
246/* Look up Ethernet address in forwarding table */
247static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
248 const u8 *mac)
249
250{
251 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
252 struct vxlan_fdb *f;
253 struct hlist_node *node;
254
255 hlist_for_each_entry_rcu(f, node, head, hlist) {
256 if (compare_ether_addr(mac, f->eth_addr) == 0)
257 return f;
258 }
259
260 return NULL;
261}
262
263/* Add new entry to forwarding table -- assumes lock held */
264static int vxlan_fdb_create(struct vxlan_dev *vxlan,
265 const u8 *mac, __be32 ip,
266 __u16 state, __u16 flags)
267{
268 struct vxlan_fdb *f;
269 int notify = 0;
270
271 f = vxlan_find_mac(vxlan, mac);
272 if (f) {
273 if (flags & NLM_F_EXCL) {
274 netdev_dbg(vxlan->dev,
275 "lost race to create %pM\n", mac);
276 return -EEXIST;
277 }
278 if (f->state != state) {
279 f->state = state;
280 f->updated = jiffies;
281 notify = 1;
282 }
283 } else {
284 if (!(flags & NLM_F_CREATE))
285 return -ENOENT;
286
287 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
288 return -ENOSPC;
289
290 netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
291 f = kmalloc(sizeof(*f), GFP_ATOMIC);
292 if (!f)
293 return -ENOMEM;
294
295 notify = 1;
296 f->remote_ip = ip;
297 f->state = state;
298 f->updated = f->used = jiffies;
299 memcpy(f->eth_addr, mac, ETH_ALEN);
300
301 ++vxlan->addrcnt;
302 hlist_add_head_rcu(&f->hlist,
303 vxlan_fdb_head(vxlan, mac));
304 }
305
306 if (notify)
307 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
308
309 return 0;
310}
311
312static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
313{
314 netdev_dbg(vxlan->dev,
315 "delete %pM\n", f->eth_addr);
316
317 --vxlan->addrcnt;
318 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
319
320 hlist_del_rcu(&f->hlist);
321 kfree_rcu(f, rcu);
322}
323
324/* Add static entry (via netlink) */
325static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
326 struct net_device *dev,
327 const unsigned char *addr, u16 flags)
328{
329 struct vxlan_dev *vxlan = netdev_priv(dev);
330 __be32 ip;
331 int err;
332
333 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
334 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
335 ndm->ndm_state);
336 return -EINVAL;
337 }
338
339 if (tb[NDA_DST] == NULL)
340 return -EINVAL;
341
342 if (nla_len(tb[NDA_DST]) != sizeof(__be32))
343 return -EAFNOSUPPORT;
344
345 ip = nla_get_be32(tb[NDA_DST]);
346
347 spin_lock_bh(&vxlan->hash_lock);
348 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
349 spin_unlock_bh(&vxlan->hash_lock);
350
351 return err;
352}
353
354/* Delete entry (via netlink) */
355static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
356 const unsigned char *addr)
357{
358 struct vxlan_dev *vxlan = netdev_priv(dev);
359 struct vxlan_fdb *f;
360 int err = -ENOENT;
361
362 spin_lock_bh(&vxlan->hash_lock);
363 f = vxlan_find_mac(vxlan, addr);
364 if (f) {
365 vxlan_fdb_destroy(vxlan, f);
366 err = 0;
367 }
368 spin_unlock_bh(&vxlan->hash_lock);
369
370 return err;
371}
372
373/* Dump forwarding table */
374static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
375 struct net_device *dev, int idx)
376{
377 struct vxlan_dev *vxlan = netdev_priv(dev);
378 unsigned int h;
379
380 for (h = 0; h < FDB_HASH_SIZE; ++h) {
381 struct vxlan_fdb *f;
382 struct hlist_node *n;
383 int err;
384
385 hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
386 if (idx < cb->args[0])
387 goto skip;
388
389 err = vxlan_fdb_info(skb, vxlan, f,
390 NETLINK_CB(cb->skb).portid,
391 cb->nlh->nlmsg_seq,
392 RTM_NEWNEIGH,
393 NLM_F_MULTI);
394 if (err < 0)
395 break;
396skip:
397 ++idx;
398 }
399 }
400
401 return idx;
402}
403
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 *
 * Called from the receive path for each decapsulated frame when
 * learning is enabled.  Refreshes an existing entry's "used" stamp,
 * repoints it if the source endpoint moved, or creates a new entry.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		/* Endpoint migrated: log (rate limited) and repoint.
		 * NOTE(review): remote_ip is rewritten without taking
		 * hash_lock — presumably relying on a single word store
		 * being atomic enough for concurrent readers; confirm. */
		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry.
		 * Plain spin_lock (not _bh): this path is reached from
		 * the UDP encap receive callback, presumably already in
		 * softirq context — confirm against callers. */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}
436
437
438/* See if multicast group is already in use by other ID */
439static bool vxlan_group_used(struct vxlan_net *vn,
440 const struct vxlan_dev *this)
441{
442 const struct vxlan_dev *vxlan;
443 struct hlist_node *node;
444 unsigned h;
445
446 for (h = 0; h < VNI_HASH_SIZE; ++h)
447 hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
448 if (vxlan == this)
449 continue;
450
451 if (!netif_running(vxlan->dev))
452 continue;
453
454 if (vxlan->gaddr == this->gaddr)
455 return true;
456 }
457
458 return false;
459}
460
/* kernel equivalent to IP_ADD_MEMBERSHIP
 *
 * Joins the device's multicast group on the per-namespace VXLAN socket
 * unless another running VXLAN device already holds the membership.
 * Returns 0 or the error from ip_mc_join_group().
 */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join
	 * (ip_mc_join_group takes RTNL itself).
	 * NOTE(review): releasing RTNL inside ndo_open opens a window
	 * where device state can change underneath us — confirm the
	 * callers tolerate this. */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
485
486
/* kernel equivalent to IP_DROP_MEMBERSHIP
 *
 * Drops the multicast group membership on the per-namespace socket,
 * but only when no other running VXLAN device still uses the group.
 * Returns 0 or the error from ip_mc_leave_group().
 */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave
	 * (same locking caveat as vxlan_join_group). */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}
511
/* Callback from net/ipv4/udp.c to receive packets (udp_sk encap_rcv hook).
 *
 * Return value follows the encap_rcv contract: 0 means the skb was
 * consumed (delivered or dropped); a positive return hands the packet
 * back to normal UDP processing, which is why the UDP header is pushed
 * back on before returning 1.
 */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	/* Pull VXLAN header and adjust any complete checksum past it. */
	__skb_pull(skb, sizeof(struct vxlanhdr));
	skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	/* Inner Ethernet header must be linear for eth_type_trans. */
	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	/* Re-examine inner Ethernet packet; oip still points at the
	 * outer IP header (network header not yet reset). */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	/* Learn outer-src-IP <-> inner-src-MAC mapping if enabled. */
	if (vxlan->learn)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	/* Re-scope the skb to the vxlan netdev and inner headers. */
	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* Propagate ECN from outer to inner header; err > 1 means the
	 * combination is invalid and the frame must be dropped. */
	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back so regular UDP processing can continue. */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
602
603/* Extract dsfield from inner protocol */
604static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
605 const struct sk_buff *skb)
606{
607 if (skb->protocol == htons(ETH_P_IP))
608 return iph->tos;
609 else if (skb->protocol == htons(ETH_P_IPV6))
610 return ipv6_get_dsfield((const struct ipv6hdr *)iph);
611 else
612 return 0;
613}
614
615/* Propogate ECN bits out */
616static inline u8 vxlan_ecn_encap(u8 tos,
617 const struct iphdr *iph,
618 const struct sk_buff *skb)
619{
620 u8 inner = vxlan_get_dsfield(iph, skb);
621
622 return INET_ECN_encapsulate(tos, inner);
623}
624
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * source port is based on hash of flow if available
 * otherwise use a random value
 *
 * Encapsulates the Ethernet frame in VXLAN+UDP+IP, routes the outer
 * packet, and hands it to ip_local_out().  Always returns NETDEV_TX_OK;
 * failures are accounted in dev->stats and the skb is freed.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct ethhdr *eth;
	const struct iphdr *old_iph;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	struct vxlan_fdb *f;
	unsigned int pkt_len = skb->len;	/* length before encap, for tx stats */
	u32 hash;
	__be32 dst;
	/* NOTE(review): df stays 0 for the outer header even though the
	 * comment above says DF is inherited from the inner header —
	 * confirm whether inheritance was intended here. */
	__be16 df = 0;
	__u8 tos, ttl;
	int err;

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	eth = (void *)skb->data;
	old_iph = ip_hdr(skb);

	/* Pick destination endpoint: FDB hit for unicast, else the
	 * configured multicast group, else drop. */
	if (!is_multicast_ether_addr(eth->h_dest) &&
	    (f = vxlan_find_mac(vxlan, eth->h_dest)))
		dst = f->remote_ip;
	else if (vxlan->gaddr) {
		dst = vxlan->gaddr;
	} else
		goto drop;

	/* Unset TTL defaults to 1 for multicast destinations. */
	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	/* tos == 1 appears to be the "inherit from inner" sentinel —
	 * TODO confirm against the option parsing. */
	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	hash = skb_get_rxhash(skb);

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
				 vxlan->saddr, vxlan->vni,
				 RT_TOS(tos), vxlan->link);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	/* Routing the outer packet back to ourselves would loop. */
	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	/* Scrub inner-IP control state and attach the outer route. */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Build VXLAN header: VNI in the upper 24 bits of vx_vni. */
	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	/* NOTE(review): this stores a host-order 32-bit hash (or
	 * random32()) into a __be16 field with no htons() and no range
	 * clamping — looks like a byte-order/truncation bug; confirm
	 * against later upstream source-port selection. */
	uh->source = hash ? :random32();

	uh->len = htons(skb->len);
	uh->check = 0;		/* outer UDP checksum disabled */

	/* Build outer IPv4 header; tot_len/checksum are filled in by
	 * the IP output path. */
	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_UDP;
	iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);

	/* See __IPTUNNEL_XMIT */
	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
751
/* Walk the forwarding table and purge stale entries (ageing timer).
 *
 * Runs from a deferrable timer.  Non-permanent entries whose last-used
 * stamp plus the configured ageing interval has passed are destroyed;
 * the timer is rearmed for the earliest remaining expiry (or the
 * default interval if the table is empty).
 */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	/* Device went down; vxlan_stop() will flush and stop the timer. */
	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			/* Static (user-added) entries never age out. */
			if (f->state == NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
788
789/* Setup stats when device is created */
790static int vxlan_init(struct net_device *dev)
791{
792 struct vxlan_dev *vxlan = netdev_priv(dev);
793
794 vxlan->stats = alloc_percpu(struct vxlan_stats);
795 if (!vxlan->stats)
796 return -ENOMEM;
797
798 return 0;
799}
800
801/* Start ageing timer and join group when device is brought up */
802static int vxlan_open(struct net_device *dev)
803{
804 struct vxlan_dev *vxlan = netdev_priv(dev);
805 int err;
806
807 if (vxlan->gaddr) {
808 err = vxlan_join_group(dev);
809 if (err)
810 return err;
811 }
812
813 if (vxlan->age_interval)
814 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
815
816 return 0;
817}
818
819/* Purge the forwarding table */
820static void vxlan_flush(struct vxlan_dev *vxlan)
821{
822 unsigned h;
823
824 spin_lock_bh(&vxlan->hash_lock);
825 for (h = 0; h < FDB_HASH_SIZE; ++h) {
826 struct hlist_node *p, *n;
827 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
828 struct vxlan_fdb *f
829 = container_of(p, struct vxlan_fdb, hlist);
830 vxlan_fdb_destroy(vxlan, f);
831 }
832 }
833 spin_unlock_bh(&vxlan->hash_lock);
834}
835
836/* Cleanup timer and forwarding table on shutdown */
837static int vxlan_stop(struct net_device *dev)
838{
839 struct vxlan_dev *vxlan = netdev_priv(dev);
840
841 if (vxlan->gaddr)
842 vxlan_leave_group(dev);
843
844 del_timer_sync(&vxlan->age_timer);
845
846 vxlan_flush(vxlan);
847
848 return 0;
849}
850
851/* Merge per-cpu statistics */
852static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
853 struct rtnl_link_stats64 *stats)
854{
855 struct vxlan_dev *vxlan = netdev_priv(dev);
856 struct vxlan_stats tmp, sum = { 0 };
857 unsigned int cpu;
858
859 for_each_possible_cpu(cpu) {
860 unsigned int start;
861 const struct vxlan_stats *stats
862 = per_cpu_ptr(vxlan->stats, cpu);
863
864 do {
865 start = u64_stats_fetch_begin_bh(&stats->syncp);
866 memcpy(&tmp, stats, sizeof(tmp));
867 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
868
869 sum.tx_bytes += tmp.tx_bytes;
870 sum.tx_packets += tmp.tx_packets;
871 sum.rx_bytes += tmp.rx_bytes;
872 sum.rx_packets += tmp.rx_packets;
873 }
874
875 stats->tx_bytes = sum.tx_bytes;
876 stats->tx_packets = sum.tx_packets;
877 stats->rx_bytes = sum.rx_bytes;
878 stats->rx_packets = sum.rx_packets;
879
880 stats->multicast = dev->stats.multicast;
881 stats->rx_length_errors = dev->stats.rx_length_errors;
882 stats->rx_frame_errors = dev->stats.rx_frame_errors;
883 stats->rx_errors = dev->stats.rx_errors;
884
885 stats->tx_dropped = dev->stats.tx_dropped;
886 stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
887 stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
888 stats->collisions = dev->stats.collisions;
889 stats->tx_errors = dev->stats.tx_errors;
890
891 return stats;
892}
893
/* Stub, nothing needs to be done.
 * ndo_set_rx_mode hook: the vxlan device has no hardware filter to
 * program, but the core requires the callback to exist. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
898
/* Device operations: lifecycle, transmit, stats, and the FDB netlink
 * hooks (bridge fdb add/del/show) that expose the forwarding table. */
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
913
/* Info for udev, that this is a virtual tunnel endpoint
 * (exposed as DEVTYPE=vxlan in sysfs/uevents). */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
918
919static void vxlan_free(struct net_device *dev)
920{
921 struct vxlan_dev *vxlan = netdev_priv(dev);
922
923 free_percpu(vxlan->stats);
924 free_netdev(dev);
925}
926
/* Initialize the device structure (rtnl_link_ops .setup).
 * Configures the netdev as an Ethernet-like virtual tunnel endpoint
 * and initializes the FDB hash table and ageing timer.
 */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;

	/* NOTE(review): random MAC is assigned before ether_setup()
	 * fills in the Ethernet defaults — confirm the ordering is
	 * intentional. */
	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	/* Virtual device: no tx queue, lockless xmit, and the device
	 * cannot move between network namespaces. */
	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;

	spin_lock_init(&vxlan->hash_lock);

	/* Deferrable: ageing need not wake an idle CPU exactly on time. */
	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
956
/* Netlink attribute validation policy for IFLA_VXLAN_* options.
 * GROUP/LOCAL are raw IPv4 addresses, validated by length only. */
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
};
968
969static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
970{
971 if (tb[IFLA_ADDRESS]) {
972 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
973 pr_debug("invalid link address (not ethernet)\n");
974 return -EINVAL;
975 }
976
977 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
978 pr_debug("invalid all zero ethernet address\n");
979 return -EADDRNOTAVAIL;
980 }
981 }
982
983 if (!data)
984 return -EINVAL;
985
986 if (data[IFLA_VXLAN_ID]) {
987 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
988 if (id >= VXLAN_VID_MASK)
989 return -ERANGE;
990 }
991
992 if (data[IFLA_VXLAN_GROUP]) {
993 __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
994 if (!IN_MULTICAST(ntohl(gaddr))) {
995 pr_debug("group address is not IPv4 multicast\n");
996 return -EADDRNOTAVAIL;
997 }
998 }
999 return 0;
1000}
1001
1002static int vxlan_newlink(struct net *net, struct net_device *dev,
1003 struct nlattr *tb[], struct nlattr *data[])
1004{
1005 struct vxlan_dev *vxlan = netdev_priv(dev);
1006 __u32 vni;
1007 int err;
1008
1009 if (!data[IFLA_VXLAN_ID])
1010 return -EINVAL;
1011
1012 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1013 if (vxlan_find_vni(net, vni)) {
1014 pr_info("duplicate VNI %u\n", vni);
1015 return -EEXIST;
1016 }
1017 vxlan->vni = vni;
1018
1019 if (data[IFLA_VXLAN_GROUP])
1020 vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
1021
1022 if (data[IFLA_VXLAN_LOCAL])
1023 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
1024
1025 if (data[IFLA_VXLAN_LINK]) {
1026 vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);
1027
1028 if (!tb[IFLA_MTU]) {
1029 struct net_device *lowerdev;
1030 lowerdev = __dev_get_by_index(net, vxlan->link);
1031 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1032 }
1033 }
1034
1035 if (data[IFLA_VXLAN_TOS])
1036 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
1037
1038 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
1039 vxlan->learn = true;
1040
1041 if (data[IFLA_VXLAN_AGEING])
1042 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
1043 else
1044 vxlan->age_interval = FDB_AGE_DEFAULT;
1045
1046 if (data[IFLA_VXLAN_LIMIT])
1047 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
1048
1049 err = register_netdevice(dev);
1050 if (!err)
1051 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
1052
1053 return err;
1054}
1055
1056static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1057{
1058 struct vxlan_dev *vxlan = netdev_priv(dev);
1059
1060 hlist_del_rcu(&vxlan->hlist);
1061
1062 unregister_netdevice_queue(dev, head);
1063}
1064
1065static size_t vxlan_get_size(const struct net_device *dev)
1066{
1067
1068 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
1069 nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
1070 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
1071 nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
1072 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
1073 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
1074 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
1075 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
1076 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
1077 0;
1078}
1079
1080static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1081{
1082 const struct vxlan_dev *vxlan = netdev_priv(dev);
1083
1084 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
1085 goto nla_put_failure;
1086
1087 if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
1088 goto nla_put_failure;
1089
1090 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
1091 goto nla_put_failure;
1092
1093 if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
1094 goto nla_put_failure;
1095
1096 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
1097 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
1098 nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
1099 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
1100 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
1101 goto nla_put_failure;
1102
1103 return 0;
1104
1105nla_put_failure:
1106 return -EMSGSIZE;
1107}
1108
/* rtnetlink glue: registers the "vxlan" link kind so devices can be
 * created/configured/destroyed via `ip link`. */
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
1121
/* Per-namespace init: create and bind the shared UDP socket used for
 * VXLAN encapsulation receive, hook it into the UDP encap path, and
 * initialize the per-namespace VNI hash table.
 */
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int rc;
	unsigned h;

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		return rc;
	}
	/* Put in proper namespace (sock_create_kern made it in init_net). */
	sk = vn->sock->sk;
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		/* sk_release_kernel pairs with sk_change_net above. */
		sk_release_kernel(sk);
		vn->sock = NULL;
		return rc;
	}

	/* Disable multicast loopback (we must not receive our own tx). */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket: UDP will hand our
	 * packets to vxlan_udp_encap_recv() instead of normal delivery. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);

	return 0;
}
1168
1169static __net_exit void vxlan_exit_net(struct net *net)
1170{
1171 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1172
1173 if (vn->sock) {
1174 sk_release_kernel(vn->sock->sk);
1175 vn->sock = NULL;
1176 }
1177}
1178
/* Per-network-namespace state registration: one vxlan_net (socket +
 * VNI hash) is allocated per namespace. */
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
1185
1186static int __init vxlan_init_module(void)
1187{
1188 int rc;
1189
1190 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
1191
1192 rc = register_pernet_device(&vxlan_net_ops);
1193 if (rc)
1194 goto out1;
1195
1196 rc = rtnl_link_register(&vxlan_link_ops);
1197 if (rc)
1198 goto out2;
1199
1200 return 0;
1201
1202out2:
1203 unregister_pernet_device(&vxlan_net_ops);
1204out1:
1205 return rc;
1206}
1207module_init(vxlan_init_module);
1208
/* Module exit: unregister in reverse order of registration — link
 * kind first (destroys remaining devices), then pernet state. */
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);
1215
/* Module metadata and the "vxlan" rtnl link alias for autoloading. */
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 025426132754..9c34d2fccfac 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
222 struct sk_buff *skb; 222 struct sk_buff *skb;
223 const struct i2400m_tlv_detailed_device_info *ddi; 223 const struct i2400m_tlv_detailed_device_info *ddi;
224 struct net_device *net_dev = i2400m->wimax_dev.net_dev; 224 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
225 const unsigned char zeromac[ETH_ALEN] = { 0 };
226 225
227 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 226 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
228 skb = i2400m_get_device_info(i2400m); 227 skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
244 "to that of boot mode's\n"); 243 "to that of boot mode's\n");
245 dev_warn(dev, "device reports %pM\n", ddi->mac_address); 244 dev_warn(dev, "device reports %pM\n", ddi->mac_address);
246 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr); 245 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
247 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) 246 if (is_zero_ether_addr(ddi->mac_address))
248 dev_err(dev, "device reports an invalid MAC address, " 247 dev_err(dev, "device reports an invalid MAC address, "
249 "not updating\n"); 248 "not updating\n");
250 else { 249 else {
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 689a71c1af71..154a4965be4f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1661} 1661}
1662 1662
1663/* Put adm8211_tx_hdr on skb and transmit */ 1663/* Put adm8211_tx_hdr on skb and transmit */
1664static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1664static void adm8211_tx(struct ieee80211_hw *dev,
1665 struct ieee80211_tx_control *control,
1666 struct sk_buff *skb)
1665{ 1667{
1666 struct adm8211_tx_hdr *txhdr; 1668 struct adm8211_tx_hdr *txhdr;
1667 size_t payload_len, hdrlen; 1669 size_t payload_len, hdrlen;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index c586f78c307f..3cd05a7173f6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -87,7 +87,6 @@ static struct pci_driver airo_driver = {
87/* Include Wireless Extension definition and check version - Jean II */ 87/* Include Wireless Extension definition and check version - Jean II */
88#include <linux/wireless.h> 88#include <linux/wireless.h>
89#define WIRELESS_SPY /* enable iwspy support */ 89#define WIRELESS_SPY /* enable iwspy support */
90#include <net/iw_handler.h> /* New driver API */
91 90
92#define CISCO_EXT /* enable Cisco extensions */ 91#define CISCO_EXT /* enable Cisco extensions */
93#ifdef CISCO_EXT 92#ifdef CISCO_EXT
@@ -5984,13 +5983,11 @@ static int airo_set_wap(struct net_device *dev,
5984 Cmd cmd; 5983 Cmd cmd;
5985 Resp rsp; 5984 Resp rsp;
5986 APListRid APList_rid; 5985 APListRid APList_rid;
5987 static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5988 static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
5989 5986
5990 if (awrq->sa_family != ARPHRD_ETHER) 5987 if (awrq->sa_family != ARPHRD_ETHER)
5991 return -EINVAL; 5988 return -EINVAL;
5992 else if (!memcmp(any, awrq->sa_data, ETH_ALEN) || 5989 else if (is_broadcast_ether_addr(awrq->sa_data) ||
5993 !memcmp(off, awrq->sa_data, ETH_ALEN)) { 5990 is_zero_ether_addr(awrq->sa_data)) {
5994 memset(&cmd, 0, sizeof(cmd)); 5991 memset(&cmd, 0, sizeof(cmd));
5995 cmd.cmd=CMD_LOSE_SYNC; 5992 cmd.cmd=CMD_LOSE_SYNC;
5996 if (down_interruptible(&local->sem)) 5993 if (down_interruptible(&local->sem))
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 88b8d64c90f1..99b9ddf21273 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -498,36 +498,6 @@ exit:
498 return ret; 498 return ret;
499} 499}
500 500
501#define HEX2STR_BUFFERS 4
502#define HEX2STR_MAX_LEN 64
503
504/* Convert binary data into hex string */
505static char *hex2str(void *buf, size_t len)
506{
507 static atomic_t a = ATOMIC_INIT(0);
508 static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1];
509 char *ret = bufs[atomic_inc_return(&a) & (HEX2STR_BUFFERS - 1)];
510 char *obuf = ret;
511 u8 *ibuf = buf;
512
513 if (len > HEX2STR_MAX_LEN)
514 len = HEX2STR_MAX_LEN;
515
516 if (len == 0)
517 goto exit;
518
519 while (len--) {
520 obuf = hex_byte_pack(obuf, *ibuf++);
521 *obuf++ = '-';
522 }
523 obuf--;
524
525exit:
526 *obuf = '\0';
527
528 return ret;
529}
530
531/* LED trigger */ 501/* LED trigger */
532static int tx_activity; 502static int tx_activity;
533static void at76_ledtrig_tx_timerfunc(unsigned long data); 503static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -1004,9 +974,9 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
1004 WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN; 974 WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
1005 975
1006 for (i = 0; i < WEP_KEYS; i++) 976 for (i = 0; i < WEP_KEYS; i++)
1007 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s", 977 at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %*phD",
1008 wiphy_name(priv->hw->wiphy), i, 978 wiphy_name(priv->hw->wiphy), i,
1009 hex2str(m->wep_default_keyvalue[i], key_len)); 979 key_len, m->wep_default_keyvalue[i]);
1010exit: 980exit:
1011 kfree(m); 981 kfree(m);
1012} 982}
@@ -1031,7 +1001,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1031 at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration " 1001 at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
1032 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d " 1002 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
1033 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d " 1003 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
1034 "current_bssid %pM current_essid %s current_bss_type %d " 1004 "current_bssid %pM current_essid %*phD current_bss_type %d "
1035 "pm_mode %d ibss_change %d res %d " 1005 "pm_mode %d ibss_change %d res %d "
1036 "multi_domain_capability_implemented %d " 1006 "multi_domain_capability_implemented %d "
1037 "international_roaming %d country_string %.3s", 1007 "international_roaming %d country_string %.3s",
@@ -1041,7 +1011,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1041 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window), 1011 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
1042 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period, 1012 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
1043 m->CFP_period, m->current_bssid, 1013 m->CFP_period, m->current_bssid,
1044 hex2str(m->current_essid, IW_ESSID_MAX_SIZE), 1014 IW_ESSID_MAX_SIZE, m->current_essid,
1045 m->current_bss_type, m->power_mgmt_mode, m->ibss_change, 1015 m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
1046 m->res, m->multi_domain_capability_implemented, 1016 m->res, m->multi_domain_capability_implemented,
1047 m->multi_domain_capability_enabled, m->country_string); 1017 m->multi_domain_capability_enabled, m->country_string);
@@ -1069,7 +1039,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1069 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d " 1039 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
1070 "scan_type %d scan_channel %d probe_delay %u " 1040 "scan_type %d scan_channel %d probe_delay %u "
1071 "min_channel_time %d max_channel_time %d listen_int %d " 1041 "min_channel_time %d max_channel_time %d listen_int %d "
1072 "desired_ssid %s desired_bssid %pM desired_bsstype %d", 1042 "desired_ssid %*phD desired_bssid %pM desired_bsstype %d",
1073 wiphy_name(priv->hw->wiphy), 1043 wiphy_name(priv->hw->wiphy),
1074 le32_to_cpu(m->max_tx_msdu_lifetime), 1044 le32_to_cpu(m->max_tx_msdu_lifetime),
1075 le32_to_cpu(m->max_rx_lifetime), 1045 le32_to_cpu(m->max_rx_lifetime),
@@ -1080,7 +1050,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1080 le16_to_cpu(m->min_channel_time), 1050 le16_to_cpu(m->min_channel_time),
1081 le16_to_cpu(m->max_channel_time), 1051 le16_to_cpu(m->max_channel_time),
1082 le16_to_cpu(m->listen_interval), 1052 le16_to_cpu(m->listen_interval),
1083 hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE), 1053 IW_ESSID_MAX_SIZE, m->desired_ssid,
1084 m->desired_bssid, m->desired_bsstype); 1054 m->desired_bssid, m->desired_bsstype);
1085exit: 1055exit:
1086 kfree(m); 1056 kfree(m);
@@ -1160,13 +1130,13 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
1160 goto exit; 1130 goto exit;
1161 } 1131 }
1162 1132
1163 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s", 1133 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %*phD",
1164 wiphy_name(priv->hw->wiphy), 1134 wiphy_name(priv->hw->wiphy),
1165 hex2str(m->channel_list, sizeof(m->channel_list))); 1135 (int)sizeof(m->channel_list), m->channel_list);
1166 1136
1167 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s", 1137 at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %*phD",
1168 wiphy_name(priv->hw->wiphy), 1138 wiphy_name(priv->hw->wiphy),
1169 hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel))); 1139 (int)sizeof(m->tx_powerlevel), m->tx_powerlevel);
1170exit: 1140exit:
1171 kfree(m); 1141 kfree(m);
1172} 1142}
@@ -1369,9 +1339,9 @@ static int at76_startup_device(struct at76_priv *priv)
1369 int ret; 1339 int ret;
1370 1340
1371 at76_dbg(DBG_PARAMS, 1341 at76_dbg(DBG_PARAMS,
1372 "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d " 1342 "%s param: ssid %.*s (%*phD) mode %s ch %d wep %s key %d "
1373 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size, 1343 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
1374 priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE), 1344 priv->essid, IW_ESSID_MAX_SIZE, priv->essid,
1375 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra", 1345 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
1376 priv->channel, priv->wep_enabled ? "enabled" : "disabled", 1346 priv->channel, priv->wep_enabled ? "enabled" : "disabled",
1377 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]); 1347 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
@@ -1726,7 +1696,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
1726 ieee80211_wake_queues(priv->hw); 1696 ieee80211_wake_queues(priv->hw);
1727} 1697}
1728 1698
1729static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1699static void at76_mac80211_tx(struct ieee80211_hw *hw,
1700 struct ieee80211_tx_control *control,
1701 struct sk_buff *skb)
1730{ 1702{
1731 struct at76_priv *priv = hw->priv; 1703 struct at76_priv *priv = hw->priv;
1732 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 1704 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 6169fbd23ed1..4521342c62cc 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -159,6 +159,7 @@ struct ath_common {
159 159
160 bool btcoex_enabled; 160 bool btcoex_enabled;
161 bool disable_ani; 161 bool disable_ani;
162 bool antenna_diversity;
162}; 163};
163 164
164struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, 165struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 64a453a6dfe4..3150def17193 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1331,7 +1331,6 @@ struct ath5k_hw {
1331 unsigned int nexttbtt; /* next beacon time in TU */ 1331 unsigned int nexttbtt; /* next beacon time in TU */
1332 struct ath5k_txq *cabq; /* content after beacon */ 1332 struct ath5k_txq *cabq; /* content after beacon */
1333 1333
1334 int power_level; /* Requested tx power in dBm */
1335 bool assoc; /* associate state */ 1334 bool assoc; /* associate state */
1336 bool enable_beacon; /* true if beacons are on */ 1335 bool enable_beacon; /* true if beacons are on */
1337 1336
@@ -1425,6 +1424,7 @@ struct ath5k_hw {
1425 /* Value in dB units */ 1424 /* Value in dB units */
1426 s16 txp_cck_ofdm_pwr_delta; 1425 s16 txp_cck_ofdm_pwr_delta;
1427 bool txp_setup; 1426 bool txp_setup;
1427 int txp_requested; /* Requested tx power in dBm */
1428 } ah_txpower; 1428 } ah_txpower;
1429 1429
1430 struct ath5k_nfcal_hist ah_nfcal_hist; 1430 struct ath5k_nfcal_hist ah_nfcal_hist;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 2aab20ee9f38..9fd6d9a9942e 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
723 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 723 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
724 ieee80211_get_hdrlen_from_skb(skb), padsize, 724 ieee80211_get_hdrlen_from_skb(skb), padsize,
725 get_hw_packet_type(skb), 725 get_hw_packet_type(skb),
726 (ah->power_level * 2), 726 (ah->ah_txpower.txp_requested * 2),
727 hw_rate, 727 hw_rate,
728 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, 728 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
729 cts_rate, duration); 729 cts_rate, duration);
@@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1778 ds->ds_data = bf->skbaddr; 1778 ds->ds_data = bf->skbaddr;
1779 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 1779 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1780 ieee80211_get_hdrlen_from_skb(skb), padsize, 1780 ieee80211_get_hdrlen_from_skb(skb), padsize,
1781 AR5K_PKT_TYPE_BEACON, (ah->power_level * 2), 1781 AR5K_PKT_TYPE_BEACON,
1782 (ah->ah_txpower.txp_requested * 2),
1782 ieee80211_get_tx_rate(ah->hw, info)->hw_value, 1783 ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1783 1, AR5K_TXKEYIX_INVALID, 1784 1, AR5K_TXKEYIX_INVALID,
1784 antenna, flags, 0, 0); 1785 antenna, flags, 0, 0);
@@ -2445,6 +2446,7 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2445 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 2446 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2446 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2447 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2447 IEEE80211_HW_SIGNAL_DBM | 2448 IEEE80211_HW_SIGNAL_DBM |
2449 IEEE80211_HW_MFP_CAPABLE |
2448 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2450 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2449 2451
2450 hw->wiphy->interface_modes = 2452 hw->wiphy->interface_modes =
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d56453e43d7e..7a28538e6e05 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -55,7 +55,8 @@
55\********************/ 55\********************/
56 56
57static void 57static void
58ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 58ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
59 struct sk_buff *skb)
59{ 60{
60 struct ath5k_hw *ah = hw->priv; 61 struct ath5k_hw *ah = hw->priv;
61 u16 qnum = skb_get_queue_mapping(skb); 62 u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
207 } 208 }
208 209
209 if ((changed & IEEE80211_CONF_CHANGE_POWER) && 210 if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
210 (ah->power_level != conf->power_level)) { 211 (ah->ah_txpower.txp_requested != conf->power_level)) {
211 ah->power_level = conf->power_level; 212 ah->ah_txpower.txp_requested = conf->power_level;
212 213
213 /* Half dB steps */ 214 /* Half dB steps */
214 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); 215 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -488,6 +489,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
488 if (ath5k_modparam_nohwcrypt) 489 if (ath5k_modparam_nohwcrypt)
489 return -EOPNOTSUPP; 490 return -EOPNOTSUPP;
490 491
492 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
493 return -EOPNOTSUPP;
494
491 if (vif->type == NL80211_IFTYPE_ADHOC && 495 if (vif->type == NL80211_IFTYPE_ADHOC &&
492 (key->cipher == WLAN_CIPHER_SUITE_TKIP || 496 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
493 key->cipher == WLAN_CIPHER_SUITE_CCMP) && 497 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
@@ -522,7 +526,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
522 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) 526 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
523 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 527 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
524 if (key->cipher == WLAN_CIPHER_SUITE_CCMP) 528 if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
525 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 529 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
526 ret = 0; 530 ret = 0;
527 } 531 }
528 break; 532 break;
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 8b71a2d947e0..ab363f34b4df 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1975,11 +1975,13 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1975 spur_delta_phase = (spur_offset << 18) / 25; 1975 spur_delta_phase = (spur_offset << 18) / 25;
1976 spur_freq_sigma_delta = (spur_delta_phase >> 10); 1976 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1977 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2; 1977 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
1978 break;
1978 case AR5K_BWMODE_5MHZ: 1979 case AR5K_BWMODE_5MHZ:
1979 /* Both sample_freq and chip_freq are 10MHz (?) */ 1980 /* Both sample_freq and chip_freq are 10MHz (?) */
1980 spur_delta_phase = (spur_offset << 19) / 25; 1981 spur_delta_phase = (spur_offset << 19) / 25;
1981 spur_freq_sigma_delta = (spur_delta_phase >> 10); 1982 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1982 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4; 1983 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
1984 break;
1983 default: 1985 default:
1984 if (channel->band == IEEE80211_BAND_5GHZ) { 1986 if (channel->band == IEEE80211_BAND_5GHZ) {
1985 /* Both sample_freq and chip_freq are 40MHz */ 1987 /* Both sample_freq and chip_freq are 40MHz */
@@ -3516,6 +3518,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
3516{ 3518{
3517 unsigned int i; 3519 unsigned int i;
3518 u16 *rates; 3520 u16 *rates;
3521 s16 rate_idx_scaled = 0;
3519 3522
3520 /* max_pwr is power level we got from driver/user in 0.5dB 3523 /* max_pwr is power level we got from driver/user in 0.5dB
3521 * units, switch to 0.25dB units so we can compare */ 3524 * units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3565,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
3562 for (i = 8; i <= 15; i++) 3565 for (i = 8; i <= 15; i++)
3563 rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta; 3566 rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
3564 3567
3568 /* Save min/max and current tx power for this channel
3569 * in 0.25dB units.
3570 *
3571 * Note: We use rates[0] for current tx power because
3572 * it covers most of the rates, in most cases. It's our
3573 * tx power limit and what the user expects to see. */
3574 ah->ah_txpower.txp_min_pwr = 2 * rates[7];
3575 ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
3576
3577 /* Set max txpower for correct OFDM operation on all rates
3578 * -that is the txpower for 54Mbit-, it's used for the PAPD
3579 * gain probe and it's in 0.5dB units */
3580 ah->ah_txpower.txp_ofdm = rates[7];
3581
3565 /* Now that we have all rates setup use table offset to 3582 /* Now that we have all rates setup use table offset to
3566 * match the power range set by user with the power indices 3583 * match the power range set by user with the power indices
3567 * on PCDAC/PDADC table */ 3584 * on PCDAC/PDADC table */
3568 for (i = 0; i < 16; i++) { 3585 for (i = 0; i < 16; i++) {
3569 rates[i] += ah->ah_txpower.txp_offset; 3586 rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
3570 /* Don't get out of bounds */ 3587 /* Don't get out of bounds */
3571 if (rates[i] > 63) 3588 if (rate_idx_scaled > 63)
3572 rates[i] = 63; 3589 rate_idx_scaled = 63;
3590 if (rate_idx_scaled < 0)
3591 rate_idx_scaled = 0;
3592 rates[i] = rate_idx_scaled;
3573 } 3593 }
3574
3575 /* Min/max in 0.25dB units */
3576 ah->ah_txpower.txp_min_pwr = 2 * rates[7];
3577 ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
3578 ah->ah_txpower.txp_ofdm = rates[7];
3579} 3594}
3580 3595
3581 3596
@@ -3639,10 +3654,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3639 if (!ah->ah_txpower.txp_setup || 3654 if (!ah->ah_txpower.txp_setup ||
3640 (channel->hw_value != curr_channel->hw_value) || 3655 (channel->hw_value != curr_channel->hw_value) ||
3641 (channel->center_freq != curr_channel->center_freq)) { 3656 (channel->center_freq != curr_channel->center_freq)) {
3642 /* Reset TX power values */ 3657 /* Reset TX power values but preserve requested
3658 * tx power from above */
3659 int requested_txpower = ah->ah_txpower.txp_requested;
3660
3643 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 3661 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
3662
3663 /* Restore TPC setting and requested tx power */
3644 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 3664 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
3645 3665
3666 ah->ah_txpower.txp_requested = requested_txpower;
3667
3646 /* Calculate the powertable */ 3668 /* Calculate the powertable */
3647 ret = ath5k_setup_channel_powertable(ah, channel, 3669 ret = ath5k_setup_channel_powertable(ah, channel,
3648 ee_mode, type); 3670 ee_mode, type);
@@ -3789,8 +3811,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3789 * RF buffer settings on 5211/5212+ so that we 3811 * RF buffer settings on 5211/5212+ so that we
3790 * properly set curve indices. 3812 * properly set curve indices.
3791 */ 3813 */
3792 ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ? 3814 ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
3793 ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER); 3815 ah->ah_txpower.txp_requested * 2 :
3816 AR5K_TUNE_MAX_TXPOWER);
3794 if (ret) 3817 if (ret)
3795 return ret; 3818 return ret;
3796 3819
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 86aeef4b9d7e..7089f8160ad5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1488,7 +1488,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1488} 1488}
1489 1489
1490static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy, 1490static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
1491 char *name, 1491 const char *name,
1492 enum nl80211_iftype type, 1492 enum nl80211_iftype type,
1493 u32 *flags, 1493 u32 *flags,
1494 struct vif_params *params) 1494 struct vif_params *params)
@@ -3477,7 +3477,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
3477 ar->num_vif--; 3477 ar->num_vif--;
3478} 3478}
3479 3479
3480struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name, 3480struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
3481 enum nl80211_iftype type, 3481 enum nl80211_iftype type,
3482 u8 fw_vif_idx, u8 nw_type) 3482 u8 fw_vif_idx, u8 nw_type)
3483{ 3483{
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 56b1ebe79812..780f77775a91 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -25,7 +25,7 @@ enum ath6kl_cfg_suspend_mode {
25 ATH6KL_CFG_SUSPEND_SCHED_SCAN, 25 ATH6KL_CFG_SUSPEND_SCHED_SCAN,
26}; 26};
27 27
28struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name, 28struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
29 enum nl80211_iftype type, 29 enum nl80211_iftype type,
30 u8 fw_vif_idx, u8 nw_type); 30 u8 fw_vif_idx, u8 nw_type);
31void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, 31void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ff007f500feb..e09ec40ce71a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -237,7 +237,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
237 entry_cck->fir_step_level); 237 entry_cck->fir_step_level);
238 238
239 /* Skip MRC CCK for pre AR9003 families */ 239 /* Skip MRC CCK for pre AR9003 families */
240 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah)) 240 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
241 return; 241 return;
242 242
243 if (aniState->mrcCCK != entry_cck->mrc_cck_on) 243 if (aniState->mrcCCK != entry_cck->mrc_cck_on)
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index bbcfeb3b2a60..664844c5d3d5 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -311,6 +311,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
311 struct ath_ant_comb *antcomb, 311 struct ath_ant_comb *antcomb,
312 int alt_ratio) 312 int alt_ratio)
313{ 313{
314 ant_conf->main_gaintb = 0;
315 ant_conf->alt_gaintb = 0;
316
314 if (ant_conf->div_group == 0) { 317 if (ant_conf->div_group == 0) {
315 /* Adjust the fast_div_bias based on main and alt lna conf */ 318 /* Adjust the fast_div_bias based on main and alt lna conf */
316 switch ((ant_conf->main_lna_conf << 4) | 319 switch ((ant_conf->main_lna_conf << 4) |
@@ -360,18 +363,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
360 ant_conf->alt_lna_conf) { 363 ant_conf->alt_lna_conf) {
361 case 0x01: /* A-B LNA2 */ 364 case 0x01: /* A-B LNA2 */
362 ant_conf->fast_div_bias = 0x1; 365 ant_conf->fast_div_bias = 0x1;
363 ant_conf->main_gaintb = 0;
364 ant_conf->alt_gaintb = 0;
365 break; 366 break;
366 case 0x02: /* A-B LNA1 */ 367 case 0x02: /* A-B LNA1 */
367 ant_conf->fast_div_bias = 0x1; 368 ant_conf->fast_div_bias = 0x1;
368 ant_conf->main_gaintb = 0;
369 ant_conf->alt_gaintb = 0;
370 break; 369 break;
371 case 0x03: /* A-B A+B */ 370 case 0x03: /* A-B A+B */
372 ant_conf->fast_div_bias = 0x1; 371 ant_conf->fast_div_bias = 0x1;
373 ant_conf->main_gaintb = 0;
374 ant_conf->alt_gaintb = 0;
375 break; 372 break;
376 case 0x10: /* LNA2 A-B */ 373 case 0x10: /* LNA2 A-B */
377 if (!(antcomb->scan) && 374 if (!(antcomb->scan) &&
@@ -379,13 +376,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
379 ant_conf->fast_div_bias = 0x3f; 376 ant_conf->fast_div_bias = 0x3f;
380 else 377 else
381 ant_conf->fast_div_bias = 0x1; 378 ant_conf->fast_div_bias = 0x1;
382 ant_conf->main_gaintb = 0;
383 ant_conf->alt_gaintb = 0;
384 break; 379 break;
385 case 0x12: /* LNA2 LNA1 */ 380 case 0x12: /* LNA2 LNA1 */
386 ant_conf->fast_div_bias = 0x1; 381 ant_conf->fast_div_bias = 0x1;
387 ant_conf->main_gaintb = 0;
388 ant_conf->alt_gaintb = 0;
389 break; 382 break;
390 case 0x13: /* LNA2 A+B */ 383 case 0x13: /* LNA2 A+B */
391 if (!(antcomb->scan) && 384 if (!(antcomb->scan) &&
@@ -393,8 +386,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
393 ant_conf->fast_div_bias = 0x3f; 386 ant_conf->fast_div_bias = 0x3f;
394 else 387 else
395 ant_conf->fast_div_bias = 0x1; 388 ant_conf->fast_div_bias = 0x1;
396 ant_conf->main_gaintb = 0;
397 ant_conf->alt_gaintb = 0;
398 break; 389 break;
399 case 0x20: /* LNA1 A-B */ 390 case 0x20: /* LNA1 A-B */
400 if (!(antcomb->scan) && 391 if (!(antcomb->scan) &&
@@ -402,13 +393,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
402 ant_conf->fast_div_bias = 0x3f; 393 ant_conf->fast_div_bias = 0x3f;
403 else 394 else
404 ant_conf->fast_div_bias = 0x1; 395 ant_conf->fast_div_bias = 0x1;
405 ant_conf->main_gaintb = 0;
406 ant_conf->alt_gaintb = 0;
407 break; 396 break;
408 case 0x21: /* LNA1 LNA2 */ 397 case 0x21: /* LNA1 LNA2 */
409 ant_conf->fast_div_bias = 0x1; 398 ant_conf->fast_div_bias = 0x1;
410 ant_conf->main_gaintb = 0;
411 ant_conf->alt_gaintb = 0;
412 break; 399 break;
413 case 0x23: /* LNA1 A+B */ 400 case 0x23: /* LNA1 A+B */
414 if (!(antcomb->scan) && 401 if (!(antcomb->scan) &&
@@ -416,23 +403,15 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
416 ant_conf->fast_div_bias = 0x3f; 403 ant_conf->fast_div_bias = 0x3f;
417 else 404 else
418 ant_conf->fast_div_bias = 0x1; 405 ant_conf->fast_div_bias = 0x1;
419 ant_conf->main_gaintb = 0;
420 ant_conf->alt_gaintb = 0;
421 break; 406 break;
422 case 0x30: /* A+B A-B */ 407 case 0x30: /* A+B A-B */
423 ant_conf->fast_div_bias = 0x1; 408 ant_conf->fast_div_bias = 0x1;
424 ant_conf->main_gaintb = 0;
425 ant_conf->alt_gaintb = 0;
426 break; 409 break;
427 case 0x31: /* A+B LNA2 */ 410 case 0x31: /* A+B LNA2 */
428 ant_conf->fast_div_bias = 0x1; 411 ant_conf->fast_div_bias = 0x1;
429 ant_conf->main_gaintb = 0;
430 ant_conf->alt_gaintb = 0;
431 break; 412 break;
432 case 0x32: /* A+B LNA1 */ 413 case 0x32: /* A+B LNA1 */
433 ant_conf->fast_div_bias = 0x1; 414 ant_conf->fast_div_bias = 0x1;
434 ant_conf->main_gaintb = 0;
435 ant_conf->alt_gaintb = 0;
436 break; 415 break;
437 default: 416 default:
438 break; 417 break;
@@ -443,18 +422,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
443 ant_conf->alt_lna_conf) { 422 ant_conf->alt_lna_conf) {
444 case 0x01: /* A-B LNA2 */ 423 case 0x01: /* A-B LNA2 */
445 ant_conf->fast_div_bias = 0x1; 424 ant_conf->fast_div_bias = 0x1;
446 ant_conf->main_gaintb = 0;
447 ant_conf->alt_gaintb = 0;
448 break; 425 break;
449 case 0x02: /* A-B LNA1 */ 426 case 0x02: /* A-B LNA1 */
450 ant_conf->fast_div_bias = 0x1; 427 ant_conf->fast_div_bias = 0x1;
451 ant_conf->main_gaintb = 0;
452 ant_conf->alt_gaintb = 0;
453 break; 428 break;
454 case 0x03: /* A-B A+B */ 429 case 0x03: /* A-B A+B */
455 ant_conf->fast_div_bias = 0x1; 430 ant_conf->fast_div_bias = 0x1;
456 ant_conf->main_gaintb = 0;
457 ant_conf->alt_gaintb = 0;
458 break; 431 break;
459 case 0x10: /* LNA2 A-B */ 432 case 0x10: /* LNA2 A-B */
460 if (!(antcomb->scan) && 433 if (!(antcomb->scan) &&
@@ -462,13 +435,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
462 ant_conf->fast_div_bias = 0x1; 435 ant_conf->fast_div_bias = 0x1;
463 else 436 else
464 ant_conf->fast_div_bias = 0x2; 437 ant_conf->fast_div_bias = 0x2;
465 ant_conf->main_gaintb = 0;
466 ant_conf->alt_gaintb = 0;
467 break; 438 break;
468 case 0x12: /* LNA2 LNA1 */ 439 case 0x12: /* LNA2 LNA1 */
469 ant_conf->fast_div_bias = 0x1; 440 ant_conf->fast_div_bias = 0x1;
470 ant_conf->main_gaintb = 0;
471 ant_conf->alt_gaintb = 0;
472 break; 441 break;
473 case 0x13: /* LNA2 A+B */ 442 case 0x13: /* LNA2 A+B */
474 if (!(antcomb->scan) && 443 if (!(antcomb->scan) &&
@@ -476,8 +445,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
476 ant_conf->fast_div_bias = 0x1; 445 ant_conf->fast_div_bias = 0x1;
477 else 446 else
478 ant_conf->fast_div_bias = 0x2; 447 ant_conf->fast_div_bias = 0x2;
479 ant_conf->main_gaintb = 0;
480 ant_conf->alt_gaintb = 0;
481 break; 448 break;
482 case 0x20: /* LNA1 A-B */ 449 case 0x20: /* LNA1 A-B */
483 if (!(antcomb->scan) && 450 if (!(antcomb->scan) &&
@@ -485,13 +452,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
485 ant_conf->fast_div_bias = 0x1; 452 ant_conf->fast_div_bias = 0x1;
486 else 453 else
487 ant_conf->fast_div_bias = 0x2; 454 ant_conf->fast_div_bias = 0x2;
488 ant_conf->main_gaintb = 0;
489 ant_conf->alt_gaintb = 0;
490 break; 455 break;
491 case 0x21: /* LNA1 LNA2 */ 456 case 0x21: /* LNA1 LNA2 */
492 ant_conf->fast_div_bias = 0x1; 457 ant_conf->fast_div_bias = 0x1;
493 ant_conf->main_gaintb = 0;
494 ant_conf->alt_gaintb = 0;
495 break; 458 break;
496 case 0x23: /* LNA1 A+B */ 459 case 0x23: /* LNA1 A+B */
497 if (!(antcomb->scan) && 460 if (!(antcomb->scan) &&
@@ -499,23 +462,77 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
499 ant_conf->fast_div_bias = 0x1; 462 ant_conf->fast_div_bias = 0x1;
500 else 463 else
501 ant_conf->fast_div_bias = 0x2; 464 ant_conf->fast_div_bias = 0x2;
502 ant_conf->main_gaintb = 0;
503 ant_conf->alt_gaintb = 0;
504 break; 465 break;
505 case 0x30: /* A+B A-B */ 466 case 0x30: /* A+B A-B */
506 ant_conf->fast_div_bias = 0x1; 467 ant_conf->fast_div_bias = 0x1;
507 ant_conf->main_gaintb = 0;
508 ant_conf->alt_gaintb = 0;
509 break; 468 break;
510 case 0x31: /* A+B LNA2 */ 469 case 0x31: /* A+B LNA2 */
511 ant_conf->fast_div_bias = 0x1; 470 ant_conf->fast_div_bias = 0x1;
512 ant_conf->main_gaintb = 0;
513 ant_conf->alt_gaintb = 0;
514 break; 471 break;
515 case 0x32: /* A+B LNA1 */ 472 case 0x32: /* A+B LNA1 */
516 ant_conf->fast_div_bias = 0x1; 473 ant_conf->fast_div_bias = 0x1;
517 ant_conf->main_gaintb = 0; 474 break;
518 ant_conf->alt_gaintb = 0; 475 default:
476 break;
477 }
478 } else if (ant_conf->div_group == 3) {
479 switch ((ant_conf->main_lna_conf << 4) |
480 ant_conf->alt_lna_conf) {
481 case 0x01: /* A-B LNA2 */
482 ant_conf->fast_div_bias = 0x1;
483 break;
484 case 0x02: /* A-B LNA1 */
485 ant_conf->fast_div_bias = 0x39;
486 break;
487 case 0x03: /* A-B A+B */
488 ant_conf->fast_div_bias = 0x1;
489 break;
490 case 0x10: /* LNA2 A-B */
491 if ((antcomb->scan == 0) &&
492 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
493 ant_conf->fast_div_bias = 0x3f;
494 } else {
495 ant_conf->fast_div_bias = 0x1;
496 }
497 break;
498 case 0x12: /* LNA2 LNA1 */
499 ant_conf->fast_div_bias = 0x39;
500 break;
501 case 0x13: /* LNA2 A+B */
502 if ((antcomb->scan == 0) &&
503 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
504 ant_conf->fast_div_bias = 0x3f;
505 } else {
506 ant_conf->fast_div_bias = 0x1;
507 }
508 break;
509 case 0x20: /* LNA1 A-B */
510 if ((antcomb->scan == 0) &&
511 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
512 ant_conf->fast_div_bias = 0x3f;
513 } else {
514 ant_conf->fast_div_bias = 0x4;
515 }
516 break;
517 case 0x21: /* LNA1 LNA2 */
518 ant_conf->fast_div_bias = 0x6;
519 break;
520 case 0x23: /* LNA1 A+B */
521 if ((antcomb->scan == 0) &&
522 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
523 ant_conf->fast_div_bias = 0x3f;
524 } else {
525 ant_conf->fast_div_bias = 0x6;
526 }
527 break;
528 case 0x30: /* A+B A-B */
529 ant_conf->fast_div_bias = 0x1;
530 break;
531 case 0x31: /* A+B LNA2 */
532 ant_conf->fast_div_bias = 0x6;
533 break;
534 case 0x32: /* A+B LNA1 */
535 ant_conf->fast_div_bias = 0x1;
519 break; 536 break;
520 default: 537 default:
521 break; 538 break;
@@ -759,6 +776,7 @@ div_comb_done:
759void ath_ant_comb_update(struct ath_softc *sc) 776void ath_ant_comb_update(struct ath_softc *sc)
760{ 777{
761 struct ath_hw *ah = sc->sc_ah; 778 struct ath_hw *ah = sc->sc_ah;
779 struct ath_common *common = ath9k_hw_common(ah);
762 struct ath_hw_antcomb_conf div_ant_conf; 780 struct ath_hw_antcomb_conf div_ant_conf;
763 u8 lna_conf; 781 u8 lna_conf;
764 782
@@ -773,4 +791,7 @@ void ath_ant_comb_update(struct ath_softc *sc)
773 div_ant_conf.alt_lna_conf = lna_conf; 791 div_ant_conf.alt_lna_conf = lna_conf;
774 792
775 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf); 793 ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
794
795 if (common->antenna_diversity)
796 ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
776} 797}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d066f2516e47..5bbe5057ba18 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -138,7 +138,8 @@ static const struct ar9300_eeprom ar9300_default = {
138 }, 138 },
139 .base_ext1 = { 139 .base_ext1 = {
140 .ant_div_control = 0, 140 .ant_div_control = 0,
141 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 141 .future = {0, 0, 0},
142 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
142 }, 143 },
143 .calFreqPier2G = { 144 .calFreqPier2G = {
144 FREQ2FBIN(2412, 1), 145 FREQ2FBIN(2412, 1),
@@ -713,7 +714,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
713 }, 714 },
714 .base_ext1 = { 715 .base_ext1 = {
715 .ant_div_control = 0, 716 .ant_div_control = 0,
716 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 717 .future = {0, 0, 0},
718 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
717 }, 719 },
718 .calFreqPier2G = { 720 .calFreqPier2G = {
719 FREQ2FBIN(2412, 1), 721 FREQ2FBIN(2412, 1),
@@ -1289,7 +1291,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
1289 }, 1291 },
1290 .base_ext1 = { 1292 .base_ext1 = {
1291 .ant_div_control = 0, 1293 .ant_div_control = 0,
1292 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 1294 .future = {0, 0, 0},
1295 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
1293 }, 1296 },
1294 .calFreqPier2G = { 1297 .calFreqPier2G = {
1295 FREQ2FBIN(2412, 1), 1298 FREQ2FBIN(2412, 1),
@@ -1865,7 +1868,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
1865 }, 1868 },
1866 .base_ext1 = { 1869 .base_ext1 = {
1867 .ant_div_control = 0, 1870 .ant_div_control = 0,
1868 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 1871 .future = {0, 0, 0},
1872 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
1869 }, 1873 },
1870 .calFreqPier2G = { 1874 .calFreqPier2G = {
1871 FREQ2FBIN(2412, 1), 1875 FREQ2FBIN(2412, 1),
@@ -2440,7 +2444,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
2440 }, 2444 },
2441 .base_ext1 = { 2445 .base_ext1 = {
2442 .ant_div_control = 0, 2446 .ant_div_control = 0,
2443 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} 2447 .future = {0, 0, 0},
2448 .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
2444 }, 2449 },
2445 .calFreqPier2G = { 2450 .calFreqPier2G = {
2446 FREQ2FBIN(2412, 1), 2451 FREQ2FBIN(2412, 1),
@@ -3524,7 +3529,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
3524 3529
3525 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) 3530 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
3526 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); 3531 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
3527 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah)) 3532 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
3528 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3533 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
3529 else { 3534 else {
3530 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3535 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3561,9 +3566,9 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
3561 3566
3562static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) 3567static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3563{ 3568{
3569 struct ath9k_hw_capabilities *pCap = &ah->caps;
3564 int chain; 3570 int chain;
3565 u32 regval; 3571 u32 regval;
3566 u32 ant_div_ctl1;
3567 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = { 3572 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
3568 AR_PHY_SWITCH_CHAIN_0, 3573 AR_PHY_SWITCH_CHAIN_0,
3569 AR_PHY_SWITCH_CHAIN_1, 3574 AR_PHY_SWITCH_CHAIN_1,
@@ -3572,7 +3577,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3572 3577
3573 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); 3578 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
3574 3579
3575 if (AR_SREV_9462(ah)) { 3580 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3576 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3581 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3577 AR_SWITCH_TABLE_COM_AR9462_ALL, value); 3582 AR_SWITCH_TABLE_COM_AR9462_ALL, value);
3578 } else if (AR_SREV_9550(ah)) { 3583 } else if (AR_SREV_9550(ah)) {
@@ -3616,7 +3621,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3616 } 3621 }
3617 } 3622 }
3618 3623
3619 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { 3624 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
3620 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1); 3625 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
3621 /* 3626 /*
3622 * main_lnaconf, alt_lnaconf, main_tb, alt_tb 3627 * main_lnaconf, alt_lnaconf, main_tb, alt_tb
@@ -3626,41 +3631,44 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3626 regval &= (~AR_ANT_DIV_CTRL_ALL); 3631 regval &= (~AR_ANT_DIV_CTRL_ALL);
3627 regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; 3632 regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
3628 /* enable_lnadiv */ 3633 /* enable_lnadiv */
3629 regval &= (~AR_PHY_9485_ANT_DIV_LNADIV); 3634 regval &= (~AR_PHY_ANT_DIV_LNADIV);
3630 regval |= ((value >> 6) & 0x1) << 3635 regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
3631 AR_PHY_9485_ANT_DIV_LNADIV_S; 3636
3637 if (AR_SREV_9565(ah)) {
3638 if (ah->shared_chain_lnadiv) {
3639 regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
3640 } else {
3641 regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
3642 regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
3643 }
3644 }
3645
3632 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 3646 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3633 3647
3634 /*enable fast_div */ 3648 /*enable fast_div */
3635 regval = REG_READ(ah, AR_PHY_CCK_DETECT); 3649 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
3636 regval &= (~AR_FAST_DIV_ENABLE); 3650 regval &= (~AR_FAST_DIV_ENABLE);
3637 regval |= ((value >> 7) & 0x1) << 3651 regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
3638 AR_FAST_DIV_ENABLE_S;
3639 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); 3652 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
3640 ant_div_ctl1 = 3653
3641 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 3654 if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
3642 /* check whether antenna diversity is enabled */
3643 if ((ant_div_ctl1 >> 0x6) == 0x3) {
3644 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 3655 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
3645 /* 3656 /*
3646 * clear bits 25-30 main_lnaconf, alt_lnaconf, 3657 * clear bits 25-30 main_lnaconf, alt_lnaconf,
3647 * main_tb, alt_tb 3658 * main_tb, alt_tb
3648 */ 3659 */
3649 regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | 3660 regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
3650 AR_PHY_9485_ANT_DIV_ALT_LNACONF | 3661 AR_PHY_ANT_DIV_ALT_LNACONF |
3651 AR_PHY_9485_ANT_DIV_ALT_GAINTB | 3662 AR_PHY_ANT_DIV_ALT_GAINTB |
3652 AR_PHY_9485_ANT_DIV_MAIN_GAINTB)); 3663 AR_PHY_ANT_DIV_MAIN_GAINTB));
3653 /* by default use LNA1 for the main antenna */ 3664 /* by default use LNA1 for the main antenna */
3654 regval |= (AR_PHY_9485_ANT_DIV_LNA1 << 3665 regval |= (AR_PHY_ANT_DIV_LNA1 <<
3655 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S); 3666 AR_PHY_ANT_DIV_MAIN_LNACONF_S);
3656 regval |= (AR_PHY_9485_ANT_DIV_LNA2 << 3667 regval |= (AR_PHY_ANT_DIV_LNA2 <<
3657 AR_PHY_9485_ANT_DIV_ALT_LNACONF_S); 3668 AR_PHY_ANT_DIV_ALT_LNACONF_S);
3658 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 3669 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3659 } 3670 }
3660
3661
3662 } 3671 }
3663
3664} 3672}
3665 3673
3666static void ar9003_hw_drive_strength_apply(struct ath_hw *ah) 3674static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -3847,7 +3855,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3847 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); 3855 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
3848 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) 3856 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
3849 return; 3857 return;
3850 } else if (AR_SREV_9462(ah)) { 3858 } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3851 reg_val = le32_to_cpu(pBase->swreg); 3859 reg_val = le32_to_cpu(pBase->swreg);
3852 REG_WRITE(ah, AR_PHY_PMU1, reg_val); 3860 REG_WRITE(ah, AR_PHY_PMU1, reg_val);
3853 } else { 3861 } else {
@@ -3878,7 +3886,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3878 while (!REG_READ_FIELD(ah, AR_PHY_PMU2, 3886 while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
3879 AR_PHY_PMU2_PGM)) 3887 AR_PHY_PMU2_PGM))
3880 udelay(10); 3888 udelay(10);
3881 } else if (AR_SREV_9462(ah)) 3889 } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
3882 REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); 3890 REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
3883 else { 3891 else {
3884 reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) | 3892 reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
@@ -3981,6 +3989,62 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
3981 bias & 0x3); 3989 bias & 0x3);
3982} 3990}
3983 3991
3992static int ar9003_hw_get_thermometer(struct ath_hw *ah)
3993{
3994 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3995 struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
3996 int thermometer = (pBase->miscConfiguration >> 1) & 0x3;
3997
3998 return --thermometer;
3999}
4000
4001static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
4002{
4003 int thermometer = ar9003_hw_get_thermometer(ah);
4004 u8 therm_on = (thermometer < 0) ? 0 : 1;
4005
4006 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
4007 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4008 if (ah->caps.tx_chainmask & BIT(1))
4009 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
4010 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4011 if (ah->caps.tx_chainmask & BIT(2))
4012 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
4013 AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
4014
4015 therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
4016 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
4017 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4018 if (ah->caps.tx_chainmask & BIT(1)) {
4019 therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
4020 REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
4021 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4022 }
4023 if (ah->caps.tx_chainmask & BIT(2)) {
4024 therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
4025 REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
4026 AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
4027 }
4028}
4029
4030static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
4031{
4032 u32 data, ko, kg;
4033
4034 if (!AR_SREV_9462_20(ah))
4035 return;
4036 ar9300_otp_read_word(ah, 1, &data);
4037 ko = data & 0xff;
4038 kg = (data >> 8) & 0xff;
4039 if (ko || kg) {
4040 REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
4041 AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko);
4042 REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
4043 AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN,
4044 kg + 256);
4045 }
4046}
4047
3984static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, 4048static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
3985 struct ath9k_channel *chan) 4049 struct ath9k_channel *chan)
3986{ 4050{
@@ -3996,6 +4060,8 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
3996 ar9003_hw_internal_regulator_apply(ah); 4060 ar9003_hw_internal_regulator_apply(ah);
3997 ar9003_hw_apply_tuning_caps(ah); 4061 ar9003_hw_apply_tuning_caps(ah);
3998 ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); 4062 ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
4063 ar9003_hw_thermometer_apply(ah);
4064 ar9003_hw_thermo_cal_apply(ah);
3999} 4065}
4000 4066
4001static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, 4067static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4532,7 +4598,7 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
4532{ 4598{
4533 int tempSlope = 0; 4599 int tempSlope = 0;
4534 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 4600 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4535 int f[3], t[3]; 4601 int f[8], t[8], i;
4536 4602
4537 REG_RMW(ah, AR_PHY_TPC_11_B0, 4603 REG_RMW(ah, AR_PHY_TPC_11_B0,
4538 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), 4604 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4565,7 +4631,14 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
4565 */ 4631 */
4566 if (frequency < 4000) 4632 if (frequency < 4000)
4567 tempSlope = eep->modalHeader2G.tempSlope; 4633 tempSlope = eep->modalHeader2G.tempSlope;
4568 else if (eep->base_ext2.tempSlopeLow != 0) { 4634 else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
4635 for (i = 0; i < 8; i++) {
4636 t[i] = eep->base_ext1.tempslopextension[i];
4637 f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
4638 }
4639 tempSlope = ar9003_hw_power_interpolate((s32) frequency,
4640 f, t, 8);
4641 } else if (eep->base_ext2.tempSlopeLow != 0) {
4569 t[0] = eep->base_ext2.tempSlopeLow; 4642 t[0] = eep->base_ext2.tempSlopeLow;
4570 f[0] = 5180; 4643 f[0] = 5180;
4571 t[1] = eep->modalHeader5G.tempSlope; 4644 t[1] = eep->modalHeader5G.tempSlope;
@@ -4905,90 +4978,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4905 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], 4978 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
4906 chan->channel); 4979 chan->channel);
4907 4980
4908 /* 4981 /*
4909 * compare test group from regulatory 4982 * compare test group from regulatory
4910 * channel list with test mode from pCtlMode 4983 * channel list with test mode from pCtlMode
4911 * list 4984 * list
4912 */ 4985 */
4913 if ((((cfgCtl & ~CTL_MODE_M) | 4986 if ((((cfgCtl & ~CTL_MODE_M) |
4914 (pCtlMode[ctlMode] & CTL_MODE_M)) == 4987 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4915 ctlIndex[i]) || 4988 ctlIndex[i]) ||
4916 (((cfgCtl & ~CTL_MODE_M) | 4989 (((cfgCtl & ~CTL_MODE_M) |
4917 (pCtlMode[ctlMode] & CTL_MODE_M)) == 4990 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4918 ((ctlIndex[i] & CTL_MODE_M) | 4991 ((ctlIndex[i] & CTL_MODE_M) |
4919 SD_NO_CTL))) { 4992 SD_NO_CTL))) {
4920 twiceMinEdgePower = 4993 twiceMinEdgePower =
4921 ar9003_hw_get_max_edge_power(pEepData, 4994 ar9003_hw_get_max_edge_power(pEepData,
4922 freq, i, 4995 freq, i,
4923 is2ghz); 4996 is2ghz);
4924 4997
4925 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) 4998 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
4926 /* 4999 /*
4927 * Find the minimum of all CTL 5000 * Find the minimum of all CTL
4928 * edge powers that apply to 5001 * edge powers that apply to
4929 * this channel 5002 * this channel
4930 */ 5003 */
4931 twiceMaxEdgePower = 5004 twiceMaxEdgePower =
4932 min(twiceMaxEdgePower, 5005 min(twiceMaxEdgePower,
4933 twiceMinEdgePower); 5006 twiceMinEdgePower);
4934 else { 5007 else {
4935 /* specific */ 5008 /* specific */
4936 twiceMaxEdgePower = 5009 twiceMaxEdgePower = twiceMinEdgePower;
4937 twiceMinEdgePower; 5010 break;
4938 break;
4939 }
4940 } 5011 }
4941 } 5012 }
5013 }
4942 5014
4943 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); 5015 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
4944 5016
4945 ath_dbg(common, REGULATORY, 5017 ath_dbg(common, REGULATORY,
4946 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", 5018 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
4947 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, 5019 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
4948 scaledPower, minCtlPower); 5020 scaledPower, minCtlPower);
4949 5021
4950 /* Apply ctl mode to correct target power set */ 5022 /* Apply ctl mode to correct target power set */
4951 switch (pCtlMode[ctlMode]) { 5023 switch (pCtlMode[ctlMode]) {
4952 case CTL_11B: 5024 case CTL_11B:
4953 for (i = ALL_TARGET_LEGACY_1L_5L; 5025 for (i = ALL_TARGET_LEGACY_1L_5L;
4954 i <= ALL_TARGET_LEGACY_11S; i++) 5026 i <= ALL_TARGET_LEGACY_11S; i++)
4955 pPwrArray[i] = 5027 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4956 (u8)min((u16)pPwrArray[i], 5028 minCtlPower);
4957 minCtlPower); 5029 break;
4958 break; 5030 case CTL_11A:
4959 case CTL_11A: 5031 case CTL_11G:
4960 case CTL_11G: 5032 for (i = ALL_TARGET_LEGACY_6_24;
4961 for (i = ALL_TARGET_LEGACY_6_24; 5033 i <= ALL_TARGET_LEGACY_54; i++)
4962 i <= ALL_TARGET_LEGACY_54; i++) 5034 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4963 pPwrArray[i] = 5035 minCtlPower);
4964 (u8)min((u16)pPwrArray[i], 5036 break;
4965 minCtlPower); 5037 case CTL_5GHT20:
4966 break; 5038 case CTL_2GHT20:
4967 case CTL_5GHT20: 5039 for (i = ALL_TARGET_HT20_0_8_16;
4968 case CTL_2GHT20: 5040 i <= ALL_TARGET_HT20_23; i++)
4969 for (i = ALL_TARGET_HT20_0_8_16; 5041 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4970 i <= ALL_TARGET_HT20_21; i++) 5042 minCtlPower);
4971 pPwrArray[i] = 5043 break;
4972 (u8)min((u16)pPwrArray[i], 5044 case CTL_5GHT40:
4973 minCtlPower); 5045 case CTL_2GHT40:
4974 pPwrArray[ALL_TARGET_HT20_22] = 5046 for (i = ALL_TARGET_HT40_0_8_16;
4975 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], 5047 i <= ALL_TARGET_HT40_23; i++)
4976 minCtlPower); 5048 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
4977 pPwrArray[ALL_TARGET_HT20_23] = 5049 minCtlPower);
4978 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], 5050 break;
4979 minCtlPower); 5051 default:
4980 break; 5052 break;
4981 case CTL_5GHT40: 5053 }
4982 case CTL_2GHT40:
4983 for (i = ALL_TARGET_HT40_0_8_16;
4984 i <= ALL_TARGET_HT40_23; i++)
4985 pPwrArray[i] =
4986 (u8)min((u16)pPwrArray[i],
4987 minCtlPower);
4988 break;
4989 default:
4990 break;
4991 }
4992 } /* end ctl mode checking */ 5054 } /* end ctl mode checking */
4993} 5055}
4994 5056
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 3a1ff55bceb9..41b1a75e6bec 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -267,7 +267,8 @@ struct cal_ctl_data_5g {
267 267
268struct ar9300_BaseExtension_1 { 268struct ar9300_BaseExtension_1 {
269 u8 ant_div_control; 269 u8 ant_div_control;
270 u8 future[11]; 270 u8 future[3];
271 u8 tempslopextension[8];
271 int8_t quick_drop_low; 272 int8_t quick_drop_low;
272 int8_t quick_drop_high; 273 int8_t quick_drop_high;
273} __packed; 274} __packed;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1e8a4da5952f..1a36fa262639 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -24,6 +24,7 @@
24#include "ar955x_1p0_initvals.h" 24#include "ar955x_1p0_initvals.h"
25#include "ar9580_1p0_initvals.h" 25#include "ar9580_1p0_initvals.h"
26#include "ar9462_2p0_initvals.h" 26#include "ar9462_2p0_initvals.h"
27#include "ar9565_1p0_initvals.h"
27 28
28/* General hardware code for the AR9003 hadware family */ 29/* General hardware code for the AR9003 hadware family */
29 30
@@ -34,14 +35,12 @@
34 */ 35 */
35static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 36static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
36{ 37{
37#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
38 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
39
40#define AR9462_BB_CTX_COEFJ(x) \ 38#define AR9462_BB_CTX_COEFJ(x) \
41 ar9462_##x##_baseband_core_txfir_coeff_japan_2484 39 ar9462_##x##_baseband_core_txfir_coeff_japan_2484
42 40
43#define AR9462_BBC_TXIFR_COEFFJ \ 41#define AR9462_BBC_TXIFR_COEFFJ \
44 ar9462_2p0_baseband_core_txfir_coeff_japan_2484 42 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
43
45 if (AR_SREV_9330_11(ah)) { 44 if (AR_SREV_9330_11(ah)) {
46 /* mac */ 45 /* mac */
47 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 46 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -220,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
220 219
221 /* Awake -> Sleep Setting */ 220 /* Awake -> Sleep Setting */
222 INIT_INI_ARRAY(&ah->iniPcieSerdes, 221 INIT_INI_ARRAY(&ah->iniPcieSerdes,
223 PCIE_PLL_ON_CREQ_DIS_L1_2P0); 222 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
224 /* Sleep -> Awake Setting */ 223 /* Sleep -> Awake Setting */
225 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 224 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
226 PCIE_PLL_ON_CREQ_DIS_L1_2P0); 225 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
227 226
228 /* Fast clock modal settings */ 227 /* Fast clock modal settings */
229 INIT_INI_ARRAY(&ah->iniModesFastClock, 228 INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -302,6 +301,39 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
302 301
303 INIT_INI_ARRAY(&ah->iniModesFastClock, 302 INIT_INI_ARRAY(&ah->iniModesFastClock,
304 ar9580_1p0_modes_fast_clock); 303 ar9580_1p0_modes_fast_clock);
304 } else if (AR_SREV_9565(ah)) {
305 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
306 ar9565_1p0_mac_core);
307 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
308 ar9565_1p0_mac_postamble);
309
310 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
311 ar9565_1p0_baseband_core);
312 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
313 ar9565_1p0_baseband_postamble);
314
315 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
316 ar9565_1p0_radio_core);
317 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
318 ar9565_1p0_radio_postamble);
319
320 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
321 ar9565_1p0_soc_preamble);
322 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
323 ar9565_1p0_soc_postamble);
324
325 INIT_INI_ARRAY(&ah->iniModesRxGain,
326 ar9565_1p0_Common_rx_gain_table);
327 INIT_INI_ARRAY(&ah->iniModesTxGain,
328 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
329
330 INIT_INI_ARRAY(&ah->iniPcieSerdes,
331 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
332 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
333 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
334
335 INIT_INI_ARRAY(&ah->iniModesFastClock,
336 ar9565_1p0_modes_fast_clock);
305 } else { 337 } else {
306 /* mac */ 338 /* mac */
307 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 339 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -374,6 +406,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
374 else if (AR_SREV_9462_20(ah)) 406 else if (AR_SREV_9462_20(ah))
375 INIT_INI_ARRAY(&ah->iniModesTxGain, 407 INIT_INI_ARRAY(&ah->iniModesTxGain,
376 ar9462_modes_low_ob_db_tx_gain_table_2p0); 408 ar9462_modes_low_ob_db_tx_gain_table_2p0);
409 else if (AR_SREV_9565(ah))
410 INIT_INI_ARRAY(&ah->iniModesTxGain,
411 ar9565_1p0_modes_low_ob_db_tx_gain_table);
377 else 412 else
378 INIT_INI_ARRAY(&ah->iniModesTxGain, 413 INIT_INI_ARRAY(&ah->iniModesTxGain,
379 ar9300Modes_lowest_ob_db_tx_gain_table_2p2); 414 ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
@@ -402,6 +437,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
402 else if (AR_SREV_9462_20(ah)) 437 else if (AR_SREV_9462_20(ah))
403 INIT_INI_ARRAY(&ah->iniModesTxGain, 438 INIT_INI_ARRAY(&ah->iniModesTxGain,
404 ar9462_modes_high_ob_db_tx_gain_table_2p0); 439 ar9462_modes_high_ob_db_tx_gain_table_2p0);
440 else if (AR_SREV_9565(ah))
441 INIT_INI_ARRAY(&ah->iniModesTxGain,
442 ar9565_1p0_modes_high_ob_db_tx_gain_table);
405 else 443 else
406 INIT_INI_ARRAY(&ah->iniModesTxGain, 444 INIT_INI_ARRAY(&ah->iniModesTxGain,
407 ar9300Modes_high_ob_db_tx_gain_table_2p2); 445 ar9300Modes_high_ob_db_tx_gain_table_2p2);
@@ -424,6 +462,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
424 else if (AR_SREV_9580(ah)) 462 else if (AR_SREV_9580(ah))
425 INIT_INI_ARRAY(&ah->iniModesTxGain, 463 INIT_INI_ARRAY(&ah->iniModesTxGain,
426 ar9580_1p0_low_ob_db_tx_gain_table); 464 ar9580_1p0_low_ob_db_tx_gain_table);
465 else if (AR_SREV_9565(ah))
466 INIT_INI_ARRAY(&ah->iniModesTxGain,
467 ar9565_1p0_modes_low_ob_db_tx_gain_table);
427 else 468 else
428 INIT_INI_ARRAY(&ah->iniModesTxGain, 469 INIT_INI_ARRAY(&ah->iniModesTxGain,
429 ar9300Modes_low_ob_db_tx_gain_table_2p2); 470 ar9300Modes_low_ob_db_tx_gain_table_2p2);
@@ -446,6 +487,9 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
446 else if (AR_SREV_9580(ah)) 487 else if (AR_SREV_9580(ah))
447 INIT_INI_ARRAY(&ah->iniModesTxGain, 488 INIT_INI_ARRAY(&ah->iniModesTxGain,
448 ar9580_1p0_high_power_tx_gain_table); 489 ar9580_1p0_high_power_tx_gain_table);
490 else if (AR_SREV_9565(ah))
491 INIT_INI_ARRAY(&ah->iniModesTxGain,
492 ar9565_1p0_modes_high_power_tx_gain_table);
449 else 493 else
450 INIT_INI_ARRAY(&ah->iniModesTxGain, 494 INIT_INI_ARRAY(&ah->iniModesTxGain,
451 ar9300Modes_high_power_tx_gain_table_2p2); 495 ar9300Modes_high_power_tx_gain_table_2p2);
@@ -538,6 +582,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
538 } else if (AR_SREV_9580(ah)) 582 } else if (AR_SREV_9580(ah))
539 INIT_INI_ARRAY(&ah->iniModesRxGain, 583 INIT_INI_ARRAY(&ah->iniModesRxGain,
540 ar9580_1p0_wo_xlna_rx_gain_table); 584 ar9580_1p0_wo_xlna_rx_gain_table);
585 else if (AR_SREV_9565(ah))
586 INIT_INI_ARRAY(&ah->iniModesRxGain,
587 ar9565_1p0_common_wo_xlna_rx_gain_table);
541 else 588 else
542 INIT_INI_ARRAY(&ah->iniModesRxGain, 589 INIT_INI_ARRAY(&ah->iniModesRxGain,
543 ar9300Common_wo_xlna_rx_gain_table_2p2); 590 ar9300Common_wo_xlna_rx_gain_table_2p2);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 78816b8b2173..301bf72c53bf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -31,7 +31,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
31 u32 val, ctl12, ctl17; 31 u32 val, ctl12, ctl17;
32 u8 desc_len; 32 u8 desc_len;
33 33
34 desc_len = (AR_SREV_9462(ah) ? 0x18 : 0x17); 34 desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);
35 35
36 val = (ATHEROS_VENDOR_ID << AR_DescId_S) | 36 val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
37 (1 << AR_TxRxDesc_S) | 37 (1 << AR_TxRxDesc_S) |
@@ -182,6 +182,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
182 struct ath9k_hw_capabilities *pCap = &ah->caps; 182 struct ath9k_hw_capabilities *pCap = &ah->caps;
183 struct ath_common *common = ath9k_hw_common(ah); 183 struct ath_common *common = ath9k_hw_common(ah);
184 u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ; 184 u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
185 bool fatal_int;
185 186
186 if (ath9k_hw_mci_is_enabled(ah)) 187 if (ath9k_hw_mci_is_enabled(ah))
187 async_mask |= AR_INTR_ASYNC_MASK_MCI; 188 async_mask |= AR_INTR_ASYNC_MASK_MCI;
@@ -310,6 +311,22 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
310 311
311 if (sync_cause) { 312 if (sync_cause) {
312 ath9k_debug_sync_cause(common, sync_cause); 313 ath9k_debug_sync_cause(common, sync_cause);
314 fatal_int =
315 (sync_cause &
316 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
317 ? true : false;
318
319 if (fatal_int) {
320 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
321 ath_dbg(common, ANY,
322 "received PCI FATAL interrupt\n");
323 }
324 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
325 ath_dbg(common, ANY,
326 "received PCI PERR interrupt\n");
327 }
328 *masked |= ATH9K_INT_FATAL;
329 }
313 330
314 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 331 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
315 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 332 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -531,7 +548,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
531 rxs->rs_status |= ATH9K_RXERR_PHY; 548 rxs->rs_status |= ATH9K_RXERR_PHY;
532 rxs->rs_phyerr = phyerr; 549 rxs->rs_phyerr = phyerr;
533 } 550 }
534 }; 551 }
535 } 552 }
536 553
537 if (rxsp->status11 & AR_KeyMiss) 554 if (rxsp->status11 & AR_KeyMiss)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 9a34fcaae3ff..44c202ce6c66 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -714,6 +714,7 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
714 714
715 return true; 715 return true;
716} 716}
717EXPORT_SYMBOL(ar9003_mci_start_reset);
717 718
718int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 719int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
719 struct ath9k_hw_cal_data *caldata) 720 struct ath9k_hw_cal_data *caldata)
@@ -812,8 +813,8 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
812 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1); 813 AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
813} 814}
814 815
815void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 816int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
816 bool is_full_sleep) 817 bool is_full_sleep)
817{ 818{
818 struct ath_common *common = ath9k_hw_common(ah); 819 struct ath_common *common = ath9k_hw_common(ah);
819 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 820 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
@@ -823,14 +824,13 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
823 is_full_sleep, is_2g); 824 is_full_sleep, is_2g);
824 825
825 if (!mci->gpm_addr && !mci->sched_addr) { 826 if (!mci->gpm_addr && !mci->sched_addr) {
826 ath_dbg(common, MCI, 827 ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
827 "MCI GPM and schedule buffers are not allocated\n"); 828 return -ENOMEM;
828 return;
829 } 829 }
830 830
831 if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) { 831 if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
832 ath_dbg(common, MCI, "BTCOEX control register is dead\n"); 832 ath_err(common, "BTCOEX control register is dead\n");
833 return; 833 return -EINVAL;
834 } 834 }
835 835
836 /* Program MCI DMA related registers */ 836 /* Program MCI DMA related registers */
@@ -912,6 +912,8 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
912 912
913 if (en_int) 913 if (en_int)
914 ar9003_mci_enable_interrupt(ah); 914 ar9003_mci_enable_interrupt(ah);
915
916 return 0;
915} 917}
916 918
917void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep) 919void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
@@ -1026,6 +1028,7 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
1026 1028
1027 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) 1029 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
1028 ar9003_mci_osla_setup(ah, true); 1030 ar9003_mci_osla_setup(ah, true);
1031 REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
1029 } else { 1032 } else {
1030 ar9003_mci_send_lna_take(ah, true); 1033 ar9003_mci_send_lna_take(ah, true);
1031 udelay(5); 1034 udelay(5);
@@ -1142,8 +1145,8 @@ void ar9003_mci_init_cal_done(struct ath_hw *ah)
1142 ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); 1145 ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
1143} 1146}
1144 1147
1145void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 1148int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
1146 u16 len, u32 sched_addr) 1149 u16 len, u32 sched_addr)
1147{ 1150{
1148 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1151 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1149 1152
@@ -1152,7 +1155,7 @@ void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
1152 mci->gpm_len = len; 1155 mci->gpm_len = len;
1153 mci->sched_addr = sched_addr; 1156 mci->sched_addr = sched_addr;
1154 1157
1155 ar9003_mci_reset(ah, true, true, true); 1158 return ar9003_mci_reset(ah, true, true, true);
1156} 1159}
1157EXPORT_SYMBOL(ar9003_mci_setup); 1160EXPORT_SYMBOL(ar9003_mci_setup);
1158 1161
@@ -1201,12 +1204,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1201 1204
1202 ar9003_mci_2g5g_switch(ah, false); 1205 ar9003_mci_2g5g_switch(ah, false);
1203 break; 1206 break;
1204 case MCI_STATE_SET_BT_CAL_START:
1205 mci->bt_state = MCI_BT_CAL_START;
1206 break;
1207 case MCI_STATE_SET_BT_CAL:
1208 mci->bt_state = MCI_BT_CAL;
1209 break;
1210 case MCI_STATE_RESET_REQ_WAKE: 1207 case MCI_STATE_RESET_REQ_WAKE:
1211 ar9003_mci_reset_req_wakeup(ah); 1208 ar9003_mci_reset_req_wakeup(ah);
1212 mci->update_2g5g = true; 1209 mci->update_2g5g = true;
@@ -1240,6 +1237,10 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1240 case MCI_STATE_NEED_FTP_STOMP: 1237 case MCI_STATE_NEED_FTP_STOMP:
1241 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); 1238 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
1242 break; 1239 break;
1240 case MCI_STATE_NEED_FLUSH_BT_INFO:
1241 value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
1242 mci->need_flush_btinfo = false;
1243 break;
1243 default: 1244 default:
1244 break; 1245 break;
1245 } 1246 }
@@ -1289,7 +1290,7 @@ void ar9003_mci_set_power_awake(struct ath_hw *ah)
1289 } 1290 }
1290 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18))); 1291 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
1291 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3; 1292 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
1292 bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP; 1293 bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);
1293 1294
1294 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2); 1295 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
1295 REG_WRITE(ah, AR_DIAG_SW, diag_sw); 1296 REG_WRITE(ah, AR_DIAG_SW, diag_sw);
@@ -1327,6 +1328,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1327 1328
1328 if (first) { 1329 if (first) {
1329 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); 1330 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1331
1332 if (gpm_ptr >= mci->gpm_len)
1333 gpm_ptr = 0;
1334
1330 mci->gpm_idx = gpm_ptr; 1335 mci->gpm_idx = gpm_ptr;
1331 return gpm_ptr; 1336 return gpm_ptr;
1332 } 1337 }
@@ -1371,6 +1376,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1371 more_gpm = MCI_GPM_NOMORE; 1376 more_gpm = MCI_GPM_NOMORE;
1372 1377
1373 temp_index = mci->gpm_idx; 1378 temp_index = mci->gpm_idx;
1379
1380 if (temp_index >= mci->gpm_len)
1381 temp_index = 0;
1382
1374 mci->gpm_idx++; 1383 mci->gpm_idx++;
1375 1384
1376 if (mci->gpm_idx >= mci->gpm_len) 1385 if (mci->gpm_idx >= mci->gpm_len)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index d33b8e128855..2a2d01889613 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -190,8 +190,6 @@ enum mci_bt_state {
190enum mci_state_type { 190enum mci_state_type {
191 MCI_STATE_ENABLE, 191 MCI_STATE_ENABLE,
192 MCI_STATE_SET_BT_AWAKE, 192 MCI_STATE_SET_BT_AWAKE,
193 MCI_STATE_SET_BT_CAL_START,
194 MCI_STATE_SET_BT_CAL,
195 MCI_STATE_LAST_SCHD_MSG_OFFSET, 193 MCI_STATE_LAST_SCHD_MSG_OFFSET,
196 MCI_STATE_REMOTE_SLEEP, 194 MCI_STATE_REMOTE_SLEEP,
197 MCI_STATE_RESET_REQ_WAKE, 195 MCI_STATE_RESET_REQ_WAKE,
@@ -202,6 +200,7 @@ enum mci_state_type {
202 MCI_STATE_RECOVER_RX, 200 MCI_STATE_RECOVER_RX,
203 MCI_STATE_NEED_FTP_STOMP, 201 MCI_STATE_NEED_FTP_STOMP,
204 MCI_STATE_DEBUG, 202 MCI_STATE_DEBUG,
203 MCI_STATE_NEED_FLUSH_BT_INFO,
205 MCI_STATE_MAX 204 MCI_STATE_MAX
206}; 205};
207 206
@@ -213,7 +212,8 @@ enum mci_gpm_coex_opcode {
213 MCI_GPM_COEX_WLAN_CHANNELS, 212 MCI_GPM_COEX_WLAN_CHANNELS,
214 MCI_GPM_COEX_BT_PROFILE_INFO, 213 MCI_GPM_COEX_BT_PROFILE_INFO,
215 MCI_GPM_COEX_BT_STATUS_UPDATE, 214 MCI_GPM_COEX_BT_STATUS_UPDATE,
216 MCI_GPM_COEX_BT_UPDATE_FLAGS 215 MCI_GPM_COEX_BT_UPDATE_FLAGS,
216 MCI_GPM_COEX_NOOP,
217}; 217};
218 218
219#define MCI_GPM_NOMORE 0 219#define MCI_GPM_NOMORE 0
@@ -249,8 +249,8 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
249 u32 *payload, u8 len, bool wait_done, 249 u32 *payload, u8 len, bool wait_done,
250 bool check_bt); 250 bool check_bt);
251u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type); 251u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
252void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 252int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
253 u16 len, u32 sched_addr); 253 u16 len, u32 sched_addr);
254void ar9003_mci_cleanup(struct ath_hw *ah); 254void ar9003_mci_cleanup(struct ath_hw *ah);
255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, 255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
256 u32 *rx_msg_intr); 256 u32 *rx_msg_intr);
@@ -272,8 +272,8 @@ void ar9003_mci_check_bt(struct ath_hw *ah);
272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); 272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
274 struct ath9k_hw_cal_data *caldata); 274 struct ath9k_hw_cal_data *caldata);
275void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 275int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
276 bool is_full_sleep); 276 bool is_full_sleep);
277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); 277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); 278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
279void ar9003_mci_set_power_awake(struct ath_hw *ah); 279void ar9003_mci_set_power_awake(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e476f9f92ce3..759f5f5a7154 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -88,7 +88,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
88 channelSel = (freq * 4) / div; 88 channelSel = (freq * 4) / div;
89 chan_frac = (((freq * 4) % div) * 0x20000) / div; 89 chan_frac = (((freq * 4) % div) * 0x20000) / div;
90 channelSel = (channelSel << 17) | chan_frac; 90 channelSel = (channelSel << 17) | chan_frac;
91 } else if (AR_SREV_9485(ah)) { 91 } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
92 u32 chan_frac; 92 u32 chan_frac;
93 93
94 /* 94 /*
@@ -206,6 +206,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
206 for (i = 0; i < max_spur_cnts; i++) { 206 for (i = 0; i < max_spur_cnts; i++) {
207 if (AR_SREV_9462(ah) && (i == 0 || i == 3)) 207 if (AR_SREV_9462(ah) && (i == 0 || i == 3))
208 continue; 208 continue;
209
209 negative = 0; 210 negative = 0;
210 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 211 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
211 AR_SREV_9550(ah)) 212 AR_SREV_9550(ah))
@@ -301,7 +302,9 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
301 int freq_offset, 302 int freq_offset,
302 int spur_freq_sd, 303 int spur_freq_sd,
303 int spur_delta_phase, 304 int spur_delta_phase,
304 int spur_subchannel_sd) 305 int spur_subchannel_sd,
306 int range,
307 int synth_freq)
305{ 308{
306 int mask_index = 0; 309 int mask_index = 0;
307 310
@@ -316,8 +319,11 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
316 AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd); 319 AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
317 REG_RMW_FIELD(ah, AR_PHY_TIMING11, 320 REG_RMW_FIELD(ah, AR_PHY_TIMING11,
318 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1); 321 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
319 REG_RMW_FIELD(ah, AR_PHY_TIMING11, 322
320 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1); 323 if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
324 REG_RMW_FIELD(ah, AR_PHY_TIMING11,
325 AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
326
321 REG_RMW_FIELD(ah, AR_PHY_TIMING4, 327 REG_RMW_FIELD(ah, AR_PHY_TIMING4,
322 AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1); 328 AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
323 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG, 329 REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
@@ -358,9 +364,44 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
358 AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff); 364 AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
359} 365}
360 366
367static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
368 int freq_offset)
369{
370 int mask_index = 0;
371
372 mask_index = (freq_offset << 4) / 5;
373 if (mask_index < 0)
374 mask_index = mask_index - 1;
375
376 mask_index = mask_index & 0x7f;
377
378 REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
379 AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B,
380 mask_index);
381
382 /* A == B */
383 REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
384 AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
385 mask_index);
386
387 REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
388 AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B,
389 mask_index);
390 REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
391 AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B, 0xe);
392 REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
393 AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
394
395 /* A == B */
396 REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
397 AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
398}
399
361static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah, 400static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
362 struct ath9k_channel *chan, 401 struct ath9k_channel *chan,
363 int freq_offset) 402 int freq_offset,
403 int range,
404 int synth_freq)
364{ 405{
365 int spur_freq_sd = 0; 406 int spur_freq_sd = 0;
366 int spur_subchannel_sd = 0; 407 int spur_subchannel_sd = 0;
@@ -402,7 +443,8 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
402 freq_offset, 443 freq_offset,
403 spur_freq_sd, 444 spur_freq_sd,
404 spur_delta_phase, 445 spur_delta_phase,
405 spur_subchannel_sd); 446 spur_subchannel_sd,
447 range, synth_freq);
406} 448}
407 449
408/* Spur mitigation for OFDM */ 450/* Spur mitigation for OFDM */
@@ -447,7 +489,17 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
447 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode); 489 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
448 freq_offset -= synth_freq; 490 freq_offset -= synth_freq;
449 if (abs(freq_offset) < range) { 491 if (abs(freq_offset) < range) {
450 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); 492 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
493 range, synth_freq);
494
495 if (AR_SREV_9565(ah) && (i < 4)) {
496 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
497 mode);
498 freq_offset -= synth_freq;
499 if (abs(freq_offset) < range)
500 ar9003_hw_spur_ofdm_9565(ah, freq_offset);
501 }
502
451 break; 503 break;
452 } 504 }
453 } 505 }
@@ -456,7 +508,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
456static void ar9003_hw_spur_mitigate(struct ath_hw *ah, 508static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
457 struct ath9k_channel *chan) 509 struct ath9k_channel *chan)
458{ 510{
459 ar9003_hw_spur_mitigate_mrc_cck(ah, chan); 511 if (!AR_SREV_9565(ah))
512 ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
460 ar9003_hw_spur_mitigate_ofdm(ah, chan); 513 ar9003_hw_spur_mitigate_ofdm(ah, chan);
461} 514}
462 515
@@ -552,9 +605,6 @@ static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
552 605
553 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7)) 606 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
554 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); 607 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
555 else if (AR_SREV_9462(ah))
556 /* xxx only when MCI support is enabled */
557 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
558 else 608 else
559 REG_WRITE(ah, AR_SELFGEN_MASK, tx); 609 REG_WRITE(ah, AR_SELFGEN_MASK, tx);
560 610
@@ -736,7 +786,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
736 if (chan->channel == 2484) 786 if (chan->channel == 2484)
737 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 787 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
738 788
739 if (AR_SREV_9462(ah)) 789 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
740 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, 790 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
741 AR_GLB_SWREG_DISCONT_EN_BT_WLAN); 791 AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
742 792
@@ -746,9 +796,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
746 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 796 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
747 ath9k_hw_apply_txpower(ah, chan, false); 797 ath9k_hw_apply_txpower(ah, chan, false);
748 798
749 if (AR_SREV_9462(ah)) { 799 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
750 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, 800 if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
751 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) 801 AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
752 ah->enabled_cals |= TX_IQ_CAL; 802 ah->enabled_cals |= TX_IQ_CAL;
753 else 803 else
754 ah->enabled_cals &= ~TX_IQ_CAL; 804 ah->enabled_cals &= ~TX_IQ_CAL;
@@ -1111,7 +1161,7 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
1111 if (AR_SREV_9330(ah)) 1161 if (AR_SREV_9330(ah))
1112 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ; 1162 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
1113 1163
1114 if (AR_SREV_9462(ah)) { 1164 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
1115 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ; 1165 ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
1116 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ; 1166 ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
1117 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ; 1167 ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
@@ -1223,17 +1273,17 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
1223} 1273}
1224 1274
1225static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah, 1275static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
1226 struct ath_hw_antcomb_conf *antconf) 1276 struct ath_hw_antcomb_conf *antconf)
1227{ 1277{
1228 u32 regval; 1278 u32 regval;
1229 1279
1230 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 1280 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1231 antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >> 1281 antconf->main_lna_conf = (regval & AR_PHY_ANT_DIV_MAIN_LNACONF) >>
1232 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S; 1282 AR_PHY_ANT_DIV_MAIN_LNACONF_S;
1233 antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >> 1283 antconf->alt_lna_conf = (regval & AR_PHY_ANT_DIV_ALT_LNACONF) >>
1234 AR_PHY_9485_ANT_DIV_ALT_LNACONF_S; 1284 AR_PHY_ANT_DIV_ALT_LNACONF_S;
1235 antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >> 1285 antconf->fast_div_bias = (regval & AR_PHY_ANT_FAST_DIV_BIAS) >>
1236 AR_PHY_9485_ANT_FAST_DIV_BIAS_S; 1286 AR_PHY_ANT_FAST_DIV_BIAS_S;
1237 1287
1238 if (AR_SREV_9330_11(ah)) { 1288 if (AR_SREV_9330_11(ah)) {
1239 antconf->lna1_lna2_delta = -9; 1289 antconf->lna1_lna2_delta = -9;
@@ -1241,6 +1291,9 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
1241 } else if (AR_SREV_9485(ah)) { 1291 } else if (AR_SREV_9485(ah)) {
1242 antconf->lna1_lna2_delta = -9; 1292 antconf->lna1_lna2_delta = -9;
1243 antconf->div_group = 2; 1293 antconf->div_group = 2;
1294 } else if (AR_SREV_9565(ah)) {
1295 antconf->lna1_lna2_delta = -3;
1296 antconf->div_group = 3;
1244 } else { 1297 } else {
1245 antconf->lna1_lna2_delta = -3; 1298 antconf->lna1_lna2_delta = -3;
1246 antconf->div_group = 0; 1299 antconf->div_group = 0;
@@ -1253,26 +1306,84 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
1253 u32 regval; 1306 u32 regval;
1254 1307
1255 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); 1308 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1256 regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | 1309 regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
1257 AR_PHY_9485_ANT_DIV_ALT_LNACONF | 1310 AR_PHY_ANT_DIV_ALT_LNACONF |
1258 AR_PHY_9485_ANT_FAST_DIV_BIAS | 1311 AR_PHY_ANT_FAST_DIV_BIAS |
1259 AR_PHY_9485_ANT_DIV_MAIN_GAINTB | 1312 AR_PHY_ANT_DIV_MAIN_GAINTB |
1260 AR_PHY_9485_ANT_DIV_ALT_GAINTB); 1313 AR_PHY_ANT_DIV_ALT_GAINTB);
1261 regval |= ((antconf->main_lna_conf << 1314 regval |= ((antconf->main_lna_conf << AR_PHY_ANT_DIV_MAIN_LNACONF_S)
1262 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S) 1315 & AR_PHY_ANT_DIV_MAIN_LNACONF);
1263 & AR_PHY_9485_ANT_DIV_MAIN_LNACONF); 1316 regval |= ((antconf->alt_lna_conf << AR_PHY_ANT_DIV_ALT_LNACONF_S)
1264 regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S) 1317 & AR_PHY_ANT_DIV_ALT_LNACONF);
1265 & AR_PHY_9485_ANT_DIV_ALT_LNACONF); 1318 regval |= ((antconf->fast_div_bias << AR_PHY_ANT_FAST_DIV_BIAS_S)
1266 regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S) 1319 & AR_PHY_ANT_FAST_DIV_BIAS);
1267 & AR_PHY_9485_ANT_FAST_DIV_BIAS); 1320 regval |= ((antconf->main_gaintb << AR_PHY_ANT_DIV_MAIN_GAINTB_S)
1268 regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S) 1321 & AR_PHY_ANT_DIV_MAIN_GAINTB);
1269 & AR_PHY_9485_ANT_DIV_MAIN_GAINTB); 1322 regval |= ((antconf->alt_gaintb << AR_PHY_ANT_DIV_ALT_GAINTB_S)
1270 regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S) 1323 & AR_PHY_ANT_DIV_ALT_GAINTB);
1271 & AR_PHY_9485_ANT_DIV_ALT_GAINTB);
1272 1324
1273 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); 1325 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1274} 1326}
1275 1327
1328static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
1329 bool enable)
1330{
1331 u8 ant_div_ctl1;
1332 u32 regval;
1333
1334 if (!AR_SREV_9565(ah))
1335 return;
1336
1337 ah->shared_chain_lnadiv = enable;
1338 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
1339
1340 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1341 regval &= (~AR_ANT_DIV_CTRL_ALL);
1342 regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
1343 regval &= ~AR_PHY_ANT_DIV_LNADIV;
1344 regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
1345
1346 if (enable)
1347 regval |= AR_ANT_DIV_ENABLE;
1348
1349 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1350
1351 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
1352 regval &= ~AR_FAST_DIV_ENABLE;
1353 regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
1354
1355 if (enable)
1356 regval |= AR_FAST_DIV_ENABLE;
1357
1358 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
1359
1360 if (enable) {
1361 REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1362 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1363 if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
1364 REG_SET_BIT(ah, AR_PHY_RESTART,
1365 AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
1366 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
1367 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1368 } else {
1369 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
1370 REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
1371 (1 << AR_PHY_ANT_SW_RX_PROT_S));
1372 REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
1373 REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
1374 AR_BTCOEX_WL_LNADIV_FORCE_ON);
1375
1376 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1377 regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
1378 AR_PHY_ANT_DIV_ALT_LNACONF |
1379 AR_PHY_ANT_DIV_MAIN_GAINTB |
1380 AR_PHY_ANT_DIV_ALT_GAINTB);
1381 regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
1382 regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
1383 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1384 }
1385}
1386
1276static int ar9003_hw_fast_chan_change(struct ath_hw *ah, 1387static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1277 struct ath9k_channel *chan, 1388 struct ath9k_channel *chan,
1278 u8 *ini_reloaded) 1389 u8 *ini_reloaded)
@@ -1312,10 +1423,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1312 ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex); 1423 ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
1313 ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex); 1424 ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
1314 ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex); 1425 ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
1426
1315 if (AR_SREV_9462_20(ah)) 1427 if (AR_SREV_9462_20(ah))
1316 ar9003_hw_prog_ini(ah, 1428 ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
1317 &ah->ini_radio_post_sys2ant, 1429 modesIndex);
1318 modesIndex);
1319 1430
1320 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); 1431 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1321 1432
@@ -1326,6 +1437,9 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
1326 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1437 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1327 REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites); 1438 REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
1328 1439
1440 if (AR_SREV_9565(ah))
1441 REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
1442
1329 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 1443 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
1330 1444
1331 ah->modes_index = modesIndex; 1445 ah->modes_index = modesIndex;
@@ -1368,6 +1482,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1368 1482
1369 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get; 1483 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
1370 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set; 1484 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
1485 ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
1371 1486
1372 ar9003_hw_set_nf_limits(ah); 1487 ar9003_hw_set_nf_limits(ah);
1373 ar9003_hw_set_radar_conf(ah); 1488 ar9003_hw_set_radar_conf(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 84d3d4956861..9a48e3d2f231 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -223,15 +223,24 @@
223#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c) 223#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
224#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20) 224#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
225 225
226#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0 226#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
227#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5 227#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
228#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F 228#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
229#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0 229#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
230#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B 0x00FE0000
231#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_S 17
232#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B 0x0001F000
233#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B_S 12
230 234
231#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0 235#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
232#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5 236#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
233#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F 237#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
234#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0 238#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
239#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B 0x00FE0000
240#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_S 17
241#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B 0x0001F000
242#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B_S 12
243
235 244
236/* 245/*
237 * MRC Feild Definitions 246 * MRC Feild Definitions
@@ -271,23 +280,25 @@
271#define AR_ANT_DIV_ENABLE_S 24 280#define AR_ANT_DIV_ENABLE_S 24
272 281
273 282
274#define AR_PHY_9485_ANT_FAST_DIV_BIAS 0x00007e00 283#define AR_PHY_ANT_FAST_DIV_BIAS 0x00007e00
275#define AR_PHY_9485_ANT_FAST_DIV_BIAS_S 9 284#define AR_PHY_ANT_FAST_DIV_BIAS_S 9
276#define AR_PHY_9485_ANT_DIV_LNADIV 0x01000000 285#define AR_PHY_ANT_SW_RX_PROT 0x00800000
277#define AR_PHY_9485_ANT_DIV_LNADIV_S 24 286#define AR_PHY_ANT_SW_RX_PROT_S 23
278#define AR_PHY_9485_ANT_DIV_ALT_LNACONF 0x06000000 287#define AR_PHY_ANT_DIV_LNADIV 0x01000000
279#define AR_PHY_9485_ANT_DIV_ALT_LNACONF_S 25 288#define AR_PHY_ANT_DIV_LNADIV_S 24
280#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF 0x18000000 289#define AR_PHY_ANT_DIV_ALT_LNACONF 0x06000000
281#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S 27 290#define AR_PHY_ANT_DIV_ALT_LNACONF_S 25
282#define AR_PHY_9485_ANT_DIV_ALT_GAINTB 0x20000000 291#define AR_PHY_ANT_DIV_MAIN_LNACONF 0x18000000
283#define AR_PHY_9485_ANT_DIV_ALT_GAINTB_S 29 292#define AR_PHY_ANT_DIV_MAIN_LNACONF_S 27
284#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB 0x40000000 293#define AR_PHY_ANT_DIV_ALT_GAINTB 0x20000000
285#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S 30 294#define AR_PHY_ANT_DIV_ALT_GAINTB_S 29
286 295#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
287#define AR_PHY_9485_ANT_DIV_LNA1_MINUS_LNA2 0x0 296#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
288#define AR_PHY_9485_ANT_DIV_LNA2 0x1 297
289#define AR_PHY_9485_ANT_DIV_LNA1 0x2 298#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
290#define AR_PHY_9485_ANT_DIV_LNA1_PLUS_LNA2 0x3 299#define AR_PHY_ANT_DIV_LNA2 0x1
300#define AR_PHY_ANT_DIV_LNA1 0x2
301#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
291 302
292#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c) 303#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
293#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30) 304#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
@@ -413,6 +424,8 @@
413#define AR_PHY_FIND_SIG_RELSTEP 0x1f 424#define AR_PHY_FIND_SIG_RELSTEP 0x1f
414#define AR_PHY_FIND_SIG_RELSTEP_S 0 425#define AR_PHY_FIND_SIG_RELSTEP_S 0
415#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5 426#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
427#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG 0x00200000
428#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG_S 21
416#define AR_PHY_RESTART_DIV_GC 0x001C0000 429#define AR_PHY_RESTART_DIV_GC 0x001C0000
417#define AR_PHY_RESTART_DIV_GC_S 18 430#define AR_PHY_RESTART_DIV_GC_S 18
418#define AR_PHY_RESTART_ENA 0x01 431#define AR_PHY_RESTART_ENA 0x01
@@ -609,6 +622,12 @@
609#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff 622#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
610#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0 623#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
611 624
625#define AR_PHY_BB_THERM_ADC_3 (AR_SM_BASE + 0x250)
626#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN 0x0001ff00
627#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN_S 8
628#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET 0x000000ff
629#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_S 0
630
612#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254) 631#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
613#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff 632#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
614#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0 633#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
@@ -630,8 +649,8 @@
630#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1 649#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
631 650
632#define AR_PHY_65NM_CH0_SYNTH4 0x1608c 651#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
633#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002) 652#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
634#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1) 653#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
635#define AR_PHY_65NM_CH0_SYNTH7 0x16098 654#define AR_PHY_65NM_CH0_SYNTH7 0x16098
636#define AR_PHY_65NM_CH0_BIAS1 0x160c0 655#define AR_PHY_65NM_CH0_BIAS1 0x160c0
637#define AR_PHY_65NM_CH0_BIAS2 0x160c4 656#define AR_PHY_65NM_CH0_BIAS2 0x160c4
@@ -641,7 +660,7 @@
641#define AR_PHY_65NM_CH2_RXTX4 0x1690c 660#define AR_PHY_65NM_CH2_RXTX4 0x1690c
642 661
643#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ 662#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
644 ((AR_SREV_9462(ah) ? 0x1628c : 0x16280))) 663 (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
645#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300) 664#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
646#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8) 665#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
647 666
@@ -669,7 +688,7 @@
669#define AR_SWITCH_TABLE_ALL_S (0) 688#define AR_SWITCH_TABLE_ALL_S (0)
670 689
671#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ 690#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
672 (AR_SREV_9462(ah) ? 0x16294 : 0x1628c)) 691 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
673 692
674#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 693#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
675#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 694#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -691,17 +710,17 @@
691#define AR_CH0_TOP2_XPABIASLVL_S 12 710#define AR_CH0_TOP2_XPABIASLVL_S 12
692 711
693#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ 712#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
694 (AR_SREV_9462(ah) ? 0x16298 : 0x16290)) 713 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290))
695#define AR_CH0_XTAL_CAPINDAC 0x7f000000 714#define AR_CH0_XTAL_CAPINDAC 0x7f000000
696#define AR_CH0_XTAL_CAPINDAC_S 24 715#define AR_CH0_XTAL_CAPINDAC_S 24
697#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 716#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
698#define AR_CH0_XTAL_CAPOUTDAC_S 17 717#define AR_CH0_XTAL_CAPOUTDAC_S 17
699 718
700#define AR_PHY_PMU1 (AR_SREV_9462(ah) ? 0x16340 : 0x16c40) 719#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40)
701#define AR_PHY_PMU1_PWD 0x1 720#define AR_PHY_PMU1_PWD 0x1
702#define AR_PHY_PMU1_PWD_S 0 721#define AR_PHY_PMU1_PWD_S 0
703 722
704#define AR_PHY_PMU2 (AR_SREV_9462(ah) ? 0x16344 : 0x16c44) 723#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44)
705#define AR_PHY_PMU2_PGM 0x00200000 724#define AR_PHY_PMU2_PGM 0x00200000
706#define AR_PHY_PMU2_PGM_S 21 725#define AR_PHY_PMU2_PGM_S 21
707 726
@@ -881,6 +900,8 @@
881 900
882#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000 901#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
883#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28 902#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
903#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR 0x20000000
904#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR_S 29
884 905
885#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000 906#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000
886#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30 907#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30
@@ -1244,4 +1265,24 @@
1244#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f 1265#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f
1245#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0 1266#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0
1246 1267
1268#define AR_BTCOEX_WL_LNADIV 0x1a64
1269#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD 0x00003FFF
1270#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD_S 0
1271#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY 0x00004000
1272#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY_S 14
1273#define AR_BTCOEX_WL_LNADIV_FORCE_ON 0x00008000
1274#define AR_BTCOEX_WL_LNADIV_FORCE_ON_S 15
1275#define AR_BTCOEX_WL_LNADIV_MODE_OPTION 0x00030000
1276#define AR_BTCOEX_WL_LNADIV_MODE_OPTION_S 16
1277#define AR_BTCOEX_WL_LNADIV_MODE 0x007c0000
1278#define AR_BTCOEX_WL_LNADIV_MODE_S 18
1279#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ 0x00800000
1280#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ_S 23
1281#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE 0x01000000
1282#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE_S 24
1283#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT 0x02000000
1284#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT_S 25
1285#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000
1286#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26
1287
1247#endif /* AR9003_PHY_H */ 1288#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 4ef7dcccaa2f..58f30f65c6b6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -58,7 +58,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
61 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, 61 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
new file mode 100644
index 000000000000..843e79f67ff2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -0,0 +1,1231 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef INITVALS_9565_1P0_H
19#define INITVALS_9565_1P0_H
20
21/* AR9565 1.0 */
22
23static const u32 ar9565_1p0_mac_core[][2] = {
24 /* Addr allmodes */
25 {0x00000008, 0x00000000},
26 {0x00000030, 0x000a0085},
27 {0x00000034, 0x00000005},
28 {0x00000040, 0x00000000},
29 {0x00000044, 0x00000000},
30 {0x00000048, 0x00000008},
31 {0x0000004c, 0x00000010},
32 {0x00000050, 0x00000000},
33 {0x00001040, 0x002ffc0f},
34 {0x00001044, 0x002ffc0f},
35 {0x00001048, 0x002ffc0f},
36 {0x0000104c, 0x002ffc0f},
37 {0x00001050, 0x002ffc0f},
38 {0x00001054, 0x002ffc0f},
39 {0x00001058, 0x002ffc0f},
40 {0x0000105c, 0x002ffc0f},
41 {0x00001060, 0x002ffc0f},
42 {0x00001064, 0x002ffc0f},
43 {0x000010f0, 0x00000100},
44 {0x00001270, 0x00000000},
45 {0x000012b0, 0x00000000},
46 {0x000012f0, 0x00000000},
47 {0x0000143c, 0x00000000},
48 {0x0000147c, 0x00000000},
49 {0x00001810, 0x0f000003},
50 {0x00008000, 0x00000000},
51 {0x00008004, 0x00000000},
52 {0x00008008, 0x00000000},
53 {0x0000800c, 0x00000000},
54 {0x00008018, 0x00000000},
55 {0x00008020, 0x00000000},
56 {0x00008038, 0x00000000},
57 {0x0000803c, 0x00000000},
58 {0x00008040, 0x00000000},
59 {0x00008044, 0x00000000},
60 {0x00008048, 0x00000000},
61 {0x00008054, 0x00000000},
62 {0x00008058, 0x00000000},
63 {0x0000805c, 0x000fc78f},
64 {0x00008060, 0x0000000f},
65 {0x00008064, 0x00000000},
66 {0x00008070, 0x00000310},
67 {0x00008074, 0x00000020},
68 {0x00008078, 0x00000000},
69 {0x0000809c, 0x0000000f},
70 {0x000080a0, 0x00000000},
71 {0x000080a4, 0x02ff0000},
72 {0x000080a8, 0x0e070605},
73 {0x000080ac, 0x0000000d},
74 {0x000080b0, 0x00000000},
75 {0x000080b4, 0x00000000},
76 {0x000080b8, 0x00000000},
77 {0x000080bc, 0x00000000},
78 {0x000080c0, 0x2a800000},
79 {0x000080c4, 0x06900168},
80 {0x000080c8, 0x13881c20},
81 {0x000080cc, 0x01f40000},
82 {0x000080d0, 0x00252500},
83 {0x000080d4, 0x00b00005},
84 {0x000080d8, 0x00400002},
85 {0x000080dc, 0x00000000},
86 {0x000080e0, 0xffffffff},
87 {0x000080e4, 0x0000ffff},
88 {0x000080e8, 0x3f3f3f3f},
89 {0x000080ec, 0x00000000},
90 {0x000080f0, 0x00000000},
91 {0x000080f4, 0x00000000},
92 {0x000080fc, 0x00020000},
93 {0x00008100, 0x00000000},
94 {0x00008108, 0x00000052},
95 {0x0000810c, 0x00000000},
96 {0x00008110, 0x00000000},
97 {0x00008114, 0x000007ff},
98 {0x00008118, 0x000000aa},
99 {0x0000811c, 0x00003210},
100 {0x00008124, 0x00000000},
101 {0x00008128, 0x00000000},
102 {0x0000812c, 0x00000000},
103 {0x00008130, 0x00000000},
104 {0x00008134, 0x00000000},
105 {0x00008138, 0x00000000},
106 {0x0000813c, 0x0000ffff},
107 {0x00008144, 0xffffffff},
108 {0x00008168, 0x00000000},
109 {0x0000816c, 0x00000000},
110 {0x00008170, 0x18486200},
111 {0x00008174, 0x33332210},
112 {0x00008178, 0x00000000},
113 {0x0000817c, 0x00020000},
114 {0x000081c4, 0x33332210},
115 {0x000081c8, 0x00000000},
116 {0x000081cc, 0x00000000},
117 {0x000081d4, 0x00000000},
118 {0x000081ec, 0x00000000},
119 {0x000081f0, 0x00000000},
120 {0x000081f4, 0x00000000},
121 {0x000081f8, 0x00000000},
122 {0x000081fc, 0x00000000},
123 {0x00008240, 0x00100000},
124 {0x00008244, 0x0010f424},
125 {0x00008248, 0x00000800},
126 {0x0000824c, 0x0001e848},
127 {0x00008250, 0x00000000},
128 {0x00008254, 0x00000000},
129 {0x00008258, 0x00000000},
130 {0x0000825c, 0x40000000},
131 {0x00008260, 0x00080922},
132 {0x00008264, 0x9d400010},
133 {0x00008268, 0xffffffff},
134 {0x0000826c, 0x0000ffff},
135 {0x00008270, 0x00000000},
136 {0x00008274, 0x40000000},
137 {0x00008278, 0x003e4180},
138 {0x0000827c, 0x00000004},
139 {0x00008284, 0x0000002c},
140 {0x00008288, 0x0000002c},
141 {0x0000828c, 0x000000ff},
142 {0x00008294, 0x00000000},
143 {0x00008298, 0x00000000},
144 {0x0000829c, 0x00000000},
145 {0x00008300, 0x00000140},
146 {0x00008314, 0x00000000},
147 {0x0000831c, 0x0000010d},
148 {0x00008328, 0x00000000},
149 {0x0000832c, 0x0000001f},
150 {0x00008330, 0x00000302},
151 {0x00008334, 0x00000700},
152 {0x00008338, 0xffff0000},
153 {0x0000833c, 0x02400000},
154 {0x00008340, 0x000107ff},
155 {0x00008344, 0xaa48105b},
156 {0x00008348, 0x008f0000},
157 {0x0000835c, 0x00000000},
158 {0x00008360, 0xffffffff},
159 {0x00008364, 0xffffffff},
160 {0x00008368, 0x00000000},
161 {0x00008370, 0x00000000},
162 {0x00008374, 0x000000ff},
163 {0x00008378, 0x00000000},
164 {0x0000837c, 0x00000000},
165 {0x00008380, 0xffffffff},
166 {0x00008384, 0xffffffff},
167 {0x00008390, 0xffffffff},
168 {0x00008394, 0xffffffff},
169 {0x00008398, 0x00000000},
170 {0x0000839c, 0x00000000},
171 {0x000083a4, 0x0000fa14},
172 {0x000083a8, 0x000f0c00},
173 {0x000083ac, 0x33332210},
174 {0x000083b0, 0x33332210},
175 {0x000083b4, 0x33332210},
176 {0x000083b8, 0x33332210},
177 {0x000083bc, 0x00000000},
178 {0x000083c0, 0x00000000},
179 {0x000083c4, 0x00000000},
180 {0x000083c8, 0x00000000},
181 {0x000083cc, 0x00000200},
182 {0x000083d0, 0x800301ff},
183};
184
185static const u32 ar9565_1p0_mac_postamble[][5] = {
186 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
187 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
188 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
189 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
190 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
191 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
192 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
193 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
194 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
195};
196
/*
 * Baseband core register values, identical for all channel modes.
 * Each row is {reg addr, value}.  NOTE(review): vendor-generated
 * magic values for the AR9565 v1.0 — do not hand-edit.
 */
static const u32 ar9565_1p0_baseband_core[][2] = {
	/* Addr      allmodes  */
	{0x00009800, 0xafe68e30},
	{0x00009804, 0xfd14e000},
	{0x00009808, 0x9c0a8f6b},
	{0x0000980c, 0x04800000},
	{0x00009814, 0x9280c00a},
	{0x00009818, 0x00000000},
	{0x0000981c, 0x00020028},
	{0x00009834, 0x6400a290},
	{0x00009838, 0x0108ecff},
	{0x0000983c, 0x0d000600},
	{0x00009880, 0x201fff00},
	{0x00009884, 0x00001042},
	{0x000098a4, 0x00200400},
	{0x000098b0, 0x32840bbe},
	{0x000098d0, 0x004b6a8e},
	{0x000098d4, 0x00000820},
	{0x000098dc, 0x00000000},
	{0x000098e4, 0x01ffffff},
	{0x000098e8, 0x01ffffff},
	{0x000098ec, 0x01ffffff},
	{0x000098f0, 0x00000000},
	{0x000098f4, 0x00000000},
	{0x00009bf0, 0x80000000},
	{0x00009c04, 0xff55ff55},
	{0x00009c08, 0x0320ff55},
	{0x00009c0c, 0x00000000},
	{0x00009c10, 0x00000000},
	{0x00009c14, 0x00046384},
	{0x00009c18, 0x05b6b440},
	{0x00009c1c, 0x00b6b440},
	{0x00009d00, 0xc080a333},
	{0x00009d04, 0x40206c10},
	{0x00009d08, 0x009c4060},
	{0x00009d0c, 0x1883800a},
	{0x00009d10, 0x01834061},
	{0x00009d14, 0x00c00400},
	{0x00009d18, 0x00000000},
	{0x00009e08, 0x0078230c},
	{0x00009e24, 0x990bb515},
	{0x00009e28, 0x126f0000},
	{0x00009e30, 0x06336f77},
	{0x00009e34, 0x6af6532f},
	{0x00009e38, 0x0cc80c00},
	{0x00009e40, 0x0d261820},
	{0x00009e4c, 0x00001004},
	{0x00009e50, 0x00ff03f1},
	{0x00009e54, 0xe4c355c7},
	{0x00009e5c, 0xe9198724},
	{0x00009fc0, 0x823e4fc8},
	{0x00009fc4, 0x0001efb5},
	{0x00009fcc, 0x40000014},
	{0x0000a20c, 0x00000000},
	{0x0000a220, 0x00000000},
	{0x0000a224, 0x00000000},
	{0x0000a228, 0x10002310},
	{0x0000a23c, 0x00000000},
	{0x0000a244, 0x0c000000},
	{0x0000a2a0, 0x00000001},
	{0x0000a2c0, 0x00000001},
	{0x0000a2c8, 0x00000000},
	{0x0000a2cc, 0x18c43433},
	{0x0000a2d4, 0x00000000},
	{0x0000a2ec, 0x00000000},
	{0x0000a2f0, 0x00000000},
	{0x0000a2f4, 0x00000000},
	{0x0000a2f8, 0x00000000},
	{0x0000a344, 0x00000000},
	{0x0000a34c, 0x00000000},
	{0x0000a350, 0x0000a000},
	{0x0000a364, 0x00000000},
	{0x0000a370, 0x00000000},
	{0x0000a390, 0x00000001},
	{0x0000a394, 0x00000444},
	{0x0000a398, 0x001f0e0f},
	{0x0000a39c, 0x0075393f},
	{0x0000a3a0, 0xb79f6427},
	{0x0000a3a4, 0x00000000},
	{0x0000a3a8, 0xaaaaaaaa},
	{0x0000a3ac, 0x3c466478},
	{0x0000a3c0, 0x20202020},
	{0x0000a3c4, 0x22222220},
	{0x0000a3c8, 0x20200020},
	{0x0000a3cc, 0x20202020},
	{0x0000a3d0, 0x20202020},
	{0x0000a3d4, 0x20202020},
	{0x0000a3d8, 0x20202020},
	{0x0000a3dc, 0x20202020},
	{0x0000a3e0, 0x20202020},
	{0x0000a3e4, 0x20202020},
	{0x0000a3e8, 0x20202020},
	{0x0000a3ec, 0x20202020},
	{0x0000a3f0, 0x00000000},
	{0x0000a3f4, 0x00000006},
	{0x0000a3f8, 0x0c9bd380},
	{0x0000a3fc, 0x000f0f01},
	{0x0000a400, 0x8fa91f01},
	{0x0000a404, 0x00000000},
	{0x0000a408, 0x0e79e5c6},
	{0x0000a40c, 0x00820820},
	{0x0000a414, 0x1ce739ce},
	{0x0000a418, 0x2d001dce},
	{0x0000a41c, 0x1ce739ce},
	{0x0000a420, 0x000001ce},
	{0x0000a424, 0x1ce739ce},
	{0x0000a428, 0x000001ce},
	{0x0000a42c, 0x1ce739ce},
	{0x0000a430, 0x1ce739ce},
	{0x0000a434, 0x00000000},
	{0x0000a438, 0x00001801},
	{0x0000a43c, 0x00000000},
	{0x0000a440, 0x00000000},
	{0x0000a444, 0x00000000},
	{0x0000a448, 0x05000096},
	{0x0000a44c, 0x00000001},
	{0x0000a450, 0x00010000},
	{0x0000a454, 0x03000000},
	{0x0000a458, 0x00000000},
	{0x0000a644, 0xbfad9d74},
	{0x0000a648, 0x0048060a},
	{0x0000a64c, 0x00003c37},
	{0x0000a670, 0x03020100},
	{0x0000a674, 0x09080504},
	{0x0000a678, 0x0d0c0b0a},
	{0x0000a67c, 0x13121110},
	{0x0000a680, 0x31301514},
	{0x0000a684, 0x35343332},
	{0x0000a688, 0x00000036},
	{0x0000a690, 0x00000838},
	{0x0000a6b4, 0x00512c01},
	{0x0000a7c0, 0x00000000},
	{0x0000a7c4, 0xfffffffc},
	{0x0000a7c8, 0x00000000},
	{0x0000a7cc, 0x00000000},
	{0x0000a7d0, 0x00000000},
	{0x0000a7d4, 0x00000004},
	{0x0000a7dc, 0x00000001},
	{0x0000a7f0, 0x80000000},
};
337
/*
 * Baseband register values applied after the core table, selected per
 * channel mode.  Each row is {reg addr, 5G_HT20, 5G_HT40, 2G_HT40,
 * 2G_HT20}.  NOTE(review): vendor-generated magic values for the
 * AR9565 v1.0 — do not hand-edit.
 */
static const u32 ar9565_1p0_baseband_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
	{0x0000a204, 0x07318fc0, 0x07318fc4, 0x07318fc4, 0x07318fc0},
	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
	{0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
	{0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
	{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
	{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
	{0x0000a288, 0x00100510, 0x00100510, 0x00100510, 0x00100510},
	{0x0000a28c, 0x00021551, 0x00021551, 0x00021551, 0x00021551},
	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
	{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
383
/*
 * Radio (RF) core register values, identical for all channel modes.
 * Each row is {reg addr, value}.  NOTE(review): vendor-generated
 * magic values for the AR9565 v1.0 — do not hand-edit.
 */
static const u32 ar9565_1p0_radio_core[][2] = {
	/* Addr      allmodes  */
	{0x00016000, 0x36db6db6},
	{0x00016004, 0x6db6db40},
	{0x00016008, 0x73f00000},
	{0x0001600c, 0x00000000},
	{0x00016010, 0x6d823601},
	{0x00016040, 0x7f80fff8},
	{0x0001604c, 0x1c99e04f},
	{0x00016050, 0x6db6db6c},
	{0x00016058, 0x6c200000},
	{0x00016080, 0x000c0000},
	{0x00016084, 0x9a68048c},
	{0x00016088, 0x54214514},
	{0x0001608c, 0x1203040b},
	{0x00016090, 0x24926490},
	{0x00016098, 0xd28b3330},
	{0x000160a0, 0x0a108ffe},
	{0x000160a4, 0x812fc491},
	{0x000160a8, 0x423c8000},
	{0x000160b4, 0x92000000},
	{0x000160b8, 0x0285dddc},
	{0x000160bc, 0x02908888},
	{0x000160c0, 0x006db6d0},
	{0x000160c4, 0x6dd6db60},
	{0x000160c8, 0x6db6db6c},
	{0x000160cc, 0x6de6c1b0},
	{0x00016100, 0x3fffbe04},
	{0x00016104, 0xfff80000},
	{0x00016108, 0x00200400},
	{0x00016110, 0x00000000},
	{0x00016144, 0x02084080},
	{0x00016148, 0x000080c0},
	{0x00016280, 0x050a0001},
	{0x00016284, 0x3d841440},
	{0x00016288, 0x00000000},
	{0x0001628c, 0xe3000000},
	{0x00016290, 0xa1004080},
	{0x00016294, 0x40000028},
	{0x00016298, 0x55aa2900},
	{0x00016340, 0x131c827a},
	{0x00016344, 0x00300000},
};
427
/*
 * Radio register values applied after the core table, selected per
 * channel mode.  Each row is {reg addr, 5G_HT20, 5G_HT40, 2G_HT40,
 * 2G_HT20}.  NOTE(review): vendor-generated magic values for the
 * AR9565 v1.0 — do not hand-edit.
 */
static const u32 ar9565_1p0_radio_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
	{0x000160ac, 0xa4646c08, 0xa4646c08, 0xa4646c08, 0xa4646c08},
	{0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
	{0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
436
/*
 * SoC register values written before the main init sequence, identical
 * for all channel modes.  Each row is {reg addr, value}.
 * NOTE(review): vendor-generated magic values for the AR9565 v1.0 —
 * do not hand-edit.
 */
static const u32 ar9565_1p0_soc_preamble[][2] = {
	/* Addr      allmodes  */
	{0x00004078, 0x00000002},
	{0x000040a4, 0x00a0c9c9},
	{0x00007020, 0x00000000},
	{0x00007034, 0x00000002},
	{0x00007038, 0x000004c2},
};
445
/*
 * SoC register values applied after the main init sequence, selected
 * per channel mode (same value in every mode here).  Each row is
 * {reg addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}.  NOTE(review):
 * vendor-generated magic values for the AR9565 v1.0 — do not hand-edit.
 */
static const u32 ar9565_1p0_soc_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
};
450
/*
 * Default RX gain table, identical for all channel modes.  Each row is
 * {reg addr, value}.  NOTE(review): vendor-generated magic values for
 * the AR9565 v1.0 — do not hand-edit.  The unconventional capitalized
 * name is kept because other files reference this symbol.
 */
static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
	/* Addr      allmodes  */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x01910190},
	{0x0000a030, 0x01930192},
	{0x0000a034, 0x01950194},
	{0x0000a038, 0x038a0196},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x22222229},
	{0x0000a084, 0x1d1d1d1d},
	{0x0000a088, 0x1d1d1d1d},
	{0x0000a08c, 0x1d1d1d1d},
	{0x0000a090, 0x171d1d1d},
	{0x0000a094, 0x11111717},
	{0x0000a098, 0x00030311},
	{0x0000a09c, 0x00000000},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x01000101},
	{0x0000a0c8, 0x011e011f},
	{0x0000a0cc, 0x011c011d},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x0302021e},
	{0x0000a0e0, 0x03000301},
	{0x0000a0e4, 0x031e031f},
	{0x0000a0e8, 0x0402031d},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x0502041d},
	{0x0000a0f8, 0x05000501},
	{0x0000a0fc, 0x051e051f},
	{0x0000a100, 0x06010602},
	{0x0000a104, 0x061f0600},
	{0x0000a108, 0x061d061e},
	{0x0000a10c, 0x07020703},
	{0x0000a110, 0x07000701},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x01000101},
	{0x0000a148, 0x011e011f},
	{0x0000a14c, 0x011c011d},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x0302021e},
	{0x0000a160, 0x03000301},
	{0x0000a164, 0x031e031f},
	{0x0000a168, 0x0402031d},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x0502041d},
	{0x0000a178, 0x05000501},
	{0x0000a17c, 0x051e051f},
	{0x0000a180, 0x06010602},
	{0x0000a184, 0x061f0600},
	{0x0000a188, 0x061d061e},
	{0x0000a18c, 0x07020703},
	{0x0000a190, 0x07000701},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
710
/*
 * TX gain table for the "lowest ob/db" (output bias / drive bias)
 * configuration, selected per channel mode.  Each row is {reg addr,
 * 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}.  NOTE(review):
 * vendor-generated magic values for the AR9565 v1.0 — do not
 * hand-edit.  The unconventional capitalized name is kept because
 * other files reference this symbol.
 */
static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
	{0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
770
/*
 * PCIe PHY serdes settings for the "PLL on / CLKREQ disabled in L1"
 * power-save configuration.  Each row is {reg addr, value}.
 * NOTE(review): vendor-generated magic values for the AR9565 v1.0 —
 * do not hand-edit.
 */
static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
	/* Addr      allmodes  */
	{0x00018c00, 0x18212ede},
	{0x00018c04, 0x000801d8},
	{0x00018c08, 0x0003780c},
};
777
/*
 * Register overrides used when the chip runs with the fast (44 MHz
 * class) reference clock — 5 GHz modes only, per the column header.
 * Each row is {reg addr, 5G_HT20, 5G_HT40}.  NOTE(review):
 * vendor-generated magic values for the AR9565 v1.0 — do not
 * hand-edit.
 */
static const u32 ar9565_1p0_modes_fast_clock[][3] = {
	/* Addr      5G_HT20     5G_HT40   */
	{0x00001030, 0x00000268, 0x000004d0},
	{0x00001070, 0x0000018c, 0x00000318},
	{0x000010b0, 0x00000fd0, 0x00001fa0},
	{0x00008014, 0x044c044c, 0x08980898},
	{0x0000801c, 0x148ec02b, 0x148ec057},
	{0x00008318, 0x000044c0, 0x00008980},
	{0x00009e00, 0x03721821, 0x03721821},
	{0x0000a230, 0x0000400b, 0x00004016},
	{0x0000a254, 0x00000898, 0x00001130},
};
790
/*
 * RX gain table for boards without an external LNA (wo_xlna),
 * identical for all channel modes.  Each row is {reg addr, value}.
 * NOTE(review): vendor-generated magic values for the AR9565 v1.0 —
 * do not hand-edit.
 */
static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
	/* Addr      allmodes  */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x03820190},
	{0x0000a030, 0x03840383},
	{0x0000a034, 0x03880385},
	{0x0000a038, 0x038a0389},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x29292929},
	{0x0000a084, 0x29292929},
	{0x0000a088, 0x29292929},
	{0x0000a08c, 0x29292929},
	{0x0000a090, 0x22292929},
	{0x0000a094, 0x1d1d2222},
	{0x0000a098, 0x0c111117},
	{0x0000a09c, 0x00030303},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x00bf00a0},
	{0x0000a0c4, 0x11a011a1},
	{0x0000a0c8, 0x11be11bf},
	{0x0000a0cc, 0x11bc11bd},
	{0x0000a0d0, 0x22632264},
	{0x0000a0d4, 0x22612262},
	{0x0000a0d8, 0x227f2260},
	{0x0000a0dc, 0x4322227e},
	{0x0000a0e0, 0x43204321},
	{0x0000a0e4, 0x433e433f},
	{0x0000a0e8, 0x4462433d},
	{0x0000a0ec, 0x44604461},
	{0x0000a0f0, 0x447e447f},
	{0x0000a0f4, 0x5582447d},
	{0x0000a0f8, 0x55805581},
	{0x0000a0fc, 0x559e559f},
	{0x0000a100, 0x66816682},
	{0x0000a104, 0x669f6680},
	{0x0000a108, 0x669d669e},
	{0x0000a10c, 0x77627763},
	{0x0000a110, 0x77607761},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x00bf00a0},
	{0x0000a144, 0x11a011a1},
	{0x0000a148, 0x11be11bf},
	{0x0000a14c, 0x11bc11bd},
	{0x0000a150, 0x22632264},
	{0x0000a154, 0x22612262},
	{0x0000a158, 0x227f2260},
	{0x0000a15c, 0x4322227e},
	{0x0000a160, 0x43204321},
	{0x0000a164, 0x433e433f},
	{0x0000a168, 0x4462433d},
	{0x0000a16c, 0x44604461},
	{0x0000a170, 0x447e447f},
	{0x0000a174, 0x5582447d},
	{0x0000a178, 0x55805581},
	{0x0000a17c, 0x559e559f},
	{0x0000a180, 0x66816682},
	{0x0000a184, 0x669f6680},
	{0x0000a188, 0x669d669e},
	{0x0000a18c, 0x77627763},
	{0x0000a190, 0x77607761},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1050
/*
 * TX gain table for the "low ob/db" (output bias / drive bias)
 * configuration, selected per channel mode.  Each row is {reg addr,
 * 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}.  NOTE(review):
 * vendor-generated magic values for the AR9565 v1.0 — do not
 * hand-edit.  Currently identical to the "lowest ob/db" table above;
 * presumably kept separate so the two can diverge — verify before
 * deduplicating.
 */
static const u32 ar9565_1p0_modes_low_ob_db_tx_gain_table[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
	{0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
	{0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
1110
1111static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
1112 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1113 {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
1114 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1115 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1116 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1117 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1118 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1119 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1120 {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
1121 {0x0000a50c, 0x10022223, 0x10022223, 0x0c000200, 0x0c000200},
1122 {0x0000a510, 0x15022620, 0x15022620, 0x10000202, 0x10000202},
1123 {0x0000a514, 0x19022622, 0x19022622, 0x13000400, 0x13000400},
1124 {0x0000a518, 0x1c022822, 0x1c022822, 0x17000402, 0x17000402},
1125 {0x0000a51c, 0x21022842, 0x21022842, 0x1b000404, 0x1b000404},
1126 {0x0000a520, 0x24022c41, 0x24022c41, 0x1e000603, 0x1e000603},
1127 {0x0000a524, 0x29023042, 0x29023042, 0x23000a02, 0x23000a02},
1128 {0x0000a528, 0x2d023044, 0x2d023044, 0x27000a04, 0x27000a04},
1129 {0x0000a52c, 0x31023644, 0x31023644, 0x2a000a20, 0x2a000a20},
1130 {0x0000a530, 0x36025643, 0x36025643, 0x2e000e20, 0x2e000e20},
1131 {0x0000a534, 0x3a025a44, 0x3a025a44, 0x32000e22, 0x32000e22},
1132 {0x0000a538, 0x3d025e45, 0x3d025e45, 0x36000e24, 0x36000e24},
1133 {0x0000a53c, 0x43025e4a, 0x43025e4a, 0x3a001640, 0x3a001640},
1134 {0x0000a540, 0x4a025e6c, 0x4a025e6c, 0x3e001660, 0x3e001660},
1135 {0x0000a544, 0x50025e8e, 0x50025e8e, 0x41001861, 0x41001861},
1136 {0x0000a548, 0x56025eb2, 0x56025eb2, 0x45001a81, 0x45001a81},
1137 {0x0000a54c, 0x5c025eb5, 0x5c025eb5, 0x49001a83, 0x49001a83},
1138 {0x0000a550, 0x62025ef6, 0x62025ef6, 0x4c001c84, 0x4c001c84},
1139 {0x0000a554, 0x65025f56, 0x65025f56, 0x4f001ce3, 0x4f001ce3},
1140 {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
1141 {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
1142 {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
1143 {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1144 {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1145 {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1146 {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1147 {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1148 {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1149 {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
1150 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1151 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1152 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1153 {0x0000a60c, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
1154 {0x0000a610, 0x00804201, 0x00804201, 0x00000000, 0x00000000},
1155 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
1156 {0x0000a618, 0x00804201, 0x00804201, 0x01404501, 0x01404501},
1157 {0x0000a61c, 0x02008201, 0x02008201, 0x02008501, 0x02008501},
1158 {0x0000a620, 0x02c10a03, 0x02c10a03, 0x0280ca03, 0x0280ca03},
1159 {0x0000a624, 0x04815205, 0x04815205, 0x02c10b04, 0x02c10b04},
1160 {0x0000a628, 0x0581d406, 0x0581d406, 0x03814b04, 0x03814b04},
1161 {0x0000a62c, 0x0581d607, 0x0581d607, 0x05018e05, 0x05018e05},
1162 {0x0000a630, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1163 {0x0000a634, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1164 {0x0000a638, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1165 {0x0000a63c, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
1166 {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
1167 {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
1168 {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
1169};
1170
1171static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1172 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1173 {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
1174 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1175 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1176 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1177 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1178 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1179 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1180 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
1181 {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
1182 {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
1183 {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
1184 {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
1185 {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
1186 {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
1187 {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
1188 {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
1189 {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
1190 {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
1191 {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
1192 {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
1193 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
1194 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
1195 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
1196 {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
1197 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
1198 {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
1199 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
1200 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
1201 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
1202 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
1203 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1204 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1205 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1206 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1207 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1208 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1209 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
1210 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1211 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1212 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1213 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1214 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1215 {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1216 {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1217 {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1218 {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1219 {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1220 {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1221 {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1222 {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1223 {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1224 {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1225 {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1226 {0x00016044, 0x056d82e6, 0x056d82e6, 0x056d82e6, 0x056d82e6},
1227 {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1228 {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1229};
1230
1231#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b09285c36c4a..dfe6a4707fd2 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -173,6 +173,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
173 173
174#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)]) 174#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
175 175
176#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
177
176#define ATH_TX_COMPLETE_POLL_INT 1000 178#define ATH_TX_COMPLETE_POLL_INT 1000
177 179
178enum ATH_AGGR_STATUS { 180enum ATH_AGGR_STATUS {
@@ -280,6 +282,7 @@ struct ath_tx_control {
280 struct ath_txq *txq; 282 struct ath_txq *txq;
281 struct ath_node *an; 283 struct ath_node *an;
282 u8 paprd; 284 u8 paprd;
285 struct ieee80211_sta *sta;
283}; 286};
284 287
285#define ATH_TX_ERROR 0x01 288#define ATH_TX_ERROR 0x01
@@ -422,7 +425,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
422void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif); 425void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
423void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif); 426void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
424void ath9k_set_beacon(struct ath_softc *sc); 427void ath9k_set_beacon(struct ath_softc *sc);
425void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
426 428
427/*******************/ 429/*******************/
428/* Link Monitoring */ 430/* Link Monitoring */
@@ -472,7 +474,7 @@ struct ath_btcoex {
472 unsigned long op_flags; 474 unsigned long op_flags;
473 int bt_stomp_type; /* Types of BT stomping */ 475 int bt_stomp_type; /* Types of BT stomping */
474 u32 btcoex_no_stomp; /* in usec */ 476 u32 btcoex_no_stomp; /* in usec */
475 u32 btcoex_period; /* in usec */ 477 u32 btcoex_period; /* in msec */
476 u32 btscan_no_stomp; /* in usec */ 478 u32 btscan_no_stomp; /* in usec */
477 u32 duty_cycle; 479 u32 duty_cycle;
478 u32 bt_wait_time; 480 u32 bt_wait_time;
@@ -537,6 +539,7 @@ struct ath9k_wow_pattern {
537#ifdef CONFIG_MAC80211_LEDS 539#ifdef CONFIG_MAC80211_LEDS
538void ath_init_leds(struct ath_softc *sc); 540void ath_init_leds(struct ath_softc *sc);
539void ath_deinit_leds(struct ath_softc *sc); 541void ath_deinit_leds(struct ath_softc *sc);
542void ath_fill_led_pin(struct ath_softc *sc);
540#else 543#else
541static inline void ath_init_leds(struct ath_softc *sc) 544static inline void ath_init_leds(struct ath_softc *sc)
542{ 545{
@@ -545,6 +548,9 @@ static inline void ath_init_leds(struct ath_softc *sc)
545static inline void ath_deinit_leds(struct ath_softc *sc) 548static inline void ath_deinit_leds(struct ath_softc *sc)
546{ 549{
547} 550}
551static inline void ath_fill_led_pin(struct ath_softc *sc)
552{
553}
548#endif 554#endif
549 555
550/*******************************/ 556/*******************************/
@@ -596,8 +602,6 @@ struct ath_ant_comb {
596 int main_conf; 602 int main_conf;
597 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf; 603 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
598 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf; 604 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
599 int first_bias;
600 int second_bias;
601 bool first_ratio; 605 bool first_ratio;
602 bool second_ratio; 606 bool second_ratio;
603 unsigned long scan_start_time; 607 unsigned long scan_start_time;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index acd437384fe4..419e9a3f2fed 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -43,8 +43,8 @@ static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
43 { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */ 43 { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
44}; 44};
45 45
46static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX] 46static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
47 [AR9300_NUM_WLAN_WEIGHTS] = { 47 [AR9300_NUM_WLAN_WEIGHTS] = {
48 { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */ 48 { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */ 49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */ 50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
@@ -208,14 +208,37 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
208 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); 208 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
209} 209}
210 210
211/*
212 * For AR9002, bt_weight/wlan_weight are used.
213 * For AR9003 and above, stomp_type is used.
214 */
211void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, 215void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
212 u32 bt_weight, 216 u32 bt_weight,
213 u32 wlan_weight) 217 u32 wlan_weight,
218 enum ath_stomp_type stomp_type)
214{ 219{
215 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 220 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
216 221
217 btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) | 222 if (AR_SREV_9300_20_OR_LATER(ah)) {
218 SM(wlan_weight, AR_BTCOEX_WL_WGHT); 223 const u32 *weight = ar9003_wlan_weights[stomp_type];
224 int i;
225
226 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
227 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
228 btcoex_hw->mci.stomp_ftp)
229 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
230 weight = mci_wlan_weights[stomp_type];
231 }
232
233 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
234 btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
235 btcoex_hw->wlan_weight[i] = weight[i];
236 }
237 } else {
238 btcoex_hw->bt_coex_weights =
239 SM(bt_weight, AR_BTCOEX_BT_WGHT) |
240 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
241 }
219} 242}
220EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); 243EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
221 244
@@ -282,7 +305,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
282 ath9k_hw_btcoex_enable_2wire(ah); 305 ath9k_hw_btcoex_enable_2wire(ah);
283 break; 306 break;
284 case ATH_BTCOEX_CFG_3WIRE: 307 case ATH_BTCOEX_CFG_3WIRE:
285 if (AR_SREV_9462(ah)) { 308 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
286 ath9k_hw_btcoex_enable_mci(ah); 309 ath9k_hw_btcoex_enable_mci(ah);
287 return; 310 return;
288 } 311 }
@@ -304,7 +327,7 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
304 int i; 327 int i;
305 328
306 btcoex_hw->enabled = false; 329 btcoex_hw->enabled = false;
307 if (AR_SREV_9462(ah)) { 330 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
308 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 331 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
309 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) 332 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
310 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), 333 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
@@ -332,26 +355,6 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
332} 355}
333EXPORT_SYMBOL(ath9k_hw_btcoex_disable); 356EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
334 357
335static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
336 enum ath_stomp_type stomp_type)
337{
338 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
339 const u32 *weight = ar9003_wlan_weights[stomp_type];
340 int i;
341
342 if (AR_SREV_9462(ah)) {
343 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
344 btcoex->mci.stomp_ftp)
345 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
346 weight = ar9462_wlan_weights[stomp_type];
347 }
348
349 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
350 btcoex->bt_weight[i] = AR9300_BT_WGHT;
351 btcoex->wlan_weight[i] = weight[i];
352 }
353}
354
355/* 358/*
356 * Configures appropriate weight based on stomp type. 359 * Configures appropriate weight based on stomp type.
357 */ 360 */
@@ -359,22 +362,22 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
359 enum ath_stomp_type stomp_type) 362 enum ath_stomp_type stomp_type)
360{ 363{
361 if (AR_SREV_9300_20_OR_LATER(ah)) { 364 if (AR_SREV_9300_20_OR_LATER(ah)) {
362 ar9003_btcoex_bt_stomp(ah, stomp_type); 365 ath9k_hw_btcoex_set_weight(ah, 0, 0, stomp_type);
363 return; 366 return;
364 } 367 }
365 368
366 switch (stomp_type) { 369 switch (stomp_type) {
367 case ATH_BTCOEX_STOMP_ALL: 370 case ATH_BTCOEX_STOMP_ALL:
368 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 371 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
369 AR_STOMP_ALL_WLAN_WGHT); 372 AR_STOMP_ALL_WLAN_WGHT, 0);
370 break; 373 break;
371 case ATH_BTCOEX_STOMP_LOW: 374 case ATH_BTCOEX_STOMP_LOW:
372 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 375 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
373 AR_STOMP_LOW_WLAN_WGHT); 376 AR_STOMP_LOW_WLAN_WGHT, 0);
374 break; 377 break;
375 case ATH_BTCOEX_STOMP_NONE: 378 case ATH_BTCOEX_STOMP_NONE:
376 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 379 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
377 AR_STOMP_NONE_WLAN_WGHT); 380 AR_STOMP_NONE_WLAN_WGHT, 0);
378 break; 381 break;
379 default: 382 default:
380 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n"); 383 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 20092f98658f..385197ad79b0 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -107,7 +107,8 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
107void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum); 107void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
108void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, 108void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
109 u32 bt_weight, 109 u32 bt_weight,
110 u32 wlan_weight); 110 u32 wlan_weight,
111 enum ath_stomp_type stomp_type);
111void ath9k_hw_btcoex_disable(struct ath_hw *ah); 112void ath9k_hw_btcoex_disable(struct ath_hw *ah);
112void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, 113void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
113 enum ath_stomp_type stomp_type); 114 enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c8ef30127adb..6727b566d294 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -222,6 +222,57 @@ static const struct file_operations fops_disable_ani = {
222 .llseek = default_llseek, 222 .llseek = default_llseek,
223}; 223};
224 224
225static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
226 size_t count, loff_t *ppos)
227{
228 struct ath_softc *sc = file->private_data;
229 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
230 char buf[32];
231 unsigned int len;
232
233 len = sprintf(buf, "%d\n", common->antenna_diversity);
234 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
235}
236
237static ssize_t write_file_ant_diversity(struct file *file,
238 const char __user *user_buf,
239 size_t count, loff_t *ppos)
240{
241 struct ath_softc *sc = file->private_data;
242 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
243 unsigned long antenna_diversity;
244 char buf[32];
245 ssize_t len;
246
247 len = min(count, sizeof(buf) - 1);
248 if (copy_from_user(buf, user_buf, len))
249 return -EFAULT;
250
251 if (!AR_SREV_9565(sc->sc_ah))
252 goto exit;
253
254 buf[len] = '\0';
255 if (strict_strtoul(buf, 0, &antenna_diversity))
256 return -EINVAL;
257
258 common->antenna_diversity = !!antenna_diversity;
259 ath9k_ps_wakeup(sc);
260 ath_ant_comb_update(sc);
261 ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
262 common->antenna_diversity);
263 ath9k_ps_restore(sc);
264exit:
265 return count;
266}
267
268static const struct file_operations fops_ant_diversity = {
269 .read = read_file_ant_diversity,
270 .write = write_file_ant_diversity,
271 .open = simple_open,
272 .owner = THIS_MODULE,
273 .llseek = default_llseek,
274};
275
225static ssize_t read_file_dma(struct file *file, char __user *user_buf, 276static ssize_t read_file_dma(struct file *file, char __user *user_buf,
226 size_t count, loff_t *ppos) 277 size_t count, loff_t *ppos)
227{ 278{
@@ -373,6 +424,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
373 sc->debug.stats.istats.tsfoor++; 424 sc->debug.stats.istats.tsfoor++;
374 if (status & ATH9K_INT_MCI) 425 if (status & ATH9K_INT_MCI)
375 sc->debug.stats.istats.mci++; 426 sc->debug.stats.istats.mci++;
427 if (status & ATH9K_INT_GENTIMER)
428 sc->debug.stats.istats.gen_timer++;
376} 429}
377 430
378static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, 431static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +471,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
418 PR_IS("DTIM", dtim); 471 PR_IS("DTIM", dtim);
419 PR_IS("TSFOOR", tsfoor); 472 PR_IS("TSFOOR", tsfoor);
420 PR_IS("MCI", mci); 473 PR_IS("MCI", mci);
474 PR_IS("GENTIMER", gen_timer);
421 PR_IS("TOTAL", total); 475 PR_IS("TOTAL", total);
422 476
423 len += snprintf(buf + len, mxlen - len, 477 len += snprintf(buf + len, mxlen - len,
@@ -1598,12 +1652,12 @@ int ath9k_init_debug(struct ath_hw *ah)
1598 debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc, 1652 debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
1599 &fops_samps); 1653 &fops_samps);
1600#endif 1654#endif
1601
1602 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, 1655 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1603 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1656 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1604
1605 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, 1657 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
1606 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); 1658 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1659 debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
1660 sc->debug.debugfs_phy, sc, &fops_ant_diversity);
1607 1661
1608 return 0; 1662 return 0;
1609} 1663}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 8b9d080d89da..2ed9785a38fa 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -41,7 +41,6 @@ enum ath_reset_type {
41 RESET_TYPE_PLL_HANG, 41 RESET_TYPE_PLL_HANG,
42 RESET_TYPE_MAC_HANG, 42 RESET_TYPE_MAC_HANG,
43 RESET_TYPE_BEACON_STUCK, 43 RESET_TYPE_BEACON_STUCK,
44 RESET_TYPE_MCI,
45 __RESET_TYPE_MAX 44 __RESET_TYPE_MAX
46}; 45};
47 46
@@ -74,6 +73,8 @@ enum ath_reset_type {
74 * from a beacon differs from the PCU's internal TSF by more than a 73 * from a beacon differs from the PCU's internal TSF by more than a
75 * (programmable) threshold 74 * (programmable) threshold
76 * @local_timeout: Internal bus timeout. 75 * @local_timeout: Internal bus timeout.
76 * @mci: MCI interrupt, specific to MCI based BTCOEX chipsets
77 * @gen_timer: Generic hardware timer interrupt
77 */ 78 */
78struct ath_interrupt_stats { 79struct ath_interrupt_stats {
79 u32 total; 80 u32 total;
@@ -100,6 +101,7 @@ struct ath_interrupt_stats {
100 u32 bb_watchdog; 101 u32 bb_watchdog;
101 u32 tsfoor; 102 u32 tsfoor;
102 u32 mci; 103 u32 mci;
104 u32 gen_timer;
103 105
104 /* Sync-cause stats */ 106 /* Sync-cause stats */
105 u32 sync_cause_all; 107 u32 sync_cause_all;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 484b31305906..319c651fa6c5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -96,6 +96,7 @@
96 96
97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
99#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
99#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) 100#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
100 101
101#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) 102#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
@@ -108,7 +109,7 @@
108#define EEP_RFSILENT_ENABLED_S 0 109#define EEP_RFSILENT_ENABLED_S 0
109#define EEP_RFSILENT_POLARITY 0x0002 110#define EEP_RFSILENT_POLARITY 0x0002
110#define EEP_RFSILENT_POLARITY_S 1 111#define EEP_RFSILENT_POLARITY_S 1
111#define EEP_RFSILENT_GPIO_SEL (AR_SREV_9462(ah) ? 0x00fc : 0x001c) 112#define EEP_RFSILENT_GPIO_SEL ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00fc : 0x001c)
112#define EEP_RFSILENT_GPIO_SEL_S 2 113#define EEP_RFSILENT_GPIO_SEL_S 2
113 114
114#define AR5416_OPFLAGS_11A 0x01 115#define AR5416_OPFLAGS_11A 0x01
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 9f83f71742a5..d9ed141a053e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -44,25 +44,6 @@ void ath_init_leds(struct ath_softc *sc)
44 if (AR_SREV_9100(sc->sc_ah)) 44 if (AR_SREV_9100(sc->sc_ah))
45 return; 45 return;
46 46
47 if (sc->sc_ah->led_pin < 0) {
48 if (AR_SREV_9287(sc->sc_ah))
49 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
50 else if (AR_SREV_9485(sc->sc_ah))
51 sc->sc_ah->led_pin = ATH_LED_PIN_9485;
52 else if (AR_SREV_9300(sc->sc_ah))
53 sc->sc_ah->led_pin = ATH_LED_PIN_9300;
54 else if (AR_SREV_9462(sc->sc_ah))
55 sc->sc_ah->led_pin = ATH_LED_PIN_9462;
56 else
57 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
58 }
59
60 /* Configure gpio 1 for output */
61 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
62 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
63 /* LED off, active low */
64 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
65
66 if (!led_blink) 47 if (!led_blink)
67 sc->led_cdev.default_trigger = 48 sc->led_cdev.default_trigger =
68 ieee80211_get_radio_led_name(sc->hw); 49 ieee80211_get_radio_led_name(sc->hw);
@@ -78,6 +59,31 @@ void ath_init_leds(struct ath_softc *sc)
78 59
79 sc->led_registered = true; 60 sc->led_registered = true;
80} 61}
62
63void ath_fill_led_pin(struct ath_softc *sc)
64{
65 struct ath_hw *ah = sc->sc_ah;
66
67 if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
68 return;
69
70 if (AR_SREV_9287(ah))
71 ah->led_pin = ATH_LED_PIN_9287;
72 else if (AR_SREV_9485(sc->sc_ah))
73 ah->led_pin = ATH_LED_PIN_9485;
74 else if (AR_SREV_9300(sc->sc_ah))
75 ah->led_pin = ATH_LED_PIN_9300;
76 else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
77 ah->led_pin = ATH_LED_PIN_9462;
78 else
79 ah->led_pin = ATH_LED_PIN_DEF;
80
81 /* Configure gpio 1 for output */
82 ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
83
84 /* LED off, active low */
85 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
86}
81#endif 87#endif
82 88
83/*******************/ 89/*******************/
@@ -228,7 +234,12 @@ static void ath_btcoex_period_timer(unsigned long data)
228 ath9k_hw_btcoex_enable(ah); 234 ath9k_hw_btcoex_enable(ah);
229 spin_unlock_bh(&btcoex->btcoex_lock); 235 spin_unlock_bh(&btcoex->btcoex_lock);
230 236
231 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { 237 /*
238 * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
239 * ensure that we properly convert btcoex_period to usec
240 * for any comparision with (btcoex/btscan_)no_stomp.
241 */
242 if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
232 if (btcoex->hw_timer_enabled) 243 if (btcoex->hw_timer_enabled)
233 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); 244 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
234 245
@@ -309,8 +320,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
309 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); 320 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
310 321
311 /* make sure duty cycle timer is also stopped when resuming */ 322 /* make sure duty cycle timer is also stopped when resuming */
312 if (btcoex->hw_timer_enabled) 323 if (btcoex->hw_timer_enabled) {
313 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); 324 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
325 btcoex->hw_timer_enabled = false;
326 }
314 327
315 btcoex->bt_priority_cnt = 0; 328 btcoex->bt_priority_cnt = 0;
316 btcoex->bt_priority_time = jiffies; 329 btcoex->bt_priority_time = jiffies;
@@ -331,18 +344,20 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
331 344
332 del_timer_sync(&btcoex->period_timer); 345 del_timer_sync(&btcoex->period_timer);
333 346
334 if (btcoex->hw_timer_enabled) 347 if (btcoex->hw_timer_enabled) {
335 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); 348 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
336 349 btcoex->hw_timer_enabled = false;
337 btcoex->hw_timer_enabled = false; 350 }
338} 351}
339 352
340void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc) 353void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
341{ 354{
342 struct ath_btcoex *btcoex = &sc->btcoex; 355 struct ath_btcoex *btcoex = &sc->btcoex;
343 356
344 if (btcoex->hw_timer_enabled) 357 if (btcoex->hw_timer_enabled) {
345 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); 358 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
359 btcoex->hw_timer_enabled = false;
360 }
346} 361}
347 362
348u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen) 363u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -380,7 +395,10 @@ void ath9k_start_btcoex(struct ath_softc *sc)
380 !ah->btcoex_hw.enabled) { 395 !ah->btcoex_hw.enabled) {
381 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) 396 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
382 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 397 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
383 AR_STOMP_LOW_WLAN_WGHT); 398 AR_STOMP_LOW_WLAN_WGHT, 0);
399 else
400 ath9k_hw_btcoex_set_weight(ah, 0, 0,
401 ATH_BTCOEX_STOMP_NONE);
384 ath9k_hw_btcoex_enable(ah); 402 ath9k_hw_btcoex_enable(ah);
385 403
386 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 404 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
@@ -397,7 +415,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
397 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 415 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
398 ath9k_btcoex_timer_pause(sc); 416 ath9k_btcoex_timer_pause(sc);
399 ath9k_hw_btcoex_disable(ah); 417 ath9k_hw_btcoex_disable(ah);
400 if (AR_SREV_9462(ah)) 418 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
401 ath_mci_flush_profile(&sc->btcoex.mci); 419 ath_mci_flush_profile(&sc->btcoex.mci);
402 } 420 }
403} 421}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index aa327adcc3d8..924c4616c3d9 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -973,8 +973,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
973static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) 973static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
974{ 974{
975 int transfer, err; 975 int transfer, err;
976 const void *data = hif_dev->firmware->data; 976 const void *data = hif_dev->fw_data;
977 size_t len = hif_dev->firmware->size; 977 size_t len = hif_dev->fw_size;
978 u32 addr = AR9271_FIRMWARE; 978 u32 addr = AR9271_FIRMWARE;
979 u8 *buf = kzalloc(4096, GFP_KERNEL); 979 u8 *buf = kzalloc(4096, GFP_KERNEL);
980 u32 firm_offset; 980 u32 firm_offset;
@@ -1017,7 +1017,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
1017 return -EIO; 1017 return -EIO;
1018 1018
1019 dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n", 1019 dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
1020 hif_dev->fw_name, (unsigned long) hif_dev->firmware->size); 1020 hif_dev->fw_name, (unsigned long) hif_dev->fw_size);
1021 1021
1022 return 0; 1022 return 0;
1023} 1023}
@@ -1072,14 +1072,15 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
1072 */ 1072 */
1073static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev) 1073static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
1074{ 1074{
1075 struct device *parent = hif_dev->udev->dev.parent; 1075 struct device *dev = &hif_dev->udev->dev;
1076 struct device *parent = dev->parent;
1076 1077
1077 complete(&hif_dev->fw_done); 1078 complete(&hif_dev->fw_done);
1078 1079
1079 if (parent) 1080 if (parent)
1080 device_lock(parent); 1081 device_lock(parent);
1081 1082
1082 device_release_driver(&hif_dev->udev->dev); 1083 device_release_driver(dev);
1083 1084
1084 if (parent) 1085 if (parent)
1085 device_unlock(parent); 1086 device_unlock(parent);
@@ -1099,11 +1100,11 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
1099 1100
1100 hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb, 1101 hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
1101 &hif_dev->udev->dev); 1102 &hif_dev->udev->dev);
1102 if (hif_dev->htc_handle == NULL) { 1103 if (hif_dev->htc_handle == NULL)
1103 goto err_fw; 1104 goto err_dev_alloc;
1104 }
1105 1105
1106 hif_dev->firmware = fw; 1106 hif_dev->fw_data = fw->data;
1107 hif_dev->fw_size = fw->size;
1107 1108
1108 /* Proceed with initialization */ 1109 /* Proceed with initialization */
1109 1110
@@ -1121,6 +1122,8 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
1121 goto err_htc_hw_init; 1122 goto err_htc_hw_init;
1122 } 1123 }
1123 1124
1125 release_firmware(fw);
1126 hif_dev->flags |= HIF_USB_READY;
1124 complete(&hif_dev->fw_done); 1127 complete(&hif_dev->fw_done);
1125 1128
1126 return; 1129 return;
@@ -1129,8 +1132,8 @@ err_htc_hw_init:
1129 ath9k_hif_usb_dev_deinit(hif_dev); 1132 ath9k_hif_usb_dev_deinit(hif_dev);
1130err_dev_init: 1133err_dev_init:
1131 ath9k_htc_hw_free(hif_dev->htc_handle); 1134 ath9k_htc_hw_free(hif_dev->htc_handle);
1135err_dev_alloc:
1132 release_firmware(fw); 1136 release_firmware(fw);
1133 hif_dev->firmware = NULL;
1134err_fw: 1137err_fw:
1135 ath9k_hif_usb_firmware_fail(hif_dev); 1138 ath9k_hif_usb_firmware_fail(hif_dev);
1136} 1139}
@@ -1277,11 +1280,10 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1277 1280
1278 wait_for_completion(&hif_dev->fw_done); 1281 wait_for_completion(&hif_dev->fw_done);
1279 1282
1280 if (hif_dev->firmware) { 1283 if (hif_dev->flags & HIF_USB_READY) {
1281 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); 1284 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
1282 ath9k_htc_hw_free(hif_dev->htc_handle); 1285 ath9k_htc_hw_free(hif_dev->htc_handle);
1283 ath9k_hif_usb_dev_deinit(hif_dev); 1286 ath9k_hif_usb_dev_deinit(hif_dev);
1284 release_firmware(hif_dev->firmware);
1285 } 1287 }
1286 1288
1287 usb_set_intfdata(interface, NULL); 1289 usb_set_intfdata(interface, NULL);
@@ -1317,13 +1319,23 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1317 struct hif_device_usb *hif_dev = usb_get_intfdata(interface); 1319 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1318 struct htc_target *htc_handle = hif_dev->htc_handle; 1320 struct htc_target *htc_handle = hif_dev->htc_handle;
1319 int ret; 1321 int ret;
1322 const struct firmware *fw;
1320 1323
1321 ret = ath9k_hif_usb_alloc_urbs(hif_dev); 1324 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
1322 if (ret) 1325 if (ret)
1323 return ret; 1326 return ret;
1324 1327
1325 if (hif_dev->firmware) { 1328 if (hif_dev->flags & HIF_USB_READY) {
1329 /* request cached firmware during suspend/resume cycle */
1330 ret = request_firmware(&fw, hif_dev->fw_name,
1331 &hif_dev->udev->dev);
1332 if (ret)
1333 goto fail_resume;
1334
1335 hif_dev->fw_data = fw->data;
1336 hif_dev->fw_size = fw->size;
1326 ret = ath9k_hif_usb_download_fw(hif_dev); 1337 ret = ath9k_hif_usb_download_fw(hif_dev);
1338 release_firmware(fw);
1327 if (ret) 1339 if (ret)
1328 goto fail_resume; 1340 goto fail_resume;
1329 } else { 1341 } else {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 487ff658b4c1..51496e74b83e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -85,12 +85,14 @@ struct cmd_buf {
85}; 85};
86 86
87#define HIF_USB_START BIT(0) 87#define HIF_USB_START BIT(0)
88#define HIF_USB_READY BIT(1)
88 89
89struct hif_device_usb { 90struct hif_device_usb {
90 struct usb_device *udev; 91 struct usb_device *udev;
91 struct usb_interface *interface; 92 struct usb_interface *interface;
92 const struct usb_device_id *usb_device_id; 93 const struct usb_device_id *usb_device_id;
93 const struct firmware *firmware; 94 const void *fw_data;
95 size_t fw_size;
94 struct completion fw_done; 96 struct completion fw_done;
95 struct htc_target *htc_handle; 97 struct htc_target *htc_handle;
96 struct hif_usb_tx tx; 98 struct hif_usb_tx tx;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 936e920fb88e..b30596fcf73a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
542 542
543int ath9k_tx_init(struct ath9k_htc_priv *priv); 543int ath9k_tx_init(struct ath9k_htc_priv *priv);
544int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, 544int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
545 struct ieee80211_sta *sta,
545 struct sk_buff *skb, u8 slot, bool is_cab); 546 struct sk_buff *skb, u8 slot, bool is_cab);
546void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); 547void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
547bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype); 548bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 77d541feb910..f42d2eb6af99 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
326 goto next; 326 goto next;
327 } 327 }
328 328
329 ret = ath9k_htc_tx_start(priv, skb, tx_slot, true); 329 ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
330 if (ret != 0) { 330 if (ret != 0) {
331 ath9k_htc_tx_clear_slot(priv, tx_slot); 331 ath9k_htc_tx_clear_slot(priv, tx_slot);
332 dev_kfree_skb_any(skb); 332 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 07df279c8d46..0eacfc13c915 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -161,7 +161,7 @@ void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
161 161
162 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) { 162 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
163 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 163 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
164 AR_STOMP_LOW_WLAN_WGHT); 164 AR_STOMP_LOW_WLAN_WGHT, 0);
165 ath9k_hw_btcoex_enable(ah); 165 ath9k_hw_btcoex_enable(ah);
166 ath_htc_resume_btcoex_work(priv); 166 ath_htc_resume_btcoex_work(priv);
167 } 167 }
@@ -173,17 +173,26 @@ void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
173 173
174 if (ah->btcoex_hw.enabled && 174 if (ah->btcoex_hw.enabled &&
175 ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 175 ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
176 ath9k_hw_btcoex_disable(ah);
177 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 176 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
178 ath_htc_cancel_btcoex_work(priv); 177 ath_htc_cancel_btcoex_work(priv);
178 ath9k_hw_btcoex_disable(ah);
179 } 179 }
180} 180}
181 181
182void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product) 182void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
183{ 183{
184 struct ath_hw *ah = priv->ah; 184 struct ath_hw *ah = priv->ah;
185 struct ath_common *common = ath9k_hw_common(ah);
185 int qnum; 186 int qnum;
186 187
188 /*
189 * Check if BTCOEX is globally disabled.
190 */
191 if (!common->btcoex_enabled) {
192 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_NONE;
193 return;
194 }
195
187 if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) { 196 if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
188 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE; 197 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
189 } 198 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a035a380d669..d98255eb1b9a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -30,6 +30,10 @@ int htc_modparam_nohwcrypt;
30module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444); 30module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
31MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 31MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
32 32
33static int ath9k_htc_btcoex_enable;
34module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
35MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
36
33#define CHAN2G(_freq, _idx) { \ 37#define CHAN2G(_freq, _idx) { \
34 .center_freq = (_freq), \ 38 .center_freq = (_freq), \
35 .hw_value = (_idx), \ 39 .hw_value = (_idx), \
@@ -635,6 +639,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
635 common->hw = priv->hw; 639 common->hw = priv->hw;
636 common->priv = priv; 640 common->priv = priv;
637 common->debug_mask = ath9k_debug; 641 common->debug_mask = ath9k_debug;
642 common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
638 643
639 spin_lock_init(&priv->beacon_lock); 644 spin_lock_init(&priv->beacon_lock);
640 spin_lock_init(&priv->tx.tx_lock); 645 spin_lock_init(&priv->tx.tx_lock);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c785129692ff..ca78e33ca23e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -489,24 +489,20 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
489 ista = (struct ath9k_htc_sta *) sta->drv_priv; 489 ista = (struct ath9k_htc_sta *) sta->drv_priv;
490 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN); 490 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
491 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN); 491 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
492 tsta.is_vif_sta = 0;
493 ista->index = sta_idx; 492 ista->index = sta_idx;
493 tsta.is_vif_sta = 0;
494 maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
495 sta->ht_cap.ampdu_factor);
496 tsta.maxampdu = cpu_to_be16(maxampdu);
494 } else { 497 } else {
495 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); 498 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
496 tsta.is_vif_sta = 1; 499 tsta.is_vif_sta = 1;
500 tsta.maxampdu = cpu_to_be16(0xffff);
497 } 501 }
498 502
499 tsta.sta_index = sta_idx; 503 tsta.sta_index = sta_idx;
500 tsta.vif_index = avp->index; 504 tsta.vif_index = avp->index;
501 505
502 if (!sta) {
503 tsta.maxampdu = cpu_to_be16(0xffff);
504 } else {
505 maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
506 sta->ht_cap.ampdu_factor);
507 tsta.maxampdu = cpu_to_be16(maxampdu);
508 }
509
510 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); 506 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
511 if (ret) { 507 if (ret) {
512 if (sta) 508 if (sta)
@@ -856,7 +852,9 @@ set_timer:
856/* mac80211 Callbacks */ 852/* mac80211 Callbacks */
857/**********************/ 853/**********************/
858 854
859static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 855static void ath9k_htc_tx(struct ieee80211_hw *hw,
856 struct ieee80211_tx_control *control,
857 struct sk_buff *skb)
860{ 858{
861 struct ieee80211_hdr *hdr; 859 struct ieee80211_hdr *hdr;
862 struct ath9k_htc_priv *priv = hw->priv; 860 struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +881,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
883 goto fail_tx; 881 goto fail_tx;
884 } 882 }
885 883
886 ret = ath9k_htc_tx_start(priv, skb, slot, false); 884 ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
887 if (ret != 0) { 885 if (ret != 0) {
888 ath_dbg(common, XMIT, "Tx failed\n"); 886 ath_dbg(common, XMIT, "Tx failed\n");
889 goto clear_slot; 887 goto clear_slot;
@@ -1331,6 +1329,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1331 return ret; 1329 return ret;
1332} 1330}
1333 1331
1332static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
1333 struct ieee80211_vif *vif,
1334 struct ieee80211_sta *sta, u32 changed)
1335{
1336 struct ath9k_htc_priv *priv = hw->priv;
1337 struct ath_common *common = ath9k_hw_common(priv->ah);
1338 struct ath9k_htc_target_rate trate;
1339
1340 mutex_lock(&priv->mutex);
1341 ath9k_htc_ps_wakeup(priv);
1342
1343 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
1344 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
1345 ath9k_htc_setup_rate(priv, sta, &trate);
1346 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1347 ath_dbg(common, CONFIG,
1348 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1349 sta->addr, be32_to_cpu(trate.capflags));
1350 else
1351 ath_dbg(common, CONFIG,
1352 "Unable to update supported rates for sta: %pM\n",
1353 sta->addr);
1354 }
1355
1356 ath9k_htc_ps_restore(priv);
1357 mutex_unlock(&priv->mutex);
1358}
1359
1334static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, 1360static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
1335 struct ieee80211_vif *vif, u16 queue, 1361 struct ieee80211_vif *vif, u16 queue,
1336 const struct ieee80211_tx_queue_params *params) 1362 const struct ieee80211_tx_queue_params *params)
@@ -1419,7 +1445,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1419 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1445 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1420 if (priv->ah->sw_mgmt_crypto && 1446 if (priv->ah->sw_mgmt_crypto &&
1421 key->cipher == WLAN_CIPHER_SUITE_CCMP) 1447 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1422 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1448 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1423 ret = 0; 1449 ret = 0;
1424 } 1450 }
1425 break; 1451 break;
@@ -1758,6 +1784,7 @@ struct ieee80211_ops ath9k_htc_ops = {
1758 .sta_add = ath9k_htc_sta_add, 1784 .sta_add = ath9k_htc_sta_add,
1759 .sta_remove = ath9k_htc_sta_remove, 1785 .sta_remove = ath9k_htc_sta_remove,
1760 .conf_tx = ath9k_htc_conf_tx, 1786 .conf_tx = ath9k_htc_conf_tx,
1787 .sta_rc_update = ath9k_htc_sta_rc_update,
1761 .bss_info_changed = ath9k_htc_bss_info_changed, 1788 .bss_info_changed = ath9k_htc_bss_info_changed,
1762 .set_key = ath9k_htc_set_key, 1789 .set_key = ath9k_htc_set_key,
1763 .get_tsf = ath9k_htc_get_tsf, 1790 .get_tsf = ath9k_htc_get_tsf,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 47e61d0da33b..06cdcb772d78 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
333} 333}
334 334
335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, 335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
336 struct ieee80211_sta *sta,
336 struct sk_buff *skb, 337 struct sk_buff *skb,
337 u8 slot, bool is_cab) 338 u8 slot, bool is_cab)
338{ 339{
339 struct ieee80211_hdr *hdr; 340 struct ieee80211_hdr *hdr;
340 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 341 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
341 struct ieee80211_sta *sta = tx_info->control.sta;
342 struct ieee80211_vif *vif = tx_info->control.vif; 342 struct ieee80211_vif *vif = tx_info->control.vif;
343 struct ath9k_htc_sta *ista; 343 struct ath9k_htc_sta *ista;
344 struct ath9k_htc_vif *avp = NULL; 344 struct ath9k_htc_vif *avp = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 265bf77598a2..0f2b97f6b739 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,13 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf); 78 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
79} 79}
80 80
81static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
82 bool enable)
83{
84 if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
85 ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
86}
87
81/* Private hardware call ops */ 88/* Private hardware call ops */
82 89
83/* PHY ops */ 90/* PHY ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4faf0a395876..f9a6ec5cf470 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -24,6 +24,7 @@
24#include "rc.h" 24#include "rc.h"
25#include "ar9003_mac.h" 25#include "ar9003_mac.h"
26#include "ar9003_mci.h" 26#include "ar9003_mci.h"
27#include "ar9003_phy.h"
27#include "debug.h" 28#include "debug.h"
28#include "ath9k.h" 29#include "ath9k.h"
29 30
@@ -355,7 +356,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
355 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; 356 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
356 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); 357 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
357 358
358 if (AR_SREV_9462(ah)) 359 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
359 ah->is_pciexpress = true; 360 ah->is_pciexpress = true;
360 else 361 else
361 ah->is_pciexpress = (val & 362 ah->is_pciexpress = (val &
@@ -602,6 +603,11 @@ static int __ath9k_hw_init(struct ath_hw *ah)
602 if (AR_SREV_9462(ah)) 603 if (AR_SREV_9462(ah))
603 ah->WARegVal &= ~AR_WA_D3_L1_DISABLE; 604 ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
604 605
606 if (AR_SREV_9565(ah)) {
607 ah->WARegVal |= AR_WA_BIT22;
608 REG_WRITE(ah, AR_WA, ah->WARegVal);
609 }
610
605 ath9k_hw_init_defaults(ah); 611 ath9k_hw_init_defaults(ah);
606 ath9k_hw_init_config(ah); 612 ath9k_hw_init_config(ah);
607 613
@@ -647,6 +653,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
647 case AR_SREV_VERSION_9340: 653 case AR_SREV_VERSION_9340:
648 case AR_SREV_VERSION_9462: 654 case AR_SREV_VERSION_9462:
649 case AR_SREV_VERSION_9550: 655 case AR_SREV_VERSION_9550:
656 case AR_SREV_VERSION_9565:
650 break; 657 break;
651 default: 658 default:
652 ath_err(common, 659 ath_err(common,
@@ -708,7 +715,7 @@ int ath9k_hw_init(struct ath_hw *ah)
708 int ret; 715 int ret;
709 struct ath_common *common = ath9k_hw_common(ah); 716 struct ath_common *common = ath9k_hw_common(ah);
710 717
711 /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */ 718 /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */
712 switch (ah->hw_version.devid) { 719 switch (ah->hw_version.devid) {
713 case AR5416_DEVID_PCI: 720 case AR5416_DEVID_PCI:
714 case AR5416_DEVID_PCIE: 721 case AR5416_DEVID_PCIE:
@@ -728,6 +735,7 @@ int ath9k_hw_init(struct ath_hw *ah)
728 case AR9300_DEVID_AR9580: 735 case AR9300_DEVID_AR9580:
729 case AR9300_DEVID_AR9462: 736 case AR9300_DEVID_AR9462:
730 case AR9485_DEVID_AR1111: 737 case AR9485_DEVID_AR1111:
738 case AR9300_DEVID_AR9565:
731 break; 739 break;
732 default: 740 default:
733 if (common->bus_ops->ath_bus_type == ATH_USB) 741 if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -800,8 +808,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
800{ 808{
801 u32 pll; 809 u32 pll;
802 810
803 if (AR_SREV_9485(ah)) { 811 if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
804
805 /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ 812 /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
806 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 813 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
807 AR_CH0_BB_DPLL2_PLL_PWD, 0x1); 814 AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
@@ -912,7 +919,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
912 } 919 }
913 920
914 pll = ath9k_hw_compute_pll_control(ah, chan); 921 pll = ath9k_hw_compute_pll_control(ah, chan);
915 922 if (AR_SREV_9565(ah))
923 pll |= 0x40000;
916 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 924 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
917 925
918 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 926 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
@@ -1726,12 +1734,12 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1726 if (!ret) 1734 if (!ret)
1727 goto fail; 1735 goto fail;
1728 1736
1729 ath9k_hw_loadnf(ah, ah->curchan);
1730 ath9k_hw_start_nfcal(ah, true);
1731
1732 if (ath9k_hw_mci_is_enabled(ah)) 1737 if (ath9k_hw_mci_is_enabled(ah))
1733 ar9003_mci_2g5g_switch(ah, false); 1738 ar9003_mci_2g5g_switch(ah, false);
1734 1739
1740 ath9k_hw_loadnf(ah, ah->curchan);
1741 ath9k_hw_start_nfcal(ah, true);
1742
1735 if (AR_SREV_9271(ah)) 1743 if (AR_SREV_9271(ah))
1736 ar9002_hw_load_ani_reg(ah, chan); 1744 ar9002_hw_load_ani_reg(ah, chan);
1737 1745
@@ -2018,6 +2026,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2018 2026
2019 ath9k_hw_apply_gpio_override(ah); 2027 ath9k_hw_apply_gpio_override(ah);
2020 2028
2029 if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
2030 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
2031
2021 return 0; 2032 return 0;
2022} 2033}
2023EXPORT_SYMBOL(ath9k_hw_reset); 2034EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2034,7 +2045,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
2034{ 2045{
2035 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2046 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2036 2047
2037 if (AR_SREV_9462(ah)) { 2048 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
2038 REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff); 2049 REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
2039 REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff); 2050 REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
2040 REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff); 2051 REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
@@ -2401,7 +2412,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2401 if (eeval & AR5416_OPFLAGS_11G) 2412 if (eeval & AR5416_OPFLAGS_11G)
2402 pCap->hw_caps |= ATH9K_HW_CAP_2GHZ; 2413 pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
2403 2414
2404 if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) 2415 if (AR_SREV_9485(ah) ||
2416 AR_SREV_9285(ah) ||
2417 AR_SREV_9330(ah) ||
2418 AR_SREV_9565(ah))
2405 chip_chainmask = 1; 2419 chip_chainmask = 1;
2406 else if (AR_SREV_9462(ah)) 2420 else if (AR_SREV_9462(ah))
2407 chip_chainmask = 3; 2421 chip_chainmask = 3;
@@ -2489,7 +2503,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2489 2503
2490 if (AR_SREV_9300_20_OR_LATER(ah)) { 2504 if (AR_SREV_9300_20_OR_LATER(ah)) {
2491 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK; 2505 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
2492 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah)) 2506 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah))
2493 pCap->hw_caps |= ATH9K_HW_CAP_LDPC; 2507 pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
2494 2508
2495 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; 2509 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2525,7 +2539,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2525 } 2539 }
2526 2540
2527 2541
2528 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { 2542 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
2529 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 2543 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
2530 /* 2544 /*
2531 * enable the diversity-combining algorithm only when 2545 * enable the diversity-combining algorithm only when
@@ -2568,14 +2582,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2568 ah->enabled_cals |= TX_IQ_ON_AGC_CAL; 2582 ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
2569 } 2583 }
2570 2584
2571 if (AR_SREV_9462(ah)) { 2585 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
2572
2573 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) 2586 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
2574 pCap->hw_caps |= ATH9K_HW_CAP_MCI; 2587 pCap->hw_caps |= ATH9K_HW_CAP_MCI;
2575 2588
2576 if (AR_SREV_9462_20(ah)) 2589 if (AR_SREV_9462_20(ah))
2577 pCap->hw_caps |= ATH9K_HW_CAP_RTT; 2590 pCap->hw_caps |= ATH9K_HW_CAP_RTT;
2578
2579 } 2591 }
2580 2592
2581 2593
@@ -2741,7 +2753,7 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
2741 2753
2742 ENABLE_REGWRITE_BUFFER(ah); 2754 ENABLE_REGWRITE_BUFFER(ah);
2743 2755
2744 if (AR_SREV_9462(ah)) 2756 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
2745 bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER; 2757 bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
2746 2758
2747 REG_WRITE(ah, AR_RX_FILTER, bits); 2759 REG_WRITE(ah, AR_RX_FILTER, bits);
@@ -3038,7 +3050,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
3038 REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, 3050 REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
3039 gen_tmr_configuration[timer->index].mode_mask); 3051 gen_tmr_configuration[timer->index].mode_mask);
3040 3052
3041 if (AR_SREV_9462(ah)) { 3053 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3042 /* 3054 /*
3043 * Starting from AR9462, each generic timer can select which tsf 3055 * Starting from AR9462, each generic timer can select which tsf
3044 * to use. But we still follow the old rule, 0 - 7 use tsf and 3056 * to use. But we still follow the old rule, 0 - 7 use tsf and
@@ -3072,6 +3084,16 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
3072 REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, 3084 REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
3073 gen_tmr_configuration[timer->index].mode_mask); 3085 gen_tmr_configuration[timer->index].mode_mask);
3074 3086
3087 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3088 /*
3089 * Need to switch back to TSF if it was using TSF2.
3090 */
3091 if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) {
3092 REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
3093 (1 << timer->index));
3094 }
3095 }
3096
3075 /* Disable both trigger and thresh interrupt masks */ 3097 /* Disable both trigger and thresh interrupt masks */
3076 REG_CLR_BIT(ah, AR_IMR_S5, 3098 REG_CLR_BIT(ah, AR_IMR_S5,
3077 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | 3099 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
@@ -3153,6 +3175,7 @@ static struct {
3153 { AR_SREV_VERSION_9485, "9485" }, 3175 { AR_SREV_VERSION_9485, "9485" },
3154 { AR_SREV_VERSION_9462, "9462" }, 3176 { AR_SREV_VERSION_9462, "9462" },
3155 { AR_SREV_VERSION_9550, "9550" }, 3177 { AR_SREV_VERSION_9550, "9550" },
3178 { AR_SREV_VERSION_9565, "9565" },
3156}; 3179};
3157 3180
3158/* For devices with external radios */ 3181/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index de6968fc64f4..566a4ce4f156 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -50,6 +50,7 @@
50#define AR9300_DEVID_AR9330 0x0035 50#define AR9300_DEVID_AR9330 0x0035
51#define AR9300_DEVID_QCA955X 0x0038 51#define AR9300_DEVID_QCA955X 0x0038
52#define AR9485_DEVID_AR1111 0x0037 52#define AR9485_DEVID_AR1111 0x0037
53#define AR9300_DEVID_AR9565 0x0036
53 54
54#define AR5416_AR9100_DEVID 0x000b 55#define AR5416_AR9100_DEVID 0x000b
55 56
@@ -685,7 +686,7 @@ struct ath_hw_ops {
685 struct ath_hw_antcomb_conf *antconf); 686 struct ath_hw_antcomb_conf *antconf);
686 void (*antdiv_comb_conf_set)(struct ath_hw *ah, 687 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
687 struct ath_hw_antcomb_conf *antconf); 688 struct ath_hw_antcomb_conf *antconf);
688 689 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
689}; 690};
690 691
691struct ath_nf_limits { 692struct ath_nf_limits {
@@ -729,6 +730,7 @@ struct ath_hw {
729 bool aspm_enabled; 730 bool aspm_enabled;
730 bool is_monitoring; 731 bool is_monitoring;
731 bool need_an_top2_fixup; 732 bool need_an_top2_fixup;
733 bool shared_chain_lnadiv;
732 u16 tx_trig_level; 734 u16 tx_trig_level;
733 735
734 u32 nf_regs[6]; 736 u32 nf_regs[6];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f33712140fa5..fad3ccd5cd91 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -46,6 +46,10 @@ static int ath9k_btcoex_enable;
46module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); 46module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
47MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); 47MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
48 48
49static int ath9k_enable_diversity;
50module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
51MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
52
49bool is_ath9k_unloaded; 53bool is_ath9k_unloaded;
50/* We use the hw_value as an index into our private channel structure */ 54/* We use the hw_value as an index into our private channel structure */
51 55
@@ -258,7 +262,7 @@ static void setup_ht_cap(struct ath_softc *sc,
258 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 262 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
259 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 263 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
260 264
261 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) 265 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
262 max_streams = 1; 266 max_streams = 1;
263 else if (AR_SREV_9462(ah)) 267 else if (AR_SREV_9462(ah))
264 max_streams = 2; 268 max_streams = 2;
@@ -546,6 +550,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
546 common->debug_mask = ath9k_debug; 550 common->debug_mask = ath9k_debug;
547 common->btcoex_enabled = ath9k_btcoex_enable == 1; 551 common->btcoex_enabled = ath9k_btcoex_enable == 1;
548 common->disable_ani = false; 552 common->disable_ani = false;
553
554 /*
555 * Enable Antenna diversity only when BTCOEX is disabled
556 * and the user manually requests the feature.
557 */
558 if (!common->btcoex_enabled && ath9k_enable_diversity)
559 common->antenna_diversity = 1;
560
549 spin_lock_init(&common->cc_lock); 561 spin_lock_init(&common->cc_lock);
550 562
551 spin_lock_init(&sc->sc_serial_rw); 563 spin_lock_init(&sc->sc_serial_rw);
@@ -597,6 +609,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
597 609
598 ath9k_cmn_init_crypto(sc->sc_ah); 610 ath9k_cmn_init_crypto(sc->sc_ah);
599 ath9k_init_misc(sc); 611 ath9k_init_misc(sc);
612 ath_fill_led_pin(sc);
600 613
601 if (common->bus_ops->aspm_init) 614 if (common->bus_ops->aspm_init)
602 common->bus_ops->aspm_init(common); 615 common->bus_ops->aspm_init(common);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a22df749b8db..31ab82e3ba85 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -696,7 +696,9 @@ mutex_unlock:
696 return r; 696 return r;
697} 697}
698 698
699static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 699static void ath9k_tx(struct ieee80211_hw *hw,
700 struct ieee80211_tx_control *control,
701 struct sk_buff *skb)
700{ 702{
701 struct ath_softc *sc = hw->priv; 703 struct ath_softc *sc = hw->priv;
702 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 704 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
756 758
757 memset(&txctl, 0, sizeof(struct ath_tx_control)); 759 memset(&txctl, 0, sizeof(struct ath_tx_control));
758 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; 760 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
761 txctl.sta = control->sta;
759 762
760 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); 763 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
761 764
@@ -983,47 +986,21 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
983 struct ath_softc *sc = hw->priv; 986 struct ath_softc *sc = hw->priv;
984 struct ath_hw *ah = sc->sc_ah; 987 struct ath_hw *ah = sc->sc_ah;
985 struct ath_common *common = ath9k_hw_common(ah); 988 struct ath_common *common = ath9k_hw_common(ah);
986 int ret = 0;
987 989
988 ath9k_ps_wakeup(sc);
989 mutex_lock(&sc->mutex); 990 mutex_lock(&sc->mutex);
990 991
991 switch (vif->type) {
992 case NL80211_IFTYPE_STATION:
993 case NL80211_IFTYPE_WDS:
994 case NL80211_IFTYPE_ADHOC:
995 case NL80211_IFTYPE_AP:
996 case NL80211_IFTYPE_MESH_POINT:
997 break;
998 default:
999 ath_err(common, "Interface type %d not yet supported\n",
1000 vif->type);
1001 ret = -EOPNOTSUPP;
1002 goto out;
1003 }
1004
1005 if (ath9k_uses_beacons(vif->type)) {
1006 if (sc->nbcnvifs >= ATH_BCBUF) {
1007 ath_err(common, "Not enough beacon buffers when adding"
1008 " new interface of type: %i\n",
1009 vif->type);
1010 ret = -ENOBUFS;
1011 goto out;
1012 }
1013 }
1014
1015 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 992 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1016
1017 sc->nvifs++; 993 sc->nvifs++;
1018 994
995 ath9k_ps_wakeup(sc);
1019 ath9k_calculate_summary_state(hw, vif); 996 ath9k_calculate_summary_state(hw, vif);
997 ath9k_ps_restore(sc);
998
1020 if (ath9k_uses_beacons(vif->type)) 999 if (ath9k_uses_beacons(vif->type))
1021 ath9k_beacon_assign_slot(sc, vif); 1000 ath9k_beacon_assign_slot(sc, vif);
1022 1001
1023out:
1024 mutex_unlock(&sc->mutex); 1002 mutex_unlock(&sc->mutex);
1025 ath9k_ps_restore(sc); 1003 return 0;
1026 return ret;
1027} 1004}
1028 1005
1029static int ath9k_change_interface(struct ieee80211_hw *hw, 1006static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1033,21 +1010,9 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1033{ 1010{
1034 struct ath_softc *sc = hw->priv; 1011 struct ath_softc *sc = hw->priv;
1035 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1012 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1036 int ret = 0;
1037 1013
1038 ath_dbg(common, CONFIG, "Change Interface\n"); 1014 ath_dbg(common, CONFIG, "Change Interface\n");
1039
1040 mutex_lock(&sc->mutex); 1015 mutex_lock(&sc->mutex);
1041 ath9k_ps_wakeup(sc);
1042
1043 if (ath9k_uses_beacons(new_type) &&
1044 !ath9k_uses_beacons(vif->type)) {
1045 if (sc->nbcnvifs >= ATH_BCBUF) {
1046 ath_err(common, "No beacon slot available\n");
1047 ret = -ENOBUFS;
1048 goto out;
1049 }
1050 }
1051 1016
1052 if (ath9k_uses_beacons(vif->type)) 1017 if (ath9k_uses_beacons(vif->type))
1053 ath9k_beacon_remove_slot(sc, vif); 1018 ath9k_beacon_remove_slot(sc, vif);
@@ -1055,14 +1020,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1055 vif->type = new_type; 1020 vif->type = new_type;
1056 vif->p2p = p2p; 1021 vif->p2p = p2p;
1057 1022
1023 ath9k_ps_wakeup(sc);
1058 ath9k_calculate_summary_state(hw, vif); 1024 ath9k_calculate_summary_state(hw, vif);
1025 ath9k_ps_restore(sc);
1026
1059 if (ath9k_uses_beacons(vif->type)) 1027 if (ath9k_uses_beacons(vif->type))
1060 ath9k_beacon_assign_slot(sc, vif); 1028 ath9k_beacon_assign_slot(sc, vif);
1061 1029
1062out:
1063 ath9k_ps_restore(sc);
1064 mutex_unlock(&sc->mutex); 1030 mutex_unlock(&sc->mutex);
1065 return ret; 1031 return 0;
1066} 1032}
1067 1033
1068static void ath9k_remove_interface(struct ieee80211_hw *hw, 1034static void ath9k_remove_interface(struct ieee80211_hw *hw,
@@ -1073,7 +1039,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1073 1039
1074 ath_dbg(common, CONFIG, "Detach Interface\n"); 1040 ath_dbg(common, CONFIG, "Detach Interface\n");
1075 1041
1076 ath9k_ps_wakeup(sc);
1077 mutex_lock(&sc->mutex); 1042 mutex_lock(&sc->mutex);
1078 1043
1079 sc->nvifs--; 1044 sc->nvifs--;
@@ -1081,10 +1046,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1081 if (ath9k_uses_beacons(vif->type)) 1046 if (ath9k_uses_beacons(vif->type))
1082 ath9k_beacon_remove_slot(sc, vif); 1047 ath9k_beacon_remove_slot(sc, vif);
1083 1048
1049 ath9k_ps_wakeup(sc);
1084 ath9k_calculate_summary_state(hw, NULL); 1050 ath9k_calculate_summary_state(hw, NULL);
1051 ath9k_ps_restore(sc);
1085 1052
1086 mutex_unlock(&sc->mutex); 1053 mutex_unlock(&sc->mutex);
1087 ath9k_ps_restore(sc);
1088} 1054}
1089 1055
1090static void ath9k_enable_ps(struct ath_softc *sc) 1056static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1440,7 +1406,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1440 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1406 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1441 if (sc->sc_ah->sw_mgmt_crypto && 1407 if (sc->sc_ah->sw_mgmt_crypto &&
1442 key->cipher == WLAN_CIPHER_SUITE_CCMP) 1408 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1443 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1409 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1444 ret = 0; 1410 ret = 0;
1445 } 1411 }
1446 break; 1412 break;
@@ -2257,7 +2223,7 @@ static int ath9k_suspend(struct ieee80211_hw *hw,
2257 mutex_lock(&sc->mutex); 2223 mutex_lock(&sc->mutex);
2258 2224
2259 ath_cancel_work(sc); 2225 ath_cancel_work(sc);
2260 del_timer_sync(&common->ani.timer); 2226 ath_stop_ani(sc);
2261 del_timer_sync(&sc->rx_poll_timer); 2227 del_timer_sync(&sc->rx_poll_timer);
2262 2228
2263 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) { 2229 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index fb536e7e661b..ec2d7c807567 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -80,6 +80,7 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
80 struct ath_mci_profile_info *info, *tinfo; 80 struct ath_mci_profile_info *info, *tinfo;
81 81
82 mci->aggr_limit = 0; 82 mci->aggr_limit = 0;
83 mci->num_mgmt = 0;
83 84
84 if (list_empty(&mci->info)) 85 if (list_empty(&mci->info))
85 return; 86 return;
@@ -120,7 +121,14 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
120 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING) 121 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
121 goto skip_tuning; 122 goto skip_tuning;
122 123
124 mci->aggr_limit = 0;
123 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile]; 125 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
126 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
127 if (NUM_PROF(mci))
128 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
129 else
130 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
131 ATH_BTCOEX_STOMP_LOW;
124 132
125 if (num_profile == 1) { 133 if (num_profile == 1) {
126 info = list_first_entry(&mci->info, 134 info = list_first_entry(&mci->info,
@@ -132,7 +140,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
132 else if (info->T == 6) { 140 else if (info->T == 6) {
133 mci->aggr_limit = 6; 141 mci->aggr_limit = 6;
134 btcoex->duty_cycle = 30; 142 btcoex->duty_cycle = 30;
135 } 143 } else
144 mci->aggr_limit = 6;
136 ath_dbg(common, MCI, 145 ath_dbg(common, MCI,
137 "Single SCO, aggregation limit %d 1/4 ms\n", 146 "Single SCO, aggregation limit %d 1/4 ms\n",
138 mci->aggr_limit); 147 mci->aggr_limit);
@@ -191,6 +200,23 @@ skip_tuning:
191 ath9k_btcoex_timer_resume(sc); 200 ath9k_btcoex_timer_resume(sc);
192} 201}
193 202
203static void ath_mci_wait_btcal_done(struct ath_softc *sc)
204{
205 struct ath_hw *ah = sc->sc_ah;
206
207 /* Stop tx & rx */
208 ieee80211_stop_queues(sc->hw);
209 ath_stoprecv(sc);
210 ath_drain_all_txq(sc, false);
211
212 /* Wait for cal done */
213 ar9003_mci_start_reset(ah, ah->curchan);
214
215 /* Resume tx & rx */
216 ath_startrecv(sc);
217 ieee80211_wake_queues(sc->hw);
218}
219
194static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 220static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
195{ 221{
196 struct ath_hw *ah = sc->sc_ah; 222 struct ath_hw *ah = sc->sc_ah;
@@ -201,8 +227,8 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
201 switch (opcode) { 227 switch (opcode) {
202 case MCI_GPM_BT_CAL_REQ: 228 case MCI_GPM_BT_CAL_REQ:
203 if (mci_hw->bt_state == MCI_BT_AWAKE) { 229 if (mci_hw->bt_state == MCI_BT_AWAKE) {
204 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START); 230 mci_hw->bt_state = MCI_BT_CAL_START;
205 ath9k_queue_reset(sc, RESET_TYPE_MCI); 231 ath_mci_wait_btcal_done(sc);
206 } 232 }
207 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state); 233 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
208 break; 234 break;
@@ -224,8 +250,8 @@ static void ath9k_mci_work(struct work_struct *work)
224 ath_mci_update_scheme(sc); 250 ath_mci_update_scheme(sc);
225} 251}
226 252
227static void ath_mci_process_profile(struct ath_softc *sc, 253static u8 ath_mci_process_profile(struct ath_softc *sc,
228 struct ath_mci_profile_info *info) 254 struct ath_mci_profile_info *info)
229{ 255{
230 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 256 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
231 struct ath_btcoex *btcoex = &sc->btcoex; 257 struct ath_btcoex *btcoex = &sc->btcoex;
@@ -251,25 +277,15 @@ static void ath_mci_process_profile(struct ath_softc *sc,
251 277
252 if (info->start) { 278 if (info->start) {
253 if (!entry && !ath_mci_add_profile(common, mci, info)) 279 if (!entry && !ath_mci_add_profile(common, mci, info))
254 return; 280 return 0;
255 } else 281 } else
256 ath_mci_del_profile(common, mci, entry); 282 ath_mci_del_profile(common, mci, entry);
257 283
258 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; 284 return 1;
259 mci->aggr_limit = mci->num_sco ? 6 : 0;
260
261 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
262 if (NUM_PROF(mci))
263 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
264 else
265 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
266 ATH_BTCOEX_STOMP_LOW;
267
268 ieee80211_queue_work(sc->hw, &sc->mci_work);
269} 285}
270 286
271static void ath_mci_process_status(struct ath_softc *sc, 287static u8 ath_mci_process_status(struct ath_softc *sc,
272 struct ath_mci_profile_status *status) 288 struct ath_mci_profile_status *status)
273{ 289{
274 struct ath_btcoex *btcoex = &sc->btcoex; 290 struct ath_btcoex *btcoex = &sc->btcoex;
275 struct ath_mci_profile *mci = &btcoex->mci; 291 struct ath_mci_profile *mci = &btcoex->mci;
@@ -278,14 +294,14 @@ static void ath_mci_process_status(struct ath_softc *sc,
278 294
279 /* Link status type are not handled */ 295 /* Link status type are not handled */
280 if (status->is_link) 296 if (status->is_link)
281 return; 297 return 0;
282 298
283 info.conn_handle = status->conn_handle; 299 info.conn_handle = status->conn_handle;
284 if (ath_mci_find_profile(mci, &info)) 300 if (ath_mci_find_profile(mci, &info))
285 return; 301 return 0;
286 302
287 if (status->conn_handle >= ATH_MCI_MAX_PROFILE) 303 if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
288 return; 304 return 0;
289 305
290 if (status->is_critical) 306 if (status->is_critical)
291 __set_bit(status->conn_handle, mci->status); 307 __set_bit(status->conn_handle, mci->status);
@@ -299,7 +315,9 @@ static void ath_mci_process_status(struct ath_softc *sc,
299 } while (++i < ATH_MCI_MAX_PROFILE); 315 } while (++i < ATH_MCI_MAX_PROFILE);
300 316
301 if (old_num_mgmt != mci->num_mgmt) 317 if (old_num_mgmt != mci->num_mgmt)
302 ieee80211_queue_work(sc->hw, &sc->mci_work); 318 return 1;
319
320 return 0;
303} 321}
304 322
305static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 323static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -308,9 +326,16 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
308 struct ath_mci_profile_info profile_info; 326 struct ath_mci_profile_info profile_info;
309 struct ath_mci_profile_status profile_status; 327 struct ath_mci_profile_status profile_status;
310 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 328 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
311 u8 major, minor; 329 u8 major, minor, update_scheme = 0;
312 u32 seq_num; 330 u32 seq_num;
313 331
332 if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
333 ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
334 ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
335 ath_mci_flush_profile(&sc->btcoex.mci);
336 ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
337 }
338
314 switch (opcode) { 339 switch (opcode) {
315 case MCI_GPM_COEX_VERSION_QUERY: 340 case MCI_GPM_COEX_VERSION_QUERY:
316 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION); 341 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
@@ -336,7 +361,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
336 break; 361 break;
337 } 362 }
338 363
339 ath_mci_process_profile(sc, &profile_info); 364 update_scheme += ath_mci_process_profile(sc, &profile_info);
340 break; 365 break;
341 case MCI_GPM_COEX_BT_STATUS_UPDATE: 366 case MCI_GPM_COEX_BT_STATUS_UPDATE:
342 profile_status.is_link = *(rx_payload + 367 profile_status.is_link = *(rx_payload +
@@ -352,12 +377,14 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
352 profile_status.is_link, profile_status.conn_handle, 377 profile_status.is_link, profile_status.conn_handle,
353 profile_status.is_critical, seq_num); 378 profile_status.is_critical, seq_num);
354 379
355 ath_mci_process_status(sc, &profile_status); 380 update_scheme += ath_mci_process_status(sc, &profile_status);
356 break; 381 break;
357 default: 382 default:
358 ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode); 383 ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
359 break; 384 break;
360 } 385 }
386 if (update_scheme)
387 ieee80211_queue_work(sc->hw, &sc->mci_work);
361} 388}
362 389
363int ath_mci_setup(struct ath_softc *sc) 390int ath_mci_setup(struct ath_softc *sc)
@@ -365,6 +392,7 @@ int ath_mci_setup(struct ath_softc *sc)
365 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 392 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
366 struct ath_mci_coex *mci = &sc->mci_coex; 393 struct ath_mci_coex *mci = &sc->mci_coex;
367 struct ath_mci_buf *buf = &mci->sched_buf; 394 struct ath_mci_buf *buf = &mci->sched_buf;
395 int ret;
368 396
369 buf->bf_addr = dma_alloc_coherent(sc->dev, 397 buf->bf_addr = dma_alloc_coherent(sc->dev,
370 ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE, 398 ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
@@ -384,9 +412,13 @@ int ath_mci_setup(struct ath_softc *sc)
384 mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len; 412 mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
385 mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len; 413 mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
386 414
387 ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr, 415 ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
388 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), 416 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
389 mci->sched_buf.bf_paddr); 417 mci->sched_buf.bf_paddr);
418 if (ret) {
419 ath_err(common, "Failed to initialize MCI\n");
420 return ret;
421 }
390 422
391 INIT_WORK(&sc->mci_work, ath9k_mci_work); 423 INIT_WORK(&sc->mci_work, ath9k_mci_work);
392 ath_dbg(common, MCI, "MCI Initialized\n"); 424 ath_dbg(common, MCI, "MCI Initialized\n");
@@ -551,9 +583,11 @@ void ath_mci_intr(struct ath_softc *sc)
551 } 583 }
552 584
553 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || 585 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
554 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) 586 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
555 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR | 587 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
556 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT); 588 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
589 ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
590 }
557} 591}
558 592
559void ath_mci_enable(struct ath_softc *sc) 593void ath_mci_enable(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ef11dc639461..0e630a99b68b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -38,6 +38,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ 39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ 40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
41 { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
41 { 0 } 42 { 0 }
42}; 43};
43 44
@@ -122,7 +123,8 @@ static void ath_pci_aspm_init(struct ath_common *common)
122 if (!parent) 123 if (!parent)
123 return; 124 return;
124 125
125 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 126 if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
127 (AR_SREV_9285(ah))) {
126 /* Bluetooth coexistance requires disabling ASPM. */ 128 /* Bluetooth coexistance requires disabling ASPM. */
127 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, 129 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
128 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 130 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e034add9cd5a..27ed80b54881 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
25 8, /* MCS start */ 25 8, /* MCS start */
26 { 26 {
27 [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, 27 [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
28 5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */ 28 5400, 0, 12 }, /* 6 Mb */
29 [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, 29 [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
30 7800, 1, 18, 0, 1, 1, 1 }, /* 9 Mb */ 30 7800, 1, 18 }, /* 9 Mb */
31 [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, 31 [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
32 10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */ 32 10000, 2, 24 }, /* 12 Mb */
33 [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, 33 [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
34 13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */ 34 13900, 3, 36 }, /* 18 Mb */
35 [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, 35 [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
36 17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */ 36 17300, 4, 48 }, /* 24 Mb */
37 [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, 37 [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
38 23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */ 38 23000, 5, 72 }, /* 36 Mb */
39 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, 39 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
40 27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */ 40 27400, 6, 96 }, /* 48 Mb */
41 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, 41 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
42 29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */ 42 29300, 7, 108 }, /* 54 Mb */
43 [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500, 43 [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
44 6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */ 44 6400, 0, 0 }, /* 6.5 Mb */
45 [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, 45 [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
46 12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */ 46 12700, 1, 1 }, /* 13 Mb */
47 [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, 47 [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
48 18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */ 48 18800, 2, 2 }, /* 19.5 Mb */
49 [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, 49 [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
50 25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */ 50 25000, 3, 3 }, /* 26 Mb */
51 [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, 51 [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
52 36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */ 52 36700, 4, 4 }, /* 39 Mb */
53 [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, 53 [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
54 48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */ 54 48100, 5, 5 }, /* 52 Mb */
55 [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, 55 [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
56 53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */ 56 53500, 6, 6 }, /* 58.5 Mb */
57 [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, 57 [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
58 59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */ 58 59000, 7, 7 }, /* 65 Mb */
59 [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, 59 [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
60 65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */ 60 65400, 7, 7 }, /* 75 Mb */
61 [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, 61 [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
62 12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */ 62 12700, 8, 8 }, /* 13 Mb */
63 [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, 63 [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
64 24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */ 64 24800, 9, 9 }, /* 26 Mb */
65 [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, 65 [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
66 36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */ 66 36600, 10, 10 }, /* 39 Mb */
67 [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, 67 [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
68 48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */ 68 48100, 11, 11 }, /* 52 Mb */
69 [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, 69 [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
70 69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */ 70 69500, 12, 12 }, /* 78 Mb */
71 [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, 71 [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
72 89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */ 72 89500, 13, 13 }, /* 104 Mb */
73 [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, 73 [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
74 98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */ 74 98900, 14, 14 }, /* 117 Mb */
75 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, 75 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
76 108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */ 76 108300, 15, 15 }, /* 130 Mb */
77 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, 77 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
78 120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */ 78 120000, 15, 15 }, /* 144.4 Mb */
79 [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, 79 [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
80 17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */ 80 17400, 16, 16 }, /* 19.5 Mb */
81 [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, 81 [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
82 35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */ 82 35100, 17, 17 }, /* 39 Mb */
83 [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, 83 [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
84 52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */ 84 52600, 18, 18 }, /* 58.5 Mb */
85 [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, 85 [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
86 70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */ 86 70400, 19, 19 }, /* 78 Mb */
87 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, 87 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
88 104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */ 88 104900, 20, 20 }, /* 117 Mb */
89 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, 89 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
90 115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/ 90 115800, 20, 20 }, /* 130 Mb*/
91 [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, 91 [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
92 137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */ 92 137200, 21, 21 }, /* 156 Mb */
93 [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, 93 [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
94 151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */ 94 151100, 21, 21 }, /* 173.3 Mb */
95 [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, 95 [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
96 152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */ 96 152800, 22, 22 }, /* 175.5 Mb */
97 [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, 97 [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
98 168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/ 98 168400, 22, 22 }, /* 195 Mb*/
99 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, 99 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
100 168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */ 100 168400, 23, 23 }, /* 195 Mb */
101 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, 101 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
102 185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */ 102 185000, 23, 23 }, /* 216.7 Mb */
103 [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, 103 [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
104 13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/ 104 13200, 0, 0 }, /* 13.5 Mb*/
105 [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, 105 [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
106 25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/ 106 25900, 1, 1 }, /* 27.0 Mb*/
107 [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, 107 [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
108 38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/ 108 38600, 2, 2 }, /* 40.5 Mb*/
109 [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, 109 [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
110 49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */ 110 49800, 3, 3 }, /* 54 Mb */
111 [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, 111 [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
112 72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */ 112 72200, 4, 4 }, /* 81 Mb */
113 [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000, 113 [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
114 92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */ 114 92900, 5, 5 }, /* 108 Mb */
115 [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, 115 [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
116 102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/ 116 102700, 6, 6 }, /* 121.5 Mb*/
117 [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, 117 [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
118 112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */ 118 112000, 7, 7 }, /* 135 Mb */
119 [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, 119 [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
120 122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */ 120 122000, 7, 7 }, /* 150 Mb */
121 [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, 121 [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
122 25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */ 122 25800, 8, 8 }, /* 27 Mb */
123 [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, 123 [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
124 49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */ 124 49800, 9, 9 }, /* 54 Mb */
125 [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, 125 [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
126 71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */ 126 71900, 10, 10 }, /* 81 Mb */
127 [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, 127 [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
128 92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */ 128 92500, 11, 11 }, /* 108 Mb */
129 [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, 129 [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
130 130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */ 130 130300, 12, 12 }, /* 162 Mb */
131 [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, 131 [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
132 162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */ 132 162800, 13, 13 }, /* 216 Mb */
133 [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, 133 [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
134 178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */ 134 178200, 14, 14 }, /* 243 Mb */
135 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, 135 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
136 192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */ 136 192100, 15, 15 }, /* 270 Mb */
137 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, 137 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
138 207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */ 138 207000, 15, 15 }, /* 300 Mb */
139 [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, 139 [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
140 36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */ 140 36100, 16, 16 }, /* 40.5 Mb */
141 [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, 141 [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
142 72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */ 142 72900, 17, 17 }, /* 81 Mb */
143 [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, 143 [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
144 108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */ 144 108300, 18, 18 }, /* 121.5 Mb */
145 [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, 145 [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
146 142000, 19, 19, 4, 59, 59, 59 }, /* 162 Mb */ 146 142000, 19, 19 }, /* 162 Mb */
147 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, 147 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
148 205100, 20, 20, 4, 60, 61, 61 }, /* 243 Mb */ 148 205100, 20, 20 }, /* 243 Mb */
149 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, 149 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
150 224700, 20, 20, 4, 60, 61, 61 }, /* 270 Mb */ 150 224700, 20, 20 }, /* 270 Mb */
151 [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, 151 [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
152 263100, 21, 21, 4, 62, 63, 63 }, /* 324 Mb */ 152 263100, 21, 21 }, /* 324 Mb */
153 [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, 153 [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
154 288000, 21, 21, 4, 62, 63, 63 }, /* 360 Mb */ 154 288000, 21, 21 }, /* 360 Mb */
155 [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, 155 [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
156 290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */ 156 290700, 22, 22 }, /* 364.5 Mb */
157 [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, 157 [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
158 317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */ 158 317200, 22, 22 }, /* 405 Mb */
159 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, 159 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
160 317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */ 160 317200, 23, 23 }, /* 405 Mb */
161 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, 161 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
162 346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */ 162 346400, 23, 23 }, /* 450 Mb */
163 }, 163 },
164 50, /* probe interval */ 164 50, /* probe interval */
165 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 165 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
173 12, /* MCS start */ 173 12, /* MCS start */
174 { 174 {
175 [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000, 175 [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
176 900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */ 176 900, 0, 2 }, /* 1 Mb */
177 [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000, 177 [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
178 1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */ 178 1900, 1, 4 }, /* 2 Mb */
179 [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500, 179 [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
180 4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */ 180 4900, 2, 11 }, /* 5.5 Mb */
181 [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000, 181 [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
182 8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */ 182 8100, 3, 22 }, /* 11 Mb */
183 [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, 183 [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
184 5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */ 184 5400, 4, 12 }, /* 6 Mb */
185 [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, 185 [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
186 7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */ 186 7800, 5, 18 }, /* 9 Mb */
187 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, 187 [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
188 10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */ 188 10100, 6, 24 }, /* 12 Mb */
189 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, 189 [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
190 14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */ 190 14100, 7, 36 }, /* 18 Mb */
191 [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, 191 [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
192 17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */ 192 17700, 8, 48 }, /* 24 Mb */
193 [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, 193 [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
194 23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */ 194 23700, 9, 72 }, /* 36 Mb */
195 [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, 195 [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
196 27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */ 196 27400, 10, 96 }, /* 48 Mb */
197 [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, 197 [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
198 30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */ 198 30900, 11, 108 }, /* 54 Mb */
199 [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500, 199 [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
200 6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */ 200 6400, 0, 0 }, /* 6.5 Mb */
201 [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, 201 [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
202 12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */ 202 12700, 1, 1 }, /* 13 Mb */
203 [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, 203 [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
204 18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/ 204 18800, 2, 2 }, /* 19.5 Mb*/
205 [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, 205 [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
206 25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */ 206 25000, 3, 3 }, /* 26 Mb */
207 [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, 207 [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
208 36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */ 208 36700, 4, 4 }, /* 39 Mb */
209 [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, 209 [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
210 48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */ 210 48100, 5, 5 }, /* 52 Mb */
211 [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, 211 [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
212 53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */ 212 53500, 6, 6 }, /* 58.5 Mb */
213 [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, 213 [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
214 59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */ 214 59000, 7, 7 }, /* 65 Mb */
215 [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, 215 [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
216 65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/ 216 65400, 7, 7 }, /* 65 Mb*/
217 [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, 217 [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
218 12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */ 218 12700, 8, 8 }, /* 13 Mb */
219 [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, 219 [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
220 24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */ 220 24800, 9, 9 }, /* 26 Mb */
221 [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, 221 [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
222 36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */ 222 36600, 10, 10 }, /* 39 Mb */
223 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, 223 [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
224 48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */ 224 48100, 11, 11 }, /* 52 Mb */
225 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, 225 [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
226 69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */ 226 69500, 12, 12 }, /* 78 Mb */
227 [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, 227 [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
228 89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */ 228 89500, 13, 13 }, /* 104 Mb */
229 [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, 229 [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
230 98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */ 230 98900, 14, 14 }, /* 117 Mb */
231 [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, 231 [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
232 108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */ 232 108300, 15, 15 }, /* 130 Mb */
233 [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, 233 [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
234 120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */ 234 120000, 15, 15 }, /* 144.4 Mb */
235 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, 235 [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
236 17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */ 236 17400, 16, 16 }, /* 19.5 Mb */
237 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, 237 [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
238 35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */ 238 35100, 17, 17 }, /* 39 Mb */
239 [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, 239 [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
240 52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */ 240 52600, 18, 18 }, /* 58.5 Mb */
241 [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, 241 [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
242 70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */ 242 70400, 19, 19 }, /* 78 Mb */
243 [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, 243 [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
244 104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */ 244 104900, 20, 20 }, /* 117 Mb */
245 [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, 245 [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
246 115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */ 246 115800, 20, 20 }, /* 130 Mb */
247 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, 247 [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
248 137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */ 248 137200, 21, 21 }, /* 156 Mb */
249 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, 249 [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
250 151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */ 250 151100, 21, 21 }, /* 173.3 Mb */
251 [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, 251 [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
252 152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */ 252 152800, 22, 22 }, /* 175.5 Mb */
253 [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, 253 [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
254 168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */ 254 168400, 22, 22 }, /* 195 Mb */
255 [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, 255 [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
256 168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */ 256 168400, 23, 23 }, /* 195 Mb */
257 [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, 257 [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
258 185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */ 258 185000, 23, 23 }, /* 216.7 Mb */
259 [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, 259 [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
260 13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */ 260 13200, 0, 0 }, /* 13.5 Mb */
261 [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, 261 [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
262 25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */ 262 25900, 1, 1 }, /* 27.0 Mb */
263 [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, 263 [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
264 38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */ 264 38600, 2, 2 }, /* 40.5 Mb */
265 [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, 265 [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
266 49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */ 266 49800, 3, 3 }, /* 54 Mb */
267 [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, 267 [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
268 72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */ 268 72200, 4, 4 }, /* 81 Mb */
269 [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000, 269 [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
270 92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */ 270 92900, 5, 5 }, /* 108 Mb */
271 [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, 271 [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
272 102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */ 272 102700, 6, 6 }, /* 121.5 Mb */
273 [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, 273 [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
274 112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */ 274 112000, 7, 7 }, /* 135 Mb */
275 [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, 275 [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
276 122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */ 276 122000, 7, 7 }, /* 150 Mb */
277 [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, 277 [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
278 25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */ 278 25800, 8, 8 }, /* 27 Mb */
279 [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, 279 [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
280 49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */ 280 49800, 9, 9 }, /* 54 Mb */
281 [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, 281 [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
282 71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */ 282 71900, 10, 10 }, /* 81 Mb */
283 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, 283 [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
284 92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */ 284 92500, 11, 11 }, /* 108 Mb */
285 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, 285 [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
286 130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */ 286 130300, 12, 12 }, /* 162 Mb */
287 [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, 287 [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
288 162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */ 288 162800, 13, 13 }, /* 216 Mb */
289 [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, 289 [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
290 178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */ 290 178200, 14, 14 }, /* 243 Mb */
291 [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, 291 [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
292 192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */ 292 192100, 15, 15 }, /* 270 Mb */
293 [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, 293 [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
294 207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */ 294 207000, 15, 15 }, /* 300 Mb */
295 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, 295 [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
296 36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */ 296 36100, 16, 16 }, /* 40.5 Mb */
297 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, 297 [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
298 72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */ 298 72900, 17, 17 }, /* 81 Mb */
299 [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, 299 [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
300 108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */ 300 108300, 18, 18 }, /* 121.5 Mb */
301 [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, 301 [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
302 142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */ 302 142000, 19, 19 }, /* 162 Mb */
303 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, 303 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
304 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */ 304 205100, 20, 20 }, /* 243 Mb */
305 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, 305 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
306 224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */ 306 224700, 20, 20 }, /* 270 Mb */
307 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, 307 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
308 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */ 308 263100, 21, 21 }, /* 324 Mb */
309 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, 309 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
310 288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */ 310 288000, 21, 21 }, /* 360 Mb */
311 [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, 311 [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
312 290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */ 312 290700, 22, 22 }, /* 364.5 Mb */
313 [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, 313 [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
314 317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */ 314 317200, 22, 22 }, /* 405 Mb */
315 [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, 315 [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
316 317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */ 316 317200, 23, 23 }, /* 405 Mb */
317 [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, 317 [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
318 346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */ 318 346400, 23, 23 }, /* 450 Mb */
319 }, 319 },
320 50, /* probe interval */ 320 50, /* probe interval */
321 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 321 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
326 0, 326 0,
327 { 327 {
328 { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 328 { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
329 5400, 0, 12, 0}, 329 5400, 0, 12},
330 { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 330 { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
331 7800, 1, 18, 0}, 331 7800, 1, 18},
332 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 332 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
333 10000, 2, 24, 2}, 333 10000, 2, 24},
334 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 334 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
335 13900, 3, 36, 2}, 335 13900, 3, 36},
336 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 336 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
337 17300, 4, 48, 4}, 337 17300, 4, 48},
338 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 338 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
339 23000, 5, 72, 4}, 339 23000, 5, 72},
340 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 340 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
341 27400, 6, 96, 4}, 341 27400, 6, 96},
342 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 342 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
343 29300, 7, 108, 4}, 343 29300, 7, 108},
344 }, 344 },
345 50, /* probe interval */ 345 50, /* probe interval */
346 0, /* Phy rates allowed initially */ 346 0, /* Phy rates allowed initially */
@@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
351 0, 351 0,
352 { 352 {
353 { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ 353 { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
354 900, 0, 2, 0}, 354 900, 0, 2},
355 { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ 355 { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
356 1900, 1, 4, 1}, 356 1900, 1, 4},
357 { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ 357 { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
358 4900, 2, 11, 2}, 358 4900, 2, 11},
359 { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ 359 { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
360 8100, 3, 22, 3}, 360 8100, 3, 22},
361 { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ 361 { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
362 5400, 4, 12, 4}, 362 5400, 4, 12},
363 { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ 363 { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
364 7800, 5, 18, 4}, 364 7800, 5, 18},
365 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ 365 { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
366 10000, 6, 24, 6}, 366 10000, 6, 24},
367 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ 367 { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
368 13900, 7, 36, 6}, 368 13900, 7, 36},
369 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ 369 { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
370 17300, 8, 48, 8}, 370 17300, 8, 48},
371 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ 371 { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
372 23000, 9, 72, 8}, 372 23000, 9, 72},
373 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ 373 { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
374 27400, 10, 96, 8}, 374 27400, 10, 96},
375 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ 375 { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
376 29300, 11, 108, 8}, 376 29300, 11, 108},
377 }, 377 },
378 50, /* probe interval */ 378 50, /* probe interval */
379 0, /* Phy rates allowed initially */ 379 0, /* Phy rates allowed initially */
380}; 380};
381 381
382static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, 382static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
383 struct ieee80211_tx_rate *rate) 383 struct ieee80211_tx_rate *rate)
384{ 384{
385 int rix = 0, i = 0; 385 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
386 static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 }; 386 int rix, i, idx = 0;
387 387
388 if (!(rate->flags & IEEE80211_TX_RC_MCS)) 388 if (!(rate->flags & IEEE80211_TX_RC_MCS))
389 return rate->idx; 389 return rate->idx;
390 390
391 while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) { 391 for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
392 rix++; i++; 392 idx = ath_rc_priv->valid_rate_index[i];
393
394 if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
395 rate_table->info[idx].ratecode == rate->idx)
396 break;
393 } 397 }
394 398
395 rix += rate->idx + rate_table->mcs_start; 399 rix = idx;
396 400
397 if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && 401 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
398 (rate->flags & IEEE80211_TX_RC_SHORT_GI)) 402 rix++;
399 rix = rate_table->info[rix].ht_index;
400 else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
401 rix = rate_table->info[rix].sgi_index;
402 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
403 rix = rate_table->info[rix].cw40index;
404 403
405 return rix; 404 return rix;
406} 405}
407 406
408static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, 407static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
409 struct ath_rate_priv *ath_rc_priv)
410{ 408{
409 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
411 u8 i, j, idx, idx_next; 410 u8 i, j, idx, idx_next;
412 411
413 for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) { 412 for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
@@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
424 } 423 }
425} 424}
426 425
427static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
428{
429 u8 i;
430
431 for (i = 0; i < ath_rc_priv->rate_table_size; i++)
432 ath_rc_priv->valid_rate_index[i] = 0;
433}
434
435static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
436 u8 index, int valid_tx_rate)
437{
438 BUG_ON(index > ath_rc_priv->rate_table_size);
439 ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
440}
441
442static inline 426static inline
443int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table, 427int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
444 struct ath_rate_priv *ath_rc_priv, 428 struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
479} 463}
480 464
481static inline int 465static inline int
482ath_rc_get_lower_rix(const struct ath_rate_table *rate_table, 466ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
483 struct ath_rate_priv *ath_rc_priv,
484 u8 cur_valid_txrate, u8 *next_idx) 467 u8 cur_valid_txrate, u8 *next_idx)
485{ 468{
486 int8_t i; 469 int8_t i;
@@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
495 return 0; 478 return 0;
496} 479}
497 480
498static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, 481static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
499 const struct ath_rate_table *rate_table,
500 u32 capflag)
501{ 482{
483 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
502 u8 i, hi = 0; 484 u8 i, hi = 0;
503 485
504 for (i = 0; i < rate_table->rate_cnt; i++) { 486 for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
506 u32 phy = rate_table->info[i].phy; 488 u32 phy = rate_table->info[i].phy;
507 u8 valid_rate_count = 0; 489 u8 valid_rate_count = 0;
508 490
509 if (!ath_rc_valid_phyrate(phy, capflag, 0)) 491 if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
510 continue; 492 continue;
511 493
512 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy]; 494 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
513 495
514 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; 496 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
515 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 497 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
516 ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1); 498 ath_rc_priv->valid_rate_index[i] = true;
517 hi = i; 499 hi = i;
518 } 500 }
519 } 501 }
@@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
521 return hi; 503 return hi;
522} 504}
523 505
524static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, 506static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
525 const struct ath_rate_table *rate_table, 507 u32 phy, u32 capflag)
526 struct ath_rateset *rateset,
527 u32 capflag)
528{ 508{
529 u8 i, j, hi = 0; 509 if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
510 return false;
530 511
531 /* Use intersection of working rates and valid rates */ 512 if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
532 for (i = 0; i < rateset->rs_nrates; i++) { 513 return false;
533 for (j = 0; j < rate_table->rate_cnt; j++) {
534 u32 phy = rate_table->info[j].phy;
535 u16 rate_flags = rate_table->info[j].rate_flags;
536 u8 rate = rateset->rs_rates[i];
537 u8 dot11rate = rate_table->info[j].dot11rate;
538
539 /* We allow a rate only if its valid and the
540 * capflag matches one of the validity
541 * (VALID/VALID_20/VALID_40) flags */
542
543 if ((rate == dot11rate) &&
544 (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
545 WLAN_RC_CAP_MODE(capflag) &&
546 (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
547 !WLAN_RC_PHY_HT(phy)) {
548 u8 valid_rate_count = 0;
549
550 if (!ath_rc_valid_phyrate(phy, capflag, 0))
551 continue;
552
553 valid_rate_count =
554 ath_rc_priv->valid_phy_ratecnt[phy];
555
556 ath_rc_priv->valid_phy_rateidx[phy]
557 [valid_rate_count] = j;
558 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
559 ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
560 hi = max(hi, j);
561 }
562 }
563 }
564 514
565 return hi; 515 if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
516 return false;
517
518 return true;
566} 519}
567 520
568static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv, 521static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
569 const struct ath_rate_table *rate_table, 522 u32 phy, u32 capflag)
570 struct ath_rateset *rateset, u32 capflag)
571{ 523{
572 u8 i, j, hi = 0; 524 if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
525 return false;
526
527 if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
528 return false;
529
530 if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
531 return false;
532
533 return true;
534}
535
536static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
537{
538 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
539 struct ath_rateset *rateset;
540 u32 phy, capflag = ath_rc_priv->ht_cap;
541 u16 rate_flags;
542 u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
543
544 if (legacy)
545 rateset = &ath_rc_priv->neg_rates;
546 else
547 rateset = &ath_rc_priv->neg_ht_rates;
573 548
574 /* Use intersection of working rates and valid rates */
575 for (i = 0; i < rateset->rs_nrates; i++) { 549 for (i = 0; i < rateset->rs_nrates; i++) {
576 for (j = 0; j < rate_table->rate_cnt; j++) { 550 for (j = 0; j < rate_table->rate_cnt; j++) {
577 u32 phy = rate_table->info[j].phy; 551 phy = rate_table->info[j].phy;
578 u16 rate_flags = rate_table->info[j].rate_flags; 552 rate_flags = rate_table->info[j].rate_flags;
579 u8 rate = rateset->rs_rates[i]; 553 rate = rateset->rs_rates[i];
580 u8 dot11rate = rate_table->info[j].dot11rate; 554 dot11rate = rate_table->info[j].dot11rate;
581 555
582 if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) || 556 if (legacy &&
583 !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) || 557 !ath_rc_check_legacy(rate, dot11rate,
584 !WLAN_RC_PHY_HT_VALID(rate_flags, capflag)) 558 rate_flags, phy, capflag))
559 continue;
560
561 if (!legacy &&
562 !ath_rc_check_ht(rate, dot11rate,
563 rate_flags, phy, capflag))
585 continue; 564 continue;
586 565
587 if (!ath_rc_valid_phyrate(phy, capflag, 0)) 566 if (!ath_rc_valid_phyrate(phy, capflag, 0))
588 continue; 567 continue;
589 568
590 ath_rc_priv->valid_phy_rateidx[phy] 569 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
591 [ath_rc_priv->valid_phy_ratecnt[phy]] = j; 570 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
592 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 571 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
593 ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1); 572 ath_rc_priv->valid_rate_index[j] = true;
594 hi = max(hi, j); 573 hi = max(hi, j);
595 } 574 }
596 } 575 }
@@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
598 return hi; 577 return hi;
599} 578}
600 579
601/* Finds the highest rate index we can use */ 580static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
602static u8 ath_rc_get_highest_rix(struct ath_softc *sc, 581 int *is_probing)
603 struct ath_rate_priv *ath_rc_priv,
604 const struct ath_rate_table *rate_table,
605 int *is_probing,
606 bool legacy)
607{ 582{
583 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
608 u32 best_thruput, this_thruput, now_msec; 584 u32 best_thruput, this_thruput, now_msec;
609 u8 rate, next_rate, best_rate, maxindex, minindex; 585 u8 rate, next_rate, best_rate, maxindex, minindex;
610 int8_t index = 0; 586 int8_t index = 0;
@@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
624 u8 per_thres; 600 u8 per_thres;
625 601
626 rate = ath_rc_priv->valid_rate_index[index]; 602 rate = ath_rc_priv->valid_rate_index[index];
627 if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
628 continue;
629 if (rate > ath_rc_priv->rate_max_phy) 603 if (rate > ath_rc_priv->rate_max_phy)
630 continue; 604 continue;
631 605
@@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
707 rate->count = tries; 681 rate->count = tries;
708 rate->idx = rate_table->info[rix].ratecode; 682 rate->idx = rate_table->info[rix].ratecode;
709 683
710 if (txrc->short_preamble)
711 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
712 if (txrc->rts || rtsctsenable) 684 if (txrc->rts || rtsctsenable)
713 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; 685 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
714 686
@@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
726 const struct ath_rate_table *rate_table, 698 const struct ath_rate_table *rate_table,
727 struct ieee80211_tx_info *tx_info) 699 struct ieee80211_tx_info *tx_info)
728{ 700{
729 struct ieee80211_tx_rate *rates = tx_info->control.rates; 701 struct ieee80211_bss_conf *bss_conf;
730 int i = 0, rix = 0, cix, enable_g_protection = 0;
731 702
732 /* get the cix for the lowest valid rix */ 703 if (!tx_info->control.vif)
733 for (i = 3; i >= 0; i--) { 704 return;
734 if (rates[i].count && (rates[i].idx >= 0)) { 705 /*
735 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 706 * For legacy frames, mac80211 takes care of CTS protection.
736 break; 707 */
737 } 708 if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
738 } 709 return;
739 cix = rate_table->info[rix].ctrl_rate;
740 710
741 /* All protection frames are transmited at 2Mb/s for 802.11g, 711 bss_conf = &tx_info->control.vif->bss_conf;
742 * otherwise we transmit them at 1Mb/s */ 712
743 if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ && 713 if (!bss_conf->basic_rates)
744 !conf_is_ht(&sc->hw->conf)) 714 return;
745 enable_g_protection = 1;
746 715
747 /* 716 /*
748 * If 802.11g protection is enabled, determine whether to use RTS/CTS or 717 * For now, use the lowest allowed basic rate for HT frames.
749 * just CTS. Note that this is only done for OFDM/HT unicast frames.
750 */ 718 */
751 if ((tx_info->control.vif && 719 tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
752 tx_info->control.vif->bss_conf.use_cts_prot) &&
753 (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
754 WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
755 rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
756 cix = rate_table->info[enable_g_protection].ctrl_rate;
757 }
758
759 tx_info->control.rts_cts_rate_idx = cix;
760} 720}
761 721
762static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, 722static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
@@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
789 try_per_rate = 4; 749 try_per_rate = 4;
790 750
791 rate_table = ath_rc_priv->rate_table; 751 rate_table = ath_rc_priv->rate_table;
792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 752 rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
793 &is_probe, false);
794 753
795 /*
796 * If we're in HT mode and both us and our peer supports LDPC.
797 * We don't need to check our own device's capabilities as our own
798 * ht capabilities would have already been intersected with our peer's.
799 */
800 if (conf_is_ht(&sc->hw->conf) && 754 if (conf_is_ht(&sc->hw->conf) &&
801 (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) 755 (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
802 tx_info->flags |= IEEE80211_TX_CTL_LDPC; 756 tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
806 tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT); 760 tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
807 761
808 if (is_probe) { 762 if (is_probe) {
809 /* set one try for probe rates. For the 763 /*
810 * probes don't enable rts */ 764 * Set one try for probe rates. For the
765 * probes don't enable RTS.
766 */
811 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 767 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
812 1, rix, 0); 768 1, rix, 0);
813 769 /*
814 /* Get the next tried/allowed rate. No RTS for the next series 770 * Get the next tried/allowed rate.
815 * after the probe rate 771 * No RTS for the next series after the probe rate.
816 */ 772 */
817 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); 773 ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
818 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 774 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
819 try_per_rate, rix, 0); 775 try_per_rate, rix, 0);
820 776
821 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 777 tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
822 } else { 778 } else {
823 /* Set the chosen rate. No RTS for first series entry. */ 779 /*
780 * Set the chosen rate. No RTS for first series entry.
781 */
824 ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 782 ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
825 try_per_rate, rix, 0); 783 try_per_rate, rix, 0);
826 } 784 }
827 785
828 /* Fill in the other rates for multirate retry */ 786 for ( ; i < 4; i++) {
829 for ( ; i < 3; i++) { 787 /*
788 * Use twice the number of tries for the last MRR segment.
789 */
790 if (i + 1 == 4)
791 try_per_rate = 8;
792
793 ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
830 794
831 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); 795 /*
832 /* All other rates in the series have RTS enabled */ 796 * All other rates in the series have RTS enabled.
797 */
833 ath_rc_rate_set_series(rate_table, &rates[i], txrc, 798 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
834 try_per_rate, rix, 1); 799 try_per_rate, rix, 1);
835 } 800 }
836 801
837 /* Use twice the number of tries for the last MRR segment. */
838 try_per_rate = 8;
839
840 /*
841 * If the last rate in the rate series is MCS and has
842 * more than 80% of per thresh, then use a legacy rate
843 * as last retry to ensure that the frame is tried in both
844 * MCS and legacy rate.
845 */
846 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
847 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
848 (ath_rc_priv->per[rix] > 45))
849 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
850 &is_probe, true);
851
852 /* All other rates in the series have RTS enabled */
853 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
854 try_per_rate, rix, 1);
855 /* 802 /*
856 * NB:Change rate series to enable aggregation when operating 803 * NB:Change rate series to enable aggregation when operating
857 * at lower MCS rates. When first rate in series is MCS2 804 * at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
893 rates[0].count = ATH_TXMAXTRY; 840 rates[0].count = ATH_TXMAXTRY;
894 } 841 }
895 842
896 /* Setup RTS/CTS */
897 ath_rc_rate_set_rtscts(sc, rate_table, tx_info); 843 ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
898} 844}
899 845
@@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
1046 stats->per = per; 992 stats->per = per;
1047} 993}
1048 994
1049/* Update PER, RSSI and whatever else that the code thinks it is doing.
1050 If you can make sense of all this, you really need to go out more. */
1051
1052static void ath_rc_update_ht(struct ath_softc *sc, 995static void ath_rc_update_ht(struct ath_softc *sc,
1053 struct ath_rate_priv *ath_rc_priv, 996 struct ath_rate_priv *ath_rc_priv,
1054 struct ieee80211_tx_info *tx_info, 997 struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1077 if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 && 1020 if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
1078 rate_table->info[tx_rate].ratekbps <= 1021 rate_table->info[tx_rate].ratekbps <=
1079 rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) { 1022 rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
1080 ath_rc_get_lower_rix(rate_table, ath_rc_priv, 1023 ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
1081 (u8)tx_rate, &ath_rc_priv->rate_max_phy); 1024 &ath_rc_priv->rate_max_phy);
1082 1025
1083 /* Don't probe for a little while. */ 1026 /* Don't probe for a little while. */
1084 ath_rc_priv->probe_time = now_msec; 1027 ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1122 1065
1123} 1066}
1124 1067
1068static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1069{
1070 struct ath_rc_stats *stats;
1071
1072 stats = &rc->rcstats[final_rate];
1073 stats->success++;
1074}
1125 1075
1126static void ath_rc_tx_status(struct ath_softc *sc, 1076static void ath_rc_tx_status(struct ath_softc *sc,
1127 struct ath_rate_priv *ath_rc_priv, 1077 struct ath_rate_priv *ath_rc_priv,
1128 struct ieee80211_tx_info *tx_info, 1078 struct sk_buff *skb)
1129 int final_ts_idx, int xretries, int long_retry)
1130{ 1079{
1131 const struct ath_rate_table *rate_table; 1080 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1132 struct ieee80211_tx_rate *rates = tx_info->status.rates; 1081 struct ieee80211_tx_rate *rates = tx_info->status.rates;
1082 struct ieee80211_tx_rate *rate;
1083 int final_ts_idx = 0, xretries = 0, long_retry = 0;
1133 u8 flags; 1084 u8 flags;
1134 u32 i = 0, rix; 1085 u32 i = 0, rix;
1135 1086
1136 rate_table = ath_rc_priv->rate_table; 1087 for (i = 0; i < sc->hw->max_rates; i++) {
1088 rate = &tx_info->status.rates[i];
1089 if (rate->idx < 0 || !rate->count)
1090 break;
1091
1092 final_ts_idx = i;
1093 long_retry = rate->count - 1;
1094 }
1095
1096 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
1097 xretries = 1;
1137 1098
1138 /* 1099 /*
1139 * If the first rate is not the final index, there 1100 * If the first rate is not the final index, there
1140 * are intermediate rate failures to be processed. 1101 * are intermediate rate failures to be processed.
1141 */ 1102 */
1142 if (final_ts_idx != 0) { 1103 if (final_ts_idx != 0) {
1143 /* Process intermediate rates that failed.*/
1144 for (i = 0; i < final_ts_idx ; i++) { 1104 for (i = 0; i < final_ts_idx ; i++) {
1145 if (rates[i].count != 0 && (rates[i].idx >= 0)) { 1105 if (rates[i].count != 0 && (rates[i].idx >= 0)) {
1146 flags = rates[i].flags; 1106 flags = rates[i].flags;
@@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1152 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) 1112 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
1153 return; 1113 return;
1154 1114
1155 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 1115 rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
1156 ath_rc_update_ht(sc, ath_rc_priv, tx_info, 1116 ath_rc_update_ht(sc, ath_rc_priv, tx_info,
1157 rix, xretries ? 1 : 2, 1117 rix, xretries ? 1 : 2,
1158 rates[i].count); 1118 rates[i].count);
1159 } 1119 }
1160 } 1120 }
1161 } else {
1162 /*
1163 * Handle the special case of MIMO PS burst, where the second
1164 * aggregate is sent out with only one rate and one try.
1165 * Treating it as an excessive retry penalizes the rate
1166 * inordinately.
1167 */
1168 if (rates[0].count == 1 && xretries == 1)
1169 xretries = 2;
1170 } 1121 }
1171 1122
1172 flags = rates[i].flags; 1123 flags = rates[final_ts_idx].flags;
1173 1124
1174 /* If HT40 and we have switched mode from 40 to 20 => don't update */ 1125 /* If HT40 and we have switched mode from 40 to 20 => don't update */
1175 if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && 1126 if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
1176 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) 1127 !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
1177 return; 1128 return;
1178 1129
1179 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 1130 rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
1180 ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry); 1131 ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
1132 ath_debug_stat_rc(ath_rc_priv, rix);
1181} 1133}
1182 1134
1183static const 1135static const
@@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1185 enum ieee80211_band band, 1137 enum ieee80211_band band,
1186 bool is_ht) 1138 bool is_ht)
1187{ 1139{
1188 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1189
1190 switch(band) { 1140 switch(band) {
1191 case IEEE80211_BAND_2GHZ: 1141 case IEEE80211_BAND_2GHZ:
1192 if (is_ht) 1142 if (is_ht)
@@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1197 return &ar5416_11na_ratetable; 1147 return &ar5416_11na_ratetable;
1198 return &ar5416_11a_ratetable; 1148 return &ar5416_11a_ratetable;
1199 default: 1149 default:
1200 ath_dbg(common, CONFIG, "Invalid band\n");
1201 return NULL; 1150 return NULL;
1202 } 1151 }
1203} 1152}
1204 1153
1205static void ath_rc_init(struct ath_softc *sc, 1154static void ath_rc_init(struct ath_softc *sc,
1206 struct ath_rate_priv *ath_rc_priv, 1155 struct ath_rate_priv *ath_rc_priv)
1207 struct ieee80211_supported_band *sband,
1208 struct ieee80211_sta *sta,
1209 const struct ath_rate_table *rate_table)
1210{ 1156{
1157 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
1211 struct ath_rateset *rateset = &ath_rc_priv->neg_rates; 1158 struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
1212 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1159 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1213 struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
1214 u8 i, j, k, hi = 0, hthi = 0; 1160 u8 i, j, k, hi = 0, hthi = 0;
1215 1161
1216 /* Initial rate table size. Will change depending
1217 * on the working rate set */
1218 ath_rc_priv->rate_table_size = RATE_TABLE_SIZE; 1162 ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
1219 1163
1220 /* Initialize thresholds according to the global rate table */
1221 for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) { 1164 for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
1222 ath_rc_priv->per[i] = 0; 1165 ath_rc_priv->per[i] = 0;
1166 ath_rc_priv->valid_rate_index[i] = 0;
1223 } 1167 }
1224 1168
1225 /* Determine the valid rates */
1226 ath_rc_init_valid_rate_idx(ath_rc_priv);
1227
1228 for (i = 0; i < WLAN_RC_PHY_MAX; i++) { 1169 for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
1229 for (j = 0; j < RATE_TABLE_SIZE; j++) 1170 for (j = 0; j < RATE_TABLE_SIZE; j++)
1230 ath_rc_priv->valid_phy_rateidx[i][j] = 0; 1171 ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc,
1232 } 1173 }
1233 1174
1234 if (!rateset->rs_nrates) { 1175 if (!rateset->rs_nrates) {
1235 /* No working rate, just initialize valid rates */ 1176 hi = ath_rc_init_validrates(ath_rc_priv);
1236 hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
1237 ath_rc_priv->ht_cap);
1238 } else { 1177 } else {
1239 /* Use intersection of working rates and valid rates */ 1178 hi = ath_rc_setvalid_rates(ath_rc_priv, true);
1240 hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table, 1179
1241 rateset, ath_rc_priv->ht_cap); 1180 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
1242 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) { 1181 hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
1243 hthi = ath_rc_setvalid_htrates(ath_rc_priv, 1182
1244 rate_table,
1245 ht_mcs,
1246 ath_rc_priv->ht_cap);
1247 }
1248 hi = max(hi, hthi); 1183 hi = max(hi, hthi);
1249 } 1184 }
1250 1185
1251 ath_rc_priv->rate_table_size = hi + 1; 1186 ath_rc_priv->rate_table_size = hi + 1;
1252 ath_rc_priv->rate_max_phy = 0; 1187 ath_rc_priv->rate_max_phy = 0;
1253 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); 1188 WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1254 1189
1255 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { 1190 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1256 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) { 1191 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc,
1258 ath_rc_priv->valid_phy_rateidx[i][j]; 1193 ath_rc_priv->valid_phy_rateidx[i][j];
1259 } 1194 }
1260 1195
1261 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) 1196 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
1262 || !ath_rc_priv->valid_phy_ratecnt[i]) 1197 !ath_rc_priv->valid_phy_ratecnt[i])
1263 continue; 1198 continue;
1264 1199
1265 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1]; 1200 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
1266 } 1201 }
1267 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); 1202 WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1268 BUG_ON(k > RATE_TABLE_SIZE); 1203 WARN_ON(k > RATE_TABLE_SIZE);
1269 1204
1270 ath_rc_priv->max_valid_rate = k; 1205 ath_rc_priv->max_valid_rate = k;
1271 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1206 ath_rc_sort_validrates(ath_rc_priv);
1272 ath_rc_priv->rate_max_phy = (k > 4) ? 1207 ath_rc_priv->rate_max_phy = (k > 4) ?
1273 ath_rc_priv->valid_rate_index[k-4] : 1208 ath_rc_priv->valid_rate_index[k-4] :
1274 ath_rc_priv->valid_rate_index[k-1]; 1209 ath_rc_priv->valid_rate_index[k-1];
1275 ath_rc_priv->rate_table = rate_table;
1276 1210
1277 ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n", 1211 ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
1278 ath_rc_priv->ht_cap); 1212 ath_rc_priv->ht_cap);
1279} 1213}
1280 1214
1281static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, 1215static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
1282 bool is_cw40, bool is_sgi)
1283{ 1216{
1284 u8 caps = 0; 1217 u8 caps = 0;
1285 1218
@@ -1289,10 +1222,14 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1289 caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG; 1222 caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
1290 else if (sta->ht_cap.mcs.rx_mask[1]) 1223 else if (sta->ht_cap.mcs.rx_mask[1])
1291 caps |= WLAN_RC_DS_FLAG; 1224 caps |= WLAN_RC_DS_FLAG;
1292 if (is_cw40) 1225 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
1293 caps |= WLAN_RC_40_FLAG; 1226 caps |= WLAN_RC_40_FLAG;
1294 if (is_sgi) 1227 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
1295 caps |= WLAN_RC_SGI_FLAG; 1228 caps |= WLAN_RC_SGI_FLAG;
1229 } else {
1230 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
1231 caps |= WLAN_RC_SGI_FLAG;
1232 }
1296 } 1233 }
1297 1234
1298 return caps; 1235 return caps;
@@ -1319,15 +1256,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
1319/* mac80211 Rate Control callbacks */ 1256/* mac80211 Rate Control callbacks */
1320/***********************************/ 1257/***********************************/
1321 1258
1322static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1323{
1324 struct ath_rc_stats *stats;
1325
1326 stats = &rc->rcstats[final_rate];
1327 stats->success++;
1328}
1329
1330
1331static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, 1259static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1332 struct ieee80211_sta *sta, void *priv_sta, 1260 struct ieee80211_sta *sta, void *priv_sta,
1333 struct sk_buff *skb) 1261 struct sk_buff *skb)
@@ -1335,22 +1263,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1335 struct ath_softc *sc = priv; 1263 struct ath_softc *sc = priv;
1336 struct ath_rate_priv *ath_rc_priv = priv_sta; 1264 struct ath_rate_priv *ath_rc_priv = priv_sta;
1337 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1265 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1338 struct ieee80211_hdr *hdr; 1266 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1339 int final_ts_idx = 0, tx_status = 0; 1267 __le16 fc = hdr->frame_control;
1340 int long_retry = 0;
1341 __le16 fc;
1342 int i;
1343
1344 hdr = (struct ieee80211_hdr *)skb->data;
1345 fc = hdr->frame_control;
1346 for (i = 0; i < sc->hw->max_rates; i++) {
1347 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
1348 if (rate->idx < 0 || !rate->count)
1349 break;
1350
1351 final_ts_idx = i;
1352 long_retry = rate->count - 1;
1353 }
1354 1268
1355 if (!priv_sta || !ieee80211_is_data(fc)) 1269 if (!priv_sta || !ieee80211_is_data(fc))
1356 return; 1270 return;
@@ -1363,11 +1277,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1363 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) 1277 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
1364 return; 1278 return;
1365 1279
1366 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) 1280 ath_rc_tx_status(sc, ath_rc_priv, skb);
1367 tx_status = 1;
1368
1369 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
1370 long_retry);
1371 1281
1372 /* Check if aggregation has to be enabled for this tid */ 1282 /* Check if aggregation has to be enabled for this tid */
1373 if (conf_is_ht(&sc->hw->conf) && 1283 if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1293,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1383 ieee80211_start_tx_ba_session(sta, tid, 0); 1293 ieee80211_start_tx_ba_session(sta, tid, 0);
1384 } 1294 }
1385 } 1295 }
1386
1387 ath_debug_stat_rc(ath_rc_priv,
1388 ath_rc_get_rateindex(ath_rc_priv->rate_table,
1389 &tx_info->status.rates[final_ts_idx]));
1390} 1296}
1391 1297
1392static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, 1298static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1393 struct ieee80211_sta *sta, void *priv_sta) 1299 struct ieee80211_sta *sta, void *priv_sta)
1394{ 1300{
1395 struct ath_softc *sc = priv; 1301 struct ath_softc *sc = priv;
1302 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1396 struct ath_rate_priv *ath_rc_priv = priv_sta; 1303 struct ath_rate_priv *ath_rc_priv = priv_sta;
1397 const struct ath_rate_table *rate_table;
1398 bool is_cw40, is_sgi = false;
1399 int i, j = 0; 1304 int i, j = 0;
1400 1305
1401 for (i = 0; i < sband->n_bitrates; i++) { 1306 for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1322,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1417 ath_rc_priv->neg_ht_rates.rs_nrates = j; 1322 ath_rc_priv->neg_ht_rates.rs_nrates = j;
1418 } 1323 }
1419 1324
1420 is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40); 1325 ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
1421 1326 sta->ht_cap.ht_supported);
1422 if (is_cw40) 1327 if (!ath_rc_priv->rate_table) {
1423 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); 1328 ath_err(common, "No rate table chosen\n");
1424 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) 1329 return;
1425 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); 1330 }
1426
1427 /* Choose rate table first */
1428
1429 rate_table = ath_choose_rate_table(sc, sband->band,
1430 sta->ht_cap.ht_supported);
1431 1331
1432 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi); 1332 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
1433 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1333 ath_rc_init(sc, priv_sta);
1434} 1334}
1435 1335
1436static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, 1336static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1339,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1439{ 1339{
1440 struct ath_softc *sc = priv; 1340 struct ath_softc *sc = priv;
1441 struct ath_rate_priv *ath_rc_priv = priv_sta; 1341 struct ath_rate_priv *ath_rc_priv = priv_sta;
1442 const struct ath_rate_table *rate_table = NULL;
1443 bool oper_cw40 = false, oper_sgi;
1444 bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
1445 bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
1446
1447 /* FIXME: Handle AP mode later when we support CWM */
1448 1342
1449 if (changed & IEEE80211_RC_BW_CHANGED) { 1343 if (changed & IEEE80211_RC_BW_CHANGED) {
1450 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) 1344 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
1451 return; 1345 ath_rc_init(sc, priv_sta);
1452 1346
1453 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) 1347 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1454 oper_cw40 = true; 1348 "Operating HT Bandwidth changed to: %d\n",
1455 1349 sc->hw->conf.channel_type);
1456 if (oper_cw40)
1457 oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1458 true : false;
1459 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
1460 oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1461 true : false;
1462 else
1463 oper_sgi = false;
1464
1465 if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
1466 rate_table = ath_choose_rate_table(sc, sband->band,
1467 sta->ht_cap.ht_supported);
1468 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
1469 oper_cw40, oper_sgi);
1470 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1471
1472 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1473 "Operating HT Bandwidth changed to: %d\n",
1474 sc->hw->conf.channel_type);
1475 }
1476 } 1350 }
1477} 1351}
1478 1352
@@ -1484,7 +1358,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1484 struct ath_rate_priv *rc = file->private_data; 1358 struct ath_rate_priv *rc = file->private_data;
1485 char *buf; 1359 char *buf;
1486 unsigned int len = 0, max; 1360 unsigned int len = 0, max;
1487 int i = 0; 1361 int rix;
1488 ssize_t retval; 1362 ssize_t retval;
1489 1363
1490 if (rc->rate_table == NULL) 1364 if (rc->rate_table == NULL)
@@ -1500,7 +1374,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1500 "HT", "MCS", "Rate", 1374 "HT", "MCS", "Rate",
1501 "Success", "Retries", "XRetries", "PER"); 1375 "Success", "Retries", "XRetries", "PER");
1502 1376
1503 for (i = 0; i < rc->rate_table_size; i++) { 1377 for (rix = 0; rix < rc->max_valid_rate; rix++) {
1378 u8 i = rc->valid_rate_index[rix];
1504 u32 ratekbps = rc->rate_table->info[i].ratekbps; 1379 u32 ratekbps = rc->rate_table->info[i].ratekbps;
1505 struct ath_rc_stats *stats = &rc->rcstats[i]; 1380 struct ath_rc_stats *stats = &rc->rcstats[i];
1506 char mcs[5]; 1381 char mcs[5];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 75f8e9b06b28..268e67dc5fb2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -160,10 +160,6 @@ struct ath_rate_table {
160 u32 user_ratekbps; 160 u32 user_ratekbps;
161 u8 ratecode; 161 u8 ratecode;
162 u8 dot11rate; 162 u8 dot11rate;
163 u8 ctrl_rate;
164 u8 cw40index;
165 u8 sgi_index;
166 u8 ht_index;
167 } info[RATE_TABLE_SIZE]; 163 } info[RATE_TABLE_SIZE];
168 u32 probe_interval; 164 u32 probe_interval;
169 u8 initial_ratemax; 165 u8 initial_ratemax;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4480c0cc655f..83d16e7ed272 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -424,8 +424,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
424 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 424 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
425 425
426 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 426 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
427 /* The following may also be needed for other older chips */ 427 /* This is needed for older chips */
428 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 428 if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
429 rfilt |= ATH9K_RX_FILTER_PROM; 429 rfilt |= ATH9K_RX_FILTER_PROM;
430 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 430 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
431 } 431 }
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 87cac8eb7834..4e6760f8596d 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -801,6 +801,8 @@
801#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */ 801#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
802#define AR_SREV_VERSION_9462 0x280 802#define AR_SREV_VERSION_9462 0x280
803#define AR_SREV_REVISION_9462_20 2 803#define AR_SREV_REVISION_9462_20 2
804#define AR_SREV_VERSION_9565 0x2C0
805#define AR_SREV_REVISION_9565_10 0
804#define AR_SREV_VERSION_9550 0x400 806#define AR_SREV_VERSION_9550 0x400
805 807
806#define AR_SREV_5416(_ah) \ 808#define AR_SREV_5416(_ah) \
@@ -909,6 +911,13 @@
909 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ 911 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
910 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20)) 912 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
911 913
914#define AR_SREV_9565(_ah) \
915 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
916
917#define AR_SREV_9565_10(_ah) \
918 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
919 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
920
912#define AR_SREV_9550(_ah) \ 921#define AR_SREV_9550(_ah) \
913 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550)) 922 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
914 923
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 44a08eb53c62..a483d518758c 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -497,7 +497,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
497 497
498 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr); 498 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
499 499
500 if (AR_SREV_9462(ah)) { 500 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
501 /* 501 /*
502 * this is needed to prevent the chip waking up 502 * this is needed to prevent the chip waking up
503 * the host within 3-4 seconds with certain 503 * the host within 3-4 seconds with certain
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0d4155aec48d..36618e3a5e60 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -568,7 +568,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
568 if (!an->sleeping) { 568 if (!an->sleeping) {
569 ath_tx_queue_tid(txq, tid); 569 ath_tx_queue_tid(txq, tid);
570 570
571 if (ts->ts_status & ATH9K_TXERR_FILT) 571 if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
572 tid->ac->clear_ps_filter = true; 572 tid->ac->clear_ps_filter = true;
573 } 573 }
574 } 574 }
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1773 TX_STAT_INC(txq->axq_qnum, queued); 1773 TX_STAT_INC(txq->axq_qnum, queued);
1774} 1774}
1775 1775
1776static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1776static void setup_frame_info(struct ieee80211_hw *hw,
1777 struct ieee80211_sta *sta,
1778 struct sk_buff *skb,
1777 int framelen) 1779 int framelen)
1778{ 1780{
1779 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1781 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1780 struct ieee80211_sta *sta = tx_info->control.sta;
1781 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1782 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1782 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1783 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1783 const struct ieee80211_rate *rate; 1784 const struct ieee80211_rate *rate;
@@ -1819,10 +1820,14 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1819{ 1820{
1820 struct ath_hw *ah = sc->sc_ah; 1821 struct ath_hw *ah = sc->sc_ah;
1821 struct ath9k_channel *curchan = ah->curchan; 1822 struct ath9k_channel *curchan = ah->curchan;
1823
1822 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 1824 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1823 (curchan->channelFlags & CHANNEL_5GHZ) && 1825 (curchan->channelFlags & CHANNEL_5GHZ) &&
1824 (chainmask == 0x7) && (rate < 0x90)) 1826 (chainmask == 0x7) && (rate < 0x90))
1825 return 0x3; 1827 return 0x3;
1828 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
1829 IS_CCK_RATE(rate))
1830 return 0x2;
1826 else 1831 else
1827 return chainmask; 1832 return chainmask;
1828} 1833}
@@ -1935,7 +1940,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1935{ 1940{
1936 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1941 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1942 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1938 struct ieee80211_sta *sta = info->control.sta; 1943 struct ieee80211_sta *sta = txctl->sta;
1939 struct ieee80211_vif *vif = info->control.vif; 1944 struct ieee80211_vif *vif = info->control.vif;
1940 struct ath_softc *sc = hw->priv; 1945 struct ath_softc *sc = hw->priv;
1941 struct ath_txq *txq = txctl->txq; 1946 struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1984,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1979 !ieee80211_is_data(hdr->frame_control)) 1984 !ieee80211_is_data(hdr->frame_control))
1980 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1985 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1981 1986
1982 setup_frame_info(hw, skb, frmlen); 1987 setup_frame_info(hw, sta, skb, frmlen);
1983 1988
1984 /* 1989 /*
1985 * At this point, the vif, hw_key and sta pointers in the tx control 1990 * At this point, the vif, hw_key and sta pointers in the tx control
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 376be11161c0..2aa4a59c72c8 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -425,6 +425,7 @@ struct ar9170 {
425 bool rx_has_plcp; 425 bool rx_has_plcp;
426 struct sk_buff *rx_failover; 426 struct sk_buff *rx_failover;
427 int rx_failover_missing; 427 int rx_failover_missing;
428 u32 ampdu_ref;
428 429
429 /* FIFO for collecting outstanding BlockAckRequest */ 430 /* FIFO for collecting outstanding BlockAckRequest */
430 struct list_head bar_list[__AR9170_NUM_TXQ]; 431 struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
577void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); 578void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
578 579
579/* TX */ 580/* TX */
580void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 581void carl9170_op_tx(struct ieee80211_hw *hw,
582 struct ieee80211_tx_control *control,
583 struct sk_buff *skb);
581void carl9170_tx_janitor(struct work_struct *work); 584void carl9170_tx_janitor(struct work_struct *work);
582void carl9170_tx_process_status(struct ar9170 *ar, 585void carl9170_tx_process_status(struct ar9170 *ar,
583 const struct carl9170_rsp *cmd); 586 const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index c5ca6f1f5836..24ac2876a733 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
341 if (SUPP(CARL9170FW_WLANTX_CAB)) { 341 if (SUPP(CARL9170FW_WLANTX_CAB)) {
342 if_comb_types |= 342 if_comb_types |=
343 BIT(NL80211_IFTYPE_AP) | 343 BIT(NL80211_IFTYPE_AP) |
344 BIT(NL80211_IFTYPE_MESH_POINT) |
344 BIT(NL80211_IFTYPE_P2P_GO); 345 BIT(NL80211_IFTYPE_P2P_GO);
345 } 346 }
346 } 347 }
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 53415bfd8bef..e3b1b6e87760 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -304,7 +304,8 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
304 struct ath_common *common = &ar->common; 304 struct ath_common *common = &ar->common;
305 u8 *mac_addr, *bssid; 305 u8 *mac_addr, *bssid;
306 u32 cam_mode = AR9170_MAC_CAM_DEFAULTS; 306 u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
307 u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS; 307 u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
308 AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
308 u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG | 309 u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
309 AR9170_MAC_RX_CTRL_SHORT_FILTER; 310 AR9170_MAC_RX_CTRL_SHORT_FILTER;
310 u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS; 311 u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
@@ -318,10 +319,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
318 bssid = common->curbssid; 319 bssid = common->curbssid;
319 320
320 switch (vif->type) { 321 switch (vif->type) {
321 case NL80211_IFTYPE_MESH_POINT:
322 case NL80211_IFTYPE_ADHOC: 322 case NL80211_IFTYPE_ADHOC:
323 cam_mode |= AR9170_MAC_CAM_IBSS; 323 cam_mode |= AR9170_MAC_CAM_IBSS;
324 break; 324 break;
325 case NL80211_IFTYPE_MESH_POINT:
325 case NL80211_IFTYPE_AP: 326 case NL80211_IFTYPE_AP:
326 cam_mode |= AR9170_MAC_CAM_AP; 327 cam_mode |= AR9170_MAC_CAM_AP;
327 328
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 858e58dfc4dc..67997b39aba7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
616 616
617 goto unlock; 617 goto unlock;
618 618
619 case NL80211_IFTYPE_MESH_POINT:
619 case NL80211_IFTYPE_AP: 620 case NL80211_IFTYPE_AP:
620 if ((vif->type == NL80211_IFTYPE_STATION) || 621 if ((vif->type == NL80211_IFTYPE_STATION) ||
621 (vif->type == NL80211_IFTYPE_WDS) || 622 (vif->type == NL80211_IFTYPE_WDS) ||
622 (vif->type == NL80211_IFTYPE_AP)) 623 (vif->type == NL80211_IFTYPE_AP) ||
624 (vif->type == NL80211_IFTYPE_MESH_POINT))
623 break; 625 break;
624 626
625 err = -EBUSY; 627 err = -EBUSY;
@@ -1147,6 +1149,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1147 break; 1149 break;
1148 case WLAN_CIPHER_SUITE_CCMP: 1150 case WLAN_CIPHER_SUITE_CCMP:
1149 ktype = AR9170_ENC_ALG_AESCCMP; 1151 ktype = AR9170_ENC_ALG_AESCCMP;
1152 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1150 break; 1153 break;
1151 default: 1154 default:
1152 return -EOPNOTSUPP; 1155 return -EOPNOTSUPP;
@@ -1778,6 +1781,7 @@ void *carl9170_alloc(size_t priv_size)
1778 hw->wiphy->interface_modes = 0; 1781 hw->wiphy->interface_modes = 0;
1779 1782
1780 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 1783 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1784 IEEE80211_HW_MFP_CAPABLE |
1781 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 1785 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1782 IEEE80211_HW_SUPPORTS_PS | 1786 IEEE80211_HW_SUPPORTS_PS |
1783 IEEE80211_HW_PS_NULLFUNC_STACK | 1787 IEEE80211_HW_PS_NULLFUNC_STACK |
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 6f6a34155667..a0b723078547 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
206 206
207 case NL80211_IFTYPE_AP: 207 case NL80211_IFTYPE_AP:
208 case NL80211_IFTYPE_ADHOC: 208 case NL80211_IFTYPE_ADHOC:
209 case NL80211_IFTYPE_MESH_POINT:
209 carl9170_update_beacon(ar, true); 210 carl9170_update_beacon(ar, true);
210 break; 211 break;
211 212
@@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
623#undef TID_CHECK 624#undef TID_CHECK
624} 625}
625 626
626static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms) 627static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
628 struct ieee80211_rx_status *rx_status)
627{ 629{
628 __le16 fc; 630 __le16 fc;
629 631
@@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
636 return true; 638 return true;
637 } 639 }
638 640
641 rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
642 rx_status->ampdu_reference = ar->ampdu_ref;
643
639 /* 644 /*
640 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts 645 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
641 * certain frame types can be part of an aMPDU. 646 * certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
684 if (unlikely(len < sizeof(*mac))) 689 if (unlikely(len < sizeof(*mac)))
685 goto drop; 690 goto drop;
686 691
692 memset(&status, 0, sizeof(status));
693
687 mpdu_len = len - sizeof(*mac); 694 mpdu_len = len - sizeof(*mac);
688 695
689 mac = (void *)(buf + mpdu_len); 696 mac = (void *)(buf + mpdu_len);
690 mac_status = mac->status; 697 mac_status = mac->status;
691 switch (mac_status & AR9170_RX_STATUS_MPDU) { 698 switch (mac_status & AR9170_RX_STATUS_MPDU) {
692 case AR9170_RX_STATUS_MPDU_FIRST: 699 case AR9170_RX_STATUS_MPDU_FIRST:
700 ar->ampdu_ref++;
693 /* Aggregated MPDUs start with an PLCP header */ 701 /* Aggregated MPDUs start with an PLCP header */
694 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) { 702 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
695 head = (void *) buf; 703 head = (void *) buf;
@@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
720 break; 728 break;
721 729
722 case AR9170_RX_STATUS_MPDU_LAST: 730 case AR9170_RX_STATUS_MPDU_LAST:
731 status.flag |= RX_FLAG_AMPDU_IS_LAST;
732
723 /* 733 /*
724 * The last frame of an A-MPDU has an extra tail 734 * The last frame of an A-MPDU has an extra tail
725 * which does contain the phy status of the whole 735 * which does contain the phy status of the whole
726 * aggregate. 736 * aggregate.
727 */ 737 */
728
729 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) { 738 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
730 mpdu_len -= sizeof(struct ar9170_rx_phystatus); 739 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
731 phy = (void *)(buf + mpdu_len); 740 phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
773 if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN))) 782 if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
774 goto drop; 783 goto drop;
775 784
776 memset(&status, 0, sizeof(status));
777 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) 785 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
778 goto drop; 786 goto drop;
779 787
780 if (!carl9170_ampdu_check(ar, buf, mac_status)) 788 if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
781 goto drop; 789 goto drop;
782 790
783 if (phy) 791 if (phy)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6a8681407a1d..84377cf580e0 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
867 return false; 867 return false;
868} 868}
869 869
870static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) 870static int carl9170_tx_prepare(struct ar9170 *ar,
871 struct ieee80211_sta *sta,
872 struct sk_buff *skb)
871{ 873{
872 struct ieee80211_hdr *hdr; 874 struct ieee80211_hdr *hdr;
873 struct _carl9170_tx_superframe *txc; 875 struct _carl9170_tx_superframe *txc;
874 struct carl9170_vif_info *cvif; 876 struct carl9170_vif_info *cvif;
875 struct ieee80211_tx_info *info; 877 struct ieee80211_tx_info *info;
876 struct ieee80211_tx_rate *txrate; 878 struct ieee80211_tx_rate *txrate;
877 struct ieee80211_sta *sta;
878 struct carl9170_tx_info *arinfo; 879 struct carl9170_tx_info *arinfo;
879 unsigned int hw_queue; 880 unsigned int hw_queue;
880 int i; 881 int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
910 else 911 else
911 cvif = NULL; 912 cvif = NULL;
912 913
913 sta = info->control.sta;
914
915 txc = (void *)skb_push(skb, sizeof(*txc)); 914 txc = (void *)skb_push(skb, sizeof(*txc));
916 memset(txc, 0, sizeof(*txc)); 915 memset(txc, 0, sizeof(*txc));
917 916
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
1457 return false; 1456 return false;
1458} 1457}
1459 1458
1460void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1459void carl9170_op_tx(struct ieee80211_hw *hw,
1460 struct ieee80211_tx_control *control,
1461 struct sk_buff *skb)
1461{ 1462{
1462 struct ar9170 *ar = hw->priv; 1463 struct ar9170 *ar = hw->priv;
1463 struct ieee80211_tx_info *info; 1464 struct ieee80211_tx_info *info;
1464 struct ieee80211_sta *sta; 1465 struct ieee80211_sta *sta = control->sta;
1465 bool run; 1466 bool run;
1466 1467
1467 if (unlikely(!IS_STARTED(ar))) 1468 if (unlikely(!IS_STARTED(ar)))
1468 goto err_free; 1469 goto err_free;
1469 1470
1470 info = IEEE80211_SKB_CB(skb); 1471 info = IEEE80211_SKB_CB(skb);
1471 sta = info->control.sta;
1472 1472
1473 if (unlikely(carl9170_tx_prepare(ar, skb))) 1473 if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
1474 goto err_free; 1474 goto err_free;
1475 1475
1476 carl9170_tx_accounting(ar, skb); 1476 carl9170_tx_accounting(ar, skb);
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 4648bbf76abc..098fe9ee7096 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -4,6 +4,7 @@ b43-y += tables.o
4b43-$(CONFIG_B43_PHY_N) += tables_nphy.o 4b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
5b43-$(CONFIG_B43_PHY_N) += radio_2055.o 5b43-$(CONFIG_B43_PHY_N) += radio_2055.o
6b43-$(CONFIG_B43_PHY_N) += radio_2056.o 6b43-$(CONFIG_B43_PHY_N) += radio_2056.o
7b43-$(CONFIG_B43_PHY_N) += radio_2057.o
7b43-y += phy_common.o 8b43-y += phy_common.o
8b43-y += phy_g.o 9b43-y += phy_g.o
9b43-y += phy_a.o 10b43-y += phy_a.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7c899fc7ddd0..b298e5d68be2 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -241,16 +241,18 @@ enum {
241#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ 241#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */
242#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ 242#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */
243#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ 243#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */
244#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ 244#define B43_SHM_SH_HOSTF1 0x005E /* Hostflags 1 for ucode options */
245#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */ 245#define B43_SHM_SH_HOSTF2 0x0060 /* Hostflags 2 for ucode options */
246#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */ 246#define B43_SHM_SH_HOSTF3 0x0062 /* Hostflags 3 for ucode options */
247#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ 247#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */
248#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ 248#define B43_SHM_SH_RADAR 0x0066 /* Radar register */
249#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ 249#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */
250#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ 250#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */
251#define B43_SHM_SH_HOSTF4 0x0078 /* Hostflags 4 for ucode options */
251#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ 252#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */
252#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */ 253#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */
253#define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */ 254#define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */
255#define B43_SHM_SH_HOSTF5 0x00D4 /* Hostflags 5 for ucode options */
254#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ 256#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */
255/* TSSI information */ 257/* TSSI information */
256#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */ 258#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@ enum {
415#define B43_PHYTYPE_HT 0x07 417#define B43_PHYTYPE_HT 0x07
416#define B43_PHYTYPE_LCN 0x08 418#define B43_PHYTYPE_LCN 0x08
417#define B43_PHYTYPE_LCNXN 0x09 419#define B43_PHYTYPE_LCNXN 0x09
420#define B43_PHYTYPE_LCN40 0x0a
421#define B43_PHYTYPE_AC 0x0b
418 422
419/* PHYRegisters */ 423/* PHYRegisters */
420#define B43_PHY_ILT_A_CTRL 0x0072 424#define B43_PHY_ILT_A_CTRL 0x0072
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a140165dfee0..73730e94e0ac 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev)
533{ 533{
534 u64 ret; 534 u64 ret;
535 535
536 ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); 536 ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
537 ret <<= 16; 537 ret <<= 16;
538 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI); 538 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
539 ret <<= 16; 539 ret <<= 16;
540 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); 540 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
541 541
542 return ret; 542 return ret;
543} 543}
@@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
550 lo = (value & 0x00000000FFFFULL); 550 lo = (value & 0x00000000FFFFULL);
551 mi = (value & 0x0000FFFF0000ULL) >> 16; 551 mi = (value & 0x0000FFFF0000ULL) >> 16;
552 hi = (value & 0xFFFF00000000ULL) >> 32; 552 hi = (value & 0xFFFF00000000ULL) >> 32;
553 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo); 553 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
554 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi); 554 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
555 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); 555 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
556} 556}
557 557
558/* Read the firmware capabilities bitmask (Opensource firmware only) */ 558/* Read the firmware capabilities bitmask (Opensource firmware only) */
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
3412} 3412}
3413 3413
3414static void b43_op_tx(struct ieee80211_hw *hw, 3414static void b43_op_tx(struct ieee80211_hw *hw,
3415 struct sk_buff *skb) 3415 struct ieee80211_tx_control *control,
3416 struct sk_buff *skb)
3416{ 3417{
3417 struct b43_wl *wl = hw_to_b43_wl(hw); 3418 struct b43_wl *wl = hw_to_b43_wl(hw);
3418 3419
@@ -4282,6 +4283,35 @@ out:
4282 return err; 4283 return err;
4283} 4284}
4284 4285
4286static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
4287{
4288 switch (phy_type) {
4289 case B43_PHYTYPE_A:
4290 return "A";
4291 case B43_PHYTYPE_B:
4292 return "B";
4293 case B43_PHYTYPE_G:
4294 return "G";
4295 case B43_PHYTYPE_N:
4296 return "N";
4297 case B43_PHYTYPE_LP:
4298 return "LP";
4299 case B43_PHYTYPE_SSLPN:
4300 return "SSLPN";
4301 case B43_PHYTYPE_HT:
4302 return "HT";
4303 case B43_PHYTYPE_LCN:
4304 return "LCN";
4305 case B43_PHYTYPE_LCNXN:
4306 return "LCNXN";
4307 case B43_PHYTYPE_LCN40:
4308 return "LCN40";
4309 case B43_PHYTYPE_AC:
4310 return "AC";
4311 }
4312 return "UNKNOWN";
4313}
4314
4285/* Get PHY and RADIO versioning numbers */ 4315/* Get PHY and RADIO versioning numbers */
4286static int b43_phy_versioning(struct b43_wldev *dev) 4316static int b43_phy_versioning(struct b43_wldev *dev)
4287{ 4317{
@@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4342 unsupported = 1; 4372 unsupported = 1;
4343 } 4373 }
4344 if (unsupported) { 4374 if (unsupported) {
4345 b43err(dev->wl, "FOUND UNSUPPORTED PHY " 4375 b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
4346 "(Analog %u, Type %u, Revision %u)\n", 4376 analog_type, phy_type, b43_phy_name(dev, phy_type),
4347 analog_type, phy_type, phy_rev); 4377 phy_rev);
4348 return -EOPNOTSUPP; 4378 return -EOPNOTSUPP;
4349 } 4379 }
4350 b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", 4380 b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
4351 analog_type, phy_type, phy_rev); 4381 analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
4352 4382
4353 /* Get RADIO versioning */ 4383 /* Get RADIO versioning */
4354 if (dev->dev->core_rev >= 24) { 4384 if (dev->dev->core_rev >= 24) {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3f8883b14d9c..f01676ac481b 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
240 (b43_radio_read16(dev, offset) & mask) | set); 240 (b43_radio_read16(dev, offset) & mask) | set);
241} 241}
242 242
243bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
244 u16 value, int delay, int timeout)
245{
246 u16 val;
247 int i;
248
249 for (i = 0; i < timeout; i += delay) {
250 val = b43_radio_read(dev, offset);
251 if ((val & mask) == value)
252 return true;
253 udelay(delay);
254 }
255 return false;
256}
257
243u16 b43_phy_read(struct b43_wldev *dev, u16 reg) 258u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
244{ 259{
245 assert_mac_suspended(dev); 260 assert_mac_suspended(dev);
@@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
428 average = (a + b + c + d + 2) / 4; 443 average = (a + b + c + d + 2) / 4;
429 if (is_ofdm) { 444 if (is_ofdm) {
430 /* Adjust for CCK-boost */ 445 /* Adjust for CCK-boost */
431 if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO) 446 if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
432 & B43_HF_CCKBOOST) 447 & B43_HF_CCKBOOST)
433 average = (average >= 13) ? (average - 13) : 0; 448 average = (average >= 13) ? (average - 13) : 0;
434 } 449 }
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9233b13fc16d..f1b999349876 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -365,6 +365,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
365void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set); 365void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
366 366
367/** 367/**
368 * b43_radio_wait_value - Waits for a given value in masked register read
369 */
370bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
371 u16 value, int delay, int timeout);
372
373/**
368 * b43_radio_lock - Lock firmware radio register access 374 * b43_radio_lock - Lock firmware radio register access
369 */ 375 */
370void b43_radio_lock(struct b43_wldev *dev); 376void b43_radio_lock(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b92bb9c92ad1..3c35382ee6c2 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -32,6 +32,7 @@
32#include "tables_nphy.h" 32#include "tables_nphy.h"
33#include "radio_2055.h" 33#include "radio_2055.h"
34#include "radio_2056.h" 34#include "radio_2056.h"
35#include "radio_2057.h"
35#include "main.h" 36#include "main.h"
36 37
37struct nphy_txgains { 38struct nphy_txgains {
@@ -126,6 +127,46 @@ ok:
126 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); 127 b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
127} 128}
128 129
130/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
131static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
132 u16 value, u8 core, bool off,
133 u8 override)
134{
135 const struct nphy_rf_control_override_rev7 *e;
136 u16 en_addrs[3][2] = {
137 { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
138 };
139 u16 en_addr;
140 u16 en_mask = field;
141 u16 val_addr;
142 u8 i;
143
144 /* Remember: we can get NULL! */
145 e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
146
147 for (i = 0; i < 2; i++) {
148 if (override >= ARRAY_SIZE(en_addrs)) {
149 b43err(dev->wl, "Invalid override value %d\n", override);
150 return;
151 }
152 en_addr = en_addrs[override][i];
153
154 val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
155
156 if (off) {
157 b43_phy_mask(dev, en_addr, ~en_mask);
158 if (e) /* Do it safer, better than wl */
159 b43_phy_mask(dev, val_addr, ~e->val_mask);
160 } else {
161 if (!core || (core & (1 << i))) {
162 b43_phy_set(dev, en_addr, en_mask);
163 if (e)
164 b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
165 }
166 }
167 }
168}
169
129/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ 170/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
130static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, 171static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
131 u16 value, u8 core, bool off) 172 u16 value, u8 core, bool off)
@@ -459,6 +500,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
459} 500}
460 501
461/************************************************** 502/**************************************************
503 * Radio 0x2057
504 **************************************************/
505
506/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
507static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
508{
509 struct b43_phy *phy = &dev->phy;
510 u16 tmp;
511
512 if (phy->radio_rev == 5) {
513 b43_phy_mask(dev, 0x342, ~0x2);
514 udelay(10);
515 b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
516 b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
517 }
518
519 b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
520 udelay(10);
521 b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
522 if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
523 b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
524 return 0;
525 }
526 b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
527 tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
528 b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
529
530 if (phy->radio_rev == 5) {
531 b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
532 b43_radio_mask(dev, 0x1ca, ~0x2);
533 }
534 if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
535 b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
536 b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
537 tmp << 2);
538 }
539
540 return tmp & 0x3e;
541}
542
543/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
544static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
545{
546 struct b43_phy *phy = &dev->phy;
547 bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
548 phy->radio_rev == 6);
549 u16 tmp;
550
551 if (special) {
552 b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
553 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
554 } else {
555 b43_radio_write(dev, 0x1AE, 0x61);
556 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
557 }
558 b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
559 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
560 if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
561 5000000))
562 b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
563 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
564 if (special) {
565 b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
566 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
567 } else {
568 b43_radio_write(dev, 0x1AE, 0x69);
569 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
570 }
571 b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
572 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
573 if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
574 5000000))
575 b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
576 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
577 if (special) {
578 b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
579 b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
580 b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
581 } else {
582 b43_radio_write(dev, 0x1AE, 0x73);
583 b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
584 b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
585 }
586 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
587 if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
588 5000000)) {
589 b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
590 return 0;
591 }
592 tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
593 b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
594 return tmp;
595}
596
597static void b43_radio_2057_init_pre(struct b43_wldev *dev)
598{
599 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
600 /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
601 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
602 b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
603 b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
604}
605
606static void b43_radio_2057_init_post(struct b43_wldev *dev)
607{
608 b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
609
610 b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
611 b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
612 mdelay(2);
613 b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
614 b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
615
616 if (dev->phy.n->init_por) {
617 b43_radio_2057_rcal(dev);
618 b43_radio_2057_rccal(dev);
619 }
620 b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
621
622 dev->phy.n->init_por = false;
623}
624
625/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
626static void b43_radio_2057_init(struct b43_wldev *dev)
627{
628 b43_radio_2057_init_pre(dev);
629 r2057_upload_inittabs(dev);
630 b43_radio_2057_init_post(dev);
631}
632
633/**************************************************
462 * Radio 0x2056 634 * Radio 0x2056
463 **************************************************/ 635 **************************************************/
464 636
@@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
545 enum ieee80211_band band = b43_current_band(dev->wl); 717 enum ieee80211_band band = b43_current_band(dev->wl);
546 u16 offset; 718 u16 offset;
547 u8 i; 719 u8 i;
548 u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; 720 u16 bias, cbias;
721 u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
722 u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
549 723
550 B43_WARN_ON(dev->phy.rev < 3); 724 B43_WARN_ON(dev->phy.rev < 3);
551 725
@@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
630 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); 804 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
631 } 805 }
632 } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { 806 } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
633 /* TODO */ 807 u16 freq = dev->phy.channel_freq;
808 if (freq < 5100) {
809 paa_boost = 0xA;
810 pada_boost = 0x77;
811 pgaa_boost = 0xF;
812 mixa_boost = 0xF;
813 } else if (freq < 5340) {
814 paa_boost = 0x8;
815 pada_boost = 0x77;
816 pgaa_boost = 0xFB;
817 mixa_boost = 0xF;
818 } else if (freq < 5650) {
819 paa_boost = 0x0;
820 pada_boost = 0x77;
821 pgaa_boost = 0xB;
822 mixa_boost = 0xF;
823 } else {
824 paa_boost = 0x0;
825 pada_boost = 0x77;
826 if (freq != 5825)
827 pgaa_boost = -(freq - 18) / 36 + 168;
828 else
829 pgaa_boost = 6;
830 mixa_boost = 0xF;
831 }
832
833 for (i = 0; i < 2; i++) {
834 offset = i ? B2056_TX1 : B2056_TX0;
835
836 b43_radio_write(dev,
837 offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
838 b43_radio_write(dev,
839 offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
840 b43_radio_write(dev,
841 offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
842 b43_radio_write(dev,
843 offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
844 b43_radio_write(dev,
845 offset | B2056_TX_TXSPARE1, 0x30);
846 b43_radio_write(dev,
847 offset | B2056_TX_PA_SPARE2, 0xee);
848 b43_radio_write(dev,
849 offset | B2056_TX_PADA_CASCBIAS, 0x03);
850 b43_radio_write(dev,
851 offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
852 b43_radio_write(dev,
853 offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
854 b43_radio_write(dev,
855 offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
856 }
634 } 857 }
635 858
636 udelay(50); 859 udelay(50);
@@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
643 udelay(300); 866 udelay(300);
644} 867}
645 868
869static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
870{
871 struct b43_phy *phy = &dev->phy;
872 u16 mast2, tmp;
873
874 if (phy->rev != 3)
875 return 0;
876
877 mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
878 b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
879
880 udelay(10);
881 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
882 udelay(10);
883 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
884
885 if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
886 1000000)) {
887 b43err(dev->wl, "Radio recalibration timeout\n");
888 return 0;
889 }
890
891 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
892 tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
893 b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
894
895 b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
896
897 return tmp & 0x1f;
898}
899
646static void b43_radio_init2056_pre(struct b43_wldev *dev) 900static void b43_radio_init2056_pre(struct b43_wldev *dev)
647{ 901{
648 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, 902 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
665 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); 919 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
666 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); 920 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
667 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); 921 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
668 /* 922 if (dev->phy.n->init_por)
669 if (nphy->init_por) 923 b43_radio_2056_rcal(dev);
670 Call Radio 2056 Recalibrate
671 */
672} 924}
673 925
674/* 926/*
@@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev)
680 b43_radio_init2056_pre(dev); 932 b43_radio_init2056_pre(dev);
681 b2056_upload_inittabs(dev, 0, 0); 933 b2056_upload_inittabs(dev, 0, 0);
682 b43_radio_init2056_post(dev); 934 b43_radio_init2056_post(dev);
935
936 dev->phy.n->init_por = false;
683} 937}
684 938
685/************************************************** 939/**************************************************
@@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
753{ 1007{
754 struct b43_phy_n *nphy = dev->phy.n; 1008 struct b43_phy_n *nphy = dev->phy.n;
755 struct ssb_sprom *sprom = dev->dev->bus_sprom; 1009 struct ssb_sprom *sprom = dev->dev->bus_sprom;
756 int i;
757 u16 val;
758 bool workaround = false; 1010 bool workaround = false;
759 1011
760 if (sprom->revision < 4) 1012 if (sprom->revision < 4)
@@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
777 b43_radio_set(dev, B2055_CAL_MISC, 0x1); 1029 b43_radio_set(dev, B2055_CAL_MISC, 0x1);
778 msleep(1); 1030 msleep(1);
779 b43_radio_set(dev, B2055_CAL_MISC, 0x40); 1031 b43_radio_set(dev, B2055_CAL_MISC, 0x40);
780 for (i = 0; i < 200; i++) { 1032 if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
781 val = b43_radio_read(dev, B2055_CAL_COUT2);
782 if (val & 0x80) {
783 i = 0;
784 break;
785 }
786 udelay(10);
787 }
788 if (i)
789 b43err(dev->wl, "radio post init timeout\n"); 1033 b43err(dev->wl, "radio post init timeout\n");
790 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 1034 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
791 b43_switch_channel(dev, dev->phy.channel); 1035 b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
1860/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 2104/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
1861static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) 2105static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
1862{ 2106{
1863 if (dev->phy.rev >= 3) 2107 if (dev->phy.rev >= 7)
2108 ; /* TODO */
2109 else if (dev->phy.rev >= 3)
1864 b43_nphy_gain_ctl_workarounds_rev3plus(dev); 2110 b43_nphy_gain_ctl_workarounds_rev3plus(dev);
1865 else 2111 else
1866 b43_nphy_gain_ctl_workarounds_rev1_2(dev); 2112 b43_nphy_gain_ctl_workarounds_rev1_2(dev);
1867} 2113}
1868 2114
2115/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
2116static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
2117{
2118 if (!offset)
2119 offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
2120 return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
2121}
2122
/*
 * PHY workarounds for N-PHY revisions 7 and later.
 * Applies a long, order-sensitive sequence of PHY register, radio register
 * and PHY-table fixups during initialization. Register addresses and values
 * come from reverse-engineered vendor behavior (bcm-v4) -- their individual
 * semantics are mostly undocumented, so comments below describe structure,
 * not hardware meaning.
 */
static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy *phy = &dev->phy;

	/* RX->TX RF sequence events/delays used on internal-PA devices */
	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
					0x1F };
	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };

	u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
	u8 ntab7_138_146[] = { 0x11, 0x11 };
	u8 ntab7_133[] = { 0x77, 0x11, 0x11 };

	u16 lpf_20, lpf_40, lpf_11b;
	u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
	u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
	bool rccal_ovrd = false;

	u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
	u16 bias, conv, filt;

	u32 tmp32;
	u8 core;

	/* Frequency-gain table tweaks apply to PHY rev 7 only */
	if (phy->rev == 7) {
		b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
	}
	if (phy->rev <= 8) {
		b43_phy_write(dev, 0x23F, 0x1B0);
		b43_phy_write(dev, 0x240, 0x1B0);
	}
	if (phy->rev >= 8)
		b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);

	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
	/* Clear the top byte of table 30, entry 0 */
	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
	tmp32 &= 0xffffff;
	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);

	/* Internal-PA devices get a dedicated RX->TX RF sequence */
	if (b43_nphy_ipa(dev))
		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
				rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));

	b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
	b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);

	/*
	 * Read the current LPF bandwidth control entries, then decide per
	 * radio revision whether to override the RC calibration values.
	 * The *_11b/*_11n_20/*_11n_40 variables are only consumed when
	 * rccal_ovrd is set, and every path that sets rccal_ovrd also
	 * assigns all of them.
	 */
	lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
	lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
	lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
	if (b43_nphy_ipa(dev)) {
		if ((phy->radio_rev == 5 && phy->is_40mhz) ||
		    phy->radio_rev == 7 || phy->radio_rev == 8) {
			bcap_val = b43_radio_read(dev, 0x16b);
			scap_val = b43_radio_read(dev, 0x16a);
			scap_val_11b = scap_val;
			bcap_val_11b = bcap_val;
			if (phy->radio_rev == 5 && phy->is_40mhz) {
				scap_val_11n_20 = scap_val;
				bcap_val_11n_20 = bcap_val;
				scap_val_11n_40 = bcap_val_11n_40 = 0xc;
				rccal_ovrd = true;
			} else { /* Rev 7/8 */
				lpf_20 = 4;
				lpf_11b = 1;
				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
					scap_val_11n_20 = 0xc;
					bcap_val_11n_20 = 0xc;
					scap_val_11n_40 = 0xa;
					bcap_val_11n_40 = 0xa;
				} else {
					scap_val_11n_20 = 0x14;
					bcap_val_11n_20 = 0x14;
					scap_val_11n_40 = 0xf;
					bcap_val_11n_40 = 0xf;
				}
				rccal_ovrd = true;
			}
		}
	} else {
		if (phy->radio_rev == 5) {
			lpf_20 = 1;
			lpf_40 = 3;
			bcap_val = b43_radio_read(dev, 0x16b);
			scap_val = b43_radio_read(dev, 0x16a);
			scap_val_11b = scap_val;
			bcap_val_11b = bcap_val;
			scap_val_11n_20 = 0x11;
			scap_val_11n_40 = 0x11;
			bcap_val_11n_20 = 0x13;
			bcap_val_11n_40 = 0x13;
			rccal_ovrd = true;
		}
	}
	if (rccal_ovrd) {
		/* Pack bcap/scap/lpf into table-7 RX->TX LUT words:
		 * bits 8+: bcap, bits 3-7: scap, bits 0-2: lpf */
		rx2tx_lut_20_11b = (bcap_val_11b << 8) |
				   (scap_val_11b << 3) |
				   lpf_11b;
		rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
				   (scap_val_11n_20 << 3) |
				   lpf_20;
		rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
				   (scap_val_11n_40 << 3) |
				   lpf_40;
		/* Entries 0x152-0x159, repeated per core at stride 16 */
		for (core = 0; core < 2; core++) {
			b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
				       rx2tx_lut_20_11b);
			b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
				       rx2tx_lut_20_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
				       rx2tx_lut_20_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
				       rx2tx_lut_40_11n);
			b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
				       rx2tx_lut_40_11n);
		}
		b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
	}
	b43_phy_write(dev, 0x32F, 0x3);
	if (phy->radio_rev == 4 || phy->radio_rev == 6)
		b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);

	/* Bias/converter/filter radio setup for radio revs 3, 4 and 6 */
	if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
		if (sprom->revision &&
		    sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
			b43_radio_write(dev, 0x5, 0x05);
			b43_radio_write(dev, 0x6, 0x30);
			b43_radio_write(dev, 0x7, 0x00);
			b43_radio_set(dev, 0x4f, 0x1);
			b43_radio_set(dev, 0xd4, 0x1);
			bias = 0x1f;
			conv = 0x6f;
			filt = 0xaa;
		} else {
			bias = 0x2b;
			conv = 0x7f;
			filt = 0xee;
		}
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			/* Core 0 and core 1 use different register addresses */
			for (core = 0; core < 2; core++) {
				if (core == 0) {
					b43_radio_write(dev, 0x5F, bias);
					b43_radio_write(dev, 0x64, conv);
					b43_radio_write(dev, 0x66, filt);
				} else {
					b43_radio_write(dev, 0xE8, bias);
					b43_radio_write(dev, 0xE9, conv);
					b43_radio_write(dev, 0xEB, filt);
				}
			}
		}
	}

	/* Internal vs external PA specific radio programming */
	if (b43_nphy_ipa(dev)) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
			    phy->radio_rev == 6) {
				for (core = 0; core < 2; core++) {
					if (core == 0)
						b43_radio_write(dev, 0x51,
								0x7f);
					else
						b43_radio_write(dev, 0xd6,
								0x7f);
				}
			}
			if (phy->radio_rev == 3) {
				for (core = 0; core < 2; core++) {
					if (core == 0) {
						b43_radio_write(dev, 0x64,
								0x13);
						b43_radio_write(dev, 0x5F,
								0x1F);
						b43_radio_write(dev, 0x66,
								0xEE);
						b43_radio_write(dev, 0x59,
								0x8A);
						b43_radio_write(dev, 0x80,
								0x3E);
					} else {
						b43_radio_write(dev, 0x69,
								0x13);
						b43_radio_write(dev, 0xE8,
								0x1F);
						b43_radio_write(dev, 0xEB,
								0xEE);
						b43_radio_write(dev, 0xDE,
								0x8A);
						b43_radio_write(dev, 0x105,
								0x3E);
					}
				}
			} else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
				if (!phy->is_40mhz) {
					b43_radio_write(dev, 0x5F, 0x14);
					b43_radio_write(dev, 0xE8, 0x12);
				} else {
					b43_radio_write(dev, 0x5F, 0x16);
					b43_radio_write(dev, 0xE8, 0x16);
				}
			}
		} else {
			/* 5 GHz: extra programming only for two sub-bands */
			u16 freq = phy->channel_freq;
			if ((freq >= 5180 && freq <= 5230) ||
			    (freq >= 5745 && freq <= 5805)) {
				b43_radio_write(dev, 0x7D, 0xFF);
				b43_radio_write(dev, 0xFE, 0xFF);
			}
		}
	} else {
		if (phy->radio_rev != 5) {
			for (core = 0; core < 2; core++) {
				if (core == 0) {
					b43_radio_write(dev, 0x5c, 0x61);
					b43_radio_write(dev, 0x51, 0x70);
				} else {
					b43_radio_write(dev, 0xe1, 0x61);
					b43_radio_write(dev, 0xd6, 0x70);
				}
			}
		}
	}

	/* AFE control: radio rev 4 programs radio regs, others toggle
	 * the AFECTL override bits around the table-8 writes */
	if (phy->radio_rev == 4) {
		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
		for (core = 0; core < 2; core++) {
			if (core == 0) {
				b43_radio_write(dev, 0x1a1, 0x00);
				b43_radio_write(dev, 0x1a2, 0x3f);
				b43_radio_write(dev, 0x1a6, 0x3f);
			} else {
				b43_radio_write(dev, 0x1a7, 0x00);
				b43_radio_write(dev, 0x1ab, 0x3f);
				b43_radio_write(dev, 0x1ac, 0x3f);
			}
		}
	} else {
		b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
		b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);

		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);

		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
	}

	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);

	b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
	b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
	b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
	b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);

	/* Bandwidth-dependent table-16 entries (0x18D for 20 MHz,
	 * 0x14D for 40 MHz) */
	if (!phy->is_40mhz) {
		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
	} else {
		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
	}

	b43_nphy_gain_ctl_workarounds(dev);

	/* TODO
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
			    aux_adc_vmid_rev7_core0);
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
			    aux_adc_vmid_rev7_core1);
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
			    aux_adc_gain_rev7);
	b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
			    aux_adc_gain_rev7);
	*/
}
2434
1869static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) 2435static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1870{ 2436{
1871 struct b43_phy_n *nphy = dev->phy.n; 2437 struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1916 rx2tx_delays[6] = 1; 2482 rx2tx_delays[6] = 1;
1917 rx2tx_events[7] = 0x1F; 2483 rx2tx_events[7] = 0x1F;
1918 } 2484 }
1919 b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 2485 b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
1920 ARRAY_SIZE(rx2tx_events)); 2486 ARRAY_SIZE(rx2tx_events));
1921 } 2487 }
1922 2488
@@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1926 2492
1927 b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); 2493 b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
1928 2494
1929 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); 2495 if (!dev->phy.is_40mhz) {
1930 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); 2496 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
2497 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
2498 } else {
2499 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
2500 b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
2501 }
1931 2502
1932 b43_nphy_gain_ctl_workarounds(dev); 2503 b43_nphy_gain_ctl_workarounds(dev);
1933 2504
@@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1963 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); 2534 b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
1964 2535
1965 if (dev->phy.rev == 4 && 2536 if (dev->phy.rev == 4 &&
1966 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 2537 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1967 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, 2538 b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
1968 0x70); 2539 0x70);
1969 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, 2540 b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
1970 0x70); 2541 0x70);
1971 } 2542 }
1972 2543
2544 /* Dropped probably-always-true condition */
1973 b43_phy_write(dev, 0x224, 0x03eb); 2545 b43_phy_write(dev, 0x224, 0x03eb);
1974 b43_phy_write(dev, 0x225, 0x03eb); 2546 b43_phy_write(dev, 0x225, 0x03eb);
1975 b43_phy_write(dev, 0x226, 0x0341); 2547 b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1982 b43_phy_write(dev, 0x22d, 0x042b); 2554 b43_phy_write(dev, 0x22d, 0x042b);
1983 b43_phy_write(dev, 0x22e, 0x0381); 2555 b43_phy_write(dev, 0x22e, 0x0381);
1984 b43_phy_write(dev, 0x22f, 0x0381); 2556 b43_phy_write(dev, 0x22f, 0x0381);
2557
2558 if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
2559 ; /* TODO: 0x0080000000000000 HF */
1985} 2560}
1986 2561
1987static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) 2562static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
1996 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; 2571 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
1997 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; 2572 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
1998 2573
2574 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
2575 dev->dev->board_type == 0x8B) {
2576 delays1[0] = 0x1;
2577 delays1[5] = 0x14;
2578 }
2579
1999 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && 2580 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
2000 nphy->band5g_pwrgain) { 2581 nphy->band5g_pwrgain) {
2001 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); 2582 b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2007 2588
2008 b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A); 2589 b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
2009 b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A); 2590 b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
2010 b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); 2591 if (dev->phy.rev < 3) {
2011 b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA); 2592 b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
2593 b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
2594 }
2012 2595
2013 if (dev->phy.rev < 2) { 2596 if (dev->phy.rev < 2) {
2014 b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000); 2597 b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2024 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); 2607 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
2025 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); 2608 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
2026 2609
2027 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
2028 dev->dev->board_type == 0x8B) {
2029 delays1[0] = 0x1;
2030 delays1[5] = 0x14;
2031 }
2032 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); 2610 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
2033 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); 2611 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
2034 2612
@@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2055 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD); 2633 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
2056 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); 2634 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
2057 2635
2058 b43_phy_mask(dev, B43_NPHY_PIL_DW1, 2636 if (dev->phy.rev < 3) {
2059 ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); 2637 b43_phy_mask(dev, B43_NPHY_PIL_DW1,
2060 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); 2638 ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
2061 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); 2639 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
2062 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); 2640 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
2641 b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
2642 }
2063 2643
2064 if (dev->phy.rev == 2) 2644 if (dev->phy.rev == 2)
2065 b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 2645 b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
2083 b43_phy_set(dev, B43_NPHY_IQFLIP, 2663 b43_phy_set(dev, B43_NPHY_IQFLIP,
2084 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); 2664 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
2085 2665
2086 if (dev->phy.rev >= 3) 2666 if (dev->phy.rev >= 7)
2667 b43_nphy_workarounds_rev7plus(dev);
2668 else if (dev->phy.rev >= 3)
2087 b43_nphy_workarounds_rev3plus(dev); 2669 b43_nphy_workarounds_rev3plus(dev);
2088 else 2670 else
2089 b43_nphy_workarounds_rev1_2(dev); 2671 b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
2542 b43_nphy_ipa_internal_tssi_setup(dev); 3124 b43_nphy_ipa_internal_tssi_setup(dev);
2543 3125
2544 if (phy->rev >= 7) 3126 if (phy->rev >= 7)
2545 ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */ 3127 b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
2546 else if (phy->rev >= 3) 3128 else if (phy->rev >= 3)
2547 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false); 3129 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
2548 3130
@@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
2554 b43_nphy_rssi_select(dev, 0, 0); 3136 b43_nphy_rssi_select(dev, 0, 0);
2555 3137
2556 if (phy->rev >= 7) 3138 if (phy->rev >= 7)
2557 ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */ 3139 b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
2558 else if (phy->rev >= 3) 3140 else if (phy->rev >= 3)
2559 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true); 3141 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
2560 3142
@@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
4761 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); 5343 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
4762 nphy->spur_avoid = (phy->rev >= 3) ? 5344 nphy->spur_avoid = (phy->rev >= 3) ?
4763 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; 5345 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
5346 nphy->init_por = true;
4764 nphy->gain_boost = true; /* this way we follow wl, assume it is true */ 5347 nphy->gain_boost = true; /* this way we follow wl, assume it is true */
4765 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ 5348 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
4766 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ 5349 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
4801 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; 5384 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
4802 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; 5385 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
4803 } 5386 }
5387
5388 nphy->init_por = true;
4804} 5389}
4805 5390
4806static void b43_nphy_op_free(struct b43_wldev *dev) 5391static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
4887 if (blocked) { 5472 if (blocked) {
4888 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, 5473 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
4889 ~B43_NPHY_RFCTL_CMD_CHIP0PU); 5474 ~B43_NPHY_RFCTL_CMD_CHIP0PU);
4890 if (dev->phy.rev >= 3) { 5475 if (dev->phy.rev >= 7) {
5476 /* TODO */
5477 } else if (dev->phy.rev >= 3) {
4891 b43_radio_mask(dev, 0x09, ~0x2); 5478 b43_radio_mask(dev, 0x09, ~0x2);
4892 5479
4893 b43_radio_write(dev, 0x204D, 0); 5480 b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
4905 b43_radio_write(dev, 0x3064, 0); 5492 b43_radio_write(dev, 0x3064, 0);
4906 } 5493 }
4907 } else { 5494 } else {
4908 if (dev->phy.rev >= 3) { 5495 if (dev->phy.rev >= 7) {
5496 b43_radio_2057_init(dev);
5497 b43_switch_channel(dev, dev->phy.channel);
5498 } else if (dev->phy.rev >= 3) {
4909 b43_radio_init2056(dev); 5499 b43_radio_init2056(dev);
4910 b43_switch_channel(dev, dev->phy.channel); 5500 b43_switch_channel(dev, dev->phy.channel);
4911 } else { 5501 } else {
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fd12b386fea1..092c0140c249 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -785,6 +785,7 @@ struct b43_phy_n {
785 u16 papd_epsilon_offset[2]; 785 u16 papd_epsilon_offset[2];
786 s32 preamble_override; 786 s32 preamble_override;
787 u32 bb_mult_save; 787 u32 bb_mult_save;
788 bool init_por;
788 789
789 bool gain_boost; 790 bool gain_boost;
790 bool elna_gain_config; 791 bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644
index 000000000000..d61d6830c5c7
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.c
@@ -0,0 +1,141 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11n 2057 radio device data tables
5
6 Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
21 Boston, MA 02110-1301, USA.
22
23*/
24
25#include "b43.h"
26#include "radio_2057.h"
27#include "phy_common.h"
28
/* 2057 radio init values for PHY rev 7 boards: {register, value} pairs */
static u16 r2057_rev4_init[42][2] = {
	{ 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
	{ 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
	{ 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
	{ 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
	{ 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
	{ 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
	{ 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
	{ 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
	{ 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
	{ 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
	{ 0x1AB, 0x00 }, { 0x1AC, 0x00 },
};

/* 2057 radio rev 5 init values: {register, value} pairs */
static u16 r2057_rev5_init[44][2] = {
	{ 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
	{ 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
	{ 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
	{ 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
	{ 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
	{ 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
	{ 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
	{ 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
	{ 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
	{ 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
	{ 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
};

/* 2057 radio rev 5 "a" variant init values: {register, value} pairs */
static u16 r2057_rev5a_init[45][2] = {
	{ 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
	{ 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
	{ 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
	{ 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
	{ 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
	{ 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
	{ 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
	{ 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
	{ 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
	{ 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
	{ 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
	{ 0x1C2, 0x80 },
};

/* 2057 radio rev 7 init values: {register, value} pairs */
static u16 r2057_rev7_init[54][2] = {
	{ 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
	{ 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
	{ 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
	{ 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
	{ 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
	{ 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
	{ 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
	{ 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
	{ 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
	{ 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
	{ 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
	{ 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
	{ 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
	{ 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
};

/* 2057 radio rev 8 init values (selected for radio_rev == 9 in
 * r2057_upload_inittabs): {register, value} pairs */
static u16 r2057_rev8_init[54][2] = {
	{ 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
	{ 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
	{ 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
	{ 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
	{ 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
	{ 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
	{ 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
	{ 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
	{ 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
	{ 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
	{ 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
	{ 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
	{ 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
	{ 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
};
105
106void r2057_upload_inittabs(struct b43_wldev *dev)
107{
108 struct b43_phy *phy = &dev->phy;
109 u16 *table = NULL;
110 u16 size, i;
111
112 if (phy->rev == 7) {
113 table = r2057_rev4_init[0];
114 size = ARRAY_SIZE(r2057_rev4_init);
115 } else if (phy->rev == 8 || phy->rev == 9) {
116 if (phy->radio_rev == 5) {
117 if (phy->radio_rev == 8) {
118 table = r2057_rev5_init[0];
119 size = ARRAY_SIZE(r2057_rev5_init);
120 } else {
121 table = r2057_rev5a_init[0];
122 size = ARRAY_SIZE(r2057_rev5a_init);
123 }
124 } else if (phy->radio_rev == 7) {
125 table = r2057_rev7_init[0];
126 size = ARRAY_SIZE(r2057_rev7_init);
127 } else if (phy->radio_rev == 9) {
128 table = r2057_rev8_init[0];
129 size = ARRAY_SIZE(r2057_rev8_init);
130 }
131 }
132
133 if (table) {
134 for (i = 0; i < 10; i++) {
135 pr_info("radio_write 0x%X ", *table);
136 table++;
137 pr_info("0x%X\n", *table);
138 table++;
139 }
140 }
141}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644
index 000000000000..eeebd8fbeb0d
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.h
@@ -0,0 +1,430 @@
1#ifndef B43_RADIO_2057_H_
2#define B43_RADIO_2057_H_
3
4#include <linux/types.h>
5
6#include "tables_nphy.h"
7
8#define R2057_DACBUF_VINCM_CORE0 0x000
9#define R2057_IDCODE 0x001
10#define R2057_RCCAL_MASTER 0x002
11#define R2057_RCCAL_CAP_SIZE 0x003
12#define R2057_RCAL_CONFIG 0x004
13#define R2057_GPAIO_CONFIG 0x005
14#define R2057_GPAIO_SEL1 0x006
15#define R2057_GPAIO_SEL0 0x007
16#define R2057_CLPO_CONFIG 0x008
17#define R2057_BANDGAP_CONFIG 0x009
18#define R2057_BANDGAP_RCAL_TRIM 0x00a
19#define R2057_AFEREG_CONFIG 0x00b
20#define R2057_TEMPSENSE_CONFIG 0x00c
21#define R2057_XTAL_CONFIG1 0x00d
22#define R2057_XTAL_ICORE_SIZE 0x00e
23#define R2057_XTAL_BUF_SIZE 0x00f
24#define R2057_XTAL_PULLCAP_SIZE 0x010
25#define R2057_RFPLL_MASTER 0x011
26#define R2057_VCOMONITOR_VTH_L 0x012
27#define R2057_VCOMONITOR_VTH_H 0x013
28#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT 0x014
29#define R2057_VCO_VARCSIZE_IDAC 0x015
30#define R2057_VCOCAL_COUNTVAL0 0x016
31#define R2057_VCOCAL_COUNTVAL1 0x017
32#define R2057_VCOCAL_INTCLK_COUNT 0x018
33#define R2057_VCOCAL_MASTER 0x019
34#define R2057_VCOCAL_NUMCAPCHANGE 0x01a
35#define R2057_VCOCAL_WINSIZE 0x01b
36#define R2057_VCOCAL_DELAY_AFTER_REFRESH 0x01c
37#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP 0x01d
38#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP 0x01e
39#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP 0x01f
40#define R2057_VCO_FORCECAPEN_FORCECAP1 0x020
41#define R2057_VCO_FORCECAP0 0x021
42#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE 0x022
43#define R2057_RFPLL_PFD_RESET_PW 0x023
44#define R2057_RFPLL_LOOPFILTER_R2 0x024
45#define R2057_RFPLL_LOOPFILTER_R1 0x025
46#define R2057_RFPLL_LOOPFILTER_C3 0x026
47#define R2057_RFPLL_LOOPFILTER_C2 0x027
48#define R2057_RFPLL_LOOPFILTER_C1 0x028
49#define R2057_CP_KPD_IDAC 0x029
50#define R2057_RFPLL_IDACS 0x02a
51#define R2057_RFPLL_MISC_EN 0x02b
52#define R2057_RFPLL_MMD0 0x02c
53#define R2057_RFPLL_MMD1 0x02d
54#define R2057_RFPLL_MISC_CAL_RESETN 0x02e
55#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES 0x02f
56#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE 0x030
57#define R2057_VCOCAL_READCAP0 0x031
58#define R2057_VCOCAL_READCAP1 0x032
59#define R2057_VCOCAL_STATUS 0x033
60#define R2057_LOGEN_PUS 0x034
61#define R2057_LOGEN_PTAT_RESETS 0x035
62#define R2057_VCOBUF_IDACS 0x036
63#define R2057_VCOBUF_TUNE 0x037
64#define R2057_CMOSBUF_TX2GQ_IDACS 0x038
65#define R2057_CMOSBUF_TX2GI_IDACS 0x039
66#define R2057_CMOSBUF_TX5GQ_IDACS 0x03a
67#define R2057_CMOSBUF_TX5GI_IDACS 0x03b
68#define R2057_CMOSBUF_RX2GQ_IDACS 0x03c
69#define R2057_CMOSBUF_RX2GI_IDACS 0x03d
70#define R2057_CMOSBUF_RX5GQ_IDACS 0x03e
71#define R2057_CMOSBUF_RX5GI_IDACS 0x03f
72#define R2057_LOGEN_MX2G_IDACS 0x040
73#define R2057_LOGEN_MX2G_TUNE 0x041
74#define R2057_LOGEN_MX5G_IDACS 0x042
75#define R2057_LOGEN_MX5G_TUNE 0x043
76#define R2057_LOGEN_MX5G_RCCR 0x044
77#define R2057_LOGEN_INDBUF2G_IDAC 0x045
78#define R2057_LOGEN_INDBUF2G_IBOOST 0x046
79#define R2057_LOGEN_INDBUF2G_TUNE 0x047
80#define R2057_LOGEN_INDBUF5G_IDAC 0x048
81#define R2057_LOGEN_INDBUF5G_IBOOST 0x049
82#define R2057_LOGEN_INDBUF5G_TUNE 0x04a
83#define R2057_CMOSBUF_TX_RCCR 0x04b
84#define R2057_CMOSBUF_RX_RCCR 0x04c
85#define R2057_LOGEN_SEL_PKDET 0x04d
86#define R2057_CMOSBUF_SHAREIQ_PTAT 0x04e
87#define R2057_RXTXBIAS_CONFIG_CORE0 0x04f
88#define R2057_TXGM_TXRF_PUS_CORE0 0x050
89#define R2057_TXGM_IDAC_BLEED_CORE0 0x051
90#define R2057_TXGM_GAIN_CORE0 0x056
91#define R2057_TXGM2G_PKDET_PUS_CORE0 0x057
92#define R2057_PAD2G_PTATS_CORE0 0x058
93#define R2057_PAD2G_IDACS_CORE0 0x059
94#define R2057_PAD2G_BOOST_PU_CORE0 0x05a
95#define R2057_PAD2G_CASCV_GAIN_CORE0 0x05b
96#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0 0x05c
97#define R2057_TXMIX2G_LODC_CORE0 0x05d
98#define R2057_PAD2G_TUNE_PUS_CORE0 0x05e
99#define R2057_IPA2G_GAIN_CORE0 0x05f
100#define R2057_TSSI2G_SPARE1_CORE0 0x060
101#define R2057_TSSI2G_SPARE2_CORE0 0x061
102#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0 0x062
103#define R2057_IPA2G_IMAIN_CORE0 0x063
104#define R2057_IPA2G_CASCONV_CORE0 0x064
105#define R2057_IPA2G_CASCOFFV_CORE0 0x065
106#define R2057_IPA2G_BIAS_FILTER_CORE0 0x066
107#define R2057_TX5G_PKDET_CORE0 0x069
108#define R2057_PGA_PTAT_TXGM5G_PU_CORE0 0x06a
109#define R2057_PAD5G_PTATS1_CORE0 0x06b
110#define R2057_PAD5G_CLASS_PTATS2_CORE0 0x06c
111#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0 0x06d
112#define R2057_PAD5G_CASCV_IMAIN_CORE0 0x06e
113#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0 0x06f
114#define R2057_PGA_BOOST_TUNE_CORE0 0x070
115#define R2057_PGA_GAIN_CORE0 0x071
116#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0 0x072
117#define R2057_TXMIX5G_BOOST_TUNE_CORE0 0x073
118#define R2057_PAD5G_TUNE_MISC_PUS_CORE0 0x074
119#define R2057_IPA5G_IAUX_CORE0 0x075
120#define R2057_IPA5G_GAIN_CORE0 0x076
121#define R2057_TSSI5G_SPARE1_CORE0 0x077
122#define R2057_TSSI5G_SPARE2_CORE0 0x078
123#define R2057_IPA5G_CASCOFFV_PU_CORE0 0x079
124#define R2057_IPA5G_PTAT_CORE0 0x07a
125#define R2057_IPA5G_IMAIN_CORE0 0x07b
126#define R2057_IPA5G_CASCONV_CORE0 0x07c
127#define R2057_IPA5G_BIAS_FILTER_CORE0 0x07d
128#define R2057_PAD_BIAS_FILTER_BWS_CORE0 0x080
129#define R2057_TR2G_CONFIG1_CORE0_NU 0x081
130#define R2057_TR2G_CONFIG2_CORE0_NU 0x082
131#define R2057_LNA5G_RFEN_CORE0 0x083
132#define R2057_TR5G_CONFIG2_CORE0_NU 0x084
133#define R2057_RXRFBIAS_IBOOST_PU_CORE0 0x085
134#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0 0x086
135#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0 0x087
136#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0 0x088
137#define R2057_RXMIX_CMFBITAIL_PU_CORE0 0x089
138#define R2057_LNA2_IMAIN_PTAT_PU_CORE0 0x08a
139#define R2057_LNA2_IAUX_PTAT_CORE0 0x08b
140#define R2057_LNA1_IMAIN_PTAT_PU_CORE0 0x08c
141#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0 0x08d
142#define R2057_RXRFBIAS_BANDSEL_CORE0 0x08e
143#define R2057_TIA_CONFIG_CORE0 0x08f
144#define R2057_TIA_IQGAIN_CORE0 0x090
145#define R2057_TIA_IBIAS2_CORE0 0x091
146#define R2057_TIA_IBIAS1_CORE0 0x092
147#define R2057_TIA_SPARE_Q_CORE0 0x093
148#define R2057_TIA_SPARE_I_CORE0 0x094
149#define R2057_RXMIX2G_PUS_CORE0 0x095
150#define R2057_RXMIX2G_VCMREFS_CORE0 0x096
151#define R2057_RXMIX2G_LODC_QI_CORE0 0x097
152#define R2057_W12G_BW_LNA2G_PUS_CORE0 0x098
153#define R2057_LNA2G_GAIN_CORE0 0x099
154#define R2057_LNA2G_TUNE_CORE0 0x09a
155#define R2057_RXMIX5G_PUS_CORE0 0x09b
156#define R2057_RXMIX5G_VCMREFS_CORE0 0x09c
157#define R2057_RXMIX5G_LODC_QI_CORE0 0x09d
158#define R2057_W15G_BW_LNA5G_PUS_CORE0 0x09e
159#define R2057_LNA5G_GAIN_CORE0 0x09f
160#define R2057_LNA5G_TUNE_CORE0 0x0a0
161#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0 0x0a1
162#define R2057_RXBB_BIAS_MASTER_CORE0 0x0a2
163#define R2057_RXBB_VGABUF_IDACS_CORE0 0x0a3
164#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0 0x0a4
165#define R2057_TXBUF_VINCM_CORE0 0x0a5
166#define R2057_TXBUF_IDACS_CORE0 0x0a6
167#define R2057_LPF_RESP_RXBUF_BW_CORE0 0x0a7
168#define R2057_RXBB_CC_CORE0 0x0a8
169#define R2057_RXBB_SPARE3_CORE0 0x0a9
170#define R2057_RXBB_RCCAL_HPC_CORE0 0x0aa
171#define R2057_LPF_IDACS_CORE0 0x0ab
172#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0 0x0ac
173#define R2057_TXBUF_GAIN_CORE0 0x0ad
174#define R2057_AFELOOPBACK_AACI_RESP_CORE0 0x0ae
175#define R2057_RXBUF_DEGEN_CORE0 0x0af
176#define R2057_RXBB_SPARE2_CORE0 0x0b0
177#define R2057_RXBB_SPARE1_CORE0 0x0b1
178#define R2057_RSSI_MASTER_CORE0 0x0b2
179#define R2057_W2_MASTER_CORE0 0x0b3
180#define R2057_NB_MASTER_CORE0 0x0b4
181#define R2057_W2_IDACS0_Q_CORE0 0x0b5
182#define R2057_W2_IDACS1_Q_CORE0 0x0b6
183#define R2057_W2_IDACS0_I_CORE0 0x0b7
184#define R2057_W2_IDACS1_I_CORE0 0x0b8
185#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0 0x0b9
186#define R2057_NB_IDACS_Q_CORE0 0x0ba
187#define R2057_NB_IDACS_I_CORE0 0x0bb
188#define R2057_BACKUP4_CORE0 0x0c1
189#define R2057_BACKUP3_CORE0 0x0c2
190#define R2057_BACKUP2_CORE0 0x0c3
191#define R2057_BACKUP1_CORE0 0x0c4
192#define R2057_SPARE16_CORE0 0x0c5
193#define R2057_SPARE15_CORE0 0x0c6
194#define R2057_SPARE14_CORE0 0x0c7
195#define R2057_SPARE13_CORE0 0x0c8
196#define R2057_SPARE12_CORE0 0x0c9
197#define R2057_SPARE11_CORE0 0x0ca
198#define R2057_TX2G_BIAS_RESETS_CORE0 0x0cb
199#define R2057_TX5G_BIAS_RESETS_CORE0 0x0cc
200#define R2057_IQTEST_SEL_PU 0x0cd
201#define R2057_XTAL_CONFIG2 0x0ce
202#define R2057_BUFS_MISC_LPFBW_CORE0 0x0cf
203#define R2057_TXLPF_RCCAL_CORE0 0x0d0
204#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0 0x0d1
205#define R2057_LPF_GAIN_CORE0 0x0d2
206#define R2057_DACBUF_IDACS_BW_CORE0 0x0d3
207#define R2057_RXTXBIAS_CONFIG_CORE1 0x0d4
208#define R2057_TXGM_TXRF_PUS_CORE1 0x0d5
209#define R2057_TXGM_IDAC_BLEED_CORE1 0x0d6
210#define R2057_TXGM_GAIN_CORE1 0x0db
211#define R2057_TXGM2G_PKDET_PUS_CORE1 0x0dc
212#define R2057_PAD2G_PTATS_CORE1 0x0dd
213#define R2057_PAD2G_IDACS_CORE1 0x0de
214#define R2057_PAD2G_BOOST_PU_CORE1 0x0df
215#define R2057_PAD2G_CASCV_GAIN_CORE1 0x0e0
216#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1 0x0e1
217#define R2057_TXMIX2G_LODC_CORE1 0x0e2
218#define R2057_PAD2G_TUNE_PUS_CORE1 0x0e3
219#define R2057_IPA2G_GAIN_CORE1 0x0e4
220#define R2057_TSSI2G_SPARE1_CORE1 0x0e5
221#define R2057_TSSI2G_SPARE2_CORE1 0x0e6
222#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1 0x0e7
223#define R2057_IPA2G_IMAIN_CORE1 0x0e8
224#define R2057_IPA2G_CASCONV_CORE1 0x0e9
225#define R2057_IPA2G_CASCOFFV_CORE1 0x0ea
226#define R2057_IPA2G_BIAS_FILTER_CORE1 0x0eb
227#define R2057_TX5G_PKDET_CORE1 0x0ee
228#define R2057_PGA_PTAT_TXGM5G_PU_CORE1 0x0ef
229#define R2057_PAD5G_PTATS1_CORE1 0x0f0
230#define R2057_PAD5G_CLASS_PTATS2_CORE1 0x0f1
231#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1 0x0f2
232#define R2057_PAD5G_CASCV_IMAIN_CORE1 0x0f3
233#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1 0x0f4
234#define R2057_PGA_BOOST_TUNE_CORE1 0x0f5
235#define R2057_PGA_GAIN_CORE1 0x0f6
236#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1 0x0f7
237#define R2057_TXMIX5G_BOOST_TUNE_CORE1 0x0f8
238#define R2057_PAD5G_TUNE_MISC_PUS_CORE1 0x0f9
239#define R2057_IPA5G_IAUX_CORE1 0x0fa
240#define R2057_IPA5G_GAIN_CORE1 0x0fb
241#define R2057_TSSI5G_SPARE1_CORE1 0x0fc
242#define R2057_TSSI5G_SPARE2_CORE1 0x0fd
243#define R2057_IPA5G_CASCOFFV_PU_CORE1 0x0fe
244#define R2057_IPA5G_PTAT_CORE1 0x0ff
245#define R2057_IPA5G_IMAIN_CORE1 0x100
246#define R2057_IPA5G_CASCONV_CORE1 0x101
247#define R2057_IPA5G_BIAS_FILTER_CORE1 0x102
248#define R2057_PAD_BIAS_FILTER_BWS_CORE1 0x105
249#define R2057_TR2G_CONFIG1_CORE1_NU 0x106
250#define R2057_TR2G_CONFIG2_CORE1_NU 0x107
251#define R2057_LNA5G_RFEN_CORE1 0x108
252#define R2057_TR5G_CONFIG2_CORE1_NU 0x109
253#define R2057_RXRFBIAS_IBOOST_PU_CORE1 0x10a
254#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1 0x10b
255#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1 0x10c
256#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1 0x10d
257#define R2057_RXMIX_CMFBITAIL_PU_CORE1 0x10e
258#define R2057_LNA2_IMAIN_PTAT_PU_CORE1 0x10f
259#define R2057_LNA2_IAUX_PTAT_CORE1 0x110
260#define R2057_LNA1_IMAIN_PTAT_PU_CORE1 0x111
261#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1 0x112
262#define R2057_RXRFBIAS_BANDSEL_CORE1 0x113
263#define R2057_TIA_CONFIG_CORE1 0x114
264#define R2057_TIA_IQGAIN_CORE1 0x115
265#define R2057_TIA_IBIAS2_CORE1 0x116
266#define R2057_TIA_IBIAS1_CORE1 0x117
267#define R2057_TIA_SPARE_Q_CORE1 0x118
268#define R2057_TIA_SPARE_I_CORE1 0x119
269#define R2057_RXMIX2G_PUS_CORE1 0x11a
270#define R2057_RXMIX2G_VCMREFS_CORE1 0x11b
271#define R2057_RXMIX2G_LODC_QI_CORE1 0x11c
272#define R2057_W12G_BW_LNA2G_PUS_CORE1 0x11d
273#define R2057_LNA2G_GAIN_CORE1 0x11e
274#define R2057_LNA2G_TUNE_CORE1 0x11f
275#define R2057_RXMIX5G_PUS_CORE1 0x120
276#define R2057_RXMIX5G_VCMREFS_CORE1 0x121
277#define R2057_RXMIX5G_LODC_QI_CORE1 0x122
278#define R2057_W15G_BW_LNA5G_PUS_CORE1 0x123
279#define R2057_LNA5G_GAIN_CORE1 0x124
280#define R2057_LNA5G_TUNE_CORE1 0x125
281#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1 0x126
282#define R2057_RXBB_BIAS_MASTER_CORE1 0x127
283#define R2057_RXBB_VGABUF_IDACS_CORE1 0x128
284#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1 0x129
285#define R2057_TXBUF_VINCM_CORE1 0x12a
286#define R2057_TXBUF_IDACS_CORE1 0x12b
287#define R2057_LPF_RESP_RXBUF_BW_CORE1 0x12c
288#define R2057_RXBB_CC_CORE1 0x12d
289#define R2057_RXBB_SPARE3_CORE1 0x12e
290#define R2057_RXBB_RCCAL_HPC_CORE1 0x12f
291#define R2057_LPF_IDACS_CORE1 0x130
292#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1 0x131
293#define R2057_TXBUF_GAIN_CORE1 0x132
294#define R2057_AFELOOPBACK_AACI_RESP_CORE1 0x133
295#define R2057_RXBUF_DEGEN_CORE1 0x134
296#define R2057_RXBB_SPARE2_CORE1 0x135
297#define R2057_RXBB_SPARE1_CORE1 0x136
298#define R2057_RSSI_MASTER_CORE1 0x137
299#define R2057_W2_MASTER_CORE1 0x138
300#define R2057_NB_MASTER_CORE1 0x139
301#define R2057_W2_IDACS0_Q_CORE1 0x13a
302#define R2057_W2_IDACS1_Q_CORE1 0x13b
303#define R2057_W2_IDACS0_I_CORE1 0x13c
304#define R2057_W2_IDACS1_I_CORE1 0x13d
305#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1 0x13e
306#define R2057_NB_IDACS_Q_CORE1 0x13f
307#define R2057_NB_IDACS_I_CORE1 0x140
308#define R2057_BACKUP4_CORE1 0x146
309#define R2057_BACKUP3_CORE1 0x147
310#define R2057_BACKUP2_CORE1 0x148
311#define R2057_BACKUP1_CORE1 0x149
312#define R2057_SPARE16_CORE1 0x14a
313#define R2057_SPARE15_CORE1 0x14b
314#define R2057_SPARE14_CORE1 0x14c
315#define R2057_SPARE13_CORE1 0x14d
316#define R2057_SPARE12_CORE1 0x14e
317#define R2057_SPARE11_CORE1 0x14f
318#define R2057_TX2G_BIAS_RESETS_CORE1 0x150
319#define R2057_TX5G_BIAS_RESETS_CORE1 0x151
320#define R2057_SPARE8_CORE1 0x152
321#define R2057_SPARE7_CORE1 0x153
322#define R2057_BUFS_MISC_LPFBW_CORE1 0x154
323#define R2057_TXLPF_RCCAL_CORE1 0x155
324#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1 0x156
325#define R2057_LPF_GAIN_CORE1 0x157
326#define R2057_DACBUF_IDACS_BW_CORE1 0x158
327#define R2057_DACBUF_VINCM_CORE1 0x159
328#define R2057_RCCAL_START_R1_Q1_P1 0x15a
329#define R2057_RCCAL_X1 0x15b
330#define R2057_RCCAL_TRC0 0x15c
331#define R2057_RCCAL_TRC1 0x15d
332#define R2057_RCCAL_DONE_OSCCAP 0x15e
333#define R2057_RCCAL_N0_0 0x15f
334#define R2057_RCCAL_N0_1 0x160
335#define R2057_RCCAL_N1_0 0x161
336#define R2057_RCCAL_N1_1 0x162
337#define R2057_RCAL_STATUS 0x163
338#define R2057_XTALPUOVR_PINCTRL 0x164
339#define R2057_OVR_REG0 0x165
340#define R2057_OVR_REG1 0x166
341#define R2057_OVR_REG2 0x167
342#define R2057_OVR_REG3 0x168
343#define R2057_OVR_REG4 0x169
344#define R2057_RCCAL_SCAP_VAL 0x16a
345#define R2057_RCCAL_BCAP_VAL 0x16b
346#define R2057_RCCAL_HPC_VAL 0x16c
347#define R2057_RCCAL_OVERRIDES 0x16d
348#define R2057_TX0_IQCAL_GAIN_BW 0x170
349#define R2057_TX0_LOFT_FINE_I 0x171
350#define R2057_TX0_LOFT_FINE_Q 0x172
351#define R2057_TX0_LOFT_COARSE_I 0x173
352#define R2057_TX0_LOFT_COARSE_Q 0x174
353#define R2057_TX0_TX_SSI_MASTER 0x175
354#define R2057_TX0_IQCAL_VCM_HG 0x176
355#define R2057_TX0_IQCAL_IDAC 0x177
356#define R2057_TX0_TSSI_VCM 0x178
357#define R2057_TX0_TX_SSI_MUX 0x179
358#define R2057_TX0_TSSIA 0x17a
359#define R2057_TX0_TSSIG 0x17b
360#define R2057_TX0_TSSI_MISC1 0x17c
361#define R2057_TX0_TXRXCOUPLE_2G_ATTEN 0x17d
362#define R2057_TX0_TXRXCOUPLE_2G_PWRUP 0x17e
363#define R2057_TX0_TXRXCOUPLE_5G_ATTEN 0x17f
364#define R2057_TX0_TXRXCOUPLE_5G_PWRUP 0x180
365#define R2057_TX1_IQCAL_GAIN_BW 0x190
366#define R2057_TX1_LOFT_FINE_I 0x191
367#define R2057_TX1_LOFT_FINE_Q 0x192
368#define R2057_TX1_LOFT_COARSE_I 0x193
369#define R2057_TX1_LOFT_COARSE_Q 0x194
370#define R2057_TX1_TX_SSI_MASTER 0x195
371#define R2057_TX1_IQCAL_VCM_HG 0x196
372#define R2057_TX1_IQCAL_IDAC 0x197
373#define R2057_TX1_TSSI_VCM 0x198
374#define R2057_TX1_TX_SSI_MUX 0x199
375#define R2057_TX1_TSSIA 0x19a
376#define R2057_TX1_TSSIG 0x19b
377#define R2057_TX1_TSSI_MISC1 0x19c
378#define R2057_TX1_TXRXCOUPLE_2G_ATTEN 0x19d
379#define R2057_TX1_TXRXCOUPLE_2G_PWRUP 0x19e
380#define R2057_TX1_TXRXCOUPLE_5G_ATTEN 0x19f
381#define R2057_TX1_TXRXCOUPLE_5G_PWRUP 0x1a0
382#define R2057_AFE_VCM_CAL_MASTER_CORE0 0x1a1
383#define R2057_AFE_SET_VCM_I_CORE0 0x1a2
384#define R2057_AFE_SET_VCM_Q_CORE0 0x1a3
385#define R2057_AFE_STATUS_VCM_IQADC_CORE0 0x1a4
386#define R2057_AFE_STATUS_VCM_I_CORE0 0x1a5
387#define R2057_AFE_STATUS_VCM_Q_CORE0 0x1a6
388#define R2057_AFE_VCM_CAL_MASTER_CORE1 0x1a7
389#define R2057_AFE_SET_VCM_I_CORE1 0x1a8
390#define R2057_AFE_SET_VCM_Q_CORE1 0x1a9
391#define R2057_AFE_STATUS_VCM_IQADC_CORE1 0x1aa
392#define R2057_AFE_STATUS_VCM_I_CORE1 0x1ab
393#define R2057_AFE_STATUS_VCM_Q_CORE1 0x1ac
394
395#define R2057v7_DACBUF_VINCM_CORE0 0x1ad
396#define R2057v7_RCCAL_MASTER 0x1ae
397#define R2057v7_TR2G_CONFIG3_CORE0_NU 0x1af
398#define R2057v7_TR2G_CONFIG3_CORE1_NU 0x1b0
399#define R2057v7_LOGEN_PUS1 0x1b1
400#define R2057v7_OVR_REG5 0x1b2
401#define R2057v7_OVR_REG6 0x1b3
402#define R2057v7_OVR_REG7 0x1b4
403#define R2057v7_OVR_REG8 0x1b5
404#define R2057v7_OVR_REG9 0x1b6
405#define R2057v7_OVR_REG10 0x1b7
406#define R2057v7_OVR_REG11 0x1b8
407#define R2057v7_OVR_REG12 0x1b9
408#define R2057v7_OVR_REG13 0x1ba
409#define R2057v7_OVR_REG14 0x1bb
410#define R2057v7_OVR_REG15 0x1bc
411#define R2057v7_OVR_REG16 0x1bd
412#define R2057v7_OVR_REG1			0x1be /* NOTE(review): sits between REG16 (0x1bd) and REG18 (0x1bf) — likely a typo for R2057v7_OVR_REG17; verify against users before renaming */
413#define R2057v7_OVR_REG18 0x1bf
414#define R2057v7_OVR_REG19 0x1c0
415#define R2057v7_OVR_REG20 0x1c1
416#define R2057v7_OVR_REG21 0x1c2
417#define R2057v7_OVR_REG2			0x1c3 /* NOTE(review): sits between REG21 (0x1c2) and REG23 (0x1c4) — likely a typo for R2057v7_OVR_REG22; verify against users before renaming */
418#define R2057v7_OVR_REG23 0x1c4
419#define R2057v7_OVR_REG24 0x1c5
420#define R2057v7_OVR_REG25 0x1c6
421#define R2057v7_OVR_REG26 0x1c7
422#define R2057v7_OVR_REG27 0x1c8
423#define R2057v7_OVR_REG28 0x1c9
424#define R2057v7_IQTEST_SEL_PU2 0x1ca
425
426#define R2057_VCM_MASK 0x7
427
428void r2057_upload_inittabs(struct b43_wldev *dev);
429
430#endif /* B43_RADIO_2057_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index f0d8377429c6..97d4e27bf36f 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
2757 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ 2757 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
2758}; 2758};
2759 2759
2760/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
2761static const struct nphy_rf_control_override_rev7
2762 tbl_rf_control_override_rev7_over0[] = {
2763 { 0x0004, 0x07A, 0x07D, 0x0002, 1 },
2764 { 0x0008, 0x07A, 0x07D, 0x0004, 2 },
2765 { 0x0010, 0x07A, 0x07D, 0x0010, 4 },
2766 { 0x0020, 0x07A, 0x07D, 0x0020, 5 },
2767 { 0x0040, 0x07A, 0x07D, 0x0040, 6 },
2768 { 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
2769 { 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
2770 { 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
2771 { 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
2772 { 0x6000, 0x348, 0x349, 0xFFFF, 0 },
2773 { 0x2000, 0x348, 0x349, 0x000F, 0 },
2774};
2775
2776/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
2777static const struct nphy_rf_control_override_rev7
2778 tbl_rf_control_override_rev7_over1[] = {
2779 { 0x0002, 0x340, 0x341, 0x0002, 1 },
2780 { 0x0008, 0x340, 0x341, 0x0008, 3 },
2781 { 0x0020, 0x340, 0x341, 0x0020, 5 },
2782 { 0x0010, 0x340, 0x341, 0x0010, 4 },
2783 { 0x0004, 0x340, 0x341, 0x0004, 2 },
2784 { 0x0080, 0x340, 0x341, 0x0700, 8 },
2785 { 0x0800, 0x340, 0x341, 0x4000, 14 },
2786 { 0x0400, 0x340, 0x341, 0x2000, 13 },
2787 { 0x0200, 0x340, 0x341, 0x0800, 12 },
2788 { 0x0100, 0x340, 0x341, 0x0100, 11 },
2789 { 0x0040, 0x340, 0x341, 0x0040, 6 },
2790 { 0x0001, 0x340, 0x341, 0x0001, 0 },
2791};
2792
2793/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
2794static const struct nphy_rf_control_override_rev7
2795 tbl_rf_control_override_rev7_over2[] = {
2796 { 0x0008, 0x344, 0x345, 0x0008, 3 },
2797 { 0x0002, 0x344, 0x345, 0x0002, 1 },
2798 { 0x0001, 0x344, 0x345, 0x0001, 0 },
2799 { 0x0004, 0x344, 0x345, 0x0004, 2 },
2800 { 0x0010, 0x344, 0x345, 0x0010, 4 },
2801};
2802
2760struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { 2803struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
2761 { 10, 14, 19, 27 }, 2804 { 10, 14, 19, 27 },
2762 { -5, 6, 10, 15 }, 2805 { -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
3248 3291
3249 return e; 3292 return e;
3250} 3293}
3294
/*
 * b43_nphy_get_rf_ctl_over_rev7 - look up an RF control override entry
 * for PHY rev 7+ hardware.
 *
 * @dev:      device, used only for error logging
 * @field:    the override field bitmask to search for (exact match)
 * @override: which override table to search (0, 1 or 2)
 *
 * Returns a pointer to the matching static table entry, or NULL when
 * @override is out of range (logged via b43err) or when no entry in the
 * selected table has the requested @field.
 */
const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
	struct b43_wldev *dev, u16 field, u8 override)
{
	const struct nphy_rf_control_override_rev7 *e;
	u8 size, i;

	/* Pick the table matching the requested override register set. */
	switch (override) {
	case 0:
		e = tbl_rf_control_override_rev7_over0;
		size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
		break;
	case 1:
		e = tbl_rf_control_override_rev7_over1;
		size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
		break;
	case 2:
		e = tbl_rf_control_override_rev7_over2;
		size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
		break;
	default:
		b43err(dev->wl, "Invalid override value %d\n", override);
		return NULL;
	}

	/* Linear search: tables are small (5-12 entries), order is not sorted. */
	for (i = 0; i < size; i++) {
		if (e[i].field == field)
			return &e[i];
	}

	return NULL;
}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index f348953c0230..c600700ceedc 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 {
35 u8 val_addr1; 35 u8 val_addr1;
36}; 36};
37 37
38struct nphy_rf_control_override_rev7 {
39 u16 field;
40 u16 val_addr_core0;
41 u16 val_addr_core1;
42 u16 val_mask;
43 u8 val_shift;
44};
45
38struct nphy_gain_ctl_workaround_entry { 46struct nphy_gain_ctl_workaround_entry {
39 s8 lna1_gain[4]; 47 s8 lna1_gain[4];
40 s8 lna2_gain[4]; 48 s8 lna2_gain[4];
@@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2
202 tbl_rf_control_override_rev2[]; 210 tbl_rf_control_override_rev2[];
203extern const struct nphy_rf_control_override_rev3 211extern const struct nphy_rf_control_override_rev3
204 tbl_rf_control_override_rev3[]; 212 tbl_rf_control_override_rev3[];
213const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
214 struct b43_wldev *dev, u16 field, u8 override);
205 215
206#endif /* B43_TABLES_NPHY_H_ */ 216#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8156135a0590..18e208e3eca1 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
1920 return 0; 1920 return 0;
1921 ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, 1921 ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
1922 (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL) 1922 (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
1923 & mask) | set); 1923 & ~mask) | set);
1924 1924
1925 return 0; 1925 return 0;
1926} 1926}
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
2492} 2492}
2493 2493
2494static void b43legacy_op_tx(struct ieee80211_hw *hw, 2494static void b43legacy_op_tx(struct ieee80211_hw *hw,
2495 struct ieee80211_tx_control *control,
2495 struct sk_buff *skb) 2496 struct sk_buff *skb)
2496{ 2497{
2497 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2498 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
@@ -3894,6 +3895,8 @@ static void b43legacy_remove(struct ssb_device *dev)
3894 cancel_work_sync(&wl->firmware_load); 3895 cancel_work_sync(&wl->firmware_load);
3895 3896
3896 B43legacy_WARN_ON(!wl); 3897 B43legacy_WARN_ON(!wl);
3898 if (!wldev->fw.ucode)
3899 return; /* NULL if fw never loaded */
3897 if (wl->current_dev == wldev) 3900 if (wl->current_dev == wldev)
3898 ieee80211_unregister_hw(wl->hw); 3901 ieee80211_unregister_hw(wl->hw);
3899 3902
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b480088b3dbe..c9d811eb6556 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -55,6 +55,14 @@ config BRCMFMAC_USB
55 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to 55 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
56 use the driver for an USB wireless card. 56 use the driver for an USB wireless card.
57 57
58config BRCMISCAN
59 bool "Broadcom I-Scan (OBSOLETE)"
60 depends on BRCMFMAC
61 ---help---
62 This option enables the I-Scan method. By default fullmac uses the
63 new E-Scan method which uses less memory in firmware and gives no
64 limitation on the number of scan results.
65
58config BRCMDBG 66config BRCMDBG
59 bool "Broadcom driver debug functions" 67 bool "Broadcom driver debug functions"
60 depends on BRCMSMAC || BRCMFMAC 68 depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 8e7e6928c936..3b2c4c20e7fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -185,7 +185,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
185 return err; 185 return err;
186} 186}
187 187
188static int 188int
189brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, 189brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
190 void *data, bool write) 190 void *data, bool write)
191{ 191{
@@ -249,7 +249,9 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
249 int retval; 249 int retval;
250 250
251 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 251 brcmf_dbg(INFO, "addr:0x%08x\n", addr);
252 sdio_claim_host(sdiodev->func[1]);
252 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 253 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
254 sdio_release_host(sdiodev->func[1]);
253 brcmf_dbg(INFO, "data:0x%02x\n", data); 255 brcmf_dbg(INFO, "data:0x%02x\n", data);
254 256
255 if (ret) 257 if (ret)
@@ -264,7 +266,9 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
264 int retval; 266 int retval;
265 267
266 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 268 brcmf_dbg(INFO, "addr:0x%08x\n", addr);
269 sdio_claim_host(sdiodev->func[1]);
267 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 270 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
271 sdio_release_host(sdiodev->func[1]);
268 brcmf_dbg(INFO, "data:0x%08x\n", data); 272 brcmf_dbg(INFO, "data:0x%08x\n", data);
269 273
270 if (ret) 274 if (ret)
@@ -279,7 +283,9 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
279 int retval; 283 int retval;
280 284
281 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data); 285 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
286 sdio_claim_host(sdiodev->func[1]);
282 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 287 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
288 sdio_release_host(sdiodev->func[1]);
283 289
284 if (ret) 290 if (ret)
285 *ret = retval; 291 *ret = retval;
@@ -291,7 +297,9 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
291 int retval; 297 int retval;
292 298
293 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data); 299 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
300 sdio_claim_host(sdiodev->func[1]);
294 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 301 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
302 sdio_release_host(sdiodev->func[1]);
295 303
296 if (ret) 304 if (ret)
297 *ret = retval; 305 *ret = retval;
@@ -356,15 +364,20 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
356 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 364 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
357 fn, addr, pkt->len); 365 fn, addr, pkt->len);
358 366
367 sdio_claim_host(sdiodev->func[1]);
368
359 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 369 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
360 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 370 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
361 if (err) 371 if (err)
362 return err; 372 goto done;
363 373
364 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 374 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
365 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, 375 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
366 fn, addr, pkt); 376 fn, addr, pkt);
367 377
378done:
379 sdio_release_host(sdiodev->func[1]);
380
368 return err; 381 return err;
369} 382}
370 383
@@ -378,15 +391,20 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
378 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 391 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
379 fn, addr, pktq->qlen); 392 fn, addr, pktq->qlen);
380 393
394 sdio_claim_host(sdiodev->func[1]);
395
381 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 396 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
382 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 397 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
383 if (err) 398 if (err)
384 return err; 399 goto done;
385 400
386 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 401 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
387 err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr, 402 err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
388 pktq); 403 pktq);
389 404
405done:
406 sdio_release_host(sdiodev->func[1]);
407
390 return err; 408 return err;
391} 409}
392 410
@@ -428,10 +446,12 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
428 if (flags & SDIO_REQ_ASYNC) 446 if (flags & SDIO_REQ_ASYNC)
429 return -ENOTSUPP; 447 return -ENOTSUPP;
430 448
449 sdio_claim_host(sdiodev->func[1]);
450
431 if (bar0 != sdiodev->sbwad) { 451 if (bar0 != sdiodev->sbwad) {
432 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); 452 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
433 if (err) 453 if (err)
434 return err; 454 goto done;
435 455
436 sdiodev->sbwad = bar0; 456 sdiodev->sbwad = bar0;
437 } 457 }
@@ -443,8 +463,13 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
443 if (width == 4) 463 if (width == 4)
444 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 464 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
445 465
446 return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, 466 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
447 addr, pkt); 467 addr, pkt);
468
469done:
470 sdio_release_host(sdiodev->func[1]);
471
472 return err;
448} 473}
449 474
450int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, 475int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
@@ -485,8 +510,10 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
485 brcmf_dbg(TRACE, "Enter\n"); 510 brcmf_dbg(TRACE, "Enter\n");
486 511
487 /* issue abort cmd52 command through F0 */ 512 /* issue abort cmd52 command through F0 */
513 sdio_claim_host(sdiodev->func[1]);
488 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0, 514 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
489 SDIO_CCCR_ABORT, &t_func); 515 SDIO_CCCR_ABORT, &t_func);
516 sdio_release_host(sdiodev->func[1]);
490 517
491 brcmf_dbg(TRACE, "Exit\n"); 518 brcmf_dbg(TRACE, "Exit\n");
492 return 0; 519 return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 7c4ee72f9d56..c3247d5b3c22 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -42,6 +42,7 @@
42 42
43#define DMA_ALIGN_MASK 0x03 43#define DMA_ALIGN_MASK 0x03
44 44
45#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
45#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 46#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
46#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 47#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
47#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 48#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
@@ -51,6 +52,7 @@
51 52
52/* devices we support, null terminated */ 53/* devices we support, null terminated */
53static const struct sdio_device_id brcmf_sdmmc_ids[] = { 54static const struct sdio_device_id brcmf_sdmmc_ids[] = {
55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, 57 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)}, 58 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
@@ -101,7 +103,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
101 if (regaddr == SDIO_CCCR_IOEx) { 103 if (regaddr == SDIO_CCCR_IOEx) {
102 sdfunc = sdiodev->func[2]; 104 sdfunc = sdiodev->func[2];
103 if (sdfunc) { 105 if (sdfunc) {
104 sdio_claim_host(sdfunc);
105 if (*byte & SDIO_FUNC_ENABLE_2) { 106 if (*byte & SDIO_FUNC_ENABLE_2) {
106 /* Enable Function 2 */ 107 /* Enable Function 2 */
107 err_ret = sdio_enable_func(sdfunc); 108 err_ret = sdio_enable_func(sdfunc);
@@ -117,7 +118,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
117 "Disable F2 failed:%d\n", 118 "Disable F2 failed:%d\n",
118 err_ret); 119 err_ret);
119 } 120 }
120 sdio_release_host(sdfunc);
121 } 121 }
122 } else if ((regaddr == SDIO_CCCR_ABORT) || 122 } else if ((regaddr == SDIO_CCCR_ABORT) ||
123 (regaddr == SDIO_CCCR_IENx)) { 123 (regaddr == SDIO_CCCR_IENx)) {
@@ -126,17 +126,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
126 if (!sdfunc) 126 if (!sdfunc)
127 return -ENOMEM; 127 return -ENOMEM;
128 sdfunc->num = 0; 128 sdfunc->num = 0;
129 sdio_claim_host(sdfunc);
130 sdio_writeb(sdfunc, *byte, regaddr, &err_ret); 129 sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
131 sdio_release_host(sdfunc);
132 kfree(sdfunc); 130 kfree(sdfunc);
133 } else if (regaddr < 0xF0) { 131 } else if (regaddr < 0xF0) {
134 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr); 132 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
135 err_ret = -EPERM; 133 err_ret = -EPERM;
136 } else { 134 } else {
137 sdio_claim_host(sdfunc);
138 sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret); 135 sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
139 sdio_release_host(sdfunc);
140 } 136 }
141 137
142 return err_ret; 138 return err_ret;
@@ -157,7 +153,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
157 /* handle F0 separately */ 153 /* handle F0 separately */
158 err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte); 154 err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
159 } else { 155 } else {
160 sdio_claim_host(sdiodev->func[func]);
161 if (rw) /* CMD52 Write */ 156 if (rw) /* CMD52 Write */
162 sdio_writeb(sdiodev->func[func], *byte, regaddr, 157 sdio_writeb(sdiodev->func[func], *byte, regaddr,
163 &err_ret); 158 &err_ret);
@@ -168,7 +163,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
168 *byte = sdio_readb(sdiodev->func[func], regaddr, 163 *byte = sdio_readb(sdiodev->func[func], regaddr,
169 &err_ret); 164 &err_ret);
170 } 165 }
171 sdio_release_host(sdiodev->func[func]);
172 } 166 }
173 167
174 if (err_ret) 168 if (err_ret)
@@ -195,8 +189,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
195 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait); 189 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
196 if (brcmf_pm_resume_error(sdiodev)) 190 if (brcmf_pm_resume_error(sdiodev))
197 return -EIO; 191 return -EIO;
198 /* Claim host controller */
199 sdio_claim_host(sdiodev->func[func]);
200 192
201 if (rw) { /* CMD52 Write */ 193 if (rw) { /* CMD52 Write */
202 if (nbytes == 4) 194 if (nbytes == 4)
@@ -217,9 +209,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
217 brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes); 209 brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
218 } 210 }
219 211
220 /* Release host controller */
221 sdio_release_host(sdiodev->func[func]);
222
223 if (err_ret) 212 if (err_ret)
224 brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n", 213 brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
225 rw ? "write" : "read", err_ret); 214 rw ? "write" : "read", err_ret);
@@ -273,9 +262,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
273 if (brcmf_pm_resume_error(sdiodev)) 262 if (brcmf_pm_resume_error(sdiodev))
274 return -EIO; 263 return -EIO;
275 264
276 /* Claim host controller */
277 sdio_claim_host(sdiodev->func[func]);
278
279 skb_queue_walk(pktq, pkt) { 265 skb_queue_walk(pktq, pkt) {
280 uint pkt_len = pkt->len; 266 uint pkt_len = pkt->len;
281 pkt_len += 3; 267 pkt_len += 3;
@@ -298,9 +284,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
298 SGCount++; 284 SGCount++;
299 } 285 }
300 286
301 /* Release host controller */
302 sdio_release_host(sdiodev->func[func]);
303
304 brcmf_dbg(TRACE, "Exit\n"); 287 brcmf_dbg(TRACE, "Exit\n");
305 return err_ret; 288 return err_ret;
306} 289}
@@ -326,9 +309,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
326 if (brcmf_pm_resume_error(sdiodev)) 309 if (brcmf_pm_resume_error(sdiodev))
327 return -EIO; 310 return -EIO;
328 311
329 /* Claim host controller */
330 sdio_claim_host(sdiodev->func[func]);
331
332 pkt_len += 3; 312 pkt_len += 3;
333 pkt_len &= (uint)~3; 313 pkt_len &= (uint)~3;
334 314
@@ -342,9 +322,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
342 write ? "TX" : "RX", pkt, addr, pkt_len); 322 write ? "TX" : "RX", pkt, addr, pkt_len);
343 } 323 }
344 324
345 /* Release host controller */
346 sdio_release_host(sdiodev->func[func]);
347
348 return status; 325 return status;
349} 326}
350 327
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index a11fe54f5950..17e7ae73e008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -27,6 +27,7 @@
27 * IO codes that are interpreted by dongle firmware 27 * IO codes that are interpreted by dongle firmware
28 ******************************************************************************/ 28 ******************************************************************************/
29#define BRCMF_C_UP 2 29#define BRCMF_C_UP 2
30#define BRCMF_C_DOWN 3
30#define BRCMF_C_SET_PROMISC 10 31#define BRCMF_C_SET_PROMISC 10
31#define BRCMF_C_GET_RATE 12 32#define BRCMF_C_GET_RATE 12
32#define BRCMF_C_GET_INFRA 19 33#define BRCMF_C_GET_INFRA 19
@@ -50,7 +51,10 @@
50#define BRCMF_C_REASSOC 53 51#define BRCMF_C_REASSOC 53
51#define BRCMF_C_SET_ROAM_TRIGGER 55 52#define BRCMF_C_SET_ROAM_TRIGGER 55
52#define BRCMF_C_SET_ROAM_DELTA 57 53#define BRCMF_C_SET_ROAM_DELTA 57
54#define BRCMF_C_GET_BCNPRD 75
55#define BRCMF_C_SET_BCNPRD 76
53#define BRCMF_C_GET_DTIMPRD 77 56#define BRCMF_C_GET_DTIMPRD 77
57#define BRCMF_C_SET_DTIMPRD 78
54#define BRCMF_C_SET_COUNTRY 84 58#define BRCMF_C_SET_COUNTRY 84
55#define BRCMF_C_GET_PM 85 59#define BRCMF_C_GET_PM 85
56#define BRCMF_C_SET_PM 86 60#define BRCMF_C_SET_PM 86
@@ -130,6 +134,13 @@
130#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02 134#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
131#define BRCMF_EVENT_MSG_GROUP 0x04 135#define BRCMF_EVENT_MSG_GROUP 0x04
132 136
137#define BRCMF_ESCAN_REQ_VERSION 1
138
139#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
140
141#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
142#define BRCMF_STA_ASSOC 0x10 /* Associated */
143
133struct brcmf_event_msg { 144struct brcmf_event_msg {
134 __be16 version; 145 __be16 version;
135 __be16 flags; 146 __be16 flags;
@@ -140,6 +151,8 @@ struct brcmf_event_msg {
140 __be32 datalen; 151 __be32 datalen;
141 u8 addr[ETH_ALEN]; 152 u8 addr[ETH_ALEN];
142 char ifname[IFNAMSIZ]; 153 char ifname[IFNAMSIZ];
154 u8 ifidx;
155 u8 bsscfgidx;
143} __packed; 156} __packed;
144 157
145struct brcm_ethhdr { 158struct brcm_ethhdr {
@@ -454,6 +467,24 @@ struct brcmf_scan_results_le {
454 __le32 count; 467 __le32 count;
455}; 468};
456 469
470struct brcmf_escan_params_le {
471 __le32 version;
472 __le16 action;
473 __le16 sync_id;
474 struct brcmf_scan_params_le params_le;
475};
476
477struct brcmf_escan_result_le {
478 __le32 buflen;
479 __le32 version;
480 __le16 sync_id;
481 __le16 bss_count;
482 struct brcmf_bss_info_le bss_info_le;
483};
484
485#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
486 sizeof(struct brcmf_bss_info_le))
487
457/* used for association with a specific BSSID and chanspec list */ 488/* used for association with a specific BSSID and chanspec list */
458struct brcmf_assoc_params_le { 489struct brcmf_assoc_params_le {
459 /* 00:00:00:00:00:00: broadcast scan */ 490 /* 00:00:00:00:00:00: broadcast scan */
@@ -542,6 +573,28 @@ struct brcmf_channel_info_le {
542 __le32 scan_channel; 573 __le32 scan_channel;
543}; 574};
544 575
576struct brcmf_sta_info_le {
577 __le16 ver; /* version of this struct */
578 __le16 len; /* length in bytes of this structure */
579 __le16 cap; /* sta's advertised capabilities */
580 __le32 flags; /* flags defined below */
581 __le32 idle; /* time since data pkt rx'd from sta */
582 u8 ea[ETH_ALEN]; /* Station address */
583 __le32 count; /* # rates in this set */
584 u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
585 /* w/hi bit set if basic */
586 __le32 in; /* seconds elapsed since associated */
587 __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
588 __le32 tx_pkts; /* # of packets transmitted */
589 __le32 tx_failures; /* # of packets failed */
590 __le32 rx_ucast_pkts; /* # of unicast packets received */
591 __le32 rx_mcast_pkts; /* # of multicast packets received */
592 __le32 tx_rate; /* Rate of last successful tx frame */
593 __le32 rx_rate; /* Rate of last successful rx frame */
594 __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
595 __le32 rx_decrypt_failures; /* # of packet decrypted failed */
596};
597
545/* Bus independent dongle command */ 598/* Bus independent dongle command */
546struct brcmf_dcmd { 599struct brcmf_dcmd {
547 uint cmd; /* common dongle cmd definition */ 600 uint cmd; /* common dongle cmd definition */
@@ -561,7 +614,7 @@ struct brcmf_pub {
561 /* Linkage ponters */ 614 /* Linkage ponters */
562 struct brcmf_bus *bus_if; 615 struct brcmf_bus *bus_if;
563 struct brcmf_proto *prot; 616 struct brcmf_proto *prot;
564 struct brcmf_cfg80211_dev *config; 617 struct brcmf_cfg80211_info *config;
565 struct device *dev; /* fullmac dongle device pointer */ 618 struct device *dev; /* fullmac dongle device pointer */
566 619
567 /* Internal brcmf items */ 620 /* Internal brcmf items */
@@ -634,10 +687,13 @@ extern const struct bcmevent_name bcmevent_names[];
634 687
635extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, 688extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
636 char *buf, uint len); 689 char *buf, uint len);
690extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
691 char *buf, uint buflen, s32 bssidx);
637 692
638extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 693extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
639 694
640extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); 695extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
696extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
641 697
642/* Return pointer to interface name */ 698/* Return pointer to interface name */
643extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); 699extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -657,10 +713,6 @@ extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
657 713
658extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx); 714extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
659 715
660/* Send packet to dongle via data channel */
661extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
662 struct sk_buff *pkt);
663
664extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg); 716extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
665extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, 717extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
666 int enable, int master_mode); 718 int enable, int master_mode);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 537f499cc5d2..9b8ee19ea55d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -103,7 +103,7 @@ extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
103extern void brcmf_detach(struct device *dev); 103extern void brcmf_detach(struct device *dev);
104 104
105/* Indication from bus module to change flow-control state */ 105/* Indication from bus module to change flow-control state */
106extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on); 106extern void brcmf_txflowblock(struct device *dev, bool state);
107 107
108/* Notify tx completion */ 108/* Notify tx completion */
109extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, 109extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6f70953f0bad..15c5db5752d1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -80,12 +80,60 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
80 strncpy(buf, name, buflen); 80 strncpy(buf, name, buflen);
81 81
82 /* append data onto the end of the name string */ 82 /* append data onto the end of the name string */
83 memcpy(&buf[len], data, datalen); 83 if (data && datalen) {
84 len += datalen; 84 memcpy(&buf[len], data, datalen);
85 len += datalen;
86 }
85 87
86 return len; 88 return len;
87} 89}
88 90
91uint
92brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
93 char *buf, uint buflen, s32 bssidx)
94{
95 const s8 *prefix = "bsscfg:";
96 s8 *p;
97 u32 prefixlen;
98 u32 namelen;
99 u32 iolen;
100 __le32 bssidx_le;
101
102 if (bssidx == 0)
103 return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
104
105 prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
106 namelen = (u32) strlen(name) + 1; /* lengh of iovar name + null */
107 iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
108
109 if (buflen < 0 || iolen > (u32)buflen) {
110 brcmf_dbg(ERROR, "buffer is too short\n");
111 return 0;
112 }
113
114 p = buf;
115
116 /* copy prefix, no null */
117 memcpy(p, prefix, prefixlen);
118 p += prefixlen;
119
120 /* copy iovar name including null */
121 memcpy(p, name, namelen);
122 p += namelen;
123
124 /* bss config index as first data */
125 bssidx_le = cpu_to_le32(bssidx);
126 memcpy(p, &bssidx_le, sizeof(bssidx_le));
127 p += sizeof(bssidx_le);
128
129 /* parameter buffer follows */
130 if (datalen)
131 memcpy(p, data, datalen);
132
133 return iolen;
134
135}
136
89bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, 137bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
90 struct sk_buff *pkt, int prec) 138 struct sk_buff *pkt, int prec)
91{ 139{
@@ -205,7 +253,8 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
205 BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, { 253 BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
206 BRCMF_E_IF, "IF"}, { 254 BRCMF_E_IF, "IF"}, {
207 BRCMF_E_RSSI, "RSSI"}, { 255 BRCMF_E_RSSI, "RSSI"}, {
208 BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"} 256 BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
257 BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
209 }; 258 };
210 uint event_type, flags, auth_type, datalen; 259 uint event_type, flags, auth_type, datalen;
211 static u32 seqnum_prev; 260 static u32 seqnum_prev;
@@ -350,6 +399,11 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
350 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name); 399 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
351 break; 400 break;
352 401
402 case BRCMF_E_ESCAN_RESULT:
403 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
404 datalen = 0;
405 break;
406
353 case BRCMF_E_PFN_NET_FOUND: 407 case BRCMF_E_PFN_NET_FOUND:
354 case BRCMF_E_PFN_NET_LOST: 408 case BRCMF_E_PFN_NET_LOST:
355 case BRCMF_E_PFN_SCAN_COMPLETE: 409 case BRCMF_E_PFN_SCAN_COMPLETE:
@@ -425,13 +479,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
425 } 479 }
426 480
427 /* show any appended data */ 481 /* show any appended data */
428 if (datalen) { 482 brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
429 buf = (unsigned char *) event_data;
430 brcmf_dbg(EVENT, " data (%d) : ", datalen);
431 for (i = 0; i < datalen; i++)
432 brcmf_dbg(EVENT, " 0x%02x ", *buf++);
433 brcmf_dbg(EVENT, "\n");
434 }
435} 483}
436#endif /* DEBUG */ 484#endif /* DEBUG */
437 485
@@ -522,8 +570,9 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
522 } 570 }
523 571
524#ifdef DEBUG 572#ifdef DEBUG
525 brcmf_c_show_host_event(event, event_data); 573 if (BRCMF_EVENT_ON())
526#endif /* DEBUG */ 574 brcmf_c_show_host_event(event, event_data);
575#endif /* DEBUG */
527 576
528 return 0; 577 return 0;
529} 578}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index b784920532d3..fb508c2256dd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -55,6 +55,7 @@ do { \
55#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL) 55#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
56#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL) 56#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
57#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL) 57#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
58#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
58 59
59#else /* (defined DEBUG) || (defined DEBUG) */ 60#else /* (defined DEBUG) || (defined DEBUG) */
60 61
@@ -65,6 +66,7 @@ do { \
65#define BRCMF_HDRS_ON() 0 66#define BRCMF_HDRS_ON() 0
66#define BRCMF_BYTES_ON() 0 67#define BRCMF_BYTES_ON() 0
67#define BRCMF_GLOM_ON() 0 68#define BRCMF_GLOM_ON() 0
69#define BRCMF_EVENT_ON() 0
68 70
69#endif /* defined(DEBUG) */ 71#endif /* defined(DEBUG) */
70 72
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 9ab24528f9b9..d7c76ce9d8cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -272,30 +272,6 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
272 schedule_work(&drvr->multicast_work); 272 schedule_work(&drvr->multicast_work);
273} 273}
274 274
275int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
276{
277 /* Reject if down */
278 if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
279 return -ENODEV;
280
281 /* Update multicast statistic */
282 if (pktbuf->len >= ETH_ALEN) {
283 u8 *pktdata = (u8 *) (pktbuf->data);
284 struct ethhdr *eh = (struct ethhdr *)pktdata;
285
286 if (is_multicast_ether_addr(eh->h_dest))
287 drvr->tx_multicast++;
288 if (ntohs(eh->h_proto) == ETH_P_PAE)
289 atomic_inc(&drvr->pend_8021x_cnt);
290 }
291
292 /* If the protocol uses a data header, apply it */
293 brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
294
295 /* Use bus module to send data frame */
296 return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
297}
298
299static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) 275static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
300{ 276{
301 int ret; 277 int ret;
@@ -338,7 +314,22 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
338 } 314 }
339 } 315 }
340 316
341 ret = brcmf_sendpkt(drvr, ifp->idx, skb); 317 /* Update multicast statistic */
318 if (skb->len >= ETH_ALEN) {
319 u8 *pktdata = (u8 *)(skb->data);
320 struct ethhdr *eh = (struct ethhdr *)pktdata;
321
322 if (is_multicast_ether_addr(eh->h_dest))
323 drvr->tx_multicast++;
324 if (ntohs(eh->h_proto) == ETH_P_PAE)
325 atomic_inc(&drvr->pend_8021x_cnt);
326 }
327
328 /* If the protocol uses a data header, apply it */
329 brcmf_proto_hdrpush(drvr, ifp->idx, skb);
330
331 /* Use bus module to send data frame */
332 ret = drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb);
342 333
343done: 334done:
344 if (ret) 335 if (ret)
@@ -350,19 +341,23 @@ done:
350 return 0; 341 return 0;
351} 342}
352 343
353void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state) 344void brcmf_txflowblock(struct device *dev, bool state)
354{ 345{
355 struct net_device *ndev; 346 struct net_device *ndev;
356 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 347 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
357 struct brcmf_pub *drvr = bus_if->drvr; 348 struct brcmf_pub *drvr = bus_if->drvr;
349 int i;
358 350
359 brcmf_dbg(TRACE, "Enter\n"); 351 brcmf_dbg(TRACE, "Enter\n");
360 352
361 ndev = drvr->iflist[ifidx]->ndev; 353 for (i = 0; i < BRCMF_MAX_IFS; i++)
362 if (state == ON) 354 if (drvr->iflist[i]) {
363 netif_stop_queue(ndev); 355 ndev = drvr->iflist[i]->ndev;
364 else 356 if (state)
365 netif_wake_queue(ndev); 357 netif_stop_queue(ndev);
358 else
359 netif_wake_queue(ndev);
360 }
366} 361}
367 362
368static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx, 363static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
@@ -775,6 +770,14 @@ done:
775 return err; 770 return err;
776} 771}
777 772
773int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
774{
775 brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
776 dcmd->cmd, dcmd->buf, dcmd->len);
777
778 return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
779}
780
778static int brcmf_netdev_stop(struct net_device *ndev) 781static int brcmf_netdev_stop(struct net_device *ndev)
779{ 782{
780 struct brcmf_if *ifp = netdev_priv(ndev); 783 struct brcmf_if *ifp = netdev_priv(ndev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 472f2ef5c652..3564686add9a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -482,6 +482,15 @@ struct sdpcm_shared_le {
482 __le32 brpt_addr; 482 __le32 brpt_addr;
483}; 483};
484 484
485/* SDIO read frame info */
486struct brcmf_sdio_read {
487 u8 seq_num;
488 u8 channel;
489 u16 len;
490 u16 len_left;
491 u16 len_nxtfrm;
492 u8 dat_offset;
493};
485 494
486/* misc chip info needed by some of the routines */ 495/* misc chip info needed by some of the routines */
487/* Private data for SDIO bus interaction */ 496/* Private data for SDIO bus interaction */
@@ -494,9 +503,8 @@ struct brcmf_sdio {
494 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */ 503 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
495 504
496 u32 hostintmask; /* Copy of Host Interrupt Mask */ 505 u32 hostintmask; /* Copy of Host Interrupt Mask */
497 u32 intstatus; /* Intstatus bits (events) pending */ 506 atomic_t intstatus; /* Intstatus bits (events) pending */
498 bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ 507 atomic_t fcstate; /* State of dongle flow-control */
499 bool fcstate; /* State of dongle flow-control */
500 508
501 uint blocksize; /* Block size of SDIO transfers */ 509 uint blocksize; /* Block size of SDIO transfers */
502 uint roundup; /* Max roundup limit */ 510 uint roundup; /* Max roundup limit */
@@ -508,9 +516,11 @@ struct brcmf_sdio {
508 516
509 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN]; 517 u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
510 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ 518 u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
511 u16 nextlen; /* Next Read Len from last header */
512 u8 rx_seq; /* Receive sequence number (expected) */ 519 u8 rx_seq; /* Receive sequence number (expected) */
520 struct brcmf_sdio_read cur_read;
521 /* info of current read frame */
513 bool rxskip; /* Skip receive (awaiting NAK ACK) */ 522 bool rxskip; /* Skip receive (awaiting NAK ACK) */
523 bool rxpending; /* Data frame pending in dongle */
514 524
515 uint rxbound; /* Rx frames to read before resched */ 525 uint rxbound; /* Rx frames to read before resched */
516 uint txbound; /* Tx frames to send before resched */ 526 uint txbound; /* Tx frames to send before resched */
@@ -531,7 +541,7 @@ struct brcmf_sdio {
531 541
532 bool intr; /* Use interrupts */ 542 bool intr; /* Use interrupts */
533 bool poll; /* Use polling */ 543 bool poll; /* Use polling */
534 bool ipend; /* Device interrupt is pending */ 544 atomic_t ipend; /* Device interrupt is pending */
535 uint spurious; /* Count of spurious interrupts */ 545 uint spurious; /* Count of spurious interrupts */
536 uint pollrate; /* Ticks between device polls */ 546 uint pollrate; /* Ticks between device polls */
537 uint polltick; /* Tick counter */ 547 uint polltick; /* Tick counter */
@@ -549,12 +559,9 @@ struct brcmf_sdio {
549 s32 idleclock; /* How to set bus driver when idle */ 559 s32 idleclock; /* How to set bus driver when idle */
550 s32 sd_rxchain; 560 s32 sd_rxchain;
551 bool use_rxchain; /* If brcmf should use PKT chains */ 561 bool use_rxchain; /* If brcmf should use PKT chains */
552 bool sleeping; /* Is SDIO bus sleeping? */
553 bool rxflow_mode; /* Rx flow control mode */ 562 bool rxflow_mode; /* Rx flow control mode */
554 bool rxflow; /* Is rx flow control on */ 563 bool rxflow; /* Is rx flow control on */
555 bool alp_only; /* Don't use HT clock (ALP only) */ 564 bool alp_only; /* Don't use HT clock (ALP only) */
556/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
557 bool usebufpool;
558 565
559 u8 *ctrl_frame_buf; 566 u8 *ctrl_frame_buf;
560 u32 ctrl_frame_len; 567 u32 ctrl_frame_len;
@@ -570,8 +577,8 @@ struct brcmf_sdio {
570 bool wd_timer_valid; 577 bool wd_timer_valid;
571 uint save_ms; 578 uint save_ms;
572 579
573 struct task_struct *dpc_tsk; 580 struct workqueue_struct *brcmf_wq;
574 struct completion dpc_wait; 581 struct work_struct datawork;
575 struct list_head dpc_tsklst; 582 struct list_head dpc_tsklst;
576 spinlock_t dpc_tl_lock; 583 spinlock_t dpc_tl_lock;
577 584
@@ -657,15 +664,6 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
657 664
658#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) 665#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
659 666
660/* Packet free applicable unconditionally for sdio and sdspi.
661 * Conditional if bufpool was present for gspi bus.
662 */
663static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
664{
665 if (bus->usebufpool)
666 brcmu_pkt_buf_free_skb(pkt);
667}
668
669/* Turn backplane clock on or off */ 667/* Turn backplane clock on or off */
670static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok) 668static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
671{ 669{
@@ -853,81 +851,6 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
853 return 0; 851 return 0;
854} 852}
855 853
856static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
857{
858 int ret;
859
860 brcmf_dbg(INFO, "request %s (currently %s)\n",
861 sleep ? "SLEEP" : "WAKE",
862 bus->sleeping ? "SLEEP" : "WAKE");
863
864 /* Done if we're already in the requested state */
865 if (sleep == bus->sleeping)
866 return 0;
867
868 /* Going to sleep: set the alarm and turn off the lights... */
869 if (sleep) {
870 /* Don't sleep if something is pending */
871 if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
872 return -EBUSY;
873
874 /* Make sure the controller has the bus up */
875 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
876
877 /* Tell device to start using OOB wakeup */
878 ret = w_sdreg32(bus, SMB_USE_OOB,
879 offsetof(struct sdpcmd_regs, tosbmailbox));
880 if (ret != 0)
881 brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
882
883 /* Turn off our contribution to the HT clock request */
884 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
885
886 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
887 SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
888
889 /* Isolate the bus */
890 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
891 SBSDIO_DEVCTL_PADS_ISO, NULL);
892
893 /* Change state */
894 bus->sleeping = true;
895
896 } else {
897 /* Waking up: bus power up is ok, set local state */
898
899 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
900 0, NULL);
901
902 /* Make sure the controller has the bus up */
903 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
904
905 /* Send misc interrupt to indicate OOB not needed */
906 ret = w_sdreg32(bus, 0,
907 offsetof(struct sdpcmd_regs, tosbmailboxdata));
908 if (ret == 0)
909 ret = w_sdreg32(bus, SMB_DEV_INT,
910 offsetof(struct sdpcmd_regs, tosbmailbox));
911
912 if (ret != 0)
913 brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
914
915 /* Make sure we have SD bus access */
916 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
917
918 /* Change state */
919 bus->sleeping = false;
920 }
921
922 return 0;
923}
924
925static void bus_wake(struct brcmf_sdio *bus)
926{
927 if (bus->sleeping)
928 brcmf_sdbrcm_bussleep(bus, false);
929}
930
931static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) 854static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
932{ 855{
933 u32 intstatus = 0; 856 u32 intstatus = 0;
@@ -1056,7 +979,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1056 } 979 }
1057 980
1058 /* Clear partial in any case */ 981 /* Clear partial in any case */
1059 bus->nextlen = 0; 982 bus->cur_read.len = 0;
1060 983
1061 /* If we can't reach the device, signal failure */ 984 /* If we can't reach the device, signal failure */
1062 if (err) 985 if (err)
@@ -1108,6 +1031,96 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1108 } 1031 }
1109} 1032}
1110 1033
1034static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1035 struct brcmf_sdio_read *rd)
1036{
1037 u16 len, checksum;
1038 u8 rx_seq, fc, tx_seq_max;
1039
1040 /*
1041 * 4 bytes hardware header (frame tag)
1042 * Byte 0~1: Frame length
1043 * Byte 2~3: Checksum, bit-wise inverse of frame length
1044 */
1045 len = get_unaligned_le16(header);
1046 checksum = get_unaligned_le16(header + sizeof(u16));
1047 /* All zero means no more to read */
1048 if (!(len | checksum)) {
1049 bus->rxpending = false;
1050 return false;
1051 }
1052 if ((u16)(~(len ^ checksum))) {
1053 brcmf_dbg(ERROR, "HW header checksum error\n");
1054 bus->sdcnt.rx_badhdr++;
1055 brcmf_sdbrcm_rxfail(bus, false, false);
1056 return false;
1057 }
1058 if (len < SDPCM_HDRLEN) {
1059 brcmf_dbg(ERROR, "HW header length error\n");
1060 return false;
1061 }
1062 rd->len = len;
1063
1064 /*
1065 * 8 bytes hardware header
1066 * Byte 0: Rx sequence number
1067 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1068 * Byte 2: Length of next data frame
1069 * Byte 3: Data offset
1070 * Byte 4: Flow control bits
1071 * Byte 5: Maximum Sequence number allow for Tx
1072 * Byte 6~7: Reserved
1073 */
1074 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
1075 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
1076 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
1077 brcmf_dbg(ERROR, "HW header length too long\n");
1078 bus->sdiodev->bus_if->dstats.rx_errors++;
1079 bus->sdcnt.rx_toolong++;
1080 brcmf_sdbrcm_rxfail(bus, false, false);
1081 rd->len = 0;
1082 return false;
1083 }
1084 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1085 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1086 brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
1087 bus->sdcnt.rx_badhdr++;
1088 brcmf_sdbrcm_rxfail(bus, false, false);
1089 rd->len = 0;
1090 return false;
1091 }
1092 if (rd->seq_num != rx_seq) {
1093 brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
1094 rx_seq, rd->seq_num);
1095 bus->sdcnt.rx_badseq++;
1096 rd->seq_num = rx_seq;
1097 }
1098 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1099 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1100 /* only warm for NON glom packet */
1101 if (rd->channel != SDPCM_GLOM_CHANNEL)
1102 brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq);
1103 rd->len_nxtfrm = 0;
1104 }
1105 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1106 if (bus->flowcontrol != fc) {
1107 if (~bus->flowcontrol & fc)
1108 bus->sdcnt.fc_xoff++;
1109 if (bus->flowcontrol & ~fc)
1110 bus->sdcnt.fc_xon++;
1111 bus->sdcnt.fc_rcvd++;
1112 bus->flowcontrol = fc;
1113 }
1114 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1115 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1116 brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq);
1117 tx_seq_max = bus->tx_seq + 2;
1118 }
1119 bus->tx_max = tx_seq_max;
1120
1121 return true;
1122}
1123
1111static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) 1124static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1112{ 1125{
1113 u16 dlen, totlen; 1126 u16 dlen, totlen;
@@ -1122,6 +1135,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1122 1135
1123 int ifidx = 0; 1136 int ifidx = 0;
1124 bool usechain = bus->use_rxchain; 1137 bool usechain = bus->use_rxchain;
1138 u16 next_len;
1125 1139
1126 /* If packets, issue read(s) and send up packet chain */ 1140 /* If packets, issue read(s) and send up packet chain */
1127 /* Return sequence numbers consumed? */ 1141 /* Return sequence numbers consumed? */
@@ -1185,10 +1199,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1185 if (pnext) { 1199 if (pnext) {
1186 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n", 1200 brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1187 totlen, num); 1201 totlen, num);
1188 if (BRCMF_GLOM_ON() && bus->nextlen && 1202 if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1189 totlen != bus->nextlen) { 1203 totlen != bus->cur_read.len) {
1190 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n", 1204 brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1191 bus->nextlen, totlen, rxseq); 1205 bus->cur_read.len, totlen, rxseq);
1192 } 1206 }
1193 pfirst = pnext = NULL; 1207 pfirst = pnext = NULL;
1194 } else { 1208 } else {
@@ -1199,7 +1213,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1199 /* Done with descriptor packet */ 1213 /* Done with descriptor packet */
1200 brcmu_pkt_buf_free_skb(bus->glomd); 1214 brcmu_pkt_buf_free_skb(bus->glomd);
1201 bus->glomd = NULL; 1215 bus->glomd = NULL;
1202 bus->nextlen = 0; 1216 bus->cur_read.len = 0;
1203 } 1217 }
1204 1218
1205 /* Ok -- either we just generated a packet chain, 1219 /* Ok -- either we just generated a packet chain,
@@ -1272,12 +1286,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1272 1286
1273 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); 1287 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
1274 seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); 1288 seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
1275 bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; 1289 next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1276 if ((bus->nextlen << 4) > MAX_RX_DATASZ) { 1290 if ((next_len << 4) > MAX_RX_DATASZ) {
1277 brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n", 1291 brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
1278 bus->nextlen, seq); 1292 next_len, seq);
1279 bus->nextlen = 0; 1293 next_len = 0;
1280 } 1294 }
1295 bus->cur_read.len = next_len << 4;
1281 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1296 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1282 txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1297 txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1283 1298
@@ -1378,7 +1393,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1378 bus->sdcnt.rxglomfail++; 1393 bus->sdcnt.rxglomfail++;
1379 brcmf_sdbrcm_free_glom(bus); 1394 brcmf_sdbrcm_free_glom(bus);
1380 } 1395 }
1381 bus->nextlen = 0; 1396 bus->cur_read.len = 0;
1382 return 0; 1397 return 0;
1383 } 1398 }
1384 1399
@@ -1573,422 +1588,166 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1573 } 1588 }
1574} 1589}
1575 1590
1576static void 1591static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1577brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
1578 struct sk_buff **pkt, u8 **rxbuf)
1579{ 1592{
1580 int sdret; /* Return code from calls */
1581
1582 *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
1583 if (*pkt == NULL)
1584 return;
1585
1586 pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
1587 *rxbuf = (u8 *) ((*pkt)->data);
1588 /* Read the entire frame */
1589 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1590 SDIO_FUNC_2, F2SYNC, *pkt);
1591 bus->sdcnt.f2rxdata++;
1592
1593 if (sdret < 0) {
1594 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
1595 rdlen, sdret);
1596 brcmu_pkt_buf_free_skb(*pkt);
1597 bus->sdiodev->bus_if->dstats.rx_errors++;
1598 /* Force retry w/normal header read.
1599 * Don't attempt NAK for
1600 * gSPI
1601 */
1602 brcmf_sdbrcm_rxfail(bus, true, true);
1603 *pkt = NULL;
1604 }
1605}
1606
1607/* Checks the header */
1608static int
1609brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
1610 u8 rxseq, u16 nextlen, u16 *len)
1611{
1612 u16 check;
1613 bool len_consistent; /* Result of comparing readahead len and
1614 len from hw-hdr */
1615
1616 memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
1617
1618 /* Extract hardware header fields */
1619 *len = get_unaligned_le16(bus->rxhdr);
1620 check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
1621
1622 /* All zeros means readahead info was bad */
1623 if (!(*len | check)) {
1624 brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
1625 goto fail;
1626 }
1627
1628 /* Validate check bytes */
1629 if ((u16)~(*len ^ check)) {
1630 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
1631 nextlen, *len, check);
1632 bus->sdcnt.rx_badhdr++;
1633 brcmf_sdbrcm_rxfail(bus, false, false);
1634 goto fail;
1635 }
1636
1637 /* Validate frame length */
1638 if (*len < SDPCM_HDRLEN) {
1639 brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
1640 *len);
1641 goto fail;
1642 }
1643
1644 /* Check for consistency with readahead info */
1645 len_consistent = (nextlen != (roundup(*len, 16) >> 4));
1646 if (len_consistent) {
1647 /* Mismatch, force retry w/normal
1648 header (may be >4K) */
1649 brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
1650 nextlen, *len, roundup(*len, 16),
1651 rxseq);
1652 brcmf_sdbrcm_rxfail(bus, true, true);
1653 goto fail;
1654 }
1655
1656 return 0;
1657
1658fail:
1659 brcmf_sdbrcm_pktfree2(bus, pkt);
1660 return -EINVAL;
1661}
1662
1663/* Return true if there may be more frames to read */
1664static uint
1665brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1666{
1667 u16 len, check; /* Extracted hardware header fields */
1668 u8 chan, seq, doff; /* Extracted software header fields */
1669 u8 fcbits; /* Extracted fcbits from software header */
1670
1671 struct sk_buff *pkt; /* Packet for event or data frames */ 1593 struct sk_buff *pkt; /* Packet for event or data frames */
1672 u16 pad; /* Number of pad bytes to read */ 1594 u16 pad; /* Number of pad bytes to read */
1673 u16 rdlen; /* Total number of bytes to read */
1674 u8 rxseq; /* Next sequence number to expect */
1675 uint rxleft = 0; /* Remaining number of frames allowed */ 1595 uint rxleft = 0; /* Remaining number of frames allowed */
1676 int sdret; /* Return code from calls */ 1596 int sdret; /* Return code from calls */
1677 u8 txmax; /* Maximum tx sequence offered */
1678 u8 *rxbuf;
1679 int ifidx = 0; 1597 int ifidx = 0;
1680 uint rxcount = 0; /* Total frames read */ 1598 uint rxcount = 0; /* Total frames read */
1599 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1600 u8 head_read = 0;
1681 1601
1682 brcmf_dbg(TRACE, "Enter\n"); 1602 brcmf_dbg(TRACE, "Enter\n");
1683 1603
1684 /* Not finished unless we encounter no more frames indication */ 1604 /* Not finished unless we encounter no more frames indication */
1685 *finished = false; 1605 bus->rxpending = true;
1686 1606
1687 for (rxseq = bus->rx_seq, rxleft = maxframes; 1607 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1688 !bus->rxskip && rxleft && 1608 !bus->rxskip && rxleft &&
1689 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN; 1609 bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
1690 rxseq++, rxleft--) { 1610 rd->seq_num++, rxleft--) {
1691 1611
1692 /* Handle glomming separately */ 1612 /* Handle glomming separately */
1693 if (bus->glomd || !skb_queue_empty(&bus->glom)) { 1613 if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1694 u8 cnt; 1614 u8 cnt;
1695 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n", 1615 brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1696 bus->glomd, skb_peek(&bus->glom)); 1616 bus->glomd, skb_peek(&bus->glom));
1697 cnt = brcmf_sdbrcm_rxglom(bus, rxseq); 1617 cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
1698 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt); 1618 brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1699 rxseq += cnt - 1; 1619 rd->seq_num += cnt - 1;
1700 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; 1620 rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1701 continue; 1621 continue;
1702 } 1622 }
1703 1623
1704 /* Try doing single read if we can */ 1624 rd->len_left = rd->len;
1705 if (bus->nextlen) { 1625 /* read header first for unknow frame length */
1706 u16 nextlen = bus->nextlen; 1626 if (!rd->len) {
1707 bus->nextlen = 0; 1627 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1708 1628 bus->sdiodev->sbwad,
1709 rdlen = len = nextlen << 4; 1629 SDIO_FUNC_2, F2SYNC,
1710 brcmf_pad(bus, &pad, &rdlen); 1630 bus->rxhdr,
1711 1631 BRCMF_FIRSTREAD);
1712 /* 1632 bus->sdcnt.f2rxhdrs++;
1713 * After the frame is received we have to 1633 if (sdret < 0) {
1714 * distinguish whether it is data 1634 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n",
1715 * or non-data frame. 1635 sdret);
1716 */ 1636 bus->sdcnt.rx_hdrfail++;
1717 brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf); 1637 brcmf_sdbrcm_rxfail(bus, true, true);
1718 if (pkt == NULL) {
1719 /* Give up on data, request rtx of events */
1720 brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
1721 len, rdlen, rxseq);
1722 continue;
1723 }
1724
1725 if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
1726 &len) < 0)
1727 continue; 1638 continue;
1728
1729 /* Extract software header fields */
1730 chan = SDPCM_PACKET_CHANNEL(
1731 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1732 seq = SDPCM_PACKET_SEQUENCE(
1733 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1734 doff = SDPCM_DOFFSET_VALUE(
1735 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1736 txmax = SDPCM_WINDOW_VALUE(
1737 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1738
1739 bus->nextlen =
1740 bus->rxhdr[SDPCM_FRAMETAG_LEN +
1741 SDPCM_NEXTLEN_OFFSET];
1742 if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
1743 brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
1744 bus->nextlen, seq);
1745 bus->nextlen = 0;
1746 } 1639 }
1747 1640
1748 bus->sdcnt.rx_readahead_cnt++; 1641 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1749
1750 /* Handle Flow Control */
1751 fcbits = SDPCM_FCMASK_VALUE(
1752 &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1753
1754 if (bus->flowcontrol != fcbits) {
1755 if (~bus->flowcontrol & fcbits)
1756 bus->sdcnt.fc_xoff++;
1757
1758 if (bus->flowcontrol & ~fcbits)
1759 bus->sdcnt.fc_xon++;
1760
1761 bus->sdcnt.fc_rcvd++;
1762 bus->flowcontrol = fcbits;
1763 }
1764
1765 /* Check and update sequence number */
1766 if (rxseq != seq) {
1767 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
1768 seq, rxseq);
1769 bus->sdcnt.rx_badseq++;
1770 rxseq = seq;
1771 }
1772
1773 /* Check window for sanity */
1774 if ((u8) (txmax - bus->tx_seq) > 0x40) {
1775 brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
1776 txmax, bus->tx_seq);
1777 txmax = bus->tx_seq + 2;
1778 }
1779 bus->tx_max = txmax;
1780
1781 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1782 rxbuf, len, "Rx Data:\n");
1783 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1784 BRCMF_DATA_ON()) &&
1785 BRCMF_HDRS_ON(),
1786 bus->rxhdr, SDPCM_HDRLEN, 1642 bus->rxhdr, SDPCM_HDRLEN,
1787 "RxHdr:\n"); 1643 "RxHdr:\n");
1788 1644
1789 if (chan == SDPCM_CONTROL_CHANNEL) { 1645 if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
1790 brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n", 1646 if (!bus->rxpending)
1791 seq); 1647 break;
1792 /* Force retry w/normal header read */ 1648 else
1793 bus->nextlen = 0; 1649 continue;
1794 brcmf_sdbrcm_rxfail(bus, false, true);
1795 brcmf_sdbrcm_pktfree2(bus, pkt);
1796 continue;
1797 } 1650 }
1798 1651
1799 /* Validate data offset */ 1652 if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1800 if ((doff < SDPCM_HDRLEN) || (doff > len)) { 1653 brcmf_sdbrcm_read_control(bus, bus->rxhdr,
1801 brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n", 1654 rd->len,
1802 doff, len, SDPCM_HDRLEN); 1655 rd->dat_offset);
1803 brcmf_sdbrcm_rxfail(bus, false, false); 1656 /* prepare the descriptor for the next read */
1804 brcmf_sdbrcm_pktfree2(bus, pkt); 1657 rd->len = rd->len_nxtfrm << 4;
1658 rd->len_nxtfrm = 0;
1659 /* treat all packet as event if we don't know */
1660 rd->channel = SDPCM_EVENT_CHANNEL;
1805 continue; 1661 continue;
1806 } 1662 }
1807 1663 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1808 /* All done with this one -- now deliver the packet */ 1664 rd->len - BRCMF_FIRSTREAD : 0;
1809 goto deliver; 1665 head_read = BRCMF_FIRSTREAD;
1810 }
1811
1812 /* Read frame header (hardware and software) */
1813 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
1814 SDIO_FUNC_2, F2SYNC, bus->rxhdr,
1815 BRCMF_FIRSTREAD);
1816 bus->sdcnt.f2rxhdrs++;
1817
1818 if (sdret < 0) {
1819 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
1820 bus->sdcnt.rx_hdrfail++;
1821 brcmf_sdbrcm_rxfail(bus, true, true);
1822 continue;
1823 }
1824 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1825 bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n");
1826
1827
1828 /* Extract hardware header fields */
1829 len = get_unaligned_le16(bus->rxhdr);
1830 check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
1831
1832 /* All zeros means no more frames */
1833 if (!(len | check)) {
1834 *finished = true;
1835 break;
1836 }
1837
1838 /* Validate check bytes */
1839 if ((u16) ~(len ^ check)) {
1840 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
1841 len, check);
1842 bus->sdcnt.rx_badhdr++;
1843 brcmf_sdbrcm_rxfail(bus, false, false);
1844 continue;
1845 }
1846
1847 /* Validate frame length */
1848 if (len < SDPCM_HDRLEN) {
1849 brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
1850 continue;
1851 }
1852
1853 /* Extract software header fields */
1854 chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1855 seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1856 doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1857 txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1858
1859 /* Validate data offset */
1860 if ((doff < SDPCM_HDRLEN) || (doff > len)) {
1861 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
1862 doff, len, SDPCM_HDRLEN, seq);
1863 bus->sdcnt.rx_badhdr++;
1864 brcmf_sdbrcm_rxfail(bus, false, false);
1865 continue;
1866 }
1867
1868 /* Save the readahead length if there is one */
1869 bus->nextlen =
1870 bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1871 if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
1872 brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
1873 bus->nextlen, seq);
1874 bus->nextlen = 0;
1875 }
1876
1877 /* Handle Flow Control */
1878 fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
1879
1880 if (bus->flowcontrol != fcbits) {
1881 if (~bus->flowcontrol & fcbits)
1882 bus->sdcnt.fc_xoff++;
1883
1884 if (bus->flowcontrol & ~fcbits)
1885 bus->sdcnt.fc_xon++;
1886
1887 bus->sdcnt.fc_rcvd++;
1888 bus->flowcontrol = fcbits;
1889 }
1890
1891 /* Check and update sequence number */
1892 if (rxseq != seq) {
1893 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
1894 bus->sdcnt.rx_badseq++;
1895 rxseq = seq;
1896 }
1897
1898 /* Check window for sanity */
1899 if ((u8) (txmax - bus->tx_seq) > 0x40) {
1900 brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
1901 txmax, bus->tx_seq);
1902 txmax = bus->tx_seq + 2;
1903 }
1904 bus->tx_max = txmax;
1905
1906 /* Call a separate function for control frames */
1907 if (chan == SDPCM_CONTROL_CHANNEL) {
1908 brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
1909 continue;
1910 }
1911
1912 /* precondition: chan is either SDPCM_DATA_CHANNEL,
1913 SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
1914 SDPCM_GLOM_CHANNEL */
1915
1916 /* Length to read */
1917 rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
1918
1919 /* May pad read to blocksize for efficiency */
1920 if (bus->roundup && bus->blocksize &&
1921 (rdlen > bus->blocksize)) {
1922 pad = bus->blocksize - (rdlen % bus->blocksize);
1923 if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1924 ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
1925 rdlen += pad;
1926 } else if (rdlen % BRCMF_SDALIGN) {
1927 rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
1928 } 1666 }
1929 1667
1930 /* Satisfy length-alignment requirements */ 1668 brcmf_pad(bus, &pad, &rd->len_left);
1931 if (rdlen & (ALIGNMENT - 1))
1932 rdlen = roundup(rdlen, ALIGNMENT);
1933
1934 if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
1935 /* Too long -- skip this frame */
1936 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
1937 len, rdlen);
1938 bus->sdiodev->bus_if->dstats.rx_errors++;
1939 bus->sdcnt.rx_toolong++;
1940 brcmf_sdbrcm_rxfail(bus, false, false);
1941 continue;
1942 }
1943 1669
1944 pkt = brcmu_pkt_buf_get_skb(rdlen + 1670 pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1945 BRCMF_FIRSTREAD + BRCMF_SDALIGN); 1671 BRCMF_SDALIGN);
1946 if (!pkt) { 1672 if (!pkt) {
1947 /* Give up on data, request rtx of events */ 1673 /* Give up on data, request rtx of events */
1948 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n", 1674 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n");
1949 rdlen, chan);
1950 bus->sdiodev->bus_if->dstats.rx_dropped++; 1675 bus->sdiodev->bus_if->dstats.rx_dropped++;
1951 brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan)); 1676 brcmf_sdbrcm_rxfail(bus, false,
1677 RETRYCHAN(rd->channel));
1952 continue; 1678 continue;
1953 } 1679 }
1680 skb_pull(pkt, head_read);
1681 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1954 1682
1955 /* Leave room for what we already read, and align remainder */
1956 skb_pull(pkt, BRCMF_FIRSTREAD);
1957 pkt_align(pkt, rdlen, BRCMF_SDALIGN);
1958
1959 /* Read the remaining frame data */
1960 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1683 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1961 SDIO_FUNC_2, F2SYNC, pkt); 1684 SDIO_FUNC_2, F2SYNC, pkt);
1962 bus->sdcnt.f2rxdata++; 1685 bus->sdcnt.f2rxdata++;
1963 1686
1964 if (sdret < 0) { 1687 if (sdret < 0) {
1965 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, 1688 brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
1966 ((chan == SDPCM_EVENT_CHANNEL) ? "event" 1689 rd->len, rd->channel, sdret);
1967 : ((chan == SDPCM_DATA_CHANNEL) ? "data"
1968 : "test")), sdret);
1969 brcmu_pkt_buf_free_skb(pkt); 1690 brcmu_pkt_buf_free_skb(pkt);
1970 bus->sdiodev->bus_if->dstats.rx_errors++; 1691 bus->sdiodev->bus_if->dstats.rx_errors++;
1971 brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan)); 1692 brcmf_sdbrcm_rxfail(bus, true,
1693 RETRYCHAN(rd->channel));
1972 continue; 1694 continue;
1973 } 1695 }
1974 1696
1975 /* Copy the already-read portion */ 1697 if (head_read) {
1976 skb_push(pkt, BRCMF_FIRSTREAD); 1698 skb_push(pkt, head_read);
1977 memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD); 1699 memcpy(pkt->data, bus->rxhdr, head_read);
1700 head_read = 0;
1701 } else {
1702 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1703 rd_new.seq_num = rd->seq_num;
1704 if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
1705 rd->len = 0;
1706 brcmu_pkt_buf_free_skb(pkt);
1707 }
1708 bus->sdcnt.rx_readahead_cnt++;
1709 if (rd->len != roundup(rd_new.len, 16)) {
1710 brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n",
1711 rd->len,
1712 roundup(rd_new.len, 16) >> 4);
1713 rd->len = 0;
1714 brcmf_sdbrcm_rxfail(bus, true, true);
1715 brcmu_pkt_buf_free_skb(pkt);
1716 continue;
1717 }
1718 rd->len_nxtfrm = rd_new.len_nxtfrm;
1719 rd->channel = rd_new.channel;
1720 rd->dat_offset = rd_new.dat_offset;
1721
1722 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1723 BRCMF_DATA_ON()) &&
1724 BRCMF_HDRS_ON(),
1725 bus->rxhdr, SDPCM_HDRLEN,
1726 "RxHdr:\n");
1727
1728 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1729 brcmf_dbg(ERROR, "readahead on control packet %d?\n",
1730 rd_new.seq_num);
1731 /* Force retry w/normal header read */
1732 rd->len = 0;
1733 brcmf_sdbrcm_rxfail(bus, false, true);
1734 brcmu_pkt_buf_free_skb(pkt);
1735 continue;
1736 }
1737 }
1978 1738
1979 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(), 1739 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1980 pkt->data, len, "Rx Data:\n"); 1740 pkt->data, rd->len, "Rx Data:\n");
1981 1741
1982deliver:
1983 /* Save superframe descriptor and allocate packet frame */ 1742 /* Save superframe descriptor and allocate packet frame */
1984 if (chan == SDPCM_GLOM_CHANNEL) { 1743 if (rd->channel == SDPCM_GLOM_CHANNEL) {
1985 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { 1744 if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
1986 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n", 1745 brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
1987 len); 1746 rd->len);
1988 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), 1747 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1989 pkt->data, len, 1748 pkt->data, rd->len,
1990 "Glom Data:\n"); 1749 "Glom Data:\n");
1991 __skb_trim(pkt, len); 1750 __skb_trim(pkt, rd->len);
1992 skb_pull(pkt, SDPCM_HDRLEN); 1751 skb_pull(pkt, SDPCM_HDRLEN);
1993 bus->glomd = pkt; 1752 bus->glomd = pkt;
1994 } else { 1753 } else {
@@ -1996,12 +1755,23 @@ deliver:
1996 "descriptor!\n", __func__); 1755 "descriptor!\n", __func__);
1997 brcmf_sdbrcm_rxfail(bus, false, false); 1756 brcmf_sdbrcm_rxfail(bus, false, false);
1998 } 1757 }
1758 /* prepare the descriptor for the next read */
1759 rd->len = rd->len_nxtfrm << 4;
1760 rd->len_nxtfrm = 0;
1761 /* treat all packet as event if we don't know */
1762 rd->channel = SDPCM_EVENT_CHANNEL;
1999 continue; 1763 continue;
2000 } 1764 }
2001 1765
2002 /* Fill in packet len and prio, deliver upward */ 1766 /* Fill in packet len and prio, deliver upward */
2003 __skb_trim(pkt, len); 1767 __skb_trim(pkt, rd->len);
2004 skb_pull(pkt, doff); 1768 skb_pull(pkt, rd->dat_offset);
1769
1770 /* prepare the descriptor for the next read */
1771 rd->len = rd->len_nxtfrm << 4;
1772 rd->len_nxtfrm = 0;
1773 /* treat all packet as event if we don't know */
1774 rd->channel = SDPCM_EVENT_CHANNEL;
2005 1775
2006 if (pkt->len == 0) { 1776 if (pkt->len == 0) {
2007 brcmu_pkt_buf_free_skb(pkt); 1777 brcmu_pkt_buf_free_skb(pkt);
@@ -2019,17 +1789,17 @@ deliver:
2019 brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt); 1789 brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
2020 down(&bus->sdsem); 1790 down(&bus->sdsem);
2021 } 1791 }
1792
2022 rxcount = maxframes - rxleft; 1793 rxcount = maxframes - rxleft;
2023 /* Message if we hit the limit */ 1794 /* Message if we hit the limit */
2024 if (!rxleft) 1795 if (!rxleft)
2025 brcmf_dbg(DATA, "hit rx limit of %d frames\n", 1796 brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
2026 maxframes);
2027 else 1797 else
2028 brcmf_dbg(DATA, "processed %d frames\n", rxcount); 1798 brcmf_dbg(DATA, "processed %d frames\n", rxcount);
2029 /* Back off rxseq if awaiting rtx, update rx_seq */ 1799 /* Back off rxseq if awaiting rtx, update rx_seq */
2030 if (bus->rxskip) 1800 if (bus->rxskip)
2031 rxseq--; 1801 rd->seq_num--;
2032 bus->rx_seq = rxseq; 1802 bus->rx_seq = rd->seq_num;
2033 1803
2034 return rxcount; 1804 return rxcount;
2035} 1805}
@@ -2227,7 +1997,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2227 if (ret != 0) 1997 if (ret != 0)
2228 break; 1998 break;
2229 if (intstatus & bus->hostintmask) 1999 if (intstatus & bus->hostintmask)
2230 bus->ipend = true; 2000 atomic_set(&bus->ipend, 1);
2231 } 2001 }
2232 } 2002 }
2233 2003
@@ -2235,8 +2005,8 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2235 if (bus->sdiodev->bus_if->drvr_up && 2005 if (bus->sdiodev->bus_if->drvr_up &&
2236 (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && 2006 (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
2237 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { 2007 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2238 bus->txoff = OFF; 2008 bus->txoff = false;
2239 brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF); 2009 brcmf_txflowblock(bus->sdiodev->dev, false);
2240 } 2010 }
2241 2011
2242 return cnt; 2012 return cnt;
@@ -2259,16 +2029,8 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2259 bus->watchdog_tsk = NULL; 2029 bus->watchdog_tsk = NULL;
2260 } 2030 }
2261 2031
2262 if (bus->dpc_tsk && bus->dpc_tsk != current) {
2263 send_sig(SIGTERM, bus->dpc_tsk, 1);
2264 kthread_stop(bus->dpc_tsk);
2265 bus->dpc_tsk = NULL;
2266 }
2267
2268 down(&bus->sdsem); 2032 down(&bus->sdsem);
2269 2033
2270 bus_wake(bus);
2271
2272 /* Enable clock for device interrupts */ 2034 /* Enable clock for device interrupts */
2273 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2035 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2274 2036
@@ -2327,7 +2089,7 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2327 unsigned long flags; 2089 unsigned long flags;
2328 2090
2329 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); 2091 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2330 if (!bus->sdiodev->irq_en && !bus->ipend) { 2092 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2331 enable_irq(bus->sdiodev->irq); 2093 enable_irq(bus->sdiodev->irq);
2332 bus->sdiodev->irq_en = true; 2094 bus->sdiodev->irq_en = true;
2333 } 2095 }
@@ -2339,21 +2101,69 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2339} 2101}
2340#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ 2102#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
2341 2103
2342static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) 2104static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2343{ 2105{
2344 u32 intstatus, newstatus = 0; 2106 struct list_head *new_hd;
2107 unsigned long flags;
2108
2109 if (in_interrupt())
2110 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2111 else
2112 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2113 if (new_hd == NULL)
2114 return;
2115
2116 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2117 list_add_tail(new_hd, &bus->dpc_tsklst);
2118 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2119}
2120
2121static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2122{
2123 u8 idx;
2124 u32 addr;
2125 unsigned long val;
2126 int n, ret;
2127
2128 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
2129 addr = bus->ci->c_inf[idx].base +
2130 offsetof(struct sdpcmd_regs, intstatus);
2131
2132 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
2133 bus->sdcnt.f1regdata++;
2134 if (ret != 0)
2135 val = 0;
2136
2137 val &= bus->hostintmask;
2138 atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2139
2140 /* Clear interrupts */
2141 if (val) {
2142 ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
2143 bus->sdcnt.f1regdata++;
2144 }
2145
2146 if (ret) {
2147 atomic_set(&bus->intstatus, 0);
2148 } else if (val) {
2149 for_each_set_bit(n, &val, 32)
2150 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2151 }
2152
2153 return ret;
2154}
2155
2156static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2157{
2158 u32 newstatus = 0;
2159 unsigned long intstatus;
2345 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */ 2160 uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
2346 uint txlimit = bus->txbound; /* Tx frames to send before resched */ 2161 uint txlimit = bus->txbound; /* Tx frames to send before resched */
2347 uint framecnt = 0; /* Temporary counter of tx/rx frames */ 2162 uint framecnt = 0; /* Temporary counter of tx/rx frames */
2348 bool rxdone = true; /* Flag for no more read data */ 2163 int err = 0, n;
2349 bool resched = false; /* Flag indicating resched wanted */
2350 int err;
2351 2164
2352 brcmf_dbg(TRACE, "Enter\n"); 2165 brcmf_dbg(TRACE, "Enter\n");
2353 2166
2354 /* Start with leftover status bits */
2355 intstatus = bus->intstatus;
2356
2357 down(&bus->sdsem); 2167 down(&bus->sdsem);
2358 2168
2359 /* If waiting for HTAVAIL, check status */ 2169 /* If waiting for HTAVAIL, check status */
@@ -2399,39 +2209,22 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2399 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2209 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2400 } 2210 }
2401 bus->clkstate = CLK_AVAIL; 2211 bus->clkstate = CLK_AVAIL;
2402 } else {
2403 goto clkwait;
2404 } 2212 }
2405 } 2213 }
2406 2214
2407 bus_wake(bus);
2408
2409 /* Make sure backplane clock is on */ 2215 /* Make sure backplane clock is on */
2410 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true); 2216 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
2411 if (bus->clkstate == CLK_PENDING)
2412 goto clkwait;
2413 2217
2414 /* Pending interrupt indicates new device status */ 2218 /* Pending interrupt indicates new device status */
2415 if (bus->ipend) { 2219 if (atomic_read(&bus->ipend) > 0) {
2416 bus->ipend = false; 2220 atomic_set(&bus->ipend, 0);
2417 err = r_sdreg32(bus, &newstatus, 2221 sdio_claim_host(bus->sdiodev->func[1]);
2418 offsetof(struct sdpcmd_regs, intstatus)); 2222 err = brcmf_sdio_intr_rstatus(bus);
2419 bus->sdcnt.f1regdata++; 2223 sdio_release_host(bus->sdiodev->func[1]);
2420 if (err != 0)
2421 newstatus = 0;
2422 newstatus &= bus->hostintmask;
2423 bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
2424 if (newstatus) {
2425 err = w_sdreg32(bus, newstatus,
2426 offsetof(struct sdpcmd_regs,
2427 intstatus));
2428 bus->sdcnt.f1regdata++;
2429 }
2430 } 2224 }
2431 2225
2432 /* Merge new bits with previous */ 2226 /* Start with leftover status bits */
2433 intstatus |= newstatus; 2227 intstatus = atomic_xchg(&bus->intstatus, 0);
2434 bus->intstatus = 0;
2435 2228
2436 /* Handle flow-control change: read new state in case our ack 2229 /* Handle flow-control change: read new state in case our ack
2437 * crossed another change interrupt. If change still set, assume 2230 * crossed another change interrupt. If change still set, assume
@@ -2445,8 +2238,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2445 err = r_sdreg32(bus, &newstatus, 2238 err = r_sdreg32(bus, &newstatus,
2446 offsetof(struct sdpcmd_regs, intstatus)); 2239 offsetof(struct sdpcmd_regs, intstatus));
2447 bus->sdcnt.f1regdata += 2; 2240 bus->sdcnt.f1regdata += 2;
2448 bus->fcstate = 2241 atomic_set(&bus->fcstate,
2449 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); 2242 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2450 intstatus |= (newstatus & bus->hostintmask); 2243 intstatus |= (newstatus & bus->hostintmask);
2451 } 2244 }
2452 2245
@@ -2483,32 +2276,34 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2483 intstatus &= ~I_HMB_FRAME_IND; 2276 intstatus &= ~I_HMB_FRAME_IND;
2484 2277
2485 /* On frame indication, read available frames */ 2278 /* On frame indication, read available frames */
2486 if (PKT_AVAILABLE()) { 2279 if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
2487 framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone); 2280 framecnt = brcmf_sdio_readframes(bus, rxlimit);
2488 if (rxdone || bus->rxskip) 2281 if (!bus->rxpending)
2489 intstatus &= ~I_HMB_FRAME_IND; 2282 intstatus &= ~I_HMB_FRAME_IND;
2490 rxlimit -= min(framecnt, rxlimit); 2283 rxlimit -= min(framecnt, rxlimit);
2491 } 2284 }
2492 2285
2493 /* Keep still-pending events for next scheduling */ 2286 /* Keep still-pending events for next scheduling */
2494 bus->intstatus = intstatus; 2287 if (intstatus) {
2288 for_each_set_bit(n, &intstatus, 32)
2289 set_bit(n, (unsigned long *)&bus->intstatus.counter);
2290 }
2495 2291
2496clkwait:
2497 brcmf_sdbrcm_clrintr(bus); 2292 brcmf_sdbrcm_clrintr(bus);
2498 2293
2499 if (data_ok(bus) && bus->ctrl_frame_stat && 2294 if (data_ok(bus) && bus->ctrl_frame_stat &&
2500 (bus->clkstate == CLK_AVAIL)) { 2295 (bus->clkstate == CLK_AVAIL)) {
2501 int ret, i; 2296 int i;
2502 2297
2503 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, 2298 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2504 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf, 2299 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2505 (u32) bus->ctrl_frame_len); 2300 (u32) bus->ctrl_frame_len);
2506 2301
2507 if (ret < 0) { 2302 if (err < 0) {
2508 /* On failure, abort the command and 2303 /* On failure, abort the command and
2509 terminate the frame */ 2304 terminate the frame */
2510 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2305 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2511 ret); 2306 err);
2512 bus->sdcnt.tx_sderrs++; 2307 bus->sdcnt.tx_sderrs++;
2513 2308
2514 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2309 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
@@ -2530,42 +2325,34 @@ clkwait:
2530 break; 2325 break;
2531 } 2326 }
2532 2327
2533 } 2328 } else {
2534 if (ret == 0)
2535 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 2329 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2536 2330 }
2537 brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
2538 bus->ctrl_frame_stat = false; 2331 bus->ctrl_frame_stat = false;
2539 brcmf_sdbrcm_wait_event_wakeup(bus); 2332 brcmf_sdbrcm_wait_event_wakeup(bus);
2540 } 2333 }
2541 /* Send queued frames (limit 1 if rx may still be pending) */ 2334 /* Send queued frames (limit 1 if rx may still be pending) */
2542 else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && 2335 else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2543 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit 2336 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
2544 && data_ok(bus)) { 2337 && data_ok(bus)) {
2545 framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax); 2338 framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2339 txlimit;
2546 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt); 2340 framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
2547 txlimit -= framecnt; 2341 txlimit -= framecnt;
2548 } 2342 }
2549 2343
2550 /* Resched if events or tx frames are pending,
2551 else await next interrupt */
2552 /* On failed register access, all bets are off:
2553 no resched or interrupts */
2554 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) { 2344 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2555 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n"); 2345 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
2556 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2346 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2557 bus->intstatus = 0; 2347 atomic_set(&bus->intstatus, 0);
2558 } else if (bus->clkstate == CLK_PENDING) { 2348 } else if (atomic_read(&bus->intstatus) ||
2559 brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n"); 2349 atomic_read(&bus->ipend) > 0 ||
2560 resched = true; 2350 (!atomic_read(&bus->fcstate) &&
2561 } else if (bus->intstatus || bus->ipend || 2351 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2562 (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) 2352 data_ok(bus)) || PKT_AVAILABLE()) {
2563 && data_ok(bus)) || PKT_AVAILABLE()) { 2353 brcmf_sdbrcm_adddpctsk(bus);
2564 resched = true;
2565 } 2354 }
2566 2355
2567 bus->dpc_sched = resched;
2568
2569 /* If we're done for now, turn off clock request. */ 2356 /* If we're done for now, turn off clock request. */
2570 if ((bus->clkstate != CLK_PENDING) 2357 if ((bus->clkstate != CLK_PENDING)
2571 && bus->idletime == BRCMF_IDLE_IMMEDIATE) { 2358 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
@@ -2574,65 +2361,6 @@ clkwait:
2574 } 2361 }
2575 2362
2576 up(&bus->sdsem); 2363 up(&bus->sdsem);
2577
2578 return resched;
2579}
2580
2581static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2582{
2583 struct list_head *new_hd;
2584 unsigned long flags;
2585
2586 if (in_interrupt())
2587 new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
2588 else
2589 new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
2590 if (new_hd == NULL)
2591 return;
2592
2593 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2594 list_add_tail(new_hd, &bus->dpc_tsklst);
2595 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2596}
2597
2598static int brcmf_sdbrcm_dpc_thread(void *data)
2599{
2600 struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
2601 struct list_head *cur_hd, *tmp_hd;
2602 unsigned long flags;
2603
2604 allow_signal(SIGTERM);
2605 /* Run until signal received */
2606 while (1) {
2607 if (kthread_should_stop())
2608 break;
2609
2610 if (list_empty(&bus->dpc_tsklst))
2611 if (wait_for_completion_interruptible(&bus->dpc_wait))
2612 break;
2613
2614 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2615 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
2616 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2617
2618 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
2619 /* after stopping the bus, exit thread */
2620 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
2621 bus->dpc_tsk = NULL;
2622 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2623 break;
2624 }
2625
2626 if (brcmf_sdbrcm_dpc(bus))
2627 brcmf_sdbrcm_adddpctsk(bus);
2628
2629 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2630 list_del(cur_hd);
2631 kfree(cur_hd);
2632 }
2633 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2634 }
2635 return 0;
2636} 2364}
2637 2365
2638static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) 2366static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2642,6 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2642 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2370 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2643 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2371 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2644 struct brcmf_sdio *bus = sdiodev->bus; 2372 struct brcmf_sdio *bus = sdiodev->bus;
2373 unsigned long flags;
2645 2374
2646 brcmf_dbg(TRACE, "Enter\n"); 2375 brcmf_dbg(TRACE, "Enter\n");
2647 2376
@@ -2672,21 +2401,23 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2672 spin_unlock_bh(&bus->txqlock); 2401 spin_unlock_bh(&bus->txqlock);
2673 2402
2674 if (pktq_len(&bus->txq) >= TXHI) { 2403 if (pktq_len(&bus->txq) >= TXHI) {
2675 bus->txoff = ON; 2404 bus->txoff = true;
2676 brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON); 2405 brcmf_txflowblock(bus->sdiodev->dev, true);
2677 } 2406 }
2678 2407
2679#ifdef DEBUG 2408#ifdef DEBUG
2680 if (pktq_plen(&bus->txq, prec) > qcount[prec]) 2409 if (pktq_plen(&bus->txq, prec) > qcount[prec])
2681 qcount[prec] = pktq_plen(&bus->txq, prec); 2410 qcount[prec] = pktq_plen(&bus->txq, prec);
2682#endif 2411#endif
2683 /* Schedule DPC if needed to send queued packet(s) */ 2412
2684 if (!bus->dpc_sched) { 2413 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2685 bus->dpc_sched = true; 2414 if (list_empty(&bus->dpc_tsklst)) {
2686 if (bus->dpc_tsk) { 2415 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2687 brcmf_sdbrcm_adddpctsk(bus); 2416
2688 complete(&bus->dpc_wait); 2417 brcmf_sdbrcm_adddpctsk(bus);
2689 } 2418 queue_work(bus->brcmf_wq, &bus->datawork);
2419 } else {
2420 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2690 } 2421 }
2691 2422
2692 return ret; 2423 return ret;
@@ -2707,6 +2438,8 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2707 else 2438 else
2708 dsize = size; 2439 dsize = size;
2709 2440
2441 sdio_claim_host(bus->sdiodev->func[1]);
2442
2710 /* Set the backplane window to include the start address */ 2443 /* Set the backplane window to include the start address */
2711 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address); 2444 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
2712 if (bcmerror) { 2445 if (bcmerror) {
@@ -2748,6 +2481,8 @@ xfer_done:
2748 brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n", 2481 brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
2749 bus->sdiodev->sbwad); 2482 bus->sdiodev->sbwad);
2750 2483
2484 sdio_release_host(bus->sdiodev->func[1]);
2485
2751 return bcmerror; 2486 return bcmerror;
2752} 2487}
2753 2488
@@ -2882,6 +2617,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2882 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2617 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2883 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2618 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2884 struct brcmf_sdio *bus = sdiodev->bus; 2619 struct brcmf_sdio *bus = sdiodev->bus;
2620 unsigned long flags;
2885 2621
2886 brcmf_dbg(TRACE, "Enter\n"); 2622 brcmf_dbg(TRACE, "Enter\n");
2887 2623
@@ -2918,8 +2654,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2918 /* Need to lock here to protect txseq and SDIO tx calls */ 2654 /* Need to lock here to protect txseq and SDIO tx calls */
2919 down(&bus->sdsem); 2655 down(&bus->sdsem);
2920 2656
2921 bus_wake(bus);
2922
2923 /* Make sure backplane clock is on */ 2657 /* Make sure backplane clock is on */
2924 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2658 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2925 2659
@@ -2967,9 +2701,15 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2967 } while (ret < 0 && retries++ < TXRETRIES); 2701 } while (ret < 0 && retries++ < TXRETRIES);
2968 } 2702 }
2969 2703
2970 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) { 2704 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
2705 if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
2706 list_empty(&bus->dpc_tsklst)) {
2707 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2708
2971 bus->activity = false; 2709 bus->activity = false;
2972 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); 2710 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2711 } else {
2712 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2973 } 2713 }
2974 2714
2975 up(&bus->sdsem); 2715 up(&bus->sdsem);
@@ -3774,23 +3514,20 @@ void brcmf_sdbrcm_isr(void *arg)
3774 } 3514 }
3775 /* Count the interrupt call */ 3515 /* Count the interrupt call */
3776 bus->sdcnt.intrcount++; 3516 bus->sdcnt.intrcount++;
3777 bus->ipend = true; 3517 if (in_interrupt())
3778 3518 atomic_set(&bus->ipend, 1);
3779 /* Shouldn't get this interrupt if we're sleeping? */ 3519 else
3780 if (bus->sleeping) { 3520 if (brcmf_sdio_intr_rstatus(bus)) {
3781 brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n"); 3521 brcmf_dbg(ERROR, "failed backplane access\n");
3782 return; 3522 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3783 } 3523 }
3784 3524
3785 /* Disable additional interrupts (is this needed now)? */ 3525 /* Disable additional interrupts (is this needed now)? */
3786 if (!bus->intr) 3526 if (!bus->intr)
3787 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n"); 3527 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
3788 3528
3789 bus->dpc_sched = true; 3529 brcmf_sdbrcm_adddpctsk(bus);
3790 if (bus->dpc_tsk) { 3530 queue_work(bus->brcmf_wq, &bus->datawork);
3791 brcmf_sdbrcm_adddpctsk(bus);
3792 complete(&bus->dpc_wait);
3793 }
3794} 3531}
3795 3532
3796static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) 3533static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3798,13 +3535,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3798#ifdef DEBUG 3535#ifdef DEBUG
3799 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); 3536 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3800#endif /* DEBUG */ 3537#endif /* DEBUG */
3538 unsigned long flags;
3801 3539
3802 brcmf_dbg(TIMER, "Enter\n"); 3540 brcmf_dbg(TIMER, "Enter\n");
3803 3541
3804 /* Ignore the timer if simulating bus down */
3805 if (bus->sleeping)
3806 return false;
3807
3808 down(&bus->sdsem); 3542 down(&bus->sdsem);
3809 3543
3810 /* Poll period: check device if appropriate. */ 3544 /* Poll period: check device if appropriate. */
@@ -3818,27 +3552,30 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3818 if (!bus->intr || 3552 if (!bus->intr ||
3819 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { 3553 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3820 3554
3821 if (!bus->dpc_sched) { 3555 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3556 if (list_empty(&bus->dpc_tsklst)) {
3822 u8 devpend; 3557 u8 devpend;
3558 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3559 flags);
3823 devpend = brcmf_sdio_regrb(bus->sdiodev, 3560 devpend = brcmf_sdio_regrb(bus->sdiodev,
3824 SDIO_CCCR_INTx, 3561 SDIO_CCCR_INTx,
3825 NULL); 3562 NULL);
3826 intstatus = 3563 intstatus =
3827 devpend & (INTR_STATUS_FUNC1 | 3564 devpend & (INTR_STATUS_FUNC1 |
3828 INTR_STATUS_FUNC2); 3565 INTR_STATUS_FUNC2);
3566 } else {
3567 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3568 flags);
3829 } 3569 }
3830 3570
3831 /* If there is something, make like the ISR and 3571 /* If there is something, make like the ISR and
3832 schedule the DPC */ 3572 schedule the DPC */
3833 if (intstatus) { 3573 if (intstatus) {
3834 bus->sdcnt.pollcnt++; 3574 bus->sdcnt.pollcnt++;
3835 bus->ipend = true; 3575 atomic_set(&bus->ipend, 1);
3836 3576
3837 bus->dpc_sched = true; 3577 brcmf_sdbrcm_adddpctsk(bus);
3838 if (bus->dpc_tsk) { 3578 queue_work(bus->brcmf_wq, &bus->datawork);
3839 brcmf_sdbrcm_adddpctsk(bus);
3840 complete(&bus->dpc_wait);
3841 }
3842 } 3579 }
3843 } 3580 }
3844 3581
@@ -3876,11 +3613,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3876 3613
3877 up(&bus->sdsem); 3614 up(&bus->sdsem);
3878 3615
3879 return bus->ipend; 3616 return (atomic_read(&bus->ipend) > 0);
3880} 3617}
3881 3618
3882static bool brcmf_sdbrcm_chipmatch(u16 chipid) 3619static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3883{ 3620{
3621 if (chipid == BCM43241_CHIP_ID)
3622 return true;
3884 if (chipid == BCM4329_CHIP_ID) 3623 if (chipid == BCM4329_CHIP_ID)
3885 return true; 3624 return true;
3886 if (chipid == BCM4330_CHIP_ID) 3625 if (chipid == BCM4330_CHIP_ID)
@@ -3890,6 +3629,26 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3890 return false; 3629 return false;
3891} 3630}
3892 3631
3632static void brcmf_sdio_dataworker(struct work_struct *work)
3633{
3634 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3635 datawork);
3636 struct list_head *cur_hd, *tmp_hd;
3637 unsigned long flags;
3638
3639 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3640 list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
3641 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3642
3643 brcmf_sdbrcm_dpc(bus);
3644
3645 spin_lock_irqsave(&bus->dpc_tl_lock, flags);
3646 list_del(cur_hd);
3647 kfree(cur_hd);
3648 }
3649 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
3650}
3651
3893static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) 3652static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3894{ 3653{
3895 brcmf_dbg(TRACE, "Enter\n"); 3654 brcmf_dbg(TRACE, "Enter\n");
@@ -4022,7 +3781,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
4022 SDIO_FUNC_ENABLE_1, NULL); 3781 SDIO_FUNC_ENABLE_1, NULL);
4023 3782
4024 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 3783 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
4025 bus->sleeping = false;
4026 bus->rxflow = false; 3784 bus->rxflow = false;
4027 3785
4028 /* Done with backplane-dependent accesses, can drop clock... */ 3786 /* Done with backplane-dependent accesses, can drop clock... */
@@ -4103,6 +3861,9 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
4103 /* De-register interrupt handler */ 3861 /* De-register interrupt handler */
4104 brcmf_sdio_intr_unregister(bus->sdiodev); 3862 brcmf_sdio_intr_unregister(bus->sdiodev);
4105 3863
3864 cancel_work_sync(&bus->datawork);
3865 destroy_workqueue(bus->brcmf_wq);
3866
4106 if (bus->sdiodev->bus_if->drvr) { 3867 if (bus->sdiodev->bus_if->drvr) {
4107 brcmf_detach(bus->sdiodev->dev); 3868 brcmf_detach(bus->sdiodev->dev);
4108 brcmf_sdbrcm_release_dongle(bus); 3869 brcmf_sdbrcm_release_dongle(bus);
@@ -4142,8 +3903,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
4142 bus->rxbound = BRCMF_RXBOUND; 3903 bus->rxbound = BRCMF_RXBOUND;
4143 bus->txminmax = BRCMF_TXMINMAX; 3904 bus->txminmax = BRCMF_TXMINMAX;
4144 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; 3905 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
4145 bus->usebufpool = false; /* Use bufpool if allocated,
4146 else use locally malloced rxbuf */
4147 3906
4148 /* attempt to attach to the dongle */ 3907 /* attempt to attach to the dongle */
4149 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) { 3908 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
@@ -4155,6 +3914,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
4155 init_waitqueue_head(&bus->ctrl_wait); 3914 init_waitqueue_head(&bus->ctrl_wait);
4156 init_waitqueue_head(&bus->dcmd_resp_wait); 3915 init_waitqueue_head(&bus->dcmd_resp_wait);
4157 3916
3917 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3918 if (bus->brcmf_wq == NULL) {
3919 brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
3920 goto fail;
3921 }
3922 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3923
4158 /* Set up the watchdog timer */ 3924 /* Set up the watchdog timer */
4159 init_timer(&bus->timer); 3925 init_timer(&bus->timer);
4160 bus->timer.data = (unsigned long)bus; 3926 bus->timer.data = (unsigned long)bus;
@@ -4172,15 +3938,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
4172 bus->watchdog_tsk = NULL; 3938 bus->watchdog_tsk = NULL;
4173 } 3939 }
4174 /* Initialize DPC thread */ 3940 /* Initialize DPC thread */
4175 init_completion(&bus->dpc_wait);
4176 INIT_LIST_HEAD(&bus->dpc_tsklst); 3941 INIT_LIST_HEAD(&bus->dpc_tsklst);
4177 spin_lock_init(&bus->dpc_tl_lock); 3942 spin_lock_init(&bus->dpc_tl_lock);
4178 bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
4179 bus, "brcmf_dpc");
4180 if (IS_ERR(bus->dpc_tsk)) {
4181 pr_warn("brcmf_dpc thread failed to start\n");
4182 bus->dpc_tsk = NULL;
4183 }
4184 3943
4185 /* Assign bus interface call back */ 3944 /* Assign bus interface call back */
4186 bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop; 3945 bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 58155e23d220..9434440bbc65 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -377,6 +377,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
377 377
378 /* Address of cores for new chips should be added here */ 378 /* Address of cores for new chips should be added here */
379 switch (ci->chip) { 379 switch (ci->chip) {
380 case BCM43241_CHIP_ID:
381 ci->c_inf[0].wrapbase = 0x18100000;
382 ci->c_inf[0].cib = 0x2a084411;
383 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
384 ci->c_inf[1].base = 0x18002000;
385 ci->c_inf[1].wrapbase = 0x18102000;
386 ci->c_inf[1].cib = 0x0e004211;
387 ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
388 ci->c_inf[2].base = 0x18004000;
389 ci->c_inf[2].wrapbase = 0x18104000;
390 ci->c_inf[2].cib = 0x14080401;
391 ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
392 ci->c_inf[3].base = 0x18003000;
393 ci->c_inf[3].wrapbase = 0x18103000;
394 ci->c_inf[3].cib = 0x07004211;
395 ci->ramsize = 0x90000;
396 break;
380 case BCM4329_CHIP_ID: 397 case BCM4329_CHIP_ID:
381 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV; 398 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
382 ci->c_inf[1].base = BCM4329_CORE_BUS_BASE; 399 ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 29bf78d264e0..0d30afd8c672 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -174,6 +174,8 @@ extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
174 u8 data, int *ret); 174 u8 data, int *ret);
175extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, 175extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
176 u32 data, int *ret); 176 u32 data, int *ret);
177extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
178 void *data, bool write);
177 179
178/* Buffer transfer to/from device (client) core via cmd53. 180/* Buffer transfer to/from device (client) core via cmd53.
179 * fn: function number 181 * fn: function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 58f89fa9c9f8..a2b4b1e71017 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -66,7 +66,9 @@
66#define BRCMF_USB_CBCTL_READ 1 66#define BRCMF_USB_CBCTL_READ 1
67#define BRCMF_USB_MAX_PKT_SIZE 1600 67#define BRCMF_USB_MAX_PKT_SIZE 1600
68 68
69#define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin"
69#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin" 70#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
71#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
70 72
71enum usbdev_suspend_state { 73enum usbdev_suspend_state {
72 USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow 74 USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
@@ -78,25 +80,13 @@ enum usbdev_suspend_state {
78 USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */ 80 USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
79}; 81};
80 82
81struct brcmf_usb_probe_info {
82 void *usbdev_info;
83 struct usb_device *usb; /* USB device pointer from OS */
84 uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
85 int intr_size; /* Size of interrupt message */
86 int interval; /* Interrupt polling interval */
87 int vid;
88 int pid;
89 enum usb_device_speed device_speed;
90 enum usbdev_suspend_state suspend_state;
91 struct usb_interface *intf;
92};
93static struct brcmf_usb_probe_info usbdev_probe_info;
94
95struct brcmf_usb_image { 83struct brcmf_usb_image {
96 void *data; 84 struct list_head list;
97 u32 len; 85 s8 *fwname;
86 u8 *image;
87 int image_len;
98}; 88};
99static struct brcmf_usb_image g_image = { NULL, 0 }; 89static struct list_head fw_image_list;
100 90
101struct intr_transfer_buf { 91struct intr_transfer_buf {
102 u32 notification; 92 u32 notification;
@@ -117,9 +107,8 @@ struct brcmf_usbdev_info {
117 int rx_low_watermark; 107 int rx_low_watermark;
118 int tx_low_watermark; 108 int tx_low_watermark;
119 int tx_high_watermark; 109 int tx_high_watermark;
120 bool txoff; 110 int tx_freecount;
121 bool rxoff; 111 bool tx_flowblock;
122 bool txoverride;
123 112
124 struct brcmf_usbreq *tx_reqs; 113 struct brcmf_usbreq *tx_reqs;
125 struct brcmf_usbreq *rx_reqs; 114 struct brcmf_usbreq *rx_reqs;
@@ -133,7 +122,6 @@ struct brcmf_usbdev_info {
133 122
134 struct usb_device *usbdev; 123 struct usb_device *usbdev;
135 struct device *dev; 124 struct device *dev;
136 enum usb_device_speed device_speed;
137 125
138 int ctl_in_pipe, ctl_out_pipe; 126 int ctl_in_pipe, ctl_out_pipe;
139 struct urb *ctl_urb; /* URB for control endpoint */ 127 struct urb *ctl_urb; /* URB for control endpoint */
@@ -146,16 +134,11 @@ struct brcmf_usbdev_info {
146 wait_queue_head_t ctrl_wait; 134 wait_queue_head_t ctrl_wait;
147 ulong ctl_op; 135 ulong ctl_op;
148 136
149 bool rxctl_deferrespok;
150
151 struct urb *bulk_urb; /* used for FW download */ 137 struct urb *bulk_urb; /* used for FW download */
152 struct urb *intr_urb; /* URB for interrupt endpoint */ 138 struct urb *intr_urb; /* URB for interrupt endpoint */
153 int intr_size; /* Size of interrupt message */ 139 int intr_size; /* Size of interrupt message */
154 int interval; /* Interrupt polling interval */ 140 int interval; /* Interrupt polling interval */
155 struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */ 141 struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
156
157 struct brcmf_usb_probe_info probe_info;
158
159}; 142};
160 143
161static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 144static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -177,48 +160,17 @@ static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
177 return brcmf_usb_get_buspub(dev)->devinfo; 160 return brcmf_usb_get_buspub(dev)->devinfo;
178} 161}
179 162
180#if 0 163static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
181static void
182brcmf_usb_txflowcontrol(struct brcmf_usbdev_info *devinfo, bool onoff)
183{ 164{
184 dhd_txflowcontrol(devinfo->bus_pub.netdev, 0, onoff); 165 return wait_event_timeout(devinfo->ioctl_resp_wait,
166 devinfo->ctl_completed,
167 msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
185} 168}
186#endif
187 169
188static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo, 170static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
189 uint *condition, bool *pending)
190{
191 DECLARE_WAITQUEUE(wait, current);
192 int timeout = IOCTL_RESP_TIMEOUT;
193
194 /* Convert timeout in millsecond to jiffies */
195 timeout = msecs_to_jiffies(timeout);
196 /* Wait until control frame is available */
197 add_wait_queue(&devinfo->ioctl_resp_wait, &wait);
198 set_current_state(TASK_INTERRUPTIBLE);
199
200 smp_mb();
201 while (!(*condition) && (!signal_pending(current) && timeout)) {
202 timeout = schedule_timeout(timeout);
203 /* Wait until control frame is available */
204 smp_mb();
205 }
206
207 if (signal_pending(current))
208 *pending = true;
209
210 set_current_state(TASK_RUNNING);
211 remove_wait_queue(&devinfo->ioctl_resp_wait, &wait);
212
213 return timeout;
214}
215
216static int brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
217{ 171{
218 if (waitqueue_active(&devinfo->ioctl_resp_wait)) 172 if (waitqueue_active(&devinfo->ioctl_resp_wait))
219 wake_up_interruptible(&devinfo->ioctl_resp_wait); 173 wake_up(&devinfo->ioctl_resp_wait);
220
221 return 0;
222} 174}
223 175
224static void 176static void
@@ -324,17 +276,9 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
324 devinfo->ctl_read.wLength = cpu_to_le16p(&size); 276 devinfo->ctl_read.wLength = cpu_to_le16p(&size);
325 devinfo->ctl_urb->transfer_buffer_length = size; 277 devinfo->ctl_urb->transfer_buffer_length = size;
326 278
327 if (devinfo->rxctl_deferrespok) { 279 devinfo->ctl_read.bRequestType = USB_DIR_IN
328 /* BMAC model */ 280 | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
329 devinfo->ctl_read.bRequestType = USB_DIR_IN 281 devinfo->ctl_read.bRequest = 1;
330 | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
331 devinfo->ctl_read.bRequest = DL_DEFER_RESP_OK;
332 } else {
333 /* full dongle model */
334 devinfo->ctl_read.bRequestType = USB_DIR_IN
335 | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
336 devinfo->ctl_read.bRequest = 1;
337 }
338 282
339 usb_fill_control_urb(devinfo->ctl_urb, 283 usb_fill_control_urb(devinfo->ctl_urb,
340 devinfo->usbdev, 284 devinfo->usbdev,
@@ -355,7 +299,6 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
355{ 299{
356 int err = 0; 300 int err = 0;
357 int timeout = 0; 301 int timeout = 0;
358 bool pending;
359 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 302 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
360 303
361 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 304 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -366,15 +309,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
366 if (test_and_set_bit(0, &devinfo->ctl_op)) 309 if (test_and_set_bit(0, &devinfo->ctl_op))
367 return -EIO; 310 return -EIO;
368 311
312 devinfo->ctl_completed = false;
369 err = brcmf_usb_send_ctl(devinfo, buf, len); 313 err = brcmf_usb_send_ctl(devinfo, buf, len);
370 if (err) { 314 if (err) {
371 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len); 315 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
316 clear_bit(0, &devinfo->ctl_op);
372 return err; 317 return err;
373 } 318 }
374 319 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
375 devinfo->ctl_completed = false;
376 timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
377 &pending);
378 clear_bit(0, &devinfo->ctl_op); 320 clear_bit(0, &devinfo->ctl_op);
379 if (!timeout) { 321 if (!timeout) {
380 brcmf_dbg(ERROR, "Txctl wait timed out\n"); 322 brcmf_dbg(ERROR, "Txctl wait timed out\n");
@@ -387,7 +329,6 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
387{ 329{
388 int err = 0; 330 int err = 0;
389 int timeout = 0; 331 int timeout = 0;
390 bool pending;
391 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 332 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
392 333
393 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 334 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -397,14 +338,14 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
397 if (test_and_set_bit(0, &devinfo->ctl_op)) 338 if (test_and_set_bit(0, &devinfo->ctl_op))
398 return -EIO; 339 return -EIO;
399 340
341 devinfo->ctl_completed = false;
400 err = brcmf_usb_recv_ctl(devinfo, buf, len); 342 err = brcmf_usb_recv_ctl(devinfo, buf, len);
401 if (err) { 343 if (err) {
402 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len); 344 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
345 clear_bit(0, &devinfo->ctl_op);
403 return err; 346 return err;
404 } 347 }
405 devinfo->ctl_completed = false; 348 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
406 timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
407 &pending);
408 err = devinfo->ctl_urb_status; 349 err = devinfo->ctl_urb_status;
409 clear_bit(0, &devinfo->ctl_op); 350 clear_bit(0, &devinfo->ctl_op);
410 if (!timeout) { 351 if (!timeout) {
@@ -418,7 +359,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
418} 359}
419 360
420static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo, 361static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
421 struct list_head *q) 362 struct list_head *q, int *counter)
422{ 363{
423 unsigned long flags; 364 unsigned long flags;
424 struct brcmf_usbreq *req; 365 struct brcmf_usbreq *req;
@@ -429,17 +370,22 @@ static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
429 } 370 }
430 req = list_entry(q->next, struct brcmf_usbreq, list); 371 req = list_entry(q->next, struct brcmf_usbreq, list);
431 list_del_init(q->next); 372 list_del_init(q->next);
373 if (counter)
374 (*counter)--;
432 spin_unlock_irqrestore(&devinfo->qlock, flags); 375 spin_unlock_irqrestore(&devinfo->qlock, flags);
433 return req; 376 return req;
434 377
435} 378}
436 379
437static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo, 380static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
438 struct list_head *q, struct brcmf_usbreq *req) 381 struct list_head *q, struct brcmf_usbreq *req,
382 int *counter)
439{ 383{
440 unsigned long flags; 384 unsigned long flags;
441 spin_lock_irqsave(&devinfo->qlock, flags); 385 spin_lock_irqsave(&devinfo->qlock, flags);
442 list_add_tail(&req->list, q); 386 list_add_tail(&req->list, q);
387 if (counter)
388 (*counter)++;
443 spin_unlock_irqrestore(&devinfo->qlock, flags); 389 spin_unlock_irqrestore(&devinfo->qlock, flags);
444} 390}
445 391
@@ -519,10 +465,16 @@ static void brcmf_usb_tx_complete(struct urb *urb)
519 else 465 else
520 devinfo->bus_pub.bus->dstats.tx_errors++; 466 devinfo->bus_pub.bus->dstats.tx_errors++;
521 467
468 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
469
522 brcmu_pkt_buf_free_skb(req->skb); 470 brcmu_pkt_buf_free_skb(req->skb);
523 req->skb = NULL; 471 req->skb = NULL;
524 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); 472 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
525 473 if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
474 devinfo->tx_flowblock) {
475 brcmf_txflowblock(devinfo->dev, false);
476 devinfo->tx_flowblock = false;
477 }
526} 478}
527 479
528static void brcmf_usb_rx_complete(struct urb *urb) 480static void brcmf_usb_rx_complete(struct urb *urb)
@@ -541,7 +493,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
541 } else { 493 } else {
542 devinfo->bus_pub.bus->dstats.rx_errors++; 494 devinfo->bus_pub.bus->dstats.rx_errors++;
543 brcmu_pkt_buf_free_skb(skb); 495 brcmu_pkt_buf_free_skb(skb);
544 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 496 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
545 return; 497 return;
546 } 498 }
547 499
@@ -550,15 +502,13 @@ static void brcmf_usb_rx_complete(struct urb *urb)
550 if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) { 502 if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
551 brcmf_dbg(ERROR, "rx protocol error\n"); 503 brcmf_dbg(ERROR, "rx protocol error\n");
552 brcmu_pkt_buf_free_skb(skb); 504 brcmu_pkt_buf_free_skb(skb);
553 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
554 devinfo->bus_pub.bus->dstats.rx_errors++; 505 devinfo->bus_pub.bus->dstats.rx_errors++;
555 } else { 506 } else
556 brcmf_rx_packet(devinfo->dev, ifidx, skb); 507 brcmf_rx_packet(devinfo->dev, ifidx, skb);
557 brcmf_usb_rx_refill(devinfo, req); 508 brcmf_usb_rx_refill(devinfo, req);
558 }
559 } else { 509 } else {
560 brcmu_pkt_buf_free_skb(skb); 510 brcmu_pkt_buf_free_skb(skb);
561 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 511 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
562 } 512 }
563 return; 513 return;
564 514
@@ -575,7 +525,7 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
575 525
576 skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu); 526 skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
577 if (!skb) { 527 if (!skb) {
578 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 528 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
579 return; 529 return;
580 } 530 }
581 req->skb = skb; 531 req->skb = skb;
@@ -584,14 +534,14 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
584 skb->data, skb_tailroom(skb), brcmf_usb_rx_complete, 534 skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
585 req); 535 req);
586 req->devinfo = devinfo; 536 req->devinfo = devinfo;
587 brcmf_usb_enq(devinfo, &devinfo->rx_postq, req); 537 brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
588 538
589 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 539 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
590 if (ret) { 540 if (ret) {
591 brcmf_usb_del_fromq(devinfo, req); 541 brcmf_usb_del_fromq(devinfo, req);
592 brcmu_pkt_buf_free_skb(req->skb); 542 brcmu_pkt_buf_free_skb(req->skb);
593 req->skb = NULL; 543 req->skb = NULL;
594 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); 544 brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
595 } 545 }
596 return; 546 return;
597} 547}
@@ -604,7 +554,7 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
604 brcmf_dbg(ERROR, "bus is not up\n"); 554 brcmf_dbg(ERROR, "bus is not up\n");
605 return; 555 return;
606 } 556 }
607 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq)) != NULL) 557 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
608 brcmf_usb_rx_refill(devinfo, req); 558 brcmf_usb_rx_refill(devinfo, req);
609} 559}
610 560
@@ -682,7 +632,8 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
682 return -EIO; 632 return -EIO;
683 } 633 }
684 634
685 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq); 635 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
636 &devinfo->tx_freecount);
686 if (!req) { 637 if (!req) {
687 brcmu_pkt_buf_free_skb(skb); 638 brcmu_pkt_buf_free_skb(skb);
688 brcmf_dbg(ERROR, "no req to send\n"); 639 brcmf_dbg(ERROR, "no req to send\n");
@@ -694,14 +645,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
694 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe, 645 usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
695 skb->data, skb->len, brcmf_usb_tx_complete, req); 646 skb->data, skb->len, brcmf_usb_tx_complete, req);
696 req->urb->transfer_flags |= URB_ZERO_PACKET; 647 req->urb->transfer_flags |= URB_ZERO_PACKET;
697 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req); 648 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
698 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 649 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
699 if (ret) { 650 if (ret) {
700 brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n"); 651 brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
701 brcmf_usb_del_fromq(devinfo, req); 652 brcmf_usb_del_fromq(devinfo, req);
702 brcmu_pkt_buf_free_skb(req->skb); 653 brcmu_pkt_buf_free_skb(req->skb);
703 req->skb = NULL; 654 req->skb = NULL;
704 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); 655 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
656 &devinfo->tx_freecount);
657 } else {
658 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
659 !devinfo->tx_flowblock) {
660 brcmf_txflowblock(dev, true);
661 devinfo->tx_flowblock = true;
662 }
705 } 663 }
706 664
707 return ret; 665 return ret;
@@ -1112,10 +1070,14 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
1112static bool brcmf_usb_chip_support(int chipid, int chiprev) 1070static bool brcmf_usb_chip_support(int chipid, int chiprev)
1113{ 1071{
1114 switch(chipid) { 1072 switch(chipid) {
1073 case 43143:
1074 return true;
1115 case 43235: 1075 case 43235:
1116 case 43236: 1076 case 43236:
1117 case 43238: 1077 case 43238:
1118 return (chiprev == 3); 1078 return (chiprev == 3);
1079 case 43242:
1080 return true;
1119 default: 1081 default:
1120 break; 1082 break;
1121 } 1083 }
@@ -1154,17 +1116,10 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1154} 1116}
1155 1117
1156 1118
1157static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub) 1119static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
1158{ 1120{
1159 struct brcmf_usbdev_info *devinfo =
1160 (struct brcmf_usbdev_info *)bus_pub;
1161
1162 brcmf_dbg(TRACE, "devinfo %p\n", devinfo); 1121 brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
1163 1122
1164 /* store the image globally */
1165 g_image.data = devinfo->image;
1166 g_image.len = devinfo->image_len;
1167
1168 /* free the URBS */ 1123 /* free the URBS */
1169 brcmf_usb_free_q(&devinfo->rx_freeq, false); 1124 brcmf_usb_free_q(&devinfo->rx_freeq, false);
1170 brcmf_usb_free_q(&devinfo->tx_freeq, false); 1125 brcmf_usb_free_q(&devinfo->tx_freeq, false);
@@ -1175,7 +1130,6 @@ static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
1175 1130
1176 kfree(devinfo->tx_reqs); 1131 kfree(devinfo->tx_reqs);
1177 kfree(devinfo->rx_reqs); 1132 kfree(devinfo->rx_reqs);
1178 kfree(devinfo);
1179} 1133}
1180 1134
1181#define TRX_MAGIC 0x30524448 /* "HDR0" */ 1135#define TRX_MAGIC 0x30524448 /* "HDR0" */
@@ -1217,19 +1171,34 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1217{ 1171{
1218 s8 *fwname; 1172 s8 *fwname;
1219 const struct firmware *fw; 1173 const struct firmware *fw;
1174 struct brcmf_usb_image *fw_image;
1220 int err; 1175 int err;
1221 1176
1222 devinfo->image = g_image.data; 1177 switch (devinfo->bus_pub.devid) {
1223 devinfo->image_len = g_image.len; 1178 case 43143:
1224 1179 fwname = BRCMF_USB_43143_FW_NAME;
1225 /* 1180 break;
1226 * if we have an image we can leave here. 1181 case 43235:
1227 */ 1182 case 43236:
1228 if (devinfo->image) 1183 case 43238:
1229 return 0; 1184 fwname = BRCMF_USB_43236_FW_NAME;
1230 1185 break;
1231 fwname = BRCMF_USB_43236_FW_NAME; 1186 case 43242:
1187 fwname = BRCMF_USB_43242_FW_NAME;
1188 break;
1189 default:
1190 return -EINVAL;
1191 break;
1192 }
1232 1193
1194 list_for_each_entry(fw_image, &fw_image_list, list) {
1195 if (fw_image->fwname == fwname) {
1196 devinfo->image = fw_image->image;
1197 devinfo->image_len = fw_image->image_len;
1198 return 0;
1199 }
1200 }
1201 /* fw image not yet loaded. Load it now and add to list */
1233 err = request_firmware(&fw, fwname, devinfo->dev); 1202 err = request_firmware(&fw, fwname, devinfo->dev);
1234 if (!fw) { 1203 if (!fw) {
1235 brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname); 1204 brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
@@ -1240,27 +1209,32 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1240 return -EINVAL; 1209 return -EINVAL;
1241 } 1210 }
1242 1211
1243 devinfo->image = vmalloc(fw->size); /* plus nvram */ 1212 fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
1244 if (!devinfo->image) 1213 if (!fw_image)
1214 return -ENOMEM;
1215 INIT_LIST_HEAD(&fw_image->list);
1216 list_add_tail(&fw_image->list, &fw_image_list);
1217 fw_image->fwname = fwname;
1218 fw_image->image = vmalloc(fw->size);
1219 if (!fw_image->image)
1245 return -ENOMEM; 1220 return -ENOMEM;
1246 1221
1247 memcpy(devinfo->image, fw->data, fw->size); 1222 memcpy(fw_image->image, fw->data, fw->size);
1248 devinfo->image_len = fw->size; 1223 fw_image->image_len = fw->size;
1249 1224
1250 release_firmware(fw); 1225 release_firmware(fw);
1226
1227 devinfo->image = fw_image->image;
1228 devinfo->image_len = fw_image->image_len;
1229
1251 return 0; 1230 return 0;
1252} 1231}
1253 1232
1254 1233
1255static 1234static
1256struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev) 1235struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1236 int nrxq, int ntxq)
1257{ 1237{
1258 struct brcmf_usbdev_info *devinfo;
1259
1260 devinfo = kzalloc(sizeof(struct brcmf_usbdev_info), GFP_ATOMIC);
1261 if (devinfo == NULL)
1262 return NULL;
1263
1264 devinfo->bus_pub.nrxq = nrxq; 1238 devinfo->bus_pub.nrxq = nrxq;
1265 devinfo->rx_low_watermark = nrxq / 2; 1239 devinfo->rx_low_watermark = nrxq / 2;
1266 devinfo->bus_pub.devinfo = devinfo; 1240 devinfo->bus_pub.devinfo = devinfo;
@@ -1269,18 +1243,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1269 /* flow control when too many tx urbs posted */ 1243 /* flow control when too many tx urbs posted */
1270 devinfo->tx_low_watermark = ntxq / 4; 1244 devinfo->tx_low_watermark = ntxq / 4;
1271 devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3; 1245 devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
1272 devinfo->dev = dev;
1273 devinfo->usbdev = usbdev_probe_info.usb;
1274 devinfo->tx_pipe = usbdev_probe_info.tx_pipe;
1275 devinfo->rx_pipe = usbdev_probe_info.rx_pipe;
1276 devinfo->rx_pipe2 = usbdev_probe_info.rx_pipe2;
1277 devinfo->intr_pipe = usbdev_probe_info.intr_pipe;
1278
1279 devinfo->interval = usbdev_probe_info.interval;
1280 devinfo->intr_size = usbdev_probe_info.intr_size;
1281
1282 memcpy(&devinfo->probe_info, &usbdev_probe_info,
1283 sizeof(struct brcmf_usb_probe_info));
1284 devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE; 1246 devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
1285 1247
1286 /* Initialize other structure content */ 1248 /* Initialize other structure content */
@@ -1295,6 +1257,8 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1295 INIT_LIST_HEAD(&devinfo->tx_freeq); 1257 INIT_LIST_HEAD(&devinfo->tx_freeq);
1296 INIT_LIST_HEAD(&devinfo->tx_postq); 1258 INIT_LIST_HEAD(&devinfo->tx_postq);
1297 1259
1260 devinfo->tx_flowblock = false;
1261
1298 devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq); 1262 devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
1299 if (!devinfo->rx_reqs) 1263 if (!devinfo->rx_reqs)
1300 goto error; 1264 goto error;
@@ -1302,6 +1266,7 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1302 devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq); 1266 devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
1303 if (!devinfo->tx_reqs) 1267 if (!devinfo->tx_reqs)
1304 goto error; 1268 goto error;
1269 devinfo->tx_freecount = ntxq;
1305 1270
1306 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC); 1271 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
1307 if (!devinfo->intr_urb) { 1272 if (!devinfo->intr_urb) {
@@ -1313,8 +1278,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1313 brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n"); 1278 brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
1314 goto error; 1279 goto error;
1315 } 1280 }
1316 devinfo->rxctl_deferrespok = 0;
1317
1318 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC); 1281 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
1319 if (!devinfo->bulk_urb) { 1282 if (!devinfo->bulk_urb) {
1320 brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n"); 1283 brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
@@ -1336,23 +1299,21 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
1336 1299
1337error: 1300error:
1338 brcmf_dbg(ERROR, "failed!\n"); 1301 brcmf_dbg(ERROR, "failed!\n");
1339 brcmf_usb_detach(&devinfo->bus_pub); 1302 brcmf_usb_detach(devinfo);
1340 return NULL; 1303 return NULL;
1341} 1304}
1342 1305
1343static int brcmf_usb_probe_cb(struct device *dev, const char *desc, 1306static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
1344 u32 bustype, u32 hdrlen) 1307 const char *desc, u32 bustype, u32 hdrlen)
1345{ 1308{
1346 struct brcmf_bus *bus = NULL; 1309 struct brcmf_bus *bus = NULL;
1347 struct brcmf_usbdev *bus_pub = NULL; 1310 struct brcmf_usbdev *bus_pub = NULL;
1348 int ret; 1311 int ret;
1312 struct device *dev = devinfo->dev;
1349 1313
1350 1314 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
1351 bus_pub = brcmf_usb_attach(BRCMF_USB_NRXQ, BRCMF_USB_NTXQ, dev); 1315 if (!bus_pub)
1352 if (!bus_pub) { 1316 return -ENODEV;
1353 ret = -ENODEV;
1354 goto fail;
1355 }
1356 1317
1357 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); 1318 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
1358 if (!bus) { 1319 if (!bus) {
@@ -1387,23 +1348,21 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
1387 return 0; 1348 return 0;
1388fail: 1349fail:
1389 /* Release resources in reverse order */ 1350 /* Release resources in reverse order */
1390 if (bus_pub)
1391 brcmf_usb_detach(bus_pub);
1392 kfree(bus); 1351 kfree(bus);
1352 brcmf_usb_detach(devinfo);
1393 return ret; 1353 return ret;
1394} 1354}
1395 1355
1396static void 1356static void
1397brcmf_usb_disconnect_cb(struct brcmf_usbdev *bus_pub) 1357brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
1398{ 1358{
1399 if (!bus_pub) 1359 if (!devinfo)
1400 return; 1360 return;
1401 brcmf_dbg(TRACE, "enter: bus_pub %p\n", bus_pub); 1361 brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
1402
1403 brcmf_detach(bus_pub->devinfo->dev);
1404 kfree(bus_pub->bus);
1405 brcmf_usb_detach(bus_pub);
1406 1362
1363 brcmf_detach(devinfo->dev);
1364 kfree(devinfo->bus_pub.bus);
1365 brcmf_usb_detach(devinfo);
1407} 1366}
1408 1367
1409static int 1368static int
@@ -1415,18 +1374,18 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1415 struct usb_device *usb = interface_to_usbdev(intf); 1374 struct usb_device *usb = interface_to_usbdev(intf);
1416 int num_of_eps; 1375 int num_of_eps;
1417 u8 endpoint_num; 1376 u8 endpoint_num;
1377 struct brcmf_usbdev_info *devinfo;
1418 1378
1419 brcmf_dbg(TRACE, "enter\n"); 1379 brcmf_dbg(TRACE, "enter\n");
1420 1380
1421 usbdev_probe_info.usb = usb; 1381 devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
1422 usbdev_probe_info.intf = intf; 1382 if (devinfo == NULL)
1383 return -ENOMEM;
1423 1384
1424 if (id != NULL) { 1385 devinfo->usbdev = usb;
1425 usbdev_probe_info.vid = id->idVendor; 1386 devinfo->dev = &usb->dev;
1426 usbdev_probe_info.pid = id->idProduct;
1427 }
1428 1387
1429 usb_set_intfdata(intf, &usbdev_probe_info); 1388 usb_set_intfdata(intf, devinfo);
1430 1389
1431 /* Check that the device supports only one configuration */ 1390 /* Check that the device supports only one configuration */
1432 if (usb->descriptor.bNumConfigurations != 1) { 1391 if (usb->descriptor.bNumConfigurations != 1) {
@@ -1475,11 +1434,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1475 } 1434 }
1476 1435
1477 endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 1436 endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1478 usbdev_probe_info.intr_pipe = usb_rcvintpipe(usb, endpoint_num); 1437 devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
1479 1438
1480 usbdev_probe_info.rx_pipe = 0; 1439 devinfo->rx_pipe = 0;
1481 usbdev_probe_info.rx_pipe2 = 0; 1440 devinfo->rx_pipe2 = 0;
1482 usbdev_probe_info.tx_pipe = 0; 1441 devinfo->tx_pipe = 0;
1483 num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1; 1442 num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
1484 1443
1485 /* Check data endpoints and get pipes */ 1444 /* Check data endpoints and get pipes */
@@ -1496,35 +1455,33 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1496 USB_ENDPOINT_NUMBER_MASK; 1455 USB_ENDPOINT_NUMBER_MASK;
1497 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) 1456 if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
1498 == USB_DIR_IN) { 1457 == USB_DIR_IN) {
1499 if (!usbdev_probe_info.rx_pipe) { 1458 if (!devinfo->rx_pipe) {
1500 usbdev_probe_info.rx_pipe = 1459 devinfo->rx_pipe =
1501 usb_rcvbulkpipe(usb, endpoint_num); 1460 usb_rcvbulkpipe(usb, endpoint_num);
1502 } else { 1461 } else {
1503 usbdev_probe_info.rx_pipe2 = 1462 devinfo->rx_pipe2 =
1504 usb_rcvbulkpipe(usb, endpoint_num); 1463 usb_rcvbulkpipe(usb, endpoint_num);
1505 } 1464 }
1506 } else { 1465 } else {
1507 usbdev_probe_info.tx_pipe = 1466 devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
1508 usb_sndbulkpipe(usb, endpoint_num);
1509 } 1467 }
1510 } 1468 }
1511 1469
1512 /* Allocate interrupt URB and data buffer */ 1470 /* Allocate interrupt URB and data buffer */
1513 /* RNDIS says 8-byte intr, our old drivers used 4-byte */ 1471 /* RNDIS says 8-byte intr, our old drivers used 4-byte */
1514 if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16)) 1472 if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
1515 usbdev_probe_info.intr_size = 8; 1473 devinfo->intr_size = 8;
1516 else 1474 else
1517 usbdev_probe_info.intr_size = 4; 1475 devinfo->intr_size = 4;
1518 1476
1519 usbdev_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval; 1477 devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
1520 1478
1521 usbdev_probe_info.device_speed = usb->speed;
1522 if (usb->speed == USB_SPEED_HIGH) 1479 if (usb->speed == USB_SPEED_HIGH)
1523 brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n"); 1480 brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
1524 else 1481 else
1525 brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n"); 1482 brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
1526 1483
1527 ret = brcmf_usb_probe_cb(&usb->dev, "", USB_BUS, 0); 1484 ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
1528 if (ret) 1485 if (ret)
1529 goto fail; 1486 goto fail;
1530 1487
@@ -1533,6 +1490,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1533 1490
1534fail: 1491fail:
1535 brcmf_dbg(ERROR, "failed with errno %d\n", ret); 1492 brcmf_dbg(ERROR, "failed with errno %d\n", ret);
1493 kfree(devinfo);
1536 usb_set_intfdata(intf, NULL); 1494 usb_set_intfdata(intf, NULL);
1537 return ret; 1495 return ret;
1538 1496
@@ -1541,11 +1499,12 @@ fail:
1541static void 1499static void
1542brcmf_usb_disconnect(struct usb_interface *intf) 1500brcmf_usb_disconnect(struct usb_interface *intf)
1543{ 1501{
1544 struct usb_device *usb = interface_to_usbdev(intf); 1502 struct brcmf_usbdev_info *devinfo;
1545 1503
1546 brcmf_dbg(TRACE, "enter\n"); 1504 brcmf_dbg(TRACE, "enter\n");
1547 brcmf_usb_disconnect_cb(brcmf_usb_get_buspub(&usb->dev)); 1505 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
1548 usb_set_intfdata(intf, NULL); 1506 brcmf_usb_disconnect_cb(devinfo);
1507 kfree(devinfo);
1549} 1508}
1550 1509
1551/* 1510/*
@@ -1577,17 +1536,23 @@ static int brcmf_usb_resume(struct usb_interface *intf)
1577} 1536}
1578 1537
1579#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c 1538#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
1539#define BRCMF_USB_DEVICE_ID_43143 0xbd1e
1580#define BRCMF_USB_DEVICE_ID_43236 0xbd17 1540#define BRCMF_USB_DEVICE_ID_43236 0xbd17
1541#define BRCMF_USB_DEVICE_ID_43242 0xbd1f
1581#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc 1542#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc
1582 1543
1583static struct usb_device_id brcmf_usb_devid_table[] = { 1544static struct usb_device_id brcmf_usb_devid_table[] = {
1545 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
1584 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) }, 1546 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
1547 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
1585 /* special entry for device with firmware loaded and running */ 1548 /* special entry for device with firmware loaded and running */
1586 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) }, 1549 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
1587 { } 1550 { }
1588}; 1551};
1589MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table); 1552MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
1553MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
1590MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); 1554MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
1555MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
1591 1556
1592/* TODO: suspend and resume entries */ 1557/* TODO: suspend and resume entries */
1593static struct usb_driver brcmf_usbdrvr = { 1558static struct usb_driver brcmf_usbdrvr = {
@@ -1601,15 +1566,25 @@ static struct usb_driver brcmf_usbdrvr = {
1601 .disable_hub_initiated_lpm = 1, 1566 .disable_hub_initiated_lpm = 1,
1602}; 1567};
1603 1568
1569static void brcmf_release_fw(struct list_head *q)
1570{
1571 struct brcmf_usb_image *fw_image, *next;
1572
1573 list_for_each_entry_safe(fw_image, next, q, list) {
1574 vfree(fw_image->image);
1575 list_del_init(&fw_image->list);
1576 }
1577}
1578
1579
1604void brcmf_usb_exit(void) 1580void brcmf_usb_exit(void)
1605{ 1581{
1606 usb_deregister(&brcmf_usbdrvr); 1582 usb_deregister(&brcmf_usbdrvr);
1607 vfree(g_image.data); 1583 brcmf_release_fw(&fw_image_list);
1608 g_image.data = NULL;
1609 g_image.len = 0;
1610} 1584}
1611 1585
1612void brcmf_usb_init(void) 1586void brcmf_usb_init(void)
1613{ 1587{
1588 INIT_LIST_HEAD(&fw_image_list);
1614 usb_register(&brcmf_usbdrvr); 1589 usb_register(&brcmf_usbdrvr);
1615} 1590}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 50b5553b6964..c1abaa6db59e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -28,6 +28,7 @@
28#include <linux/ieee80211.h> 28#include <linux/ieee80211.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30#include <net/cfg80211.h> 30#include <net/cfg80211.h>
31#include <net/netlink.h>
31 32
32#include <brcmu_utils.h> 33#include <brcmu_utils.h>
33#include <defs.h> 34#include <defs.h>
@@ -35,6 +36,58 @@
35#include "dhd.h" 36#include "dhd.h"
36#include "wl_cfg80211.h" 37#include "wl_cfg80211.h"
37 38
39#define BRCMF_SCAN_IE_LEN_MAX 2048
40#define BRCMF_PNO_VERSION 2
41#define BRCMF_PNO_TIME 30
42#define BRCMF_PNO_REPEAT 4
43#define BRCMF_PNO_FREQ_EXPO_MAX 3
44#define BRCMF_PNO_MAX_PFN_COUNT 16
45#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
46#define BRCMF_PNO_HIDDEN_BIT 2
47#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
48#define BRCMF_PNO_SCAN_COMPLETE 1
49#define BRCMF_PNO_SCAN_INCOMPLETE 0
50
51#define TLV_LEN_OFF 1 /* length offset */
52#define TLV_HDR_LEN 2 /* header length */
53#define TLV_BODY_OFF 2 /* body offset */
54#define TLV_OUI_LEN 3 /* oui id length */
55#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
56#define WPA_OUI_TYPE 1
57#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */
58#define WME_OUI_TYPE 2
59
60#define VS_IE_FIXED_HDR_LEN 6
61#define WPA_IE_VERSION_LEN 2
62#define WPA_IE_MIN_OUI_LEN 4
63#define WPA_IE_SUITE_COUNT_LEN 2
64
65#define WPA_CIPHER_NONE 0 /* None */
66#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */
67#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */
68#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */
69#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
70
71#define RSN_AKM_NONE 0 /* None (IBSS) */
72#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
73#define RSN_AKM_PSK 2 /* Pre-shared Key */
74#define RSN_CAP_LEN 2 /* Length of RSN capabilities */
75#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
76
77#define VNDR_IE_CMD_LEN 4 /* length of the set command
78 * string :"add", "del" (+ NUL)
79 */
80#define VNDR_IE_COUNT_OFFSET 4
81#define VNDR_IE_PKTFLAG_OFFSET 8
82#define VNDR_IE_VSIE_OFFSET 12
83#define VNDR_IE_HDR_SIZE 12
84#define VNDR_IE_BEACON_FLAG 0x1
85#define VNDR_IE_PRBRSP_FLAG 0x2
86#define MAX_VNDR_IE_NUMBER 5
87
88#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
89#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
90
38#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ 91#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
39 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) 92 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
40 93
@@ -42,33 +95,12 @@ static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
42 95
43static u32 brcmf_dbg_level = WL_DBG_ERR; 96static u32 brcmf_dbg_level = WL_DBG_ERR;
44 97
45static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
46{
47 dev->driver_data = data;
48}
49
50static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
51{
52 void *data = NULL;
53
54 if (dev)
55 data = dev->driver_data;
56 return data;
57}
58
59static
60struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev)
61{
62 struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev);
63 return ci->cfg_priv;
64}
65
66static bool check_sys_up(struct wiphy *wiphy) 98static bool check_sys_up(struct wiphy *wiphy)
67{ 99{
68 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 100 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
69 if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) { 101 if (!test_bit(WL_STATUS_READY, &cfg->status)) {
70 WL_INFO("device is not ready : status (%d)\n", 102 WL_INFO("device is not ready : status (%d)\n",
71 (int)cfg_priv->status); 103 (int)cfg->status);
72 return false; 104 return false;
73 } 105 }
74 return true; 106 return true;
@@ -256,6 +288,25 @@ struct brcmf_tlv {
256 u8 data[1]; 288 u8 data[1];
257}; 289};
258 290
291/* Vendor specific ie. id = 221, oui and type defines exact ie */
292struct brcmf_vs_tlv {
293 u8 id;
294 u8 len;
295 u8 oui[3];
296 u8 oui_type;
297};
298
299struct parsed_vndr_ie_info {
300 u8 *ie_ptr;
301 u32 ie_len; /* total length including id & length field */
302 struct brcmf_vs_tlv vndrie;
303};
304
305struct parsed_vndr_ies {
306 u32 count;
307 struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
308};
309
259/* Quarter dBm units to mW 310/* Quarter dBm units to mW
260 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 311 * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
261 * Table is offset so the last entry is largest mW value that fits in 312 * Table is offset so the last entry is largest mW value that fits in
@@ -353,6 +404,44 @@ brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
353 return err; 404 return err;
354} 405}
355 406
407static s32
408brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
409 void *param, s32 paramlen,
410 void *buf, s32 buflen, s32 bssidx)
411{
412 s32 err = -ENOMEM;
413 u32 len;
414
415 len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
416 buf, buflen, bssidx);
417 BUG_ON(!len);
418 if (len > 0)
419 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
420 if (err)
421 WL_ERR("error (%d)\n", err);
422
423 return err;
424}
425
426static s32
427brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
428 void *param, s32 paramlen,
429 void *buf, s32 buflen, s32 bssidx)
430{
431 s32 err = -ENOMEM;
432 u32 len;
433
434 len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
435 buf, buflen, bssidx);
436 BUG_ON(!len);
437 if (len > 0)
438 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
439 if (err)
440 WL_ERR("error (%d)\n", err);
441
442 return err;
443}
444
356static void convert_key_from_CPU(struct brcmf_wsec_key *key, 445static void convert_key_from_CPU(struct brcmf_wsec_key *key,
357 struct brcmf_wsec_key_le *key_le) 446 struct brcmf_wsec_key_le *key_le)
358{ 447{
@@ -367,16 +456,22 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
367 memcpy(key_le->ea, key->ea, sizeof(key->ea)); 456 memcpy(key_le->ea, key->ea, sizeof(key->ea));
368} 457}
369 458
370static int send_key_to_dongle(struct net_device *ndev, 459static int
371 struct brcmf_wsec_key *key) 460send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
461 struct net_device *ndev, struct brcmf_wsec_key *key)
372{ 462{
373 int err; 463 int err;
374 struct brcmf_wsec_key_le key_le; 464 struct brcmf_wsec_key_le key_le;
375 465
376 convert_key_from_CPU(key, &key_le); 466 convert_key_from_CPU(key, &key_le);
377 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le)); 467
468 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
469 sizeof(key_le),
470 cfg->extra_buf,
471 WL_EXTRA_BUF_MAX, bssidx);
472
378 if (err) 473 if (err)
379 WL_ERR("WLC_SET_KEY error (%d)\n", err); 474 WL_ERR("wsec_key error (%d)\n", err);
380 return err; 475 return err;
381} 476}
382 477
@@ -385,14 +480,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
385 enum nl80211_iftype type, u32 *flags, 480 enum nl80211_iftype type, u32 *flags,
386 struct vif_params *params) 481 struct vif_params *params)
387{ 482{
388 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 483 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
389 struct wireless_dev *wdev;
390 s32 infra = 0; 484 s32 infra = 0;
485 s32 ap = 0;
391 s32 err = 0; 486 s32 err = 0;
392 487
393 WL_TRACE("Enter\n"); 488 WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type);
394 if (!check_sys_up(wiphy))
395 return -EIO;
396 489
397 switch (type) { 490 switch (type) {
398 case NL80211_IFTYPE_MONITOR: 491 case NL80211_IFTYPE_MONITOR:
@@ -401,29 +494,44 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
401 type); 494 type);
402 return -EOPNOTSUPP; 495 return -EOPNOTSUPP;
403 case NL80211_IFTYPE_ADHOC: 496 case NL80211_IFTYPE_ADHOC:
404 cfg_priv->conf->mode = WL_MODE_IBSS; 497 cfg->conf->mode = WL_MODE_IBSS;
405 infra = 0; 498 infra = 0;
406 break; 499 break;
407 case NL80211_IFTYPE_STATION: 500 case NL80211_IFTYPE_STATION:
408 cfg_priv->conf->mode = WL_MODE_BSS; 501 cfg->conf->mode = WL_MODE_BSS;
409 infra = 1; 502 infra = 1;
410 break; 503 break;
504 case NL80211_IFTYPE_AP:
505 cfg->conf->mode = WL_MODE_AP;
506 ap = 1;
507 break;
411 default: 508 default:
412 err = -EINVAL; 509 err = -EINVAL;
413 goto done; 510 goto done;
414 } 511 }
415 512
416 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); 513 if (ap) {
417 if (err) { 514 set_bit(WL_STATUS_AP_CREATING, &cfg->status);
418 WL_ERR("WLC_SET_INFRA error (%d)\n", err); 515 if (!cfg->ap_info)
419 err = -EAGAIN; 516 cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
517 GFP_KERNEL);
518 if (!cfg->ap_info) {
519 err = -ENOMEM;
520 goto done;
521 }
522 WL_INFO("IF Type = AP\n");
420 } else { 523 } else {
421 wdev = ndev->ieee80211_ptr; 524 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
422 wdev->iftype = type; 525 if (err) {
526 WL_ERR("WLC_SET_INFRA error (%d)\n", err);
527 err = -EAGAIN;
528 goto done;
529 }
530 WL_INFO("IF Type = %s\n",
531 (cfg->conf->mode == WL_MODE_IBSS) ?
532 "Adhoc" : "Infra");
423 } 533 }
424 534 ndev->ieee80211_ptr->iftype = type;
425 WL_INFO("IF Type = %s\n",
426 (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
427 535
428done: 536done:
429 WL_TRACE("Exit\n"); 537 WL_TRACE("Exit\n");
@@ -474,12 +582,55 @@ brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
474 return err; 582 return err;
475} 583}
476 584
585static s32
586brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
587 s32 bssidx)
588{
589 s8 buf[BRCMF_DCMD_SMLEN];
590 __le32 val_le;
591
592 val_le = cpu_to_le32(val);
593
594 return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
595 sizeof(val_le), buf, sizeof(buf),
596 bssidx);
597}
598
/*
 * brcmf_dev_intvar_get_bsscfg() - read a 32-bit bsscfg-indexed iovar.
 *
 * @ndev: net device used to issue the dongle command.
 * @name: NUL-terminated iovar name.
 * @val: in: query parameter sent to firmware; out: value read (CPU order).
 * @bssidx: BSS configuration index the iovar applies to.
 *
 * Returns 0 on success (with *val updated) or a negative error code, in
 * which case *val is left untouched.
 */
static s32
brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
			    s32 bssidx)
{
	s8 buf[BRCMF_DCMD_SMLEN];
	s32 err;
	__le32 val_le;

	memset(buf, 0, sizeof(buf));
	/* NOTE(review): the current *val is passed as the query parameter;
	 * presumably the firmware ignores or seeds from it — confirm against
	 * brcmf_c_mkiovar_bsscfg() semantics.
	 */
	err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
					    sizeof(buf), bssidx);
	if (err == 0) {
		/* firmware responds in little-endian; convert to CPU order */
		memcpy(&val_le, buf, sizeof(val_le));
		*val = le32_to_cpu(val_le);
	}
	return err;
}
616
617
/*
 * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
 * should return the ndev matching bssidx.
 *
 * Both parameters are currently unused; they exist so callers already pass
 * the information the future p2p-aware lookup will need.
 */
static s32
brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
{
	return 0;
}
627
477static void brcmf_set_mpc(struct net_device *ndev, int mpc) 628static void brcmf_set_mpc(struct net_device *ndev, int mpc)
478{ 629{
479 s32 err = 0; 630 s32 err = 0;
480 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 631 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
481 632
482 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { 633 if (test_bit(WL_STATUS_READY, &cfg->status)) {
483 err = brcmf_dev_intvar_set(ndev, "mpc", mpc); 634 err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
484 if (err) { 635 if (err) {
485 WL_ERR("fail to set mpc\n"); 636 WL_ERR("fail to set mpc\n");
@@ -489,8 +640,8 @@ static void brcmf_set_mpc(struct net_device *ndev, int mpc)
489 } 640 }
490} 641}
491 642
492static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, 643static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
493 struct brcmf_ssid *ssid) 644 struct brcmf_ssid *ssid)
494{ 645{
495 memcpy(params_le->bssid, ether_bcast, ETH_ALEN); 646 memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
496 params_le->bss_type = DOT11_BSSTYPE_ANY; 647 params_le->bss_type = DOT11_BSSTYPE_ANY;
@@ -546,7 +697,7 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
546 return -ENOMEM; 697 return -ENOMEM;
547 BUG_ON(params_size >= BRCMF_DCMD_SMLEN); 698 BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
548 699
549 wl_iscan_prep(&params->params_le, ssid); 700 brcmf_iscan_prep(&params->params_le, ssid);
550 701
551 params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION); 702 params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
552 params->action = cpu_to_le16(action); 703 params->action = cpu_to_le16(action);
@@ -565,10 +716,10 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
565 return err; 716 return err;
566} 717}
567 718
568static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv) 719static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
569{ 720{
570 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 721 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
571 struct net_device *ndev = cfg_to_ndev(cfg_priv); 722 struct net_device *ndev = cfg_to_ndev(cfg);
572 struct brcmf_ssid ssid; 723 struct brcmf_ssid ssid;
573 __le32 passive_scan; 724 __le32 passive_scan;
574 s32 err = 0; 725 s32 err = 0;
@@ -578,19 +729,19 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
578 729
579 iscan->state = WL_ISCAN_STATE_SCANING; 730 iscan->state = WL_ISCAN_STATE_SCANING;
580 731
581 passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); 732 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
582 err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN, 733 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
583 &passive_scan, sizeof(passive_scan)); 734 &passive_scan, sizeof(passive_scan));
584 if (err) { 735 if (err) {
585 WL_ERR("error (%d)\n", err); 736 WL_ERR("error (%d)\n", err);
586 return err; 737 return err;
587 } 738 }
588 brcmf_set_mpc(ndev, 0); 739 brcmf_set_mpc(ndev, 0);
589 cfg_priv->iscan_kickstart = true; 740 cfg->iscan_kickstart = true;
590 err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START); 741 err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
591 if (err) { 742 if (err) {
592 brcmf_set_mpc(ndev, 1); 743 brcmf_set_mpc(ndev, 1);
593 cfg_priv->iscan_kickstart = false; 744 cfg->iscan_kickstart = false;
594 return err; 745 return err;
595 } 746 }
596 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); 747 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -599,31 +750,31 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
599} 750}
600 751
601static s32 752static s32
602__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, 753brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
603 struct cfg80211_scan_request *request, 754 struct cfg80211_scan_request *request,
604 struct cfg80211_ssid *this_ssid) 755 struct cfg80211_ssid *this_ssid)
605{ 756{
606 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 757 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
607 struct cfg80211_ssid *ssids; 758 struct cfg80211_ssid *ssids;
608 struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int; 759 struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
609 __le32 passive_scan; 760 __le32 passive_scan;
610 bool iscan_req; 761 bool iscan_req;
611 bool spec_scan; 762 bool spec_scan;
612 s32 err = 0; 763 s32 err = 0;
613 u32 SSID_len; 764 u32 SSID_len;
614 765
615 if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { 766 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
616 WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status); 767 WL_ERR("Scanning already : status (%lu)\n", cfg->status);
617 return -EAGAIN; 768 return -EAGAIN;
618 } 769 }
619 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) { 770 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
620 WL_ERR("Scanning being aborted : status (%lu)\n", 771 WL_ERR("Scanning being aborted : status (%lu)\n",
621 cfg_priv->status); 772 cfg->status);
622 return -EAGAIN; 773 return -EAGAIN;
623 } 774 }
624 if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { 775 if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
625 WL_ERR("Connecting : status (%lu)\n", 776 WL_ERR("Connecting : status (%lu)\n",
626 cfg_priv->status); 777 cfg->status);
627 return -EAGAIN; 778 return -EAGAIN;
628 } 779 }
629 780
@@ -632,7 +783,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
632 if (request) { 783 if (request) {
633 /* scan bss */ 784 /* scan bss */
634 ssids = request->ssids; 785 ssids = request->ssids;
635 if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len)) 786 if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
636 iscan_req = true; 787 iscan_req = true;
637 } else { 788 } else {
638 /* scan in ibss */ 789 /* scan in ibss */
@@ -640,10 +791,10 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
640 ssids = this_ssid; 791 ssids = this_ssid;
641 } 792 }
642 793
643 cfg_priv->scan_request = request; 794 cfg->scan_request = request;
644 set_bit(WL_STATUS_SCANNING, &cfg_priv->status); 795 set_bit(WL_STATUS_SCANNING, &cfg->status);
645 if (iscan_req) { 796 if (iscan_req) {
646 err = brcmf_do_iscan(cfg_priv); 797 err = brcmf_do_iscan(cfg);
647 if (!err) 798 if (!err)
648 return err; 799 return err;
649 else 800 else
@@ -662,7 +813,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
662 WL_SCAN("Broadcast scan\n"); 813 WL_SCAN("Broadcast scan\n");
663 } 814 }
664 815
665 passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); 816 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
666 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, 817 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
667 &passive_scan, sizeof(passive_scan)); 818 &passive_scan, sizeof(passive_scan));
668 if (err) { 819 if (err) {
@@ -687,8 +838,346 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
687 return 0; 838 return 0;
688 839
689scan_out: 840scan_out:
690 clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); 841 clear_bit(WL_STATUS_SCANNING, &cfg->status);
691 cfg_priv->scan_request = NULL; 842 cfg->scan_request = NULL;
843 return err;
844}
845
/*
 * brcmf_escan_prep() - fill in firmware scan parameters from a cfg80211
 * scan request.
 *
 * @params_le: scan parameter block to populate (little-endian, as sent
 *	to firmware).
 * @request: cfg80211 scan request; when NULL the parameters describe an
 *	all-channel broadcast scan.
 *
 * Defaults are broadcast bssid, any BSS type and -1 ("firmware default")
 * for probes/dwell times. Channels are encoded as chanspecs into
 * channel_list[]; SSIDs are packed immediately after the channel list,
 * aligned to a u32 boundary. The combined SSID/channel counts are encoded
 * into channel_num at the end.
 */
static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
			     struct cfg80211_scan_request *request)
{
	u32 n_ssids;
	u32 n_channels;
	s32 i;
	s32 offset;
	u16 chanspec;
	u16 channel;
	struct ieee80211_channel *req_channel;
	char *ptr;
	struct brcmf_ssid_le ssid_le;

	memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
	params_le->bss_type = DOT11_BSSTYPE_ANY;
	params_le->scan_type = 0;
	params_le->channel_num = 0;
	/* -1 selects the firmware's default for each of these */
	params_le->nprobes = cpu_to_le32(-1);
	params_le->active_time = cpu_to_le32(-1);
	params_le->passive_time = cpu_to_le32(-1);
	params_le->home_time = cpu_to_le32(-1);
	memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));

	/* if request is null exit so it will be all channel broadcast scan */
	if (!request)
		return;

	n_ssids = request->n_ssids;
	n_channels = request->n_channels;
	/* Copy channel array if applicable */
	WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
	if (n_channels > 0) {
		for (i = 0; i < n_channels; i++) {
			chanspec = 0;
			req_channel = request->channels[i];
			channel = ieee80211_frequency_to_channel(
				req_channel->center_freq);
			if (req_channel->band == IEEE80211_BAND_2GHZ)
				chanspec |= WL_CHANSPEC_BAND_2G;
			else
				chanspec |= WL_CHANSPEC_BAND_5G;

			/* pick bandwidth/sideband bits from the HT40 flags */
			if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
				chanspec |= WL_CHANSPEC_BW_20;
				chanspec |= WL_CHANSPEC_CTL_SB_NONE;
			} else {
				chanspec |= WL_CHANSPEC_BW_40;
				if (req_channel->flags &
						IEEE80211_CHAN_NO_HT40PLUS)
					chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
				else
					chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
			}

			chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
			WL_SCAN("Chan : %d, Channel spec: %x\n",
				channel, chanspec);
			params_le->channel_list[i] = cpu_to_le16(chanspec);
		}
	} else {
		WL_SCAN("Scanning all channels\n");
	}
	/* Copy ssid array if applicable */
	WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids);
	if (n_ssids > 0) {
		/* SSIDs are packed right after the u16 channel list,
		 * rounded up to u32 alignment
		 */
		offset = offsetof(struct brcmf_scan_params_le, channel_list) +
				n_channels * sizeof(u16);
		offset = roundup(offset, sizeof(u32));
		ptr = (char *)params_le + offset;
		for (i = 0; i < n_ssids; i++) {
			memset(&ssid_le, 0, sizeof(ssid_le));
			ssid_le.SSID_len =
				cpu_to_le32(request->ssids[i].ssid_len);
			memcpy(ssid_le.SSID, request->ssids[i].ssid,
			       request->ssids[i].ssid_len);
			if (!ssid_le.SSID_len)
				WL_SCAN("%d: Broadcast scan\n", i);
			else
				WL_SCAN("%d: scan for %s size =%d\n", i,
					ssid_le.SSID, ssid_le.SSID_len);
			memcpy(ptr, &ssid_le, sizeof(ssid_le));
			ptr += sizeof(ssid_le);
		}
	} else {
		WL_SCAN("Broadcast scan %p\n", request->ssids);
		if ((request->ssids) && request->ssids->ssid_len) {
			/* NOTE(review): this log reads params_le->ssid_le.SSID
			 * before it is copied below, so it prints the zeroed
			 * buffer rather than the requested SSID — confirm and
			 * consider logging request->ssids->ssid instead.
			 */
			WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID,
				request->ssids->ssid_len);
			params_le->ssid_le.SSID_len =
				cpu_to_le32(request->ssids->ssid_len);
			memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
			       request->ssids->ssid_len);
		}
	}
	/* Adding mask to channel numbers */
	params_le->channel_num =
		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
			(n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
}
945
/*
 * brcmf_notify_escan_complete() - finish (or abort) an e-scan and notify
 * cfg80211.
 *
 * @cfg: driver private cfg80211 data.
 * @ndev: net device the scan ran on.
 * @aborted: report the scan as aborted to cfg80211.
 * @fw_abort: also tell the firmware to stop its scan engine.
 *
 * Returns 0 on success, -EPERM when no scan was in progress, or a negative
 * error from the firmware abort command.
 */
static s32
brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
			    struct net_device *ndev,
			    bool aborted, bool fw_abort)
{
	struct brcmf_scan_params_le params_le;
	struct cfg80211_scan_request *scan_request;
	s32 err = 0;

	WL_SCAN("Enter\n");

	/* clear scan request, because the FW abort can cause a second call */
	/* to this function and might cause a double cfg80211_scan_done */
	scan_request = cfg->scan_request;
	cfg->scan_request = NULL;

	/* stop the watchdog armed when the scan was started */
	if (timer_pending(&cfg->escan_timeout))
		del_timer_sync(&cfg->escan_timeout);

	if (fw_abort) {
		/* Do a scan abort to stop the driver's scan engine */
		WL_SCAN("ABORT scan in firmware\n");
		memset(&params_le, 0, sizeof(params_le));
		memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
		params_le.bss_type = DOT11_BSSTYPE_ANY;
		params_le.scan_type = 0;
		params_le.channel_num = cpu_to_le32(1);
		params_le.nprobes = cpu_to_le32(1);
		params_le.active_time = cpu_to_le32(-1);
		params_le.passive_time = cpu_to_le32(-1);
		params_le.home_time = cpu_to_le32(-1);
		/* Scan is aborted by setting channel_list[0] to -1 */
		params_le.channel_list[0] = cpu_to_le16(-1);
		/* E-Scan (or anyother type) can be aborted by SCAN */
		err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
			sizeof(params_le));
		if (err)
			WL_ERR("Scan abort failed\n");
	}
	/*
	 * e-scan can be initiated by scheduled scan
	 * which takes precedence.
	 */
	if (cfg->sched_escan) {
		WL_SCAN("scheduled scan completed\n");
		cfg->sched_escan = false;
		if (!aborted)
			cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
		brcmf_set_mpc(ndev, 1);
	} else if (scan_request) {
		WL_SCAN("ESCAN Completed scan: %s\n",
			aborted ? "Aborted" : "Done");
		cfg80211_scan_done(scan_request, aborted);
		brcmf_set_mpc(ndev, 1);
	}
	if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
		WL_ERR("Scan complete while device not scanning\n");
		return -EPERM;
	}

	return err;
}
1008
/*
 * brcmf_run_escan() - build and send an "escan" iovar to the firmware.
 *
 * @cfg: driver private cfg80211 data (supplies the iovar scratch buffer).
 * @ndev: net device to issue the command on.
 * @request: cfg80211 scan request; NULL means all-channel broadcast scan.
 * @action: escan action code (e.g. WL_ESCAN_ACTION_START).
 *
 * Returns 0 on success or a negative error code; -EBUSY from the firmware
 * is logged as informational since it simply cancels this escan.
 */
static s32
brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
		struct cfg80211_scan_request *request, u16 action)
{
	s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
			  offsetof(struct brcmf_escan_params_le, params_le);
	struct brcmf_escan_params_le *params;
	s32 err = 0;

	WL_SCAN("E-SCAN START\n");

	if (request != NULL) {
		/* Allocate space for populating channels in struct
		 * (u16 chanspecs, packed two per u32)
		 */
		params_size += sizeof(u32) * ((request->n_channels + 1) / 2);

		/* Allocate space for populating ssids in struct */
		params_size += sizeof(struct brcmf_ssid) * request->n_ssids;
	}

	params = kzalloc(params_size, GFP_KERNEL);
	if (!params) {
		err = -ENOMEM;
		goto exit;
	}
	/* the encoded iovar ("escan" + params) must fit the medium-size
	 * dongle command buffer used below
	 */
	BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
	brcmf_escan_prep(&params->params_le, request);
	params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
	params->action = cpu_to_le16(action);
	params->sync_id = cpu_to_le16(0x1234);

	err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
				     cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
	if (err) {
		if (err == -EBUSY)
			WL_INFO("system busy : escan canceled\n");
		else
			WL_ERR("error (%d)\n", err);
	}

	kfree(params);
exit:
	return err;
}
1052
/*
 * brcmf_do_escan() - kick off a firmware e-scan for a cfg80211 request.
 *
 * @cfg: driver private cfg80211 data.
 * @wiphy: wiphy the scan was requested on (stored for result delivery).
 * @ndev: net device to scan on.
 * @request: cfg80211 scan request.
 *
 * Records the scan context, programs active/passive mode, resets the
 * escan result buffer and starts the escan. MPC (power saving) is disabled
 * for the duration of the scan and re-enabled if the start fails.
 * Returns 0 on success or a negative error code.
 */
static s32
brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
	       struct net_device *ndev, struct cfg80211_scan_request *request)
{
	s32 err;
	__le32 passive_scan;
	struct brcmf_scan_results *results;

	WL_SCAN("Enter\n");
	cfg->escan_info.ndev = ndev;
	cfg->escan_info.wiphy = wiphy;
	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
	passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
	err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
			      &passive_scan, sizeof(passive_scan));
	if (err) {
		WL_ERR("error (%d)\n", err);
		return err;
	}
	brcmf_set_mpc(ndev, 0);
	/* reset the accumulated escan results before new partials arrive */
	results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
	results->version = 0;
	results->count = 0;
	results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;

	err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
	if (err)
		brcmf_set_mpc(ndev, 1);
	return err;
}
1083
/*
 * brcmf_cfg80211_escan() - e-scan entry point for cfg80211 scan requests.
 *
 * @wiphy: wiphy the request arrived on.
 * @ndev: net device to scan on.
 * @request: cfg80211 scan request; when non-NULL a full e-scan is run.
 * @this_ssid: single SSID used for the non-escan (IBSS) path when
 *	@request is NULL.
 *
 * Rejects the request with -EAGAIN while a scan is running, aborting, or a
 * connection attempt is in progress. On the escan path the heavy lifting is
 * done by brcmf_do_escan(); the fallback path issues a classic one-shot
 * BRCMF_C_SCAN. Returns 0 on success or a negative error code.
 */
static s32
brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
		     struct cfg80211_scan_request *request,
		     struct cfg80211_ssid *this_ssid)
{
	struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
	struct cfg80211_ssid *ssids;
	struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
	__le32 passive_scan;
	bool escan_req;
	bool spec_scan;
	s32 err;
	u32 SSID_len;

	WL_SCAN("START ESCAN\n");

	if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
		WL_ERR("Scanning already : status (%lu)\n", cfg->status);
		return -EAGAIN;
	}
	if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
		WL_ERR("Scanning being aborted : status (%lu)\n",
		       cfg->status);
		return -EAGAIN;
	}
	if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
		WL_ERR("Connecting : status (%lu)\n",
		       cfg->status);
		return -EAGAIN;
	}

	/* Arm scan timeout timer */
	mod_timer(&cfg->escan_timeout, jiffies +
		  WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);

	escan_req = false;
	if (request) {
		/* scan bss */
		ssids = request->ssids;
		escan_req = true;
	} else {
		/* scan in ibss */
		/* we don't do escan in ibss */
		ssids = this_ssid;
	}

	cfg->scan_request = request;
	set_bit(WL_STATUS_SCANNING, &cfg->status);
	if (escan_req) {
		err = brcmf_do_escan(cfg, wiphy, ndev, request);
		if (!err)
			return err;
		else
			goto scan_out;
	} else {
		WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
			ssids->ssid, ssids->ssid_len);
		memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
		/* clamp the SSID to what the firmware structure can hold */
		SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
		sr->ssid_le.SSID_len = cpu_to_le32(0);
		spec_scan = false;
		if (SSID_len) {
			memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
			sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
			spec_scan = true;
		} else
			WL_SCAN("Broadcast scan\n");

		passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
		err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
				      &passive_scan, sizeof(passive_scan));
		if (err) {
			WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
			goto scan_out;
		}
		/* disable MPC while the one-shot scan runs */
		brcmf_set_mpc(ndev, 0);
		err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
				      sizeof(sr->ssid_le));
		if (err) {
			if (err == -EBUSY)
				WL_INFO("BUSY: scan for \"%s\" canceled\n",
					sr->ssid_le.SSID);
			else
				WL_ERR("WLC_SCAN error (%d)\n", err);

			brcmf_set_mpc(ndev, 1);
			goto scan_out;
		}
	}

	return 0;

scan_out:
	/* undo the state set above so a later scan request can proceed */
	clear_bit(WL_STATUS_SCANNING, &cfg->status);
	if (timer_pending(&cfg->escan_timeout))
		del_timer_sync(&cfg->escan_timeout);
	cfg->scan_request = NULL;
	return err;
}
694 1183
@@ -697,6 +1186,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
697 struct cfg80211_scan_request *request) 1186 struct cfg80211_scan_request *request)
698{ 1187{
699 struct net_device *ndev = request->wdev->netdev; 1188 struct net_device *ndev = request->wdev->netdev;
1189 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
700 s32 err = 0; 1190 s32 err = 0;
701 1191
702 WL_TRACE("Enter\n"); 1192 WL_TRACE("Enter\n");
@@ -704,7 +1194,11 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
704 if (!check_sys_up(wiphy)) 1194 if (!check_sys_up(wiphy))
705 return -EIO; 1195 return -EIO;
706 1196
707 err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL); 1197 if (cfg->iscan_on)
1198 err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
1199 else if (cfg->escan_on)
1200 err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
1201
708 if (err) 1202 if (err)
709 WL_ERR("scan error (%d)\n", err); 1203 WL_ERR("scan error (%d)\n", err);
710 1204
@@ -749,8 +1243,8 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
749 1243
750static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) 1244static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
751{ 1245{
752 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1246 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
753 struct net_device *ndev = cfg_to_ndev(cfg_priv); 1247 struct net_device *ndev = cfg_to_ndev(cfg);
754 s32 err = 0; 1248 s32 err = 0;
755 1249
756 WL_TRACE("Enter\n"); 1250 WL_TRACE("Enter\n");
@@ -758,30 +1252,30 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
758 return -EIO; 1252 return -EIO;
759 1253
760 if (changed & WIPHY_PARAM_RTS_THRESHOLD && 1254 if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
761 (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) { 1255 (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
762 cfg_priv->conf->rts_threshold = wiphy->rts_threshold; 1256 cfg->conf->rts_threshold = wiphy->rts_threshold;
763 err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold); 1257 err = brcmf_set_rts(ndev, cfg->conf->rts_threshold);
764 if (!err) 1258 if (!err)
765 goto done; 1259 goto done;
766 } 1260 }
767 if (changed & WIPHY_PARAM_FRAG_THRESHOLD && 1261 if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
768 (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) { 1262 (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
769 cfg_priv->conf->frag_threshold = wiphy->frag_threshold; 1263 cfg->conf->frag_threshold = wiphy->frag_threshold;
770 err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold); 1264 err = brcmf_set_frag(ndev, cfg->conf->frag_threshold);
771 if (!err) 1265 if (!err)
772 goto done; 1266 goto done;
773 } 1267 }
774 if (changed & WIPHY_PARAM_RETRY_LONG 1268 if (changed & WIPHY_PARAM_RETRY_LONG
775 && (cfg_priv->conf->retry_long != wiphy->retry_long)) { 1269 && (cfg->conf->retry_long != wiphy->retry_long)) {
776 cfg_priv->conf->retry_long = wiphy->retry_long; 1270 cfg->conf->retry_long = wiphy->retry_long;
777 err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true); 1271 err = brcmf_set_retry(ndev, cfg->conf->retry_long, true);
778 if (!err) 1272 if (!err)
779 goto done; 1273 goto done;
780 } 1274 }
781 if (changed & WIPHY_PARAM_RETRY_SHORT 1275 if (changed & WIPHY_PARAM_RETRY_SHORT
782 && (cfg_priv->conf->retry_short != wiphy->retry_short)) { 1276 && (cfg->conf->retry_short != wiphy->retry_short)) {
783 cfg_priv->conf->retry_short = wiphy->retry_short; 1277 cfg->conf->retry_short = wiphy->retry_short;
784 err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false); 1278 err = brcmf_set_retry(ndev, cfg->conf->retry_short, false);
785 if (!err) 1279 if (!err)
786 goto done; 1280 goto done;
787 } 1281 }
@@ -791,61 +1285,6 @@ done:
791 return err; 1285 return err;
792} 1286}
793 1287
794static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
795{
796 switch (item) {
797 case WL_PROF_SEC:
798 return &cfg_priv->profile->sec;
799 case WL_PROF_BSSID:
800 return &cfg_priv->profile->bssid;
801 case WL_PROF_SSID:
802 return &cfg_priv->profile->ssid;
803 }
804 WL_ERR("invalid item (%d)\n", item);
805 return NULL;
806}
807
808static s32
809brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
810 const struct brcmf_event_msg *e, void *data, s32 item)
811{
812 s32 err = 0;
813 struct brcmf_ssid *ssid;
814
815 switch (item) {
816 case WL_PROF_SSID:
817 ssid = (struct brcmf_ssid *) data;
818 memset(cfg_priv->profile->ssid.SSID, 0,
819 sizeof(cfg_priv->profile->ssid.SSID));
820 memcpy(cfg_priv->profile->ssid.SSID,
821 ssid->SSID, ssid->SSID_len);
822 cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
823 break;
824 case WL_PROF_BSSID:
825 if (data)
826 memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
827 else
828 memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
829 break;
830 case WL_PROF_SEC:
831 memcpy(&cfg_priv->profile->sec, data,
832 sizeof(cfg_priv->profile->sec));
833 break;
834 case WL_PROF_BEACONINT:
835 cfg_priv->profile->beacon_interval = *(u16 *)data;
836 break;
837 case WL_PROF_DTIMPERIOD:
838 cfg_priv->profile->dtim_period = *(u8 *)data;
839 break;
840 default:
841 WL_ERR("unsupported item (%d)\n", item);
842 err = -EOPNOTSUPP;
843 break;
844 }
845
846 return err;
847}
848
849static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof) 1288static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
850{ 1289{
851 memset(prof, 0, sizeof(*prof)); 1290 memset(prof, 0, sizeof(*prof));
@@ -878,20 +1317,20 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
878 } 1317 }
879} 1318}
880 1319
881static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv) 1320static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
882{ 1321{
883 struct net_device *ndev = NULL; 1322 struct net_device *ndev = NULL;
884 s32 err = 0; 1323 s32 err = 0;
885 1324
886 WL_TRACE("Enter\n"); 1325 WL_TRACE("Enter\n");
887 1326
888 if (cfg_priv->link_up) { 1327 if (cfg->link_up) {
889 ndev = cfg_to_ndev(cfg_priv); 1328 ndev = cfg_to_ndev(cfg);
890 WL_INFO("Call WLC_DISASSOC to stop excess roaming\n "); 1329 WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
891 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0); 1330 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
892 if (err) 1331 if (err)
893 WL_ERR("WLC_DISASSOC failed (%d)\n", err); 1332 WL_ERR("WLC_DISASSOC failed (%d)\n", err);
894 cfg_priv->link_up = false; 1333 cfg->link_up = false;
895 } 1334 }
896 WL_TRACE("Exit\n"); 1335 WL_TRACE("Exit\n");
897} 1336}
@@ -900,13 +1339,13 @@ static s32
900brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, 1339brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
901 struct cfg80211_ibss_params *params) 1340 struct cfg80211_ibss_params *params)
902{ 1341{
903 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1342 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1343 struct brcmf_cfg80211_profile *profile = cfg->profile;
904 struct brcmf_join_params join_params; 1344 struct brcmf_join_params join_params;
905 size_t join_params_size = 0; 1345 size_t join_params_size = 0;
906 s32 err = 0; 1346 s32 err = 0;
907 s32 wsec = 0; 1347 s32 wsec = 0;
908 s32 bcnprd; 1348 s32 bcnprd;
909 struct brcmf_ssid ssid;
910 1349
911 WL_TRACE("Enter\n"); 1350 WL_TRACE("Enter\n");
912 if (!check_sys_up(wiphy)) 1351 if (!check_sys_up(wiphy))
@@ -919,7 +1358,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
919 return -EOPNOTSUPP; 1358 return -EOPNOTSUPP;
920 } 1359 }
921 1360
922 set_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1361 set_bit(WL_STATUS_CONNECTING, &cfg->status);
923 1362
924 if (params->bssid) 1363 if (params->bssid)
925 WL_CONN("BSSID: %pM\n", params->bssid); 1364 WL_CONN("BSSID: %pM\n", params->bssid);
@@ -982,40 +1421,38 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
982 memset(&join_params, 0, sizeof(struct brcmf_join_params)); 1421 memset(&join_params, 0, sizeof(struct brcmf_join_params));
983 1422
984 /* SSID */ 1423 /* SSID */
985 ssid.SSID_len = min_t(u32, params->ssid_len, 32); 1424 profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32);
986 memcpy(ssid.SSID, params->ssid, ssid.SSID_len); 1425 memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len);
987 memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len); 1426 memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len);
988 join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); 1427 join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
989 join_params_size = sizeof(join_params.ssid_le); 1428 join_params_size = sizeof(join_params.ssid_le);
990 brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
991 1429
992 /* BSSID */ 1430 /* BSSID */
993 if (params->bssid) { 1431 if (params->bssid) {
994 memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN); 1432 memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
995 join_params_size = sizeof(join_params.ssid_le) + 1433 join_params_size = sizeof(join_params.ssid_le) +
996 BRCMF_ASSOC_PARAMS_FIXED_SIZE; 1434 BRCMF_ASSOC_PARAMS_FIXED_SIZE;
1435 memcpy(profile->bssid, params->bssid, ETH_ALEN);
997 } else { 1436 } else {
998 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); 1437 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
1438 memset(profile->bssid, 0, ETH_ALEN);
999 } 1439 }
1000 1440
1001 brcmf_update_prof(cfg_priv, NULL,
1002 &join_params.params_le.bssid, WL_PROF_BSSID);
1003
1004 /* Channel */ 1441 /* Channel */
1005 if (params->channel) { 1442 if (params->channel) {
1006 u32 target_channel; 1443 u32 target_channel;
1007 1444
1008 cfg_priv->channel = 1445 cfg->channel =
1009 ieee80211_frequency_to_channel( 1446 ieee80211_frequency_to_channel(
1010 params->channel->center_freq); 1447 params->channel->center_freq);
1011 if (params->channel_fixed) { 1448 if (params->channel_fixed) {
1012 /* adding chanspec */ 1449 /* adding chanspec */
1013 brcmf_ch_to_chanspec(cfg_priv->channel, 1450 brcmf_ch_to_chanspec(cfg->channel,
1014 &join_params, &join_params_size); 1451 &join_params, &join_params_size);
1015 } 1452 }
1016 1453
1017 /* set channel for starter */ 1454 /* set channel for starter */
1018 target_channel = cfg_priv->channel; 1455 target_channel = cfg->channel;
1019 err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL, 1456 err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
1020 &target_channel); 1457 &target_channel);
1021 if (err) { 1458 if (err) {
@@ -1023,9 +1460,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1023 goto done; 1460 goto done;
1024 } 1461 }
1025 } else 1462 } else
1026 cfg_priv->channel = 0; 1463 cfg->channel = 0;
1027 1464
1028 cfg_priv->ibss_starter = false; 1465 cfg->ibss_starter = false;
1029 1466
1030 1467
1031 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, 1468 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
@@ -1037,7 +1474,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1037 1474
1038done: 1475done:
1039 if (err) 1476 if (err)
1040 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1477 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
1041 WL_TRACE("Exit\n"); 1478 WL_TRACE("Exit\n");
1042 return err; 1479 return err;
1043} 1480}
@@ -1045,14 +1482,14 @@ done:
1045static s32 1482static s32
1046brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) 1483brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1047{ 1484{
1048 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1485 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1049 s32 err = 0; 1486 s32 err = 0;
1050 1487
1051 WL_TRACE("Enter\n"); 1488 WL_TRACE("Enter\n");
1052 if (!check_sys_up(wiphy)) 1489 if (!check_sys_up(wiphy))
1053 return -EIO; 1490 return -EIO;
1054 1491
1055 brcmf_link_down(cfg_priv); 1492 brcmf_link_down(cfg);
1056 1493
1057 WL_TRACE("Exit\n"); 1494 WL_TRACE("Exit\n");
1058 1495
@@ -1062,7 +1499,8 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1062static s32 brcmf_set_wpa_version(struct net_device *ndev, 1499static s32 brcmf_set_wpa_version(struct net_device *ndev,
1063 struct cfg80211_connect_params *sme) 1500 struct cfg80211_connect_params *sme)
1064{ 1501{
1065 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1502 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1503 struct brcmf_cfg80211_profile *profile = cfg->profile;
1066 struct brcmf_cfg80211_security *sec; 1504 struct brcmf_cfg80211_security *sec;
1067 s32 val = 0; 1505 s32 val = 0;
1068 s32 err = 0; 1506 s32 err = 0;
@@ -1079,7 +1517,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
1079 WL_ERR("set wpa_auth failed (%d)\n", err); 1517 WL_ERR("set wpa_auth failed (%d)\n", err);
1080 return err; 1518 return err;
1081 } 1519 }
1082 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1520 sec = &profile->sec;
1083 sec->wpa_versions = sme->crypto.wpa_versions; 1521 sec->wpa_versions = sme->crypto.wpa_versions;
1084 return err; 1522 return err;
1085} 1523}
@@ -1087,7 +1525,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
1087static s32 brcmf_set_auth_type(struct net_device *ndev, 1525static s32 brcmf_set_auth_type(struct net_device *ndev,
1088 struct cfg80211_connect_params *sme) 1526 struct cfg80211_connect_params *sme)
1089{ 1527{
1090 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1528 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1529 struct brcmf_cfg80211_profile *profile = cfg->profile;
1091 struct brcmf_cfg80211_security *sec; 1530 struct brcmf_cfg80211_security *sec;
1092 s32 val = 0; 1531 s32 val = 0;
1093 s32 err = 0; 1532 s32 err = 0;
@@ -1118,7 +1557,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
1118 WL_ERR("set auth failed (%d)\n", err); 1557 WL_ERR("set auth failed (%d)\n", err);
1119 return err; 1558 return err;
1120 } 1559 }
1121 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1560 sec = &profile->sec;
1122 sec->auth_type = sme->auth_type; 1561 sec->auth_type = sme->auth_type;
1123 return err; 1562 return err;
1124} 1563}
@@ -1127,7 +1566,8 @@ static s32
1127brcmf_set_set_cipher(struct net_device *ndev, 1566brcmf_set_set_cipher(struct net_device *ndev,
1128 struct cfg80211_connect_params *sme) 1567 struct cfg80211_connect_params *sme)
1129{ 1568{
1130 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1569 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1570 struct brcmf_cfg80211_profile *profile = cfg->profile;
1131 struct brcmf_cfg80211_security *sec; 1571 struct brcmf_cfg80211_security *sec;
1132 s32 pval = 0; 1572 s32 pval = 0;
1133 s32 gval = 0; 1573 s32 gval = 0;
@@ -1183,7 +1623,7 @@ brcmf_set_set_cipher(struct net_device *ndev,
1183 return err; 1623 return err;
1184 } 1624 }
1185 1625
1186 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1626 sec = &profile->sec;
1187 sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; 1627 sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
1188 sec->cipher_group = sme->crypto.cipher_group; 1628 sec->cipher_group = sme->crypto.cipher_group;
1189 1629
@@ -1193,7 +1633,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
1193static s32 1633static s32
1194brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) 1634brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1195{ 1635{
1196 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1636 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1637 struct brcmf_cfg80211_profile *profile = cfg->profile;
1197 struct brcmf_cfg80211_security *sec; 1638 struct brcmf_cfg80211_security *sec;
1198 s32 val = 0; 1639 s32 val = 0;
1199 s32 err = 0; 1640 s32 err = 0;
@@ -1239,74 +1680,76 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1239 return err; 1680 return err;
1240 } 1681 }
1241 } 1682 }
1242 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1683 sec = &profile->sec;
1243 sec->wpa_auth = sme->crypto.akm_suites[0]; 1684 sec->wpa_auth = sme->crypto.akm_suites[0];
1244 1685
1245 return err; 1686 return err;
1246} 1687}
1247 1688
1248static s32 1689static s32
1249brcmf_set_wep_sharedkey(struct net_device *ndev, 1690brcmf_set_sharedkey(struct net_device *ndev,
1250 struct cfg80211_connect_params *sme) 1691 struct cfg80211_connect_params *sme)
1251{ 1692{
1252 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 1693 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1694 struct brcmf_cfg80211_profile *profile = cfg->profile;
1253 struct brcmf_cfg80211_security *sec; 1695 struct brcmf_cfg80211_security *sec;
1254 struct brcmf_wsec_key key; 1696 struct brcmf_wsec_key key;
1255 s32 val; 1697 s32 val;
1256 s32 err = 0; 1698 s32 err = 0;
1699 s32 bssidx;
1257 1700
1258 WL_CONN("key len (%d)\n", sme->key_len); 1701 WL_CONN("key len (%d)\n", sme->key_len);
1259 1702
1260 if (sme->key_len == 0) 1703 if (sme->key_len == 0)
1261 return 0; 1704 return 0;
1262 1705
1263 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 1706 sec = &profile->sec;
1264 WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n", 1707 WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
1265 sec->wpa_versions, sec->cipher_pairwise); 1708 sec->wpa_versions, sec->cipher_pairwise);
1266 1709
1267 if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) 1710 if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
1268 return 0; 1711 return 0;
1269 1712
1270 if (sec->cipher_pairwise & 1713 if (!(sec->cipher_pairwise &
1271 (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) { 1714 (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)))
1272 memset(&key, 0, sizeof(key)); 1715 return 0;
1273 key.len = (u32) sme->key_len;
1274 key.index = (u32) sme->key_idx;
1275 if (key.len > sizeof(key.data)) {
1276 WL_ERR("Too long key length (%u)\n", key.len);
1277 return -EINVAL;
1278 }
1279 memcpy(key.data, sme->key, key.len);
1280 key.flags = BRCMF_PRIMARY_KEY;
1281 switch (sec->cipher_pairwise) {
1282 case WLAN_CIPHER_SUITE_WEP40:
1283 key.algo = CRYPTO_ALGO_WEP1;
1284 break;
1285 case WLAN_CIPHER_SUITE_WEP104:
1286 key.algo = CRYPTO_ALGO_WEP128;
1287 break;
1288 default:
1289 WL_ERR("Invalid algorithm (%d)\n",
1290 sme->crypto.ciphers_pairwise[0]);
1291 return -EINVAL;
1292 }
1293 /* Set the new key/index */
1294 WL_CONN("key length (%d) key index (%d) algo (%d)\n",
1295 key.len, key.index, key.algo);
1296 WL_CONN("key \"%s\"\n", key.data);
1297 err = send_key_to_dongle(ndev, &key);
1298 if (err)
1299 return err;
1300 1716
1301 if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) { 1717 memset(&key, 0, sizeof(key));
1302 WL_CONN("set auth_type to shared key\n"); 1718 key.len = (u32) sme->key_len;
1303 val = 1; /* shared key */ 1719 key.index = (u32) sme->key_idx;
1304 err = brcmf_dev_intvar_set(ndev, "auth", val); 1720 if (key.len > sizeof(key.data)) {
1305 if (err) { 1721 WL_ERR("Too long key length (%u)\n", key.len);
1306 WL_ERR("set auth failed (%d)\n", err); 1722 return -EINVAL;
1307 return err; 1723 }
1308 } 1724 memcpy(key.data, sme->key, key.len);
1309 } 1725 key.flags = BRCMF_PRIMARY_KEY;
1726 switch (sec->cipher_pairwise) {
1727 case WLAN_CIPHER_SUITE_WEP40:
1728 key.algo = CRYPTO_ALGO_WEP1;
1729 break;
1730 case WLAN_CIPHER_SUITE_WEP104:
1731 key.algo = CRYPTO_ALGO_WEP128;
1732 break;
1733 default:
1734 WL_ERR("Invalid algorithm (%d)\n",
1735 sme->crypto.ciphers_pairwise[0]);
1736 return -EINVAL;
1737 }
1738 /* Set the new key/index */
1739 WL_CONN("key length (%d) key index (%d) algo (%d)\n",
1740 key.len, key.index, key.algo);
1741 WL_CONN("key \"%s\"\n", key.data);
1742 bssidx = brcmf_find_bssidx(cfg, ndev);
1743 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1744 if (err)
1745 return err;
1746
1747 if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
1748 WL_CONN("set auth_type to shared key\n");
1749 val = WL_AUTH_SHARED_KEY; /* shared key */
1750 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
1751 if (err)
1752 WL_ERR("set auth failed (%d)\n", err);
1310 } 1753 }
1311 return err; 1754 return err;
1312} 1755}
@@ -1315,7 +1758,8 @@ static s32
1315brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, 1758brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1316 struct cfg80211_connect_params *sme) 1759 struct cfg80211_connect_params *sme)
1317{ 1760{
1318 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1761 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1762 struct brcmf_cfg80211_profile *profile = cfg->profile;
1319 struct ieee80211_channel *chan = sme->channel; 1763 struct ieee80211_channel *chan = sme->channel;
1320 struct brcmf_join_params join_params; 1764 struct brcmf_join_params join_params;
1321 size_t join_params_size; 1765 size_t join_params_size;
@@ -1332,15 +1776,15 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1332 return -EOPNOTSUPP; 1776 return -EOPNOTSUPP;
1333 } 1777 }
1334 1778
1335 set_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1779 set_bit(WL_STATUS_CONNECTING, &cfg->status);
1336 1780
1337 if (chan) { 1781 if (chan) {
1338 cfg_priv->channel = 1782 cfg->channel =
1339 ieee80211_frequency_to_channel(chan->center_freq); 1783 ieee80211_frequency_to_channel(chan->center_freq);
1340 WL_CONN("channel (%d), center_req (%d)\n", 1784 WL_CONN("channel (%d), center_req (%d)\n",
1341 cfg_priv->channel, chan->center_freq); 1785 cfg->channel, chan->center_freq);
1342 } else 1786 } else
1343 cfg_priv->channel = 0; 1787 cfg->channel = 0;
1344 1788
1345 WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len); 1789 WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
1346 1790
@@ -1368,20 +1812,20 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1368 goto done; 1812 goto done;
1369 } 1813 }
1370 1814
1371 err = brcmf_set_wep_sharedkey(ndev, sme); 1815 err = brcmf_set_sharedkey(ndev, sme);
1372 if (err) { 1816 if (err) {
1373 WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err); 1817 WL_ERR("brcmf_set_sharedkey failed (%d)\n", err);
1374 goto done; 1818 goto done;
1375 } 1819 }
1376 1820
1377 memset(&join_params, 0, sizeof(join_params)); 1821 memset(&join_params, 0, sizeof(join_params));
1378 join_params_size = sizeof(join_params.ssid_le); 1822 join_params_size = sizeof(join_params.ssid_le);
1379 1823
1380 ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len); 1824 profile->ssid.SSID_len = min_t(u32,
1381 memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len); 1825 sizeof(ssid.SSID), (u32)sme->ssid_len);
1382 memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len); 1826 memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
1383 join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); 1827 memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
1384 brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID); 1828 join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
1385 1829
1386 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); 1830 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
1387 1831
@@ -1389,7 +1833,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1389 WL_CONN("ssid \"%s\", len (%d)\n", 1833 WL_CONN("ssid \"%s\", len (%d)\n",
1390 ssid.SSID, ssid.SSID_len); 1834 ssid.SSID, ssid.SSID_len);
1391 1835
1392 brcmf_ch_to_chanspec(cfg_priv->channel, 1836 brcmf_ch_to_chanspec(cfg->channel,
1393 &join_params, &join_params_size); 1837 &join_params, &join_params_size);
1394 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, 1838 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
1395 &join_params, join_params_size); 1839 &join_params, join_params_size);
@@ -1398,7 +1842,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1398 1842
1399done: 1843done:
1400 if (err) 1844 if (err)
1401 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 1845 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
1402 WL_TRACE("Exit\n"); 1846 WL_TRACE("Exit\n");
1403 return err; 1847 return err;
1404} 1848}
@@ -1407,7 +1851,8 @@ static s32
1407brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, 1851brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
1408 u16 reason_code) 1852 u16 reason_code)
1409{ 1853{
1410 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1854 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1855 struct brcmf_cfg80211_profile *profile = cfg->profile;
1411 struct brcmf_scb_val_le scbval; 1856 struct brcmf_scb_val_le scbval;
1412 s32 err = 0; 1857 s32 err = 0;
1413 1858
@@ -1415,16 +1860,16 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
1415 if (!check_sys_up(wiphy)) 1860 if (!check_sys_up(wiphy))
1416 return -EIO; 1861 return -EIO;
1417 1862
1418 clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 1863 clear_bit(WL_STATUS_CONNECTED, &cfg->status);
1419 1864
1420 memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN); 1865 memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
1421 scbval.val = cpu_to_le32(reason_code); 1866 scbval.val = cpu_to_le32(reason_code);
1422 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval, 1867 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
1423 sizeof(struct brcmf_scb_val_le)); 1868 sizeof(struct brcmf_scb_val_le));
1424 if (err) 1869 if (err)
1425 WL_ERR("error (%d)\n", err); 1870 WL_ERR("error (%d)\n", err);
1426 1871
1427 cfg_priv->link_up = false; 1872 cfg->link_up = false;
1428 1873
1429 WL_TRACE("Exit\n"); 1874 WL_TRACE("Exit\n");
1430 return err; 1875 return err;
@@ -1435,8 +1880,8 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1435 enum nl80211_tx_power_setting type, s32 mbm) 1880 enum nl80211_tx_power_setting type, s32 mbm)
1436{ 1881{
1437 1882
1438 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1883 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1439 struct net_device *ndev = cfg_to_ndev(cfg_priv); 1884 struct net_device *ndev = cfg_to_ndev(cfg);
1440 u16 txpwrmw; 1885 u16 txpwrmw;
1441 s32 err = 0; 1886 s32 err = 0;
1442 s32 disable = 0; 1887 s32 disable = 0;
@@ -1472,7 +1917,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1472 (s32) (brcmf_mw_to_qdbm(txpwrmw))); 1917 (s32) (brcmf_mw_to_qdbm(txpwrmw)));
1473 if (err) 1918 if (err)
1474 WL_ERR("qtxpower error (%d)\n", err); 1919 WL_ERR("qtxpower error (%d)\n", err);
1475 cfg_priv->conf->tx_power = dbm; 1920 cfg->conf->tx_power = dbm;
1476 1921
1477done: 1922done:
1478 WL_TRACE("Exit\n"); 1923 WL_TRACE("Exit\n");
@@ -1481,8 +1926,8 @@ done:
1481 1926
1482static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) 1927static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
1483{ 1928{
1484 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1929 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1485 struct net_device *ndev = cfg_to_ndev(cfg_priv); 1930 struct net_device *ndev = cfg_to_ndev(cfg);
1486 s32 txpwrdbm; 1931 s32 txpwrdbm;
1487 u8 result; 1932 u8 result;
1488 s32 err = 0; 1933 s32 err = 0;
@@ -1509,16 +1954,19 @@ static s32
1509brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, 1954brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
1510 u8 key_idx, bool unicast, bool multicast) 1955 u8 key_idx, bool unicast, bool multicast)
1511{ 1956{
1957 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1512 u32 index; 1958 u32 index;
1513 u32 wsec; 1959 u32 wsec;
1514 s32 err = 0; 1960 s32 err = 0;
1961 s32 bssidx;
1515 1962
1516 WL_TRACE("Enter\n"); 1963 WL_TRACE("Enter\n");
1517 WL_CONN("key index (%d)\n", key_idx); 1964 WL_CONN("key index (%d)\n", key_idx);
1518 if (!check_sys_up(wiphy)) 1965 if (!check_sys_up(wiphy))
1519 return -EIO; 1966 return -EIO;
1520 1967
1521 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec); 1968 bssidx = brcmf_find_bssidx(cfg, ndev);
1969 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1522 if (err) { 1970 if (err) {
1523 WL_ERR("WLC_GET_WSEC error (%d)\n", err); 1971 WL_ERR("WLC_GET_WSEC error (%d)\n", err);
1524 goto done; 1972 goto done;
@@ -1541,9 +1989,11 @@ static s32
1541brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, 1989brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1542 u8 key_idx, const u8 *mac_addr, struct key_params *params) 1990 u8 key_idx, const u8 *mac_addr, struct key_params *params)
1543{ 1991{
1992 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1544 struct brcmf_wsec_key key; 1993 struct brcmf_wsec_key key;
1545 struct brcmf_wsec_key_le key_le; 1994 struct brcmf_wsec_key_le key_le;
1546 s32 err = 0; 1995 s32 err = 0;
1996 s32 bssidx;
1547 1997
1548 memset(&key, 0, sizeof(key)); 1998 memset(&key, 0, sizeof(key));
1549 key.index = (u32) key_idx; 1999 key.index = (u32) key_idx;
@@ -1552,12 +2002,13 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1552 if (!is_multicast_ether_addr(mac_addr)) 2002 if (!is_multicast_ether_addr(mac_addr))
1553 memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN); 2003 memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
1554 key.len = (u32) params->key_len; 2004 key.len = (u32) params->key_len;
2005 bssidx = brcmf_find_bssidx(cfg, ndev);
1555 /* check for key index change */ 2006 /* check for key index change */
1556 if (key.len == 0) { 2007 if (key.len == 0) {
1557 /* key delete */ 2008 /* key delete */
1558 err = send_key_to_dongle(ndev, &key); 2009 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1559 if (err) 2010 if (err)
1560 return err; 2011 WL_ERR("key delete error (%d)\n", err);
1561 } else { 2012 } else {
1562 if (key.len > sizeof(key.data)) { 2013 if (key.len > sizeof(key.data)) {
1563 WL_ERR("Invalid key length (%d)\n", key.len); 2014 WL_ERR("Invalid key length (%d)\n", key.len);
@@ -1613,12 +2064,12 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1613 convert_key_from_CPU(&key, &key_le); 2064 convert_key_from_CPU(&key, &key_le);
1614 2065
1615 brcmf_netdev_wait_pend8021x(ndev); 2066 brcmf_netdev_wait_pend8021x(ndev);
1616 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, 2067 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
1617 sizeof(key_le)); 2068 sizeof(key_le),
1618 if (err) { 2069 cfg->extra_buf,
1619 WL_ERR("WLC_SET_KEY error (%d)\n", err); 2070 WL_EXTRA_BUF_MAX, bssidx);
1620 return err; 2071 if (err)
1621 } 2072 WL_ERR("wsec_key error (%d)\n", err);
1622 } 2073 }
1623 return err; 2074 return err;
1624} 2075}
@@ -1628,11 +2079,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1628 u8 key_idx, bool pairwise, const u8 *mac_addr, 2079 u8 key_idx, bool pairwise, const u8 *mac_addr,
1629 struct key_params *params) 2080 struct key_params *params)
1630{ 2081{
2082 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1631 struct brcmf_wsec_key key; 2083 struct brcmf_wsec_key key;
1632 s32 val; 2084 s32 val;
1633 s32 wsec; 2085 s32 wsec;
1634 s32 err = 0; 2086 s32 err = 0;
1635 u8 keybuf[8]; 2087 u8 keybuf[8];
2088 s32 bssidx;
1636 2089
1637 WL_TRACE("Enter\n"); 2090 WL_TRACE("Enter\n");
1638 WL_CONN("key index (%d)\n", key_idx); 2091 WL_CONN("key index (%d)\n", key_idx);
@@ -1659,25 +2112,33 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1659 switch (params->cipher) { 2112 switch (params->cipher) {
1660 case WLAN_CIPHER_SUITE_WEP40: 2113 case WLAN_CIPHER_SUITE_WEP40:
1661 key.algo = CRYPTO_ALGO_WEP1; 2114 key.algo = CRYPTO_ALGO_WEP1;
2115 val = WEP_ENABLED;
1662 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 2116 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
1663 break; 2117 break;
1664 case WLAN_CIPHER_SUITE_WEP104: 2118 case WLAN_CIPHER_SUITE_WEP104:
1665 key.algo = CRYPTO_ALGO_WEP128; 2119 key.algo = CRYPTO_ALGO_WEP128;
2120 val = WEP_ENABLED;
1666 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); 2121 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
1667 break; 2122 break;
1668 case WLAN_CIPHER_SUITE_TKIP: 2123 case WLAN_CIPHER_SUITE_TKIP:
1669 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 2124 if (cfg->conf->mode != WL_MODE_AP) {
1670 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 2125 WL_CONN("Swapping key\n");
1671 memcpy(&key.data[16], keybuf, sizeof(keybuf)); 2126 memcpy(keybuf, &key.data[24], sizeof(keybuf));
2127 memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
2128 memcpy(&key.data[16], keybuf, sizeof(keybuf));
2129 }
1672 key.algo = CRYPTO_ALGO_TKIP; 2130 key.algo = CRYPTO_ALGO_TKIP;
2131 val = TKIP_ENABLED;
1673 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); 2132 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
1674 break; 2133 break;
1675 case WLAN_CIPHER_SUITE_AES_CMAC: 2134 case WLAN_CIPHER_SUITE_AES_CMAC:
1676 key.algo = CRYPTO_ALGO_AES_CCM; 2135 key.algo = CRYPTO_ALGO_AES_CCM;
2136 val = AES_ENABLED;
1677 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); 2137 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
1678 break; 2138 break;
1679 case WLAN_CIPHER_SUITE_CCMP: 2139 case WLAN_CIPHER_SUITE_CCMP:
1680 key.algo = CRYPTO_ALGO_AES_CCM; 2140 key.algo = CRYPTO_ALGO_AES_CCM;
2141 val = AES_ENABLED;
1681 WL_CONN("WLAN_CIPHER_SUITE_CCMP\n"); 2142 WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
1682 break; 2143 break;
1683 default: 2144 default:
@@ -1686,28 +2147,23 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
1686 goto done; 2147 goto done;
1687 } 2148 }
1688 2149
1689 err = send_key_to_dongle(ndev, &key); /* Set the new key/index */ 2150 bssidx = brcmf_find_bssidx(cfg, ndev);
2151 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1690 if (err) 2152 if (err)
1691 goto done; 2153 goto done;
1692 2154
1693 val = WEP_ENABLED; 2155 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1694 err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
1695 if (err) { 2156 if (err) {
1696 WL_ERR("get wsec error (%d)\n", err); 2157 WL_ERR("get wsec error (%d)\n", err);
1697 goto done; 2158 goto done;
1698 } 2159 }
1699 wsec &= ~(WEP_ENABLED);
1700 wsec |= val; 2160 wsec |= val;
1701 err = brcmf_dev_intvar_set(ndev, "wsec", wsec); 2161 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
1702 if (err) { 2162 if (err) {
1703 WL_ERR("set wsec error (%d)\n", err); 2163 WL_ERR("set wsec error (%d)\n", err);
1704 goto done; 2164 goto done;
1705 } 2165 }
1706 2166
1707 val = 1; /* assume shared key. otherwise 0 */
1708 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
1709 if (err)
1710 WL_ERR("WLC_SET_AUTH error (%d)\n", err);
1711done: 2167done:
1712 WL_TRACE("Exit\n"); 2168 WL_TRACE("Exit\n");
1713 return err; 2169 return err;
@@ -1717,10 +2173,10 @@ static s32
1717brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 2173brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
1718 u8 key_idx, bool pairwise, const u8 *mac_addr) 2174 u8 key_idx, bool pairwise, const u8 *mac_addr)
1719{ 2175{
2176 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1720 struct brcmf_wsec_key key; 2177 struct brcmf_wsec_key key;
1721 s32 err = 0; 2178 s32 err = 0;
1722 s32 val; 2179 s32 bssidx;
1723 s32 wsec;
1724 2180
1725 WL_TRACE("Enter\n"); 2181 WL_TRACE("Enter\n");
1726 if (!check_sys_up(wiphy)) 2182 if (!check_sys_up(wiphy))
@@ -1735,7 +2191,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
1735 WL_CONN("key index (%d)\n", key_idx); 2191 WL_CONN("key index (%d)\n", key_idx);
1736 2192
1737 /* Set the new key/index */ 2193 /* Set the new key/index */
1738 err = send_key_to_dongle(ndev, &key); 2194 bssidx = brcmf_find_bssidx(cfg, ndev);
2195 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1739 if (err) { 2196 if (err) {
1740 if (err == -EINVAL) { 2197 if (err == -EINVAL) {
1741 if (key.index >= DOT11_MAX_DEFAULT_KEYS) 2198 if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -1744,35 +2201,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
1744 } 2201 }
1745 /* Ignore this error, may happen during DISASSOC */ 2202 /* Ignore this error, may happen during DISASSOC */
1746 err = -EAGAIN; 2203 err = -EAGAIN;
1747 goto done;
1748 } 2204 }
1749 2205
1750 val = 0;
1751 err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
1752 if (err) {
1753 WL_ERR("get wsec error (%d)\n", err);
1754 /* Ignore this error, may happen during DISASSOC */
1755 err = -EAGAIN;
1756 goto done;
1757 }
1758 wsec &= ~(WEP_ENABLED);
1759 wsec |= val;
1760 err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
1761 if (err) {
1762 WL_ERR("set wsec error (%d)\n", err);
1763 /* Ignore this error, may happen during DISASSOC */
1764 err = -EAGAIN;
1765 goto done;
1766 }
1767
1768 val = 0; /* assume open key. otherwise 1 */
1769 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
1770 if (err) {
1771 WL_ERR("WLC_SET_AUTH error (%d)\n", err);
1772 /* Ignore this error, may happen during DISASSOC */
1773 err = -EAGAIN;
1774 }
1775done:
1776 WL_TRACE("Exit\n"); 2206 WL_TRACE("Exit\n");
1777 return err; 2207 return err;
1778} 2208}
@@ -1783,10 +2213,12 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
1783 void (*callback) (void *cookie, struct key_params * params)) 2213 void (*callback) (void *cookie, struct key_params * params))
1784{ 2214{
1785 struct key_params params; 2215 struct key_params params;
1786 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 2216 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2217 struct brcmf_cfg80211_profile *profile = cfg->profile;
1787 struct brcmf_cfg80211_security *sec; 2218 struct brcmf_cfg80211_security *sec;
1788 s32 wsec; 2219 s32 wsec;
1789 s32 err = 0; 2220 s32 err = 0;
2221 s32 bssidx;
1790 2222
1791 WL_TRACE("Enter\n"); 2223 WL_TRACE("Enter\n");
1792 WL_CONN("key index (%d)\n", key_idx); 2224 WL_CONN("key index (%d)\n", key_idx);
@@ -1795,16 +2227,17 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
1795 2227
1796 memset(&params, 0, sizeof(params)); 2228 memset(&params, 0, sizeof(params));
1797 2229
1798 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec); 2230 bssidx = brcmf_find_bssidx(cfg, ndev);
2231 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1799 if (err) { 2232 if (err) {
1800 WL_ERR("WLC_GET_WSEC error (%d)\n", err); 2233 WL_ERR("WLC_GET_WSEC error (%d)\n", err);
1801 /* Ignore this error, may happen during DISASSOC */ 2234 /* Ignore this error, may happen during DISASSOC */
1802 err = -EAGAIN; 2235 err = -EAGAIN;
1803 goto done; 2236 goto done;
1804 } 2237 }
1805 switch (wsec) { 2238 switch (wsec & ~SES_OW_ENABLED) {
1806 case WEP_ENABLED: 2239 case WEP_ENABLED:
1807 sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); 2240 sec = &profile->sec;
1808 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { 2241 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
1809 params.cipher = WLAN_CIPHER_SUITE_WEP40; 2242 params.cipher = WLAN_CIPHER_SUITE_WEP40;
1810 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 2243 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
@@ -1844,53 +2277,73 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
1844 2277
1845static s32 2278static s32
1846brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, 2279brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
1847 u8 *mac, struct station_info *sinfo) 2280 u8 *mac, struct station_info *sinfo)
1848{ 2281{
1849 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 2282 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2283 struct brcmf_cfg80211_profile *profile = cfg->profile;
1850 struct brcmf_scb_val_le scb_val; 2284 struct brcmf_scb_val_le scb_val;
1851 int rssi; 2285 int rssi;
1852 s32 rate; 2286 s32 rate;
1853 s32 err = 0; 2287 s32 err = 0;
1854 u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID); 2288 u8 *bssid = profile->bssid;
2289 struct brcmf_sta_info_le *sta_info_le;
1855 2290
1856 WL_TRACE("Enter\n"); 2291 WL_TRACE("Enter, MAC %pM\n", mac);
1857 if (!check_sys_up(wiphy)) 2292 if (!check_sys_up(wiphy))
1858 return -EIO; 2293 return -EIO;
1859 2294
1860 if (memcmp(mac, bssid, ETH_ALEN)) { 2295 if (cfg->conf->mode == WL_MODE_AP) {
1861 WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X" 2296 err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
1862 "wl_bssid-%X:%X:%X:%X:%X:%X\n", 2297 cfg->dcmd_buf,
1863 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], 2298 WL_DCMD_LEN_MAX);
1864 bssid[0], bssid[1], bssid[2], bssid[3], 2299 if (err < 0) {
1865 bssid[4], bssid[5]); 2300 WL_ERR("GET STA INFO failed, %d\n", err);
1866 err = -ENOENT; 2301 goto done;
1867 goto done; 2302 }
1868 } 2303 sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
1869
1870 /* Report the current tx rate */
1871 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
1872 if (err) {
1873 WL_ERR("Could not get rate (%d)\n", err);
1874 } else {
1875 sinfo->filled |= STATION_INFO_TX_BITRATE;
1876 sinfo->txrate.legacy = rate * 5;
1877 WL_CONN("Rate %d Mbps\n", rate / 2);
1878 }
1879 2304
1880 if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) { 2305 sinfo->filled = STATION_INFO_INACTIVE_TIME;
1881 memset(&scb_val, 0, sizeof(scb_val)); 2306 sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
1882 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val, 2307 if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
1883 sizeof(struct brcmf_scb_val_le)); 2308 sinfo->filled |= STATION_INFO_CONNECTED_TIME;
2309 sinfo->connected_time = le32_to_cpu(sta_info_le->in);
2310 }
2311 WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
2312 sinfo->inactive_time, sinfo->connected_time);
2313 } else if (cfg->conf->mode == WL_MODE_BSS) {
2314 if (memcmp(mac, bssid, ETH_ALEN)) {
2315 WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
2316 mac, bssid);
2317 err = -ENOENT;
2318 goto done;
2319 }
2320 /* Report the current tx rate */
2321 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
1884 if (err) { 2322 if (err) {
1885 WL_ERR("Could not get rssi (%d)\n", err); 2323 WL_ERR("Could not get rate (%d)\n", err);
2324 goto done;
1886 } else { 2325 } else {
1887 rssi = le32_to_cpu(scb_val.val); 2326 sinfo->filled |= STATION_INFO_TX_BITRATE;
1888 sinfo->filled |= STATION_INFO_SIGNAL; 2327 sinfo->txrate.legacy = rate * 5;
1889 sinfo->signal = rssi; 2328 WL_CONN("Rate %d Mbps\n", rate / 2);
1890 WL_CONN("RSSI %d dBm\n", rssi);
1891 } 2329 }
1892 }
1893 2330
2331 if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
2332 memset(&scb_val, 0, sizeof(scb_val));
2333 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
2334 sizeof(scb_val));
2335 if (err) {
2336 WL_ERR("Could not get rssi (%d)\n", err);
2337 goto done;
2338 } else {
2339 rssi = le32_to_cpu(scb_val.val);
2340 sinfo->filled |= STATION_INFO_SIGNAL;
2341 sinfo->signal = rssi;
2342 WL_CONN("RSSI %d dBm\n", rssi);
2343 }
2344 }
2345 } else
2346 err = -EPERM;
1894done: 2347done:
1895 WL_TRACE("Exit\n"); 2348 WL_TRACE("Exit\n");
1896 return err; 2349 return err;
@@ -1902,7 +2355,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
1902{ 2355{
1903 s32 pm; 2356 s32 pm;
1904 s32 err = 0; 2357 s32 err = 0;
1905 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 2358 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1906 2359
1907 WL_TRACE("Enter\n"); 2360 WL_TRACE("Enter\n");
1908 2361
@@ -1910,14 +2363,13 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
1910 * Powersave enable/disable request is coming from the 2363 * Powersave enable/disable request is coming from the
1911 * cfg80211 even before the interface is up. In that 2364 * cfg80211 even before the interface is up. In that
1912 * scenario, driver will be storing the power save 2365 * scenario, driver will be storing the power save
1913 * preference in cfg_priv struct to apply this to 2366 * preference in cfg struct to apply this to
1914 * FW later while initializing the dongle 2367 * FW later while initializing the dongle
1915 */ 2368 */
1916 cfg_priv->pwr_save = enabled; 2369 cfg->pwr_save = enabled;
1917 if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) { 2370 if (!test_bit(WL_STATUS_READY, &cfg->status)) {
1918 2371
1919 WL_INFO("Device is not ready," 2372 WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
1920 "storing the value in cfg_priv struct\n");
1921 goto done; 2373 goto done;
1922 } 2374 }
1923 2375
@@ -1995,10 +2447,10 @@ done:
1995 return err; 2447 return err;
1996} 2448}
1997 2449
1998static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, 2450static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
1999 struct brcmf_bss_info_le *bi) 2451 struct brcmf_bss_info_le *bi)
2000{ 2452{
2001 struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); 2453 struct wiphy *wiphy = cfg_to_wiphy(cfg);
2002 struct ieee80211_channel *notify_channel; 2454 struct ieee80211_channel *notify_channel;
2003 struct cfg80211_bss *bss; 2455 struct cfg80211_bss *bss;
2004 struct ieee80211_supported_band *band; 2456 struct ieee80211_supported_band *band;
@@ -2062,14 +2514,14 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
2062 le32_to_cpu(bss->length)); 2514 le32_to_cpu(bss->length));
2063} 2515}
2064 2516
2065static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) 2517static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
2066{ 2518{
2067 struct brcmf_scan_results *bss_list; 2519 struct brcmf_scan_results *bss_list;
2068 struct brcmf_bss_info_le *bi = NULL; /* must be initialized */ 2520 struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
2069 s32 err = 0; 2521 s32 err = 0;
2070 int i; 2522 int i;
2071 2523
2072 bss_list = cfg_priv->bss_list; 2524 bss_list = cfg->bss_list;
2073 if (bss_list->version != BRCMF_BSS_INFO_VERSION) { 2525 if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
2074 WL_ERR("Version %d != WL_BSS_INFO_VERSION\n", 2526 WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
2075 bss_list->version); 2527 bss_list->version);
@@ -2078,17 +2530,17 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
2078 WL_SCAN("scanned AP count (%d)\n", bss_list->count); 2530 WL_SCAN("scanned AP count (%d)\n", bss_list->count);
2079 for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) { 2531 for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
2080 bi = next_bss_le(bss_list, bi); 2532 bi = next_bss_le(bss_list, bi);
2081 err = brcmf_inform_single_bss(cfg_priv, bi); 2533 err = brcmf_inform_single_bss(cfg, bi);
2082 if (err) 2534 if (err)
2083 break; 2535 break;
2084 } 2536 }
2085 return err; 2537 return err;
2086} 2538}
2087 2539
2088static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, 2540static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2089 struct net_device *ndev, const u8 *bssid) 2541 struct net_device *ndev, const u8 *bssid)
2090{ 2542{
2091 struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); 2543 struct wiphy *wiphy = cfg_to_wiphy(cfg);
2092 struct ieee80211_channel *notify_channel; 2544 struct ieee80211_channel *notify_channel;
2093 struct brcmf_bss_info_le *bi = NULL; 2545 struct brcmf_bss_info_le *bi = NULL;
2094 struct ieee80211_supported_band *band; 2546 struct ieee80211_supported_band *band;
@@ -2163,9 +2615,9 @@ CleanUp:
2163 return err; 2615 return err;
2164} 2616}
2165 2617
2166static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv) 2618static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg)
2167{ 2619{
2168 return cfg_priv->conf->mode == WL_MODE_IBSS; 2620 return cfg->conf->mode == WL_MODE_IBSS;
2169} 2621}
2170 2622
2171/* 2623/*
@@ -2182,22 +2634,62 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
2182 totlen = buflen; 2634 totlen = buflen;
2183 2635
2184 /* find tagged parameter */ 2636 /* find tagged parameter */
2185 while (totlen >= 2) { 2637 while (totlen >= TLV_HDR_LEN) {
2186 int len = elt->len; 2638 int len = elt->len;
2187 2639
2188 /* validate remaining totlen */ 2640 /* validate remaining totlen */
2189 if ((elt->id == key) && (totlen >= (len + 2))) 2641 if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
2190 return elt; 2642 return elt;
2191 2643
2192 elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2)); 2644 elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
2193 totlen -= (len + 2); 2645 totlen -= (len + TLV_HDR_LEN);
2194 } 2646 }
2195 2647
2196 return NULL; 2648 return NULL;
2197} 2649}
2198 2650
2199static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) 2651/* Is any of the tlvs the expected entry? If
2652 * not update the tlvs buffer pointer/length.
2653 */
2654static bool
2655brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
2656 u8 *oui, u32 oui_len, u8 type)
2657{
2658 /* If the contents match the OUI and the type */
2659 if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
2660 !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
2661 type == ie[TLV_BODY_OFF + oui_len]) {
2662 return true;
2663 }
2664
2665 if (tlvs == NULL)
2666 return false;
2667 /* point to the next ie */
2668 ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
2669 /* calculate the length of the rest of the buffer */
2670 *tlvs_len -= (int)(ie - *tlvs);
2671 /* update the pointer to the start of the buffer */
2672 *tlvs = ie;
2673
2674 return false;
2675}
2676
2677struct brcmf_vs_tlv *
2678brcmf_find_wpaie(u8 *parse, u32 len)
2679{
2680 struct brcmf_tlv *ie;
2681
2682 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
2683 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
2684 WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
2685 return (struct brcmf_vs_tlv *)ie;
2686 }
2687 return NULL;
2688}
2689
2690static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
2200{ 2691{
2692 struct brcmf_cfg80211_profile *profile = cfg->profile;
2201 struct brcmf_bss_info_le *bi; 2693 struct brcmf_bss_info_le *bi;
2202 struct brcmf_ssid *ssid; 2694 struct brcmf_ssid *ssid;
2203 struct brcmf_tlv *tim; 2695 struct brcmf_tlv *tim;
@@ -2208,21 +2700,21 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
2208 s32 err = 0; 2700 s32 err = 0;
2209 2701
2210 WL_TRACE("Enter\n"); 2702 WL_TRACE("Enter\n");
2211 if (brcmf_is_ibssmode(cfg_priv)) 2703 if (brcmf_is_ibssmode(cfg))
2212 return err; 2704 return err;
2213 2705
2214 ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID); 2706 ssid = &profile->ssid;
2215 2707
2216 *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); 2708 *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
2217 err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO, 2709 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
2218 cfg_priv->extra_buf, WL_EXTRA_BUF_MAX); 2710 cfg->extra_buf, WL_EXTRA_BUF_MAX);
2219 if (err) { 2711 if (err) {
2220 WL_ERR("Could not get bss info %d\n", err); 2712 WL_ERR("Could not get bss info %d\n", err);
2221 goto update_bss_info_out; 2713 goto update_bss_info_out;
2222 } 2714 }
2223 2715
2224 bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4); 2716 bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
2225 err = brcmf_inform_single_bss(cfg_priv, bi); 2717 err = brcmf_inform_single_bss(cfg, bi);
2226 if (err) 2718 if (err)
2227 goto update_bss_info_out; 2719 goto update_bss_info_out;
2228 2720
@@ -2240,7 +2732,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
2240 * so we speficially query dtim information to dongle. 2732 * so we speficially query dtim information to dongle.
2241 */ 2733 */
2242 u32 var; 2734 u32 var;
2243 err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv), 2735 err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
2244 "dtim_assoc", &var); 2736 "dtim_assoc", &var);
2245 if (err) { 2737 if (err) {
2246 WL_ERR("wl dtim_assoc failed (%d)\n", err); 2738 WL_ERR("wl dtim_assoc failed (%d)\n", err);
@@ -2249,20 +2741,22 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
2249 dtim_period = (u8)var; 2741 dtim_period = (u8)var;
2250 } 2742 }
2251 2743
2252 brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT); 2744 profile->beacon_interval = beacon_interval;
2253 brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD); 2745 profile->dtim_period = dtim_period;
2254 2746
2255update_bss_info_out: 2747update_bss_info_out:
2256 WL_TRACE("Exit"); 2748 WL_TRACE("Exit");
2257 return err; 2749 return err;
2258} 2750}
2259 2751
2260static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv) 2752static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
2261{ 2753{
2262 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 2754 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2755 struct escan_info *escan = &cfg->escan_info;
2263 struct brcmf_ssid ssid; 2756 struct brcmf_ssid ssid;
2264 2757
2265 if (cfg_priv->iscan_on) { 2758 set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
2759 if (cfg->iscan_on) {
2266 iscan->state = WL_ISCAN_STATE_IDLE; 2760 iscan->state = WL_ISCAN_STATE_IDLE;
2267 2761
2268 if (iscan->timer_on) { 2762 if (iscan->timer_on) {
@@ -2275,27 +2769,40 @@ static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
2275 /* Abort iscan running in FW */ 2769 /* Abort iscan running in FW */
2276 memset(&ssid, 0, sizeof(ssid)); 2770 memset(&ssid, 0, sizeof(ssid));
2277 brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT); 2771 brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
2772
2773 if (cfg->scan_request) {
2774 /* Indidate scan abort to cfg80211 layer */
2775 WL_INFO("Terminating scan in progress\n");
2776 cfg80211_scan_done(cfg->scan_request, true);
2777 cfg->scan_request = NULL;
2778 }
2779 }
2780 if (cfg->escan_on && cfg->scan_request) {
2781 escan->escan_state = WL_ESCAN_STATE_IDLE;
2782 brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
2278 } 2783 }
2784 clear_bit(WL_STATUS_SCANNING, &cfg->status);
2785 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
2279} 2786}
2280 2787
2281static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan, 2788static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
2282 bool aborted) 2789 bool aborted)
2283{ 2790{
2284 struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); 2791 struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
2285 struct net_device *ndev = cfg_to_ndev(cfg_priv); 2792 struct net_device *ndev = cfg_to_ndev(cfg);
2286 2793
2287 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { 2794 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
2288 WL_ERR("Scan complete while device not scanning\n"); 2795 WL_ERR("Scan complete while device not scanning\n");
2289 return; 2796 return;
2290 } 2797 }
2291 if (cfg_priv->scan_request) { 2798 if (cfg->scan_request) {
2292 WL_SCAN("ISCAN Completed scan: %s\n", 2799 WL_SCAN("ISCAN Completed scan: %s\n",
2293 aborted ? "Aborted" : "Done"); 2800 aborted ? "Aborted" : "Done");
2294 cfg80211_scan_done(cfg_priv->scan_request, aborted); 2801 cfg80211_scan_done(cfg->scan_request, aborted);
2295 brcmf_set_mpc(ndev, 1); 2802 brcmf_set_mpc(ndev, 1);
2296 cfg_priv->scan_request = NULL; 2803 cfg->scan_request = NULL;
2297 } 2804 }
2298 cfg_priv->iscan_kickstart = false; 2805 cfg->iscan_kickstart = false;
2299} 2806}
2300 2807
2301static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan) 2808static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
@@ -2348,21 +2855,21 @@ brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
2348 return err; 2855 return err;
2349} 2856}
2350 2857
2351static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv) 2858static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
2352{ 2859{
2353 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2860 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2354 s32 err = 0; 2861 s32 err = 0;
2355 2862
2356 iscan->state = WL_ISCAN_STATE_IDLE; 2863 iscan->state = WL_ISCAN_STATE_IDLE;
2357 brcmf_inform_bss(cfg_priv); 2864 brcmf_inform_bss(cfg);
2358 brcmf_notify_iscan_complete(iscan, false); 2865 brcmf_notify_iscan_complete(iscan, false);
2359 2866
2360 return err; 2867 return err;
2361} 2868}
2362 2869
2363static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv) 2870static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
2364{ 2871{
2365 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2872 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2366 s32 err = 0; 2873 s32 err = 0;
2367 2874
2368 /* Reschedule the timer */ 2875 /* Reschedule the timer */
@@ -2372,12 +2879,12 @@ static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
2372 return err; 2879 return err;
2373} 2880}
2374 2881
2375static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv) 2882static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
2376{ 2883{
2377 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2884 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2378 s32 err = 0; 2885 s32 err = 0;
2379 2886
2380 brcmf_inform_bss(cfg_priv); 2887 brcmf_inform_bss(cfg);
2381 brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE); 2888 brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
2382 /* Reschedule the timer */ 2889 /* Reschedule the timer */
2383 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); 2890 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -2386,9 +2893,9 @@ static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
2386 return err; 2893 return err;
2387} 2894}
2388 2895
2389static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv) 2896static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
2390{ 2897{
2391 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; 2898 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2392 s32 err = 0; 2899 s32 err = 0;
2393 2900
2394 iscan->state = WL_ISCAN_STATE_IDLE; 2901 iscan->state = WL_ISCAN_STATE_IDLE;
@@ -2402,7 +2909,7 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
2402 struct brcmf_cfg80211_iscan_ctrl *iscan = 2909 struct brcmf_cfg80211_iscan_ctrl *iscan =
2403 container_of(work, struct brcmf_cfg80211_iscan_ctrl, 2910 container_of(work, struct brcmf_cfg80211_iscan_ctrl,
2404 work); 2911 work);
2405 struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); 2912 struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
2406 struct brcmf_cfg80211_iscan_eloop *el = &iscan->el; 2913 struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
2407 u32 status = BRCMF_SCAN_RESULTS_PARTIAL; 2914 u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
2408 2915
@@ -2411,12 +2918,12 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
2411 iscan->timer_on = 0; 2918 iscan->timer_on = 0;
2412 } 2919 }
2413 2920
2414 if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) { 2921 if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
2415 status = BRCMF_SCAN_RESULTS_ABORTED; 2922 status = BRCMF_SCAN_RESULTS_ABORTED;
2416 WL_ERR("Abort iscan\n"); 2923 WL_ERR("Abort iscan\n");
2417 } 2924 }
2418 2925
2419 el->handler[status](cfg_priv); 2926 el->handler[status](cfg);
2420} 2927}
2421 2928
2422static void brcmf_iscan_timer(unsigned long data) 2929static void brcmf_iscan_timer(unsigned long data)
@@ -2431,11 +2938,11 @@ static void brcmf_iscan_timer(unsigned long data)
2431 } 2938 }
2432} 2939}
2433 2940
2434static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv) 2941static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
2435{ 2942{
2436 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 2943 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2437 2944
2438 if (cfg_priv->iscan_on) { 2945 if (cfg->iscan_on) {
2439 iscan->state = WL_ISCAN_STATE_IDLE; 2946 iscan->state = WL_ISCAN_STATE_IDLE;
2440 INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler); 2947 INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
2441 } 2948 }
@@ -2453,26 +2960,192 @@ static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
2453 el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted; 2960 el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
2454} 2961}
2455 2962
2456static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv) 2963static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
2457{ 2964{
2458 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); 2965 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2459 int err = 0; 2966 int err = 0;
2460 2967
2461 if (cfg_priv->iscan_on) { 2968 if (cfg->iscan_on) {
2462 iscan->ndev = cfg_to_ndev(cfg_priv); 2969 iscan->ndev = cfg_to_ndev(cfg);
2463 brcmf_init_iscan_eloop(&iscan->el); 2970 brcmf_init_iscan_eloop(&iscan->el);
2464 iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS; 2971 iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
2465 init_timer(&iscan->timer); 2972 init_timer(&iscan->timer);
2466 iscan->timer.data = (unsigned long) iscan; 2973 iscan->timer.data = (unsigned long) iscan;
2467 iscan->timer.function = brcmf_iscan_timer; 2974 iscan->timer.function = brcmf_iscan_timer;
2468 err = brcmf_invoke_iscan(cfg_priv); 2975 err = brcmf_invoke_iscan(cfg);
2469 if (!err) 2976 if (!err)
2470 iscan->data = cfg_priv; 2977 iscan->data = cfg;
2471 } 2978 }
2472 2979
2473 return err; 2980 return err;
2474} 2981}
2475 2982
2983static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
2984{
2985 struct brcmf_cfg80211_info *cfg =
2986 container_of(work, struct brcmf_cfg80211_info,
2987 escan_timeout_work);
2988
2989 brcmf_notify_escan_complete(cfg,
2990 cfg->escan_info.ndev, true, true);
2991}
2992
2993static void brcmf_escan_timeout(unsigned long data)
2994{
2995 struct brcmf_cfg80211_info *cfg =
2996 (struct brcmf_cfg80211_info *)data;
2997
2998 if (cfg->scan_request) {
2999 WL_ERR("timer expired\n");
3000 if (cfg->escan_on)
3001 schedule_work(&cfg->escan_timeout_work);
3002 }
3003}
3004
3005static s32
3006brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
3007 struct brcmf_bss_info_le *bss_info_le)
3008{
3009 if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
3010 (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) ==
3011 CHSPEC_BAND(le16_to_cpu(bss->chanspec))) &&
3012 bss_info_le->SSID_len == bss->SSID_len &&
3013 !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
3014 if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
3015 (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
3016 s16 bss_rssi = le16_to_cpu(bss->RSSI);
3017 s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
3018
3019 /* preserve max RSSI if the measurements are
3020 * both on-channel or both off-channel
3021 */
3022 if (bss_info_rssi > bss_rssi)
3023 bss->RSSI = bss_info_le->RSSI;
3024 } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
3025 (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
3026 /* preserve the on-channel rssi measurement
3027 * if the new measurement is off channel
3028 */
3029 bss->RSSI = bss_info_le->RSSI;
3030 bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
3031 }
3032 return 1;
3033 }
3034 return 0;
3035}
3036
3037static s32
3038brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3039 struct net_device *ndev,
3040 const struct brcmf_event_msg *e, void *data)
3041{
3042 s32 status;
3043 s32 err = 0;
3044 struct brcmf_escan_result_le *escan_result_le;
3045 struct brcmf_bss_info_le *bss_info_le;
3046 struct brcmf_bss_info_le *bss = NULL;
3047 u32 bi_length;
3048 struct brcmf_scan_results *list;
3049 u32 i;
3050 bool aborted;
3051
3052 status = be32_to_cpu(e->status);
3053
3054 if (!ndev || !cfg->escan_on ||
3055 !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
3056 WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
3057 ndev, cfg->escan_on,
3058 !test_bit(WL_STATUS_SCANNING, &cfg->status));
3059 return -EPERM;
3060 }
3061
3062 if (status == BRCMF_E_STATUS_PARTIAL) {
3063 WL_SCAN("ESCAN Partial result\n");
3064 escan_result_le = (struct brcmf_escan_result_le *) data;
3065 if (!escan_result_le) {
3066 WL_ERR("Invalid escan result (NULL pointer)\n");
3067 goto exit;
3068 }
3069 if (!cfg->scan_request) {
3070 WL_SCAN("result without cfg80211 request\n");
3071 goto exit;
3072 }
3073
3074 if (le16_to_cpu(escan_result_le->bss_count) != 1) {
3075 WL_ERR("Invalid bss_count %d: ignoring\n",
3076 escan_result_le->bss_count);
3077 goto exit;
3078 }
3079 bss_info_le = &escan_result_le->bss_info_le;
3080
3081 bi_length = le32_to_cpu(bss_info_le->length);
3082 if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
3083 WL_ESCAN_RESULTS_FIXED_SIZE)) {
3084 WL_ERR("Invalid bss_info length %d: ignoring\n",
3085 bi_length);
3086 goto exit;
3087 }
3088
3089 if (!(cfg_to_wiphy(cfg)->interface_modes &
3090 BIT(NL80211_IFTYPE_ADHOC))) {
3091 if (le16_to_cpu(bss_info_le->capability) &
3092 WLAN_CAPABILITY_IBSS) {
3093 WL_ERR("Ignoring IBSS result\n");
3094 goto exit;
3095 }
3096 }
3097
3098 list = (struct brcmf_scan_results *)
3099 cfg->escan_info.escan_buf;
3100 if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
3101 WL_ERR("Buffer is too small: ignoring\n");
3102 goto exit;
3103 }
3104
3105 for (i = 0; i < list->count; i++) {
3106 bss = bss ? (struct brcmf_bss_info_le *)
3107 ((unsigned char *)bss +
3108 le32_to_cpu(bss->length)) : list->bss_info_le;
3109 if (brcmf_compare_update_same_bss(bss, bss_info_le))
3110 goto exit;
3111 }
3112 memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
3113 bss_info_le, bi_length);
3114 list->version = le32_to_cpu(bss_info_le->version);
3115 list->buflen += bi_length;
3116 list->count++;
3117 } else {
3118 cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
3119 if (cfg->scan_request) {
3120 cfg->bss_list = (struct brcmf_scan_results *)
3121 cfg->escan_info.escan_buf;
3122 brcmf_inform_bss(cfg);
3123 aborted = status != BRCMF_E_STATUS_SUCCESS;
3124 brcmf_notify_escan_complete(cfg, ndev, aborted,
3125 false);
3126 } else
3127 WL_ERR("Unexpected scan result 0x%x\n", status);
3128 }
3129exit:
3130 return err;
3131}
3132
3133static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
3134{
3135
3136 if (cfg->escan_on) {
3137 cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
3138 brcmf_cfg80211_escan_handler;
3139 cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
3140 /* Init scan_timeout timer */
3141 init_timer(&cfg->escan_timeout);
3142 cfg->escan_timeout.data = (unsigned long) cfg;
3143 cfg->escan_timeout.function = brcmf_escan_timeout;
3144 INIT_WORK(&cfg->escan_timeout_work,
3145 brcmf_cfg80211_escan_timeout_worker);
3146 }
3147}
3148
2476static __always_inline void brcmf_delay(u32 ms) 3149static __always_inline void brcmf_delay(u32 ms)
2477{ 3150{
2478 if (ms < 1000 / HZ) { 3151 if (ms < 1000 / HZ) {
@@ -2485,7 +3158,7 @@ static __always_inline void brcmf_delay(u32 ms)
2485 3158
2486static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) 3159static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
2487{ 3160{
2488 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3161 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2489 3162
2490 /* 3163 /*
2491 * Check for WL_STATUS_READY before any function call which 3164 * Check for WL_STATUS_READY before any function call which
@@ -2494,7 +3167,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
2494 */ 3167 */
2495 WL_TRACE("Enter\n"); 3168 WL_TRACE("Enter\n");
2496 3169
2497 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) 3170 if (test_bit(WL_STATUS_READY, &cfg->status))
2498 brcmf_invoke_iscan(wiphy_to_cfg(wiphy)); 3171 brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
2499 3172
2500 WL_TRACE("Exit\n"); 3173 WL_TRACE("Exit\n");
@@ -2504,8 +3177,8 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
2504static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, 3177static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2505 struct cfg80211_wowlan *wow) 3178 struct cfg80211_wowlan *wow)
2506{ 3179{
2507 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3180 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2508 struct net_device *ndev = cfg_to_ndev(cfg_priv); 3181 struct net_device *ndev = cfg_to_ndev(cfg);
2509 3182
2510 WL_TRACE("Enter\n"); 3183 WL_TRACE("Enter\n");
2511 3184
@@ -2519,12 +3192,12 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2519 * While going to suspend if associated with AP disassociate 3192 * While going to suspend if associated with AP disassociate
2520 * from AP to save power while system is in suspended state 3193 * from AP to save power while system is in suspended state
2521 */ 3194 */
2522 if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || 3195 if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
2523 test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && 3196 test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
2524 test_bit(WL_STATUS_READY, &cfg_priv->status)) { 3197 test_bit(WL_STATUS_READY, &cfg->status)) {
2525 WL_INFO("Disassociating from AP" 3198 WL_INFO("Disassociating from AP"
2526 " while entering suspend state\n"); 3199 " while entering suspend state\n");
2527 brcmf_link_down(cfg_priv); 3200 brcmf_link_down(cfg);
2528 3201
2529 /* 3202 /*
2530 * Make sure WPA_Supplicant receives all the event 3203 * Make sure WPA_Supplicant receives all the event
@@ -2534,24 +3207,14 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2534 brcmf_delay(500); 3207 brcmf_delay(500);
2535 } 3208 }
2536 3209
2537 set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); 3210 if (test_bit(WL_STATUS_READY, &cfg->status))
2538 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) 3211 brcmf_abort_scanning(cfg);
2539 brcmf_term_iscan(cfg_priv); 3212 else
2540 3213 clear_bit(WL_STATUS_SCANNING, &cfg->status);
2541 if (cfg_priv->scan_request) {
2542 /* Indidate scan abort to cfg80211 layer */
2543 WL_INFO("Terminating scan in progress\n");
2544 cfg80211_scan_done(cfg_priv->scan_request, true);
2545 cfg_priv->scan_request = NULL;
2546 }
2547 clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
2548 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
2549 3214
2550 /* Turn off watchdog timer */ 3215 /* Turn off watchdog timer */
2551 if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { 3216 if (test_bit(WL_STATUS_READY, &cfg->status))
2552 WL_INFO("Enable MPC\n");
2553 brcmf_set_mpc(ndev, 1); 3217 brcmf_set_mpc(ndev, 1);
2554 }
2555 3218
2556 WL_TRACE("Exit\n"); 3219 WL_TRACE("Exit\n");
2557 3220
@@ -2561,14 +3224,14 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2561static __used s32 3224static __used s32
2562brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len) 3225brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
2563{ 3226{
2564 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 3227 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
2565 u32 buflen; 3228 u32 buflen;
2566 3229
2567 buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf, 3230 buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
2568 WL_DCMD_LEN_MAX); 3231 WL_DCMD_LEN_MAX);
2569 BUG_ON(!buflen); 3232 BUG_ON(!buflen);
2570 3233
2571 return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf, 3234 return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
2572 buflen); 3235 buflen);
2573} 3236}
2574 3237
@@ -2576,20 +3239,20 @@ static s32
2576brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf, 3239brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
2577 s32 buf_len) 3240 s32 buf_len)
2578{ 3241{
2579 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 3242 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
2580 u32 len; 3243 u32 len;
2581 s32 err = 0; 3244 s32 err = 0;
2582 3245
2583 len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf, 3246 len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
2584 WL_DCMD_LEN_MAX); 3247 WL_DCMD_LEN_MAX);
2585 BUG_ON(!len); 3248 BUG_ON(!len);
2586 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf, 3249 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
2587 WL_DCMD_LEN_MAX); 3250 WL_DCMD_LEN_MAX);
2588 if (err) { 3251 if (err) {
2589 WL_ERR("error (%d)\n", err); 3252 WL_ERR("error (%d)\n", err);
2590 return err; 3253 return err;
2591 } 3254 }
2592 memcpy(buf, cfg_priv->dcmd_buf, buf_len); 3255 memcpy(buf, cfg->dcmd_buf, buf_len);
2593 3256
2594 return err; 3257 return err;
2595} 3258}
@@ -2622,8 +3285,8 @@ static s32
2622brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, 3285brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2623 struct cfg80211_pmksa *pmksa) 3286 struct cfg80211_pmksa *pmksa)
2624{ 3287{
2625 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3288 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2626 struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids; 3289 struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
2627 s32 err = 0; 3290 s32 err = 0;
2628 int i; 3291 int i;
2629 int pmkid_len; 3292 int pmkid_len;
@@ -2651,7 +3314,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2651 for (i = 0; i < WLAN_PMKID_LEN; i++) 3314 for (i = 0; i < WLAN_PMKID_LEN; i++)
2652 WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]); 3315 WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
2653 3316
2654 err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); 3317 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
2655 3318
2656 WL_TRACE("Exit\n"); 3319 WL_TRACE("Exit\n");
2657 return err; 3320 return err;
@@ -2661,7 +3324,7 @@ static s32
2661brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, 3324brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2662 struct cfg80211_pmksa *pmksa) 3325 struct cfg80211_pmksa *pmksa)
2663{ 3326{
2664 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3327 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2665 struct pmkid_list pmkid; 3328 struct pmkid_list pmkid;
2666 s32 err = 0; 3329 s32 err = 0;
2667 int i, pmkid_len; 3330 int i, pmkid_len;
@@ -2678,30 +3341,30 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2678 for (i = 0; i < WLAN_PMKID_LEN; i++) 3341 for (i = 0; i < WLAN_PMKID_LEN; i++)
2679 WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]); 3342 WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
2680 3343
2681 pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid); 3344 pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
2682 for (i = 0; i < pmkid_len; i++) 3345 for (i = 0; i < pmkid_len; i++)
2683 if (!memcmp 3346 if (!memcmp
2684 (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, 3347 (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
2685 ETH_ALEN)) 3348 ETH_ALEN))
2686 break; 3349 break;
2687 3350
2688 if ((pmkid_len > 0) 3351 if ((pmkid_len > 0)
2689 && (i < pmkid_len)) { 3352 && (i < pmkid_len)) {
2690 memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0, 3353 memset(&cfg->pmk_list->pmkids.pmkid[i], 0,
2691 sizeof(struct pmkid)); 3354 sizeof(struct pmkid));
2692 for (; i < (pmkid_len - 1); i++) { 3355 for (; i < (pmkid_len - 1); i++) {
2693 memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, 3356 memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
2694 &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID, 3357 &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
2695 ETH_ALEN); 3358 ETH_ALEN);
2696 memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID, 3359 memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
2697 &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID, 3360 &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
2698 WLAN_PMKID_LEN); 3361 WLAN_PMKID_LEN);
2699 } 3362 }
2700 cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1); 3363 cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
2701 } else 3364 } else
2702 err = -EINVAL; 3365 err = -EINVAL;
2703 3366
2704 err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); 3367 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
2705 3368
2706 WL_TRACE("Exit\n"); 3369 WL_TRACE("Exit\n");
2707 return err; 3370 return err;
@@ -2711,21 +3374,979 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
2711static s32 3374static s32
2712brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) 3375brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
2713{ 3376{
2714 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 3377 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2715 s32 err = 0; 3378 s32 err = 0;
2716 3379
2717 WL_TRACE("Enter\n"); 3380 WL_TRACE("Enter\n");
2718 if (!check_sys_up(wiphy)) 3381 if (!check_sys_up(wiphy))
2719 return -EIO; 3382 return -EIO;
2720 3383
2721 memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list)); 3384 memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
2722 err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); 3385 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
2723 3386
2724 WL_TRACE("Exit\n"); 3387 WL_TRACE("Exit\n");
2725 return err; 3388 return err;
2726 3389
2727} 3390}
2728 3391
3392/*
3393 * PFN result doesn't have all the info which are
3394 * required by the supplicant
3395 * (For e.g IEs) Do a target Escan so that sched scan results are reported
3396 * via wl_inform_single_bss in the required format. Escan does require the
3397 * scan request in the form of cfg80211_scan_request. For timebeing, create
3398 * cfg80211_scan_request one out of the received PNO event.
3399 */
3400static s32
3401brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
3402 struct net_device *ndev,
3403 const struct brcmf_event_msg *e, void *data)
3404{
3405 struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
3406 struct cfg80211_scan_request *request = NULL;
3407 struct cfg80211_ssid *ssid = NULL;
3408 struct ieee80211_channel *channel = NULL;
3409 struct wiphy *wiphy = cfg_to_wiphy(cfg);
3410 int err = 0;
3411 int channel_req = 0;
3412 int band = 0;
3413 struct brcmf_pno_scanresults_le *pfn_result;
3414 u32 result_count;
3415 u32 status;
3416
3417 WL_SCAN("Enter\n");
3418
3419 if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
3420 WL_SCAN("PFN NET LOST event. Do Nothing\n");
3421 return 0;
3422 }
3423
3424 pfn_result = (struct brcmf_pno_scanresults_le *)data;
3425 result_count = le32_to_cpu(pfn_result->count);
3426 status = le32_to_cpu(pfn_result->status);
3427
3428 /*
3429 * PFN event is limited to fit 512 bytes so we may get
3430 * multiple NET_FOUND events. For now place a warning here.
3431 */
3432 WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
3433 WL_SCAN("PFN NET FOUND event. count: %d\n", result_count);
3434 if (result_count > 0) {
3435 int i;
3436
3437 request = kzalloc(sizeof(*request), GFP_KERNEL);
3438 ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
3439 channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
3440 if (!request || !ssid || !channel) {
3441 err = -ENOMEM;
3442 goto out_err;
3443 }
3444
3445 request->wiphy = wiphy;
3446 data += sizeof(struct brcmf_pno_scanresults_le);
3447 netinfo_start = (struct brcmf_pno_net_info_le *)data;
3448
3449 for (i = 0; i < result_count; i++) {
3450 netinfo = &netinfo_start[i];
3451 if (!netinfo) {
3452 WL_ERR("Invalid netinfo ptr. index: %d\n", i);
3453 err = -EINVAL;
3454 goto out_err;
3455 }
3456
3457 WL_SCAN("SSID:%s Channel:%d\n",
3458 netinfo->SSID, netinfo->channel);
3459 memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
3460 ssid[i].ssid_len = netinfo->SSID_len;
3461 request->n_ssids++;
3462
3463 channel_req = netinfo->channel;
3464 if (channel_req <= CH_MAX_2G_CHANNEL)
3465 band = NL80211_BAND_2GHZ;
3466 else
3467 band = NL80211_BAND_5GHZ;
3468 channel[i].center_freq =
3469 ieee80211_channel_to_frequency(channel_req,
3470 band);
3471 channel[i].band = band;
3472 channel[i].flags |= IEEE80211_CHAN_NO_HT40;
3473 request->channels[i] = &channel[i];
3474 request->n_channels++;
3475 }
3476
3477 /* assign parsed ssid array */
3478 if (request->n_ssids)
3479 request->ssids = &ssid[0];
3480
3481 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
3482 /* Abort any on-going scan */
3483 brcmf_abort_scanning(cfg);
3484 }
3485
3486 set_bit(WL_STATUS_SCANNING, &cfg->status);
3487 err = brcmf_do_escan(cfg, wiphy, ndev, request);
3488 if (err) {
3489 clear_bit(WL_STATUS_SCANNING, &cfg->status);
3490 goto out_err;
3491 }
3492 cfg->sched_escan = true;
3493 cfg->scan_request = request;
3494 } else {
3495 WL_ERR("FALSE PNO Event. (pfn_count == 0)\n");
3496 goto out_err;
3497 }
3498
3499 kfree(ssid);
3500 kfree(channel);
3501 kfree(request);
3502 return 0;
3503
3504out_err:
3505 kfree(ssid);
3506 kfree(channel);
3507 kfree(request);
3508 cfg80211_sched_scan_stopped(wiphy);
3509 return err;
3510}
3511
3512#ifndef CONFIG_BRCMISCAN
3513static int brcmf_dev_pno_clean(struct net_device *ndev)
3514{
3515 char iovbuf[128];
3516 int ret;
3517
3518 /* Disable pfn */
3519 ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
3520 if (ret == 0) {
3521 /* clear pfn */
3522 ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
3523 iovbuf, sizeof(iovbuf));
3524 }
3525 if (ret < 0)
3526 WL_ERR("failed code %d\n", ret);
3527
3528 return ret;
3529}
3530
3531static int brcmf_dev_pno_config(struct net_device *ndev)
3532{
3533 struct brcmf_pno_param_le pfn_param;
3534 char iovbuf[128];
3535
3536 memset(&pfn_param, 0, sizeof(pfn_param));
3537 pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
3538
3539 /* set extra pno params */
3540 pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
3541 pfn_param.repeat = BRCMF_PNO_REPEAT;
3542 pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
3543
3544 /* set up pno scan fr */
3545 pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
3546
3547 return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
3548 &pfn_param, sizeof(pfn_param),
3549 iovbuf, sizeof(iovbuf));
3550}
3551
3552static int
3553brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3554 struct net_device *ndev,
3555 struct cfg80211_sched_scan_request *request)
3556{
3557 char iovbuf[128];
3558 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
3559 struct brcmf_pno_net_param_le pfn;
3560 int i;
3561 int ret = 0;
3562
3563 WL_SCAN("Enter n_match_sets:%d n_ssids:%d\n",
3564 request->n_match_sets, request->n_ssids);
3565 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
3566 WL_ERR("Scanning already : status (%lu)\n", cfg->status);
3567 return -EAGAIN;
3568 }
3569
3570 if (!request || !request->n_ssids || !request->n_match_sets) {
3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
3572 request->n_ssids);
3573 return -EINVAL;
3574 }
3575
3576 if (request->n_ssids > 0) {
3577 for (i = 0; i < request->n_ssids; i++) {
3578 /* Active scan req for ssids */
3579 WL_SCAN(">>> Active scan req for ssid (%s)\n",
3580 request->ssids[i].ssid);
3581
3582 /*
3583 * match_set ssids is a supert set of n_ssid list,
3584 * so we need not add these set seperately.
3585 */
3586 }
3587 }
3588
3589 if (request->n_match_sets > 0) {
3590 /* clean up everything */
3591 ret = brcmf_dev_pno_clean(ndev);
3592 if (ret < 0) {
3593 WL_ERR("failed error=%d\n", ret);
3594 return ret;
3595 }
3596
3597 /* configure pno */
3598 ret = brcmf_dev_pno_config(ndev);
3599 if (ret < 0) {
3600 WL_ERR("PNO setup failed!! ret=%d\n", ret);
3601 return -EINVAL;
3602 }
3603
3604 /* configure each match set */
3605 for (i = 0; i < request->n_match_sets; i++) {
3606 struct cfg80211_ssid *ssid;
3607 u32 ssid_len;
3608
3609 ssid = &request->match_sets[i].ssid;
3610 ssid_len = ssid->ssid_len;
3611
3612 if (!ssid_len) {
3613 WL_ERR("skip broadcast ssid\n");
3614 continue;
3615 }
3616 pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
3617 pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
3618 pfn.wsec = cpu_to_le32(0);
3619 pfn.infra = cpu_to_le32(1);
3620 pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
3621 pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
3622 memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
3623 ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
3624 &pfn, sizeof(pfn),
3625 iovbuf, sizeof(iovbuf));
3626 WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
3627 ret == 0 ? "set" : "failed",
3628 ssid->ssid);
3629 }
3630 /* Enable the PNO */
3631 if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
3632 WL_ERR("PNO enable failed!! ret=%d\n", ret);
3633 return -EINVAL;
3634 }
3635 } else {
3636 return -EINVAL;
3637 }
3638
3639 return 0;
3640}
3641
3642static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
3643 struct net_device *ndev)
3644{
3645 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3646
3647 WL_SCAN("enter\n");
3648 brcmf_dev_pno_clean(ndev);
3649 if (cfg->sched_escan)
3650 brcmf_notify_escan_complete(cfg, ndev, true, true);
3651 return 0;
3652}
3653#endif /* CONFIG_BRCMISCAN */
3654
3655#ifdef CONFIG_NL80211_TESTMODE
3656static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
3657{
3658 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3659 struct net_device *ndev = cfg->wdev->netdev;
3660 struct brcmf_dcmd *dcmd = data;
3661 struct sk_buff *reply;
3662 int ret;
3663
3664 ret = brcmf_netlink_dcmd(ndev, dcmd);
3665 if (ret == 0) {
3666 reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
3667 nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
3668 ret = cfg80211_testmode_reply(reply);
3669 }
3670 return ret;
3671}
3672#endif
3673
3674static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
3675{
3676 s32 err;
3677
3678 /* set auth */
3679 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
3680 if (err < 0) {
3681 WL_ERR("auth error %d\n", err);
3682 return err;
3683 }
3684 /* set wsec */
3685 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
3686 if (err < 0) {
3687 WL_ERR("wsec error %d\n", err);
3688 return err;
3689 }
3690 /* set upper-layer auth */
3691 err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
3692 WPA_AUTH_NONE, bssidx);
3693 if (err < 0) {
3694 WL_ERR("wpa_auth error %d\n", err);
3695 return err;
3696 }
3697
3698 return 0;
3699}
3700
3701static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
3702{
3703 if (is_rsn_ie)
3704 return (memcmp(oui, RSN_OUI, TLV_OUI_LEN) == 0);
3705
3706 return (memcmp(oui, WPA_OUI, TLV_OUI_LEN) == 0);
3707}
3708
3709static s32
3710brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3711 bool is_rsn_ie, s32 bssidx)
3712{
3713 u32 auth = 0; /* d11 open authentication */
3714 u16 count;
3715 s32 err = 0;
3716 s32 len = 0;
3717 u32 i;
3718 u32 wsec;
3719 u32 pval = 0;
3720 u32 gval = 0;
3721 u32 wpa_auth = 0;
3722 u32 offset;
3723 u8 *data;
3724 u16 rsn_cap;
3725 u32 wme_bss_disable;
3726
3727 WL_TRACE("Enter\n");
3728 if (wpa_ie == NULL)
3729 goto exit;
3730
3731 len = wpa_ie->len + TLV_HDR_LEN;
3732 data = (u8 *)wpa_ie;
3733 offset = 0;
3734 if (!is_rsn_ie)
3735 offset += VS_IE_FIXED_HDR_LEN;
3736 offset += WPA_IE_VERSION_LEN;
3737
3738 /* check for multicast cipher suite */
3739 if (offset + WPA_IE_MIN_OUI_LEN > len) {
3740 err = -EINVAL;
3741 WL_ERR("no multicast cipher suite\n");
3742 goto exit;
3743 }
3744
3745 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3746 err = -EINVAL;
3747 WL_ERR("ivalid OUI\n");
3748 goto exit;
3749 }
3750 offset += TLV_OUI_LEN;
3751
3752 /* pick up multicast cipher */
3753 switch (data[offset]) {
3754 case WPA_CIPHER_NONE:
3755 gval = 0;
3756 break;
3757 case WPA_CIPHER_WEP_40:
3758 case WPA_CIPHER_WEP_104:
3759 gval = WEP_ENABLED;
3760 break;
3761 case WPA_CIPHER_TKIP:
3762 gval = TKIP_ENABLED;
3763 break;
3764 case WPA_CIPHER_AES_CCM:
3765 gval = AES_ENABLED;
3766 break;
3767 default:
3768 err = -EINVAL;
3769 WL_ERR("Invalid multi cast cipher info\n");
3770 goto exit;
3771 }
3772
3773 offset++;
3774 /* walk thru unicast cipher list and pick up what we recognize */
3775 count = data[offset] + (data[offset + 1] << 8);
3776 offset += WPA_IE_SUITE_COUNT_LEN;
3777 /* Check for unicast suite(s) */
3778 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
3779 err = -EINVAL;
3780 WL_ERR("no unicast cipher suite\n");
3781 goto exit;
3782 }
3783 for (i = 0; i < count; i++) {
3784 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3785 err = -EINVAL;
3786 WL_ERR("ivalid OUI\n");
3787 goto exit;
3788 }
3789 offset += TLV_OUI_LEN;
3790 switch (data[offset]) {
3791 case WPA_CIPHER_NONE:
3792 break;
3793 case WPA_CIPHER_WEP_40:
3794 case WPA_CIPHER_WEP_104:
3795 pval |= WEP_ENABLED;
3796 break;
3797 case WPA_CIPHER_TKIP:
3798 pval |= TKIP_ENABLED;
3799 break;
3800 case WPA_CIPHER_AES_CCM:
3801 pval |= AES_ENABLED;
3802 break;
3803 default:
3804 WL_ERR("Ivalid unicast security info\n");
3805 }
3806 offset++;
3807 }
3808 /* walk thru auth management suite list and pick up what we recognize */
3809 count = data[offset] + (data[offset + 1] << 8);
3810 offset += WPA_IE_SUITE_COUNT_LEN;
3811 /* Check for auth key management suite(s) */
3812 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
3813 err = -EINVAL;
3814 WL_ERR("no auth key mgmt suite\n");
3815 goto exit;
3816 }
3817 for (i = 0; i < count; i++) {
3818 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3819 err = -EINVAL;
3820 WL_ERR("ivalid OUI\n");
3821 goto exit;
3822 }
3823 offset += TLV_OUI_LEN;
3824 switch (data[offset]) {
3825 case RSN_AKM_NONE:
3826 WL_TRACE("RSN_AKM_NONE\n");
3827 wpa_auth |= WPA_AUTH_NONE;
3828 break;
3829 case RSN_AKM_UNSPECIFIED:
3830 WL_TRACE("RSN_AKM_UNSPECIFIED\n");
3831 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
3832 (wpa_auth |= WPA_AUTH_UNSPECIFIED);
3833 break;
3834 case RSN_AKM_PSK:
3835 WL_TRACE("RSN_AKM_PSK\n");
3836 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
3837 (wpa_auth |= WPA_AUTH_PSK);
3838 break;
3839 default:
3840 WL_ERR("Ivalid key mgmt info\n");
3841 }
3842 offset++;
3843 }
3844
3845 if (is_rsn_ie) {
3846 wme_bss_disable = 1;
3847 if ((offset + RSN_CAP_LEN) <= len) {
3848 rsn_cap = data[offset] + (data[offset + 1] << 8);
3849 if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
3850 wme_bss_disable = 0;
3851 }
3852 /* set wme_bss_disable to sync RSN Capabilities */
3853 err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
3854 wme_bss_disable, bssidx);
3855 if (err < 0) {
3856 WL_ERR("wme_bss_disable error %d\n", err);
3857 goto exit;
3858 }
3859 }
3860 /* FOR WPS , set SES_OW_ENABLED */
3861 wsec = (pval | gval | SES_OW_ENABLED);
3862
3863 /* set auth */
3864 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
3865 if (err < 0) {
3866 WL_ERR("auth error %d\n", err);
3867 goto exit;
3868 }
3869 /* set wsec */
3870 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
3871 if (err < 0) {
3872 WL_ERR("wsec error %d\n", err);
3873 goto exit;
3874 }
3875 /* set upper-layer auth */
3876 err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
3877 if (err < 0) {
3878 WL_ERR("wpa_auth error %d\n", err);
3879 goto exit;
3880 }
3881
3882exit:
3883 return err;
3884}
3885
3886static s32
3887brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
3888 struct parsed_vndr_ies *vndr_ies)
3889{
3890 s32 err = 0;
3891 struct brcmf_vs_tlv *vndrie;
3892 struct brcmf_tlv *ie;
3893 struct parsed_vndr_ie_info *parsed_info;
3894 s32 remaining_len;
3895
3896 remaining_len = (s32)vndr_ie_len;
3897 memset(vndr_ies, 0, sizeof(*vndr_ies));
3898
3899 ie = (struct brcmf_tlv *)vndr_ie_buf;
3900 while (ie) {
3901 if (ie->id != WLAN_EID_VENDOR_SPECIFIC)
3902 goto next;
3903 vndrie = (struct brcmf_vs_tlv *)ie;
3904 /* len should be bigger than OUI length + one */
3905 if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
3906 WL_ERR("invalid vndr ie. length is too small %d\n",
3907 vndrie->len);
3908 goto next;
3909 }
3910 /* if wpa or wme ie, do not add ie */
3911 if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
3912 ((vndrie->oui_type == WPA_OUI_TYPE) ||
3913 (vndrie->oui_type == WME_OUI_TYPE))) {
3914 WL_TRACE("Found WPA/WME oui. Do not add it\n");
3915 goto next;
3916 }
3917
3918 parsed_info = &vndr_ies->ie_info[vndr_ies->count];
3919
3920 /* save vndr ie information */
3921 parsed_info->ie_ptr = (char *)vndrie;
3922 parsed_info->ie_len = vndrie->len + TLV_HDR_LEN;
3923 memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie));
3924
3925 vndr_ies->count++;
3926
3927 WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n",
3928 parsed_info->vndrie.oui[0],
3929 parsed_info->vndrie.oui[1],
3930 parsed_info->vndrie.oui[2],
3931 parsed_info->vndrie.oui_type);
3932
3933 if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
3934 break;
3935next:
3936 remaining_len -= ie->len;
3937 if (remaining_len <= 2)
3938 ie = NULL;
3939 else
3940 ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len);
3941 }
3942 return err;
3943}
3944
3945static u32
3946brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
3947{
3948
3949 __le32 iecount_le;
3950 __le32 pktflag_le;
3951
3952 strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
3953 iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
3954
3955 iecount_le = cpu_to_le32(1);
3956 memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
3957
3958 pktflag_le = cpu_to_le32(pktflag);
3959 memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
3960
3961 memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
3962
3963 return ie_len + VNDR_IE_HDR_SIZE;
3964}
3965
3966s32
3967brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3968 struct net_device *ndev, s32 bssidx, s32 pktflag,
3969 u8 *vndr_ie_buf, u32 vndr_ie_len)
3970{
3971 s32 err = 0;
3972 u8 *iovar_ie_buf;
3973 u8 *curr_ie_buf;
3974 u8 *mgmt_ie_buf = NULL;
3975 u32 mgmt_ie_buf_len = 0;
3976 u32 *mgmt_ie_len = 0;
3977 u32 del_add_ie_buf_len = 0;
3978 u32 total_ie_buf_len = 0;
3979 u32 parsed_ie_buf_len = 0;
3980 struct parsed_vndr_ies old_vndr_ies;
3981 struct parsed_vndr_ies new_vndr_ies;
3982 struct parsed_vndr_ie_info *vndrie_info;
3983 s32 i;
3984 u8 *ptr;
3985 u32 remained_buf_len;
3986
3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
3989 if (!iovar_ie_buf)
3990 return -ENOMEM;
3991 curr_ie_buf = iovar_ie_buf;
3992 if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
3993 test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
3994 switch (pktflag) {
3995 case VNDR_IE_PRBRSP_FLAG:
3996 mgmt_ie_buf = cfg->ap_info->probe_res_ie;
3997 mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
3998 mgmt_ie_buf_len =
3999 sizeof(cfg->ap_info->probe_res_ie);
4000 break;
4001 case VNDR_IE_BEACON_FLAG:
4002 mgmt_ie_buf = cfg->ap_info->beacon_ie;
4003 mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
4004 mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
4005 break;
4006 default:
4007 err = -EPERM;
4008 WL_ERR("not suitable type\n");
4009 goto exit;
4010 }
4011 bssidx = 0;
4012 } else {
4013 err = -EPERM;
4014 WL_ERR("not suitable type\n");
4015 goto exit;
4016 }
4017
4018 if (vndr_ie_len > mgmt_ie_buf_len) {
4019 err = -ENOMEM;
4020 WL_ERR("extra IE size too big\n");
4021 goto exit;
4022 }
4023
4024 /* parse and save new vndr_ie in curr_ie_buff before comparing it */
4025 if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) {
4026 ptr = curr_ie_buf;
4027 brcmf_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies);
4028 for (i = 0; i < new_vndr_ies.count; i++) {
4029 vndrie_info = &new_vndr_ies.ie_info[i];
4030 memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
4031 vndrie_info->ie_len);
4032 parsed_ie_buf_len += vndrie_info->ie_len;
4033 }
4034 }
4035
4036 if (mgmt_ie_buf != NULL) {
4037 if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
4038 (memcmp(mgmt_ie_buf, curr_ie_buf,
4039 parsed_ie_buf_len) == 0)) {
4040 WL_TRACE("Previous mgmt IE is equals to current IE");
4041 goto exit;
4042 }
4043
4044 /* parse old vndr_ie */
4045 brcmf_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies);
4046
4047 /* make a command to delete old ie */
4048 for (i = 0; i < old_vndr_ies.count; i++) {
4049 vndrie_info = &old_vndr_ies.ie_info[i];
4050
4051 WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
4052 vndrie_info->vndrie.id,
4053 vndrie_info->vndrie.len,
4054 vndrie_info->vndrie.oui[0],
4055 vndrie_info->vndrie.oui[1],
4056 vndrie_info->vndrie.oui[2]);
4057
4058 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
4059 vndrie_info->ie_ptr,
4060 vndrie_info->ie_len,
4061 "del");
4062 curr_ie_buf += del_add_ie_buf_len;
4063 total_ie_buf_len += del_add_ie_buf_len;
4064 }
4065 }
4066
4067 *mgmt_ie_len = 0;
4068 /* Add if there is any extra IE */
4069 if (mgmt_ie_buf && parsed_ie_buf_len) {
4070 ptr = mgmt_ie_buf;
4071
4072 remained_buf_len = mgmt_ie_buf_len;
4073
4074 /* make a command to add new ie */
4075 for (i = 0; i < new_vndr_ies.count; i++) {
4076 vndrie_info = &new_vndr_ies.ie_info[i];
4077
4078 WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
4079 vndrie_info->vndrie.id,
4080 vndrie_info->vndrie.len,
4081 vndrie_info->vndrie.oui[0],
4082 vndrie_info->vndrie.oui[1],
4083 vndrie_info->vndrie.oui[2]);
4084
4085 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
4086 vndrie_info->ie_ptr,
4087 vndrie_info->ie_len,
4088 "add");
4089 /* verify remained buf size before copy data */
4090 remained_buf_len -= vndrie_info->ie_len;
4091 if (remained_buf_len < 0) {
4092 WL_ERR("no space in mgmt_ie_buf: len left %d",
4093 remained_buf_len);
4094 break;
4095 }
4096
4097 /* save the parsed IE in wl struct */
4098 memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
4099 vndrie_info->ie_len);
4100 *mgmt_ie_len += vndrie_info->ie_len;
4101
4102 curr_ie_buf += del_add_ie_buf_len;
4103 total_ie_buf_len += del_add_ie_buf_len;
4104 }
4105 }
4106 if (total_ie_buf_len) {
4107 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
4108 iovar_ie_buf,
4109 total_ie_buf_len,
4110 cfg->extra_buf,
4111 WL_EXTRA_BUF_MAX, bssidx);
4112 if (err)
4113 WL_ERR("vndr ie set error : %d\n", err);
4114 }
4115
4116exit:
4117 kfree(iovar_ie_buf);
4118 return err;
4119}
4120
4121static s32
4122brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4123 struct cfg80211_ap_settings *settings)
4124{
4125 s32 ie_offset;
4126 struct brcmf_tlv *ssid_ie;
4127 struct brcmf_ssid_le ssid_le;
4128 s32 ioctl_value;
4129 s32 err = -EPERM;
4130 struct brcmf_tlv *rsn_ie;
4131 struct brcmf_vs_tlv *wpa_ie;
4132 struct brcmf_join_params join_params;
4133 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4134 s32 bssidx = 0;
4135
4136 WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
4137 settings->channel_type, settings->beacon_interval,
4138 settings->dtim_period);
4139 WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
4140 settings->ssid, settings->ssid_len, settings->auth_type,
4141 settings->inactivity_timeout);
4142
4143 if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
4144 WL_ERR("Not in AP creation mode\n");
4145 return -EPERM;
4146 }
4147
4148 memset(&ssid_le, 0, sizeof(ssid_le));
4149 if (settings->ssid == NULL || settings->ssid_len == 0) {
4150 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
4151 ssid_ie = brcmf_parse_tlvs(
4152 (u8 *)&settings->beacon.head[ie_offset],
4153 settings->beacon.head_len - ie_offset,
4154 WLAN_EID_SSID);
4155 if (!ssid_ie)
4156 return -EINVAL;
4157
4158 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
4159 ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
4160 WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID);
4161 } else {
4162 memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
4163 ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
4164 }
4165
4166 brcmf_set_mpc(ndev, 0);
4167 ioctl_value = 1;
4168 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
4169 if (err < 0) {
4170 WL_ERR("BRCMF_C_DOWN error %d\n", err);
4171 goto exit;
4172 }
4173 ioctl_value = 1;
4174 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
4175 if (err < 0) {
4176 WL_ERR("SET INFRA error %d\n", err);
4177 goto exit;
4178 }
4179 ioctl_value = 1;
4180 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
4181 if (err < 0) {
4182 WL_ERR("setting AP mode failed %d\n", err);
4183 goto exit;
4184 }
4185
4186 /* find the RSN_IE */
4187 rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
4188 settings->beacon.tail_len, WLAN_EID_RSN);
4189
4190 /* find the WPA_IE */
4191 wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
4192 settings->beacon.tail_len);
4193
4194 kfree(cfg->ap_info->rsn_ie);
4195 cfg->ap_info->rsn_ie = NULL;
4196 kfree(cfg->ap_info->wpa_ie);
4197 cfg->ap_info->wpa_ie = NULL;
4198
4199 if ((wpa_ie != NULL || rsn_ie != NULL)) {
4200 WL_TRACE("WPA(2) IE is found\n");
4201 if (wpa_ie != NULL) {
4202 /* WPA IE */
4203 err = brcmf_configure_wpaie(ndev, wpa_ie, false,
4204 bssidx);
4205 if (err < 0)
4206 goto exit;
4207 cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
4208 wpa_ie->len +
4209 TLV_HDR_LEN,
4210 GFP_KERNEL);
4211 } else {
4212 /* RSN IE */
4213 err = brcmf_configure_wpaie(ndev,
4214 (struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
4215 if (err < 0)
4216 goto exit;
4217 cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
4218 rsn_ie->len +
4219 TLV_HDR_LEN,
4220 GFP_KERNEL);
4221 }
4222 cfg->ap_info->security_mode = true;
4223 } else {
4224 WL_TRACE("No WPA(2) IEs found\n");
4225 brcmf_configure_opensecurity(ndev, bssidx);
4226 cfg->ap_info->security_mode = false;
4227 }
4228 /* Set Beacon IEs to FW */
4229 err = brcmf_set_management_ie(cfg, ndev, bssidx,
4230 VNDR_IE_BEACON_FLAG,
4231 (u8 *)settings->beacon.tail,
4232 settings->beacon.tail_len);
4233 if (err)
4234 WL_ERR("Set Beacon IE Failed\n");
4235 else
4236 WL_TRACE("Applied Vndr IEs for Beacon\n");
4237
4238 /* Set Probe Response IEs to FW */
4239 err = brcmf_set_management_ie(cfg, ndev, bssidx,
4240 VNDR_IE_PRBRSP_FLAG,
4241 (u8 *)settings->beacon.proberesp_ies,
4242 settings->beacon.proberesp_ies_len);
4243 if (err)
4244 WL_ERR("Set Probe Resp IE Failed\n");
4245 else
4246 WL_TRACE("Applied Vndr IEs for Probe Resp\n");
4247
4248 if (settings->beacon_interval) {
4249 ioctl_value = settings->beacon_interval;
4250 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
4251 &ioctl_value);
4252 if (err < 0) {
4253 WL_ERR("Beacon Interval Set Error, %d\n", err);
4254 goto exit;
4255 }
4256 }
4257 if (settings->dtim_period) {
4258 ioctl_value = settings->dtim_period;
4259 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
4260 &ioctl_value);
4261 if (err < 0) {
4262 WL_ERR("DTIM Interval Set Error, %d\n", err);
4263 goto exit;
4264 }
4265 }
4266 ioctl_value = 1;
4267 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
4268 if (err < 0) {
4269 WL_ERR("BRCMF_C_UP error (%d)\n", err);
4270 goto exit;
4271 }
4272
4273 memset(&join_params, 0, sizeof(join_params));
4274 /* join parameters starts with ssid */
4275 memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
4276 /* create softap */
4277 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
4278 sizeof(join_params));
4279 if (err < 0) {
4280 WL_ERR("SET SSID error (%d)\n", err);
4281 goto exit;
4282 }
4283 clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
4284 set_bit(WL_STATUS_AP_CREATED, &cfg->status);
4285
4286exit:
4287 if (err)
4288 brcmf_set_mpc(ndev, 1);
4289 return err;
4290}
4291
4292static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
4293{
4294 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4295 s32 ioctl_value;
4296 s32 err = -EPERM;
4297
4298 WL_TRACE("Enter\n");
4299
4300 if (cfg->conf->mode == WL_MODE_AP) {
4301 /* Due to most likely deauths outstanding we sleep */
4302 /* first to make sure they get processed by fw. */
4303 msleep(400);
4304 ioctl_value = 0;
4305 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
4306 if (err < 0) {
4307 WL_ERR("setting AP mode failed %d\n", err);
4308 goto exit;
4309 }
4310 ioctl_value = 0;
4311 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
4312 if (err < 0) {
4313 WL_ERR("BRCMF_C_UP error %d\n", err);
4314 goto exit;
4315 }
4316 brcmf_set_mpc(ndev, 1);
4317 clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
4318 clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
4319 }
4320exit:
4321 return err;
4322}
4323
4324static int
4325brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
4326 u8 *mac)
4327{
4328 struct brcmf_scb_val_le scbval;
4329 s32 err;
4330
4331 if (!mac)
4332 return -EFAULT;
4333
4334 WL_TRACE("Enter %pM\n", mac);
4335
4336 if (!check_sys_up(wiphy))
4337 return -EIO;
4338
4339 memcpy(&scbval.ea, mac, ETH_ALEN);
4340 scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
4341 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
4342 &scbval, sizeof(scbval));
4343 if (err)
4344 WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
4345
4346 WL_TRACE("Exit\n");
4347 return err;
4348}
4349
2729static struct cfg80211_ops wl_cfg80211_ops = { 4350static struct cfg80211_ops wl_cfg80211_ops = {
2730 .change_virtual_intf = brcmf_cfg80211_change_iface, 4351 .change_virtual_intf = brcmf_cfg80211_change_iface,
2731 .scan = brcmf_cfg80211_scan, 4352 .scan = brcmf_cfg80211_scan,
@@ -2748,7 +4369,18 @@ static struct cfg80211_ops wl_cfg80211_ops = {
2748 .resume = brcmf_cfg80211_resume, 4369 .resume = brcmf_cfg80211_resume,
2749 .set_pmksa = brcmf_cfg80211_set_pmksa, 4370 .set_pmksa = brcmf_cfg80211_set_pmksa,
2750 .del_pmksa = brcmf_cfg80211_del_pmksa, 4371 .del_pmksa = brcmf_cfg80211_del_pmksa,
2751 .flush_pmksa = brcmf_cfg80211_flush_pmksa 4372 .flush_pmksa = brcmf_cfg80211_flush_pmksa,
4373 .start_ap = brcmf_cfg80211_start_ap,
4374 .stop_ap = brcmf_cfg80211_stop_ap,
4375 .del_station = brcmf_cfg80211_del_station,
4376#ifndef CONFIG_BRCMISCAN
4377 /* scheduled scan need e-scan, which is mutual exclusive with i-scan */
4378 .sched_scan_start = brcmf_cfg80211_sched_scan_start,
4379 .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
4380#endif
4381#ifdef CONFIG_NL80211_TESTMODE
4382 .testmode_cmd = brcmf_cfg80211_testmode
4383#endif
2752}; 4384};
2753 4385
2754static s32 brcmf_mode_to_nl80211_iftype(s32 mode) 4386static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
@@ -2767,8 +4399,18 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
2767 return err; 4399 return err;
2768} 4400}
2769 4401
2770static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface, 4402static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
2771 struct device *ndev) 4403{
4404#ifndef CONFIG_BRCMFISCAN
4405 /* scheduled scan settings */
4406 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
4407 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
4408 wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
4409 wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
4410#endif
4411}
4412
4413static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
2772{ 4414{
2773 struct wireless_dev *wdev; 4415 struct wireless_dev *wdev;
2774 s32 err = 0; 4416 s32 err = 0;
@@ -2777,9 +4419,8 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
2777 if (!wdev) 4419 if (!wdev)
2778 return ERR_PTR(-ENOMEM); 4420 return ERR_PTR(-ENOMEM);
2779 4421
2780 wdev->wiphy = 4422 wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
2781 wiphy_new(&wl_cfg80211_ops, 4423 sizeof(struct brcmf_cfg80211_info));
2782 sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
2783 if (!wdev->wiphy) { 4424 if (!wdev->wiphy) {
2784 WL_ERR("Could not allocate wiphy device\n"); 4425 WL_ERR("Could not allocate wiphy device\n");
2785 err = -ENOMEM; 4426 err = -ENOMEM;
@@ -2788,8 +4429,9 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
2788 set_wiphy_dev(wdev->wiphy, ndev); 4429 set_wiphy_dev(wdev->wiphy, ndev);
2789 wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; 4430 wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
2790 wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; 4431 wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
2791 wdev->wiphy->interface_modes = 4432 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2792 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 4433 BIT(NL80211_IFTYPE_ADHOC) |
4434 BIT(NL80211_IFTYPE_AP);
2793 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; 4435 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
2794 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set 4436 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
2795 * it as 11a by default. 4437 * it as 11a by default.
@@ -2805,6 +4447,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
2805 * save mode 4447 * save mode
2806 * by default 4448 * by default
2807 */ 4449 */
4450 brcmf_wiphy_pno_params(wdev->wiphy);
2808 err = wiphy_register(wdev->wiphy); 4451 err = wiphy_register(wdev->wiphy);
2809 if (err < 0) { 4452 if (err < 0) {
2810 WL_ERR("Could not register wiphy device (%d)\n", err); 4453 WL_ERR("Could not register wiphy device (%d)\n", err);
@@ -2821,9 +4464,9 @@ wiphy_new_out:
2821 return ERR_PTR(err); 4464 return ERR_PTR(err);
2822} 4465}
2823 4466
2824static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv) 4467static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
2825{ 4468{
2826 struct wireless_dev *wdev = cfg_priv->wdev; 4469 struct wireless_dev *wdev = cfg->wdev;
2827 4470
2828 if (!wdev) { 4471 if (!wdev) {
2829 WL_ERR("wdev is invalid\n"); 4472 WL_ERR("wdev is invalid\n");
@@ -2832,10 +4475,10 @@ static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
2832 wiphy_unregister(wdev->wiphy); 4475 wiphy_unregister(wdev->wiphy);
2833 wiphy_free(wdev->wiphy); 4476 wiphy_free(wdev->wiphy);
2834 kfree(wdev); 4477 kfree(wdev);
2835 cfg_priv->wdev = NULL; 4478 cfg->wdev = NULL;
2836} 4479}
2837 4480
2838static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv, 4481static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
2839 const struct brcmf_event_msg *e) 4482 const struct brcmf_event_msg *e)
2840{ 4483{
2841 u32 event = be32_to_cpu(e->event_type); 4484 u32 event = be32_to_cpu(e->event_type);
@@ -2843,14 +4486,14 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
2843 4486
2844 if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) { 4487 if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
2845 WL_CONN("Processing set ssid\n"); 4488 WL_CONN("Processing set ssid\n");
2846 cfg_priv->link_up = true; 4489 cfg->link_up = true;
2847 return true; 4490 return true;
2848 } 4491 }
2849 4492
2850 return false; 4493 return false;
2851} 4494}
2852 4495
2853static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv, 4496static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
2854 const struct brcmf_event_msg *e) 4497 const struct brcmf_event_msg *e)
2855{ 4498{
2856 u32 event = be32_to_cpu(e->event_type); 4499 u32 event = be32_to_cpu(e->event_type);
@@ -2863,7 +4506,7 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
2863 return false; 4506 return false;
2864} 4507}
2865 4508
2866static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv, 4509static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
2867 const struct brcmf_event_msg *e) 4510 const struct brcmf_event_msg *e)
2868{ 4511{
2869 u32 event = be32_to_cpu(e->event_type); 4512 u32 event = be32_to_cpu(e->event_type);
@@ -2884,9 +4527,9 @@ static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
2884 return false; 4527 return false;
2885} 4528}
2886 4529
2887static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) 4530static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
2888{ 4531{
2889 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4532 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
2890 4533
2891 kfree(conn_info->req_ie); 4534 kfree(conn_info->req_ie);
2892 conn_info->req_ie = NULL; 4535 conn_info->req_ie = NULL;
@@ -2896,30 +4539,30 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2896 conn_info->resp_ie_len = 0; 4539 conn_info->resp_ie_len = 0;
2897} 4540}
2898 4541
2899static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) 4542static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
2900{ 4543{
2901 struct net_device *ndev = cfg_to_ndev(cfg_priv); 4544 struct net_device *ndev = cfg_to_ndev(cfg);
2902 struct brcmf_cfg80211_assoc_ielen_le *assoc_info; 4545 struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
2903 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4546 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
2904 u32 req_len; 4547 u32 req_len;
2905 u32 resp_len; 4548 u32 resp_len;
2906 s32 err = 0; 4549 s32 err = 0;
2907 4550
2908 brcmf_clear_assoc_ies(cfg_priv); 4551 brcmf_clear_assoc_ies(cfg);
2909 4552
2910 err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf, 4553 err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
2911 WL_ASSOC_INFO_MAX); 4554 WL_ASSOC_INFO_MAX);
2912 if (err) { 4555 if (err) {
2913 WL_ERR("could not get assoc info (%d)\n", err); 4556 WL_ERR("could not get assoc info (%d)\n", err);
2914 return err; 4557 return err;
2915 } 4558 }
2916 assoc_info = 4559 assoc_info =
2917 (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf; 4560 (struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
2918 req_len = le32_to_cpu(assoc_info->req_len); 4561 req_len = le32_to_cpu(assoc_info->req_len);
2919 resp_len = le32_to_cpu(assoc_info->resp_len); 4562 resp_len = le32_to_cpu(assoc_info->resp_len);
2920 if (req_len) { 4563 if (req_len) {
2921 err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies", 4564 err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
2922 cfg_priv->extra_buf, 4565 cfg->extra_buf,
2923 WL_ASSOC_INFO_MAX); 4566 WL_ASSOC_INFO_MAX);
2924 if (err) { 4567 if (err) {
2925 WL_ERR("could not get assoc req (%d)\n", err); 4568 WL_ERR("could not get assoc req (%d)\n", err);
@@ -2927,7 +4570,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2927 } 4570 }
2928 conn_info->req_ie_len = req_len; 4571 conn_info->req_ie_len = req_len;
2929 conn_info->req_ie = 4572 conn_info->req_ie =
2930 kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len, 4573 kmemdup(cfg->extra_buf, conn_info->req_ie_len,
2931 GFP_KERNEL); 4574 GFP_KERNEL);
2932 } else { 4575 } else {
2933 conn_info->req_ie_len = 0; 4576 conn_info->req_ie_len = 0;
@@ -2935,7 +4578,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2935 } 4578 }
2936 if (resp_len) { 4579 if (resp_len) {
2937 err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies", 4580 err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
2938 cfg_priv->extra_buf, 4581 cfg->extra_buf,
2939 WL_ASSOC_INFO_MAX); 4582 WL_ASSOC_INFO_MAX);
2940 if (err) { 4583 if (err) {
2941 WL_ERR("could not get assoc resp (%d)\n", err); 4584 WL_ERR("could not get assoc resp (%d)\n", err);
@@ -2943,7 +4586,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2943 } 4586 }
2944 conn_info->resp_ie_len = resp_len; 4587 conn_info->resp_ie_len = resp_len;
2945 conn_info->resp_ie = 4588 conn_info->resp_ie =
2946 kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len, 4589 kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
2947 GFP_KERNEL); 4590 GFP_KERNEL);
2948 } else { 4591 } else {
2949 conn_info->resp_ie_len = 0; 4592 conn_info->resp_ie_len = 0;
@@ -2956,12 +4599,13 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
2956} 4599}
2957 4600
2958static s32 4601static s32
2959brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv, 4602brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
2960 struct net_device *ndev, 4603 struct net_device *ndev,
2961 const struct brcmf_event_msg *e) 4604 const struct brcmf_event_msg *e)
2962{ 4605{
2963 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4606 struct brcmf_cfg80211_profile *profile = cfg->profile;
2964 struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); 4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4608 struct wiphy *wiphy = cfg_to_wiphy(cfg);
2965 struct brcmf_channel_info_le channel_le; 4609 struct brcmf_channel_info_le channel_le;
2966 struct ieee80211_channel *notify_channel; 4610 struct ieee80211_channel *notify_channel;
2967 struct ieee80211_supported_band *band; 4611 struct ieee80211_supported_band *band;
@@ -2971,9 +4615,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
2971 4615
2972 WL_TRACE("Enter\n"); 4616 WL_TRACE("Enter\n");
2973 4617
2974 brcmf_get_assoc_ies(cfg_priv); 4618 brcmf_get_assoc_ies(cfg);
2975 brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID); 4619 memcpy(profile->bssid, e->addr, ETH_ALEN);
2976 brcmf_update_bss_info(cfg_priv); 4620 brcmf_update_bss_info(cfg);
2977 4621
2978 brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le, 4622 brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
2979 sizeof(channel_le)); 4623 sizeof(channel_le));
@@ -2989,37 +4633,35 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
2989 freq = ieee80211_channel_to_frequency(target_channel, band->band); 4633 freq = ieee80211_channel_to_frequency(target_channel, band->band);
2990 notify_channel = ieee80211_get_channel(wiphy, freq); 4634 notify_channel = ieee80211_get_channel(wiphy, freq);
2991 4635
2992 cfg80211_roamed(ndev, notify_channel, 4636 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
2993 (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
2994 conn_info->req_ie, conn_info->req_ie_len, 4637 conn_info->req_ie, conn_info->req_ie_len,
2995 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); 4638 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
2996 WL_CONN("Report roaming result\n"); 4639 WL_CONN("Report roaming result\n");
2997 4640
2998 set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 4641 set_bit(WL_STATUS_CONNECTED, &cfg->status);
2999 WL_TRACE("Exit\n"); 4642 WL_TRACE("Exit\n");
3000 return err; 4643 return err;
3001} 4644}
3002 4645
3003static s32 4646static s32
3004brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv, 4647brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
3005 struct net_device *ndev, const struct brcmf_event_msg *e, 4648 struct net_device *ndev, const struct brcmf_event_msg *e,
3006 bool completed) 4649 bool completed)
3007{ 4650{
3008 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); 4651 struct brcmf_cfg80211_profile *profile = cfg->profile;
4652 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
3009 s32 err = 0; 4653 s32 err = 0;
3010 4654
3011 WL_TRACE("Enter\n"); 4655 WL_TRACE("Enter\n");
3012 4656
3013 if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { 4657 if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
3014 if (completed) { 4658 if (completed) {
3015 brcmf_get_assoc_ies(cfg_priv); 4659 brcmf_get_assoc_ies(cfg);
3016 brcmf_update_prof(cfg_priv, NULL, &e->addr, 4660 memcpy(profile->bssid, e->addr, ETH_ALEN);
3017 WL_PROF_BSSID); 4661 brcmf_update_bss_info(cfg);
3018 brcmf_update_bss_info(cfg_priv);
3019 } 4662 }
3020 cfg80211_connect_result(ndev, 4663 cfg80211_connect_result(ndev,
3021 (u8 *)brcmf_read_prof(cfg_priv, 4664 (u8 *)profile->bssid,
3022 WL_PROF_BSSID),
3023 conn_info->req_ie, 4665 conn_info->req_ie,
3024 conn_info->req_ie_len, 4666 conn_info->req_ie_len,
3025 conn_info->resp_ie, 4667 conn_info->resp_ie,
@@ -3028,7 +4670,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
3028 WLAN_STATUS_AUTH_TIMEOUT, 4670 WLAN_STATUS_AUTH_TIMEOUT,
3029 GFP_KERNEL); 4671 GFP_KERNEL);
3030 if (completed) 4672 if (completed)
3031 set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 4673 set_bit(WL_STATUS_CONNECTED, &cfg->status);
3032 WL_CONN("Report connect result - connection %s\n", 4674 WL_CONN("Report connect result - connection %s\n",
3033 completed ? "succeeded" : "failed"); 4675 completed ? "succeeded" : "failed");
3034 } 4676 }
@@ -3037,52 +4679,93 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
3037} 4679}
3038 4680
3039static s32 4681static s32
3040brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv, 4682brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
4683 struct net_device *ndev,
4684 const struct brcmf_event_msg *e, void *data)
4685{
4686 s32 err = 0;
4687 u32 event = be32_to_cpu(e->event_type);
4688 u32 reason = be32_to_cpu(e->reason);
4689 u32 len = be32_to_cpu(e->datalen);
4690 static int generation;
4691
4692 struct station_info sinfo;
4693
4694 WL_CONN("event %d, reason %d\n", event, reason);
4695 memset(&sinfo, 0, sizeof(sinfo));
4696
4697 sinfo.filled = 0;
4698 if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
4699 reason == BRCMF_E_STATUS_SUCCESS) {
4700 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
4701 if (!data) {
4702 WL_ERR("No IEs present in ASSOC/REASSOC_IND");
4703 return -EINVAL;
4704 }
4705 sinfo.assoc_req_ies = data;
4706 sinfo.assoc_req_ies_len = len;
4707 generation++;
4708 sinfo.generation = generation;
4709 cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
4710 } else if ((event == BRCMF_E_DISASSOC_IND) ||
4711 (event == BRCMF_E_DEAUTH_IND) ||
4712 (event == BRCMF_E_DEAUTH)) {
4713 generation++;
4714 sinfo.generation = generation;
4715 cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
4716 }
4717 return err;
4718}
4719
4720static s32
4721brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
3041 struct net_device *ndev, 4722 struct net_device *ndev,
3042 const struct brcmf_event_msg *e, void *data) 4723 const struct brcmf_event_msg *e, void *data)
3043{ 4724{
4725 struct brcmf_cfg80211_profile *profile = cfg->profile;
3044 s32 err = 0; 4726 s32 err = 0;
3045 4727
3046 if (brcmf_is_linkup(cfg_priv, e)) { 4728 if (cfg->conf->mode == WL_MODE_AP) {
4729 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
4730 } else if (brcmf_is_linkup(cfg, e)) {
3047 WL_CONN("Linkup\n"); 4731 WL_CONN("Linkup\n");
3048 if (brcmf_is_ibssmode(cfg_priv)) { 4732 if (brcmf_is_ibssmode(cfg)) {
3049 brcmf_update_prof(cfg_priv, NULL, (void *)e->addr, 4733 memcpy(profile->bssid, e->addr, ETH_ALEN);
3050 WL_PROF_BSSID); 4734 wl_inform_ibss(cfg, ndev, e->addr);
3051 wl_inform_ibss(cfg_priv, ndev, e->addr);
3052 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL); 4735 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
3053 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 4736 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
3054 set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); 4737 set_bit(WL_STATUS_CONNECTED, &cfg->status);
3055 } else 4738 } else
3056 brcmf_bss_connect_done(cfg_priv, ndev, e, true); 4739 brcmf_bss_connect_done(cfg, ndev, e, true);
3057 } else if (brcmf_is_linkdown(cfg_priv, e)) { 4740 } else if (brcmf_is_linkdown(cfg, e)) {
3058 WL_CONN("Linkdown\n"); 4741 WL_CONN("Linkdown\n");
3059 if (brcmf_is_ibssmode(cfg_priv)) { 4742 if (brcmf_is_ibssmode(cfg)) {
3060 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 4743 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
3061 if (test_and_clear_bit(WL_STATUS_CONNECTED, 4744 if (test_and_clear_bit(WL_STATUS_CONNECTED,
3062 &cfg_priv->status)) 4745 &cfg->status))
3063 brcmf_link_down(cfg_priv); 4746 brcmf_link_down(cfg);
3064 } else { 4747 } else {
3065 brcmf_bss_connect_done(cfg_priv, ndev, e, false); 4748 brcmf_bss_connect_done(cfg, ndev, e, false);
3066 if (test_and_clear_bit(WL_STATUS_CONNECTED, 4749 if (test_and_clear_bit(WL_STATUS_CONNECTED,
3067 &cfg_priv->status)) { 4750 &cfg->status)) {
3068 cfg80211_disconnected(ndev, 0, NULL, 0, 4751 cfg80211_disconnected(ndev, 0, NULL, 0,
3069 GFP_KERNEL); 4752 GFP_KERNEL);
3070 brcmf_link_down(cfg_priv); 4753 brcmf_link_down(cfg);
3071 } 4754 }
3072 } 4755 }
3073 brcmf_init_prof(cfg_priv->profile); 4756 brcmf_init_prof(cfg->profile);
3074 } else if (brcmf_is_nonetwork(cfg_priv, e)) { 4757 } else if (brcmf_is_nonetwork(cfg, e)) {
3075 if (brcmf_is_ibssmode(cfg_priv)) 4758 if (brcmf_is_ibssmode(cfg))
3076 clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); 4759 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
3077 else 4760 else
3078 brcmf_bss_connect_done(cfg_priv, ndev, e, false); 4761 brcmf_bss_connect_done(cfg, ndev, e, false);
3079 } 4762 }
3080 4763
3081 return err; 4764 return err;
3082} 4765}
3083 4766
3084static s32 4767static s32
3085brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv, 4768brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
3086 struct net_device *ndev, 4769 struct net_device *ndev,
3087 const struct brcmf_event_msg *e, void *data) 4770 const struct brcmf_event_msg *e, void *data)
3088{ 4771{
@@ -3091,17 +4774,17 @@ brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
3091 u32 status = be32_to_cpu(e->status); 4774 u32 status = be32_to_cpu(e->status);
3092 4775
3093 if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { 4776 if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
3094 if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) 4777 if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
3095 brcmf_bss_roaming_done(cfg_priv, ndev, e); 4778 brcmf_bss_roaming_done(cfg, ndev, e);
3096 else 4779 else
3097 brcmf_bss_connect_done(cfg_priv, ndev, e, true); 4780 brcmf_bss_connect_done(cfg, ndev, e, true);
3098 } 4781 }
3099 4782
3100 return err; 4783 return err;
3101} 4784}
3102 4785
3103static s32 4786static s32
3104brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv, 4787brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
3105 struct net_device *ndev, 4788 struct net_device *ndev,
3106 const struct brcmf_event_msg *e, void *data) 4789 const struct brcmf_event_msg *e, void *data)
3107{ 4790{
@@ -3120,7 +4803,7 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
3120} 4803}
3121 4804
3122static s32 4805static s32
3123brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv, 4806brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
3124 struct net_device *ndev, 4807 struct net_device *ndev,
3125 const struct brcmf_event_msg *e, void *data) 4808 const struct brcmf_event_msg *e, void *data)
3126{ 4809{
@@ -3133,12 +4816,12 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
3133 4816
3134 WL_TRACE("Enter\n"); 4817 WL_TRACE("Enter\n");
3135 4818
3136 if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) { 4819 if (cfg->iscan_on && cfg->iscan_kickstart) {
3137 WL_TRACE("Exit\n"); 4820 WL_TRACE("Exit\n");
3138 return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv)); 4821 return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
3139 } 4822 }
3140 4823
3141 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { 4824 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
3142 WL_ERR("Scan complete while device not scanning\n"); 4825 WL_ERR("Scan complete while device not scanning\n");
3143 scan_abort = true; 4826 scan_abort = true;
3144 err = -EINVAL; 4827 err = -EINVAL;
@@ -3155,35 +4838,33 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
3155 scan_channel = le32_to_cpu(channel_inform_le.scan_channel); 4838 scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
3156 if (scan_channel) 4839 if (scan_channel)
3157 WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel); 4840 WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
3158 cfg_priv->bss_list = cfg_priv->scan_results; 4841 cfg->bss_list = cfg->scan_results;
3159 bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list; 4842 bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
3160 4843
3161 memset(cfg_priv->scan_results, 0, len); 4844 memset(cfg->scan_results, 0, len);
3162 bss_list_le->buflen = cpu_to_le32(len); 4845 bss_list_le->buflen = cpu_to_le32(len);
3163 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS, 4846 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
3164 cfg_priv->scan_results, len); 4847 cfg->scan_results, len);
3165 if (err) { 4848 if (err) {
3166 WL_ERR("%s Scan_results error (%d)\n", ndev->name, err); 4849 WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
3167 err = -EINVAL; 4850 err = -EINVAL;
3168 scan_abort = true; 4851 scan_abort = true;
3169 goto scan_done_out; 4852 goto scan_done_out;
3170 } 4853 }
3171 cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen); 4854 cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
3172 cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version); 4855 cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
3173 cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count); 4856 cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
3174 4857
3175 err = brcmf_inform_bss(cfg_priv); 4858 err = brcmf_inform_bss(cfg);
3176 if (err) { 4859 if (err)
3177 scan_abort = true; 4860 scan_abort = true;
3178 goto scan_done_out;
3179 }
3180 4861
3181scan_done_out: 4862scan_done_out:
3182 if (cfg_priv->scan_request) { 4863 if (cfg->scan_request) {
3183 WL_SCAN("calling cfg80211_scan_done\n"); 4864 WL_SCAN("calling cfg80211_scan_done\n");
3184 cfg80211_scan_done(cfg_priv->scan_request, scan_abort); 4865 cfg80211_scan_done(cfg->scan_request, scan_abort);
3185 brcmf_set_mpc(ndev, 1); 4866 brcmf_set_mpc(ndev, 1);
3186 cfg_priv->scan_request = NULL; 4867 cfg->scan_request = NULL;
3187 } 4868 }
3188 4869
3189 WL_TRACE("Exit\n"); 4870 WL_TRACE("Exit\n");
@@ -3206,68 +4887,85 @@ static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
3206 memset(el, 0, sizeof(*el)); 4887 memset(el, 0, sizeof(*el));
3207 el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status; 4888 el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
3208 el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status; 4889 el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
4890 el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
4891 el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
4892 el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
4893 el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
4894 el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
3209 el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status; 4895 el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
3210 el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status; 4896 el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
3211 el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status; 4897 el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
4898 el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
4899}
4900
4901static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
4902{
4903 kfree(cfg->scan_results);
4904 cfg->scan_results = NULL;
4905 kfree(cfg->bss_info);
4906 cfg->bss_info = NULL;
4907 kfree(cfg->conf);
4908 cfg->conf = NULL;
4909 kfree(cfg->profile);
4910 cfg->profile = NULL;
4911 kfree(cfg->scan_req_int);
4912 cfg->scan_req_int = NULL;
4913 kfree(cfg->escan_ioctl_buf);
4914 cfg->escan_ioctl_buf = NULL;
4915 kfree(cfg->dcmd_buf);
4916 cfg->dcmd_buf = NULL;
4917 kfree(cfg->extra_buf);
4918 cfg->extra_buf = NULL;
4919 kfree(cfg->iscan);
4920 cfg->iscan = NULL;
4921 kfree(cfg->pmk_list);
4922 cfg->pmk_list = NULL;
4923 if (cfg->ap_info) {
4924 kfree(cfg->ap_info->wpa_ie);
4925 kfree(cfg->ap_info->rsn_ie);
4926 kfree(cfg->ap_info);
4927 cfg->ap_info = NULL;
4928 }
3212} 4929}
3213 4930
3214static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv) 4931static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
3215{ 4932{
3216 kfree(cfg_priv->scan_results); 4933 cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
3217 cfg_priv->scan_results = NULL; 4934 if (!cfg->scan_results)
3218 kfree(cfg_priv->bss_info);
3219 cfg_priv->bss_info = NULL;
3220 kfree(cfg_priv->conf);
3221 cfg_priv->conf = NULL;
3222 kfree(cfg_priv->profile);
3223 cfg_priv->profile = NULL;
3224 kfree(cfg_priv->scan_req_int);
3225 cfg_priv->scan_req_int = NULL;
3226 kfree(cfg_priv->dcmd_buf);
3227 cfg_priv->dcmd_buf = NULL;
3228 kfree(cfg_priv->extra_buf);
3229 cfg_priv->extra_buf = NULL;
3230 kfree(cfg_priv->iscan);
3231 cfg_priv->iscan = NULL;
3232 kfree(cfg_priv->pmk_list);
3233 cfg_priv->pmk_list = NULL;
3234}
3235
3236static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
3237{
3238 cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
3239 if (!cfg_priv->scan_results)
3240 goto init_priv_mem_out; 4935 goto init_priv_mem_out;
3241 cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL); 4936 cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
3242 if (!cfg_priv->conf) 4937 if (!cfg->conf)
3243 goto init_priv_mem_out; 4938 goto init_priv_mem_out;
3244 cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL); 4939 cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
3245 if (!cfg_priv->profile) 4940 if (!cfg->profile)
3246 goto init_priv_mem_out; 4941 goto init_priv_mem_out;
3247 cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); 4942 cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
3248 if (!cfg_priv->bss_info) 4943 if (!cfg->bss_info)
3249 goto init_priv_mem_out; 4944 goto init_priv_mem_out;
3250 cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int), 4945 cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
3251 GFP_KERNEL); 4946 GFP_KERNEL);
3252 if (!cfg_priv->scan_req_int) 4947 if (!cfg->scan_req_int)
4948 goto init_priv_mem_out;
4949 cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
4950 if (!cfg->escan_ioctl_buf)
3253 goto init_priv_mem_out; 4951 goto init_priv_mem_out;
3254 cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL); 4952 cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
3255 if (!cfg_priv->dcmd_buf) 4953 if (!cfg->dcmd_buf)
3256 goto init_priv_mem_out; 4954 goto init_priv_mem_out;
3257 cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); 4955 cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
3258 if (!cfg_priv->extra_buf) 4956 if (!cfg->extra_buf)
3259 goto init_priv_mem_out; 4957 goto init_priv_mem_out;
3260 cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL); 4958 cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
3261 if (!cfg_priv->iscan) 4959 if (!cfg->iscan)
3262 goto init_priv_mem_out; 4960 goto init_priv_mem_out;
3263 cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL); 4961 cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
3264 if (!cfg_priv->pmk_list) 4962 if (!cfg->pmk_list)
3265 goto init_priv_mem_out; 4963 goto init_priv_mem_out;
3266 4964
3267 return 0; 4965 return 0;
3268 4966
3269init_priv_mem_out: 4967init_priv_mem_out:
3270 brcmf_deinit_priv_mem(cfg_priv); 4968 brcmf_deinit_priv_mem(cfg);
3271 4969
3272 return -ENOMEM; 4970 return -ENOMEM;
3273} 4971}
@@ -3277,17 +4975,17 @@ init_priv_mem_out:
3277*/ 4975*/
3278 4976
3279static struct brcmf_cfg80211_event_q *brcmf_deq_event( 4977static struct brcmf_cfg80211_event_q *brcmf_deq_event(
3280 struct brcmf_cfg80211_priv *cfg_priv) 4978 struct brcmf_cfg80211_info *cfg)
3281{ 4979{
3282 struct brcmf_cfg80211_event_q *e = NULL; 4980 struct brcmf_cfg80211_event_q *e = NULL;
3283 4981
3284 spin_lock_irq(&cfg_priv->evt_q_lock); 4982 spin_lock_irq(&cfg->evt_q_lock);
3285 if (!list_empty(&cfg_priv->evt_q_list)) { 4983 if (!list_empty(&cfg->evt_q_list)) {
3286 e = list_first_entry(&cfg_priv->evt_q_list, 4984 e = list_first_entry(&cfg->evt_q_list,
3287 struct brcmf_cfg80211_event_q, evt_q_list); 4985 struct brcmf_cfg80211_event_q, evt_q_list);
3288 list_del(&e->evt_q_list); 4986 list_del(&e->evt_q_list);
3289 } 4987 }
3290 spin_unlock_irq(&cfg_priv->evt_q_lock); 4988 spin_unlock_irq(&cfg->evt_q_lock);
3291 4989
3292 return e; 4990 return e;
3293} 4991}
@@ -3299,23 +4997,33 @@ static struct brcmf_cfg80211_event_q *brcmf_deq_event(
3299*/ 4997*/
3300 4998
3301static s32 4999static s32
3302brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event, 5000brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
3303 const struct brcmf_event_msg *msg) 5001 const struct brcmf_event_msg *msg, void *data)
3304{ 5002{
3305 struct brcmf_cfg80211_event_q *e; 5003 struct brcmf_cfg80211_event_q *e;
3306 s32 err = 0; 5004 s32 err = 0;
3307 ulong flags; 5005 ulong flags;
5006 u32 data_len;
5007 u32 total_len;
3308 5008
3309 e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC); 5009 total_len = sizeof(struct brcmf_cfg80211_event_q);
5010 if (data)
5011 data_len = be32_to_cpu(msg->datalen);
5012 else
5013 data_len = 0;
5014 total_len += data_len;
5015 e = kzalloc(total_len, GFP_ATOMIC);
3310 if (!e) 5016 if (!e)
3311 return -ENOMEM; 5017 return -ENOMEM;
3312 5018
3313 e->etype = event; 5019 e->etype = event;
3314 memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg)); 5020 memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
5021 if (data)
5022 memcpy(&e->edata, data, data_len);
3315 5023
3316 spin_lock_irqsave(&cfg_priv->evt_q_lock, flags); 5024 spin_lock_irqsave(&cfg->evt_q_lock, flags);
3317 list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list); 5025 list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
3318 spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags); 5026 spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
3319 5027
3320 return err; 5028 return err;
3321} 5029}
@@ -3327,12 +5035,12 @@ static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
3327 5035
3328static void brcmf_cfg80211_event_handler(struct work_struct *work) 5036static void brcmf_cfg80211_event_handler(struct work_struct *work)
3329{ 5037{
3330 struct brcmf_cfg80211_priv *cfg_priv = 5038 struct brcmf_cfg80211_info *cfg =
3331 container_of(work, struct brcmf_cfg80211_priv, 5039 container_of(work, struct brcmf_cfg80211_info,
3332 event_work); 5040 event_work);
3333 struct brcmf_cfg80211_event_q *e; 5041 struct brcmf_cfg80211_event_q *e;
3334 5042
3335 e = brcmf_deq_event(cfg_priv); 5043 e = brcmf_deq_event(cfg);
3336 if (unlikely(!e)) { 5044 if (unlikely(!e)) {
3337 WL_ERR("event queue empty...\n"); 5045 WL_ERR("event queue empty...\n");
3338 return; 5046 return;
@@ -3340,137 +5048,131 @@ static void brcmf_cfg80211_event_handler(struct work_struct *work)
3340 5048
3341 do { 5049 do {
3342 WL_INFO("event type (%d)\n", e->etype); 5050 WL_INFO("event type (%d)\n", e->etype);
3343 if (cfg_priv->el.handler[e->etype]) 5051 if (cfg->el.handler[e->etype])
3344 cfg_priv->el.handler[e->etype](cfg_priv, 5052 cfg->el.handler[e->etype](cfg,
3345 cfg_to_ndev(cfg_priv), 5053 cfg_to_ndev(cfg),
3346 &e->emsg, e->edata); 5054 &e->emsg, e->edata);
3347 else 5055 else
3348 WL_INFO("Unknown Event (%d): ignoring\n", e->etype); 5056 WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
3349 brcmf_put_event(e); 5057 brcmf_put_event(e);
3350 } while ((e = brcmf_deq_event(cfg_priv))); 5058 } while ((e = brcmf_deq_event(cfg)));
3351 5059
3352} 5060}
3353 5061
3354static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv) 5062static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
3355{ 5063{
3356 spin_lock_init(&cfg_priv->evt_q_lock); 5064 spin_lock_init(&cfg->evt_q_lock);
3357 INIT_LIST_HEAD(&cfg_priv->evt_q_list); 5065 INIT_LIST_HEAD(&cfg->evt_q_list);
3358} 5066}
3359 5067
3360static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv) 5068static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
3361{ 5069{
3362 struct brcmf_cfg80211_event_q *e; 5070 struct brcmf_cfg80211_event_q *e;
3363 5071
3364 spin_lock_irq(&cfg_priv->evt_q_lock); 5072 spin_lock_irq(&cfg->evt_q_lock);
3365 while (!list_empty(&cfg_priv->evt_q_list)) { 5073 while (!list_empty(&cfg->evt_q_list)) {
3366 e = list_first_entry(&cfg_priv->evt_q_list, 5074 e = list_first_entry(&cfg->evt_q_list,
3367 struct brcmf_cfg80211_event_q, evt_q_list); 5075 struct brcmf_cfg80211_event_q, evt_q_list);
3368 list_del(&e->evt_q_list); 5076 list_del(&e->evt_q_list);
3369 kfree(e); 5077 kfree(e);
3370 } 5078 }
3371 spin_unlock_irq(&cfg_priv->evt_q_lock); 5079 spin_unlock_irq(&cfg->evt_q_lock);
3372} 5080}
3373 5081
3374static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv) 5082static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
3375{ 5083{
3376 s32 err = 0; 5084 s32 err = 0;
3377 5085
3378 cfg_priv->scan_request = NULL; 5086 cfg->scan_request = NULL;
3379 cfg_priv->pwr_save = true; 5087 cfg->pwr_save = true;
3380 cfg_priv->iscan_on = true; /* iscan on & off switch. 5088#ifdef CONFIG_BRCMISCAN
5089 cfg->iscan_on = true; /* iscan on & off switch.
3381 we enable iscan per default */ 5090 we enable iscan per default */
3382 cfg_priv->roam_on = true; /* roam on & off switch. 5091 cfg->escan_on = false; /* escan on & off switch.
5092 we disable escan per default */
5093#else
5094 cfg->iscan_on = false; /* iscan on & off switch.
5095 we disable iscan per default */
5096 cfg->escan_on = true; /* escan on & off switch.
5097 we enable escan per default */
5098#endif
5099 cfg->roam_on = true; /* roam on & off switch.
3383 we enable roam per default */ 5100 we enable roam per default */
3384 5101
3385 cfg_priv->iscan_kickstart = false; 5102 cfg->iscan_kickstart = false;
3386 cfg_priv->active_scan = true; /* we do active scan for 5103 cfg->active_scan = true; /* we do active scan for
3387 specific scan per default */ 5104 specific scan per default */
3388 cfg_priv->dongle_up = false; /* dongle is not up yet */ 5105 cfg->dongle_up = false; /* dongle is not up yet */
3389 brcmf_init_eq(cfg_priv); 5106 brcmf_init_eq(cfg);
3390 err = brcmf_init_priv_mem(cfg_priv); 5107 err = brcmf_init_priv_mem(cfg);
3391 if (err) 5108 if (err)
3392 return err; 5109 return err;
3393 INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler); 5110 INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
3394 brcmf_init_eloop_handler(&cfg_priv->el); 5111 brcmf_init_eloop_handler(&cfg->el);
3395 mutex_init(&cfg_priv->usr_sync); 5112 mutex_init(&cfg->usr_sync);
3396 err = brcmf_init_iscan(cfg_priv); 5113 err = brcmf_init_iscan(cfg);
3397 if (err) 5114 if (err)
3398 return err; 5115 return err;
3399 brcmf_init_conf(cfg_priv->conf); 5116 brcmf_init_escan(cfg);
3400 brcmf_init_prof(cfg_priv->profile); 5117 brcmf_init_conf(cfg->conf);
3401 brcmf_link_down(cfg_priv); 5118 brcmf_init_prof(cfg->profile);
5119 brcmf_link_down(cfg);
3402 5120
3403 return err; 5121 return err;
3404} 5122}
3405 5123
3406static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv) 5124static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
3407{ 5125{
3408 cancel_work_sync(&cfg_priv->event_work); 5126 cancel_work_sync(&cfg->event_work);
3409 cfg_priv->dongle_up = false; /* dongle down */ 5127 cfg->dongle_up = false; /* dongle down */
3410 brcmf_flush_eq(cfg_priv); 5128 brcmf_flush_eq(cfg);
3411 brcmf_link_down(cfg_priv); 5129 brcmf_link_down(cfg);
3412 brcmf_term_iscan(cfg_priv); 5130 brcmf_abort_scanning(cfg);
3413 brcmf_deinit_priv_mem(cfg_priv); 5131 brcmf_deinit_priv_mem(cfg);
3414} 5132}
3415 5133
3416struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, 5134struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
3417 struct device *busdev, 5135 struct device *busdev,
3418 void *data) 5136 struct brcmf_pub *drvr)
3419{ 5137{
3420 struct wireless_dev *wdev; 5138 struct wireless_dev *wdev;
3421 struct brcmf_cfg80211_priv *cfg_priv; 5139 struct brcmf_cfg80211_info *cfg;
3422 struct brcmf_cfg80211_iface *ci;
3423 struct brcmf_cfg80211_dev *cfg_dev;
3424 s32 err = 0; 5140 s32 err = 0;
3425 5141
3426 if (!ndev) { 5142 if (!ndev) {
3427 WL_ERR("ndev is invalid\n"); 5143 WL_ERR("ndev is invalid\n");
3428 return NULL; 5144 return NULL;
3429 } 5145 }
3430 cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
3431 if (!cfg_dev)
3432 return NULL;
3433 5146
3434 wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev); 5147 wdev = brcmf_alloc_wdev(busdev);
3435 if (IS_ERR(wdev)) { 5148 if (IS_ERR(wdev)) {
3436 kfree(cfg_dev);
3437 return NULL; 5149 return NULL;
3438 } 5150 }
3439 5151
3440 wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS); 5152 wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
3441 cfg_priv = wdev_to_cfg(wdev); 5153 cfg = wdev_to_cfg(wdev);
3442 cfg_priv->wdev = wdev; 5154 cfg->wdev = wdev;
3443 cfg_priv->pub = data; 5155 cfg->pub = drvr;
3444 ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
3445 ci->cfg_priv = cfg_priv;
3446 ndev->ieee80211_ptr = wdev; 5156 ndev->ieee80211_ptr = wdev;
3447 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 5157 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
3448 wdev->netdev = ndev; 5158 wdev->netdev = ndev;
3449 err = wl_init_priv(cfg_priv); 5159 err = wl_init_priv(cfg);
3450 if (err) { 5160 if (err) {
3451 WL_ERR("Failed to init iwm_priv (%d)\n", err); 5161 WL_ERR("Failed to init iwm_priv (%d)\n", err);
3452 goto cfg80211_attach_out; 5162 goto cfg80211_attach_out;
3453 } 5163 }
3454 brcmf_set_drvdata(cfg_dev, ci);
3455 5164
3456 return cfg_dev; 5165 return cfg;
3457 5166
3458cfg80211_attach_out: 5167cfg80211_attach_out:
3459 brcmf_free_wdev(cfg_priv); 5168 brcmf_free_wdev(cfg);
3460 kfree(cfg_dev);
3461 return NULL; 5169 return NULL;
3462} 5170}
3463 5171
3464void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev) 5172void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
3465{ 5173{
3466 struct brcmf_cfg80211_priv *cfg_priv; 5174 wl_deinit_priv(cfg);
3467 5175 brcmf_free_wdev(cfg);
3468 cfg_priv = brcmf_priv_get(cfg_dev);
3469
3470 wl_deinit_priv(cfg_priv);
3471 brcmf_free_wdev(cfg_priv);
3472 brcmf_set_drvdata(cfg_dev, NULL);
3473 kfree(cfg_dev);
3474} 5176}
3475 5177
3476void 5178void
@@ -3478,10 +5180,10 @@ brcmf_cfg80211_event(struct net_device *ndev,
3478 const struct brcmf_event_msg *e, void *data) 5180 const struct brcmf_event_msg *e, void *data)
3479{ 5181{
3480 u32 event_type = be32_to_cpu(e->event_type); 5182 u32 event_type = be32_to_cpu(e->event_type);
3481 struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); 5183 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
3482 5184
3483 if (!brcmf_enq_event(cfg_priv, event_type, e)) 5185 if (!brcmf_enq_event(cfg, event_type, e, data))
3484 schedule_work(&cfg_priv->event_work); 5186 schedule_work(&cfg->event_work);
3485} 5187}
3486 5188
3487static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype) 5189static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
@@ -3502,6 +5204,9 @@ static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
3502 case NL80211_IFTYPE_STATION: 5204 case NL80211_IFTYPE_STATION:
3503 infra = 1; 5205 infra = 1;
3504 break; 5206 break;
5207 case NL80211_IFTYPE_AP:
5208 infra = 1;
5209 break;
3505 default: 5210 default:
3506 err = -EINVAL; 5211 err = -EINVAL;
3507 WL_ERR("invalid type (%d)\n", iftype); 5212 WL_ERR("invalid type (%d)\n", iftype);
@@ -3554,6 +5259,8 @@ static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
3554 setbit(eventmask, BRCMF_E_TXFAIL); 5259 setbit(eventmask, BRCMF_E_TXFAIL);
3555 setbit(eventmask, BRCMF_E_JOIN_START); 5260 setbit(eventmask, BRCMF_E_JOIN_START);
3556 setbit(eventmask, BRCMF_E_SCAN_COMPLETE); 5261 setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
5262 setbit(eventmask, BRCMF_E_ESCAN_RESULT);
5263 setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
3557 5264
3558 brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, 5265 brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
3559 iovbuf, sizeof(iovbuf)); 5266 iovbuf, sizeof(iovbuf));
@@ -3672,46 +5379,46 @@ dongle_scantime_out:
3672 return err; 5379 return err;
3673} 5380}
3674 5381
3675static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv) 5382static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
3676{ 5383{
3677 struct wiphy *wiphy; 5384 struct wiphy *wiphy;
3678 s32 phy_list; 5385 s32 phy_list;
3679 s8 phy; 5386 s8 phy;
3680 s32 err = 0; 5387 s32 err = 0;
3681 5388
3682 err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST, 5389 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
3683 &phy_list, sizeof(phy_list)); 5390 &phy_list, sizeof(phy_list));
3684 if (err) { 5391 if (err) {
3685 WL_ERR("error (%d)\n", err); 5392 WL_ERR("error (%d)\n", err);
3686 return err; 5393 return err;
3687 } 5394 }
3688 5395
3689 phy = ((char *)&phy_list)[1]; 5396 phy = ((char *)&phy_list)[0];
3690 WL_INFO("%c phy\n", phy); 5397 WL_INFO("%c phy\n", phy);
3691 if (phy == 'n' || phy == 'a') { 5398 if (phy == 'n' || phy == 'a') {
3692 wiphy = cfg_to_wiphy(cfg_priv); 5399 wiphy = cfg_to_wiphy(cfg);
3693 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; 5400 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
3694 } 5401 }
3695 5402
3696 return err; 5403 return err;
3697} 5404}
3698 5405
3699static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv) 5406static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
3700{ 5407{
3701 return wl_update_wiphybands(cfg_priv); 5408 return wl_update_wiphybands(cfg);
3702} 5409}
3703 5410
3704static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv) 5411static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
3705{ 5412{
3706 struct net_device *ndev; 5413 struct net_device *ndev;
3707 struct wireless_dev *wdev; 5414 struct wireless_dev *wdev;
3708 s32 power_mode; 5415 s32 power_mode;
3709 s32 err = 0; 5416 s32 err = 0;
3710 5417
3711 if (cfg_priv->dongle_up) 5418 if (cfg->dongle_up)
3712 return err; 5419 return err;
3713 5420
3714 ndev = cfg_to_ndev(cfg_priv); 5421 ndev = cfg_to_ndev(cfg);
3715 wdev = ndev->ieee80211_ptr; 5422 wdev = ndev->ieee80211_ptr;
3716 5423
3717 brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME, 5424 brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
@@ -3721,21 +5428,21 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
3721 if (err) 5428 if (err)
3722 goto default_conf_out; 5429 goto default_conf_out;
3723 5430
3724 power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF; 5431 power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
3725 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode); 5432 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
3726 if (err) 5433 if (err)
3727 goto default_conf_out; 5434 goto default_conf_out;
3728 WL_INFO("power save set to %s\n", 5435 WL_INFO("power save set to %s\n",
3729 (power_mode ? "enabled" : "disabled")); 5436 (power_mode ? "enabled" : "disabled"));
3730 5437
3731 err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1), 5438 err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
3732 WL_BEACON_TIMEOUT); 5439 WL_BEACON_TIMEOUT);
3733 if (err) 5440 if (err)
3734 goto default_conf_out; 5441 goto default_conf_out;
3735 err = brcmf_dongle_mode(ndev, wdev->iftype); 5442 err = brcmf_dongle_mode(ndev, wdev->iftype);
3736 if (err && err != -EINPROGRESS) 5443 if (err && err != -EINPROGRESS)
3737 goto default_conf_out; 5444 goto default_conf_out;
3738 err = brcmf_dongle_probecap(cfg_priv); 5445 err = brcmf_dongle_probecap(cfg);
3739 if (err) 5446 if (err)
3740 goto default_conf_out; 5447 goto default_conf_out;
3741 5448
@@ -3743,31 +5450,31 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
3743 5450
3744default_conf_out: 5451default_conf_out:
3745 5452
3746 cfg_priv->dongle_up = true; 5453 cfg->dongle_up = true;
3747 5454
3748 return err; 5455 return err;
3749 5456
3750} 5457}
3751 5458
3752static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv) 5459static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
3753{ 5460{
3754 char buf[10+IFNAMSIZ]; 5461 char buf[10+IFNAMSIZ];
3755 struct dentry *fd; 5462 struct dentry *fd;
3756 s32 err = 0; 5463 s32 err = 0;
3757 5464
3758 sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name); 5465 sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
3759 cfg_priv->debugfsdir = debugfs_create_dir(buf, 5466 cfg->debugfsdir = debugfs_create_dir(buf,
3760 cfg_to_wiphy(cfg_priv)->debugfsdir); 5467 cfg_to_wiphy(cfg)->debugfsdir);
3761 5468
3762 fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir, 5469 fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
3763 (u16 *)&cfg_priv->profile->beacon_interval); 5470 (u16 *)&cfg->profile->beacon_interval);
3764 if (!fd) { 5471 if (!fd) {
3765 err = -ENOMEM; 5472 err = -ENOMEM;
3766 goto err_out; 5473 goto err_out;
3767 } 5474 }
3768 5475
3769 fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir, 5476 fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
3770 (u8 *)&cfg_priv->profile->dtim_period); 5477 (u8 *)&cfg->profile->dtim_period);
3771 if (!fd) { 5478 if (!fd) {
3772 err = -ENOMEM; 5479 err = -ENOMEM;
3773 goto err_out; 5480 goto err_out;
@@ -3777,40 +5484,40 @@ err_out:
3777 return err; 5484 return err;
3778} 5485}
3779 5486
3780static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv) 5487static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
3781{ 5488{
3782 debugfs_remove_recursive(cfg_priv->debugfsdir); 5489 debugfs_remove_recursive(cfg->debugfsdir);
3783 cfg_priv->debugfsdir = NULL; 5490 cfg->debugfsdir = NULL;
3784} 5491}
3785 5492
3786static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv) 5493static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
3787{ 5494{
3788 s32 err = 0; 5495 s32 err = 0;
3789 5496
3790 set_bit(WL_STATUS_READY, &cfg_priv->status); 5497 set_bit(WL_STATUS_READY, &cfg->status);
3791 5498
3792 brcmf_debugfs_add_netdev_params(cfg_priv); 5499 brcmf_debugfs_add_netdev_params(cfg);
3793 5500
3794 err = brcmf_config_dongle(cfg_priv); 5501 err = brcmf_config_dongle(cfg);
3795 if (err) 5502 if (err)
3796 return err; 5503 return err;
3797 5504
3798 brcmf_invoke_iscan(cfg_priv); 5505 brcmf_invoke_iscan(cfg);
3799 5506
3800 return err; 5507 return err;
3801} 5508}
3802 5509
3803static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv) 5510static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
3804{ 5511{
3805 /* 5512 /*
3806 * While going down, if associated with AP disassociate 5513 * While going down, if associated with AP disassociate
3807 * from AP to save power 5514 * from AP to save power
3808 */ 5515 */
3809 if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || 5516 if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
3810 test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && 5517 test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
3811 test_bit(WL_STATUS_READY, &cfg_priv->status)) { 5518 test_bit(WL_STATUS_READY, &cfg->status)) {
3812 WL_INFO("Disassociating from AP"); 5519 WL_INFO("Disassociating from AP");
3813 brcmf_link_down(cfg_priv); 5520 brcmf_link_down(cfg);
3814 5521
3815 /* Make sure WPA_Supplicant receives all the event 5522 /* Make sure WPA_Supplicant receives all the event
3816 generated due to DISASSOC call to the fw to keep 5523 generated due to DISASSOC call to the fw to keep
@@ -3819,63 +5526,33 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
3819 brcmf_delay(500); 5526 brcmf_delay(500);
3820 } 5527 }
3821 5528
3822 set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); 5529 brcmf_abort_scanning(cfg);
3823 brcmf_term_iscan(cfg_priv); 5530 clear_bit(WL_STATUS_READY, &cfg->status);
3824 if (cfg_priv->scan_request) {
3825 cfg80211_scan_done(cfg_priv->scan_request, true);
3826 /* May need to perform this to cover rmmod */
3827 /* wl_set_mpc(cfg_to_ndev(wl), 1); */
3828 cfg_priv->scan_request = NULL;
3829 }
3830 clear_bit(WL_STATUS_READY, &cfg_priv->status);
3831 clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
3832 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
3833 5531
3834 brcmf_debugfs_remove_netdev(cfg_priv); 5532 brcmf_debugfs_remove_netdev(cfg);
3835 5533
3836 return 0; 5534 return 0;
3837} 5535}
3838 5536
3839s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev) 5537s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
3840{ 5538{
3841 struct brcmf_cfg80211_priv *cfg_priv;
3842 s32 err = 0; 5539 s32 err = 0;
3843 5540
3844 cfg_priv = brcmf_priv_get(cfg_dev); 5541 mutex_lock(&cfg->usr_sync);
3845 mutex_lock(&cfg_priv->usr_sync); 5542 err = __brcmf_cfg80211_up(cfg);
3846 err = __brcmf_cfg80211_up(cfg_priv); 5543 mutex_unlock(&cfg->usr_sync);
3847 mutex_unlock(&cfg_priv->usr_sync);
3848 5544
3849 return err; 5545 return err;
3850} 5546}
3851 5547
3852s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev) 5548s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
3853{ 5549{
3854 struct brcmf_cfg80211_priv *cfg_priv;
3855 s32 err = 0; 5550 s32 err = 0;
3856 5551
3857 cfg_priv = brcmf_priv_get(cfg_dev); 5552 mutex_lock(&cfg->usr_sync);
3858 mutex_lock(&cfg_priv->usr_sync); 5553 err = __brcmf_cfg80211_down(cfg);
3859 err = __brcmf_cfg80211_down(cfg_priv); 5554 mutex_unlock(&cfg->usr_sync);
3860 mutex_unlock(&cfg_priv->usr_sync);
3861 5555
3862 return err; 5556 return err;
3863} 5557}
3864 5558
3865static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
3866 u8 t, u8 l, u8 *v)
3867{
3868 struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
3869 s32 err = 0;
3870
3871 if (ie->offset + l + 2 > WL_TLV_INFO_MAX) {
3872 WL_ERR("ei crosses buffer boundary\n");
3873 return -ENOSPC;
3874 }
3875 ie->buf[ie->offset] = t;
3876 ie->buf[ie->offset + 1] = l;
3877 memcpy(&ie->buf[ie->offset + 2], v, l);
3878 ie->offset += l + 2;
3879
3880 return err;
3881}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index b5d9b36df3d0..71ced174748a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,12 +17,6 @@
17#ifndef _wl_cfg80211_h_ 17#ifndef _wl_cfg80211_h_
18#define _wl_cfg80211_h_ 18#define _wl_cfg80211_h_
19 19
20struct brcmf_cfg80211_conf;
21struct brcmf_cfg80211_iface;
22struct brcmf_cfg80211_priv;
23struct brcmf_cfg80211_security;
24struct brcmf_cfg80211_ibss;
25
26#define WL_DBG_NONE 0 20#define WL_DBG_NONE 0
27#define WL_DBG_CONN (1 << 5) 21#define WL_DBG_CONN (1 << 5)
28#define WL_DBG_SCAN (1 << 4) 22#define WL_DBG_SCAN (1 << 4)
@@ -123,13 +117,25 @@ do { \
123#define WL_SCAN_UNASSOC_TIME 40 117#define WL_SCAN_UNASSOC_TIME 40
124#define WL_SCAN_PASSIVE_TIME 120 118#define WL_SCAN_PASSIVE_TIME 120
125 119
120#define WL_ESCAN_BUF_SIZE (1024 * 64)
121#define WL_ESCAN_TIMER_INTERVAL_MS 8000 /* E-Scan timeout */
122
123#define WL_ESCAN_ACTION_START 1
124#define WL_ESCAN_ACTION_CONTINUE 2
125#define WL_ESCAN_ACTION_ABORT 3
126
127#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
128#define IE_MAX_LEN 512
129
126/* dongle status */ 130/* dongle status */
127enum wl_status { 131enum wl_status {
128 WL_STATUS_READY, 132 WL_STATUS_READY,
129 WL_STATUS_SCANNING, 133 WL_STATUS_SCANNING,
130 WL_STATUS_SCAN_ABORTING, 134 WL_STATUS_SCAN_ABORTING,
131 WL_STATUS_CONNECTING, 135 WL_STATUS_CONNECTING,
132 WL_STATUS_CONNECTED 136 WL_STATUS_CONNECTED,
137 WL_STATUS_AP_CREATING,
138 WL_STATUS_AP_CREATED
133}; 139};
134 140
135/* wi-fi mode */ 141/* wi-fi mode */
@@ -169,23 +175,17 @@ struct brcmf_cfg80211_conf {
169 struct ieee80211_channel channel; 175 struct ieee80211_channel channel;
170}; 176};
171 177
178/* forward declaration */
179struct brcmf_cfg80211_info;
180
172/* cfg80211 main event loop */ 181/* cfg80211 main event loop */
173struct brcmf_cfg80211_event_loop { 182struct brcmf_cfg80211_event_loop {
174 s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_priv *cfg_priv, 183 s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
175 struct net_device *ndev, 184 struct net_device *ndev,
176 const struct brcmf_event_msg *e, 185 const struct brcmf_event_msg *e,
177 void *data); 186 void *data);
178}; 187};
179 188
180/* representing interface of cfg80211 plane */
181struct brcmf_cfg80211_iface {
182 struct brcmf_cfg80211_priv *cfg_priv;
183};
184
185struct brcmf_cfg80211_dev {
186 void *driver_data; /* to store cfg80211 object information */
187};
188
189/* basic structure of scan request */ 189/* basic structure of scan request */
190struct brcmf_cfg80211_scan_req { 190struct brcmf_cfg80211_scan_req {
191 struct brcmf_ssid_le ssid_le; 191 struct brcmf_ssid_le ssid_le;
@@ -238,7 +238,7 @@ struct brcmf_cfg80211_profile {
238/* dongle iscan event loop */ 238/* dongle iscan event loop */
239struct brcmf_cfg80211_iscan_eloop { 239struct brcmf_cfg80211_iscan_eloop {
240 s32 (*handler[WL_SCAN_ERSULTS_LAST]) 240 s32 (*handler[WL_SCAN_ERSULTS_LAST])
241 (struct brcmf_cfg80211_priv *cfg_priv); 241 (struct brcmf_cfg80211_info *cfg);
242}; 242};
243 243
244/* dongle iscan controller */ 244/* dongle iscan controller */
@@ -275,92 +275,240 @@ struct brcmf_cfg80211_pmk_list {
275 struct pmkid foo[MAXPMKID - 1]; 275 struct pmkid foo[MAXPMKID - 1];
276}; 276};
277 277
278/* dongle private data of cfg80211 interface */ 278/* dongle escan state */
279struct brcmf_cfg80211_priv { 279enum wl_escan_state {
280 struct wireless_dev *wdev; /* representing wl cfg80211 device */ 280 WL_ESCAN_STATE_IDLE,
281 struct brcmf_cfg80211_conf *conf; /* dongle configuration */ 281 WL_ESCAN_STATE_SCANNING
282 struct cfg80211_scan_request *scan_request; /* scan request 282};
283 object */ 283
284 struct brcmf_cfg80211_event_loop el; /* main event loop */ 284struct escan_info {
285 struct list_head evt_q_list; /* used for event queue */ 285 u32 escan_state;
286 spinlock_t evt_q_lock; /* for event queue synchronization */ 286 u8 escan_buf[WL_ESCAN_BUF_SIZE];
287 struct mutex usr_sync; /* maily for dongle up/down synchronization */ 287 struct wiphy *wiphy;
288 struct brcmf_scan_results *bss_list; /* bss_list holding scanned 288 struct net_device *ndev;
289 ap information */ 289};
290
291/* Structure to hold WPS, WPA IEs for a AP */
292struct ap_info {
293 u8 probe_res_ie[IE_MAX_LEN];
294 u8 beacon_ie[IE_MAX_LEN];
295 u32 probe_res_ie_len;
296 u32 beacon_ie_len;
297 u8 *wpa_ie;
298 u8 *rsn_ie;
299 bool security_mode;
300};
301
302/**
303 * struct brcmf_pno_param_le - PNO scan configuration parameters
304 *
305 * @version: PNO parameters version.
306 * @scan_freq: scan frequency.
307 * @lost_network_timeout: #sec. to declare discovered network as lost.
308 * @flags: Bit field to control features of PFN such as sort criteria auto
309 * enable switch and background scan.
310 * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
311 * criteria.
312 * @bestn: number of best networks in each scan.
313 * @mscan: number of scans recorded.
314 * @repeat: minimum number of scan intervals before scan frequency changes
315 * in adaptive scan.
316 * @exp: exponent of 2 for maximum scan interval.
317 * @slow_freq: slow scan period.
318 */
319struct brcmf_pno_param_le {
320 __le32 version;
321 __le32 scan_freq;
322 __le32 lost_network_timeout;
323 __le16 flags;
324 __le16 rssi_margin;
325 u8 bestn;
326 u8 mscan;
327 u8 repeat;
328 u8 exp;
329 __le32 slow_freq;
330};
331
332/**
333 * struct brcmf_pno_net_param_le - scan parameters per preferred network.
334 *
335 * @ssid: ssid name and its length.
336 * @flags: bit2: hidden.
337 * @infra: BSS vs IBSS.
338 * @auth: Open vs Closed.
339 * @wpa_auth: WPA type.
340 * @wsec: wsec value.
341 */
342struct brcmf_pno_net_param_le {
343 struct brcmf_ssid_le ssid;
344 __le32 flags;
345 __le32 infra;
346 __le32 auth;
347 __le32 wpa_auth;
348 __le32 wsec;
349};
350
351/**
352 * struct brcmf_pno_net_info_le - information per found network.
353 *
354 * @bssid: BSS network identifier.
355 * @channel: channel number only.
356 * @SSID_len: length of ssid.
357 * @SSID: ssid characters.
358 * @RSSI: receive signal strength (in dBm).
359 * @timestamp: age in seconds.
360 */
361struct brcmf_pno_net_info_le {
362 u8 bssid[ETH_ALEN];
363 u8 channel;
364 u8 SSID_len;
365 u8 SSID[32];
366 __le16 RSSI;
367 __le16 timestamp;
368};
369
370/**
371 * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
372 *
373 * @version: PNO version identifier.
374 * @status: indicates completion status of PNO scan.
375 * @count: amount of brcmf_pno_net_info_le entries appended.
376 */
377struct brcmf_pno_scanresults_le {
378 __le32 version;
379 __le32 status;
380 __le32 count;
381};
382
383/**
384 * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
385 *
386 * @wdev: representing wl cfg80211 device.
387 * @conf: dongle configuration.
388 * @scan_request: cfg80211 scan request object.
389 * @el: main event loop.
390 * @evt_q_list: used for event queue.
391 * @evt_q_lock: for event queue synchronization.
392 * @usr_sync: mainly for dongle up/down synchronization.
393 * @bss_list: bss_list holding scanned ap information.
394 * @scan_results: results of the last scan.
395 * @scan_req_int: internal scan request object.
396 * @bss_info: bss information for cfg80211 layer.
397 * @ie: information element object for internal purpose.
398 * @profile: holding dongle profile.
399 * @iscan: iscan controller information.
400 * @conn_info: association info.
401 * @pmk_list: wpa2 pmk list.
402 * @event_work: event handler work struct.
403 * @status: current dongle status.
404 * @pub: common driver information.
405 * @channel: current channel.
406 * @iscan_on: iscan on/off switch.
407 * @iscan_kickstart: indicate iscan already started.
408 * @active_scan: current scan mode.
409 * @sched_escan: e-scan for scheduled scan support running.
410 * @ibss_starter: indicates this sta is ibss starter.
411 * @link_up: link/connection up flag.
412 * @pwr_save: indicate whether dongle to support power save mode.
413 * @dongle_up: indicate whether dongle up or not.
414 * @roam_on: on/off switch for dongle self-roaming.
415 * @scan_tried: indicates if first scan attempted.
416 * @dcmd_buf: dcmd buffer.
417 * @extra_buf: mainly to grab assoc information.
418 * @debugfsdir: debugfs folder for this device.
419 * @escan_on: escan on/off switch.
420 * @escan_info: escan information.
421 * @escan_timeout: Timer for catch scan timeout.
422 * @escan_timeout_work: scan timeout worker.
423 * @escan_ioctl_buf: dongle command buffer for escan commands.
424 * @ap_info: host ap information.
425 * @ci: used to link this structure to netdev private data.
426 */
427struct brcmf_cfg80211_info {
428 struct wireless_dev *wdev;
429 struct brcmf_cfg80211_conf *conf;
430 struct cfg80211_scan_request *scan_request;
431 struct brcmf_cfg80211_event_loop el;
432 struct list_head evt_q_list;
433 spinlock_t evt_q_lock;
434 struct mutex usr_sync;
435 struct brcmf_scan_results *bss_list;
290 struct brcmf_scan_results *scan_results; 436 struct brcmf_scan_results *scan_results;
291 struct brcmf_cfg80211_scan_req *scan_req_int; /* scan request object 437 struct brcmf_cfg80211_scan_req *scan_req_int;
292 for internal purpose */ 438 struct wl_cfg80211_bss_info *bss_info;
293 struct wl_cfg80211_bss_info *bss_info; /* bss information for 439 struct brcmf_cfg80211_ie ie;
294 cfg80211 layer */ 440 struct brcmf_cfg80211_profile *profile;
295 struct brcmf_cfg80211_ie ie; /* information element object for 441 struct brcmf_cfg80211_iscan_ctrl *iscan;
296 internal purpose */ 442 struct brcmf_cfg80211_connect_info conn_info;
297 struct brcmf_cfg80211_profile *profile; /* holding dongle profile */ 443 struct brcmf_cfg80211_pmk_list *pmk_list;
298 struct brcmf_cfg80211_iscan_ctrl *iscan; /* iscan controller */ 444 struct work_struct event_work;
299 struct brcmf_cfg80211_connect_info conn_info; /* association info */ 445 unsigned long status;
300 struct brcmf_cfg80211_pmk_list *pmk_list; /* wpa2 pmk list */ 446 struct brcmf_pub *pub;
301 struct work_struct event_work; /* event handler work struct */ 447 u32 channel;
302 unsigned long status; /* current dongle status */ 448 bool iscan_on;
303 void *pub; 449 bool iscan_kickstart;
304 u32 channel; /* current channel */ 450 bool active_scan;
305 bool iscan_on; /* iscan on/off switch */ 451 bool sched_escan;
306 bool iscan_kickstart; /* indicate iscan already started */ 452 bool ibss_starter;
307 bool active_scan; /* current scan mode */ 453 bool link_up;
308 bool ibss_starter; /* indicates this sta is ibss starter */ 454 bool pwr_save;
309 bool link_up; /* link/connection up flag */ 455 bool dongle_up;
310 bool pwr_save; /* indicate whether dongle to support 456 bool roam_on;
311 power save mode */ 457 bool scan_tried;
312 bool dongle_up; /* indicate whether dongle up or not */ 458 u8 *dcmd_buf;
313 bool roam_on; /* on/off switch for dongle self-roaming */ 459 u8 *extra_buf;
314 bool scan_tried; /* indicates if first scan attempted */
315 u8 *dcmd_buf; /* dcmd buffer */
316 u8 *extra_buf; /* maily to grab assoc information */
317 struct dentry *debugfsdir; 460 struct dentry *debugfsdir;
318 u8 ci[0] __aligned(NETDEV_ALIGN); 461 bool escan_on;
462 struct escan_info escan_info;
463 struct timer_list escan_timeout;
464 struct work_struct escan_timeout_work;
465 u8 *escan_ioctl_buf;
466 struct ap_info *ap_info;
319}; 467};
320 468
321static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_priv *w) 469static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
322{ 470{
323 return w->wdev->wiphy; 471 return w->wdev->wiphy;
324} 472}
325 473
326static inline struct brcmf_cfg80211_priv *wiphy_to_cfg(struct wiphy *w) 474static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
327{ 475{
328 return (struct brcmf_cfg80211_priv *)(wiphy_priv(w)); 476 return (struct brcmf_cfg80211_info *)(wiphy_priv(w));
329} 477}
330 478
331static inline struct brcmf_cfg80211_priv *wdev_to_cfg(struct wireless_dev *wd) 479static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
332{ 480{
333 return (struct brcmf_cfg80211_priv *)(wdev_priv(wd)); 481 return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
334} 482}
335 483
336static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_priv *cfg) 484static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
337{ 485{
338 return cfg->wdev->netdev; 486 return cfg->wdev->netdev;
339} 487}
340 488
341static inline struct brcmf_cfg80211_priv *ndev_to_cfg(struct net_device *ndev) 489static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
342{ 490{
343 return wdev_to_cfg(ndev->ieee80211_ptr); 491 return wdev_to_cfg(ndev->ieee80211_ptr);
344} 492}
345 493
346#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data)) 494#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
347#define cfg_to_iscan(w) (w->iscan) 495#define cfg_to_iscan(w) (w->iscan)
348 496
349static inline struct 497static inline struct
350brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg) 498brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
351{ 499{
352 return &cfg->conn_info; 500 return &cfg->conn_info;
353} 501}
354 502
355extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, 503struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
356 struct device *busdev, 504 struct device *busdev,
357 void *data); 505 struct brcmf_pub *drvr);
358extern void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg); 506void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
359 507
360/* event handler from dongle */ 508/* event handler from dongle */
361extern void brcmf_cfg80211_event(struct net_device *ndev, 509void brcmf_cfg80211_event(struct net_device *ndev,
362 const struct brcmf_event_msg *e, void *data); 510 const struct brcmf_event_msg *e, void *data);
363extern s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev); 511s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
364extern s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev); 512s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
365 513
366#endif /* _wl_cfg80211_h_ */ 514#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 8c9345dd37d2..b89f1272b93f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -535,9 +535,6 @@ void ai_detach(struct si_pub *sih)
535{ 535{
536 struct si_info *sii; 536 struct si_info *sii;
537 537
538 struct si_pub *si_local = NULL;
539 memcpy(&si_local, &sih, sizeof(struct si_pub **));
540
541 sii = container_of(sih, struct si_info, pub); 538 sii = container_of(sih, struct si_info, pub);
542 539
543 if (sii == NULL) 540 if (sii == NULL)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index a5edebeb0b4f..a744ea5a9559 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation");
86MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); 86MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
87MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); 87MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
88MODULE_LICENSE("Dual BSD/GPL"); 88MODULE_LICENSE("Dual BSD/GPL");
89 89/* This needs to be adjusted when brcms_firmwares changes */
90MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
91MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
90 92
91/* recognized BCMA Core IDs */ 93/* recognized BCMA Core IDs */
92static struct bcma_device_id brcms_coreid_table[] = { 94static struct bcma_device_id brcms_coreid_table[] = {
@@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
265 } 267 }
266} 268}
267 269
268static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 270static void brcms_ops_tx(struct ieee80211_hw *hw,
271 struct ieee80211_tx_control *control,
272 struct sk_buff *skb)
269{ 273{
270 struct brcms_info *wl = hw->priv; 274 struct brcms_info *wl = hw->priv;
271 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 275 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
277 goto done; 281 goto done;
278 } 282 }
279 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw); 283 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
280 tx_info->rate_driver_data[0] = tx_info->control.sta; 284 tx_info->rate_driver_data[0] = control->sta;
281 done: 285 done:
282 spin_unlock_bh(&wl->lock); 286 spin_unlock_bh(&wl->lock);
283} 287}
@@ -300,7 +304,10 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
300 wl->mute_tx = true; 304 wl->mute_tx = true;
301 305
302 if (!wl->pub->up) 306 if (!wl->pub->up)
303 err = brcms_up(wl); 307 if (!blocked)
308 err = brcms_up(wl);
309 else
310 err = -ERFKILL;
304 else 311 else
305 err = -ENODEV; 312 err = -ENODEV;
306 spin_unlock_bh(&wl->lock); 313 spin_unlock_bh(&wl->lock);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 03ca65324845..75086b37c817 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7512 7512
7513 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan); 7513 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
7514 7514
7515 if (channel > 14) { 7515 rx_status->band =
7516 rx_status->band = IEEE80211_BAND_5GHZ; 7516 channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
7517 rx_status->freq = ieee80211_ofdm_chan_to_freq( 7517 rx_status->freq =
7518 WF_CHAN_FACTOR_5_G/2, channel); 7518 ieee80211_channel_to_frequency(channel, rx_status->band);
7519
7520 } else {
7521 rx_status->band = IEEE80211_BAND_2GHZ;
7522 rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
7523 }
7524 7519
7525 rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh); 7520 rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
7526 7521
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index bcc79b4e3267..e8682855b73a 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -34,6 +34,7 @@
34#define BCM43235_CHIP_ID 43235 34#define BCM43235_CHIP_ID 43235
35#define BCM43236_CHIP_ID 43236 35#define BCM43236_CHIP_ID 43236
36#define BCM43238_CHIP_ID 43238 36#define BCM43238_CHIP_ID 43238
37#define BCM43241_CHIP_ID 0x4324
37#define BCM4329_CHIP_ID 0x4329 38#define BCM4329_CHIP_ID 0x4329
38#define BCM4330_CHIP_ID 0x4330 39#define BCM4330_CHIP_ID 0x4330
39#define BCM4331_CHIP_ID 0x4331 40#define BCM4331_CHIP_ID 0x4331
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index f10d30274c23..c11a290a1edf 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -67,11 +67,6 @@
67#define WL_CHANSPEC_BAND_2G 0x2000 67#define WL_CHANSPEC_BAND_2G 0x2000
68#define INVCHANSPEC 255 68#define INVCHANSPEC 255
69 69
70/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
71#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
72#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
73#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
74
75#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK)) 70#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
76#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) 71#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
77 72
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 47932b28aac1..970a48baaf80 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -4,6 +4,7 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/etherdevice.h>
7#include "hostap_wlan.h" 8#include "hostap_wlan.h"
8#include "hostap.h" 9#include "hostap.h"
9#include "hostap_ap.h" 10#include "hostap_ap.h"
@@ -463,8 +464,7 @@ static void handle_info_queue_scanresults(local_info_t *local)
463 prism2_host_roaming(local); 464 prism2_host_roaming(local);
464 465
465 if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA && 466 if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
466 memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00", 467 !is_zero_ether_addr(local->preferred_ap)) {
467 ETH_ALEN) != 0) {
468 /* 468 /*
469 * Firmware seems to be getting into odd state in host_roaming 469 * Firmware seems to be getting into odd state in host_roaming
470 * mode 2 when hostscan is used without join command, so try 470 * mode 2 when hostscan is used without join command, so try
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 18054d9c6688..ac074731335a 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -6,6 +6,7 @@
6#include <linux/ethtool.h> 6#include <linux/ethtool.h>
7#include <linux/if_arp.h> 7#include <linux/if_arp.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/etherdevice.h>
9#include <net/lib80211.h> 10#include <net/lib80211.h>
10 11
11#include "hostap_wlan.h" 12#include "hostap_wlan.h"
@@ -3221,8 +3222,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3221 return -EINVAL; 3222 return -EINVAL;
3222 3223
3223 addr = ext->addr.sa_data; 3224 addr = ext->addr.sa_data;
3224 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && 3225 if (is_broadcast_ether_addr(addr)) {
3225 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3226 sta_ptr = NULL; 3226 sta_ptr = NULL;
3227 crypt = &local->crypt_info.crypt[i]; 3227 crypt = &local->crypt_info.crypt[i];
3228 } else { 3228 } else {
@@ -3394,8 +3394,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
3394 i--; 3394 i--;
3395 3395
3396 addr = ext->addr.sa_data; 3396 addr = ext->addr.sa_data;
3397 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && 3397 if (is_broadcast_ether_addr(addr)) {
3398 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3399 sta_ptr = NULL; 3398 sta_ptr = NULL;
3400 crypt = &local->crypt_info.crypt[i]; 3399 crypt = &local->crypt_info.crypt[i];
3401 } else { 3400 } else {
@@ -3458,9 +3457,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3458 param->u.crypt.key_len) 3457 param->u.crypt.key_len)
3459 return -EINVAL; 3458 return -EINVAL;
3460 3459
3461 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && 3460 if (is_broadcast_ether_addr(param->sta_addr)) {
3462 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3463 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3464 if (param->u.crypt.idx >= WEP_KEYS) 3461 if (param->u.crypt.idx >= WEP_KEYS)
3465 return -EINVAL; 3462 return -EINVAL;
3466 sta_ptr = NULL; 3463 sta_ptr = NULL;
@@ -3593,9 +3590,7 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
3593 if (max_key_len < 0) 3590 if (max_key_len < 0)
3594 return -EINVAL; 3591 return -EINVAL;
3595 3592
3596 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && 3593 if (is_broadcast_ether_addr(param->sta_addr)) {
3597 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3598 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3599 sta_ptr = NULL; 3594 sta_ptr = NULL;
3600 if (param->u.crypt.idx >= WEP_KEYS) 3595 if (param->u.crypt.idx >= WEP_KEYS)
3601 param->u.crypt.idx = local->crypt_info.tx_keyidx; 3596 param->u.crypt.idx = local->crypt_info.tx_keyidx;
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 627bc12074c7..15f0fad39add 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -1084,7 +1084,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
1084 __le16 val = cpu_to_le16(reason); 1084 __le16 val = cpu_to_le16(reason);
1085 1085
1086 if (local->iw_mode != IW_MODE_INFRA || 1086 if (local->iw_mode != IW_MODE_INFRA ||
1087 memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 || 1087 is_zero_ether_addr(local->bssid) ||
1088 memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0) 1088 memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
1089 return 0; 1089 return 0;
1090 1090
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 534e6557e7e6..29b8fa1adefd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6962,13 +6962,6 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6962 struct ipw2100_priv *priv = libipw_priv(dev); 6962 struct ipw2100_priv *priv = libipw_priv(dev);
6963 int err = 0; 6963 int err = 0;
6964 6964
6965 static const unsigned char any[] = {
6966 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6967 };
6968 static const unsigned char off[] = {
6969 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
6970 };
6971
6972 // sanity checks 6965 // sanity checks
6973 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 6966 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
6974 return -EINVAL; 6967 return -EINVAL;
@@ -6979,8 +6972,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6979 goto done; 6972 goto done;
6980 } 6973 }
6981 6974
6982 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 6975 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
6983 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 6976 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
6984 /* we disable mandatory BSSID association */ 6977 /* we disable mandatory BSSID association */
6985 IPW_DEBUG_WX("exit - disable mandatory BSSID\n"); 6978 IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
6986 priv->config &= ~CFG_STATIC_BSSID; 6979 priv->config &= ~CFG_STATIC_BSSID;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0df459147394..935120fc8c93 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9037,18 +9037,11 @@ static int ipw_wx_set_wap(struct net_device *dev,
9037{ 9037{
9038 struct ipw_priv *priv = libipw_priv(dev); 9038 struct ipw_priv *priv = libipw_priv(dev);
9039 9039
9040 static const unsigned char any[] = {
9041 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9042 };
9043 static const unsigned char off[] = {
9044 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9045 };
9046
9047 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 9040 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9048 return -EINVAL; 9041 return -EINVAL;
9049 mutex_lock(&priv->mutex); 9042 mutex_lock(&priv->mutex);
9050 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 9043 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
9051 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 9044 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
9052 /* we disable mandatory BSSID association */ 9045 /* we disable mandatory BSSID association */
9053 IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); 9046 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9054 priv->config &= ~CFG_STATIC_BSSID; 9047 priv->config &= ~CFG_STATIC_BSSID;
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 1571505b1a38..54aba4744438 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -675,7 +675,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
675 } 675 }
676 done: 676 done:
677 if (ieee->set_security) 677 if (ieee->set_security)
678 ieee->set_security(ieee->dev, &sec); 678 ieee->set_security(dev, &sec);
679 679
680 return ret; 680 return ret;
681} 681}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index faec40467208..e252acb9c862 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
460 * start C_TX command process 460 * start C_TX command process
461 */ 461 */
462static int 462static int
463il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) 463il3945_tx_skb(struct il_priv *il,
464 struct ieee80211_sta *sta,
465 struct sk_buff *skb)
464{ 466{
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
512 hdr_len = ieee80211_hdrlen(fc); 514 hdr_len = ieee80211_hdrlen(fc);
513 515
514 /* Find idx into station table for destination station */ 516 /* Find idx into station table for destination station */
515 sta_id = il_sta_id_or_broadcast(il, info->control.sta); 517 sta_id = il_sta_id_or_broadcast(il, sta);
516 if (sta_id == IL_INVALID_STATION) { 518 if (sta_id == IL_INVALID_STATION) {
517 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); 519 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
518 goto drop; 520 goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
2859} 2861}
2860 2862
2861static void 2863static void
2862il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2864il3945_mac_tx(struct ieee80211_hw *hw,
2865 struct ieee80211_tx_control *control,
2866 struct sk_buff *skb)
2863{ 2867{
2864 struct il_priv *il = hw->priv; 2868 struct il_priv *il = hw->priv;
2865 2869
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2868 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2872 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2869 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2873 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2870 2874
2871 if (il3945_tx_skb(il, skb)) 2875 if (il3945_tx_skb(il, control->sta, skb))
2872 dev_kfree_skb_any(skb); 2876 dev_kfree_skb_any(skb);
2873 2877
2874 D_MAC80211("leave\n"); 2878 D_MAC80211("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 34f61a0581a2..eac4dc8bc879 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
1526} 1526}
1527 1527
1528static void 1528static void
1529il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, 1529il4965_tx_cmd_build_rate(struct il_priv *il,
1530 struct ieee80211_tx_info *info, __le16 fc) 1530 struct il_tx_cmd *tx_cmd,
1531 struct ieee80211_tx_info *info,
1532 struct ieee80211_sta *sta,
1533 __le16 fc)
1531{ 1534{
1532 const u8 rts_retry_limit = 60; 1535 const u8 rts_retry_limit = 60;
1533 u32 rate_flags; 1536 u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
1561 rate_idx = info->control.rates[0].idx; 1564 rate_idx = info->control.rates[0].idx;
1562 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 1565 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1563 || rate_idx > RATE_COUNT_LEGACY) 1566 || rate_idx > RATE_COUNT_LEGACY)
1564 rate_idx = 1567 rate_idx = rate_lowest_index(&il->bands[info->band], sta);
1565 rate_lowest_index(&il->bands[info->band],
1566 info->control.sta);
1567 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 1568 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1568 if (info->band == IEEE80211_BAND_5GHZ) 1569 if (info->band == IEEE80211_BAND_5GHZ)
1569 rate_idx += IL_FIRST_OFDM_RATE; 1570 rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
1630 * start C_TX command process 1631 * start C_TX command process
1631 */ 1632 */
1632int 1633int
1633il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) 1634il4965_tx_skb(struct il_priv *il,
1635 struct ieee80211_sta *sta,
1636 struct sk_buff *skb)
1634{ 1637{
1635 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1638 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1636 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1639 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1637 struct ieee80211_sta *sta = info->control.sta;
1638 struct il_station_priv *sta_priv = NULL; 1640 struct il_station_priv *sta_priv = NULL;
1639 struct il_tx_queue *txq; 1641 struct il_tx_queue *txq;
1640 struct il_queue *q; 1642 struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1680 sta_id = il->hw_params.bcast_id; 1682 sta_id = il->hw_params.bcast_id;
1681 else { 1683 else {
1682 /* Find idx into station table for destination station */ 1684 /* Find idx into station table for destination station */
1683 sta_id = il_sta_id_or_broadcast(il, info->control.sta); 1685 sta_id = il_sta_id_or_broadcast(il, sta);
1684 1686
1685 if (sta_id == IL_INVALID_STATION) { 1687 if (sta_id == IL_INVALID_STATION) {
1686 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); 1688 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1786 /* TODO need this for burst mode later on */ 1788 /* TODO need this for burst mode later on */
1787 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id); 1789 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1788 1790
1789 il4965_tx_cmd_build_rate(il, tx_cmd, info, fc); 1791 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
1790 1792
1791 il_update_stats(il, true, fc, len); 1793 il_update_stats(il, true, fc, len);
1792 /* 1794 /*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
5828} 5830}
5829 5831
5830void 5832void
5831il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 5833il4965_mac_tx(struct ieee80211_hw *hw,
5834 struct ieee80211_tx_control *control,
5835 struct sk_buff *skb)
5832{ 5836{
5833 struct il_priv *il = hw->priv; 5837 struct il_priv *il = hw->priv;
5834 5838
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
5837 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 5841 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5838 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 5842 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5839 5843
5840 if (il4965_tx_skb(il, skb)) 5844 if (il4965_tx_skb(il, control->sta, skb))
5841 dev_kfree_skb_any(skb); 5845 dev_kfree_skb_any(skb);
5842 5846
5843 D_MACDUMP("leave\n"); 5847 D_MACDUMP("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1db677689cfe..2d092f328547 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); 78int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
79void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, 79void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
80 struct ieee80211_tx_info *info); 80 struct ieee80211_tx_info *info);
81int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb); 81int il4965_tx_skb(struct il_priv *il,
82 struct ieee80211_sta *sta,
83 struct sk_buff *skb);
82int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, 84int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
83 struct ieee80211_sta *sta, u16 tid, u16 * ssn); 85 struct ieee80211_sta *sta, u16 tid, u16 * ssn);
84int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, 86int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
163int il4965_eeprom_check_version(struct il_priv *il); 165int il4965_eeprom_check_version(struct il_priv *il);
164 166
165/* mac80211 handlers (for 4965) */ 167/* mac80211 handlers (for 4965) */
166void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 168void il4965_mac_tx(struct ieee80211_hw *hw,
169 struct ieee80211_tx_control *control,
170 struct sk_buff *skb);
167int il4965_mac_start(struct ieee80211_hw *hw); 171int il4965_mac_start(struct ieee80211_hw *hw);
168void il4965_mac_stop(struct ieee80211_hw *hw); 172void il4965_mac_stop(struct ieee80211_hw *hw);
169void il4965_configure_filter(struct ieee80211_hw *hw, 173void il4965_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 0370403fd0bd..318ed3c9fe74 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1586,9 +1586,9 @@ il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1586 return 0; 1586 return 0;
1587 1587
1588 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 1588 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1589 memcpy(frame->da, il_bcast_addr, ETH_ALEN); 1589 eth_broadcast_addr(frame->da);
1590 memcpy(frame->sa, ta, ETH_ALEN); 1590 memcpy(frame->sa, ta, ETH_ALEN);
1591 memcpy(frame->bssid, il_bcast_addr, ETH_ALEN); 1591 eth_broadcast_addr(frame->bssid);
1592 frame->seq_ctrl = 0; 1592 frame->seq_ctrl = 0;
1593 1593
1594 len += 24; 1594 len += 24;
@@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
4860 4860
4861#ifdef CONFIG_PM 4861#ifdef CONFIG_PM
4862 4862
4863int 4863static int
4864il_pci_suspend(struct device *device) 4864il_pci_suspend(struct device *device)
4865{ 4865{
4866 struct pci_dev *pdev = to_pci_dev(device); 4866 struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device)
4877 4877
4878 return 0; 4878 return 0;
4879} 4879}
4880EXPORT_SYMBOL(il_pci_suspend);
4881 4880
4882int 4881static int
4883il_pci_resume(struct device *device) 4882il_pci_resume(struct device *device)
4884{ 4883{
4885 struct pci_dev *pdev = to_pci_dev(device); 4884 struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device)
4906 4905
4907 return 0; 4906 return 0;
4908} 4907}
4909EXPORT_SYMBOL(il_pci_resume);
4910 4908
4911const struct dev_pm_ops il_pm_ops = { 4909SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4912 .suspend = il_pci_suspend,
4913 .resume = il_pci_resume,
4914 .freeze = il_pci_suspend,
4915 .thaw = il_pci_resume,
4916 .poweroff = il_pci_suspend,
4917 .restore = il_pci_resume,
4918};
4919EXPORT_SYMBOL(il_pm_ops); 4910EXPORT_SYMBOL(il_pm_ops);
4920 4911
4921#endif /* CONFIG_PM */ 4912#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 724682669060..b4bb813362bd 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1843,8 +1843,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
1843 u32 beacon_interval); 1843 u32 beacon_interval);
1844 1844
1845#ifdef CONFIG_PM 1845#ifdef CONFIG_PM
1846int il_pci_suspend(struct device *device);
1847int il_pci_resume(struct device *device);
1848extern const struct dev_pm_ops il_pm_ops; 1846extern const struct dev_pm_ops il_pm_ops;
1849 1847
1850#define IL_LEGACY_PM_OPS (&il_pm_ops) 1848#define IL_LEGACY_PM_OPS (&il_pm_ops)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 9bb16bdf6d26..75e12f29d9eb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
201 201
202 202
203/* tx */ 203/* tx */
204int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 204int iwlagn_tx_skb(struct iwl_priv *priv,
205 struct ieee80211_sta *sta,
206 struct sk_buff *skb);
205int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 207int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
206 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 208 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
207int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, 209int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
485} 487}
486 488
487#ifdef CONFIG_IWLWIFI_DEBUGFS 489#ifdef CONFIG_IWLWIFI_DEBUGFS
488int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); 490int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
489void iwl_dbgfs_unregister(struct iwl_priv *priv);
490#else 491#else
491static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 492static inline int iwl_dbgfs_register(struct iwl_priv *priv,
493 struct dentry *dbgfs_dir)
492{ 494{
493 return 0; 495 return 0;
494} 496}
495static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
496{
497}
498#endif /* CONFIG_IWLWIFI_DEBUGFS */ 497#endif /* CONFIG_IWLWIFI_DEBUGFS */
499 498
500#ifdef CONFIG_IWLWIFI_DEBUG 499#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 4a361c55c543..01128c96b5d8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -1055,8 +1055,9 @@ struct iwl_wep_cmd {
1055#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) 1055#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1056#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) 1056#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1057#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) 1057#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1058#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 1058#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
1059#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 1059#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
1060#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
1060 1061
1061#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 1062#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1062#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) 1063#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index a47b306b522c..1a98fa3ab06d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2352 * Create the debugfs files and directories 2352 * Create the debugfs files and directories
2353 * 2353 *
2354 */ 2354 */
2355int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 2355int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
2356{ 2356{
2357 struct dentry *phyd = priv->hw->wiphy->debugfsdir; 2357 struct dentry *dir_data, *dir_rf, *dir_debug;
2358 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
2359 2358
2360 dir_drv = debugfs_create_dir(name, phyd); 2359 priv->debugfs_dir = dbgfs_dir;
2361 if (!dir_drv)
2362 return -ENOMEM;
2363
2364 priv->debugfs_dir = dir_drv;
2365 2360
2366 dir_data = debugfs_create_dir("data", dir_drv); 2361 dir_data = debugfs_create_dir("data", dbgfs_dir);
2367 if (!dir_data) 2362 if (!dir_data)
2368 goto err; 2363 goto err;
2369 dir_rf = debugfs_create_dir("rf", dir_drv); 2364 dir_rf = debugfs_create_dir("rf", dbgfs_dir);
2370 if (!dir_rf) 2365 if (!dir_rf)
2371 goto err; 2366 goto err;
2372 dir_debug = debugfs_create_dir("debug", dir_drv); 2367 dir_debug = debugfs_create_dir("debug", dbgfs_dir);
2373 if (!dir_debug) 2368 if (!dir_debug)
2374 goto err; 2369 goto err;
2375 2370
@@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2415 /* Calibrations disabled/enabled status*/ 2410 /* Calibrations disabled/enabled status*/
2416 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); 2411 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
2417 2412
2418 if (iwl_trans_dbgfs_register(priv->trans, dir_debug)) 2413 /*
2419 goto err; 2414 * Create a symlink with mac80211. This is not very robust, as it does
2415 * not remove the symlink created. The implicit assumption is that
2416 * when the opmode exits, mac80211 will also exit, and will remove
2417 * this symlink as part of its cleanup.
2418 */
2419 if (priv->mac80211_registered) {
2420 char buf[100];
2421 struct dentry *mac80211_dir, *dev_dir, *root_dir;
2422
2423 dev_dir = dbgfs_dir->d_parent;
2424 root_dir = dev_dir->d_parent;
2425 mac80211_dir = priv->hw->wiphy->debugfsdir;
2426
2427 snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
2428 dev_dir->d_name.name);
2429
2430 if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
2431 goto err;
2432 }
2433
2420 return 0; 2434 return 0;
2421 2435
2422err: 2436err:
2423 IWL_ERR(priv, "Can't create the debugfs directory\n"); 2437 IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
2424 iwl_dbgfs_unregister(priv);
2425 return -ENOMEM; 2438 return -ENOMEM;
2426} 2439}
2427
2428/**
2429 * Remove the debugfs files and directories
2430 *
2431 */
2432void iwl_dbgfs_unregister(struct iwl_priv *priv)
2433{
2434 if (!priv->debugfs_dir)
2435 return;
2436
2437 debugfs_remove_recursive(priv->debugfs_dir);
2438 priv->debugfs_dir = NULL;
2439}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 054f728f6266..8141f91c3725 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -771,6 +771,7 @@ struct iwl_priv {
771 u8 agg_tids_count; 771 u8 agg_tids_count;
772 772
773 struct iwl_rx_phy_res last_phy_res; 773 struct iwl_rx_phy_res last_phy_res;
774 u32 ampdu_ref;
774 bool last_phy_res_valid; 775 bool last_phy_res_valid;
775 776
776 /* 777 /*
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index a5f7bce96325..ff8162d4c454 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
195 ARRAY_SIZE(iwlagn_iface_combinations_dualmode); 195 ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
196 } 196 }
197 197
198 hw->wiphy->max_remain_on_channel_duration = 1000; 198 hw->wiphy->max_remain_on_channel_duration = 500;
199 199
200 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 200 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
201 WIPHY_FLAG_DISABLE_BEACON_HINTS | 201 WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
511} 511}
512#endif 512#endif
513 513
514static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 514static void iwlagn_mac_tx(struct ieee80211_hw *hw,
515 struct ieee80211_tx_control *control,
516 struct sk_buff *skb)
515{ 517{
516 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 518 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
517 519
518 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 520 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
519 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 521 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
520 522
521 if (iwlagn_tx_skb(priv, skb)) 523 if (iwlagn_tx_skb(priv, control->sta, skb))
522 dev_kfree_skb_any(skb); 524 dev_kfree_skb_any(skb);
523} 525}
524 526
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 84d3db5aa506..7ff3f1430678 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
862 * No race since we hold the mutex here and a new one 862 * No race since we hold the mutex here and a new one
863 * can't come in at this time. 863 * can't come in at this time.
864 */ 864 */
865 ieee80211_remain_on_channel_expired(priv->hw); 865 if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
866 ieee80211_remain_on_channel_expired(priv->hw);
866 867
867 exit_pending = 868 exit_pending =
868 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 869 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
994 iwlagn_prepare_restart(priv); 995 iwlagn_prepare_restart(priv);
995 mutex_unlock(&priv->mutex); 996 mutex_unlock(&priv->mutex);
996 iwl_cancel_deferred_work(priv); 997 iwl_cancel_deferred_work(priv);
997 ieee80211_restart_hw(priv->hw); 998 if (priv->mac80211_registered)
999 ieee80211_restart_hw(priv->hw);
1000 else
1001 IWL_ERR(priv,
1002 "Cannot request restart before registrating with mac80211");
998 } else { 1003 } else {
999 WARN_ON(1); 1004 WARN_ON(1);
1000 } 1005 }
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1222 1227
1223static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, 1228static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1224 const struct iwl_cfg *cfg, 1229 const struct iwl_cfg *cfg,
1225 const struct iwl_fw *fw) 1230 const struct iwl_fw *fw,
1231 struct dentry *dbgfs_dir)
1226{ 1232{
1227 struct iwl_priv *priv; 1233 struct iwl_priv *priv;
1228 struct ieee80211_hw *hw; 1234 struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1466 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) 1472 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
1467 goto out_destroy_workqueue; 1473 goto out_destroy_workqueue;
1468 1474
1469 if (iwl_dbgfs_register(priv, DRV_NAME)) 1475 if (iwl_dbgfs_register(priv, dbgfs_dir))
1470 IWL_ERR(priv, 1476 goto out_mac80211_unregister;
1471 "failed to create debugfs files. Ignoring error\n");
1472 1477
1473 return op_mode; 1478 return op_mode;
1474 1479
1480out_mac80211_unregister:
1481 iwlagn_mac_unregister(priv);
1475out_destroy_workqueue: 1482out_destroy_workqueue:
1483 iwl_tt_exit(priv);
1484 iwl_testmode_free(priv);
1485 iwl_cancel_deferred_work(priv);
1476 destroy_workqueue(priv->workqueue); 1486 destroy_workqueue(priv->workqueue);
1477 priv->workqueue = NULL; 1487 priv->workqueue = NULL;
1478 iwl_uninit_drv(priv); 1488 iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1493 1503
1494 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 1504 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
1495 1505
1496 iwl_dbgfs_unregister(priv);
1497
1498 iwl_testmode_free(priv); 1506 iwl_testmode_free(priv);
1499 iwlagn_mac_unregister(priv); 1507 iwlagn_mac_unregister(priv);
1500 1508
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index fee5cffa1669..5a9c325804f6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -667,6 +667,7 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
667 struct iwl_rx_packet *pkt = rxb_addr(rxb); 667 struct iwl_rx_packet *pkt = rxb_addr(rxb);
668 668
669 priv->last_phy_res_valid = true; 669 priv->last_phy_res_valid = true;
670 priv->ampdu_ref++;
670 memcpy(&priv->last_phy_res, pkt->data, 671 memcpy(&priv->last_phy_res, pkt->data,
671 sizeof(struct iwl_rx_phy_res)); 672 sizeof(struct iwl_rx_phy_res));
672 return 0; 673 return 0;
@@ -981,6 +982,16 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
981 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 982 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
982 rx_status.flag |= RX_FLAG_SHORTPRE; 983 rx_status.flag |= RX_FLAG_SHORTPRE;
983 984
985 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
986 /*
987 * We know which subframes of an A-MPDU belong
988 * together since we get a single PHY response
989 * from the firmware for all of them
990 */
991 rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
992 rx_status.ampdu_reference = priv->ampdu_ref;
993 }
994
984 /* Set up the HT phy flags */ 995 /* Set up the HT phy flags */
985 if (rate_n_flags & RATE_MCS_HT_MSK) 996 if (rate_n_flags & RATE_MCS_HT_MSK)
986 rx_status.flag |= RX_FLAG_HT; 997 rx_status.flag |= RX_FLAG_HT;
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index e3467fa86899..bb9f6252d28f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -612,9 +612,9 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
612 return 0; 612 return 0;
613 613
614 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 614 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
615 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN); 615 eth_broadcast_addr(frame->da);
616 memcpy(frame->sa, ta, ETH_ALEN); 616 memcpy(frame->sa, ta, ETH_ALEN);
617 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN); 617 eth_broadcast_addr(frame->bssid);
618 frame->seq_ctrl = 0; 618 frame->seq_ctrl = 0;
619 619
620 len += 24; 620 len += 24;
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b29b798f7550..cd9b6de4273e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -128,10 +128,11 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
128 struct iwl_device_cmd *cmd) 128 struct iwl_device_cmd *cmd)
129{ 129{
130 struct iwl_rx_packet *pkt = rxb_addr(rxb); 130 struct iwl_rx_packet *pkt = rxb_addr(rxb);
131 struct iwl_addsta_cmd *addsta =
132 (struct iwl_addsta_cmd *) cmd->payload;
133 131
134 return iwl_process_add_sta_resp(priv, addsta, pkt); 132 if (!cmd)
133 return 0;
134
135 return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
135} 136}
136 137
137int iwl_send_add_sta(struct iwl_priv *priv, 138int iwl_send_add_sta(struct iwl_priv *priv,
@@ -150,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
150 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
151 152
152 if (!(flags & CMD_ASYNC)) { 153 if (!(flags & CMD_ASYNC)) {
153 cmd.flags |= CMD_WANT_SKB; 154 cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
154 might_sleep(); 155 might_sleep();
155 } 156 }
156 157
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5971a23aa47d..f5ca73a89870 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
127static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, 127static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
128 struct iwl_tx_cmd *tx_cmd, 128 struct iwl_tx_cmd *tx_cmd,
129 struct ieee80211_tx_info *info, 129 struct ieee80211_tx_info *info,
130 struct ieee80211_sta *sta,
130 __le16 fc) 131 __le16 fc)
131{ 132{
132 u32 rate_flags; 133 u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
187 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || 188 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
188 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) 189 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
189 rate_idx = rate_lowest_index( 190 rate_idx = rate_lowest_index(
190 &priv->eeprom_data->bands[info->band], 191 &priv->eeprom_data->bands[info->band], sta);
191 info->control.sta);
192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
193 if (info->band == IEEE80211_BAND_5GHZ) 193 if (info->band == IEEE80211_BAND_5GHZ)
194 rate_idx += IWL_FIRST_OFDM_RATE; 194 rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
291/* 291/*
292 * start REPLY_TX command process 292 * start REPLY_TX command process
293 */ 293 */
294int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) 294int iwlagn_tx_skb(struct iwl_priv *priv,
295 struct ieee80211_sta *sta,
296 struct sk_buff *skb)
295{ 297{
296 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 298 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
297 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 299 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
345 sta_id = ctx->bcast_sta_id; 347 sta_id = ctx->bcast_sta_id;
346 else { 348 else {
347 /* Find index into station table for destination station */ 349 /* Find index into station table for destination station */
348 sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta); 350 sta_id = iwl_sta_id_or_broadcast(ctx, sta);
349 if (sta_id == IWL_INVALID_STATION) { 351 if (sta_id == IWL_INVALID_STATION) {
350 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 352 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
351 hdr->addr1); 353 hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
355 357
356 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 358 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
357 359
358 if (info->control.sta) 360 if (sta)
359 sta_priv = (void *)info->control.sta->drv_priv; 361 sta_priv = (void *)sta->drv_priv;
360 362
361 if (sta_priv && sta_priv->asleep && 363 if (sta_priv && sta_priv->asleep &&
362 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) { 364 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
397 /* TODO need this for burst mode later on */ 399 /* TODO need this for burst mode later on */
398 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); 400 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
399 401
400 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); 402 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
401 403
402 memset(&info->status, 0, sizeof(info->status)); 404 memset(&info->status, 0, sizeof(info->status));
403 405
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
431 * only. Check this here. 433 * only. Check this here.
432 */ 434 */
433 if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && 435 if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
434 tid_data->agg.state != IWL_AGG_OFF, 436 tid_data->agg.state != IWL_AGG_OFF,
435 "Tx while agg.state = %d", tid_data->agg.state)) 437 "Tx while agg.state = %d", tid_data->agg.state))
436 goto drop_unlock_sta; 438 goto drop_unlock_sta;
437 439
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 6d8d6dd7943f..2cb1efbc5ed1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -295,7 +295,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
295static int iwl_verify_sec_sparse(struct iwl_priv *priv, 295static int iwl_verify_sec_sparse(struct iwl_priv *priv,
296 const struct fw_desc *fw_desc) 296 const struct fw_desc *fw_desc)
297{ 297{
298 __le32 *image = (__le32 *)fw_desc->v_addr; 298 __le32 *image = (__le32 *)fw_desc->data;
299 u32 len = fw_desc->len; 299 u32 len = fw_desc->len;
300 u32 val; 300 u32 val;
301 u32 i; 301 u32 i;
@@ -319,7 +319,7 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
319static void iwl_print_mismatch_sec(struct iwl_priv *priv, 319static void iwl_print_mismatch_sec(struct iwl_priv *priv,
320 const struct fw_desc *fw_desc) 320 const struct fw_desc *fw_desc)
321{ 321{
322 __le32 *image = (__le32 *)fw_desc->v_addr; 322 __le32 *image = (__le32 *)fw_desc->data;
323 u32 len = fw_desc->len; 323 u32 len = fw_desc->len;
324 u32 val; 324 u32 val;
325 u32 offs; 325 u32 offs;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06ca505bb2cc..59a5f78402fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,6 +29,7 @@
29 29
30#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
31#include <linux/device.h> 31#include <linux/device.h>
32#include "iwl-trans.h"
32 33
33 34
34#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) 35#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -237,27 +238,34 @@ TRACE_EVENT(iwlwifi_dbg,
237#define TRACE_SYSTEM iwlwifi 238#define TRACE_SYSTEM iwlwifi
238 239
239TRACE_EVENT(iwlwifi_dev_hcmd, 240TRACE_EVENT(iwlwifi_dev_hcmd,
240 TP_PROTO(const struct device *dev, u32 flags, 241 TP_PROTO(const struct device *dev,
241 const void *hcmd0, size_t len0, 242 struct iwl_host_cmd *cmd, u16 total_size,
242 const void *hcmd1, size_t len1, 243 const void *hdr, size_t hdr_len),
243 const void *hcmd2, size_t len2), 244 TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
244 TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
245 TP_STRUCT__entry( 245 TP_STRUCT__entry(
246 DEV_ENTRY 246 DEV_ENTRY
247 __dynamic_array(u8, hcmd0, len0) 247 __dynamic_array(u8, hcmd, total_size)
248 __dynamic_array(u8, hcmd1, len1)
249 __dynamic_array(u8, hcmd2, len2)
250 __field(u32, flags) 248 __field(u32, flags)
251 ), 249 ),
252 TP_fast_assign( 250 TP_fast_assign(
251 int i, offset = hdr_len;
252
253 DEV_ASSIGN; 253 DEV_ASSIGN;
254 memcpy(__get_dynamic_array(hcmd0), hcmd0, len0); 254 __entry->flags = cmd->flags;
255 memcpy(__get_dynamic_array(hcmd1), hcmd1, len1); 255 memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
256 memcpy(__get_dynamic_array(hcmd2), hcmd2, len2); 256
257 __entry->flags = flags; 257 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
258 if (!cmd->len[i])
259 continue;
260 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
261 continue;
262 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
263 cmd->data[i], cmd->len[i]);
264 offset += cmd->len[i];
265 }
258 ), 266 ),
259 TP_printk("[%s] hcmd %#.2x (%ssync)", 267 TP_printk("[%s] hcmd %#.2x (%ssync)",
260 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0], 268 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
261 __entry->flags & CMD_ASYNC ? "a" : "") 269 __entry->flags & CMD_ASYNC ? "a" : "")
262); 270);
263 271
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index cc41cfaedfbd..198634b75ed0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -64,6 +64,7 @@
64#include <linux/dma-mapping.h> 64#include <linux/dma-mapping.h>
65#include <linux/firmware.h> 65#include <linux/firmware.h>
66#include <linux/module.h> 66#include <linux/module.h>
67#include <linux/vmalloc.h>
67 68
68#include "iwl-drv.h" 69#include "iwl-drv.h"
69#include "iwl-debug.h" 70#include "iwl-debug.h"
@@ -101,6 +102,10 @@ MODULE_VERSION(DRV_VERSION);
101MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 102MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
102MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
103 104
105#ifdef CONFIG_IWLWIFI_DEBUGFS
106static struct dentry *iwl_dbgfs_root;
107#endif
108
104/** 109/**
105 * struct iwl_drv - drv common data 110 * struct iwl_drv - drv common data
106 * @list: list of drv structures using this opmode 111 * @list: list of drv structures using this opmode
@@ -126,6 +131,12 @@ struct iwl_drv {
126 char firmware_name[25]; /* name of firmware file to load */ 131 char firmware_name[25]; /* name of firmware file to load */
127 132
128 struct completion request_firmware_complete; 133 struct completion request_firmware_complete;
134
135#ifdef CONFIG_IWLWIFI_DEBUGFS
136 struct dentry *dbgfs_drv;
137 struct dentry *dbgfs_trans;
138 struct dentry *dbgfs_op_mode;
139#endif
129}; 140};
130 141
131#define DVM_OP_MODE 0 142#define DVM_OP_MODE 0
@@ -154,10 +165,8 @@ struct fw_sec {
154 165
155static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) 166static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
156{ 167{
157 if (desc->v_addr) 168 vfree(desc->data);
158 dma_free_coherent(drv->trans->dev, desc->len, 169 desc->data = NULL;
159 desc->v_addr, desc->p_addr);
160 desc->v_addr = NULL;
161 desc->len = 0; 170 desc->len = 0;
162} 171}
163 172
@@ -176,25 +185,29 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
176} 185}
177 186
178static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, 187static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
179 struct fw_sec *sec) 188 struct fw_sec *sec)
180{ 189{
181 if (!sec || !sec->size) { 190 void *data;
182 desc->v_addr = NULL; 191
192 desc->data = NULL;
193
194 if (!sec || !sec->size)
183 return -EINVAL; 195 return -EINVAL;
184 }
185 196
186 desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size, 197 data = vmalloc(sec->size);
187 &desc->p_addr, GFP_KERNEL); 198 if (!data)
188 if (!desc->v_addr)
189 return -ENOMEM; 199 return -ENOMEM;
190 200
191 desc->len = sec->size; 201 desc->len = sec->size;
192 desc->offset = sec->offset; 202 desc->offset = sec->offset;
193 memcpy(desc->v_addr, sec->data, sec->size); 203 memcpy(data, sec->data, desc->len);
204 desc->data = data;
205
194 return 0; 206 return 0;
195} 207}
196 208
197static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 209static void iwl_req_fw_callback(const struct firmware *ucode_raw,
210 void *context);
198 211
199#define UCODE_EXPERIMENTAL_INDEX 100 212#define UCODE_EXPERIMENTAL_INDEX 100
200#define UCODE_EXPERIMENTAL_TAG "exp" 213#define UCODE_EXPERIMENTAL_TAG "exp"
@@ -231,7 +244,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
231 244
232 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, 245 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
233 drv->trans->dev, 246 drv->trans->dev,
234 GFP_KERNEL, drv, iwl_ucode_callback); 247 GFP_KERNEL, drv, iwl_req_fw_callback);
235} 248}
236 249
237struct fw_img_parsing { 250struct fw_img_parsing {
@@ -759,13 +772,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
759 return 0; 772 return 0;
760} 773}
761 774
775static struct iwl_op_mode *
776_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
777{
778 const struct iwl_op_mode_ops *ops = op->ops;
779 struct dentry *dbgfs_dir = NULL;
780 struct iwl_op_mode *op_mode = NULL;
781
782#ifdef CONFIG_IWLWIFI_DEBUGFS
783 drv->dbgfs_op_mode = debugfs_create_dir(op->name,
784 drv->dbgfs_drv);
785 if (!drv->dbgfs_op_mode) {
786 IWL_ERR(drv,
787 "failed to create opmode debugfs directory\n");
788 return op_mode;
789 }
790 dbgfs_dir = drv->dbgfs_op_mode;
791#endif
792
793 op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
794
795#ifdef CONFIG_IWLWIFI_DEBUGFS
796 if (!op_mode) {
797 debugfs_remove_recursive(drv->dbgfs_op_mode);
798 drv->dbgfs_op_mode = NULL;
799 }
800#endif
801
802 return op_mode;
803}
804
805static void _iwl_op_mode_stop(struct iwl_drv *drv)
806{
807 /* op_mode can be NULL if its start failed */
808 if (drv->op_mode) {
809 iwl_op_mode_stop(drv->op_mode);
810 drv->op_mode = NULL;
811
812#ifdef CONFIG_IWLWIFI_DEBUGFS
813 debugfs_remove_recursive(drv->dbgfs_op_mode);
814 drv->dbgfs_op_mode = NULL;
815#endif
816 }
817}
818
762/** 819/**
763 * iwl_ucode_callback - callback when firmware was loaded 820 * iwl_req_fw_callback - callback when firmware was loaded
764 * 821 *
765 * If loaded successfully, copies the firmware into buffers 822 * If loaded successfully, copies the firmware into buffers
766 * for the card to fetch (via DMA). 823 * for the card to fetch (via DMA).
767 */ 824 */
768static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) 825static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
769{ 826{
770 struct iwl_drv *drv = context; 827 struct iwl_drv *drv = context;
771 struct iwl_fw *fw = &drv->fw; 828 struct iwl_fw *fw = &drv->fw;
@@ -908,8 +965,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
908 list_add_tail(&drv->list, &op->drv); 965 list_add_tail(&drv->list, &op->drv);
909 966
910 if (op->ops) { 967 if (op->ops) {
911 const struct iwl_op_mode_ops *ops = op->ops; 968 drv->op_mode = _iwl_op_mode_start(drv, op);
912 drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
913 969
914 if (!drv->op_mode) { 970 if (!drv->op_mode) {
915 mutex_unlock(&iwlwifi_opmode_table_mtx); 971 mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,24 +1025,51 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
969 init_completion(&drv->request_firmware_complete); 1025 init_completion(&drv->request_firmware_complete);
970 INIT_LIST_HEAD(&drv->list); 1026 INIT_LIST_HEAD(&drv->list);
971 1027
1028#ifdef CONFIG_IWLWIFI_DEBUGFS
1029 /* Create the device debugfs entries. */
1030 drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
1031 iwl_dbgfs_root);
1032
1033 if (!drv->dbgfs_drv) {
1034 IWL_ERR(drv, "failed to create debugfs directory\n");
1035 goto err_free_drv;
1036 }
1037
1038 /* Create transport layer debugfs dir */
1039 drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
1040
1041 if (!drv->trans->dbgfs_dir) {
1042 IWL_ERR(drv, "failed to create transport debugfs directory\n");
1043 goto err_free_dbgfs;
1044 }
1045#endif
1046
972 ret = iwl_request_firmware(drv, true); 1047 ret = iwl_request_firmware(drv, true);
973 1048
974 if (ret) { 1049 if (ret) {
975 IWL_ERR(trans, "Couldn't request the fw\n"); 1050 IWL_ERR(trans, "Couldn't request the fw\n");
976 kfree(drv); 1051 goto err_fw;
977 drv = NULL;
978 } 1052 }
979 1053
980 return drv; 1054 return drv;
1055
1056err_fw:
1057#ifdef CONFIG_IWLWIFI_DEBUGFS
1058err_free_dbgfs:
1059 debugfs_remove_recursive(drv->dbgfs_drv);
1060err_free_drv:
1061#endif
1062 kfree(drv);
1063 drv = NULL;
1064
1065 return drv;
981} 1066}
982 1067
983void iwl_drv_stop(struct iwl_drv *drv) 1068void iwl_drv_stop(struct iwl_drv *drv)
984{ 1069{
985 wait_for_completion(&drv->request_firmware_complete); 1070 wait_for_completion(&drv->request_firmware_complete);
986 1071
987 /* op_mode can be NULL if its start failed */ 1072 _iwl_op_mode_stop(drv);
988 if (drv->op_mode)
989 iwl_op_mode_stop(drv->op_mode);
990 1073
991 iwl_dealloc_ucode(drv); 1074 iwl_dealloc_ucode(drv);
992 1075
@@ -1000,6 +1083,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
1000 list_del(&drv->list); 1083 list_del(&drv->list);
1001 mutex_unlock(&iwlwifi_opmode_table_mtx); 1084 mutex_unlock(&iwlwifi_opmode_table_mtx);
1002 1085
1086#ifdef CONFIG_IWLWIFI_DEBUGFS
1087 debugfs_remove_recursive(drv->dbgfs_drv);
1088#endif
1089
1003 kfree(drv); 1090 kfree(drv);
1004} 1091}
1005 1092
@@ -1022,15 +1109,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1022{ 1109{
1023 int i; 1110 int i;
1024 struct iwl_drv *drv; 1111 struct iwl_drv *drv;
1112 struct iwlwifi_opmode_table *op;
1025 1113
1026 mutex_lock(&iwlwifi_opmode_table_mtx); 1114 mutex_lock(&iwlwifi_opmode_table_mtx);
1027 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { 1115 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1028 if (strcmp(iwlwifi_opmode_table[i].name, name)) 1116 op = &iwlwifi_opmode_table[i];
1117 if (strcmp(op->name, name))
1029 continue; 1118 continue;
1030 iwlwifi_opmode_table[i].ops = ops; 1119 op->ops = ops;
1031 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) 1120 /* TODO: need to handle exceptional case */
1032 drv->op_mode = ops->start(drv->trans, drv->cfg, 1121 list_for_each_entry(drv, &op->drv, list)
1033 &drv->fw); 1122 drv->op_mode = _iwl_op_mode_start(drv, op);
1123
1034 mutex_unlock(&iwlwifi_opmode_table_mtx); 1124 mutex_unlock(&iwlwifi_opmode_table_mtx);
1035 return 0; 1125 return 0;
1036 } 1126 }
@@ -1051,12 +1141,9 @@ void iwl_opmode_deregister(const char *name)
1051 iwlwifi_opmode_table[i].ops = NULL; 1141 iwlwifi_opmode_table[i].ops = NULL;
1052 1142
1053 /* call the stop routine for all devices */ 1143 /* call the stop routine for all devices */
1054 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) { 1144 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1055 if (drv->op_mode) { 1145 _iwl_op_mode_stop(drv);
1056 iwl_op_mode_stop(drv->op_mode); 1146
1057 drv->op_mode = NULL;
1058 }
1059 }
1060 mutex_unlock(&iwlwifi_opmode_table_mtx); 1147 mutex_unlock(&iwlwifi_opmode_table_mtx);
1061 return; 1148 return;
1062 } 1149 }
@@ -1076,6 +1163,14 @@ static int __init iwl_drv_init(void)
1076 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 1163 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
1077 pr_info(DRV_COPYRIGHT "\n"); 1164 pr_info(DRV_COPYRIGHT "\n");
1078 1165
1166#ifdef CONFIG_IWLWIFI_DEBUGFS
1167 /* Create the root of iwlwifi debugfs subsystem. */
1168 iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
1169
1170 if (!iwl_dbgfs_root)
1171 return -EFAULT;
1172#endif
1173
1079 return iwl_pci_register_driver(); 1174 return iwl_pci_register_driver();
1080} 1175}
1081module_init(iwl_drv_init); 1176module_init(iwl_drv_init);
@@ -1083,6 +1178,10 @@ module_init(iwl_drv_init);
1083static void __exit iwl_drv_exit(void) 1178static void __exit iwl_drv_exit(void)
1084{ 1179{
1085 iwl_pci_unregister_driver(); 1180 iwl_pci_unregister_driver();
1181
1182#ifdef CONFIG_IWLWIFI_DEBUGFS
1183 debugfs_remove_recursive(iwl_dbgfs_root);
1184#endif
1086} 1185}
1087module_exit(iwl_drv_exit); 1186module_exit(iwl_drv_exit);
1088 1187
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 2cbf137b25bf..285de5f68c05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -90,9 +90,9 @@
90 * 4) The bus specific component configures the bus 90 * 4) The bus specific component configures the bus
91 * 5) The bus specific component calls to the drv bus agnostic part 91 * 5) The bus specific component calls to the drv bus agnostic part
92 * (iwl_drv_start) 92 * (iwl_drv_start)
93 * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback 93 * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
94 * 7) iwl_ucode_callback parses the fw file 94 * 7) iwl_req_fw_callback parses the fw file
95 * 8) iwl_ucode_callback starts the wifi implementation to matches the fw 95 * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw
96 */ 96 */
97 97
98struct iwl_drv; 98struct iwl_drv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 9c07c670a1ce..a5e425718f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -85,8 +85,6 @@ struct iwl_eeprom_data {
85 int n_hw_addrs; 85 int n_hw_addrs;
86 u8 hw_addr[ETH_ALEN]; 86 u8 hw_addr[ETH_ALEN];
87 87
88 u16 radio_config;
89
90 u8 calib_version; 88 u8 calib_version;
91 __le16 calib_voltage; 89 __le16 calib_voltage;
92 90
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 2153e4cc5572..d1a86b66bc51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -124,8 +124,7 @@ struct iwl_ucode_capabilities {
124 124
125/* one for each uCode image (inst/data, init/runtime/wowlan) */ 125/* one for each uCode image (inst/data, init/runtime/wowlan) */
126struct fw_desc { 126struct fw_desc {
127 dma_addr_t p_addr; /* hardware address */ 127 const void *data; /* vmalloc'ed data */
128 void *v_addr; /* software address */
129 u32 len; /* size in bytes */ 128 u32 len; /* size in bytes */
130 u32 offset; /* offset in the device */ 129 u32 offset; /* offset in the device */
131}; 130};
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 64886f95664f..c8d9b9517468 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -134,7 +134,8 @@ struct iwl_cfg;
134struct iwl_op_mode_ops { 134struct iwl_op_mode_ops {
135 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 135 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
136 const struct iwl_cfg *cfg, 136 const struct iwl_cfg *cfg,
137 const struct iwl_fw *fw); 137 const struct iwl_fw *fw,
138 struct dentry *dbgfs_dir);
138 void (*stop)(struct iwl_op_mode *op_mode); 139 void (*stop)(struct iwl_op_mode *op_mode);
139 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, 140 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
140 struct iwl_device_cmd *cmd); 141 struct iwl_device_cmd *cmd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 92576a3e84ef..ff1154232885 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
184 * @CMD_SYNC: The caller will be stalled until the fw responds to the command 184 * @CMD_SYNC: The caller will be stalled until the fw responds to the command
185 * @CMD_ASYNC: Return right away and don't want for the response 185 * @CMD_ASYNC: Return right away and don't want for the response
186 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 186 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
187 * response. 187 * response. The caller needs to call iwl_free_resp when done.
188 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
189 * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
190 * copied. The pointer passed to the response handler is in the transport
191 * ownership and don't need to be freed by the op_mode. This also means
192 * that the pointer is invalidated after the op_mode's handler returns.
188 * @CMD_ON_DEMAND: This command is sent by the test mode pipe. 193 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
189 */ 194 */
190enum CMD_MODE { 195enum CMD_MODE {
191 CMD_SYNC = 0, 196 CMD_SYNC = 0,
192 CMD_ASYNC = BIT(0), 197 CMD_ASYNC = BIT(0),
193 CMD_WANT_SKB = BIT(1), 198 CMD_WANT_SKB = BIT(1),
194 CMD_ON_DEMAND = BIT(2), 199 CMD_WANT_HCMD = BIT(2),
200 CMD_ON_DEMAND = BIT(3),
195}; 201};
196 202
197#define DEF_CMD_PAYLOAD_SIZE 320 203#define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@ struct iwl_trans {
460 size_t dev_cmd_headroom; 466 size_t dev_cmd_headroom;
461 char dev_cmd_pool_name[50]; 467 char dev_cmd_pool_name[50];
462 468
469 struct dentry *dbgfs_dir;
470
463 /* pointer to trans specific struct */ 471 /* pointer to trans specific struct */
464 /*Ensure that this pointer will always be aligned to sizeof pointer */ 472 /*Ensure that this pointer will always be aligned to sizeof pointer */
465 char trans_specific[0] __aligned(sizeof(void *)); 473 char trans_specific[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f4c3500b68c6..2a4675396707 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -263,8 +263,6 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
263/* PCI registers */ 263/* PCI registers */
264#define PCI_CFG_RETRY_TIMEOUT 0x041 264#define PCI_CFG_RETRY_TIMEOUT 0x041
265 265
266#ifndef CONFIG_IWLWIFI_IDI
267
268static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 266static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
269{ 267{
270 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 268 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -282,8 +280,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
282 if (!trans_pcie->drv) 280 if (!trans_pcie->drv)
283 goto out_free_trans; 281 goto out_free_trans;
284 282
283 /* register transport layer debugfs here */
284 if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
285 goto out_free_drv;
286
285 return 0; 287 return 0;
286 288
289out_free_drv:
290 iwl_drv_stop(trans_pcie->drv);
287out_free_trans: 291out_free_trans:
288 iwl_trans_pcie_free(iwl_trans); 292 iwl_trans_pcie_free(iwl_trans);
289 pci_set_drvdata(pdev, NULL); 293 pci_set_drvdata(pdev, NULL);
@@ -301,8 +305,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
301 pci_set_drvdata(pdev, NULL); 305 pci_set_drvdata(pdev, NULL);
302} 306}
303 307
304#endif /* CONFIG_IWLWIFI_IDI */
305
306#ifdef CONFIG_PM_SLEEP 308#ifdef CONFIG_PM_SLEEP
307 309
308static int iwl_pci_suspend(struct device *device) 310static int iwl_pci_suspend(struct device *device)
@@ -347,15 +349,6 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
347 349
348#endif 350#endif
349 351
350#ifdef CONFIG_IWLWIFI_IDI
351/*
352 * Defined externally in iwl-idi.c
353 */
354int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
355void __devexit iwl_pci_remove(struct pci_dev *pdev);
356
357#endif /* CONFIG_IWLWIFI_IDI */
358
359static struct pci_driver iwl_pci_driver = { 352static struct pci_driver iwl_pci_driver = {
360 .name = DRV_NAME, 353 .name = DRV_NAME,
361 .id_table = iwl_hw_card_ids, 354 .id_table = iwl_hw_card_ids,
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 4ffc18dc3a57..401178f44a3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -184,6 +184,7 @@ struct iwl_queue {
184 184
185struct iwl_pcie_tx_queue_entry { 185struct iwl_pcie_tx_queue_entry {
186 struct iwl_device_cmd *cmd; 186 struct iwl_device_cmd *cmd;
187 struct iwl_device_cmd *copy_cmd;
187 struct sk_buff *skb; 188 struct sk_buff *skb;
188 struct iwl_cmd_meta meta; 189 struct iwl_cmd_meta meta;
189}; 190};
@@ -310,7 +311,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
310******************************************************/ 311******************************************************/
311void iwl_bg_rx_replenish(struct work_struct *data); 312void iwl_bg_rx_replenish(struct work_struct *data);
312void iwl_irq_tasklet(struct iwl_trans *trans); 313void iwl_irq_tasklet(struct iwl_trans *trans);
313void iwlagn_rx_replenish(struct iwl_trans *trans); 314void iwl_rx_replenish(struct iwl_trans *trans);
314void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
315 struct iwl_rx_queue *q); 316 struct iwl_rx_queue *q);
316 317
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index d1a61ba6247a..17c8e5d82681 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -35,10 +35,6 @@
35#include "internal.h" 35#include "internal.h"
36#include "iwl-op-mode.h" 36#include "iwl-op-mode.h"
37 37
38#ifdef CONFIG_IWLWIFI_IDI
39#include "iwl-amfh.h"
40#endif
41
42/****************************************************************************** 38/******************************************************************************
43 * 39 *
44 * RX path functions 40 * RX path functions
@@ -181,15 +177,15 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
181} 177}
182 178
183/** 179/**
184 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 180 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
185 */ 181 */
186static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr) 182static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
187{ 183{
188 return cpu_to_le32((u32)(dma_addr >> 8)); 184 return cpu_to_le32((u32)(dma_addr >> 8));
189} 185}
190 186
191/** 187/**
192 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool 188 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
193 * 189 *
194 * If there are slots in the RX queue that need to be restocked, 190 * If there are slots in the RX queue that need to be restocked,
195 * and we have free pre-allocated buffers, fill the ranks as much 191 * and we have free pre-allocated buffers, fill the ranks as much
@@ -199,7 +195,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
199 * also updates the memory address in the firmware to reference the new 195 * also updates the memory address in the firmware to reference the new
200 * target buffer. 196 * target buffer.
201 */ 197 */
202static void iwlagn_rx_queue_restock(struct iwl_trans *trans) 198static void iwl_rx_queue_restock(struct iwl_trans *trans)
203{ 199{
204 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 200 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
205 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 201 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -207,6 +203,17 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
207 struct iwl_rx_mem_buffer *rxb; 203 struct iwl_rx_mem_buffer *rxb;
208 unsigned long flags; 204 unsigned long flags;
209 205
206 /*
207 * If the device isn't enabled - not need to try to add buffers...
208 * This can happen when we stop the device and still have an interrupt
209 * pending. We stop the APM before we sync the interrupts / tasklets
210 * because we have to (see comment there). On the other hand, since
211 * the APM is stopped, we cannot access the HW (in particular not prph).
212 * So don't try to restock if the APM has been already stopped.
213 */
214 if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
215 return;
216
210 spin_lock_irqsave(&rxq->lock, flags); 217 spin_lock_irqsave(&rxq->lock, flags);
211 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 218 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
212 /* The overwritten rxb must be a used one */ 219 /* The overwritten rxb must be a used one */
@@ -219,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
219 list_del(element); 226 list_del(element);
220 227
221 /* Point to Rx buffer via next RBD in circular buffer */ 228 /* Point to Rx buffer via next RBD in circular buffer */
222 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma); 229 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
223 rxq->queue[rxq->write] = rxb; 230 rxq->queue[rxq->write] = rxb;
224 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 231 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
225 rxq->free_count--; 232 rxq->free_count--;
@@ -230,7 +237,6 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
230 if (rxq->free_count <= RX_LOW_WATERMARK) 237 if (rxq->free_count <= RX_LOW_WATERMARK)
231 schedule_work(&trans_pcie->rx_replenish); 238 schedule_work(&trans_pcie->rx_replenish);
232 239
233
234 /* If we've added more space for the firmware to place data, tell it. 240 /* If we've added more space for the firmware to place data, tell it.
235 * Increment device's write pointer in multiples of 8. */ 241 * Increment device's write pointer in multiples of 8. */
236 if (rxq->write_actual != (rxq->write & ~0x7)) { 242 if (rxq->write_actual != (rxq->write & ~0x7)) {
@@ -241,15 +247,16 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
241 } 247 }
242} 248}
243 249
244/** 250/*
245 * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free 251 * iwl_rx_allocate - allocate a page for each used RBD
246 *
247 * When moving to rx_free an SKB is allocated for the slot.
248 * 252 *
249 * Also restock the Rx queue via iwl_rx_queue_restock. 253 * A used RBD is an Rx buffer that has been given to the stack. To use it again
250 * This is called as a scheduled work item (except for during initialization) 254 * a page must be allocated and the RBD must point to the page. This function
255 * doesn't change the HW pointer but handles the list of pages that is used by
256 * iwl_rx_queue_restock. The latter function will update the HW to use the newly
257 * allocated buffers.
251 */ 258 */
252static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority) 259static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
253{ 260{
254 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 261 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
255 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 262 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -328,23 +335,31 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
328 } 335 }
329} 336}
330 337
331void iwlagn_rx_replenish(struct iwl_trans *trans) 338/*
339 * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
340 *
341 * When moving to rx_free an page is allocated for the slot.
342 *
343 * Also restock the Rx queue via iwl_rx_queue_restock.
344 * This is called as a scheduled work item (except for during initialization)
345 */
346void iwl_rx_replenish(struct iwl_trans *trans)
332{ 347{
333 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 348 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
334 unsigned long flags; 349 unsigned long flags;
335 350
336 iwlagn_rx_allocate(trans, GFP_KERNEL); 351 iwl_rx_allocate(trans, GFP_KERNEL);
337 352
338 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 353 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
339 iwlagn_rx_queue_restock(trans); 354 iwl_rx_queue_restock(trans);
340 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 355 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
341} 356}
342 357
343static void iwlagn_rx_replenish_now(struct iwl_trans *trans) 358static void iwl_rx_replenish_now(struct iwl_trans *trans)
344{ 359{
345 iwlagn_rx_allocate(trans, GFP_ATOMIC); 360 iwl_rx_allocate(trans, GFP_ATOMIC);
346 361
347 iwlagn_rx_queue_restock(trans); 362 iwl_rx_queue_restock(trans);
348} 363}
349 364
350void iwl_bg_rx_replenish(struct work_struct *data) 365void iwl_bg_rx_replenish(struct work_struct *data)
@@ -352,7 +367,7 @@ void iwl_bg_rx_replenish(struct work_struct *data)
352 struct iwl_trans_pcie *trans_pcie = 367 struct iwl_trans_pcie *trans_pcie =
353 container_of(data, struct iwl_trans_pcie, rx_replenish); 368 container_of(data, struct iwl_trans_pcie, rx_replenish);
354 369
355 iwlagn_rx_replenish(trans_pcie->trans); 370 iwl_rx_replenish(trans_pcie->trans);
356} 371}
357 372
358static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, 373static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
@@ -421,13 +436,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
421 index = SEQ_TO_INDEX(sequence); 436 index = SEQ_TO_INDEX(sequence);
422 cmd_index = get_cmd_index(&txq->q, index); 437 cmd_index = get_cmd_index(&txq->q, index);
423 438
424 if (reclaim) 439 if (reclaim) {
425 cmd = txq->entries[cmd_index].cmd; 440 struct iwl_pcie_tx_queue_entry *ent;
426 else 441 ent = &txq->entries[cmd_index];
442 cmd = ent->copy_cmd;
443 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
444 } else {
427 cmd = NULL; 445 cmd = NULL;
446 }
428 447
429 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 448 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
430 449
450 if (reclaim) {
451 /* The original command isn't needed any more */
452 kfree(txq->entries[cmd_index].copy_cmd);
453 txq->entries[cmd_index].copy_cmd = NULL;
454 }
455
431 /* 456 /*
432 * After here, we should always check rxcb._page_stolen, 457 * After here, we should always check rxcb._page_stolen,
433 * if it is true then one of the handlers took the page. 458 * if it is true then one of the handlers took the page.
@@ -520,7 +545,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
520 count++; 545 count++;
521 if (count >= 8) { 546 if (count >= 8) {
522 rxq->read = i; 547 rxq->read = i;
523 iwlagn_rx_replenish_now(trans); 548 iwl_rx_replenish_now(trans);
524 count = 0; 549 count = 0;
525 } 550 }
526 } 551 }
@@ -529,9 +554,9 @@ static void iwl_rx_handle(struct iwl_trans *trans)
529 /* Backtrack one entry */ 554 /* Backtrack one entry */
530 rxq->read = i; 555 rxq->read = i;
531 if (fill_rx) 556 if (fill_rx)
532 iwlagn_rx_replenish_now(trans); 557 iwl_rx_replenish_now(trans);
533 else 558 else
534 iwlagn_rx_queue_restock(trans); 559 iwl_rx_queue_restock(trans);
535} 560}
536 561
537/** 562/**
@@ -713,11 +738,9 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
713 /* Disable periodic interrupt; we use it as just a one-shot. */ 738 /* Disable periodic interrupt; we use it as just a one-shot. */
714 iwl_write8(trans, CSR_INT_PERIODIC_REG, 739 iwl_write8(trans, CSR_INT_PERIODIC_REG,
715 CSR_INT_PERIODIC_DIS); 740 CSR_INT_PERIODIC_DIS);
716#ifdef CONFIG_IWLWIFI_IDI 741
717 iwl_amfh_rx_handler();
718#else
719 iwl_rx_handle(trans); 742 iwl_rx_handle(trans);
720#endif 743
721 /* 744 /*
722 * Enable periodic interrupt in 8 msec only if we received 745 * Enable periodic interrupt in 8 msec only if we received
723 * real RX interrupt (instead of just periodic int), to catch 746 * real RX interrupt (instead of just periodic int), to catch
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 063ecaff5b56..fe0fffd04304 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -216,7 +216,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
216 rxq->free_count = 0; 216 rxq->free_count = 0;
217 spin_unlock_irqrestore(&rxq->lock, flags); 217 spin_unlock_irqrestore(&rxq->lock, flags);
218 218
219 iwlagn_rx_replenish(trans); 219 iwl_rx_replenish(trans);
220 220
221 iwl_trans_rx_hw_init(trans, rxq); 221 iwl_trans_rx_hw_init(trans, rxq);
222 222
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
492 iwl_tx_queue_unmap(trans, txq_id); 492 iwl_tx_queue_unmap(trans, txq_id);
493 493
494 /* De-alloc array of command/tx buffers */ 494 /* De-alloc array of command/tx buffers */
495
496 if (txq_id == trans_pcie->cmd_queue) 495 if (txq_id == trans_pcie->cmd_queue)
497 for (i = 0; i < txq->q.n_window; i++) 496 for (i = 0; i < txq->q.n_window; i++) {
498 kfree(txq->entries[i].cmd); 497 kfree(txq->entries[i].cmd);
498 kfree(txq->entries[i].copy_cmd);
499 }
499 500
500 /* De-alloc circular buffer of TFDs */ 501 /* De-alloc circular buffer of TFDs */
501 if (txq->q.n_bd) { 502 if (txq->q.n_bd) {
@@ -851,10 +852,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
851 852
852 iwl_op_mode_nic_config(trans->op_mode); 853 iwl_op_mode_nic_config(trans->op_mode);
853 854
854#ifndef CONFIG_IWLWIFI_IDI
855 /* Allocate the RX queue, or reset if it is already allocated */ 855 /* Allocate the RX queue, or reset if it is already allocated */
856 iwl_rx_init(trans); 856 iwl_rx_init(trans);
857#endif
858 857
859 /* Allocate or reset and init all Tx and Command queues */ 858 /* Allocate or reset and init all Tx and Command queues */
860 if (iwl_tx_init(trans)) 859 if (iwl_tx_init(trans))
@@ -893,6 +892,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
893static int iwl_prepare_card_hw(struct iwl_trans *trans) 892static int iwl_prepare_card_hw(struct iwl_trans *trans)
894{ 893{
895 int ret; 894 int ret;
895 int t = 0;
896 896
897 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 897 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
898 898
@@ -905,30 +905,25 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
905 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 905 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
906 CSR_HW_IF_CONFIG_REG_PREPARE); 906 CSR_HW_IF_CONFIG_REG_PREPARE);
907 907
908 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 908 do {
909 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 909 ret = iwl_set_hw_ready(trans);
910 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 910 if (ret >= 0)
911 return 0;
911 912
912 if (ret < 0) 913 usleep_range(200, 1000);
913 return ret; 914 t += 200;
915 } while (t < 150000);
914 916
915 /* HW should be ready by now, check again. */
916 ret = iwl_set_hw_ready(trans);
917 if (ret >= 0)
918 return 0;
919 return ret; 917 return ret;
920} 918}
921 919
922/* 920/*
923 * ucode 921 * ucode
924 */ 922 */
925static int iwl_load_section(struct iwl_trans *trans, u8 section_num, 923static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
926 const struct fw_desc *section) 924 dma_addr_t phy_addr, u32 byte_cnt)
927{ 925{
928 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 926 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
929 dma_addr_t phy_addr = section->p_addr;
930 u32 byte_cnt = section->len;
931 u32 dst_addr = section->offset;
932 int ret; 927 int ret;
933 928
934 trans_pcie->ucode_write_complete = false; 929 trans_pcie->ucode_write_complete = false;
@@ -942,8 +937,8 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
942 dst_addr); 937 dst_addr);
943 938
944 iwl_write_direct32(trans, 939 iwl_write_direct32(trans,
945 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 940 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
946 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 941 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
947 942
948 iwl_write_direct32(trans, 943 iwl_write_direct32(trans,
949 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 944 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
@@ -962,33 +957,64 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
962 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 957 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
963 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 958 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
964 959
965 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
966 section_num);
967 ret = wait_event_timeout(trans_pcie->ucode_write_waitq, 960 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
968 trans_pcie->ucode_write_complete, 5 * HZ); 961 trans_pcie->ucode_write_complete, 5 * HZ);
969 if (!ret) { 962 if (!ret) {
970 IWL_ERR(trans, "Could not load the [%d] uCode section\n", 963 IWL_ERR(trans, "Failed to load firmware chunk!\n");
971 section_num);
972 return -ETIMEDOUT; 964 return -ETIMEDOUT;
973 } 965 }
974 966
975 return 0; 967 return 0;
976} 968}
977 969
978static int iwl_load_given_ucode(struct iwl_trans *trans, 970static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
979 const struct fw_img *image) 971 const struct fw_desc *section)
980{ 972{
973 u8 *v_addr;
974 dma_addr_t p_addr;
975 u32 offset;
981 int ret = 0; 976 int ret = 0;
982 int i;
983 977
984 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) { 978 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
985 if (!image->sec[i].p_addr) 979 section_num);
986 break; 980
981 v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
982 if (!v_addr)
983 return -ENOMEM;
984
985 for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
986 u32 copy_size;
987
988 copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
987 989
988 ret = iwl_load_section(trans, i, &image->sec[i]); 990 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
989 if (ret) 991 ret = iwl_load_firmware_chunk(trans, section->offset + offset,
990 return ret; 992 p_addr, copy_size);
993 if (ret) {
994 IWL_ERR(trans,
995 "Could not load the [%d] uCode section\n",
996 section_num);
997 break;
991 } 998 }
999 }
1000
1001 dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
1002 return ret;
1003}
1004
1005static int iwl_load_given_ucode(struct iwl_trans *trans,
1006 const struct fw_img *image)
1007{
1008 int i, ret = 0;
1009
1010 for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
1011 if (!image->sec[i].data)
1012 break;
1013
1014 ret = iwl_load_section(trans, i, &image->sec[i]);
1015 if (ret)
1016 return ret;
1017 }
992 1018
993 /* Remove all resets to allow NIC to operate */ 1019 /* Remove all resets to allow NIC to operate */
994 iwl_write32(trans, CSR_RESET, 0); 1020 iwl_write32(trans, CSR_RESET, 0);
@@ -1181,9 +1207,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1181 */ 1207 */
1182 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { 1208 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
1183 iwl_trans_tx_stop(trans); 1209 iwl_trans_tx_stop(trans);
1184#ifndef CONFIG_IWLWIFI_IDI
1185 iwl_trans_rx_stop(trans); 1210 iwl_trans_rx_stop(trans);
1186#endif 1211
1187 /* Power-down device's busmaster DMA clocks */ 1212 /* Power-down device's busmaster DMA clocks */
1188 iwl_write_prph(trans, APMG_CLK_DIS_REG, 1213 iwl_write_prph(trans, APMG_CLK_DIS_REG,
1189 APMG_CLK_VAL_DMA_CLK_RQT); 1214 APMG_CLK_VAL_DMA_CLK_RQT);
@@ -1454,14 +1479,16 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1454 bool hw_rfkill; 1479 bool hw_rfkill;
1455 unsigned long flags; 1480 unsigned long flags;
1456 1481
1482 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1483 iwl_disable_interrupts(trans);
1484 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1485
1457 iwl_apm_stop(trans); 1486 iwl_apm_stop(trans);
1458 1487
1459 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 1488 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1460 iwl_disable_interrupts(trans); 1489 iwl_disable_interrupts(trans);
1461 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1490 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1462 1491
1463 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1464
1465 if (!op_mode_leaving) { 1492 if (!op_mode_leaving) {
1466 /* 1493 /*
1467 * Even if we stop the HW, we still want the RF kill 1494 * Even if we stop the HW, we still want the RF kill
@@ -1549,9 +1576,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1549 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1576 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1550 1577
1551 iwl_trans_pcie_tx_free(trans); 1578 iwl_trans_pcie_tx_free(trans);
1552#ifndef CONFIG_IWLWIFI_IDI
1553 iwl_trans_pcie_rx_free(trans); 1579 iwl_trans_pcie_rx_free(trans);
1554#endif 1580
1555 if (trans_pcie->irq_requested == true) { 1581 if (trans_pcie->irq_requested == true) {
1556 free_irq(trans_pcie->irq, trans); 1582 free_irq(trans_pcie->irq, trans);
1557 iwl_free_isr_ict(trans); 1583 iwl_free_isr_ict(trans);
@@ -1769,7 +1795,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
1769#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 1795#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
1770 if (!debugfs_create_file(#name, mode, parent, trans, \ 1796 if (!debugfs_create_file(#name, mode, parent, trans, \
1771 &iwl_dbgfs_##name##_ops)) \ 1797 &iwl_dbgfs_##name##_ops)) \
1772 return -ENOMEM; \ 1798 goto err; \
1773} while (0) 1799} while (0)
1774 1800
1775/* file operation */ 1801/* file operation */
@@ -2033,6 +2059,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2033 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2059 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2034 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); 2060 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
2035 return 0; 2061 return 0;
2062
2063err:
2064 IWL_ERR(trans, "failed to create the trans debugfs entry\n");
2065 return -ENOMEM;
2036} 2066}
2037#else 2067#else
2038static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, 2068static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6baf8deef519..105e3af3c621 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -521,12 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
521 u16 copy_size, cmd_size; 521 u16 copy_size, cmd_size;
522 bool had_nocopy = false; 522 bool had_nocopy = false;
523 int i; 523 int i;
524 u8 *cmd_dest; 524 u32 cmd_pos;
525#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
526 const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
527 int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
528 int trace_idx;
529#endif
530 525
531 copy_size = sizeof(out_cmd->hdr); 526 copy_size = sizeof(out_cmd->hdr);
532 cmd_size = sizeof(out_cmd->hdr); 527 cmd_size = sizeof(out_cmd->hdr);
@@ -584,15 +579,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
584 INDEX_TO_SEQ(q->write_ptr)); 579 INDEX_TO_SEQ(q->write_ptr));
585 580
586 /* and copy the data that needs to be copied */ 581 /* and copy the data that needs to be copied */
587 582 cmd_pos = offsetof(struct iwl_device_cmd, payload);
588 cmd_dest = out_cmd->payload;
589 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 583 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
590 if (!cmd->len[i]) 584 if (!cmd->len[i])
591 continue; 585 continue;
592 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) 586 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
593 break; 587 break;
594 memcpy(cmd_dest, cmd->data[i], cmd->len[i]); 588 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
595 cmd_dest += cmd->len[i]; 589 cmd_pos += cmd->len[i];
590 }
591
592 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
593
594 /*
595 * since out_cmd will be the source address of the FH, it will write
596 * the retry count there. So when the user needs to receivce the HCMD
597 * that corresponds to the response in the response handler, it needs
598 * to set CMD_WANT_HCMD.
599 */
600 if (cmd->flags & CMD_WANT_HCMD) {
601 txq->entries[idx].copy_cmd =
602 kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
603 if (unlikely(!txq->entries[idx].copy_cmd)) {
604 idx = -ENOMEM;
605 goto out;
606 }
596 } 607 }
597 608
598 IWL_DEBUG_HC(trans, 609 IWL_DEBUG_HC(trans,
@@ -612,11 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
612 dma_unmap_len_set(out_meta, len, copy_size); 623 dma_unmap_len_set(out_meta, len, copy_size);
613 624
614 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); 625 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
615#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
616 trace_bufs[0] = &out_cmd->hdr;
617 trace_lens[0] = copy_size;
618 trace_idx = 1;
619#endif
620 626
621 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 627 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
622 if (!cmd->len[i]) 628 if (!cmd->len[i])
@@ -635,25 +641,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
635 641
636 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, 642 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
637 cmd->len[i], 0); 643 cmd->len[i], 0);
638#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
639 trace_bufs[trace_idx] = cmd->data[i];
640 trace_lens[trace_idx] = cmd->len[i];
641 trace_idx++;
642#endif
643 } 644 }
644 645
645 out_meta->flags = cmd->flags; 646 out_meta->flags = cmd->flags;
646 647
647 txq->need_update = 1; 648 txq->need_update = 1;
648 649
649 /* check that tracing gets all possible blocks */ 650 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
650 BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3); 651 &out_cmd->hdr, copy_size);
651#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
652 trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
653 trace_bufs[0], trace_lens[0],
654 trace_bufs[1], trace_lens[1],
655 trace_bufs[2], trace_lens[2]);
656#endif
657 652
658 /* start timer if queue currently empty */ 653 /* start timer if queue currently empty */
659 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 654 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 26e68326710b..aaa297315c47 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1159,6 +1159,22 @@ void lbs_set_mac_control(struct lbs_private *priv)
1159 lbs_deb_leave(LBS_DEB_CMD); 1159 lbs_deb_leave(LBS_DEB_CMD);
1160} 1160}
1161 1161
1162int lbs_set_mac_control_sync(struct lbs_private *priv)
1163{
1164 struct cmd_ds_mac_control cmd;
1165 int ret = 0;
1166
1167 lbs_deb_enter(LBS_DEB_CMD);
1168
1169 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1170 cmd.action = cpu_to_le16(priv->mac_control);
1171 cmd.reserved = 0;
1172 ret = lbs_cmd_with_response(priv, CMD_MAC_CONTROL, &cmd);
1173
1174 lbs_deb_leave(LBS_DEB_CMD);
1175 return ret;
1176}
1177
1162/** 1178/**
1163 * lbs_allocate_cmd_buffer - allocates the command buffer and links 1179 * lbs_allocate_cmd_buffer - allocates the command buffer and links
1164 * it to command free queue 1180 * it to command free queue
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index ab07608e13d0..4279e8ab95f2 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -96,6 +96,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv);
96int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); 96int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
97 97
98void lbs_set_mac_control(struct lbs_private *priv); 98void lbs_set_mac_control(struct lbs_private *priv);
99int lbs_set_mac_control_sync(struct lbs_private *priv);
99 100
100int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, 101int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
101 s16 *maxlevel); 102 s16 *maxlevel);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index fe1ea43c5149..0c02f0483d1f 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -682,8 +682,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
682 682
683 /* Send cmd to FW to enable 11D function */ 683 /* Send cmd to FW to enable 11D function */
684 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1); 684 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
685 if (ret)
686 goto done;
685 687
686 lbs_set_mac_control(priv); 688 ret = lbs_set_mac_control_sync(priv);
687done: 689done:
688 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 690 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
689 return ret; 691 return ret;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index a03457292c88..7001856241e6 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
227 lbtf_deb_leave(LBTF_DEB_MAIN); 227 lbtf_deb_leave(LBTF_DEB_MAIN);
228} 228}
229 229
230static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 230static void lbtf_op_tx(struct ieee80211_hw *hw,
231 struct ieee80211_tx_control *control,
232 struct sk_buff *skb)
231{ 233{
232 struct lbtf_private *priv = hw->priv; 234 struct lbtf_private *priv = hw->priv;
233 235
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 00838395778c..429ca3215fdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen");
38MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); 38MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41static u32 wmediumd_pid; 41static u32 wmediumd_portid;
42 42
43static int radios = 2; 43static int radios = 2;
44module_param(radios, int, 0444); 44module_param(radios, int, 0444);
@@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
545 545
546static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, 546static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
547 struct sk_buff *my_skb, 547 struct sk_buff *my_skb,
548 int dst_pid) 548 int dst_portid)
549{ 549{
550 struct sk_buff *skb; 550 struct sk_buff *skb;
551 struct mac80211_hwsim_data *data = hw->priv; 551 struct mac80211_hwsim_data *data = hw->priv;
@@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
619 goto nla_put_failure; 619 goto nla_put_failure;
620 620
621 genlmsg_end(skb, msg_head); 621 genlmsg_end(skb, msg_head);
622 genlmsg_unicast(&init_net, skb, dst_pid); 622 genlmsg_unicast(&init_net, skb, dst_portid);
623 623
624 /* Enqueue the packet */ 624 /* Enqueue the packet */
625 skb_queue_tail(&data->pending, my_skb); 625 skb_queue_tail(&data->pending, my_skb);
@@ -709,11 +709,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
709 return ack; 709 return ack;
710} 710}
711 711
712static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 712static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
713 struct ieee80211_tx_control *control,
714 struct sk_buff *skb)
713{ 715{
714 bool ack; 716 bool ack;
715 struct ieee80211_tx_info *txi; 717 struct ieee80211_tx_info *txi;
716 u32 _pid; 718 u32 _portid;
717 719
718 mac80211_hwsim_monitor_rx(hw, skb); 720 mac80211_hwsim_monitor_rx(hw, skb);
719 721
@@ -724,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
724 } 726 }
725 727
726 /* wmediumd mode check */ 728 /* wmediumd mode check */
727 _pid = ACCESS_ONCE(wmediumd_pid); 729 _portid = ACCESS_ONCE(wmediumd_portid);
728 730
729 if (_pid) 731 if (_portid)
730 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); 732 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
731 733
732 /* NO wmediumd detected, perfect medium simulation */ 734 /* NO wmediumd detected, perfect medium simulation */
733 ack = mac80211_hwsim_tx_frame_no_nl(hw, skb); 735 ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
@@ -812,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
812 struct ieee80211_hw *hw = arg; 814 struct ieee80211_hw *hw = arg;
813 struct sk_buff *skb; 815 struct sk_buff *skb;
814 struct ieee80211_tx_info *info; 816 struct ieee80211_tx_info *info;
815 u32 _pid; 817 u32 _portid;
816 818
817 hwsim_check_magic(vif); 819 hwsim_check_magic(vif);
818 820
@@ -829,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
829 mac80211_hwsim_monitor_rx(hw, skb); 831 mac80211_hwsim_monitor_rx(hw, skb);
830 832
831 /* wmediumd mode check */ 833 /* wmediumd mode check */
832 _pid = ACCESS_ONCE(wmediumd_pid); 834 _portid = ACCESS_ONCE(wmediumd_portid);
833 835
834 if (_pid) 836 if (_portid)
835 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); 837 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
836 838
837 mac80211_hwsim_tx_frame_no_nl(hw, skb); 839 mac80211_hwsim_tx_frame_no_nl(hw, skb);
838 dev_kfree_skb(skb); 840 dev_kfree_skb(skb);
@@ -1313,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1313 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1315 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1314 struct sk_buff *skb; 1316 struct sk_buff *skb;
1315 struct ieee80211_pspoll *pspoll; 1317 struct ieee80211_pspoll *pspoll;
1316 u32 _pid; 1318 u32 _portid;
1317 1319
1318 if (!vp->assoc) 1320 if (!vp->assoc)
1319 return; 1321 return;
@@ -1334,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1334 memcpy(pspoll->ta, mac, ETH_ALEN); 1336 memcpy(pspoll->ta, mac, ETH_ALEN);
1335 1337
1336 /* wmediumd mode check */ 1338 /* wmediumd mode check */
1337 _pid = ACCESS_ONCE(wmediumd_pid); 1339 _portid = ACCESS_ONCE(wmediumd_portid);
1338 1340
1339 if (_pid) 1341 if (_portid)
1340 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); 1342 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
1341 1343
1342 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb)) 1344 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
1343 printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__); 1345 printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
@@ -1351,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1351 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1353 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1352 struct sk_buff *skb; 1354 struct sk_buff *skb;
1353 struct ieee80211_hdr *hdr; 1355 struct ieee80211_hdr *hdr;
1354 u32 _pid; 1356 u32 _portid;
1355 1357
1356 if (!vp->assoc) 1358 if (!vp->assoc)
1357 return; 1359 return;
@@ -1373,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1373 memcpy(hdr->addr3, vp->bssid, ETH_ALEN); 1375 memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
1374 1376
1375 /* wmediumd mode check */ 1377 /* wmediumd mode check */
1376 _pid = ACCESS_ONCE(wmediumd_pid); 1378 _portid = ACCESS_ONCE(wmediumd_portid);
1377 1379
1378 if (_pid) 1380 if (_portid)
1379 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); 1381 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
1380 1382
1381 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb)) 1383 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
1382 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__); 1384 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
@@ -1630,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
1630 if (info == NULL) 1632 if (info == NULL)
1631 goto out; 1633 goto out;
1632 1634
1633 wmediumd_pid = info->snd_pid; 1635 wmediumd_portid = info->snd_portid;
1634 1636
1635 printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, " 1637 printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
1636 "switching to wmediumd mode with pid %d\n", info->snd_pid); 1638 "switching to wmediumd mode with pid %d\n", info->snd_portid);
1637 1639
1638 return 0; 1640 return 0;
1639out: 1641out:
@@ -1670,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
1670 if (state != NETLINK_URELEASE) 1672 if (state != NETLINK_URELEASE)
1671 return NOTIFY_DONE; 1673 return NOTIFY_DONE;
1672 1674
1673 if (notify->pid == wmediumd_pid) { 1675 if (notify->portid == wmediumd_portid) {
1674 printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" 1676 printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
1675 " socket, switching to perfect channel medium\n"); 1677 " socket, switching to perfect channel medium\n");
1676 wmediumd_pid = 0; 1678 wmediumd_portid = 0;
1677 } 1679 }
1678 return NOTIFY_DONE; 1680 return NOTIFY_DONE;
1679 1681
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
1727#endif 1729#endif
1728 BIT(NL80211_IFTYPE_AP) | 1730 BIT(NL80211_IFTYPE_AP) |
1729 BIT(NL80211_IFTYPE_P2P_GO) }, 1731 BIT(NL80211_IFTYPE_P2P_GO) },
1732 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
1730}; 1733};
1731 1734
1732static const struct ieee80211_iface_combination hwsim_if_comb = { 1735static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
1813 BIT(NL80211_IFTYPE_P2P_CLIENT) | 1816 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1814 BIT(NL80211_IFTYPE_P2P_GO) | 1817 BIT(NL80211_IFTYPE_P2P_GO) |
1815 BIT(NL80211_IFTYPE_ADHOC) | 1818 BIT(NL80211_IFTYPE_ADHOC) |
1816 BIT(NL80211_IFTYPE_MESH_POINT); 1819 BIT(NL80211_IFTYPE_MESH_POINT) |
1820 BIT(NL80211_IFTYPE_P2P_DEVICE);
1817 1821
1818 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1822 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1819 IEEE80211_HW_SIGNAL_DBM | 1823 IEEE80211_HW_SIGNAL_DBM |
@@ -2052,7 +2056,7 @@ failed:
2052 mac80211_hwsim_free(); 2056 mac80211_hwsim_free();
2053 return err; 2057 return err;
2054} 2058}
2055 2059module_init(init_mac80211_hwsim);
2056 2060
2057static void __exit exit_mac80211_hwsim(void) 2061static void __exit exit_mac80211_hwsim(void)
2058{ 2062{
@@ -2063,7 +2067,4 @@ static void __exit exit_mac80211_hwsim(void)
2063 mac80211_hwsim_free(); 2067 mac80211_hwsim_free();
2064 unregister_netdev(hwsim_mon); 2068 unregister_netdev(hwsim_mon);
2065} 2069}
2066
2067
2068module_init(init_mac80211_hwsim);
2069module_exit(exit_mac80211_hwsim); 2070module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index e535c937628b..245a371f1a43 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -176,23 +176,6 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
176} 176}
177 177
178/* 178/*
179 * This function handles the command response of 11n configuration request.
180 *
181 * Handling includes changing the header fields into CPU format.
182 */
183int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
184 struct mwifiex_ds_11n_tx_cfg *tx_cfg)
185{
186 struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
187
188 if (tx_cfg) {
189 tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
190 tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
191 }
192 return 0;
193}
194
195/*
196 * This function prepares command of reconfigure Tx buffer. 179 * This function prepares command of reconfigure Tx buffer.
197 * 180 *
198 * Preparation includes - 181 * Preparation includes -
@@ -258,27 +241,6 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
258} 241}
259 242
260/* 243/*
261 * This function handles the command response of AMSDU aggregation
262 * control request.
263 *
264 * Handling includes changing the header fields into CPU format.
265 */
266int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
267 struct mwifiex_ds_11n_amsdu_aggr_ctrl
268 *amsdu_aggr_ctrl)
269{
270 struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
271 &resp->params.amsdu_aggr_ctrl;
272
273 if (amsdu_aggr_ctrl) {
274 amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
275 amsdu_aggr_ctrl->curr_buf_size =
276 le16_to_cpu(amsdu_ctrl->curr_buf_size);
277 }
278 return 0;
279}
280
281/*
282 * This function prepares 11n configuration command. 244 * This function prepares 11n configuration command.
283 * 245 *
284 * Preparation includes - 246 * Preparation includes -
@@ -726,3 +688,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
726 688
727 return count; 689 return count;
728} 690}
691
692/*
693 * This function retrieves the entry for specific tx BA stream table by RA and
694 * deletes it.
695 */
696void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
697{
698 struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
699 unsigned long flags;
700
701 if (!ra)
702 return;
703
704 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
705 list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
706 if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
707 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
708 flags);
709 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
710 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
711 }
712 }
713 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
714
715 return;
716}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 28366e9211fb..46006a54a656 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -28,8 +28,6 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
28 struct host_cmd_ds_command *resp); 28 struct host_cmd_ds_command *resp);
29int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, 29int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
30 struct host_cmd_ds_command *resp); 30 struct host_cmd_ds_command *resp);
31int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
32 struct mwifiex_ds_11n_tx_cfg *tx_cfg);
33int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action, 31int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
34 struct mwifiex_ds_11n_tx_cfg *txcfg); 32 struct mwifiex_ds_11n_tx_cfg *txcfg);
35 33
@@ -60,15 +58,13 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
60 struct mwifiex_ds_rx_reorder_tbl *buf); 58 struct mwifiex_ds_rx_reorder_tbl *buf);
61int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv, 59int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
62 struct mwifiex_ds_tx_ba_stream_tbl *buf); 60 struct mwifiex_ds_tx_ba_stream_tbl *buf);
63int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
64 struct mwifiex_ds_11n_amsdu_aggr_ctrl
65 *amsdu_aggr_ctrl);
66int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv, 61int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
67 struct host_cmd_ds_command *cmd, 62 struct host_cmd_ds_command *cmd,
68 int cmd_action, u16 *buf_size); 63 int cmd_action, u16 *buf_size);
69int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd, 64int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
70 int cmd_action, 65 int cmd_action,
71 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl); 66 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
67void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
72 68
73/* 69/*
74 * This function checks whether AMPDU is allowed or not for a particular TID. 70 * This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +153,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
157 153
158 return false; 154 return false;
159} 155}
156
157/*
158 * This function checks whether associated station is 11n enabled
159 */
160static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
161 struct mwifiex_sta_node *node)
162{
163
164 if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
165 !priv->ap_11n_enabled)
166 return 0;
167
168 return node->is_11n_enabled;
169}
160#endif /* !_MWIFIEX_11N_H_ */ 170#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index ab84eb943749..395f1bfd4102 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
62 }; 62 };
63 struct tx_packet_hdr *tx_header; 63 struct tx_packet_hdr *tx_header;
64 64
65 skb_put(skb_aggr, sizeof(*tx_header)); 65 tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
66
67 tx_header = (struct tx_packet_hdr *) skb_aggr->data;
68 66
69 /* Copy DA and SA */ 67 /* Copy DA and SA */
70 dt_offset = 2 * ETH_ALEN; 68 dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
82 tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN); 80 tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
83 81
84 /* Add payload */ 82 /* Add payload */
85 skb_put(skb_aggr, skb_src->len); 83 memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
86 memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data, 84
87 skb_src->len); 85 /* Add padding for new MSDU to start from 4 byte boundary */
88 *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len + 86 *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
89 LLC_SNAP_LEN)) & 3)) : 0;
90 skb_put(skb_aggr, *pad);
91 87
92 return skb_aggr->len + *pad; 88 return skb_aggr->len + *pad;
93} 89}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 591ccd33f83c..9402b93b9a36 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
54 tbl->rx_reorder_ptr[i] = NULL; 54 tbl->rx_reorder_ptr[i] = NULL;
55 } 55 }
56 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); 56 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
57 if (rx_tmp_ptr) 57 if (rx_tmp_ptr) {
58 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); 58 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
59 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
60 else
61 mwifiex_process_rx_packet(priv->adapter,
62 rx_tmp_ptr);
63 }
59 } 64 }
60 65
61 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 66 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
97 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; 102 rx_tmp_ptr = tbl->rx_reorder_ptr[i];
98 tbl->rx_reorder_ptr[i] = NULL; 103 tbl->rx_reorder_ptr[i] = NULL;
99 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); 104 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
100 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); 105
106 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
107 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
108 else
109 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
101 } 110 }
102 111
103 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 112 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
148 * This function returns the pointer to an entry in Rx reordering 157 * This function returns the pointer to an entry in Rx reordering
149 * table which matches the given TA/TID pair. 158 * table which matches the given TA/TID pair.
150 */ 159 */
151static struct mwifiex_rx_reorder_tbl * 160struct mwifiex_rx_reorder_tbl *
152mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) 161mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
153{ 162{
154 struct mwifiex_rx_reorder_tbl *tbl; 163 struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
167 return NULL; 176 return NULL;
168} 177}
169 178
179/* This function retrieves the pointer to an entry in Rx reordering
180 * table which matches the given TA and deletes it.
181 */
182void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
183{
184 struct mwifiex_rx_reorder_tbl *tbl, *tmp;
185 unsigned long flags;
186
187 if (!ta)
188 return;
189
190 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
191 list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
192 if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
193 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
194 flags);
195 mwifiex_del_rx_reorder_entry(priv, tbl);
196 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
197 }
198 }
199 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
200
201 return;
202}
203
170/* 204/*
171 * This function finds the last sequence number used in the packets 205 * This function finds the last sequence number used in the packets
172 * buffered in Rx reordering table. 206 * buffered in Rx reordering table.
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
226 struct mwifiex_rx_reorder_tbl *tbl, *new_node; 260 struct mwifiex_rx_reorder_tbl *tbl, *new_node;
227 u16 last_seq = 0; 261 u16 last_seq = 0;
228 unsigned long flags; 262 unsigned long flags;
263 struct mwifiex_sta_node *node;
229 264
230 /* 265 /*
231 * If we get a TID, ta pair which is already present dispatch all the 266 * If we get a TID, ta pair which is already present dispatch all the
@@ -248,19 +283,26 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
248 new_node->tid = tid; 283 new_node->tid = tid;
249 memcpy(new_node->ta, ta, ETH_ALEN); 284 memcpy(new_node->ta, ta, ETH_ALEN);
250 new_node->start_win = seq_num; 285 new_node->start_win = seq_num;
251 if (mwifiex_queuing_ra_based(priv)) 286
252 /* TODO for adhoc */ 287 if (mwifiex_queuing_ra_based(priv)) {
253 dev_dbg(priv->adapter->dev, 288 dev_dbg(priv->adapter->dev,
254 "info: ADHOC:last_seq=%d start_win=%d\n", 289 "info: AP/ADHOC:last_seq=%d start_win=%d\n",
255 last_seq, new_node->start_win); 290 last_seq, new_node->start_win);
256 else 291 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
292 node = mwifiex_get_sta_entry(priv, ta);
293 if (node)
294 last_seq = node->rx_seq[tid];
295 }
296 } else {
257 last_seq = priv->rx_seq[tid]; 297 last_seq = priv->rx_seq[tid];
298 }
258 299
259 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && 300 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
260 last_seq >= new_node->start_win) 301 last_seq >= new_node->start_win)
261 new_node->start_win = last_seq + 1; 302 new_node->start_win = last_seq + 1;
262 303
263 new_node->win_size = win_size; 304 new_node->win_size = win_size;
305 new_node->flags = 0;
264 306
265 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size, 307 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
266 GFP_KERNEL); 308 GFP_KERNEL);
@@ -396,8 +438,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
396 438
397 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 439 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
398 if (!tbl) { 440 if (!tbl) {
399 if (pkt_type != PKT_TYPE_BAR) 441 if (pkt_type != PKT_TYPE_BAR) {
400 mwifiex_process_rx_packet(priv->adapter, payload); 442 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
443 mwifiex_handle_uap_rx_forward(priv, payload);
444 else
445 mwifiex_process_rx_packet(priv->adapter,
446 payload);
447 }
401 return 0; 448 return 0;
402 } 449 }
403 start_win = tbl->start_win; 450 start_win = tbl->start_win;
@@ -411,13 +458,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
411 * If seq_num is less then starting win then ignore and drop the 458 * If seq_num is less then starting win then ignore and drop the
412 * packet 459 * packet
413 */ 460 */
414 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */ 461 if (tbl->flags & RXREOR_FORCE_NO_DROP) {
415 if (seq_num >= ((start_win + TWOPOW11) & 462 dev_dbg(priv->adapter->dev,
416 (MAX_TID_VALUE - 1)) && (seq_num < start_win)) 463 "RXREOR_FORCE_NO_DROP when HS is activated\n");
464 tbl->flags &= ~RXREOR_FORCE_NO_DROP;
465 } else {
466 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
467 if (seq_num >= ((start_win + TWOPOW11) &
468 (MAX_TID_VALUE - 1)) &&
469 seq_num < start_win)
470 return -1;
471 } else if ((seq_num < start_win) ||
472 (seq_num > (start_win + TWOPOW11))) {
417 return -1; 473 return -1;
418 } else if ((seq_num < start_win) || 474 }
419 (seq_num > (start_win + TWOPOW11))) {
420 return -1;
421 } 475 }
422 476
423 /* 477 /*
@@ -428,8 +482,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
428 seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1); 482 seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
429 483
430 if (((end_win < start_win) && 484 if (((end_win < start_win) &&
431 (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) && 485 (seq_num < start_win) && (seq_num > end_win)) ||
432 (seq_num > end_win)) ||
433 ((end_win > start_win) && ((seq_num > end_win) || 486 ((end_win > start_win) && ((seq_num > end_win) ||
434 (seq_num < start_win)))) { 487 (seq_num < start_win)))) {
435 end_win = seq_num; 488 end_win = seq_num;
@@ -591,3 +644,29 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
591 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 644 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
592 mwifiex_reset_11n_rx_seq_num(priv); 645 mwifiex_reset_11n_rx_seq_num(priv);
593} 646}
647
648/*
649 * This function updates all rx_reorder_tbl's flags.
650 */
651void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
652{
653 struct mwifiex_private *priv;
654 struct mwifiex_rx_reorder_tbl *tbl;
655 unsigned long lock_flags;
656 int i;
657
658 for (i = 0; i < adapter->priv_num; i++) {
659 priv = adapter->priv[i];
660 if (!priv)
661 continue;
662 if (list_empty(&priv->rx_reorder_tbl_ptr))
663 continue;
664
665 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
666 list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
667 tbl->flags = flags;
668 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
669 }
670
671 return;
672}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 6c9815a0f5d8..4064041ac852 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -38,6 +38,12 @@
38#define ADDBA_RSP_STATUS_ACCEPT 0 38#define ADDBA_RSP_STATUS_ACCEPT 0
39 39
40#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff 40#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
41#define BA_SETUP_MAX_PACKET_THRESHOLD 16
42#define BA_SETUP_PACKET_OFFSET 16
43
44enum mwifiex_rxreor_flags {
45 RXREOR_FORCE_NO_DROP = 1<<0,
46};
41 47
42static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv) 48static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
43{ 49{
@@ -68,5 +74,9 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
68 mwifiex_private 74 mwifiex_private
69 *priv, int tid, 75 *priv, int tid,
70 u8 *ta); 76 u8 *ta);
77struct mwifiex_rx_reorder_tbl *
78mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
79void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
80void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
71 81
72#endif /* _MWIFIEX_11N_RXREORDER_H_ */ 82#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 3f66ebb0a630..dd0410d2d465 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o
33mwifiex-y += ie.o 33mwifiex-y += ie.o
34mwifiex-y += sta_cmdresp.o 34mwifiex-y += sta_cmdresp.o
35mwifiex-y += sta_event.o 35mwifiex-y += sta_event.o
36mwifiex-y += uap_event.o
36mwifiex-y += sta_tx.o 37mwifiex-y += sta_tx.o
37mwifiex-y += sta_rx.o 38mwifiex-y += sta_rx.o
39mwifiex-y += uap_txrx.o
38mwifiex-y += cfg80211.o 40mwifiex-y += cfg80211.o
39mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o 41mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
40obj-$(CONFIG_MWIFIEX) += mwifiex.o 42obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index fe42137384da..2691620393ea 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -22,7 +22,7 @@
22 22
23static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { 23static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
24 { 24 {
25 .max = 1, .types = BIT(NL80211_IFTYPE_STATION), 25 .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
26 }, 26 },
27 { 27 {
28 .max = 1, .types = BIT(NL80211_IFTYPE_AP), 28 .max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -37,6 +37,36 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
37 .beacon_int_infra_match = true, 37 .beacon_int_infra_match = true,
38}; 38};
39 39
40static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
41 .n_reg_rules = 7,
42 .alpha2 = "99",
43 .reg_rules = {
44 /* Channel 1 - 11 */
45 REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
46 /* Channel 12 - 13 */
47 REG_RULE(2467-10, 2472+10, 20, 3, 20,
48 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
49 /* Channel 14 */
50 REG_RULE(2484-10, 2484+10, 20, 3, 20,
51 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
52 NL80211_RRF_NO_OFDM),
53 /* Channel 36 - 48 */
54 REG_RULE(5180-10, 5240+10, 40, 3, 20,
55 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
56 /* Channel 149 - 165 */
57 REG_RULE(5745-10, 5825+10, 40, 3, 20,
58 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
59 /* Channel 52 - 64 */
60 REG_RULE(5260-10, 5320+10, 40, 3, 30,
61 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
62 NL80211_RRF_DFS),
63 /* Channel 100 - 140 */
64 REG_RULE(5500-10, 5700+10, 40, 3, 30,
65 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
66 NL80211_RRF_DFS),
67 }
68};
69
40/* 70/*
41 * This function maps the nl802.11 channel type into driver channel type. 71 * This function maps the nl802.11 channel type into driver channel type.
42 * 72 *
@@ -47,8 +77,7 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
47 * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW 77 * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
48 * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE 78 * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
49 */ 79 */
50static u8 80u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
51mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
52{ 81{
53 switch (chan_type) { 82 switch (chan_type) {
54 case NL80211_CHAN_NO_HT: 83 case NL80211_CHAN_NO_HT:
@@ -99,7 +128,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
99 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 128 const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
100 const u8 *peer_mac = pairwise ? mac_addr : bc_mac; 129 const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
101 130
102 if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) { 131 if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
103 wiphy_err(wiphy, "deleting the crypto keys\n"); 132 wiphy_err(wiphy, "deleting the crypto keys\n");
104 return -EFAULT; 133 return -EFAULT;
105 } 134 }
@@ -109,6 +138,188 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
109} 138}
110 139
111/* 140/*
141 * This function forms an skb for management frame.
142 */
143static int
144mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
145{
146 u8 addr[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
147 u16 pkt_len;
148 u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
149 struct timeval tv;
150
151 pkt_len = len + ETH_ALEN;
152
153 skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
154 MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
155 memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
156
157 memcpy(skb_push(skb, sizeof(tx_control)),
158 &tx_control, sizeof(tx_control));
159
160 memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
161
162 /* Add packet data and address4 */
163 memcpy(skb_put(skb, sizeof(struct ieee80211_hdr_3addr)), buf,
164 sizeof(struct ieee80211_hdr_3addr));
165 memcpy(skb_put(skb, ETH_ALEN), addr, ETH_ALEN);
166 memcpy(skb_put(skb, len - sizeof(struct ieee80211_hdr_3addr)),
167 buf + sizeof(struct ieee80211_hdr_3addr),
168 len - sizeof(struct ieee80211_hdr_3addr));
169
170 skb->priority = LOW_PRIO_TID;
171 do_gettimeofday(&tv);
172 skb->tstamp = timeval_to_ktime(tv);
173
174 return 0;
175}
176
177/*
178 * CFG802.11 operation handler to transmit a management frame.
179 */
180static int
181mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
182 struct ieee80211_channel *chan, bool offchan,
183 enum nl80211_channel_type channel_type,
184 bool channel_type_valid, unsigned int wait,
185 const u8 *buf, size_t len, bool no_cck,
186 bool dont_wait_for_ack, u64 *cookie)
187{
188 struct sk_buff *skb;
189 u16 pkt_len;
190 const struct ieee80211_mgmt *mgmt;
191 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
192
193 if (!buf || !len) {
194 wiphy_err(wiphy, "invalid buffer and length\n");
195 return -EFAULT;
196 }
197
198 mgmt = (const struct ieee80211_mgmt *)buf;
199 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA &&
200 ieee80211_is_probe_resp(mgmt->frame_control)) {
201 /* Since we support offload probe resp, we need to skip probe
202 * resp in AP or GO mode */
203 wiphy_dbg(wiphy,
204 "info: skip to send probe resp in AP or GO mode\n");
205 return 0;
206 }
207
208 pkt_len = len + ETH_ALEN;
209 skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
210 MWIFIEX_MGMT_FRAME_HEADER_SIZE +
211 pkt_len + sizeof(pkt_len));
212
213 if (!skb) {
214 wiphy_err(wiphy, "allocate skb failed for management frame\n");
215 return -ENOMEM;
216 }
217
218 mwifiex_form_mgmt_frame(skb, buf, len);
219 mwifiex_queue_tx_pkt(priv, skb);
220
221 *cookie = random32() | 1;
222 cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
223
224 wiphy_dbg(wiphy, "info: management frame transmitted\n");
225 return 0;
226}
227
228/*
229 * CFG802.11 operation handler to register a mgmt frame.
230 */
231static void
232mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
233 struct wireless_dev *wdev,
234 u16 frame_type, bool reg)
235{
236 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
237
238 if (reg)
239 priv->mgmt_frame_mask |= BIT(frame_type >> 4);
240 else
241 priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
242
243 mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
244 HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
245
246 wiphy_dbg(wiphy, "info: mgmt frame registered\n");
247}
248
249/*
250 * CFG802.11 operation handler to remain on channel.
251 */
252static int
253mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
254 struct wireless_dev *wdev,
255 struct ieee80211_channel *chan,
256 enum nl80211_channel_type channel_type,
257 unsigned int duration, u64 *cookie)
258{
259 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
260 int ret;
261
262 if (!chan || !cookie) {
263 wiphy_err(wiphy, "Invalid parameter for ROC\n");
264 return -EINVAL;
265 }
266
267 if (priv->roc_cfg.cookie) {
268 wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n",
269 priv->roc_cfg.cookie);
270 return -EBUSY;
271 }
272
273 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
274 &channel_type, duration);
275
276 if (!ret) {
277 *cookie = random32() | 1;
278 priv->roc_cfg.cookie = *cookie;
279 priv->roc_cfg.chan = *chan;
280 priv->roc_cfg.chan_type = channel_type;
281
282 cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
283 duration, GFP_ATOMIC);
284
285 wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
286 }
287
288 return ret;
289}
290
291/*
292 * CFG802.11 operation handler to cancel remain on channel.
293 */
294static int
295mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
296 struct wireless_dev *wdev, u64 cookie)
297{
298 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
299 int ret;
300
301 if (cookie != priv->roc_cfg.cookie)
302 return -ENOENT;
303
304 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
305 &priv->roc_cfg.chan,
306 &priv->roc_cfg.chan_type, 0);
307
308 if (!ret) {
309 cfg80211_remain_on_channel_expired(wdev, cookie,
310 &priv->roc_cfg.chan,
311 priv->roc_cfg.chan_type,
312 GFP_ATOMIC);
313
314 memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
315
316 wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
317 }
318
319 return ret;
320}
321
322/*
112 * CFG802.11 operation handler to set Tx power. 323 * CFG802.11 operation handler to set Tx power.
113 */ 324 */
114static int 325static int
@@ -171,7 +382,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
171 382
172 if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) { 383 if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
173 priv->wep_key_curr_index = key_index; 384 priv->wep_key_curr_index = key_index;
174 } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) { 385 } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
386 NULL, 0)) {
175 wiphy_err(wiphy, "set default Tx key index\n"); 387 wiphy_err(wiphy, "set default Tx key index\n");
176 return -EFAULT; 388 return -EFAULT;
177 } 389 }
@@ -207,7 +419,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
207 return 0; 419 return 0;
208 } 420 }
209 421
210 if (mwifiex_set_encode(priv, params->key, params->key_len, 422 if (mwifiex_set_encode(priv, params, params->key, params->key_len,
211 key_index, peer_mac, 0)) { 423 key_index, peer_mac, 0)) {
212 wiphy_err(wiphy, "crypto keys added\n"); 424 wiphy_err(wiphy, "crypto keys added\n");
213 return -EFAULT; 425 return -EFAULT;
@@ -462,6 +674,76 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
462 return 0; 674 return 0;
463} 675}
464 676
677static int
678mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
679{
680 u16 mode = P2P_MODE_DISABLE;
681
682 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
683 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
684
685 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
686 HostCmd_ACT_GEN_SET, 0, &mode))
687 return -1;
688
689 return 0;
690}
691
692/*
693 * This function initializes the functionalities for P2P client.
694 * The P2P client initialization sequence is:
695 * disable -> device -> client
696 */
697static int
698mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
699{
700 u16 mode;
701
702 if (mwifiex_cfg80211_deinit_p2p(priv))
703 return -1;
704
705 mode = P2P_MODE_DEVICE;
706 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
707 HostCmd_ACT_GEN_SET, 0, &mode))
708 return -1;
709
710 mode = P2P_MODE_CLIENT;
711 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
712 HostCmd_ACT_GEN_SET, 0, &mode))
713 return -1;
714
715 return 0;
716}
717
718/*
719 * This function initializes the functionalities for P2P GO.
720 * The P2P GO initialization sequence is:
721 * disable -> device -> GO
722 */
723static int
724mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
725{
726 u16 mode;
727
728 if (mwifiex_cfg80211_deinit_p2p(priv))
729 return -1;
730
731 mode = P2P_MODE_DEVICE;
732 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
733 HostCmd_ACT_GEN_SET, 0, &mode))
734 return -1;
735
736 mode = P2P_MODE_GO;
737 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
738 HostCmd_ACT_GEN_SET, 0, &mode))
739 return -1;
740
741 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
742 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP);
743
744 return 0;
745}
746
465/* 747/*
466 * CFG802.11 operation handler to change interface type. 748 * CFG802.11 operation handler to change interface type.
467 */ 749 */
@@ -494,6 +776,16 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
494 switch (type) { 776 switch (type) {
495 case NL80211_IFTYPE_ADHOC: 777 case NL80211_IFTYPE_ADHOC:
496 break; 778 break;
779 case NL80211_IFTYPE_P2P_CLIENT:
780 if (mwifiex_cfg80211_init_p2p_client(priv))
781 return -EFAULT;
782 dev->ieee80211_ptr->iftype = type;
783 return 0;
784 case NL80211_IFTYPE_P2P_GO:
785 if (mwifiex_cfg80211_init_p2p_go(priv))
786 return -EFAULT;
787 dev->ieee80211_ptr->iftype = type;
788 return 0;
497 case NL80211_IFTYPE_UNSPECIFIED: 789 case NL80211_IFTYPE_UNSPECIFIED:
498 wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name); 790 wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
499 case NL80211_IFTYPE_STATION: /* This shouldn't happen */ 791 case NL80211_IFTYPE_STATION: /* This shouldn't happen */
@@ -519,6 +811,18 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
519 return -EOPNOTSUPP; 811 return -EOPNOTSUPP;
520 } 812 }
521 break; 813 break;
814 case NL80211_IFTYPE_P2P_CLIENT:
815 case NL80211_IFTYPE_P2P_GO:
816 switch (type) {
817 case NL80211_IFTYPE_STATION:
818 if (mwifiex_cfg80211_deinit_p2p(priv))
819 return -EFAULT;
820 dev->ieee80211_ptr->iftype = type;
821 return 0;
822 default:
823 return -EOPNOTSUPP;
824 }
825 break;
522 default: 826 default:
523 wiphy_err(wiphy, "%s: unknown iftype: %d\n", 827 wiphy_err(wiphy, "%s: unknown iftype: %d\n",
524 dev->name, dev->ieee80211_ptr->iftype); 828 dev->name, dev->ieee80211_ptr->iftype);
@@ -657,7 +961,6 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
657} 961}
658 962
659/* Supported rates to be advertised to the cfg80211 */ 963/* Supported rates to be advertised to the cfg80211 */
660
661static struct ieee80211_rate mwifiex_rates[] = { 964static struct ieee80211_rate mwifiex_rates[] = {
662 {.bitrate = 10, .hw_value = 2, }, 965 {.bitrate = 10, .hw_value = 2, },
663 {.bitrate = 20, .hw_value = 4, }, 966 {.bitrate = 20, .hw_value = 4, },
@@ -674,7 +977,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
674}; 977};
675 978
676/* Channel definitions to be advertised to cfg80211 */ 979/* Channel definitions to be advertised to cfg80211 */
677
678static struct ieee80211_channel mwifiex_channels_2ghz[] = { 980static struct ieee80211_channel mwifiex_channels_2ghz[] = {
679 {.center_freq = 2412, .hw_value = 1, }, 981 {.center_freq = 2412, .hw_value = 1, },
680 {.center_freq = 2417, .hw_value = 2, }, 982 {.center_freq = 2417, .hw_value = 2, },
@@ -742,12 +1044,41 @@ static struct ieee80211_supported_band mwifiex_band_5ghz = {
742 1044
743 1045
744/* Supported crypto cipher suits to be advertised to cfg80211 */ 1046/* Supported crypto cipher suits to be advertised to cfg80211 */
745
746static const u32 mwifiex_cipher_suites[] = { 1047static const u32 mwifiex_cipher_suites[] = {
747 WLAN_CIPHER_SUITE_WEP40, 1048 WLAN_CIPHER_SUITE_WEP40,
748 WLAN_CIPHER_SUITE_WEP104, 1049 WLAN_CIPHER_SUITE_WEP104,
749 WLAN_CIPHER_SUITE_TKIP, 1050 WLAN_CIPHER_SUITE_TKIP,
750 WLAN_CIPHER_SUITE_CCMP, 1051 WLAN_CIPHER_SUITE_CCMP,
1052 WLAN_CIPHER_SUITE_AES_CMAC,
1053};
1054
1055/* Supported mgmt frame types to be advertised to cfg80211 */
1056static const struct ieee80211_txrx_stypes
1057mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
1058 [NL80211_IFTYPE_STATION] = {
1059 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1060 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1061 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1062 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1063 },
1064 [NL80211_IFTYPE_AP] = {
1065 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1066 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1067 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1068 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1069 },
1070 [NL80211_IFTYPE_P2P_CLIENT] = {
1071 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1072 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1073 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1074 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1075 },
1076 [NL80211_IFTYPE_P2P_GO] = {
1077 .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1078 BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
1079 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
1080 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
1081 },
751}; 1082};
752 1083
753/* 1084/*
@@ -842,7 +1173,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
842{ 1173{
843 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1174 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
844 1175
845 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1176 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
846 wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__); 1177 wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
847 return -EINVAL; 1178 return -EINVAL;
848 } 1179 }
@@ -906,6 +1237,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
906 if (mwifiex_del_mgmt_ies(priv)) 1237 if (mwifiex_del_mgmt_ies(priv))
907 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); 1238 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
908 1239
1240 priv->ap_11n_enabled = 0;
1241
909 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1242 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
910 HostCmd_ACT_GEN_SET, 0, NULL)) { 1243 HostCmd_ACT_GEN_SET, 0, NULL)) {
911 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1244 wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -928,7 +1261,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
928 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1261 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
929 u8 config_bands = 0; 1262 u8 config_bands = 0;
930 1263
931 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) 1264 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
932 return -1; 1265 return -1;
933 if (mwifiex_set_mgmt_ies(priv, &params->beacon)) 1266 if (mwifiex_set_mgmt_ies(priv, &params->beacon))
934 return -1; 1267 return -1;
@@ -965,15 +1298,18 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
965 1298
966 bss_cfg->channel = 1299 bss_cfg->channel =
967 (u8)ieee80211_frequency_to_channel(params->channel->center_freq); 1300 (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
968 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
969 1301
970 /* Set appropriate bands */ 1302 /* Set appropriate bands */
971 if (params->channel->band == IEEE80211_BAND_2GHZ) { 1303 if (params->channel->band == IEEE80211_BAND_2GHZ) {
1304 bss_cfg->band_cfg = BAND_CONFIG_BG;
1305
972 if (params->channel_type == NL80211_CHAN_NO_HT) 1306 if (params->channel_type == NL80211_CHAN_NO_HT)
973 config_bands = BAND_B | BAND_G; 1307 config_bands = BAND_B | BAND_G;
974 else 1308 else
975 config_bands = BAND_B | BAND_G | BAND_GN; 1309 config_bands = BAND_B | BAND_G | BAND_GN;
976 } else { 1310 } else {
1311 bss_cfg->band_cfg = BAND_CONFIG_A;
1312
977 if (params->channel_type == NL80211_CHAN_NO_HT) 1313 if (params->channel_type == NL80211_CHAN_NO_HT)
978 config_bands = BAND_A; 1314 config_bands = BAND_A;
979 else 1315 else
@@ -984,6 +1320,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
984 ~priv->adapter->fw_bands)) 1320 ~priv->adapter->fw_bands))
985 priv->adapter->config_bands = config_bands; 1321 priv->adapter->config_bands = config_bands;
986 1322
1323 mwifiex_set_uap_rates(bss_cfg, params);
987 mwifiex_send_domain_info_cmd_fw(wiphy); 1324 mwifiex_send_domain_info_cmd_fw(wiphy);
988 1325
989 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 1326 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
@@ -994,6 +1331,12 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
994 1331
995 mwifiex_set_ht_params(priv, bss_cfg, params); 1332 mwifiex_set_ht_params(priv, bss_cfg, params);
996 1333
1334 if (params->inactivity_timeout > 0) {
1335 /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
1336 bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
1337 bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
1338 }
1339
997 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, 1340 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
998 HostCmd_ACT_GEN_SET, 0, NULL)) { 1341 HostCmd_ACT_GEN_SET, 0, NULL)) {
999 wiphy_err(wiphy, "Failed to stop the BSS\n"); 1342 wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1149,7 +1492,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1149 ~priv->adapter->fw_bands)) 1492 ~priv->adapter->fw_bands))
1150 priv->adapter->config_bands = config_bands; 1493 priv->adapter->config_bands = config_bands;
1151 } 1494 }
1152 mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
1153 } 1495 }
1154 1496
1155 /* As this is new association, clear locally stored 1497 /* As this is new association, clear locally stored
@@ -1159,7 +1501,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1159 priv->wep_key_curr_index = 0; 1501 priv->wep_key_curr_index = 0;
1160 priv->sec_info.encryption_mode = 0; 1502 priv->sec_info.encryption_mode = 0;
1161 priv->sec_info.is_authtype_auto = 0; 1503 priv->sec_info.is_authtype_auto = 0;
1162 ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1); 1504 ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
1163 1505
1164 if (mode == NL80211_IFTYPE_ADHOC) { 1506 if (mode == NL80211_IFTYPE_ADHOC) {
1165 /* "privacy" is set only for ad-hoc mode */ 1507 /* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1548,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
1206 "info: setting wep encryption" 1548 "info: setting wep encryption"
1207 " with key len %d\n", sme->key_len); 1549 " with key len %d\n", sme->key_len);
1208 priv->wep_key_curr_index = sme->key_idx; 1550 priv->wep_key_curr_index = sme->key_idx;
1209 ret = mwifiex_set_encode(priv, sme->key, sme->key_len, 1551 ret = mwifiex_set_encode(priv, NULL, sme->key,
1210 sme->key_idx, NULL, 0); 1552 sme->key_len, sme->key_idx,
1553 NULL, 0);
1211 } 1554 }
1212 } 1555 }
1213done: 1556done:
@@ -1459,11 +1802,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1459{ 1802{
1460 struct net_device *dev = request->wdev->netdev; 1803 struct net_device *dev = request->wdev->netdev;
1461 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1804 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1462 int i; 1805 int i, offset;
1463 struct ieee80211_channel *chan; 1806 struct ieee80211_channel *chan;
1807 struct ieee_types_header *ie;
1464 1808
1465 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); 1809 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1466 1810
1811 if (atomic_read(&priv->wmm.tx_pkts_queued) >=
1812 MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
1813 dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
1814 return -EBUSY;
1815 }
1816
1467 priv->scan_request = request; 1817 priv->scan_request = request;
1468 1818
1469 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), 1819 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
@@ -1477,13 +1827,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1477 priv->user_scan_cfg->ssid_list = request->ssids; 1827 priv->user_scan_cfg->ssid_list = request->ssids;
1478 1828
1479 if (request->ie && request->ie_len) { 1829 if (request->ie && request->ie_len) {
1830 offset = 0;
1480 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { 1831 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
1481 if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR) 1832 if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
1482 continue; 1833 continue;
1483 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN; 1834 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
1484 memcpy(&priv->vs_ie[i].ie, request->ie, 1835 ie = (struct ieee_types_header *)(request->ie + offset);
1485 request->ie_len); 1836 memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
1486 break; 1837 offset += sizeof(*ie) + ie->len;
1838
1839 if (offset >= request->ie_len)
1840 break;
1487 } 1841 }
1488 } 1842 }
1489 1843
@@ -1592,7 +1946,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
1592 * create a new virtual interface with the given name 1946 * create a new virtual interface with the given name
1593 */ 1947 */
1594struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, 1948struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1595 char *name, 1949 const char *name,
1596 enum nl80211_iftype type, 1950 enum nl80211_iftype type,
1597 u32 *flags, 1951 u32 *flags,
1598 struct vif_params *params) 1952 struct vif_params *params)
@@ -1632,7 +1986,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1632 1986
1633 priv->bss_type = MWIFIEX_BSS_TYPE_STA; 1987 priv->bss_type = MWIFIEX_BSS_TYPE_STA;
1634 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; 1988 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
1635 priv->bss_priority = MWIFIEX_BSS_ROLE_STA; 1989 priv->bss_priority = 0;
1636 priv->bss_role = MWIFIEX_BSS_ROLE_STA; 1990 priv->bss_role = MWIFIEX_BSS_ROLE_STA;
1637 priv->bss_num = 0; 1991 priv->bss_num = 0;
1638 1992
@@ -1655,13 +2009,48 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1655 2009
1656 priv->bss_type = MWIFIEX_BSS_TYPE_UAP; 2010 priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
1657 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; 2011 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
1658 priv->bss_priority = MWIFIEX_BSS_ROLE_UAP; 2012 priv->bss_priority = 0;
1659 priv->bss_role = MWIFIEX_BSS_ROLE_UAP; 2013 priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
1660 priv->bss_started = 0; 2014 priv->bss_started = 0;
1661 priv->bss_num = 0; 2015 priv->bss_num = 0;
1662 priv->bss_mode = type; 2016 priv->bss_mode = type;
1663 2017
1664 break; 2018 break;
2019 case NL80211_IFTYPE_P2P_CLIENT:
2020 priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P];
2021
2022 if (priv->bss_mode) {
2023 wiphy_err(wiphy, "Can't create multiple P2P ifaces");
2024 return ERR_PTR(-EINVAL);
2025 }
2026
2027 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
2028 if (!wdev)
2029 return ERR_PTR(-ENOMEM);
2030
2031 priv->wdev = wdev;
2032 wdev->wiphy = wiphy;
2033
2034 /* At start-up, wpa_supplicant tries to change the interface
2035 * to NL80211_IFTYPE_STATION if it is not managed mode.
2036 * So, we initialize it to STA mode.
2037 */
2038 wdev->iftype = NL80211_IFTYPE_STATION;
2039 priv->bss_mode = NL80211_IFTYPE_STATION;
2040
2041 /* Setting bss_type to P2P tells firmware that this interface
2042 * is receiving P2P peers found during find phase and doing
2043 * action frame handshake.
2044 */
2045 priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
2046
2047 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
2048 priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
2049 priv->bss_role = MWIFIEX_BSS_ROLE_STA;
2050 priv->bss_started = 0;
2051 priv->bss_num = 0;
2052
2053 break;
1665 default: 2054 default:
1666 wiphy_err(wiphy, "type not supported\n"); 2055 wiphy_err(wiphy, "type not supported\n");
1667 return ERR_PTR(-EINVAL); 2056 return ERR_PTR(-EINVAL);
@@ -1769,6 +2158,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1769 .leave_ibss = mwifiex_cfg80211_leave_ibss, 2158 .leave_ibss = mwifiex_cfg80211_leave_ibss,
1770 .add_key = mwifiex_cfg80211_add_key, 2159 .add_key = mwifiex_cfg80211_add_key,
1771 .del_key = mwifiex_cfg80211_del_key, 2160 .del_key = mwifiex_cfg80211_del_key,
2161 .mgmt_tx = mwifiex_cfg80211_mgmt_tx,
2162 .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
2163 .remain_on_channel = mwifiex_cfg80211_remain_on_channel,
2164 .cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel,
1772 .set_default_key = mwifiex_cfg80211_set_default_key, 2165 .set_default_key = mwifiex_cfg80211_set_default_key,
1773 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, 2166 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
1774 .set_tx_power = mwifiex_cfg80211_set_tx_power, 2167 .set_tx_power = mwifiex_cfg80211_set_tx_power,
@@ -1805,8 +2198,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1805 } 2198 }
1806 wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; 2199 wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
1807 wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; 2200 wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
2201 wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
2202 wiphy->max_remain_on_channel_duration = 5000;
1808 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2203 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1809 BIT(NL80211_IFTYPE_ADHOC) | 2204 BIT(NL80211_IFTYPE_ADHOC) |
2205 BIT(NL80211_IFTYPE_P2P_CLIENT) |
2206 BIT(NL80211_IFTYPE_P2P_GO) |
1810 BIT(NL80211_IFTYPE_AP); 2207 BIT(NL80211_IFTYPE_AP);
1811 2208
1812 wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz; 2209 wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
@@ -1825,15 +2222,21 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1825 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 2222 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1826 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 2223 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1827 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | 2224 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
1828 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 2225 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
2226 WIPHY_FLAG_CUSTOM_REGULATORY |
2227 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
2228
2229 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
1829 2230
1830 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 2231 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
1831 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; 2232 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
2233 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
1832 2234
1833 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; 2235 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
1834 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; 2236 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
1835 2237
1836 wiphy->features = NL80211_FEATURE_HT_IBSS; 2238 wiphy->features = NL80211_FEATURE_HT_IBSS |
2239 NL80211_FEATURE_INACTIVITY_TIMER;
1837 2240
1838 /* Reserve space for mwifiex specific private data for BSS */ 2241 /* Reserve space for mwifiex specific private data for BSS */
1839 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); 2242 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1854,8 +2257,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
1854 return ret; 2257 return ret;
1855 } 2258 }
1856 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code); 2259 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
1857 if (country_code && regulatory_hint(wiphy, country_code)) 2260 if (country_code)
1858 dev_err(adapter->dev, "regulatory_hint() failed\n"); 2261 dev_info(adapter->dev,
2262 "ignoring F/W country code %2.2s\n", country_code);
1859 2263
1860 adapter->wiphy = wiphy; 2264 adapter->wiphy = wiphy;
1861 return ret; 2265 return ret;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 565527aee0ea..8d465107f52b 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -460,7 +460,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
460 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 460 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
461 } 461 }
462 462
463 ret = mwifiex_process_sta_event(priv); 463 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
464 ret = mwifiex_process_uap_event(priv);
465 else
466 ret = mwifiex_process_sta_event(priv);
464 467
465 adapter->event_cause = 0; 468 adapter->event_cause = 0;
466 adapter->event_skb = NULL; 469 adapter->event_skb = NULL;
@@ -1085,6 +1088,8 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
1085 if (activated) { 1088 if (activated) {
1086 if (priv->adapter->is_hs_configured) { 1089 if (priv->adapter->is_hs_configured) {
1087 priv->adapter->hs_activated = true; 1090 priv->adapter->hs_activated = true;
1091 mwifiex_update_rxreor_flags(priv->adapter,
1092 RXREOR_FORCE_NO_DROP);
1088 dev_dbg(priv->adapter->dev, "event: hs_activated\n"); 1093 dev_dbg(priv->adapter->dev, "event: hs_activated\n");
1089 priv->adapter->hs_activate_wait_q_woken = true; 1094 priv->adapter->hs_activate_wait_q_woken = true;
1090 wake_up_interruptible( 1095 wake_up_interruptible(
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 070ef25f5186..e9357d87d327 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -28,11 +28,14 @@
28#include <linux/ieee80211.h> 28#include <linux/ieee80211.h>
29 29
30 30
31#define MWIFIEX_MAX_BSS_NUM (2) 31#define MWIFIEX_MAX_BSS_NUM (3)
32 32
33#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd) 33#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
34 * + 4 byte alignment 34 * + 4 byte alignment
35 */ 35 */
36#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8 /* sizeof(pkt_type)
37 * + sizeof(tx_control)
38 */
36 39
37#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 40#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
38#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 41#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
@@ -60,10 +63,14 @@
60#define MWIFIEX_SDIO_BLOCK_SIZE 256 63#define MWIFIEX_SDIO_BLOCK_SIZE 256
61 64
62#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0) 65#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
66#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
67
68#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024
63 69
64enum mwifiex_bss_type { 70enum mwifiex_bss_type {
65 MWIFIEX_BSS_TYPE_STA = 0, 71 MWIFIEX_BSS_TYPE_STA = 0,
66 MWIFIEX_BSS_TYPE_UAP = 1, 72 MWIFIEX_BSS_TYPE_UAP = 1,
73 MWIFIEX_BSS_TYPE_P2P = 2,
67 MWIFIEX_BSS_TYPE_ANY = 0xff, 74 MWIFIEX_BSS_TYPE_ANY = 0xff,
68}; 75};
69 76
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e831b440a24a..dda588b35570 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -65,10 +65,12 @@ enum KEY_TYPE_ID {
65 KEY_TYPE_ID_TKIP, 65 KEY_TYPE_ID_TKIP,
66 KEY_TYPE_ID_AES, 66 KEY_TYPE_ID_AES,
67 KEY_TYPE_ID_WAPI, 67 KEY_TYPE_ID_WAPI,
68 KEY_TYPE_ID_AES_CMAC,
68}; 69};
69#define KEY_MCAST BIT(0) 70#define KEY_MCAST BIT(0)
70#define KEY_UNICAST BIT(1) 71#define KEY_UNICAST BIT(1)
71#define KEY_ENABLED BIT(2) 72#define KEY_ENABLED BIT(2)
73#define KEY_IGTK BIT(10)
72 74
73#define WAPI_KEY_LEN 50 75#define WAPI_KEY_LEN 50
74 76
@@ -92,6 +94,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
92}; 94};
93 95
94#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF))) 96#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
97#define CAL_RSSI(SNR, NF) ((s16)((s16)(SNR)+(s16)(NF)))
95 98
96#define UAP_BSS_PARAMS_I 0 99#define UAP_BSS_PARAMS_I 0
97#define UAP_CUSTOM_IE_I 1 100#define UAP_CUSTOM_IE_I 1
@@ -106,6 +109,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
106#define MGMT_MASK_BEACON 0x100 109#define MGMT_MASK_BEACON 0x100
107 110
108#define TLV_TYPE_UAP_SSID 0x0000 111#define TLV_TYPE_UAP_SSID 0x0000
112#define TLV_TYPE_UAP_RATES 0x0001
109 113
110#define PROPRIETARY_TLV_BASE_ID 0x0100 114#define PROPRIETARY_TLV_BASE_ID 0x0100
111#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0) 115#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
@@ -124,6 +128,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 128#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) 129#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 130#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
131#define TLV_TYPE_UAP_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 57)
127#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59) 132#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
128#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 133#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
129#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 134#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -138,6 +143,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
138#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105) 143#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
139#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113) 144#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
140#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114) 145#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
146#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
141#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145) 147#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
142#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) 148#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
143 149
@@ -257,9 +263,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
257#define HostCmd_CMD_TX_RATE_CFG 0x00d6 263#define HostCmd_CMD_TX_RATE_CFG 0x00d6
258#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4 264#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4
259#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5 265#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
266#define HostCmd_CMD_P2P_MODE_CFG 0x00eb
260#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed 267#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
261#define HostCmd_CMD_SET_BSS_MODE 0x00f7 268#define HostCmd_CMD_SET_BSS_MODE 0x00f7
262#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa 269#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
270#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
271#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
263 272
264#define PROTOCOL_NO_SECURITY 0x01 273#define PROTOCOL_NO_SECURITY 0x01
265#define PROTOCOL_STATIC_WEP 0x02 274#define PROTOCOL_STATIC_WEP 0x02
@@ -285,9 +294,17 @@ enum ENH_PS_MODES {
285 DIS_AUTO_PS = 0xfe, 294 DIS_AUTO_PS = 0xfe,
286}; 295};
287 296
297enum P2P_MODES {
298 P2P_MODE_DISABLE = 0,
299 P2P_MODE_DEVICE = 1,
300 P2P_MODE_GO = 2,
301 P2P_MODE_CLIENT = 3,
302};
303
288#define HostCmd_RET_BIT 0x8000 304#define HostCmd_RET_BIT 0x8000
289#define HostCmd_ACT_GEN_GET 0x0000 305#define HostCmd_ACT_GEN_GET 0x0000
290#define HostCmd_ACT_GEN_SET 0x0001 306#define HostCmd_ACT_GEN_SET 0x0001
307#define HostCmd_ACT_GEN_REMOVE 0x0004
291#define HostCmd_ACT_BITWISE_SET 0x0002 308#define HostCmd_ACT_BITWISE_SET 0x0002
292#define HostCmd_ACT_BITWISE_CLR 0x0003 309#define HostCmd_ACT_BITWISE_CLR 0x0003
293#define HostCmd_RESULT_OK 0x0000 310#define HostCmd_RESULT_OK 0x0000
@@ -307,7 +324,7 @@ enum ENH_PS_MODES {
307#define HostCmd_SCAN_RADIO_TYPE_A 1 324#define HostCmd_SCAN_RADIO_TYPE_A 1
308 325
309#define HOST_SLEEP_CFG_CANCEL 0xffffffff 326#define HOST_SLEEP_CFG_CANCEL 0xffffffff
310#define HOST_SLEEP_CFG_COND_DEF 0x0000000f 327#define HOST_SLEEP_CFG_COND_DEF 0x00000000
311#define HOST_SLEEP_CFG_GPIO_DEF 0xff 328#define HOST_SLEEP_CFG_GPIO_DEF 0xff
312#define HOST_SLEEP_CFG_GAP_DEF 0 329#define HOST_SLEEP_CFG_GAP_DEF 0
313 330
@@ -385,6 +402,7 @@ enum ENH_PS_MODES {
385#define EVENT_BW_CHANGE 0x00000048 402#define EVENT_BW_CHANGE 0x00000048
386#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c 403#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
387#define EVENT_HOSTWAKE_STAIE 0x0000004d 404#define EVENT_HOSTWAKE_STAIE 0x0000004d
405#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
388 406
389#define EVENT_ID_MASK 0xffff 407#define EVENT_ID_MASK 0xffff
390#define BSS_NUM_MASK 0xf 408#define BSS_NUM_MASK 0xf
@@ -424,10 +442,10 @@ struct txpd {
424struct rxpd { 442struct rxpd {
425 u8 bss_type; 443 u8 bss_type;
426 u8 bss_num; 444 u8 bss_num;
427 u16 rx_pkt_length; 445 __le16 rx_pkt_length;
428 u16 rx_pkt_offset; 446 __le16 rx_pkt_offset;
429 u16 rx_pkt_type; 447 __le16 rx_pkt_type;
430 u16 seq_num; 448 __le16 seq_num;
431 u8 priority; 449 u8 priority;
432 u8 rx_rate; 450 u8 rx_rate;
433 s8 snr; 451 s8 snr;
@@ -439,6 +457,31 @@ struct rxpd {
439 u8 reserved; 457 u8 reserved;
440} __packed; 458} __packed;
441 459
460struct uap_txpd {
461 u8 bss_type;
462 u8 bss_num;
463 __le16 tx_pkt_length;
464 __le16 tx_pkt_offset;
465 __le16 tx_pkt_type;
466 __le32 tx_control;
467 u8 priority;
468 u8 flags;
469 u8 pkt_delay_2ms;
470 u8 reserved1;
471 __le32 reserved2;
472};
473
474struct uap_rxpd {
475 u8 bss_type;
476 u8 bss_num;
477 __le16 rx_pkt_length;
478 __le16 rx_pkt_offset;
479 __le16 rx_pkt_type;
480 __le16 seq_num;
481 u8 priority;
482 u8 reserved1;
483};
484
442enum mwifiex_chan_scan_mode_bitmasks { 485enum mwifiex_chan_scan_mode_bitmasks {
443 MWIFIEX_PASSIVE_SCAN = BIT(0), 486 MWIFIEX_PASSIVE_SCAN = BIT(0),
444 MWIFIEX_DISABLE_CHAN_FILT = BIT(1), 487 MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +601,13 @@ struct mwifiex_ie_type_key_param_set {
558 u8 key[50]; 601 u8 key[50];
559} __packed; 602} __packed;
560 603
604#define IGTK_PN_LEN 8
605
606struct mwifiex_cmac_param {
607 u8 ipn[IGTK_PN_LEN];
608 u8 key[WLAN_KEY_LEN_AES_CMAC];
609} __packed;
610
561struct host_cmd_ds_802_11_key_material { 611struct host_cmd_ds_802_11_key_material {
562 __le16 action; 612 __le16 action;
563 struct mwifiex_ie_type_key_param_set key_param_set; 613 struct mwifiex_ie_type_key_param_set key_param_set;
@@ -1250,6 +1300,11 @@ struct host_cmd_tlv_ssid {
1250 u8 ssid[0]; 1300 u8 ssid[0];
1251} __packed; 1301} __packed;
1252 1302
1303struct host_cmd_tlv_rates {
1304 struct host_cmd_tlv tlv;
1305 u8 rates[0];
1306} __packed;
1307
1253struct host_cmd_tlv_bcast_ssid { 1308struct host_cmd_tlv_bcast_ssid {
1254 struct host_cmd_tlv tlv; 1309 struct host_cmd_tlv tlv;
1255 u8 bcast_ctl; 1310 u8 bcast_ctl;
@@ -1291,11 +1346,35 @@ struct host_cmd_tlv_channel_band {
1291 u8 channel; 1346 u8 channel;
1292} __packed; 1347} __packed;
1293 1348
1349struct host_cmd_tlv_ageout_timer {
1350 struct host_cmd_tlv tlv;
1351 __le32 sta_ao_timer;
1352} __packed;
1353
1294struct host_cmd_ds_version_ext { 1354struct host_cmd_ds_version_ext {
1295 u8 version_str_sel; 1355 u8 version_str_sel;
1296 char version_str[128]; 1356 char version_str[128];
1297} __packed; 1357} __packed;
1298 1358
1359struct host_cmd_ds_mgmt_frame_reg {
1360 __le16 action;
1361 __le32 mask;
1362} __packed;
1363
1364struct host_cmd_ds_p2p_mode_cfg {
1365 __le16 action;
1366 __le16 mode;
1367} __packed;
1368
1369struct host_cmd_ds_remain_on_chan {
1370 __le16 action;
1371 u8 status;
1372 u8 reserved;
1373 u8 band_cfg;
1374 u8 channel;
1375 __le32 duration;
1376} __packed;
1377
1299struct host_cmd_ds_802_11_ibss_status { 1378struct host_cmd_ds_802_11_ibss_status {
1300 __le16 action; 1379 __le16 action;
1301 __le16 enable; 1380 __le16 enable;
@@ -1307,6 +1386,7 @@ struct host_cmd_ds_802_11_ibss_status {
1307 1386
1308#define CONNECTION_TYPE_INFRA 0 1387#define CONNECTION_TYPE_INFRA 0
1309#define CONNECTION_TYPE_ADHOC 1 1388#define CONNECTION_TYPE_ADHOC 1
1389#define CONNECTION_TYPE_AP 2
1310 1390
1311struct host_cmd_ds_set_bss_mode { 1391struct host_cmd_ds_set_bss_mode {
1312 u8 con_type; 1392 u8 con_type;
@@ -1404,6 +1484,9 @@ struct host_cmd_ds_command {
1404 struct host_cmd_ds_wmm_get_status get_wmm_status; 1484 struct host_cmd_ds_wmm_get_status get_wmm_status;
1405 struct host_cmd_ds_802_11_key_material key_material; 1485 struct host_cmd_ds_802_11_key_material key_material;
1406 struct host_cmd_ds_version_ext verext; 1486 struct host_cmd_ds_version_ext verext;
1487 struct host_cmd_ds_mgmt_frame_reg reg_mask;
1488 struct host_cmd_ds_remain_on_chan roc_cfg;
1489 struct host_cmd_ds_p2p_mode_cfg mode_cfg;
1407 struct host_cmd_ds_802_11_ibss_status ibss_coalescing; 1490 struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
1408 struct host_cmd_ds_mac_reg_access mac_reg; 1491 struct host_cmd_ds_mac_reg_access mac_reg;
1409 struct host_cmd_ds_bbp_reg_access bbp_reg; 1492 struct host_cmd_ds_bbp_reg_access bbp_reg;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 1d8dd003e396..e38342f86c51 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -114,9 +114,6 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
114 cpu_to_le16(mask); 114 cpu_to_le16(mask);
115 115
116 ie->ie_index = cpu_to_le16(index); 116 ie->ie_index = cpu_to_le16(index);
117 ie->ie_length = priv->mgmt_ie[index].ie_length;
118 memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
119 le16_to_cpu(priv->mgmt_ie[index].ie_length));
120 } else { 117 } else {
121 if (mask != MWIFIEX_DELETE_MASK) 118 if (mask != MWIFIEX_DELETE_MASK)
122 return -1; 119 return -1;
@@ -160,7 +157,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
160 u16 len; 157 u16 len;
161 int ret; 158 int ret;
162 159
163 ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 160 ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
164 if (!ap_custom_ie) 161 if (!ap_custom_ie)
165 return -ENOMEM; 162 return -ENOMEM;
166 163
@@ -214,30 +211,35 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
214 return ret; 211 return ret;
215} 212}
216 213
217/* This function checks if WPS IE is present in passed buffer and copies it to 214/* This function checks if the vendor specified IE is present in passed buffer
218 * mwifiex_ie structure. 215 * and copies it to mwifiex_ie structure.
219 * Function takes pointer to struct mwifiex_ie pointer as argument. 216 * Function takes pointer to struct mwifiex_ie pointer as argument.
220 * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled 217 * If the vendor specified IE is present then memory is allocated for
221 * in with WPS IE. Caller should take care of freeing this memory. 218 * mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
219 * this memory.
222 */ 220 */
223static int mwifiex_update_wps_ie(const u8 *ies, int ies_len, 221static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
224 struct mwifiex_ie **ie_ptr, u16 mask) 222 struct mwifiex_ie **ie_ptr, u16 mask,
223 unsigned int oui, u8 oui_type)
225{ 224{
226 struct ieee_types_header *wps_ie; 225 struct ieee_types_header *vs_ie;
227 struct mwifiex_ie *ie = NULL; 226 struct mwifiex_ie *ie = *ie_ptr;
228 const u8 *vendor_ie; 227 const u8 *vendor_ie;
229 228
230 vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 229 vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
231 WLAN_OUI_TYPE_MICROSOFT_WPS,
232 ies, ies_len);
233 if (vendor_ie) { 230 if (vendor_ie) {
234 ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 231 if (!*ie_ptr) {
235 if (!ie) 232 *ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
236 return -ENOMEM; 233 GFP_KERNEL);
234 if (!*ie_ptr)
235 return -ENOMEM;
236 ie = *ie_ptr;
237 }
237 238
238 wps_ie = (struct ieee_types_header *)vendor_ie; 239 vs_ie = (struct ieee_types_header *)vendor_ie;
239 memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2); 240 memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
240 ie->ie_length = cpu_to_le16(wps_ie->len + 2); 241 vs_ie, vs_ie->len + 2);
242 le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
241 ie->mgmt_subtype_mask = cpu_to_le16(mask); 243 ie->mgmt_subtype_mask = cpu_to_le16(mask);
242 ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK); 244 ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
243 } 245 }
@@ -257,20 +259,40 @@ static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
257 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK; 259 u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
258 int ret = 0; 260 int ret = 0;
259 261
260 if (data->beacon_ies && data->beacon_ies_len) 262 if (data->beacon_ies && data->beacon_ies_len) {
261 mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len, 263 mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
262 &beacon_ie, MGMT_MASK_BEACON); 264 &beacon_ie, MGMT_MASK_BEACON,
265 WLAN_OUI_MICROSOFT,
266 WLAN_OUI_TYPE_MICROSOFT_WPS);
267 mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
268 &beacon_ie, MGMT_MASK_BEACON,
269 WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
270 }
263 271
264 if (data->proberesp_ies && data->proberesp_ies_len) 272 if (data->proberesp_ies && data->proberesp_ies_len) {
265 mwifiex_update_wps_ie(data->proberesp_ies, 273 mwifiex_update_vs_ie(data->proberesp_ies,
266 data->proberesp_ies_len, &pr_ie, 274 data->proberesp_ies_len, &pr_ie,
267 MGMT_MASK_PROBE_RESP); 275 MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
276 WLAN_OUI_TYPE_MICROSOFT_WPS);
277 mwifiex_update_vs_ie(data->proberesp_ies,
278 data->proberesp_ies_len, &pr_ie,
279 MGMT_MASK_PROBE_RESP,
280 WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
281 }
268 282
269 if (data->assocresp_ies && data->assocresp_ies_len) 283 if (data->assocresp_ies && data->assocresp_ies_len) {
270 mwifiex_update_wps_ie(data->assocresp_ies, 284 mwifiex_update_vs_ie(data->assocresp_ies,
271 data->assocresp_ies_len, &ar_ie, 285 data->assocresp_ies_len, &ar_ie,
272 MGMT_MASK_ASSOC_RESP | 286 MGMT_MASK_ASSOC_RESP |
273 MGMT_MASK_REASSOC_RESP); 287 MGMT_MASK_REASSOC_RESP,
288 WLAN_OUI_MICROSOFT,
289 WLAN_OUI_TYPE_MICROSOFT_WPS);
290 mwifiex_update_vs_ie(data->assocresp_ies,
291 data->assocresp_ies_len, &ar_ie,
292 MGMT_MASK_ASSOC_RESP |
293 MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
294 WLAN_OUI_TYPE_WFA_P2P);
295 }
274 296
275 if (beacon_ie || pr_ie || ar_ie) { 297 if (beacon_ie || pr_ie || ar_ie) {
276 ret = mwifiex_update_uap_custom_ie(priv, beacon_ie, 298 ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 21fdc6c02775..b5d37a8caa09 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -64,60 +64,77 @@ static void scan_delay_timer_fn(unsigned long data)
64 struct cmd_ctrl_node *cmd_node, *tmp_node; 64 struct cmd_ctrl_node *cmd_node, *tmp_node;
65 unsigned long flags; 65 unsigned long flags;
66 66
67 if (!mwifiex_wmm_lists_empty(adapter)) { 67 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
68 if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { 68 /*
69 * Abort scan operation by cancelling all pending scan
70 * commands
71 */
72 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
73 list_for_each_entry_safe(cmd_node, tmp_node,
74 &adapter->scan_pending_q, list) {
75 list_del(&cmd_node->list);
76 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
77 }
78 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
79
80 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
81 adapter->scan_processing = false;
82 adapter->scan_delay_cnt = 0;
83 adapter->empty_tx_q_cnt = 0;
84 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
85
86 if (priv->user_scan_cfg) {
87 dev_dbg(priv->adapter->dev,
88 "info: %s: scan aborted\n", __func__);
89 cfg80211_scan_done(priv->scan_request, 1);
90 priv->scan_request = NULL;
91 kfree(priv->user_scan_cfg);
92 priv->user_scan_cfg = NULL;
93 }
94
95 if (priv->scan_pending_on_block) {
96 priv->scan_pending_on_block = false;
97 up(&priv->async_sem);
98 }
99 goto done;
100 }
101
102 if (!atomic_read(&priv->adapter->is_tx_received)) {
103 adapter->empty_tx_q_cnt++;
104 if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
69 /* 105 /*
70 * Abort scan operation by cancelling all pending scan 106 * No Tx traffic for 200msec. Get scan command from
71 * command 107 * scan pending queue and put to cmd pending queue to
108 * resume scan operation
72 */ 109 */
110 adapter->scan_delay_cnt = 0;
111 adapter->empty_tx_q_cnt = 0;
73 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 112 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
74 list_for_each_entry_safe(cmd_node, tmp_node, 113 cmd_node = list_first_entry(&adapter->scan_pending_q,
75 &adapter->scan_pending_q, 114 struct cmd_ctrl_node, list);
76 list) { 115 list_del(&cmd_node->list);
77 list_del(&cmd_node->list);
78 cmd_node->wait_q_enabled = false;
79 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
80 }
81 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 116 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
82 flags); 117 flags);
83 118
84 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 119 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
85 adapter->scan_processing = false; 120 true);
86 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, 121 queue_work(adapter->workqueue, &adapter->main_work);
87 flags); 122 goto done;
88
89 if (priv->user_scan_cfg) {
90 dev_dbg(priv->adapter->dev,
91 "info: %s: scan aborted\n", __func__);
92 cfg80211_scan_done(priv->scan_request, 1);
93 priv->scan_request = NULL;
94 kfree(priv->user_scan_cfg);
95 priv->user_scan_cfg = NULL;
96 }
97 } else {
98 /*
99 * Tx data queue is still not empty, delay scan
100 * operation further by 20msec.
101 */
102 mod_timer(&priv->scan_delay_timer, jiffies +
103 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
104 adapter->scan_delay_cnt++;
105 } 123 }
106 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
107 } else { 124 } else {
108 /* 125 adapter->empty_tx_q_cnt = 0;
109 * Tx data queue is empty. Get scan command from scan_pending_q
110 * and put to cmd_pending_q to resume scan operation
111 */
112 adapter->scan_delay_cnt = 0;
113 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
114 cmd_node = list_first_entry(&adapter->scan_pending_q,
115 struct cmd_ctrl_node, list);
116 list_del(&cmd_node->list);
117 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
118
119 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
120 } 126 }
127
128 /* Delay scan operation further by 20msec */
129 mod_timer(&priv->scan_delay_timer, jiffies +
130 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
131 adapter->scan_delay_cnt++;
132
133done:
134 if (atomic_read(&priv->adapter->is_tx_received))
135 atomic_set(&priv->adapter->is_tx_received, false);
136
137 return;
121} 138}
122 139
123/* 140/*
@@ -127,7 +144,7 @@ static void scan_delay_timer_fn(unsigned long data)
127 * Additionally, it also initializes all the locks and sets up all the 144 * Additionally, it also initializes all the locks and sets up all the
128 * lists. 145 * lists.
129 */ 146 */
130static int mwifiex_init_priv(struct mwifiex_private *priv) 147int mwifiex_init_priv(struct mwifiex_private *priv)
131{ 148{
132 u32 i; 149 u32 i;
133 150
@@ -196,6 +213,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
196 priv->curr_bcn_size = 0; 213 priv->curr_bcn_size = 0;
197 priv->wps_ie = NULL; 214 priv->wps_ie = NULL;
198 priv->wps_ie_len = 0; 215 priv->wps_ie_len = 0;
216 priv->ap_11n_enabled = 0;
217 memset(&priv->roc_cfg, 0, sizeof(priv->roc_cfg));
199 218
200 priv->scan_block = false; 219 priv->scan_block = false;
201 220
@@ -345,6 +364,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
345 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter)); 364 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
346 adapter->arp_filter_size = 0; 365 adapter->arp_filter_size = 0;
347 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; 366 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
367 adapter->empty_tx_q_cnt = 0;
348} 368}
349 369
350/* 370/*
@@ -410,6 +430,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
410 list_del(&priv->wmm.tid_tbl_ptr[j].ra_list); 430 list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
411 list_del(&priv->tx_ba_stream_tbl_ptr); 431 list_del(&priv->tx_ba_stream_tbl_ptr);
412 list_del(&priv->rx_reorder_tbl_ptr); 432 list_del(&priv->rx_reorder_tbl_ptr);
433 list_del(&priv->sta_list);
413 } 434 }
414 } 435 }
415} 436}
@@ -472,6 +493,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
472 spin_lock_init(&priv->rx_pkt_lock); 493 spin_lock_init(&priv->rx_pkt_lock);
473 spin_lock_init(&priv->wmm.ra_list_spinlock); 494 spin_lock_init(&priv->wmm.ra_list_spinlock);
474 spin_lock_init(&priv->curr_bcn_buf_lock); 495 spin_lock_init(&priv->curr_bcn_buf_lock);
496 spin_lock_init(&priv->sta_list_spinlock);
475 } 497 }
476 } 498 }
477 499
@@ -504,6 +526,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
504 } 526 }
505 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr); 527 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
506 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 528 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
529 INIT_LIST_HEAD(&priv->sta_list);
507 530
508 spin_lock_init(&priv->tx_ba_stream_tbl_lock); 531 spin_lock_init(&priv->tx_ba_stream_tbl_lock);
509 spin_lock_init(&priv->rx_reorder_tbl_lock); 532 spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -626,6 +649,17 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
626} 649}
627 650
628/* 651/*
652 * This function frees the private structure, including cleans
653 * up the TX and RX queues and frees the BSS priority tables.
654 */
655void mwifiex_free_priv(struct mwifiex_private *priv)
656{
657 mwifiex_clean_txrx(priv);
658 mwifiex_delete_bss_prio_tbl(priv);
659 mwifiex_free_curr_bcn(priv);
660}
661
662/*
629 * This function is used to shutdown the driver. 663 * This function is used to shutdown the driver.
630 * 664 *
631 * The following operations are performed sequentially - 665 * The following operations are performed sequentially -
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 50191539bb32..4e31c6013ebe 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -81,7 +81,11 @@ struct wep_key {
81 81
82#define KEY_MGMT_ON_HOST 0x03 82#define KEY_MGMT_ON_HOST 0x03
83#define MWIFIEX_AUTH_MODE_AUTO 0xFF 83#define MWIFIEX_AUTH_MODE_AUTO 0xFF
84#define BAND_CONFIG_MANUAL 0x00 84#define BAND_CONFIG_BG 0x00
85#define BAND_CONFIG_A 0x01
86#define MWIFIEX_SUPPORTED_RATES 14
87#define MWIFIEX_SUPPORTED_RATES_EXT 32
88
85struct mwifiex_uap_bss_param { 89struct mwifiex_uap_bss_param {
86 u8 channel; 90 u8 channel;
87 u8 band_cfg; 91 u8 band_cfg;
@@ -100,6 +104,9 @@ struct mwifiex_uap_bss_param {
100 struct wpa_param wpa_cfg; 104 struct wpa_param wpa_cfg;
101 struct wep_key wep_cfg[NUM_WEP_KEYS]; 105 struct wep_key wep_cfg[NUM_WEP_KEYS];
102 struct ieee80211_ht_cap ht_cap; 106 struct ieee80211_ht_cap ht_cap;
107 u8 rates[MWIFIEX_SUPPORTED_RATES];
108 u32 sta_ao_timer;
109 u32 ps_sta_ao_timer;
103}; 110};
104 111
105enum { 112enum {
@@ -213,7 +220,7 @@ struct mwifiex_debug_info {
213}; 220};
214 221
215#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000 222#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000
216#define WAPI_RXPN_LEN 16 223#define PN_LEN 16
217 224
218struct mwifiex_ds_encrypt_key { 225struct mwifiex_ds_encrypt_key {
219 u32 key_disable; 226 u32 key_disable;
@@ -222,7 +229,8 @@ struct mwifiex_ds_encrypt_key {
222 u8 key_material[WLAN_MAX_KEY_LEN]; 229 u8 key_material[WLAN_MAX_KEY_LEN];
223 u8 mac_addr[ETH_ALEN]; 230 u8 mac_addr[ETH_ALEN];
224 u32 is_wapi_key; 231 u32 is_wapi_key;
225 u8 wapi_rxpn[WAPI_RXPN_LEN]; 232 u8 pn[PN_LEN]; /* packet number */
233 u8 is_igtk_key;
226}; 234};
227 235
228struct mwifiex_power_cfg { 236struct mwifiex_power_cfg {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 46803621d015..eb22dd248d54 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -72,7 +72,6 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
72 goto error; 72 goto error;
73 73
74 adapter->priv[i]->adapter = adapter; 74 adapter->priv[i]->adapter = adapter;
75 adapter->priv[i]->bss_priority = i;
76 adapter->priv_num++; 75 adapter->priv_num++;
77 } 76 }
78 mwifiex_init_lock_list(adapter); 77 mwifiex_init_lock_list(adapter);
@@ -370,6 +369,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
370 dev_err(adapter->dev, "cannot create default AP interface\n"); 369 dev_err(adapter->dev, "cannot create default AP interface\n");
371 goto err_add_intf; 370 goto err_add_intf;
372 } 371 }
372
373 /* Create P2P interface by default */
374 if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
375 NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
376 dev_err(adapter->dev, "cannot create default P2P interface\n");
377 goto err_add_intf;
378 }
373 rtnl_unlock(); 379 rtnl_unlock();
374 380
375 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); 381 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -470,6 +476,27 @@ mwifiex_close(struct net_device *dev)
470} 476}
471 477
472/* 478/*
479 * Add buffer into wmm tx queue and queue work to transmit it.
480 */
481int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
482{
483 mwifiex_wmm_add_buf_txqueue(priv, skb);
484 atomic_inc(&priv->adapter->tx_pending);
485
486 if (priv->adapter->scan_delay_cnt)
487 atomic_set(&priv->adapter->is_tx_received, true);
488
489 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
490 mwifiex_set_trans_start(priv->netdev);
491 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
492 }
493
494 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
495
496 return 0;
497}
498
499/*
473 * CFG802.11 network device handler for data transmission. 500 * CFG802.11 network device handler for data transmission.
474 */ 501 */
475static int 502static int
@@ -517,15 +544,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
517 tx_info->bss_type = priv->bss_type; 544 tx_info->bss_type = priv->bss_type;
518 mwifiex_fill_buffer(skb); 545 mwifiex_fill_buffer(skb);
519 546
520 mwifiex_wmm_add_buf_txqueue(priv, skb); 547 mwifiex_queue_tx_pkt(priv, skb);
521 atomic_inc(&priv->adapter->tx_pending);
522
523 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
524 mwifiex_set_trans_start(dev);
525 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
526 }
527
528 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
529 548
530 return 0; 549 return 0;
531} 550}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e7c2a82fd610..bfb3fa69805c 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -88,13 +88,18 @@ enum {
88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) 88#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
89 89
90#define MWIFIEX_MAX_SCAN_DELAY_CNT 50 90#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
91#define MWIFIEX_MAX_EMPTY_TX_Q_CNT 10
91#define MWIFIEX_SCAN_DELAY_MSEC 20 92#define MWIFIEX_SCAN_DELAY_MSEC 20
92 93
94#define MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN 2
95
93#define RSN_GTK_OUI_OFFSET 2 96#define RSN_GTK_OUI_OFFSET 2
94 97
95#define MWIFIEX_OUI_NOT_PRESENT 0 98#define MWIFIEX_OUI_NOT_PRESENT 0
96#define MWIFIEX_OUI_PRESENT 1 99#define MWIFIEX_OUI_PRESENT 1
97 100
101#define PKT_TYPE_MGMT 0xE5
102
98/* 103/*
99 * Do not check for data_received for USB, as data_received 104 * Do not check for data_received for USB, as data_received
100 * is handled in mwifiex_usb_recv for USB 105 * is handled in mwifiex_usb_recv for USB
@@ -115,6 +120,7 @@ enum {
115#define MAX_BITMAP_RATES_SIZE 10 120#define MAX_BITMAP_RATES_SIZE 10
116 121
117#define MAX_CHANNEL_BAND_BG 14 122#define MAX_CHANNEL_BAND_BG 14
123#define MAX_CHANNEL_BAND_A 165
118 124
119#define MAX_FREQUENCY_BAND_BG 2484 125#define MAX_FREQUENCY_BAND_BG 2484
120 126
@@ -199,6 +205,9 @@ struct mwifiex_ra_list_tbl {
199 u8 ra[ETH_ALEN]; 205 u8 ra[ETH_ALEN];
200 u32 total_pkts_size; 206 u32 total_pkts_size;
201 u32 is_11n_enabled; 207 u32 is_11n_enabled;
208 u16 max_amsdu;
209 u16 pkt_count;
210 u8 ba_packet_thr;
202}; 211};
203 212
204struct mwifiex_tid_tbl { 213struct mwifiex_tid_tbl {
@@ -245,10 +254,6 @@ struct ieee_types_header {
245 u8 len; 254 u8 len;
246} __packed; 255} __packed;
247 256
248#define MWIFIEX_SUPPORTED_RATES 14
249
250#define MWIFIEX_SUPPORTED_RATES_EXT 32
251
252struct ieee_types_vendor_specific { 257struct ieee_types_vendor_specific {
253 struct ieee_types_vendor_header vend_hdr; 258 struct ieee_types_vendor_header vend_hdr;
254 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)]; 259 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
@@ -365,6 +370,12 @@ struct wps {
365 u8 session_enable; 370 u8 session_enable;
366}; 371};
367 372
373struct mwifiex_roc_cfg {
374 u64 cookie;
375 struct ieee80211_channel chan;
376 enum nl80211_channel_type chan_type;
377};
378
368struct mwifiex_adapter; 379struct mwifiex_adapter;
369struct mwifiex_private; 380struct mwifiex_private;
370 381
@@ -431,6 +442,9 @@ struct mwifiex_private {
431 u8 wmm_enabled; 442 u8 wmm_enabled;
432 u8 wmm_qosinfo; 443 u8 wmm_qosinfo;
433 struct mwifiex_wmm_desc wmm; 444 struct mwifiex_wmm_desc wmm;
445 struct list_head sta_list;
446 /* spin lock for associated station list */
447 spinlock_t sta_list_spinlock;
434 struct list_head tx_ba_stream_tbl_ptr; 448 struct list_head tx_ba_stream_tbl_ptr;
435 /* spin lock for tx_ba_stream_tbl_ptr queue */ 449 /* spin lock for tx_ba_stream_tbl_ptr queue */
436 spinlock_t tx_ba_stream_tbl_lock; 450 spinlock_t tx_ba_stream_tbl_lock;
@@ -480,12 +494,16 @@ struct mwifiex_private {
480 s32 cqm_rssi_thold; 494 s32 cqm_rssi_thold;
481 u32 cqm_rssi_hyst; 495 u32 cqm_rssi_hyst;
482 u8 subsc_evt_rssi_state; 496 u8 subsc_evt_rssi_state;
497 struct mwifiex_ds_misc_subsc_evt async_subsc_evt_storage;
483 struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX]; 498 struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
484 u16 beacon_idx; 499 u16 beacon_idx;
485 u16 proberesp_idx; 500 u16 proberesp_idx;
486 u16 assocresp_idx; 501 u16 assocresp_idx;
487 u16 rsn_idx; 502 u16 rsn_idx;
488 struct timer_list scan_delay_timer; 503 struct timer_list scan_delay_timer;
504 u8 ap_11n_enabled;
505 u32 mgmt_frame_mask;
506 struct mwifiex_roc_cfg roc_cfg;
489}; 507};
490 508
491enum mwifiex_ba_status { 509enum mwifiex_ba_status {
@@ -517,6 +535,7 @@ struct mwifiex_rx_reorder_tbl {
517 int win_size; 535 int win_size;
518 void **rx_reorder_ptr; 536 void **rx_reorder_ptr;
519 struct reorder_tmr_cnxt timer_context; 537 struct reorder_tmr_cnxt timer_context;
538 u8 flags;
520}; 539};
521 540
522struct mwifiex_bss_prio_node { 541struct mwifiex_bss_prio_node {
@@ -550,6 +569,19 @@ struct mwifiex_bss_priv {
550 u64 fw_tsf; 569 u64 fw_tsf;
551}; 570};
552 571
572/* This is AP specific structure which stores information
573 * about associated STA
574 */
575struct mwifiex_sta_node {
576 struct list_head list;
577 u8 mac_addr[ETH_ALEN];
578 u8 is_wmm_enabled;
579 u8 is_11n_enabled;
580 u8 ampdu_sta[MAX_NUM_TID];
581 u16 rx_seq[MAX_NUM_TID];
582 u16 max_amsdu;
583};
584
553struct mwifiex_if_ops { 585struct mwifiex_if_ops {
554 int (*init_if) (struct mwifiex_adapter *); 586 int (*init_if) (struct mwifiex_adapter *);
555 void (*cleanup_if) (struct mwifiex_adapter *); 587 void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +722,9 @@ struct mwifiex_adapter {
690 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 722 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
691 u16 max_mgmt_ie_index; 723 u16 max_mgmt_ie_index;
692 u8 scan_delay_cnt; 724 u8 scan_delay_cnt;
725 u8 empty_tx_q_cnt;
726 atomic_t is_tx_received;
727 atomic_t pending_bridged_pkts;
693}; 728};
694 729
695int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 730int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -702,6 +737,9 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
702void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, 737void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
703 struct mwifiex_adapter *adapter); 738 struct mwifiex_adapter *adapter);
704 739
740int mwifiex_init_priv(struct mwifiex_private *priv);
741void mwifiex_free_priv(struct mwifiex_private *priv);
742
705int mwifiex_init_fw(struct mwifiex_adapter *adapter); 743int mwifiex_init_fw(struct mwifiex_adapter *adapter);
706 744
707int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); 745int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -714,6 +752,9 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
714 752
715int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb); 753int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
716 754
755int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
756 struct sk_buff *skb);
757
717int mwifiex_process_event(struct mwifiex_adapter *adapter); 758int mwifiex_process_event(struct mwifiex_adapter *adapter);
718 759
719int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, 760int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
@@ -780,8 +821,17 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
780 struct host_cmd_ds_command *resp); 821 struct host_cmd_ds_command *resp);
781int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *, 822int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
782 struct sk_buff *skb); 823 struct sk_buff *skb);
824int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
825 struct sk_buff *skb);
826int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
827 struct sk_buff *skb);
783int mwifiex_process_sta_event(struct mwifiex_private *); 828int mwifiex_process_sta_event(struct mwifiex_private *);
829int mwifiex_process_uap_event(struct mwifiex_private *);
830struct mwifiex_sta_node *
831mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
832void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
784void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); 833void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
834void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
785int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta); 835int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
786int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, 836int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
787 struct mwifiex_scan_cmd_config *scan_cfg); 837 struct mwifiex_scan_cmd_config *scan_cfg);
@@ -840,6 +890,8 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
840void mwifiex_set_ht_params(struct mwifiex_private *priv, 890void mwifiex_set_ht_params(struct mwifiex_private *priv,
841 struct mwifiex_uap_bss_param *bss_cfg, 891 struct mwifiex_uap_bss_param *bss_cfg,
842 struct cfg80211_ap_settings *params); 892 struct cfg80211_ap_settings *params);
893void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
894 struct cfg80211_ap_settings *params);
843 895
844/* 896/*
845 * This function checks if the queuing is RA based or not. 897 * This function checks if the queuing is RA based or not.
@@ -925,6 +977,14 @@ mwifiex_netdev_get_priv(struct net_device *dev)
925 return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev)); 977 return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
926} 978}
927 979
980/*
981 * This function checks if a skb holds a management frame.
982 */
983static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
984{
985 return (*(u32 *)skb->data == PKT_TYPE_MGMT);
986}
987
928int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, 988int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
929 u32 func_init_shutdown); 989 u32 func_init_shutdown);
930int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); 990int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -949,14 +1009,21 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
949 const struct mwifiex_user_scan_cfg *user_scan_in); 1009 const struct mwifiex_user_scan_cfg *user_scan_in);
950int mwifiex_set_radio(struct mwifiex_private *priv, u8 option); 1010int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
951 1011
952int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key, 1012int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
953 int key_len, u8 key_index, const u8 *mac_addr, 1013 const u8 *key, int key_len, u8 key_index,
954 int disable); 1014 const u8 *mac_addr, int disable);
955 1015
956int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len); 1016int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
957 1017
958int mwifiex_get_ver_ext(struct mwifiex_private *priv); 1018int mwifiex_get_ver_ext(struct mwifiex_private *priv);
959 1019
1020int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1021 struct ieee80211_channel *chan,
1022 enum nl80211_channel_type *channel_type,
1023 unsigned int duration);
1024
1025int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
1026
960int mwifiex_get_stats_info(struct mwifiex_private *priv, 1027int mwifiex_get_stats_info(struct mwifiex_private *priv,
961 struct mwifiex_ds_get_stats *log); 1028 struct mwifiex_ds_get_stats *log);
962 1029
@@ -987,6 +1054,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
987 1054
988int mwifiex_main_process(struct mwifiex_adapter *); 1055int mwifiex_main_process(struct mwifiex_adapter *);
989 1056
1057int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb);
1058
990int mwifiex_get_bss_info(struct mwifiex_private *, 1059int mwifiex_get_bss_info(struct mwifiex_private *,
991 struct mwifiex_bss_info *); 1060 struct mwifiex_bss_info *);
992int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 1061int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -997,8 +1066,10 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
997int mwifiex_check_network_compatibility(struct mwifiex_private *priv, 1066int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
998 struct mwifiex_bssdescriptor *bss_desc); 1067 struct mwifiex_bssdescriptor *bss_desc);
999 1068
1069u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
1070
1000struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, 1071struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
1001 char *name, 1072 const char *name,
1002 enum nl80211_iftype type, 1073 enum nl80211_iftype type,
1003 u32 *flags, 1074 u32 *flags,
1004 struct vif_params *params); 1075 struct vif_params *params);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 04dc7ca4ac22..e36a75988f87 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -614,9 +614,8 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
614 614
615 /* Increment the TLV header length by the size 615 /* Increment the TLV header length by the size
616 appended */ 616 appended */
617 chan_tlv_out->header.len = 617 le16_add_cpu(&chan_tlv_out->header.len,
618 cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) + 618 sizeof(chan_tlv_out->chan_scan_param));
619 (sizeof(chan_tlv_out->chan_scan_param)));
620 619
621 /* 620 /*
622 * The tlv buffer length is set to the number of bytes 621 * The tlv buffer length is set to the number of bytes
@@ -726,7 +725,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
726 struct mwifiex_ie_types_num_probes *num_probes_tlv; 725 struct mwifiex_ie_types_num_probes *num_probes_tlv;
727 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; 726 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
728 struct mwifiex_ie_types_rates_param_set *rates_tlv; 727 struct mwifiex_ie_types_rates_param_set *rates_tlv;
729 const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
730 u8 *tlv_pos; 728 u8 *tlv_pos;
731 u32 num_probes; 729 u32 num_probes;
732 u32 ssid_len; 730 u32 ssid_len;
@@ -840,8 +838,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
840 * or BSSID filter applied to the scan results in the firmware. 838 * or BSSID filter applied to the scan results in the firmware.
841 */ 839 */
842 if ((i && ssid_filter) || 840 if ((i && ssid_filter) ||
843 memcmp(scan_cfg_out->specific_bssid, &zero_mac, 841 !is_zero_ether_addr(scan_cfg_out->specific_bssid))
844 sizeof(zero_mac)))
845 *filtered_scan = true; 842 *filtered_scan = true;
846 } else { 843 } else {
847 scan_cfg_out->bss_mode = (u8) adapter->scan_mode; 844 scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
@@ -989,6 +986,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
989 *max_chan_per_scan = 2; 986 *max_chan_per_scan = 2;
990 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD) 987 else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
991 *max_chan_per_scan = 3; 988 *max_chan_per_scan = 3;
989 else
990 *max_chan_per_scan = 4;
992 } 991 }
993} 992}
994 993
@@ -1433,9 +1432,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
1433 if (ret) 1432 if (ret)
1434 dev_err(priv->adapter->dev, "cannot find ssid " 1433 dev_err(priv->adapter->dev, "cannot find ssid "
1435 "%s\n", bss_desc->ssid.ssid); 1434 "%s\n", bss_desc->ssid.ssid);
1436 break; 1435 break;
1437 default: 1436 default:
1438 ret = 0; 1437 ret = 0;
1439 } 1438 }
1440 } 1439 }
1441 1440
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index df3a33c530cf..5d87195390f8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -551,7 +551,6 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
551 struct host_cmd_tlv_mac_addr *tlv_mac; 551 struct host_cmd_tlv_mac_addr *tlv_mac;
552 u16 key_param_len = 0, cmd_size; 552 u16 key_param_len = 0, cmd_size;
553 int ret = 0; 553 int ret = 0;
554 const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
555 554
556 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL); 555 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
557 key_material->action = cpu_to_le16(cmd_action); 556 key_material->action = cpu_to_le16(cmd_action);
@@ -593,7 +592,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
593 /* set 0 when re-key */ 592 /* set 0 when re-key */
594 key_material->key_param_set.key[1] = 0; 593 key_material->key_param_set.key[1] = 0;
595 594
596 if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) { 595 if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
597 /* WAPI pairwise key: unicast */ 596 /* WAPI pairwise key: unicast */
598 key_material->key_param_set.key_info |= 597 key_material->key_param_set.key_info |=
599 cpu_to_le16(KEY_UNICAST); 598 cpu_to_le16(KEY_UNICAST);
@@ -610,7 +609,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
610 memcpy(&key_material->key_param_set.key[2], 609 memcpy(&key_material->key_param_set.key[2],
611 enc_key->key_material, enc_key->key_len); 610 enc_key->key_material, enc_key->key_len);
612 memcpy(&key_material->key_param_set.key[2 + enc_key->key_len], 611 memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
613 enc_key->wapi_rxpn, WAPI_RXPN_LEN); 612 enc_key->pn, PN_LEN);
614 key_material->key_param_set.length = 613 key_material->key_param_set.length =
615 cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN); 614 cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
616 615
@@ -621,23 +620,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
621 return ret; 620 return ret;
622 } 621 }
623 if (enc_key->key_len == WLAN_KEY_LEN_CCMP) { 622 if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
624 dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n"); 623 if (enc_key->is_igtk_key) {
625 key_material->key_param_set.key_type_id = 624 dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
625 key_material->key_param_set.key_type_id =
626 cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
627 if (cmd_oid == KEY_INFO_ENABLED)
628 key_material->key_param_set.key_info =
629 cpu_to_le16(KEY_ENABLED);
630 else
631 key_material->key_param_set.key_info =
632 cpu_to_le16(!KEY_ENABLED);
633
634 key_material->key_param_set.key_info |=
635 cpu_to_le16(KEY_IGTK);
636 } else {
637 dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
638 key_material->key_param_set.key_type_id =
626 cpu_to_le16(KEY_TYPE_ID_AES); 639 cpu_to_le16(KEY_TYPE_ID_AES);
627 if (cmd_oid == KEY_INFO_ENABLED) 640 if (cmd_oid == KEY_INFO_ENABLED)
628 key_material->key_param_set.key_info = 641 key_material->key_param_set.key_info =
629 cpu_to_le16(KEY_ENABLED); 642 cpu_to_le16(KEY_ENABLED);
630 else 643 else
631 key_material->key_param_set.key_info = 644 key_material->key_param_set.key_info =
632 cpu_to_le16(!KEY_ENABLED); 645 cpu_to_le16(!KEY_ENABLED);
633 646
634 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST) 647 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
635 /* AES pairwise key: unicast */ 648 /* AES pairwise key: unicast */
636 key_material->key_param_set.key_info |= 649 key_material->key_param_set.key_info |=
637 cpu_to_le16(KEY_UNICAST); 650 cpu_to_le16(KEY_UNICAST);
638 else /* AES group key: multicast */ 651 else /* AES group key: multicast */
639 key_material->key_param_set.key_info |= 652 key_material->key_param_set.key_info |=
640 cpu_to_le16(KEY_MCAST); 653 cpu_to_le16(KEY_MCAST);
654 }
641 } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { 655 } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
642 dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n"); 656 dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
643 key_material->key_param_set.key_type_id = 657 key_material->key_param_set.key_type_id =
@@ -668,6 +682,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
668 key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN) 682 key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
669 + sizeof(struct mwifiex_ie_types_header); 683 + sizeof(struct mwifiex_ie_types_header);
670 684
685 if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
686 KEY_TYPE_ID_AES_CMAC) {
687 struct mwifiex_cmac_param *param =
688 (void *)key_material->key_param_set.key;
689
690 memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
691 memcpy(param->key, enc_key->key_material,
692 WLAN_KEY_LEN_AES_CMAC);
693
694 key_param_len = sizeof(struct mwifiex_cmac_param);
695 key_material->key_param_set.key_len =
696 cpu_to_le16(key_param_len);
697 key_param_len += KEYPARAMSET_FIXED_LEN;
698 key_material->key_param_set.length =
699 cpu_to_le16(key_param_len);
700 key_param_len += sizeof(struct mwifiex_ie_types_header);
701 }
702
671 cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN 703 cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
672 + key_param_len); 704 + key_param_len);
673 705
@@ -1135,6 +1167,31 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1135 S_DS_GEN); 1167 S_DS_GEN);
1136 ret = 0; 1168 ret = 0;
1137 break; 1169 break;
1170 case HostCmd_CMD_MGMT_FRAME_REG:
1171 cmd_ptr->command = cpu_to_le16(cmd_no);
1172 cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
1173 cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
1174 cmd_ptr->size =
1175 cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
1176 S_DS_GEN);
1177 ret = 0;
1178 break;
1179 case HostCmd_CMD_REMAIN_ON_CHAN:
1180 cmd_ptr->command = cpu_to_le16(cmd_no);
1181 memcpy(&cmd_ptr->params, data_buf,
1182 sizeof(struct host_cmd_ds_remain_on_chan));
1183 cmd_ptr->size =
1184 cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) +
1185 S_DS_GEN);
1186 break;
1187 case HostCmd_CMD_P2P_MODE_CFG:
1188 cmd_ptr->command = cpu_to_le16(cmd_no);
1189 cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
1190 cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
1191 cmd_ptr->size =
1192 cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
1193 S_DS_GEN);
1194 break;
1138 case HostCmd_CMD_FUNC_INIT: 1195 case HostCmd_CMD_FUNC_INIT:
1139 if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET) 1196 if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
1140 priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY; 1197 priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1204,6 +1261,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1204 else if (priv->bss_mode == NL80211_IFTYPE_STATION) 1261 else if (priv->bss_mode == NL80211_IFTYPE_STATION)
1205 cmd_ptr->params.bss_mode.con_type = 1262 cmd_ptr->params.bss_mode.con_type =
1206 CONNECTION_TYPE_INFRA; 1263 CONNECTION_TYPE_INFRA;
1264 else if (priv->bss_mode == NL80211_IFTYPE_AP)
1265 cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
1207 cmd_ptr->size = cpu_to_le16(sizeof(struct 1266 cmd_ptr->size = cpu_to_le16(sizeof(struct
1208 host_cmd_ds_set_bss_mode) + S_DS_GEN); 1267 host_cmd_ds_set_bss_mode) + S_DS_GEN);
1209 ret = 0; 1268 ret = 0;
@@ -1253,35 +1312,35 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1253 1312
1254 if (first_sta) { 1313 if (first_sta) {
1255 if (priv->adapter->iface_type == MWIFIEX_PCIE) { 1314 if (priv->adapter->iface_type == MWIFIEX_PCIE) {
1256 ret = mwifiex_send_cmd_async(priv, 1315 ret = mwifiex_send_cmd_sync(priv,
1257 HostCmd_CMD_PCIE_DESC_DETAILS, 1316 HostCmd_CMD_PCIE_DESC_DETAILS,
1258 HostCmd_ACT_GEN_SET, 0, NULL); 1317 HostCmd_ACT_GEN_SET, 0, NULL);
1259 if (ret) 1318 if (ret)
1260 return -1; 1319 return -1;
1261 } 1320 }
1262 1321
1263 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT, 1322 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
1264 HostCmd_ACT_GEN_SET, 0, NULL); 1323 HostCmd_ACT_GEN_SET, 0, NULL);
1265 if (ret) 1324 if (ret)
1266 return -1; 1325 return -1;
1267 /* Read MAC address from HW */ 1326 /* Read MAC address from HW */
1268 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_GET_HW_SPEC, 1327 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
1269 HostCmd_ACT_GEN_GET, 0, NULL); 1328 HostCmd_ACT_GEN_GET, 0, NULL);
1270 if (ret) 1329 if (ret)
1271 return -1; 1330 return -1;
1272 1331
1273 /* Reconfigure tx buf size */ 1332 /* Reconfigure tx buf size */
1274 ret = mwifiex_send_cmd_async(priv, 1333 ret = mwifiex_send_cmd_sync(priv,
1275 HostCmd_CMD_RECONFIGURE_TX_BUFF, 1334 HostCmd_CMD_RECONFIGURE_TX_BUFF,
1276 HostCmd_ACT_GEN_SET, 0, 1335 HostCmd_ACT_GEN_SET, 0,
1277 &priv->adapter->tx_buf_size); 1336 &priv->adapter->tx_buf_size);
1278 if (ret) 1337 if (ret)
1279 return -1; 1338 return -1;
1280 1339
1281 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1340 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
1282 /* Enable IEEE PS by default */ 1341 /* Enable IEEE PS by default */
1283 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; 1342 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1284 ret = mwifiex_send_cmd_async( 1343 ret = mwifiex_send_cmd_sync(
1285 priv, HostCmd_CMD_802_11_PS_MODE_ENH, 1344 priv, HostCmd_CMD_802_11_PS_MODE_ENH,
1286 EN_AUTO_PS, BITMAP_STA_PS, NULL); 1345 EN_AUTO_PS, BITMAP_STA_PS, NULL);
1287 if (ret) 1346 if (ret)
@@ -1290,21 +1349,21 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1290 } 1349 }
1291 1350
1292 /* get tx rate */ 1351 /* get tx rate */
1293 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TX_RATE_CFG, 1352 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
1294 HostCmd_ACT_GEN_GET, 0, NULL); 1353 HostCmd_ACT_GEN_GET, 0, NULL);
1295 if (ret) 1354 if (ret)
1296 return -1; 1355 return -1;
1297 priv->data_rate = 0; 1356 priv->data_rate = 0;
1298 1357
1299 /* get tx power */ 1358 /* get tx power */
1300 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR, 1359 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
1301 HostCmd_ACT_GEN_GET, 0, NULL); 1360 HostCmd_ACT_GEN_GET, 0, NULL);
1302 if (ret) 1361 if (ret)
1303 return -1; 1362 return -1;
1304 1363
1305 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) { 1364 if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
1306 /* set ibss coalescing_status */ 1365 /* set ibss coalescing_status */
1307 ret = mwifiex_send_cmd_async( 1366 ret = mwifiex_send_cmd_sync(
1308 priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, 1367 priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
1309 HostCmd_ACT_GEN_SET, 0, &enable); 1368 HostCmd_ACT_GEN_SET, 0, &enable);
1310 if (ret) 1369 if (ret)
@@ -1314,16 +1373,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1314 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl)); 1373 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
1315 amsdu_aggr_ctrl.enable = true; 1374 amsdu_aggr_ctrl.enable = true;
1316 /* Send request to firmware */ 1375 /* Send request to firmware */
1317 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL, 1376 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
1318 HostCmd_ACT_GEN_SET, 0, 1377 HostCmd_ACT_GEN_SET, 0,
1319 &amsdu_aggr_ctrl); 1378 &amsdu_aggr_ctrl);
1320 if (ret) 1379 if (ret)
1321 return -1; 1380 return -1;
1322 /* MAC Control must be the last command in init_fw */ 1381 /* MAC Control must be the last command in init_fw */
1323 /* set MAC Control */ 1382 /* set MAC Control */
1324 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, 1383 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
1325 HostCmd_ACT_GEN_SET, 0, 1384 HostCmd_ACT_GEN_SET, 0,
1326 &priv->curr_pkt_filter); 1385 &priv->curr_pkt_filter);
1327 if (ret) 1386 if (ret)
1328 return -1; 1387 return -1;
1329 1388
@@ -1332,10 +1391,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1332 /* Enable auto deep sleep */ 1391 /* Enable auto deep sleep */
1333 auto_ds.auto_ds = DEEP_SLEEP_ON; 1392 auto_ds.auto_ds = DEEP_SLEEP_ON;
1334 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; 1393 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
1335 ret = mwifiex_send_cmd_async(priv, 1394 ret = mwifiex_send_cmd_sync(priv,
1336 HostCmd_CMD_802_11_PS_MODE_ENH, 1395 HostCmd_CMD_802_11_PS_MODE_ENH,
1337 EN_AUTO_PS, BITMAP_AUTO_DS, 1396 EN_AUTO_PS, BITMAP_AUTO_DS,
1338 &auto_ds); 1397 &auto_ds);
1339 if (ret) 1398 if (ret)
1340 return -1; 1399 return -1;
1341 } 1400 }
@@ -1343,23 +1402,24 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1343 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 1402 if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
1344 /* Send cmd to FW to enable/disable 11D function */ 1403 /* Send cmd to FW to enable/disable 11D function */
1345 state_11d = ENABLE_11D; 1404 state_11d = ENABLE_11D;
1346 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB, 1405 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
1347 HostCmd_ACT_GEN_SET, DOT11D_I, 1406 HostCmd_ACT_GEN_SET, DOT11D_I,
1348 &state_11d); 1407 &state_11d);
1349 if (ret) 1408 if (ret)
1350 dev_err(priv->adapter->dev, 1409 dev_err(priv->adapter->dev,
1351 "11D: failed to enable 11D\n"); 1410 "11D: failed to enable 11D\n");
1352 } 1411 }
1353 1412
1413 /* set last_init_cmd before sending the command */
1414 priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
1415
1354 /* Send cmd to FW to configure 11n specific configuration 1416 /* Send cmd to FW to configure 11n specific configuration
1355 * (Short GI, Channel BW, Green field support etc.) for transmit 1417 * (Short GI, Channel BW, Green field support etc.) for transmit
1356 */ 1418 */
1357 tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG; 1419 tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
1358 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_CFG, 1420 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
1359 HostCmd_ACT_GEN_SET, 0, &tx_cfg); 1421 HostCmd_ACT_GEN_SET, 0, &tx_cfg);
1360 1422
1361 /* set last_init_cmd */
1362 priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
1363 ret = -EINPROGRESS; 1423 ret = -EINPROGRESS;
1364 1424
1365 return ret; 1425 return ret;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 0b09004ebb25..e380171c4c5d 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -123,7 +123,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
123{ 123{
124 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp = 124 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
125 &resp->params.rssi_info_rsp; 125 &resp->params.rssi_info_rsp;
126 struct mwifiex_ds_misc_subsc_evt subsc_evt; 126 struct mwifiex_ds_misc_subsc_evt *subsc_evt =
127 &priv->async_subsc_evt_storage;
127 128
128 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last); 129 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
129 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last); 130 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -140,26 +141,27 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
140 if (priv->subsc_evt_rssi_state == EVENT_HANDLED) 141 if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
141 return 0; 142 return 0;
142 143
144 memset(subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
145
143 /* Resubscribe low and high rssi events with new thresholds */ 146 /* Resubscribe low and high rssi events with new thresholds */
144 memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt)); 147 subsc_evt->events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
145 subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH; 148 subsc_evt->action = HostCmd_ACT_BITWISE_SET;
146 subsc_evt.action = HostCmd_ACT_BITWISE_SET;
147 if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) { 149 if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
148 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg - 150 subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
149 priv->cqm_rssi_hyst); 151 priv->cqm_rssi_hyst);
150 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold); 152 subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
151 } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) { 153 } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
152 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold); 154 subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
153 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg + 155 subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
154 priv->cqm_rssi_hyst); 156 priv->cqm_rssi_hyst);
155 } 157 }
156 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1; 158 subsc_evt->bcn_l_rssi_cfg.evt_freq = 1;
157 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1; 159 subsc_evt->bcn_h_rssi_cfg.evt_freq = 1;
158 160
159 priv->subsc_evt_rssi_state = EVENT_HANDLED; 161 priv->subsc_evt_rssi_state = EVENT_HANDLED;
160 162
161 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT, 163 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
162 0, 0, &subsc_evt); 164 0, 0, subsc_evt);
163 165
164 return 0; 166 return 0;
165} 167}
@@ -652,6 +654,38 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
652} 654}
653 655
654/* 656/*
657 * This function handles the command response of remain on channel.
658 */
659static int
660mwifiex_ret_remain_on_chan(struct mwifiex_private *priv,
661 struct host_cmd_ds_command *resp,
662 struct host_cmd_ds_remain_on_chan *roc_cfg)
663{
664 struct host_cmd_ds_remain_on_chan *resp_cfg = &resp->params.roc_cfg;
665
666 if (roc_cfg)
667 memcpy(roc_cfg, resp_cfg, sizeof(*roc_cfg));
668
669 return 0;
670}
671
672/*
673 * This function handles the command response of P2P mode cfg.
674 */
675static int
676mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
677 struct host_cmd_ds_command *resp,
678 void *data_buf)
679{
680 struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
681
682 if (data_buf)
683 *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
684
685 return 0;
686}
687
688/*
655 * This function handles the command response of register access. 689 * This function handles the command response of register access.
656 * 690 *
657 * The register value and offset are returned to the user. For EEPROM 691 * The register value and offset are returned to the user. For EEPROM
@@ -736,7 +770,6 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
736{ 770{
737 struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp = 771 struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
738 &(resp->params.ibss_coalescing); 772 &(resp->params.ibss_coalescing);
739 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
740 773
741 if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET) 774 if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
742 return 0; 775 return 0;
@@ -745,7 +778,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
745 "info: new BSSID %pM\n", ibss_coal_resp->bssid); 778 "info: new BSSID %pM\n", ibss_coal_resp->bssid);
746 779
747 /* If rsp has NULL BSSID, Just return..... No Action */ 780 /* If rsp has NULL BSSID, Just return..... No Action */
748 if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) { 781 if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
749 dev_warn(priv->adapter->dev, "new BSSID is NULL\n"); 782 dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
750 return 0; 783 return 0;
751 } 784 }
@@ -775,8 +808,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
775 * This function handles the command response for subscribe event command. 808 * This function handles the command response for subscribe event command.
776 */ 809 */
777static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, 810static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
778 struct host_cmd_ds_command *resp, 811 struct host_cmd_ds_command *resp)
779 struct mwifiex_ds_misc_subsc_evt *sub_event)
780{ 812{
781 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event = 813 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
782 &resp->params.subsc_evt; 814 &resp->params.subsc_evt;
@@ -786,10 +818,6 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
786 dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n", 818 dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
787 le16_to_cpu(cmd_sub_event->events)); 819 le16_to_cpu(cmd_sub_event->events));
788 820
789 /*Return the subscribed event info for a Get request*/
790 if (sub_event)
791 sub_event->events = le16_to_cpu(cmd_sub_event->events);
792
793 return 0; 821 return 0;
794} 822}
795 823
@@ -879,6 +907,13 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
879 case HostCmd_CMD_VERSION_EXT: 907 case HostCmd_CMD_VERSION_EXT:
880 ret = mwifiex_ret_ver_ext(priv, resp, data_buf); 908 ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
881 break; 909 break;
910 case HostCmd_CMD_REMAIN_ON_CHAN:
911 ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf);
912 break;
913 case HostCmd_CMD_P2P_MODE_CFG:
914 ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
915 break;
916 case HostCmd_CMD_MGMT_FRAME_REG:
882 case HostCmd_CMD_FUNC_INIT: 917 case HostCmd_CMD_FUNC_INIT:
883 case HostCmd_CMD_FUNC_SHUTDOWN: 918 case HostCmd_CMD_FUNC_SHUTDOWN:
884 break; 919 break;
@@ -913,7 +948,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
913 le16_to_cpu(resp->params.tx_buf.mp_end_port)); 948 le16_to_cpu(resp->params.tx_buf.mp_end_port));
914 break; 949 break;
915 case HostCmd_CMD_AMSDU_AGGR_CTRL: 950 case HostCmd_CMD_AMSDU_AGGR_CTRL:
916 ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
917 break; 951 break;
918 case HostCmd_CMD_WMM_GET_STATUS: 952 case HostCmd_CMD_WMM_GET_STATUS:
919 ret = mwifiex_ret_wmm_get_status(priv, resp); 953 ret = mwifiex_ret_wmm_get_status(priv, resp);
@@ -932,12 +966,11 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
932 case HostCmd_CMD_SET_BSS_MODE: 966 case HostCmd_CMD_SET_BSS_MODE:
933 break; 967 break;
934 case HostCmd_CMD_11N_CFG: 968 case HostCmd_CMD_11N_CFG:
935 ret = mwifiex_ret_11n_cfg(resp, data_buf);
936 break; 969 break;
937 case HostCmd_CMD_PCIE_DESC_DETAILS: 970 case HostCmd_CMD_PCIE_DESC_DETAILS:
938 break; 971 break;
939 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT: 972 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
940 ret = mwifiex_ret_subsc_evt(priv, resp, data_buf); 973 ret = mwifiex_ret_subsc_evt(priv, resp);
941 break; 974 break;
942 case HostCmd_CMD_UAP_SYS_CONFIG: 975 case HostCmd_CMD_UAP_SYS_CONFIG:
943 break; 976 break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index b8614a825460..aafde30e714a 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
184int mwifiex_process_sta_event(struct mwifiex_private *priv) 184int mwifiex_process_sta_event(struct mwifiex_private *priv)
185{ 185{
186 struct mwifiex_adapter *adapter = priv->adapter; 186 struct mwifiex_adapter *adapter = priv->adapter;
187 int len, ret = 0; 187 int ret = 0;
188 u32 eventcause = adapter->event_cause; 188 u32 eventcause = adapter->event_cause;
189 struct station_info sinfo; 189 u16 ctrl;
190 struct mwifiex_assoc_event *event;
191 190
192 switch (eventcause) { 191 switch (eventcause) {
193 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: 192 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
279 278
280 case EVENT_MIC_ERR_UNICAST: 279 case EVENT_MIC_ERR_UNICAST:
281 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n"); 280 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
281 cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
282 NL80211_KEYTYPE_PAIRWISE,
283 -1, NULL, GFP_KERNEL);
282 break; 284 break;
283 285
284 case EVENT_MIC_ERR_MULTICAST: 286 case EVENT_MIC_ERR_MULTICAST:
285 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n"); 287 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
288 cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
289 NL80211_KEYTYPE_GROUP,
290 -1, NULL, GFP_KERNEL);
286 break; 291 break;
287 case EVENT_MIB_CHANGED: 292 case EVENT_MIB_CHANGED:
288 case EVENT_INIT_DONE: 293 case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
384 adapter->event_body); 389 adapter->event_body);
385 break; 390 break;
386 case EVENT_AMSDU_AGGR_CTRL: 391 case EVENT_AMSDU_AGGR_CTRL:
387 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", 392 ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
388 *(u16 *) adapter->event_body); 393 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
394
389 adapter->tx_buf_size = 395 adapter->tx_buf_size =
390 min(adapter->curr_tx_buf_size, 396 min_t(u16, adapter->curr_tx_buf_size, ctrl);
391 le16_to_cpu(*(__le16 *) adapter->event_body));
392 dev_dbg(adapter->dev, "event: tx_buf_size %d\n", 397 dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
393 adapter->tx_buf_size); 398 adapter->tx_buf_size);
394 break; 399 break;
@@ -405,51 +410,18 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
405 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause); 410 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
406 break; 411 break;
407 412
408 case EVENT_UAP_STA_ASSOC: 413 case EVENT_REMAIN_ON_CHAN_EXPIRED:
409 memset(&sinfo, 0, sizeof(sinfo)); 414 dev_dbg(adapter->dev, "event: Remain on channel expired\n");
410 event = (struct mwifiex_assoc_event *) 415 cfg80211_remain_on_channel_expired(priv->wdev,
411 (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER); 416 priv->roc_cfg.cookie,
412 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { 417 &priv->roc_cfg.chan,
413 len = -1; 418 priv->roc_cfg.chan_type,
414 419 GFP_ATOMIC);
415 if (ieee80211_is_assoc_req(event->frame_control)) 420
416 len = 0; 421 memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
417 else if (ieee80211_is_reassoc_req(event->frame_control)) 422
418 /* There will be ETH_ALEN bytes of
419 * current_ap_addr before the re-assoc ies.
420 */
421 len = ETH_ALEN;
422
423 if (len != -1) {
424 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
425 sinfo.assoc_req_ies = &event->data[len];
426 len = (u8 *)sinfo.assoc_req_ies -
427 (u8 *)&event->frame_control;
428 sinfo.assoc_req_ies_len =
429 le16_to_cpu(event->len) - (u16)len;
430 }
431 }
432 cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
433 GFP_KERNEL);
434 break;
435 case EVENT_UAP_STA_DEAUTH:
436 cfg80211_del_sta(priv->netdev, adapter->event_body +
437 MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
438 break;
439 case EVENT_UAP_BSS_IDLE:
440 priv->media_connected = false;
441 break;
442 case EVENT_UAP_BSS_ACTIVE:
443 priv->media_connected = true;
444 break;
445 case EVENT_UAP_BSS_START:
446 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
447 memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
448 break;
449 case EVENT_UAP_MIC_COUNTERMEASURES:
450 /* For future development */
451 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
452 break; 423 break;
424
453 default: 425 default:
454 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 426 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
455 eventcause); 427 eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index fb2136089a22..0c9f70b2cbe6 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -26,6 +26,9 @@
26#include "11n.h" 26#include "11n.h"
27#include "cfg80211.h" 27#include "cfg80211.h"
28 28
29static int disconnect_on_suspend = 1;
30module_param(disconnect_on_suspend, int, 0644);
31
29/* 32/*
30 * Copies the multicast address list from device to driver. 33 * Copies the multicast address list from device to driver.
31 * 34 *
@@ -192,6 +195,44 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
192 return ret; 195 return ret;
193} 196}
194 197
198static int mwifiex_process_country_ie(struct mwifiex_private *priv,
199 struct cfg80211_bss *bss)
200{
201 u8 *country_ie, country_ie_len;
202 struct mwifiex_802_11d_domain_reg *domain_info =
203 &priv->adapter->domain_reg;
204
205 country_ie = (u8 *)ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
206
207 if (!country_ie)
208 return 0;
209
210 country_ie_len = country_ie[1];
211 if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
212 return 0;
213
214 domain_info->country_code[0] = country_ie[2];
215 domain_info->country_code[1] = country_ie[3];
216 domain_info->country_code[2] = ' ';
217
218 country_ie_len -= IEEE80211_COUNTRY_STRING_LEN;
219
220 domain_info->no_of_triplet =
221 country_ie_len / sizeof(struct ieee80211_country_ie_triplet);
222
223 memcpy((u8 *)domain_info->triplet,
224 &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len);
225
226 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
227 HostCmd_ACT_GEN_SET, 0, NULL)) {
228 wiphy_err(priv->adapter->wiphy,
229 "11D: setting domain info in FW\n");
230 return -1;
231 }
232
233 return 0;
234}
235
195/* 236/*
196 * In Ad-Hoc mode, the IBSS is created if not found in scan list. 237 * In Ad-Hoc mode, the IBSS is created if not found in scan list.
197 * In both Ad-Hoc and infra mode, an deauthentication is performed 238 * In both Ad-Hoc and infra mode, an deauthentication is performed
@@ -207,6 +248,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
207 priv->scan_block = false; 248 priv->scan_block = false;
208 249
209 if (bss) { 250 if (bss) {
251 mwifiex_process_country_ie(priv, bss);
252
210 /* Allocate and fill new bss descriptor */ 253 /* Allocate and fill new bss descriptor */
211 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), 254 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
212 GFP_KERNEL); 255 GFP_KERNEL);
@@ -408,6 +451,16 @@ EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
408int mwifiex_enable_hs(struct mwifiex_adapter *adapter) 451int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
409{ 452{
410 struct mwifiex_ds_hs_cfg hscfg; 453 struct mwifiex_ds_hs_cfg hscfg;
454 struct mwifiex_private *priv;
455 int i;
456
457 if (disconnect_on_suspend) {
458 for (i = 0; i < adapter->priv_num; i++) {
459 priv = adapter->priv[i];
460 if (priv)
461 mwifiex_deauthenticate(priv, NULL);
462 }
463 }
411 464
412 if (adapter->hs_activated) { 465 if (adapter->hs_activated) {
413 dev_dbg(adapter->dev, "cmd: HS Already actived\n"); 466 dev_dbg(adapter->dev, "cmd: HS Already actived\n");
@@ -942,20 +995,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
942 * This function allocates the IOCTL request buffer, fills it 995 * This function allocates the IOCTL request buffer, fills it
943 * with requisite parameters and calls the IOCTL handler. 996 * with requisite parameters and calls the IOCTL handler.
944 */ 997 */
945int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key, 998int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
946 int key_len, u8 key_index, 999 const u8 *key, int key_len, u8 key_index,
947 const u8 *mac_addr, int disable) 1000 const u8 *mac_addr, int disable)
948{ 1001{
949 struct mwifiex_ds_encrypt_key encrypt_key; 1002 struct mwifiex_ds_encrypt_key encrypt_key;
950 1003
951 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key)); 1004 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
952 encrypt_key.key_len = key_len; 1005 encrypt_key.key_len = key_len;
1006
1007 if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
1008 encrypt_key.is_igtk_key = true;
1009
953 if (!disable) { 1010 if (!disable) {
954 encrypt_key.key_index = key_index; 1011 encrypt_key.key_index = key_index;
955 if (key_len) 1012 if (key_len)
956 memcpy(encrypt_key.key_material, key, key_len); 1013 memcpy(encrypt_key.key_material, key, key_len);
957 if (mac_addr) 1014 if (mac_addr)
958 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); 1015 memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
1016 if (kp && kp->seq && kp->seq_len)
1017 memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
959 } else { 1018 } else {
960 encrypt_key.key_disable = true; 1019 encrypt_key.key_disable = true;
961 if (mac_addr) 1020 if (mac_addr)
@@ -984,6 +1043,65 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
984 return 0; 1043 return 0;
985} 1044}
986 1045
1046int
1047mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1048 struct ieee80211_channel *chan,
1049 enum nl80211_channel_type *ct,
1050 unsigned int duration)
1051{
1052 struct host_cmd_ds_remain_on_chan roc_cfg;
1053 u8 sc;
1054
1055 memset(&roc_cfg, 0, sizeof(roc_cfg));
1056 roc_cfg.action = cpu_to_le16(action);
1057 if (action == HostCmd_ACT_GEN_SET) {
1058 roc_cfg.band_cfg = chan->band;
1059 sc = mwifiex_chan_type_to_sec_chan_offset(*ct);
1060 roc_cfg.band_cfg |= (sc << 2);
1061
1062 roc_cfg.channel =
1063 ieee80211_frequency_to_channel(chan->center_freq);
1064 roc_cfg.duration = cpu_to_le32(duration);
1065 }
1066 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
1067 action, 0, &roc_cfg)) {
1068 dev_err(priv->adapter->dev, "failed to remain on channel\n");
1069 return -1;
1070 }
1071
1072 return roc_cfg.status;
1073}
1074
1075int
1076mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
1077{
1078 if (GET_BSS_ROLE(priv) == bss_role) {
1079 dev_dbg(priv->adapter->dev,
1080 "info: already in the desired role.\n");
1081 return 0;
1082 }
1083
1084 mwifiex_free_priv(priv);
1085 mwifiex_init_priv(priv);
1086
1087 priv->bss_role = bss_role;
1088 switch (bss_role) {
1089 case MWIFIEX_BSS_ROLE_UAP:
1090 priv->bss_mode = NL80211_IFTYPE_AP;
1091 break;
1092 case MWIFIEX_BSS_ROLE_STA:
1093 case MWIFIEX_BSS_ROLE_ANY:
1094 default:
1095 priv->bss_mode = NL80211_IFTYPE_STATION;
1096 break;
1097 }
1098
1099 mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
1100 HostCmd_ACT_GEN_SET, 0, NULL);
1101
1102 return mwifiex_sta_init_cmd(priv, false);
1103}
1104
987/* 1105/*
988 * Sends IOCTL request to get statistics information. 1106 * Sends IOCTL request to get statistics information.
989 * 1107 *
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 02ce3b77d3e7..07d32b73783e 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
54 54
55 local_rx_pd = (struct rxpd *) (skb->data); 55 local_rx_pd = (struct rxpd *) (skb->data);
56 56
57 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd + 57 rx_pkt_hdr = (void *)local_rx_pd +
58 local_rx_pd->rx_pkt_offset); 58 le16_to_cpu(local_rx_pd->rx_pkt_offset);
59 59
60 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, 60 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
61 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { 61 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
125 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); 125 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
126 struct rx_packet_hdr *rx_pkt_hdr; 126 struct rx_packet_hdr *rx_pkt_hdr;
127 u8 ta[ETH_ALEN]; 127 u8 ta[ETH_ALEN];
128 u16 rx_pkt_type; 128 u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
129 struct mwifiex_private *priv = 129 struct mwifiex_private *priv =
130 mwifiex_get_priv_by_id(adapter, rx_info->bss_num, 130 mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
131 rx_info->bss_type); 131 rx_info->bss_type);
@@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
134 return -1; 134 return -1;
135 135
136 local_rx_pd = (struct rxpd *) (skb->data); 136 local_rx_pd = (struct rxpd *) (skb->data);
137 rx_pkt_type = local_rx_pd->rx_pkt_type; 137 rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
138 rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
139 rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
140 seq_num = le16_to_cpu(local_rx_pd->seq_num);
138 141
139 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd + 142 rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
140 local_rx_pd->rx_pkt_offset);
141 143
142 if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) > 144 if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
143 (u16) skb->len) { 145 dev_err(adapter->dev,
144 dev_err(adapter->dev, "wrong rx packet: len=%d," 146 "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
145 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, 147 skb->len, rx_pkt_offset, rx_pkt_length);
146 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
147 priv->stats.rx_dropped++; 148 priv->stats.rx_dropped++;
148 149
149 if (adapter->if_ops.data_complete) 150 if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
154 return ret; 155 return ret;
155 } 156 }
156 157
157 if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) { 158 if (rx_pkt_type == PKT_TYPE_AMSDU) {
158 struct sk_buff_head list; 159 struct sk_buff_head list;
159 struct sk_buff *rx_skb; 160 struct sk_buff *rx_skb;
160 161
161 __skb_queue_head_init(&list); 162 __skb_queue_head_init(&list);
162 163
163 skb_pull(skb, local_rx_pd->rx_pkt_offset); 164 skb_pull(skb, rx_pkt_offset);
164 skb_trim(skb, local_rx_pd->rx_pkt_length); 165 skb_trim(skb, rx_pkt_length);
165 166
166 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, 167 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
167 priv->wdev->iftype, 0, false); 168 priv->wdev->iftype, 0, false);
@@ -173,6 +174,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
173 dev_err(adapter->dev, "Rx of A-MSDU failed"); 174 dev_err(adapter->dev, "Rx of A-MSDU failed");
174 } 175 }
175 return 0; 176 return 0;
177 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
178 ret = mwifiex_process_mgmt_packet(adapter, skb);
179 if (ret)
180 dev_err(adapter->dev, "Rx of mgmt packet failed");
181 dev_kfree_skb_any(skb);
182 return ret;
176 } 183 }
177 184
178 /* 185 /*
@@ -189,17 +196,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
189 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN); 196 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
190 } else { 197 } else {
191 if (rx_pkt_type != PKT_TYPE_BAR) 198 if (rx_pkt_type != PKT_TYPE_BAR)
192 priv->rx_seq[local_rx_pd->priority] = 199 priv->rx_seq[local_rx_pd->priority] = seq_num;
193 local_rx_pd->seq_num;
194 memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address, 200 memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
195 ETH_ALEN); 201 ETH_ALEN);
196 } 202 }
197 203
198 /* Reorder and send to OS */ 204 /* Reorder and send to OS */
199 ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num, 205 ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
200 local_rx_pd->priority, ta, 206 ta, (u8) rx_pkt_type, skb);
201 (u8) local_rx_pd->rx_pkt_type,
202 skb);
203 207
204 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { 208 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
205 if (adapter->if_ops.data_complete) 209 if (adapter->if_ops.data_complete)
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 0a046d3a0c16..7b581af24f5f 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -48,6 +48,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
48 struct txpd *local_tx_pd; 48 struct txpd *local_tx_pd;
49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); 49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
50 u8 pad; 50 u8 pad;
51 u16 pkt_type, pkt_offset;
51 52
52 if (!skb->len) { 53 if (!skb->len) {
53 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); 54 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -55,6 +56,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
55 return skb->data; 56 return skb->data;
56 } 57 }
57 58
59 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
60
58 /* If skb->data is not aligned; add padding */ 61 /* If skb->data is not aligned; add padding */
59 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4; 62 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
60 63
@@ -93,7 +96,14 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
93 } 96 }
94 97
95 /* Offset of actual data */ 98 /* Offset of actual data */
96 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad); 99 pkt_offset = sizeof(struct txpd) + pad;
100 if (pkt_type == PKT_TYPE_MGMT) {
101 /* Set the packet type and add header for management frame */
102 local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type);
103 pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
104 }
105
106 local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
97 107
98 /* make space for INTF_HEADER_LEN */ 108 /* make space for INTF_HEADER_LEN */
99 skb_push(skb, INTF_HEADER_LEN); 109 skb_push(skb, INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index cecb27283196..2af263992e83 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
51 rx_info->bss_num = priv->bss_num; 51 rx_info->bss_num = priv->bss_num;
52 rx_info->bss_type = priv->bss_type; 52 rx_info->bss_type = priv->bss_type;
53 53
54 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
55 return mwifiex_process_uap_rx_packet(adapter, skb);
56
54 return mwifiex_process_sta_rx_packet(adapter, skb); 57 return mwifiex_process_sta_rx_packet(adapter, skb);
55} 58}
56EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); 59EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -72,7 +75,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
72 u8 *head_ptr; 75 u8 *head_ptr;
73 struct txpd *local_tx_pd = NULL; 76 struct txpd *local_tx_pd = NULL;
74 77
75 head_ptr = mwifiex_process_sta_txpd(priv, skb); 78 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
79 head_ptr = mwifiex_process_uap_txpd(priv, skb);
80 else
81 head_ptr = mwifiex_process_sta_txpd(priv, skb);
82
76 if (head_ptr) { 83 if (head_ptr) {
77 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) 84 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
78 local_tx_pd = 85 local_tx_pd =
@@ -157,6 +164,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
157 priv->stats.tx_errors++; 164 priv->stats.tx_errors++;
158 } 165 }
159 166
167 if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
168 atomic_dec_return(&adapter->pending_bridged_pkts);
160 if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING) 169 if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
161 goto done; 170 goto done;
162 171
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f40e93fe894a..d95a2d558fcf 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
167 if (ht_ie) { 167 if (ht_ie) {
168 memcpy(&bss_cfg->ht_cap, ht_ie + 2, 168 memcpy(&bss_cfg->ht_cap, ht_ie + 2,
169 sizeof(struct ieee80211_ht_cap)); 169 sizeof(struct ieee80211_ht_cap));
170 priv->ap_11n_enabled = 1;
170 } else { 171 } else {
171 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap)); 172 memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
172 bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP); 173 bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
@@ -176,6 +177,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
176 return; 177 return;
177} 178}
178 179
180/* This function finds supported rates IE from beacon parameter and sets
181 * these rates into bss_config structure.
182 */
183void
184mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
185 struct cfg80211_ap_settings *params)
186{
187 struct ieee_types_header *rate_ie;
188 int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
189 const u8 *var_pos = params->beacon.head + var_offset;
190 int len = params->beacon.head_len - var_offset;
191
192 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
193 if (rate_ie)
194 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
195
196 return;
197}
198
179/* This function initializes some of mwifiex_uap_bss_param variables. 199/* This function initializes some of mwifiex_uap_bss_param variables.
180 * This helps FW in ignoring invalid values. These values may or may not 200 * This helps FW in ignoring invalid values. These values may or may not
181 * be get updated to valid ones at later stage. 201 * be get updated to valid ones at later stage.
@@ -322,8 +342,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
322 struct host_cmd_tlv_retry_limit *retry_limit; 342 struct host_cmd_tlv_retry_limit *retry_limit;
323 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol; 343 struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
324 struct host_cmd_tlv_auth_type *auth_type; 344 struct host_cmd_tlv_auth_type *auth_type;
345 struct host_cmd_tlv_rates *tlv_rates;
346 struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
325 struct mwifiex_ie_types_htcap *htcap; 347 struct mwifiex_ie_types_htcap *htcap;
326 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf; 348 struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
349 int i;
327 u16 cmd_size = *param_size; 350 u16 cmd_size = *param_size;
328 351
329 if (bss_cfg->ssid.ssid_len) { 352 if (bss_cfg->ssid.ssid_len) {
@@ -343,7 +366,23 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
343 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid); 366 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
344 tlv += sizeof(struct host_cmd_tlv_bcast_ssid); 367 tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
345 } 368 }
346 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { 369 if (bss_cfg->rates[0]) {
370 tlv_rates = (struct host_cmd_tlv_rates *)tlv;
371 tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
372
373 for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
374 i++)
375 tlv_rates->rates[i] = bss_cfg->rates[i];
376
377 tlv_rates->tlv.len = cpu_to_le16(i);
378 cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
379 tlv += sizeof(struct host_cmd_tlv_rates) + i;
380 }
381 if (bss_cfg->channel &&
382 ((bss_cfg->band_cfg == BAND_CONFIG_BG &&
383 bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
384 (bss_cfg->band_cfg == BAND_CONFIG_A &&
385 bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
347 chan_band = (struct host_cmd_tlv_channel_band *)tlv; 386 chan_band = (struct host_cmd_tlv_channel_band *)tlv;
348 chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST); 387 chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
349 chan_band->tlv.len = 388 chan_band->tlv.len =
@@ -459,6 +498,27 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
459 tlv += sizeof(struct mwifiex_ie_types_htcap); 498 tlv += sizeof(struct mwifiex_ie_types_htcap);
460 } 499 }
461 500
501 if (bss_cfg->sta_ao_timer) {
502 ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
503 ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
504 ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
505 sizeof(struct host_cmd_tlv));
506 ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
507 cmd_size += sizeof(*ao_timer);
508 tlv += sizeof(*ao_timer);
509 }
510
511 if (bss_cfg->ps_sta_ao_timer) {
512 ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
513 ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
514 ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
515 sizeof(struct host_cmd_tlv));
516 ps_ao_timer->sta_ao_timer =
517 cpu_to_le32(bss_cfg->ps_sta_ao_timer);
518 cmd_size += sizeof(*ps_ao_timer);
519 tlv += sizeof(*ps_ao_timer);
520 }
521
462 *param_size = cmd_size; 522 *param_size = cmd_size;
463 523
464 return 0; 524 return 0;
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644
index 000000000000..a33fa394e349
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -0,0 +1,290 @@
1/*
2 * Marvell Wireless LAN device driver: AP event handling
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "main.h"
22#include "11n.h"
23
24/*
25 * This function will return the pointer to station entry in station list
26 * table which matches specified mac address.
27 * This function should be called after acquiring RA list spinlock.
28 * NULL is returned if station entry is not found in associated STA list.
29 */
30struct mwifiex_sta_node *
31mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
32{
33 struct mwifiex_sta_node *node;
34
35 if (!mac)
36 return NULL;
37
38 list_for_each_entry(node, &priv->sta_list, list) {
39 if (!memcmp(node->mac_addr, mac, ETH_ALEN))
40 return node;
41 }
42
43 return NULL;
44}
45
46/*
47 * This function will add a sta_node entry to associated station list
48 * table with the given mac address.
49 * If entry exist already, existing entry is returned.
50 * If received mac address is NULL, NULL is returned.
51 */
52static struct mwifiex_sta_node *
53mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
54{
55 struct mwifiex_sta_node *node;
56 unsigned long flags;
57
58 if (!mac)
59 return NULL;
60
61 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
62 node = mwifiex_get_sta_entry(priv, mac);
63 if (node)
64 goto done;
65
66 node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
67 if (!node)
68 goto done;
69
70 memcpy(node->mac_addr, mac, ETH_ALEN);
71 list_add_tail(&node->list, &priv->sta_list);
72
73done:
74 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
75 return node;
76}
77
78/*
79 * This function will search for HT IE in association request IEs
80 * and set station HT parameters accordingly.
81 */
82static void
83mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
84 int ies_len, struct mwifiex_sta_node *node)
85{
86 const struct ieee80211_ht_cap *ht_cap;
87
88 if (!ies)
89 return;
90
91 ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
92 if (ht_cap) {
93 node->is_11n_enabled = 1;
94 node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
95 IEEE80211_HT_CAP_MAX_AMSDU ?
96 MWIFIEX_TX_DATA_BUF_SIZE_8K :
97 MWIFIEX_TX_DATA_BUF_SIZE_4K;
98 } else {
99 node->is_11n_enabled = 0;
100 }
101
102 return;
103}
104
105/*
106 * This function will delete a station entry from station list
107 */
108static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
109{
110 struct mwifiex_sta_node *node, *tmp;
111 unsigned long flags;
112
113 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
114
115 node = mwifiex_get_sta_entry(priv, mac);
116 if (node) {
117 list_for_each_entry_safe(node, tmp, &priv->sta_list,
118 list) {
119 list_del(&node->list);
120 kfree(node);
121 }
122 }
123
124 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
125 return;
126}
127
128/*
129 * This function will delete all stations from associated station list.
130 */
131static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
132{
133 struct mwifiex_sta_node *node, *tmp;
134 unsigned long flags;
135
136 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
137
138 list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
139 list_del(&node->list);
140 kfree(node);
141 }
142
143 INIT_LIST_HEAD(&priv->sta_list);
144 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
145 return;
146}
147
148/*
149 * This function handles AP interface specific events generated by firmware.
150 *
151 * Event specific routines are called by this function based
152 * upon the generated event cause.
153 *
154 *
155 * Events supported for AP -
156 * - EVENT_UAP_STA_ASSOC
157 * - EVENT_UAP_STA_DEAUTH
158 * - EVENT_UAP_BSS_ACTIVE
159 * - EVENT_UAP_BSS_START
160 * - EVENT_UAP_BSS_IDLE
161 * - EVENT_UAP_MIC_COUNTERMEASURES:
162 */
163int mwifiex_process_uap_event(struct mwifiex_private *priv)
164{
165 struct mwifiex_adapter *adapter = priv->adapter;
166 int len, i;
167 u32 eventcause = adapter->event_cause;
168 struct station_info sinfo;
169 struct mwifiex_assoc_event *event;
170 struct mwifiex_sta_node *node;
171 u8 *deauth_mac;
172 struct host_cmd_ds_11n_batimeout *ba_timeout;
173 u16 ctrl;
174
175 switch (eventcause) {
176 case EVENT_UAP_STA_ASSOC:
177 memset(&sinfo, 0, sizeof(sinfo));
178 event = (struct mwifiex_assoc_event *)
179 (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
180 if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
181 len = -1;
182
183 if (ieee80211_is_assoc_req(event->frame_control))
184 len = 0;
185 else if (ieee80211_is_reassoc_req(event->frame_control))
186 /* There will be ETH_ALEN bytes of
187 * current_ap_addr before the re-assoc ies.
188 */
189 len = ETH_ALEN;
190
191 if (len != -1) {
192 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
193 sinfo.assoc_req_ies = &event->data[len];
194 len = (u8 *)sinfo.assoc_req_ies -
195 (u8 *)&event->frame_control;
196 sinfo.assoc_req_ies_len =
197 le16_to_cpu(event->len) - (u16)len;
198 }
199 }
200 cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
201 GFP_KERNEL);
202
203 node = mwifiex_add_sta_entry(priv, event->sta_addr);
204 if (!node) {
205 dev_warn(adapter->dev,
206 "could not create station entry!\n");
207 return -1;
208 }
209
210 if (!priv->ap_11n_enabled)
211 break;
212
213 mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
214 sinfo.assoc_req_ies_len, node);
215
216 for (i = 0; i < MAX_NUM_TID; i++) {
217 if (node->is_11n_enabled)
218 node->ampdu_sta[i] =
219 priv->aggr_prio_tbl[i].ampdu_user;
220 else
221 node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
222 }
223 memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
224 break;
225 case EVENT_UAP_STA_DEAUTH:
226 deauth_mac = adapter->event_body +
227 MWIFIEX_UAP_EVENT_EXTRA_HEADER;
228 cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
229
230 if (priv->ap_11n_enabled) {
231 mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
232 mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
233 }
234 mwifiex_del_sta_entry(priv, deauth_mac);
235 break;
236 case EVENT_UAP_BSS_IDLE:
237 priv->media_connected = false;
238 mwifiex_clean_txrx(priv);
239 mwifiex_del_all_sta_list(priv);
240 break;
241 case EVENT_UAP_BSS_ACTIVE:
242 priv->media_connected = true;
243 break;
244 case EVENT_UAP_BSS_START:
245 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
246 memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
247 ETH_ALEN);
248 break;
249 case EVENT_UAP_MIC_COUNTERMEASURES:
250 /* For future development */
251 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
252 break;
253 case EVENT_AMSDU_AGGR_CTRL:
254 ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
255 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
256
257 if (priv->media_connected) {
258 adapter->tx_buf_size =
259 min_t(u16, adapter->curr_tx_buf_size, ctrl);
260 dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
261 adapter->tx_buf_size);
262 }
263 break;
264 case EVENT_ADDBA:
265 dev_dbg(adapter->dev, "event: ADDBA Request\n");
266 if (priv->media_connected)
267 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
268 HostCmd_ACT_GEN_SET, 0,
269 adapter->event_body);
270 break;
271 case EVENT_DELBA:
272 dev_dbg(adapter->dev, "event: DELBA Request\n");
273 if (priv->media_connected)
274 mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
275 break;
276 case EVENT_BA_STREAM_TIEMOUT:
277 dev_dbg(adapter->dev, "event: BA Stream timeout\n");
278 if (priv->media_connected) {
279 ba_timeout = (void *)adapter->event_body;
280 mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
281 }
282 break;
283 default:
284 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
285 eventcause);
286 break;
287 }
288
289 return 0;
290}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644
index 000000000000..0966ac24b3b4
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -0,0 +1,340 @@
1/*
2 * Marvell Wireless LAN device driver: AP TX and RX data handling
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "main.h"
23#include "wmm.h"
24#include "11n_aggr.h"
25#include "11n_rxreorder.h"
26
27static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
28 struct sk_buff *skb)
29{
30 struct mwifiex_adapter *adapter = priv->adapter;
31 struct uap_rxpd *uap_rx_pd;
32 struct rx_packet_hdr *rx_pkt_hdr;
33 struct sk_buff *new_skb;
34 struct mwifiex_txinfo *tx_info;
35 int hdr_chop;
36 struct timeval tv;
37 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
38
39 uap_rx_pd = (struct uap_rxpd *)(skb->data);
40 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
41
42 if ((atomic_read(&adapter->pending_bridged_pkts) >=
43 MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
44 dev_err(priv->adapter->dev,
45 "Tx: Bridge packet limit reached. Drop packet!\n");
46 kfree_skb(skb);
47 return;
48 }
49
50 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
51 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
52 /* Chop off the rxpd + the excess memory from
53 * 802.2/llc/snap header that was removed.
54 */
55 hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
56 else
57 /* Chop off the rxpd */
58 hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
59
60 /* Chop off the leading header bytes so the it points
61 * to the start of either the reconstructed EthII frame
62 * or the 802.2/llc/snap frame.
63 */
64 skb_pull(skb, hdr_chop);
65
66 if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
67 dev_dbg(priv->adapter->dev,
68 "data: Tx: insufficient skb headroom %d\n",
69 skb_headroom(skb));
70 /* Insufficient skb headroom - allocate a new skb */
71 new_skb =
72 skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
73 if (unlikely(!new_skb)) {
74 dev_err(priv->adapter->dev,
75 "Tx: cannot allocate new_skb\n");
76 kfree_skb(skb);
77 priv->stats.tx_dropped++;
78 return;
79 }
80
81 kfree_skb(skb);
82 skb = new_skb;
83 dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
84 skb_headroom(skb));
85 }
86
87 tx_info = MWIFIEX_SKB_TXCB(skb);
88 tx_info->bss_num = priv->bss_num;
89 tx_info->bss_type = priv->bss_type;
90 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
91
92 do_gettimeofday(&tv);
93 skb->tstamp = timeval_to_ktime(tv);
94 mwifiex_wmm_add_buf_txqueue(priv, skb);
95 atomic_inc(&adapter->tx_pending);
96 atomic_inc(&adapter->pending_bridged_pkts);
97
98 if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
99 mwifiex_set_trans_start(priv->netdev);
100 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
101 }
102 return;
103}
104
105/*
106 * This function contains logic for AP packet forwarding.
107 *
108 * If a packet is multicast/broadcast, it is sent to kernel/upper layer
109 * as well as queued back to AP TX queue so that it can be sent to other
110 * associated stations.
111 * If a packet is unicast and RA is present in associated station list,
112 * it is again requeued into AP TX queue.
113 * If a packet is unicast and RA is not in associated station list,
114 * packet is forwarded to kernel to handle routing logic.
115 */
116int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
117 struct sk_buff *skb)
118{
119 struct mwifiex_adapter *adapter = priv->adapter;
120 struct uap_rxpd *uap_rx_pd;
121 struct rx_packet_hdr *rx_pkt_hdr;
122 u8 ra[ETH_ALEN];
123 struct sk_buff *skb_uap;
124
125 uap_rx_pd = (struct uap_rxpd *)(skb->data);
126 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
127
128 /* don't do packet forwarding in disconnected state */
129 if (!priv->media_connected) {
130 dev_err(adapter->dev, "drop packet in disconnected state.\n");
131 dev_kfree_skb_any(skb);
132 return 0;
133 }
134
135 memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
136
137 if (is_multicast_ether_addr(ra)) {
138 skb_uap = skb_copy(skb, GFP_ATOMIC);
139 mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
140 } else {
141 if (mwifiex_get_sta_entry(priv, ra)) {
142 /* Requeue Intra-BSS packet */
143 mwifiex_uap_queue_bridged_pkt(priv, skb);
144 return 0;
145 }
146 }
147
148 /* Forward unicat/Inter-BSS packets to kernel. */
149 return mwifiex_process_rx_packet(adapter, skb);
150}
151
152/*
153 * This function processes the packet received on AP interface.
154 *
155 * The function looks into the RxPD and performs sanity tests on the
156 * received buffer to ensure its a valid packet before processing it
157 * further. If the packet is determined to be aggregated, it is
158 * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
159 *
160 * The completion callback is called after processing is complete.
161 */
int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
				  struct sk_buff *skb)
{
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	struct mwifiex_sta_node *node;

	/* Map the bss_num/bss_type stashed in the skb control block back to
	 * the owning interface. */
	struct mwifiex_private *priv =
			mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
					       rx_info->bss_type);

	if (!priv)
		return -1;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* Sanity check: the payload described by the firmware's rxpd must
	 * fit entirely inside the skb; otherwise drop (and complete the
	 * buffer back to the interface driver if it wants it). */
	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
		dev_err(adapter->dev,
			"wrong rx packet: len=%d, offset=%d, length=%d\n",
			skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);

		return 0;
	}

	if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		/* A-MSDU: trim to the aggregate payload, split it into
		 * individual 802.3 frames and deliver each one. The
		 * original skb is consumed by the de-aggregation. */
		__skb_queue_head_init(&list);
		skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
		skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));

		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(adapter, rx_skb);
			if (ret)
				dev_err(adapter->dev,
					"AP:Rx A-MSDU failed");
		}

		return 0;
	} else if (rx_pkt_type == PKT_TYPE_MGMT) {
		/* Management frame: hand off to cfg80211, then free the skb
		 * here (mwifiex_process_mgmt_packet does not consume it). */
		ret = mwifiex_process_mgmt_packet(adapter, skb);
		if (ret)
			dev_err(adapter->dev, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	/* TA (transmitter address) identifies the sending station. */
	memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);

	/* Record the latest RX sequence number per TID for this station. */
	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
						le16_to_cpu(uap_rx_pd->seq_num);
	}

	/* Without 11n (or without a reorder table for this TID/TA and a
	 * non-AMSDU frame), bypass reordering and forward directly. */
	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	     (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	/* On reorder failure, or for BAR frames (which carry no payload to
	 * deliver), complete/free the buffer here. */
	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
		if (adapter->if_ops.data_complete)
			adapter->if_ops.data_complete(adapter, skb);
		else
			dev_kfree_skb_any(skb);
	}

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}
262
263/*
264 * This function fills the TxPD for AP tx packets.
265 *
266 * The Tx buffer received by this function should already have the
267 * header space allocated for TxPD.
268 *
269 * This function inserts the TxPD in between interface header and actual
270 * data and adjusts the buffer pointers accordingly.
271 *
272 * The following TxPD fields are set by this function, as required -
273 * - BSS number
274 * - Tx packet length and offset
275 * - Priority
276 * - Packet delay
277 * - Priority specific Tx control
278 * - Flags
279 */
void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_txpd *txpd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	int pad, len;
	u16 pkt_type;

	/* Empty packet: flag the error in the control block and bail out
	 * without touching the buffer layout. */
	if (!skb->len) {
		dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

	/* If skb->data is not aligned, add padding.
	 * (The "ptr - NULL" form is the GNU extension for taking the
	 * pointer's numeric value; pad is 0..3 so data + txpd start on a
	 * 4-byte boundary.) */
	pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;

	len = sizeof(*txpd) + pad;

	/* Caller must have reserved headroom for txpd + interface header. */
	BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN);

	skb_push(skb, len);

	txpd = (struct uap_txpd *)skb->data;
	memset(txpd, 0, sizeof(*txpd));
	txpd->bss_num = priv->bss_num;
	txpd->bss_type = priv->bss_type;
	/* Payload length excludes the txpd + pad just pushed. */
	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));

	txpd->priority = (u8)skb->priority;
	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority specific tx_control field, setting of 0 will
		 * cause the default value to be used later in this function.
		 */
		txpd->tx_control =
		    cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);

	/* Offset of actual data */
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add header for management frame */
		txpd->tx_pkt_type = cpu_to_le16(pkt_type);
		len += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	txpd->tx_pkt_offset = cpu_to_le16(len);

	/* make space for INTF_HEADER_LEN */
	skb_push(skb, INTF_HEADER_LEN);

	if (!txpd->tx_control)
		/* TxCtrl set by user or default */
		txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 2864c74bdb6f..ae88f80cf86b 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -142,6 +142,46 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
142} 142}
143 143
144/* 144/*
145 * This function processes the received management packet and send it
146 * to the kernel.
147 */
int
mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
			    struct sk_buff *skb)
{
	struct rxpd *rx_pd;
	struct mwifiex_private *priv;
	u16 pkt_len;

	if (!skb)
		return -1;

	rx_pd = (struct rxpd *)skb->data;
	priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
	if (!priv)
		return -1;

	/* Advance past the rxpd and the 2-byte length field that the
	 * firmware places in front of the 802.11 header. rx_pd still points
	 * at the (now headroom) descriptor, which remains valid memory. */
	skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
	skb_pull(skb, sizeof(pkt_len));

	pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);

	/* Remove address4: the firmware delivers a 4-address header; shift
	 * the frame body up so a standard 3-address header remains. */
	memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
		skb->data + sizeof(struct ieee80211_hdr),
		pkt_len - sizeof(struct ieee80211_hdr));

	/* Shrink the reported length by the removed address4 field and the
	 * length prefix pulled above. */
	pkt_len -= ETH_ALEN + sizeof(pkt_len);
	rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);

	/* Hand the frame to cfg80211. NOTE(review): the frequency comes
	 * from the remain-on-channel config; presumably mgmt RX only occurs
	 * during ROC or on the operating channel - confirm against callers. */
	cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
			 CAL_RSSI(rx_pd->snr, rx_pd->nf),
			 skb->data, pkt_len, GFP_ATOMIC);

	return 0;
}
183
184/*
145 * This function processes the received packet before sending it to the 185 * This function processes the received packet before sending it to the
146 * kernel. 186 * kernel.
147 * 187 *
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 3fa4d4176993..600d8194610e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
127 return ra_list; 127 return ra_list;
128} 128}
129 129
130/* This function returns random no between 16 and 32 to be used as threshold
131 * for no of packets after which BA setup is initiated.
132 */
133static u8 mwifiex_get_random_ba_threshold(void)
134{
135 u32 sec, usec;
136 struct timeval ba_tstamp;
137 u8 ba_threshold;
138
139 /* setup ba_packet_threshold here random number between
140 * [BA_SETUP_PACKET_OFFSET,
141 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
142 */
143
144 do_gettimeofday(&ba_tstamp);
145 sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
146 usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
147 ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
148 + BA_SETUP_PACKET_OFFSET;
149
150 return ba_threshold;
151}
152
130/* 153/*
131 * This function allocates and adds a RA list for all TIDs 154 * This function allocates and adds a RA list for all TIDs
132 * with the given RA. 155 * with the given RA.
@@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
137 int i; 160 int i;
138 struct mwifiex_ra_list_tbl *ra_list; 161 struct mwifiex_ra_list_tbl *ra_list;
139 struct mwifiex_adapter *adapter = priv->adapter; 162 struct mwifiex_adapter *adapter = priv->adapter;
163 struct mwifiex_sta_node *node;
164 unsigned long flags;
165
166 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
167 node = mwifiex_get_sta_entry(priv, ra);
168 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
140 169
141 for (i = 0; i < MAX_NUM_TID; ++i) { 170 for (i = 0; i < MAX_NUM_TID; ++i) {
142 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); 171 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
145 if (!ra_list) 174 if (!ra_list)
146 break; 175 break;
147 176
148 if (!mwifiex_queuing_ra_based(priv)) 177 ra_list->is_11n_enabled = 0;
178 if (!mwifiex_queuing_ra_based(priv)) {
149 ra_list->is_11n_enabled = IS_11N_ENABLED(priv); 179 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
150 else 180 } else {
151 ra_list->is_11n_enabled = false; 181 ra_list->is_11n_enabled =
182 mwifiex_is_sta_11n_enabled(priv, node);
183 if (ra_list->is_11n_enabled)
184 ra_list->max_amsdu = node->max_amsdu;
185 }
152 186
153 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", 187 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
154 ra_list, ra_list->is_11n_enabled); 188 ra_list, ra_list->is_11n_enabled);
155 189
190 if (ra_list->is_11n_enabled) {
191 ra_list->pkt_count = 0;
192 ra_list->ba_packet_thr =
193 mwifiex_get_random_ba_threshold();
194 }
156 list_add_tail(&ra_list->list, 195 list_add_tail(&ra_list->list,
157 &priv->wmm.tid_tbl_ptr[i].ra_list); 196 &priv->wmm.tid_tbl_ptr[i].ra_list);
158 197
@@ -423,7 +462,7 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
423 for (i = 0; i < adapter->priv_num; ++i) { 462 for (i = 0; i < adapter->priv_num; ++i) {
424 priv = adapter->priv[i]; 463 priv = adapter->priv[i];
425 if (priv && atomic_read(&priv->wmm.tx_pkts_queued)) 464 if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
426 return false; 465 return false;
427 } 466 }
428 467
429 return true; 468 return true;
@@ -609,7 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
609 u8 ra[ETH_ALEN], tid_down; 648 u8 ra[ETH_ALEN], tid_down;
610 unsigned long flags; 649 unsigned long flags;
611 650
612 if (!priv->media_connected) { 651 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
613 dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); 652 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
614 mwifiex_write_data_complete(adapter, skb, -1); 653 mwifiex_write_data_complete(adapter, skb, -1);
615 return; 654 return;
@@ -624,7 +663,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
624 /* In case of infra as we have already created the list during 663 /* In case of infra as we have already created the list during
625 association we just don't have to call get_queue_raptr, we will 664 association we just don't have to call get_queue_raptr, we will
626 have only 1 raptr for a tid in case of infra */ 665 have only 1 raptr for a tid in case of infra */
627 if (!mwifiex_queuing_ra_based(priv)) { 666 if (!mwifiex_queuing_ra_based(priv) &&
667 !mwifiex_is_skb_mgmt_frame(skb)) {
628 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list)) 668 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
629 ra_list = list_first_entry( 669 ra_list = list_first_entry(
630 &priv->wmm.tid_tbl_ptr[tid_down].ra_list, 670 &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
@@ -633,7 +673,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
633 ra_list = NULL; 673 ra_list = NULL;
634 } else { 674 } else {
635 memcpy(ra, skb->data, ETH_ALEN); 675 memcpy(ra, skb->data, ETH_ALEN);
636 if (ra[0] & 0x01) 676 if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
637 memset(ra, 0xff, ETH_ALEN); 677 memset(ra, 0xff, ETH_ALEN);
638 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); 678 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
639 } 679 }
@@ -647,6 +687,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
647 skb_queue_tail(&ra_list->skb_head, skb); 687 skb_queue_tail(&ra_list->skb_head, skb);
648 688
649 ra_list->total_pkts_size += skb->len; 689 ra_list->total_pkts_size += skb->len;
690 ra_list->pkt_count++;
650 691
651 atomic_inc(&priv->wmm.tx_pkts_queued); 692 atomic_inc(&priv->wmm.tx_pkts_queued);
652 693
@@ -867,17 +908,16 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
867 if (adapter->bss_prio_tbl[j].bss_prio_cur == 908 if (adapter->bss_prio_tbl[j].bss_prio_cur ==
868 (struct mwifiex_bss_prio_node *) 909 (struct mwifiex_bss_prio_node *)
869 &adapter->bss_prio_tbl[j].bss_prio_head) { 910 &adapter->bss_prio_tbl[j].bss_prio_head) {
870 bssprio_node = 911 adapter->bss_prio_tbl[j].bss_prio_cur =
871 list_first_entry(&adapter->bss_prio_tbl[j] 912 list_first_entry(&adapter->bss_prio_tbl[j]
872 .bss_prio_head, 913 .bss_prio_head,
873 struct mwifiex_bss_prio_node, 914 struct mwifiex_bss_prio_node,
874 list); 915 list);
875 bssprio_head = bssprio_node;
876 } else {
877 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
878 bssprio_head = bssprio_node;
879 } 916 }
880 917
918 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
919 bssprio_head = bssprio_node;
920
881 do { 921 do {
882 priv_tmp = bssprio_node->priv; 922 priv_tmp = bssprio_node->priv;
883 hqp = &priv_tmp->wmm.highest_queued_prio; 923 hqp = &priv_tmp->wmm.highest_queued_prio;
@@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
986{ 1026{
987 int count = 0, total_size = 0; 1027 int count = 0, total_size = 0;
988 struct sk_buff *skb, *tmp; 1028 struct sk_buff *skb, *tmp;
1029 int max_amsdu_size;
1030
1031 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
1032 ptr->is_11n_enabled)
1033 max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
1034 else
1035 max_amsdu_size = max_buf_size;
989 1036
990 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) { 1037 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
991 total_size += skb->len; 1038 total_size += skb->len;
992 if (total_size >= max_buf_size) 1039 if (total_size >= max_amsdu_size)
993 break; 1040 break;
994 if (++count >= MIN_NUM_AMSDU) 1041 if (++count >= MIN_NUM_AMSDU)
995 return true; 1042 return true;
@@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1050 skb_queue_tail(&ptr->skb_head, skb); 1097 skb_queue_tail(&ptr->skb_head, skb);
1051 1098
1052 ptr->total_pkts_size += skb->len; 1099 ptr->total_pkts_size += skb->len;
1100 ptr->pkt_count++;
1053 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; 1101 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1054 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1102 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1055 ra_list_flags); 1103 ra_list_flags);
@@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1231 /* ra_list_spinlock has been freed in 1279 /* ra_list_spinlock has been freed in
1232 mwifiex_send_single_packet() */ 1280 mwifiex_send_single_packet() */
1233 } else { 1281 } else {
1234 if (mwifiex_is_ampdu_allowed(priv, tid)) { 1282 if (mwifiex_is_ampdu_allowed(priv, tid) &&
1283 ptr->pkt_count > ptr->ba_packet_thr) {
1235 if (mwifiex_space_avail_for_new_ba_stream(adapter)) { 1284 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1236 mwifiex_create_ba_tbl(priv, ptr->ra, tid, 1285 mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1237 BA_SETUP_INPROGRESS); 1286 BA_SETUP_INPROGRESS);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 224e03ade145..5099e5375cb3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
1830} 1830}
1831 1831
1832static void 1832static void
1833mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) 1833mwl8k_txq_xmit(struct ieee80211_hw *hw,
1834 int index,
1835 struct ieee80211_sta *sta,
1836 struct sk_buff *skb)
1834{ 1837{
1835 struct mwl8k_priv *priv = hw->priv; 1838 struct mwl8k_priv *priv = hw->priv;
1836 struct ieee80211_tx_info *tx_info; 1839 struct ieee80211_tx_info *tx_info;
1837 struct mwl8k_vif *mwl8k_vif; 1840 struct mwl8k_vif *mwl8k_vif;
1838 struct ieee80211_sta *sta;
1839 struct ieee80211_hdr *wh; 1841 struct ieee80211_hdr *wh;
1840 struct mwl8k_tx_queue *txq; 1842 struct mwl8k_tx_queue *txq;
1841 struct mwl8k_tx_desc *tx; 1843 struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1867 wh = &((struct mwl8k_dma_data *)skb->data)->wh; 1869 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1868 1870
1869 tx_info = IEEE80211_SKB_CB(skb); 1871 tx_info = IEEE80211_SKB_CB(skb);
1870 sta = tx_info->control.sta;
1871 mwl8k_vif = MWL8K_VIF(tx_info->control.vif); 1872 mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
1872 1873
1873 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1874 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
2019 tx->pkt_phys_addr = cpu_to_le32(dma); 2020 tx->pkt_phys_addr = cpu_to_le32(dma);
2020 tx->pkt_len = cpu_to_le16(skb->len); 2021 tx->pkt_len = cpu_to_le16(skb->len);
2021 tx->rate_info = 0; 2022 tx->rate_info = 0;
2022 if (!priv->ap_fw && tx_info->control.sta != NULL) 2023 if (!priv->ap_fw && sta != NULL)
2023 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id; 2024 tx->peer_id = MWL8K_STA(sta)->peer_id;
2024 else 2025 else
2025 tx->peer_id = 0; 2026 tx->peer_id = 0;
2026 2027
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
4364/* 4365/*
4365 * Core driver operations. 4366 * Core driver operations.
4366 */ 4367 */
4367static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 4368static void mwl8k_tx(struct ieee80211_hw *hw,
4369 struct ieee80211_tx_control *control,
4370 struct sk_buff *skb)
4368{ 4371{
4369 struct mwl8k_priv *priv = hw->priv; 4372 struct mwl8k_priv *priv = hw->priv;
4370 int index = skb_get_queue_mapping(skb); 4373 int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
4376 return; 4379 return;
4377 } 4380 }
4378 4381
4379 mwl8k_txq_xmit(hw, index, skb); 4382 mwl8k_txq_xmit(hw, index, control->sta, skb);
4380} 4383}
4381 4384
4382static int mwl8k_start(struct ieee80211_hw *hw) 4385static int mwl8k_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 33747e131a96..3b5508f982e8 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -7,6 +7,7 @@
7#include <linux/if_arp.h> 7#include <linux/if_arp.h>
8#include <linux/wireless.h> 8#include <linux/wireless.h>
9#include <linux/ieee80211.h> 9#include <linux/ieee80211.h>
10#include <linux/etherdevice.h>
10#include <net/iw_handler.h> 11#include <net/iw_handler.h>
11#include <net/cfg80211.h> 12#include <net/cfg80211.h>
12#include <net/cfg80211-wext.h> 13#include <net/cfg80211-wext.h>
@@ -159,15 +160,13 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
159 struct orinoco_private *priv = ndev_priv(dev); 160 struct orinoco_private *priv = ndev_priv(dev);
160 int err = -EINPROGRESS; /* Call commit handler */ 161 int err = -EINPROGRESS; /* Call commit handler */
161 unsigned long flags; 162 unsigned long flags;
162 static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
163 static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
164 163
165 if (orinoco_lock(priv, &flags) != 0) 164 if (orinoco_lock(priv, &flags) != 0)
166 return -EBUSY; 165 return -EBUSY;
167 166
168 /* Enable automatic roaming - no sanity checks are needed */ 167 /* Enable automatic roaming - no sanity checks are needed */
169 if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 || 168 if (is_zero_ether_addr(ap_addr->sa_data) ||
170 memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) { 169 is_broadcast_ether_addr(ap_addr->sa_data)) {
171 priv->bssid_fixed = 0; 170 priv->bssid_fixed = 0;
172 memset(priv->desired_bssid, 0, ETH_ALEN); 171 memset(priv->desired_bssid, 0, ETH_ALEN);
173 172
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 14037092ba89..1ef1bfe6a9d7 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -76,6 +76,7 @@ struct p54_channel_entry {
76 u16 freq; 76 u16 freq;
77 u16 data; 77 u16 data;
78 int index; 78 int index;
79 int max_power;
79 enum ieee80211_band band; 80 enum ieee80211_band band;
80}; 81};
81 82
@@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
173 for (i = 0, j = 0; (j < list->band_channel_num[band]) && 174 for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
174 (i < list->entries); i++) { 175 (i < list->entries); i++) {
175 struct p54_channel_entry *chan = &list->channels[i]; 176 struct p54_channel_entry *chan = &list->channels[i];
177 struct ieee80211_channel *dest = &tmp->channels[j];
176 178
177 if (chan->band != band) 179 if (chan->band != band)
178 continue; 180 continue;
@@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
190 continue; 192 continue;
191 } 193 }
192 194
193 tmp->channels[j].band = chan->band; 195 dest->band = chan->band;
194 tmp->channels[j].center_freq = chan->freq; 196 dest->center_freq = chan->freq;
197 dest->max_power = chan->max_power;
195 priv->survey[*chan_num].channel = &tmp->channels[j]; 198 priv->survey[*chan_num].channel = &tmp->channels[j];
196 priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM | 199 priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
197 SURVEY_INFO_CHANNEL_TIME | 200 SURVEY_INFO_CHANNEL_TIME |
198 SURVEY_INFO_CHANNEL_TIME_BUSY | 201 SURVEY_INFO_CHANNEL_TIME_BUSY |
199 SURVEY_INFO_CHANNEL_TIME_TX; 202 SURVEY_INFO_CHANNEL_TIME_TX;
200 tmp->channels[j].hw_value = (*chan_num); 203 dest->hw_value = (*chan_num);
201 j++; 204 j++;
202 (*chan_num)++; 205 (*chan_num)++;
203 } 206 }
@@ -229,10 +232,11 @@ err_out:
229 return ret; 232 return ret;
230} 233}
231 234
232static void p54_update_channel_param(struct p54_channel_list *list, 235static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
233 u16 freq, u16 data) 236 u16 freq, u16 data)
234{ 237{
235 int band, i; 238 int i;
239 struct p54_channel_entry *entry = NULL;
236 240
237 /* 241 /*
238 * usually all lists in the eeprom are mostly sorted. 242 * usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list,
241 */ 245 */
242 for (i = list->entries; i >= 0; i--) { 246 for (i = list->entries; i >= 0; i--) {
243 if (freq == list->channels[i].freq) { 247 if (freq == list->channels[i].freq) {
244 list->channels[i].data |= data; 248 entry = &list->channels[i];
245 break; 249 break;
246 } 250 }
247 } 251 }
248 252
249 if ((i < 0) && (list->entries < list->max_entries)) { 253 if ((i < 0) && (list->entries < list->max_entries)) {
250 /* entry does not exist yet. Initialize a new one. */ 254 /* entry does not exist yet. Initialize a new one. */
251 band = p54_get_band_from_freq(freq); 255 int band = p54_get_band_from_freq(freq);
252 256
253 /* 257 /*
254 * filter out frequencies which don't belong into 258 * filter out frequencies which don't belong into
255 * any supported band. 259 * any supported band.
256 */ 260 */
257 if (band < 0) 261 if (band >= 0) {
258 return ; 262 i = list->entries++;
263 list->band_channel_num[band]++;
264
265 entry = &list->channels[i];
266 entry->freq = freq;
267 entry->band = band;
268 entry->index = ieee80211_frequency_to_channel(freq);
269 entry->max_power = 0;
270 entry->data = 0;
271 }
272 }
259 273
260 i = list->entries++; 274 if (entry)
261 list->band_channel_num[band]++; 275 entry->data |= data;
262 276
263 list->channels[i].freq = freq; 277 return entry;
264 list->channels[i].data = data; 278}
265 list->channels[i].band = band; 279
266 list->channels[i].index = ieee80211_frequency_to_channel(freq); 280static int p54_get_maxpower(struct p54_common *priv, void *data)
267 /* TODO: parse output_limit and fill max_power */ 281{
282 switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
283 case PDR_SYNTH_FRONTEND_LONGBOW: {
284 struct pda_channel_output_limit_longbow *pda = data;
285 int j;
286 u16 rawpower = 0;
287 pda = data;
288 for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
289 struct pda_channel_output_limit_point_longbow *point =
290 &pda->point[j];
291 rawpower = max_t(u16,
292 rawpower, le16_to_cpu(point->val_qpsk));
293 rawpower = max_t(u16,
294 rawpower, le16_to_cpu(point->val_bpsk));
295 rawpower = max_t(u16,
296 rawpower, le16_to_cpu(point->val_16qam));
297 rawpower = max_t(u16,
298 rawpower, le16_to_cpu(point->val_64qam));
299 }
300 /* longbow seems to use 1/16 dBm units */
301 return rawpower / 16;
302 }
303
304 case PDR_SYNTH_FRONTEND_DUETTE3:
305 case PDR_SYNTH_FRONTEND_DUETTE2:
306 case PDR_SYNTH_FRONTEND_FRISBEE:
307 case PDR_SYNTH_FRONTEND_XBOW: {
308 struct pda_channel_output_limit *pda = data;
309 u8 rawpower = 0;
310 rawpower = max(rawpower, pda->val_qpsk);
311 rawpower = max(rawpower, pda->val_bpsk);
312 rawpower = max(rawpower, pda->val_16qam);
313 rawpower = max(rawpower, pda->val_64qam);
314 /* raw values are in 1/4 dBm units */
315 return rawpower / 4;
316 }
317
318 default:
319 return 20;
268 } 320 }
269} 321}
270 322
@@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
315 } 367 }
316 368
317 if (i < priv->output_limit->entries) { 369 if (i < priv->output_limit->entries) {
318 freq = le16_to_cpup((__le16 *) (i * 370 struct p54_channel_entry *tmp;
319 priv->output_limit->entry_size + 371
320 priv->output_limit->offset + 372 void *data = (void *) ((unsigned long) i *
321 priv->output_limit->data)); 373 priv->output_limit->entry_size +
322 374 priv->output_limit->offset +
323 p54_update_channel_param(list, freq, CHAN_HAS_LIMIT); 375 priv->output_limit->data);
376
377 freq = le16_to_cpup((__le16 *) data);
378 tmp = p54_update_channel_param(list, freq,
379 CHAN_HAS_LIMIT);
380 if (tmp) {
381 tmp->max_power = p54_get_maxpower(priv, data);
382 }
324 } 383 }
325 384
326 if (i < priv->curve_data->entries) { 385 if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@ good_eeprom:
834 goto err; 893 goto err;
835 } 894 }
836 895
896 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
897
837 err = p54_generate_channel_lists(dev); 898 err = p54_generate_channel_lists(dev);
838 if (err) 899 if (err)
839 goto err; 900 goto err;
840 901
841 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
842 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) 902 if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
843 p54_init_xbow_synth(priv); 903 p54_init_xbow_synth(priv);
844 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) 904 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index afde72b84606..20ebe39a3f4e 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -57,6 +57,18 @@ struct pda_channel_output_limit {
57 u8 rate_set_size; 57 u8 rate_set_size;
58} __packed; 58} __packed;
59 59
60struct pda_channel_output_limit_point_longbow {
61 __le16 val_bpsk;
62 __le16 val_qpsk;
63 __le16 val_16qam;
64 __le16 val_64qam;
65} __packed;
66
67struct pda_channel_output_limit_longbow {
68 __le16 freq;
69 struct pda_channel_output_limit_point_longbow point[3];
70} __packed;
71
60struct pda_pa_curve_data_sample_rev0 { 72struct pda_pa_curve_data_sample_rev0 {
61 u8 rf_power; 73 u8 rf_power;
62 u8 pa_detector; 74 u8 pa_detector;
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 3d8d622bec55..de1d46bf97df 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
526void p54_unregister_leds(struct p54_common *priv); 526void p54_unregister_leds(struct p54_common *priv);
527 527
528/* xmit functions */ 528/* xmit functions */
529void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); 529void p54_tx_80211(struct ieee80211_hw *dev,
530 struct ieee80211_tx_control *control,
531 struct sk_buff *skb);
530int p54_tx_cancel(struct p54_common *priv, __le32 req_id); 532int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
531void p54_tx(struct p54_common *priv, struct sk_buff *skb); 533void p54_tx(struct p54_common *priv, struct sk_buff *skb);
532 534
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 7cffea795ad2..aadda99989c0 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -139,6 +139,7 @@ static int p54_beacon_format_ie_tim(struct sk_buff *skb)
139static int p54_beacon_update(struct p54_common *priv, 139static int p54_beacon_update(struct p54_common *priv,
140 struct ieee80211_vif *vif) 140 struct ieee80211_vif *vif)
141{ 141{
142 struct ieee80211_tx_control control = { };
142 struct sk_buff *beacon; 143 struct sk_buff *beacon;
143 int ret; 144 int ret;
144 145
@@ -158,7 +159,7 @@ static int p54_beacon_update(struct p54_common *priv,
158 * to cancel the old beacon template by hand, instead the firmware 159 * to cancel the old beacon template by hand, instead the firmware
159 * will release the previous one through the feedback mechanism. 160 * will release the previous one through the feedback mechanism.
160 */ 161 */
161 p54_tx_80211(priv->hw, beacon); 162 p54_tx_80211(priv->hw, &control, beacon);
162 priv->tsf_high32 = 0; 163 priv->tsf_high32 = 0;
163 priv->tsf_low32 = 0; 164 priv->tsf_low32 = 0;
164 165
@@ -514,6 +515,17 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
514 if (modparam_nohwcrypt) 515 if (modparam_nohwcrypt)
515 return -EOPNOTSUPP; 516 return -EOPNOTSUPP;
516 517
518 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
519 /*
520 * Unfortunately most/all firmwares are trying to decrypt
521 * incoming management frames if a suitable key can be found.
522 * However, in doing so the data in these frames gets
523 * corrupted. So, we can't have firmware supported crypto
524 * offload in this case.
525 */
526 return -EOPNOTSUPP;
527 }
528
517 mutex_lock(&priv->conf_mutex); 529 mutex_lock(&priv->conf_mutex);
518 if (cmd == SET_KEY) { 530 if (cmd == SET_KEY) {
519 switch (key->cipher) { 531 switch (key->cipher) {
@@ -737,6 +749,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
737 IEEE80211_HW_SIGNAL_DBM | 749 IEEE80211_HW_SIGNAL_DBM |
738 IEEE80211_HW_SUPPORTS_PS | 750 IEEE80211_HW_SUPPORTS_PS |
739 IEEE80211_HW_PS_NULLFUNC_STACK | 751 IEEE80211_HW_PS_NULLFUNC_STACK |
752 IEEE80211_HW_MFP_CAPABLE |
740 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 753 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
741 754
742 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 755 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 89318adc8c7f..b4390797d78c 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev)
488 return 0; 488 return 0;
489} 489}
490 490
491static void p54p_firmware_step2(const struct firmware *fw,
492 void *context)
493{
494 struct p54p_priv *priv = context;
495 struct ieee80211_hw *dev = priv->common.hw;
496 struct pci_dev *pdev = priv->pdev;
497 int err;
498
499 if (!fw) {
500 dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
501 err = -ENOENT;
502 goto out;
503 }
504
505 priv->firmware = fw;
506
507 err = p54p_open(dev);
508 if (err)
509 goto out;
510 err = p54_read_eeprom(dev);
511 p54p_stop(dev);
512 if (err)
513 goto out;
514
515 err = p54_register_common(dev, &pdev->dev);
516 if (err)
517 goto out;
518
519out:
520
521 complete(&priv->fw_loaded);
522
523 if (err) {
524 struct device *parent = pdev->dev.parent;
525
526 if (parent)
527 device_lock(parent);
528
529 /*
530 * This will indirectly result in a call to p54p_remove.
531 * Hence, we don't need to bother with freeing any
532 * allocated ressources at all.
533 */
534 device_release_driver(&pdev->dev);
535
536 if (parent)
537 device_unlock(parent);
538 }
539
540 pci_dev_put(pdev);
541}
542
491static int __devinit p54p_probe(struct pci_dev *pdev, 543static int __devinit p54p_probe(struct pci_dev *pdev,
492 const struct pci_device_id *id) 544 const struct pci_device_id *id)
493{ 545{
@@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
496 unsigned long mem_addr, mem_len; 548 unsigned long mem_addr, mem_len;
497 int err; 549 int err;
498 550
551 pci_dev_get(pdev);
499 err = pci_enable_device(pdev); 552 err = pci_enable_device(pdev);
500 if (err) { 553 if (err) {
501 dev_err(&pdev->dev, "Cannot enable new PCI device\n"); 554 dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
537 priv = dev->priv; 590 priv = dev->priv;
538 priv->pdev = pdev; 591 priv->pdev = pdev;
539 592
593 init_completion(&priv->fw_loaded);
540 SET_IEEE80211_DEV(dev, &pdev->dev); 594 SET_IEEE80211_DEV(dev, &pdev->dev);
541 pci_set_drvdata(pdev, dev); 595 pci_set_drvdata(pdev, dev);
542 596
@@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
561 spin_lock_init(&priv->lock); 615 spin_lock_init(&priv->lock);
562 tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); 616 tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
563 617
564 err = request_firmware(&priv->firmware, "isl3886pci", 618 err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
565 &priv->pdev->dev); 619 &priv->pdev->dev, GFP_KERNEL,
566 if (err) { 620 priv, p54p_firmware_step2);
567 dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); 621 if (!err)
568 err = request_firmware(&priv->firmware, "isl3886", 622 return 0;
569 &priv->pdev->dev);
570 if (err)
571 goto err_free_common;
572 }
573
574 err = p54p_open(dev);
575 if (err)
576 goto err_free_common;
577 err = p54_read_eeprom(dev);
578 p54p_stop(dev);
579 if (err)
580 goto err_free_common;
581
582 err = p54_register_common(dev, &pdev->dev);
583 if (err)
584 goto err_free_common;
585
586 return 0;
587 623
588 err_free_common:
589 release_firmware(priv->firmware);
590 pci_free_consistent(pdev, sizeof(*priv->ring_control), 624 pci_free_consistent(pdev, sizeof(*priv->ring_control),
591 priv->ring_control, priv->ring_control_dma); 625 priv->ring_control, priv->ring_control_dma);
592 626
@@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
601 pci_release_regions(pdev); 635 pci_release_regions(pdev);
602 err_disable_dev: 636 err_disable_dev:
603 pci_disable_device(pdev); 637 pci_disable_device(pdev);
638 pci_dev_put(pdev);
604 return err; 639 return err;
605} 640}
606 641
@@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
612 if (!dev) 647 if (!dev)
613 return; 648 return;
614 649
615 p54_unregister_common(dev);
616 priv = dev->priv; 650 priv = dev->priv;
651 wait_for_completion(&priv->fw_loaded);
652 p54_unregister_common(dev);
617 release_firmware(priv->firmware); 653 release_firmware(priv->firmware);
618 pci_free_consistent(pdev, sizeof(*priv->ring_control), 654 pci_free_consistent(pdev, sizeof(*priv->ring_control),
619 priv->ring_control, priv->ring_control_dma); 655 priv->ring_control, priv->ring_control_dma);
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 7aa509f7e387..68405c142f97 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -105,6 +105,7 @@ struct p54p_priv {
105 struct sk_buff *tx_buf_data[32]; 105 struct sk_buff *tx_buf_data[32];
106 struct sk_buff *tx_buf_mgmt[4]; 106 struct sk_buff *tx_buf_mgmt[4];
107 struct completion boot_comp; 107 struct completion boot_comp;
108 struct completion fw_loaded;
108}; 109};
109 110
110#endif /* P54USB_H */ 111#endif /* P54USB_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f38786e02623..5861e13a6fd8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
676EXPORT_SYMBOL_GPL(p54_rx); 676EXPORT_SYMBOL_GPL(p54_rx);
677 677
678static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, 678static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
679 struct ieee80211_tx_info *info, u8 *queue, 679 struct ieee80211_tx_info *info,
680 u32 *extra_len, u16 *flags, u16 *aid, 680 struct ieee80211_sta *sta,
681 u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
681 bool *burst_possible) 682 bool *burst_possible)
682{ 683{
683 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 684 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
746 } 747 }
747 } 748 }
748 749
749 if (info->control.sta) 750 if (sta)
750 *aid = info->control.sta->aid; 751 *aid = sta->aid;
751 break; 752 break;
752 } 753 }
753} 754}
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
767 } 768 }
768} 769}
769 770
770void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) 771void p54_tx_80211(struct ieee80211_hw *dev,
772 struct ieee80211_tx_control *control,
773 struct sk_buff *skb)
771{ 774{
772 struct p54_common *priv = dev->priv; 775 struct p54_common *priv = dev->priv;
773 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 776 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
784 u8 nrates = 0, nremaining = 8; 787 u8 nrates = 0, nremaining = 8;
785 bool burst_allowed = false; 788 bool burst_allowed = false;
786 789
787 p54_tx_80211_header(priv, skb, info, &queue, &extra_len, 790 p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
788 &hdr_flags, &aid, &burst_allowed); 791 &hdr_flags, &aid, &burst_allowed);
789 792
790 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 793 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 7a4ae9ee1c63..bd1f0cb56085 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1959,9 +1959,6 @@ static int rndis_scan(struct wiphy *wiphy,
1959 */ 1959 */
1960 rndis_check_bssid_list(usbdev, NULL, NULL); 1960 rndis_check_bssid_list(usbdev, NULL, NULL);
1961 1961
1962 if (!request)
1963 return -EINVAL;
1964
1965 if (priv->scan_request && priv->scan_request != request) 1962 if (priv->scan_request && priv->scan_request != request)
1966 return -EBUSY; 1963 return -EBUSY;
1967 1964
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 64328af496f5..e3a2d9070cf6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -205,7 +205,7 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
205 u32 reg; 205 u32 reg;
206 206
207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
208 return rt2x00_get_field32(reg, GPIOCSR_BIT0); 208 return rt2x00_get_field32(reg, GPIOCSR_VAL0);
209} 209}
210 210
211#ifdef CONFIG_RT2X00_LIB_LEDS 211#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1629,7 +1629,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1629 * rfkill switch GPIO pin correctly. 1629 * rfkill switch GPIO pin correctly.
1630 */ 1630 */
1631 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 1631 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
1632 rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1); 1632 rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
1633 rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); 1633 rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
1634 1634
1635 /* 1635 /*
@@ -1789,7 +1789,6 @@ static const struct data_queue_desc rt2400pci_queue_atim = {
1789 1789
1790static const struct rt2x00_ops rt2400pci_ops = { 1790static const struct rt2x00_ops rt2400pci_ops = {
1791 .name = KBUILD_MODNAME, 1791 .name = KBUILD_MODNAME,
1792 .max_sta_intf = 1,
1793 .max_ap_intf = 1, 1792 .max_ap_intf = 1,
1794 .eeprom_size = EEPROM_SIZE, 1793 .eeprom_size = EEPROM_SIZE,
1795 .rf_size = RF_SIZE, 1794 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index 7564ae992b73..e4b07f0aa3cc 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -660,17 +660,26 @@
660 660
661/* 661/*
662 * GPIOCSR: GPIO control register. 662 * GPIOCSR: GPIO control register.
663 * GPIOCSR_VALx: Actual GPIO pin x value
664 * GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
663 */ 665 */
664#define GPIOCSR 0x0120 666#define GPIOCSR 0x0120
665#define GPIOCSR_BIT0 FIELD32(0x00000001) 667#define GPIOCSR_VAL0 FIELD32(0x00000001)
666#define GPIOCSR_BIT1 FIELD32(0x00000002) 668#define GPIOCSR_VAL1 FIELD32(0x00000002)
667#define GPIOCSR_BIT2 FIELD32(0x00000004) 669#define GPIOCSR_VAL2 FIELD32(0x00000004)
668#define GPIOCSR_BIT3 FIELD32(0x00000008) 670#define GPIOCSR_VAL3 FIELD32(0x00000008)
669#define GPIOCSR_BIT4 FIELD32(0x00000010) 671#define GPIOCSR_VAL4 FIELD32(0x00000010)
670#define GPIOCSR_BIT5 FIELD32(0x00000020) 672#define GPIOCSR_VAL5 FIELD32(0x00000020)
671#define GPIOCSR_BIT6 FIELD32(0x00000040) 673#define GPIOCSR_VAL6 FIELD32(0x00000040)
672#define GPIOCSR_BIT7 FIELD32(0x00000080) 674#define GPIOCSR_VAL7 FIELD32(0x00000080)
673#define GPIOCSR_BIT8 FIELD32(0x00000100) 675#define GPIOCSR_DIR0 FIELD32(0x00000100)
676#define GPIOCSR_DIR1 FIELD32(0x00000200)
677#define GPIOCSR_DIR2 FIELD32(0x00000400)
678#define GPIOCSR_DIR3 FIELD32(0x00000800)
679#define GPIOCSR_DIR4 FIELD32(0x00001000)
680#define GPIOCSR_DIR5 FIELD32(0x00002000)
681#define GPIOCSR_DIR6 FIELD32(0x00004000)
682#define GPIOCSR_DIR7 FIELD32(0x00008000)
674 683
675/* 684/*
676 * BBPPCSR: BBP Pin control register. 685 * BBPPCSR: BBP Pin control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 3de0406735f6..479d756e275b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -205,7 +205,7 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
205 u32 reg; 205 u32 reg;
206 206
207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 207 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
208 return rt2x00_get_field32(reg, GPIOCSR_BIT0); 208 return rt2x00_get_field32(reg, GPIOCSR_VAL0);
209} 209}
210 210
211#ifdef CONFIG_RT2X00_LIB_LEDS 211#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2081,7 +2081,6 @@ static const struct data_queue_desc rt2500pci_queue_atim = {
2081 2081
2082static const struct rt2x00_ops rt2500pci_ops = { 2082static const struct rt2x00_ops rt2500pci_ops = {
2083 .name = KBUILD_MODNAME, 2083 .name = KBUILD_MODNAME,
2084 .max_sta_intf = 1,
2085 .max_ap_intf = 1, 2084 .max_ap_intf = 1,
2086 .eeprom_size = EEPROM_SIZE, 2085 .eeprom_size = EEPROM_SIZE,
2087 .rf_size = RF_SIZE, 2086 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 2aad7ba8a100..9c10068e4987 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -789,16 +789,18 @@
789 789
790/* 790/*
791 * GPIOCSR: GPIO control register. 791 * GPIOCSR: GPIO control register.
792 * GPIOCSR_VALx: GPIO value
793 * GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
792 */ 794 */
793#define GPIOCSR 0x0120 795#define GPIOCSR 0x0120
794#define GPIOCSR_BIT0 FIELD32(0x00000001) 796#define GPIOCSR_VAL0 FIELD32(0x00000001)
795#define GPIOCSR_BIT1 FIELD32(0x00000002) 797#define GPIOCSR_VAL1 FIELD32(0x00000002)
796#define GPIOCSR_BIT2 FIELD32(0x00000004) 798#define GPIOCSR_VAL2 FIELD32(0x00000004)
797#define GPIOCSR_BIT3 FIELD32(0x00000008) 799#define GPIOCSR_VAL3 FIELD32(0x00000008)
798#define GPIOCSR_BIT4 FIELD32(0x00000010) 800#define GPIOCSR_VAL4 FIELD32(0x00000010)
799#define GPIOCSR_BIT5 FIELD32(0x00000020) 801#define GPIOCSR_VAL5 FIELD32(0x00000020)
800#define GPIOCSR_BIT6 FIELD32(0x00000040) 802#define GPIOCSR_VAL6 FIELD32(0x00000040)
801#define GPIOCSR_BIT7 FIELD32(0x00000080) 803#define GPIOCSR_VAL7 FIELD32(0x00000080)
802#define GPIOCSR_DIR0 FIELD32(0x00000100) 804#define GPIOCSR_DIR0 FIELD32(0x00000100)
803#define GPIOCSR_DIR1 FIELD32(0x00000200) 805#define GPIOCSR_DIR1 FIELD32(0x00000200)
804#define GPIOCSR_DIR2 FIELD32(0x00000400) 806#define GPIOCSR_DIR2 FIELD32(0x00000400)
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 89fee311d8fd..a12e84f892be 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
283 u16 reg; 283 u16 reg;
284 284
285 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); 285 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
286 return rt2x00_get_field16(reg, MAC_CSR19_BIT7); 286 return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
287} 287}
288 288
289#ifdef CONFIG_RT2X00_LIB_LEDS 289#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1786,7 +1786,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1786 * rfkill switch GPIO pin correctly. 1786 * rfkill switch GPIO pin correctly.
1787 */ 1787 */
1788 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); 1788 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
1789 rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0); 1789 rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
1790 rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); 1790 rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
1791 1791
1792 /* 1792 /*
@@ -1896,7 +1896,6 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
1896 1896
1897static const struct rt2x00_ops rt2500usb_ops = { 1897static const struct rt2x00_ops rt2500usb_ops = {
1898 .name = KBUILD_MODNAME, 1898 .name = KBUILD_MODNAME,
1899 .max_sta_intf = 1,
1900 .max_ap_intf = 1, 1899 .max_ap_intf = 1,
1901 .eeprom_size = EEPROM_SIZE, 1900 .eeprom_size = EEPROM_SIZE,
1902 .rf_size = RF_SIZE, 1901 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 196bd5103e4f..1b91a4cef965 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -187,17 +187,26 @@
187 187
188/* 188/*
189 * MAC_CSR19: GPIO control register. 189 * MAC_CSR19: GPIO control register.
190 * MAC_CSR19_VALx: GPIO value
191 * MAC_CSR19_DIRx: GPIO direction: 0 = input; 1 = output
190 */ 192 */
191#define MAC_CSR19 0x0426 193#define MAC_CSR19 0x0426
192#define MAC_CSR19_BIT0 FIELD16(0x0001) 194#define MAC_CSR19_VAL0 FIELD16(0x0001)
193#define MAC_CSR19_BIT1 FIELD16(0x0002) 195#define MAC_CSR19_VAL1 FIELD16(0x0002)
194#define MAC_CSR19_BIT2 FIELD16(0x0004) 196#define MAC_CSR19_VAL2 FIELD16(0x0004)
195#define MAC_CSR19_BIT3 FIELD16(0x0008) 197#define MAC_CSR19_VAL3 FIELD16(0x0008)
196#define MAC_CSR19_BIT4 FIELD16(0x0010) 198#define MAC_CSR19_VAL4 FIELD16(0x0010)
197#define MAC_CSR19_BIT5 FIELD16(0x0020) 199#define MAC_CSR19_VAL5 FIELD16(0x0020)
198#define MAC_CSR19_BIT6 FIELD16(0x0040) 200#define MAC_CSR19_VAL6 FIELD16(0x0040)
199#define MAC_CSR19_BIT7 FIELD16(0x0080) 201#define MAC_CSR19_VAL7 FIELD16(0x0080)
200#define MAC_CSR19_BIT8 FIELD16(0x0100) 202#define MAC_CSR19_DIR0 FIELD16(0x0100)
203#define MAC_CSR19_DIR1 FIELD16(0x0200)
204#define MAC_CSR19_DIR2 FIELD16(0x0400)
205#define MAC_CSR19_DIR3 FIELD16(0x0800)
206#define MAC_CSR19_DIR4 FIELD16(0x1000)
207#define MAC_CSR19_DIR5 FIELD16(0x2000)
208#define MAC_CSR19_DIR6 FIELD16(0x4000)
209#define MAC_CSR19_DIR7 FIELD16(0x8000)
201 210
202/* 211/*
203 * MAC_CSR20: LED control register. 212 * MAC_CSR20: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index e252e9bafd0e..6d67c3ede651 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -439,26 +439,33 @@
439#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000) 439#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
440 440
441/* 441/*
442 * GPIO_CTRL_CFG: 442 * GPIO_CTRL:
443 * GPIOD: GPIO direction, 0: Output, 1: Input 443 * GPIO_CTRL_VALx: GPIO value
444 */ 444 * GPIO_CTRL_DIRx: GPIO direction: 0 = output; 1 = input
445#define GPIO_CTRL_CFG 0x0228 445 */
446#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001) 446#define GPIO_CTRL 0x0228
447#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002) 447#define GPIO_CTRL_VAL0 FIELD32(0x00000001)
448#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004) 448#define GPIO_CTRL_VAL1 FIELD32(0x00000002)
449#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008) 449#define GPIO_CTRL_VAL2 FIELD32(0x00000004)
450#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010) 450#define GPIO_CTRL_VAL3 FIELD32(0x00000008)
451#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020) 451#define GPIO_CTRL_VAL4 FIELD32(0x00000010)
452#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040) 452#define GPIO_CTRL_VAL5 FIELD32(0x00000020)
453#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080) 453#define GPIO_CTRL_VAL6 FIELD32(0x00000040)
454#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100) 454#define GPIO_CTRL_VAL7 FIELD32(0x00000080)
455#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200) 455#define GPIO_CTRL_DIR0 FIELD32(0x00000100)
456#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400) 456#define GPIO_CTRL_DIR1 FIELD32(0x00000200)
457#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800) 457#define GPIO_CTRL_DIR2 FIELD32(0x00000400)
458#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000) 458#define GPIO_CTRL_DIR3 FIELD32(0x00000800)
459#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000) 459#define GPIO_CTRL_DIR4 FIELD32(0x00001000)
460#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000) 460#define GPIO_CTRL_DIR5 FIELD32(0x00002000)
461#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000) 461#define GPIO_CTRL_DIR6 FIELD32(0x00004000)
462#define GPIO_CTRL_DIR7 FIELD32(0x00008000)
463#define GPIO_CTRL_VAL8 FIELD32(0x00010000)
464#define GPIO_CTRL_VAL9 FIELD32(0x00020000)
465#define GPIO_CTRL_VAL10 FIELD32(0x00040000)
466#define GPIO_CTRL_DIR8 FIELD32(0x01000000)
467#define GPIO_CTRL_DIR9 FIELD32(0x02000000)
468#define GPIO_CTRL_DIR10 FIELD32(0x04000000)
462 469
463/* 470/*
464 * MCU_CMD_CFG 471 * MCU_CMD_CFG
@@ -1936,6 +1943,11 @@ struct mac_iveiv_entry {
1936#define BBP47_TSSI_ADC6 FIELD8(0x80) 1943#define BBP47_TSSI_ADC6 FIELD8(0x80)
1937 1944
1938/* 1945/*
1946 * BBP 49
1947 */
1948#define BBP49_UPDATE_FLAG FIELD8(0x01)
1949
1950/*
1939 * BBP 109 1951 * BBP 109
1940 */ 1952 */
1941#define BBP109_TX0_POWER FIELD8(0x0f) 1953#define BBP109_TX0_POWER FIELD8(0x0f)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b93516d832fb..540c94f8505a 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -923,8 +923,8 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
923 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); 923 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
924 return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0); 924 return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
925 } else { 925 } else {
926 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 926 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
927 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2); 927 return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
928 } 928 }
929} 929}
930EXPORT_SYMBOL_GPL(rt2800_rfkill_poll); 930EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1570,10 +1570,10 @@ static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
1570 rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff, 1570 rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
1571 eesk_pin, 0); 1571 eesk_pin, 0);
1572 1572
1573 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 1573 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
1574 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0); 1574 rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
1575 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3); 1575 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
1576 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 1576 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
1577} 1577}
1578 1578
1579void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) 1579void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
@@ -1615,6 +1615,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1615 case 1: 1615 case 1:
1616 if (rt2x00_rt(rt2x00dev, RT3070) || 1616 if (rt2x00_rt(rt2x00dev, RT3070) ||
1617 rt2x00_rt(rt2x00dev, RT3090) || 1617 rt2x00_rt(rt2x00dev, RT3090) ||
1618 rt2x00_rt(rt2x00dev, RT3352) ||
1618 rt2x00_rt(rt2x00dev, RT3390)) { 1619 rt2x00_rt(rt2x00dev, RT3390)) {
1619 rt2x00_eeprom_read(rt2x00dev, 1620 rt2x00_eeprom_read(rt2x00dev,
1620 EEPROM_NIC_CONF1, &eeprom); 1621 EEPROM_NIC_CONF1, &eeprom);
@@ -1762,36 +1763,15 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
1762 1763
1763 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 1764 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
1764 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); 1765 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
1766 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
1767 rt2x00dev->default_ant.rx_chain_num <= 1);
1768 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD,
1769 rt2x00dev->default_ant.rx_chain_num <= 2);
1765 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); 1770 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
1766 if (rt2x00_rt(rt2x00dev, RT3390)) { 1771 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
1767 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1772 rt2x00dev->default_ant.tx_chain_num <= 1);
1768 rt2x00dev->default_ant.rx_chain_num == 1); 1773 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD,
1769 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1774 rt2x00dev->default_ant.tx_chain_num <= 2);
1770 rt2x00dev->default_ant.tx_chain_num == 1);
1771 } else {
1772 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
1773 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
1774 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
1775 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
1776
1777 switch (rt2x00dev->default_ant.tx_chain_num) {
1778 case 1:
1779 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
1780 /* fall through */
1781 case 2:
1782 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
1783 break;
1784 }
1785
1786 switch (rt2x00dev->default_ant.rx_chain_num) {
1787 case 1:
1788 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
1789 /* fall through */
1790 case 2:
1791 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
1792 break;
1793 }
1794 }
1795 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 1775 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
1796 1776
1797 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 1777 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
@@ -1995,13 +1975,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1995 rt2800_rfcsr_write(rt2x00dev, 29, 0x9f); 1975 rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
1996 } 1976 }
1997 1977
1998 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 1978 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
1999 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0); 1979 rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
2000 if (rf->channel <= 14) 1980 if (rf->channel <= 14)
2001 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1); 1981 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
2002 else 1982 else
2003 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0); 1983 rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
2004 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 1984 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
2005 1985
2006 rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr); 1986 rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
2007 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); 1987 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
@@ -2053,6 +2033,60 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
2053 } 2033 }
2054} 2034}
2055 2035
2036static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
2037 struct ieee80211_conf *conf,
2038 struct rf_channel *rf,
2039 struct channel_info *info)
2040{
2041 u8 rfcsr;
2042
2043 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
2044 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
2045
2046 rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
2047 rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
2048 rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
2049
2050 if (info->default_power1 > POWER_BOUND)
2051 rt2800_rfcsr_write(rt2x00dev, 47, POWER_BOUND);
2052 else
2053 rt2800_rfcsr_write(rt2x00dev, 47, info->default_power1);
2054
2055 if (info->default_power2 > POWER_BOUND)
2056 rt2800_rfcsr_write(rt2x00dev, 48, POWER_BOUND);
2057 else
2058 rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
2059
2060 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
2061 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2062 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2063 else
2064 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2065
2066 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2067
2068 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2069 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
2070 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
2071
2072 if ( rt2x00dev->default_ant.tx_chain_num == 2 )
2073 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
2074 else
2075 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
2076
2077 if ( rt2x00dev->default_ant.rx_chain_num == 2 )
2078 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
2079 else
2080 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
2081
2082 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
2083 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
2084
2085 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2086
2087 rt2800_rfcsr_write(rt2x00dev, 31, 80);
2088}
2089
2056static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, 2090static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2057 struct ieee80211_conf *conf, 2091 struct ieee80211_conf *conf,
2058 struct rf_channel *rf, 2092 struct rf_channel *rf,
@@ -2182,6 +2216,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2182 case RF3290: 2216 case RF3290:
2183 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info); 2217 rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
2184 break; 2218 break;
2219 case RF3322:
2220 rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
2221 break;
2185 case RF5360: 2222 case RF5360:
2186 case RF5370: 2223 case RF5370:
2187 case RF5372: 2224 case RF5372:
@@ -2194,6 +2231,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2194 } 2231 }
2195 2232
2196 if (rt2x00_rf(rt2x00dev, RF3290) || 2233 if (rt2x00_rf(rt2x00dev, RF3290) ||
2234 rt2x00_rf(rt2x00dev, RF3322) ||
2197 rt2x00_rf(rt2x00dev, RF5360) || 2235 rt2x00_rf(rt2x00dev, RF5360) ||
2198 rt2x00_rf(rt2x00dev, RF5370) || 2236 rt2x00_rf(rt2x00dev, RF5370) ||
2199 rt2x00_rf(rt2x00dev, RF5372) || 2237 rt2x00_rf(rt2x00dev, RF5372) ||
@@ -2212,10 +2250,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2212 /* 2250 /*
2213 * Change BBP settings 2251 * Change BBP settings
2214 */ 2252 */
2215 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); 2253 if (rt2x00_rt(rt2x00dev, RT3352)) {
2216 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); 2254 rt2800_bbp_write(rt2x00dev, 27, 0x0);
2217 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); 2255 rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
2218 rt2800_bbp_write(rt2x00dev, 86, 0); 2256 rt2800_bbp_write(rt2x00dev, 27, 0x20);
2257 rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
2258 } else {
2259 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
2260 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
2261 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
2262 rt2800_bbp_write(rt2x00dev, 86, 0);
2263 }
2219 2264
2220 if (rf->channel <= 14) { 2265 if (rf->channel <= 14) {
2221 if (!rt2x00_rt(rt2x00dev, RT5390) && 2266 if (!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -2310,6 +2355,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2310 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg); 2355 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
2311 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg); 2356 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
2312 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg); 2357 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
2358
2359 /*
2360 * Clear update flag
2361 */
2362 if (rt2x00_rt(rt2x00dev, RT3352)) {
2363 rt2800_bbp_read(rt2x00dev, 49, &bbp);
2364 rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
2365 rt2800_bbp_write(rt2x00dev, 49, bbp);
2366 }
2313} 2367}
2314 2368
2315static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) 2369static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
@@ -2821,23 +2875,32 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
2821 2875
2822static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) 2876static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
2823{ 2877{
2878 u8 vgc;
2879
2824 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 2880 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
2825 if (rt2x00_rt(rt2x00dev, RT3070) || 2881 if (rt2x00_rt(rt2x00dev, RT3070) ||
2826 rt2x00_rt(rt2x00dev, RT3071) || 2882 rt2x00_rt(rt2x00dev, RT3071) ||
2827 rt2x00_rt(rt2x00dev, RT3090) || 2883 rt2x00_rt(rt2x00dev, RT3090) ||
2828 rt2x00_rt(rt2x00dev, RT3290) || 2884 rt2x00_rt(rt2x00dev, RT3290) ||
2829 rt2x00_rt(rt2x00dev, RT3390) || 2885 rt2x00_rt(rt2x00dev, RT3390) ||
2886 rt2x00_rt(rt2x00dev, RT3572) ||
2830 rt2x00_rt(rt2x00dev, RT5390) || 2887 rt2x00_rt(rt2x00dev, RT5390) ||
2831 rt2x00_rt(rt2x00dev, RT5392)) 2888 rt2x00_rt(rt2x00dev, RT5392))
2832 return 0x1c + (2 * rt2x00dev->lna_gain); 2889 vgc = 0x1c + (2 * rt2x00dev->lna_gain);
2833 else 2890 else
2834 return 0x2e + rt2x00dev->lna_gain; 2891 vgc = 0x2e + rt2x00dev->lna_gain;
2892 } else { /* 5GHZ band */
2893 if (rt2x00_rt(rt2x00dev, RT3572))
2894 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
2895 else {
2896 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
2897 vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
2898 else
2899 vgc = 0x3a + (rt2x00dev->lna_gain * 5) / 3;
2900 }
2835 } 2901 }
2836 2902
2837 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) 2903 return vgc;
2838 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
2839 else
2840 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
2841} 2904}
2842 2905
2843static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev, 2906static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
@@ -2998,11 +3061,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2998 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 3061 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
2999 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 3062 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
3000 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030); 3063 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
3064 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
3065 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
3066 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3067 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
3001 } else if (rt2x00_rt(rt2x00dev, RT3572)) { 3068 } else if (rt2x00_rt(rt2x00dev, RT3572)) {
3002 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 3069 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
3003 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3070 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3004 } else if (rt2x00_rt(rt2x00dev, RT5390) || 3071 } else if (rt2x00_rt(rt2x00dev, RT5390) ||
3005 rt2x00_rt(rt2x00dev, RT5392)) { 3072 rt2x00_rt(rt2x00dev, RT5392)) {
3006 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 3073 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
3007 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3074 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3008 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 3075 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3378,6 +3445,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3378 rt2800_wait_bbp_ready(rt2x00dev))) 3445 rt2800_wait_bbp_ready(rt2x00dev)))
3379 return -EACCES; 3446 return -EACCES;
3380 3447
3448 if (rt2x00_rt(rt2x00dev, RT3352)) {
3449 rt2800_bbp_write(rt2x00dev, 3, 0x00);
3450 rt2800_bbp_write(rt2x00dev, 4, 0x50);
3451 }
3452
3381 if (rt2x00_rt(rt2x00dev, RT3290) || 3453 if (rt2x00_rt(rt2x00dev, RT3290) ||
3382 rt2x00_rt(rt2x00dev, RT5390) || 3454 rt2x00_rt(rt2x00dev, RT5390) ||
3383 rt2x00_rt(rt2x00dev, RT5392)) { 3455 rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3388,15 +3460,20 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3388 3460
3389 if (rt2800_is_305x_soc(rt2x00dev) || 3461 if (rt2800_is_305x_soc(rt2x00dev) ||
3390 rt2x00_rt(rt2x00dev, RT3290) || 3462 rt2x00_rt(rt2x00dev, RT3290) ||
3463 rt2x00_rt(rt2x00dev, RT3352) ||
3391 rt2x00_rt(rt2x00dev, RT3572) || 3464 rt2x00_rt(rt2x00dev, RT3572) ||
3392 rt2x00_rt(rt2x00dev, RT5390) || 3465 rt2x00_rt(rt2x00dev, RT5390) ||
3393 rt2x00_rt(rt2x00dev, RT5392)) 3466 rt2x00_rt(rt2x00dev, RT5392))
3394 rt2800_bbp_write(rt2x00dev, 31, 0x08); 3467 rt2800_bbp_write(rt2x00dev, 31, 0x08);
3395 3468
3469 if (rt2x00_rt(rt2x00dev, RT3352))
3470 rt2800_bbp_write(rt2x00dev, 47, 0x48);
3471
3396 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 3472 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
3397 rt2800_bbp_write(rt2x00dev, 66, 0x38); 3473 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3398 3474
3399 if (rt2x00_rt(rt2x00dev, RT3290) || 3475 if (rt2x00_rt(rt2x00dev, RT3290) ||
3476 rt2x00_rt(rt2x00dev, RT3352) ||
3400 rt2x00_rt(rt2x00dev, RT5390) || 3477 rt2x00_rt(rt2x00dev, RT5390) ||
3401 rt2x00_rt(rt2x00dev, RT5392)) 3478 rt2x00_rt(rt2x00dev, RT5392))
3402 rt2800_bbp_write(rt2x00dev, 68, 0x0b); 3479 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
@@ -3405,6 +3482,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3405 rt2800_bbp_write(rt2x00dev, 69, 0x16); 3482 rt2800_bbp_write(rt2x00dev, 69, 0x16);
3406 rt2800_bbp_write(rt2x00dev, 73, 0x12); 3483 rt2800_bbp_write(rt2x00dev, 73, 0x12);
3407 } else if (rt2x00_rt(rt2x00dev, RT3290) || 3484 } else if (rt2x00_rt(rt2x00dev, RT3290) ||
3485 rt2x00_rt(rt2x00dev, RT3352) ||
3408 rt2x00_rt(rt2x00dev, RT5390) || 3486 rt2x00_rt(rt2x00dev, RT5390) ||
3409 rt2x00_rt(rt2x00dev, RT5392)) { 3487 rt2x00_rt(rt2x00dev, RT5392)) {
3410 rt2800_bbp_write(rt2x00dev, 69, 0x12); 3488 rt2800_bbp_write(rt2x00dev, 69, 0x12);
@@ -3436,15 +3514,17 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3436 } else if (rt2800_is_305x_soc(rt2x00dev)) { 3514 } else if (rt2800_is_305x_soc(rt2x00dev)) {
3437 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 3515 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
3438 rt2800_bbp_write(rt2x00dev, 80, 0x08); 3516 rt2800_bbp_write(rt2x00dev, 80, 0x08);
3439 } else { 3517 } else if (rt2x00_rt(rt2x00dev, RT3290)) {
3440 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3441 }
3442
3443 if (rt2x00_rt(rt2x00dev, RT3290)) {
3444 rt2800_bbp_write(rt2x00dev, 74, 0x0b); 3518 rt2800_bbp_write(rt2x00dev, 74, 0x0b);
3445 rt2800_bbp_write(rt2x00dev, 79, 0x18); 3519 rt2800_bbp_write(rt2x00dev, 79, 0x18);
3446 rt2800_bbp_write(rt2x00dev, 80, 0x09); 3520 rt2800_bbp_write(rt2x00dev, 80, 0x09);
3447 rt2800_bbp_write(rt2x00dev, 81, 0x33); 3521 rt2800_bbp_write(rt2x00dev, 81, 0x33);
3522 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
3523 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
3524 rt2800_bbp_write(rt2x00dev, 80, 0x08);
3525 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3526 } else {
3527 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3448 } 3528 }
3449 3529
3450 rt2800_bbp_write(rt2x00dev, 82, 0x62); 3530 rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -3465,18 +3545,21 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3465 rt2800_bbp_write(rt2x00dev, 84, 0x99); 3545 rt2800_bbp_write(rt2x00dev, 84, 0x99);
3466 3546
3467 if (rt2x00_rt(rt2x00dev, RT3290) || 3547 if (rt2x00_rt(rt2x00dev, RT3290) ||
3548 rt2x00_rt(rt2x00dev, RT3352) ||
3468 rt2x00_rt(rt2x00dev, RT5390) || 3549 rt2x00_rt(rt2x00dev, RT5390) ||
3469 rt2x00_rt(rt2x00dev, RT5392)) 3550 rt2x00_rt(rt2x00dev, RT5392))
3470 rt2800_bbp_write(rt2x00dev, 86, 0x38); 3551 rt2800_bbp_write(rt2x00dev, 86, 0x38);
3471 else 3552 else
3472 rt2800_bbp_write(rt2x00dev, 86, 0x00); 3553 rt2800_bbp_write(rt2x00dev, 86, 0x00);
3473 3554
3474 if (rt2x00_rt(rt2x00dev, RT5392)) 3555 if (rt2x00_rt(rt2x00dev, RT3352) ||
3556 rt2x00_rt(rt2x00dev, RT5392))
3475 rt2800_bbp_write(rt2x00dev, 88, 0x90); 3557 rt2800_bbp_write(rt2x00dev, 88, 0x90);
3476 3558
3477 rt2800_bbp_write(rt2x00dev, 91, 0x04); 3559 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3478 3560
3479 if (rt2x00_rt(rt2x00dev, RT3290) || 3561 if (rt2x00_rt(rt2x00dev, RT3290) ||
3562 rt2x00_rt(rt2x00dev, RT3352) ||
3480 rt2x00_rt(rt2x00dev, RT5390) || 3563 rt2x00_rt(rt2x00dev, RT5390) ||
3481 rt2x00_rt(rt2x00dev, RT5392)) 3564 rt2x00_rt(rt2x00dev, RT5392))
3482 rt2800_bbp_write(rt2x00dev, 92, 0x02); 3565 rt2800_bbp_write(rt2x00dev, 92, 0x02);
@@ -3493,6 +3576,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3493 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 3576 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
3494 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 3577 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
3495 rt2x00_rt(rt2x00dev, RT3290) || 3578 rt2x00_rt(rt2x00dev, RT3290) ||
3579 rt2x00_rt(rt2x00dev, RT3352) ||
3496 rt2x00_rt(rt2x00dev, RT3572) || 3580 rt2x00_rt(rt2x00dev, RT3572) ||
3497 rt2x00_rt(rt2x00dev, RT5390) || 3581 rt2x00_rt(rt2x00dev, RT5390) ||
3498 rt2x00_rt(rt2x00dev, RT5392) || 3582 rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3502,6 +3586,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3502 rt2800_bbp_write(rt2x00dev, 103, 0x00); 3586 rt2800_bbp_write(rt2x00dev, 103, 0x00);
3503 3587
3504 if (rt2x00_rt(rt2x00dev, RT3290) || 3588 if (rt2x00_rt(rt2x00dev, RT3290) ||
3589 rt2x00_rt(rt2x00dev, RT3352) ||
3505 rt2x00_rt(rt2x00dev, RT5390) || 3590 rt2x00_rt(rt2x00dev, RT5390) ||
3506 rt2x00_rt(rt2x00dev, RT5392)) 3591 rt2x00_rt(rt2x00dev, RT5392))
3507 rt2800_bbp_write(rt2x00dev, 104, 0x92); 3592 rt2800_bbp_write(rt2x00dev, 104, 0x92);
@@ -3510,6 +3595,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3510 rt2800_bbp_write(rt2x00dev, 105, 0x01); 3595 rt2800_bbp_write(rt2x00dev, 105, 0x01);
3511 else if (rt2x00_rt(rt2x00dev, RT3290)) 3596 else if (rt2x00_rt(rt2x00dev, RT3290))
3512 rt2800_bbp_write(rt2x00dev, 105, 0x1c); 3597 rt2800_bbp_write(rt2x00dev, 105, 0x1c);
3598 else if (rt2x00_rt(rt2x00dev, RT3352))
3599 rt2800_bbp_write(rt2x00dev, 105, 0x34);
3513 else if (rt2x00_rt(rt2x00dev, RT5390) || 3600 else if (rt2x00_rt(rt2x00dev, RT5390) ||
3514 rt2x00_rt(rt2x00dev, RT5392)) 3601 rt2x00_rt(rt2x00dev, RT5392))
3515 rt2800_bbp_write(rt2x00dev, 105, 0x3c); 3602 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
@@ -3519,11 +3606,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3519 if (rt2x00_rt(rt2x00dev, RT3290) || 3606 if (rt2x00_rt(rt2x00dev, RT3290) ||
3520 rt2x00_rt(rt2x00dev, RT5390)) 3607 rt2x00_rt(rt2x00dev, RT5390))
3521 rt2800_bbp_write(rt2x00dev, 106, 0x03); 3608 rt2800_bbp_write(rt2x00dev, 106, 0x03);
3609 else if (rt2x00_rt(rt2x00dev, RT3352))
3610 rt2800_bbp_write(rt2x00dev, 106, 0x05);
3522 else if (rt2x00_rt(rt2x00dev, RT5392)) 3611 else if (rt2x00_rt(rt2x00dev, RT5392))
3523 rt2800_bbp_write(rt2x00dev, 106, 0x12); 3612 rt2800_bbp_write(rt2x00dev, 106, 0x12);
3524 else 3613 else
3525 rt2800_bbp_write(rt2x00dev, 106, 0x35); 3614 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3526 3615
3616 if (rt2x00_rt(rt2x00dev, RT3352))
3617 rt2800_bbp_write(rt2x00dev, 120, 0x50);
3618
3527 if (rt2x00_rt(rt2x00dev, RT3290) || 3619 if (rt2x00_rt(rt2x00dev, RT3290) ||
3528 rt2x00_rt(rt2x00dev, RT5390) || 3620 rt2x00_rt(rt2x00dev, RT5390) ||
3529 rt2x00_rt(rt2x00dev, RT5392)) 3621 rt2x00_rt(rt2x00dev, RT5392))
@@ -3534,6 +3626,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3534 rt2800_bbp_write(rt2x00dev, 135, 0xf6); 3626 rt2800_bbp_write(rt2x00dev, 135, 0xf6);
3535 } 3627 }
3536 3628
3629 if (rt2x00_rt(rt2x00dev, RT3352))
3630 rt2800_bbp_write(rt2x00dev, 137, 0x0f);
3631
3537 if (rt2x00_rt(rt2x00dev, RT3071) || 3632 if (rt2x00_rt(rt2x00dev, RT3071) ||
3538 rt2x00_rt(rt2x00dev, RT3090) || 3633 rt2x00_rt(rt2x00dev, RT3090) ||
3539 rt2x00_rt(rt2x00dev, RT3390) || 3634 rt2x00_rt(rt2x00dev, RT3390) ||
@@ -3574,6 +3669,28 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3574 rt2800_bbp_write(rt2x00dev, 3, value); 3669 rt2800_bbp_write(rt2x00dev, 3, value);
3575 } 3670 }
3576 3671
3672 if (rt2x00_rt(rt2x00dev, RT3352)) {
3673 rt2800_bbp_write(rt2x00dev, 163, 0xbd);
3674 /* Set ITxBF timeout to 0x9c40=1000msec */
3675 rt2800_bbp_write(rt2x00dev, 179, 0x02);
3676 rt2800_bbp_write(rt2x00dev, 180, 0x00);
3677 rt2800_bbp_write(rt2x00dev, 182, 0x40);
3678 rt2800_bbp_write(rt2x00dev, 180, 0x01);
3679 rt2800_bbp_write(rt2x00dev, 182, 0x9c);
3680 rt2800_bbp_write(rt2x00dev, 179, 0x00);
3681 /* Reprogram the inband interface to put right values in RXWI */
3682 rt2800_bbp_write(rt2x00dev, 142, 0x04);
3683 rt2800_bbp_write(rt2x00dev, 143, 0x3b);
3684 rt2800_bbp_write(rt2x00dev, 142, 0x06);
3685 rt2800_bbp_write(rt2x00dev, 143, 0xa0);
3686 rt2800_bbp_write(rt2x00dev, 142, 0x07);
3687 rt2800_bbp_write(rt2x00dev, 143, 0xa1);
3688 rt2800_bbp_write(rt2x00dev, 142, 0x08);
3689 rt2800_bbp_write(rt2x00dev, 143, 0xa2);
3690
3691 rt2800_bbp_write(rt2x00dev, 148, 0xc8);
3692 }
3693
3577 if (rt2x00_rt(rt2x00dev, RT5390) || 3694 if (rt2x00_rt(rt2x00dev, RT5390) ||
3578 rt2x00_rt(rt2x00dev, RT5392)) { 3695 rt2x00_rt(rt2x00dev, RT5392)) {
3579 int ant, div_mode; 3696 int ant, div_mode;
@@ -3587,16 +3704,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3587 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { 3704 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
3588 u32 reg; 3705 u32 reg;
3589 3706
3590 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 3707 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
3591 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0); 3708 rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
3592 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0); 3709 rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
3593 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0); 3710 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
3594 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0); 3711 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
3595 if (ant == 0) 3712 if (ant == 0)
3596 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1); 3713 rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
3597 else if (ant == 1) 3714 else if (ant == 1)
3598 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1); 3715 rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
3599 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 3716 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
3600 } 3717 }
3601 3718
3602 /* This chip has hardware antenna diversity*/ 3719 /* This chip has hardware antenna diversity*/
@@ -3707,6 +3824,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3707 !rt2x00_rt(rt2x00dev, RT3071) && 3824 !rt2x00_rt(rt2x00dev, RT3071) &&
3708 !rt2x00_rt(rt2x00dev, RT3090) && 3825 !rt2x00_rt(rt2x00dev, RT3090) &&
3709 !rt2x00_rt(rt2x00dev, RT3290) && 3826 !rt2x00_rt(rt2x00dev, RT3290) &&
3827 !rt2x00_rt(rt2x00dev, RT3352) &&
3710 !rt2x00_rt(rt2x00dev, RT3390) && 3828 !rt2x00_rt(rt2x00dev, RT3390) &&
3711 !rt2x00_rt(rt2x00dev, RT3572) && 3829 !rt2x00_rt(rt2x00dev, RT3572) &&
3712 !rt2x00_rt(rt2x00dev, RT5390) && 3830 !rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3903,6 +4021,70 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3903 rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 4021 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
3904 rt2800_rfcsr_write(rt2x00dev, 31, 0x00); 4022 rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
3905 return 0; 4023 return 0;
4024 } else if (rt2x00_rt(rt2x00dev, RT3352)) {
4025 rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
4026 rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
4027 rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
4028 rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
4029 rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
4030 rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
4031 rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
4032 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
4033 rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
4034 rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
4035 rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
4036 rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
4037 rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
4038 rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
4039 rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
4040 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
4041 rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
4042 rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
4043 rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
4044 rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
4045 rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
4046 rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
4047 rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
4048 rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
4049 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
4050 rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
4051 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4052 rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
4053 rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
4054 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
4055 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
4056 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
4057 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
4058 rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
4059 rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
4060 rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
4061 rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
4062 rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
4063 rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
4064 rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
4065 rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
4066 rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
4067 rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
4068 rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
4069 rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
4070 rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
4071 rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
4072 rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
4073 rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
4074 rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
4075 rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
4076 rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
4077 rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
4078 rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
4079 rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
4080 rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
4081 rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
4082 rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
4083 rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
4084 rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
4085 rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
4086 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
4087 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
3906 } else if (rt2x00_rt(rt2x00dev, RT5390)) { 4088 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
3907 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); 4089 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
3908 rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 4090 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
@@ -4104,6 +4286,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4104 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19); 4286 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
4105 } else if (rt2x00_rt(rt2x00dev, RT3071) || 4287 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
4106 rt2x00_rt(rt2x00dev, RT3090) || 4288 rt2x00_rt(rt2x00dev, RT3090) ||
4289 rt2x00_rt(rt2x00dev, RT3352) ||
4107 rt2x00_rt(rt2x00dev, RT3390) || 4290 rt2x00_rt(rt2x00dev, RT3390) ||
4108 rt2x00_rt(rt2x00dev, RT3572)) { 4291 rt2x00_rt(rt2x00dev, RT3572)) {
4109 drv_data->calibration_bw20 = 4292 drv_data->calibration_bw20 =
@@ -4392,7 +4575,7 @@ void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
4392} 4575}
4393EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); 4576EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
4394 4577
4395int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) 4578static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4396{ 4579{
4397 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; 4580 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
4398 u16 word; 4581 u16 word;
@@ -4400,6 +4583,11 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4400 u8 default_lna_gain; 4583 u8 default_lna_gain;
4401 4584
4402 /* 4585 /*
4586 * Read the EEPROM.
4587 */
4588 rt2800_read_eeprom(rt2x00dev);
4589
4590 /*
4403 * Start validation of the data that has been read. 4591 * Start validation of the data that has been read.
4404 */ 4592 */
4405 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 4593 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
@@ -4521,9 +4709,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4521 4709
4522 return 0; 4710 return 0;
4523} 4711}
4524EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
4525 4712
4526int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) 4713static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4527{ 4714{
4528 u32 reg; 4715 u32 reg;
4529 u16 value; 4716 u16 value;
@@ -4562,6 +4749,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4562 case RT3071: 4749 case RT3071:
4563 case RT3090: 4750 case RT3090:
4564 case RT3290: 4751 case RT3290:
4752 case RT3352:
4565 case RT3390: 4753 case RT3390:
4566 case RT3572: 4754 case RT3572:
4567 case RT5390: 4755 case RT5390:
@@ -4584,6 +4772,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4584 case RF3052: 4772 case RF3052:
4585 case RF3290: 4773 case RF3290:
4586 case RF3320: 4774 case RF3320:
4775 case RF3322:
4587 case RF5360: 4776 case RF5360:
4588 case RF5370: 4777 case RF5370:
4589 case RF5372: 4778 case RF5372:
@@ -4608,6 +4797,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4608 4797
4609 if (rt2x00_rt(rt2x00dev, RT3070) || 4798 if (rt2x00_rt(rt2x00dev, RT3070) ||
4610 rt2x00_rt(rt2x00dev, RT3090) || 4799 rt2x00_rt(rt2x00dev, RT3090) ||
4800 rt2x00_rt(rt2x00dev, RT3352) ||
4611 rt2x00_rt(rt2x00dev, RT3390)) { 4801 rt2x00_rt(rt2x00dev, RT3390)) {
4612 value = rt2x00_get_field16(eeprom, 4802 value = rt2x00_get_field16(eeprom,
4613 EEPROM_NIC_CONF1_ANT_DIVERSITY); 4803 EEPROM_NIC_CONF1_ANT_DIVERSITY);
@@ -4681,7 +4871,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4681 4871
4682 return 0; 4872 return 0;
4683} 4873}
4684EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
4685 4874
4686/* 4875/*
4687 * RF value list for rt28xx 4876 * RF value list for rt28xx
@@ -4824,7 +5013,7 @@ static const struct rf_channel rf_vals_3x[] = {
4824 {173, 0x61, 0, 9}, 5013 {173, 0x61, 0, 9},
4825}; 5014};
4826 5015
4827int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 5016static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4828{ 5017{
4829 struct hw_mode_spec *spec = &rt2x00dev->spec; 5018 struct hw_mode_spec *spec = &rt2x00dev->spec;
4830 struct channel_info *info; 5019 struct channel_info *info;
@@ -4901,6 +5090,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4901 rt2x00_rf(rt2x00dev, RF3022) || 5090 rt2x00_rf(rt2x00dev, RF3022) ||
4902 rt2x00_rf(rt2x00dev, RF3290) || 5091 rt2x00_rf(rt2x00dev, RF3290) ||
4903 rt2x00_rf(rt2x00dev, RF3320) || 5092 rt2x00_rf(rt2x00dev, RF3320) ||
5093 rt2x00_rf(rt2x00dev, RF3322) ||
4904 rt2x00_rf(rt2x00dev, RF5360) || 5094 rt2x00_rf(rt2x00dev, RF5360) ||
4905 rt2x00_rf(rt2x00dev, RF5370) || 5095 rt2x00_rf(rt2x00dev, RF5370) ||
4906 rt2x00_rf(rt2x00dev, RF5372) || 5096 rt2x00_rf(rt2x00dev, RF5372) ||
@@ -5000,7 +5190,72 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5000 5190
5001 return 0; 5191 return 0;
5002} 5192}
5003EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode); 5193
5194int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
5195{
5196 int retval;
5197 u32 reg;
5198
5199 /*
5200 * Allocate eeprom data.
5201 */
5202 retval = rt2800_validate_eeprom(rt2x00dev);
5203 if (retval)
5204 return retval;
5205
5206 retval = rt2800_init_eeprom(rt2x00dev);
5207 if (retval)
5208 return retval;
5209
5210 /*
5211 * Enable rfkill polling by setting GPIO direction of the
5212 * rfkill switch GPIO pin correctly.
5213 */
5214 rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
5215 rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
5216 rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
5217
5218 /*
5219 * Initialize hw specifications.
5220 */
5221 retval = rt2800_probe_hw_mode(rt2x00dev);
5222 if (retval)
5223 return retval;
5224
5225 /*
5226 * Set device capabilities.
5227 */
5228 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
5229 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
5230 if (!rt2x00_is_usb(rt2x00dev))
5231 __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
5232
5233 /*
5234 * Set device requirements.
5235 */
5236 if (!rt2x00_is_soc(rt2x00dev))
5237 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
5238 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
5239 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
5240 if (!rt2800_hwcrypt_disabled(rt2x00dev))
5241 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
5242 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
5243 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
5244 if (rt2x00_is_usb(rt2x00dev))
5245 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
5246 else {
5247 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
5248 __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
5249 }
5250
5251 /*
5252 * Set the rssi offset.
5253 */
5254 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
5255
5256 return 0;
5257}
5258EXPORT_SYMBOL_GPL(rt2800_probe_hw);
5004 5259
5005/* 5260/*
5006 * IEEE80211 stack callback functions. 5261 * IEEE80211 stack callback functions.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 18a0b67b4c68..a128ceadcb3e 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -43,6 +43,9 @@ struct rt2800_ops {
43 const unsigned int offset, 43 const unsigned int offset,
44 const struct rt2x00_field32 field, u32 *reg); 44 const struct rt2x00_field32 field, u32 *reg);
45 45
46 void (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
47 bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev);
48
46 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev, 49 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
47 const u8 *data, const size_t len); 50 const u8 *data, const size_t len);
48 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev); 51 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
@@ -114,6 +117,20 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
114 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg); 117 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
115} 118}
116 119
120static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
121{
122 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
123
124 rt2800ops->read_eeprom(rt2x00dev);
125}
126
127static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
128{
129 const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
130
131 return rt2800ops->hwcrypt_disabled(rt2x00dev);
132}
133
117static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev, 134static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev,
118 const u8 *data, const size_t len) 135 const u8 *data, const size_t len)
119{ 136{
@@ -191,9 +208,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
191 208
192int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 209int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
193void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 210void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
194int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev); 211
195int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev); 212int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
196int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
197 213
198void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32, 214void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
199 u16 *iv16); 215 u16 *iv16);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 4765bbd654cd..27829e1e2e38 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -54,6 +54,11 @@ static bool modparam_nohwcrypt = false;
54module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 54module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
55MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 55MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
56 56
57static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
58{
59 return modparam_nohwcrypt;
60}
61
57static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) 62static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
58{ 63{
59 unsigned int i; 64 unsigned int i;
@@ -965,85 +970,14 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
965/* 970/*
966 * Device probe functions. 971 * Device probe functions.
967 */ 972 */
968static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) 973static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
969{ 974{
970 /*
971 * Read EEPROM into buffer
972 */
973 if (rt2x00_is_soc(rt2x00dev)) 975 if (rt2x00_is_soc(rt2x00dev))
974 rt2800pci_read_eeprom_soc(rt2x00dev); 976 rt2800pci_read_eeprom_soc(rt2x00dev);
975 else if (rt2800pci_efuse_detect(rt2x00dev)) 977 else if (rt2800pci_efuse_detect(rt2x00dev))
976 rt2800pci_read_eeprom_efuse(rt2x00dev); 978 rt2800pci_read_eeprom_efuse(rt2x00dev);
977 else 979 else
978 rt2800pci_read_eeprom_pci(rt2x00dev); 980 rt2800pci_read_eeprom_pci(rt2x00dev);
979
980 return rt2800_validate_eeprom(rt2x00dev);
981}
982
983static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
984{
985 int retval;
986 u32 reg;
987
988 /*
989 * Allocate eeprom data.
990 */
991 retval = rt2800pci_validate_eeprom(rt2x00dev);
992 if (retval)
993 return retval;
994
995 retval = rt2800_init_eeprom(rt2x00dev);
996 if (retval)
997 return retval;
998
999 /*
1000 * Enable rfkill polling by setting GPIO direction of the
1001 * rfkill switch GPIO pin correctly.
1002 */
1003 rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
1004 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
1005 rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
1006
1007 /*
1008 * Initialize hw specifications.
1009 */
1010 retval = rt2800_probe_hw_mode(rt2x00dev);
1011 if (retval)
1012 return retval;
1013
1014 /*
1015 * This device has multiple filters for control frames
1016 * and has a separate filter for PS Poll frames.
1017 */
1018 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
1019 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
1020
1021 /*
1022 * This device has a pre tbtt interrupt and thus fetches
1023 * a new beacon directly prior to transmission.
1024 */
1025 __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
1026
1027 /*
1028 * This device requires firmware.
1029 */
1030 if (!rt2x00_is_soc(rt2x00dev))
1031 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
1032 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
1033 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
1034 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
1035 __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
1036 if (!modparam_nohwcrypt)
1037 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
1038 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
1039 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
1040
1041 /*
1042 * Set the rssi offset.
1043 */
1044 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
1045
1046 return 0;
1047} 981}
1048 982
1049static const struct ieee80211_ops rt2800pci_mac80211_ops = { 983static const struct ieee80211_ops rt2800pci_mac80211_ops = {
@@ -1081,6 +1015,8 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
1081 .register_multiread = rt2x00pci_register_multiread, 1015 .register_multiread = rt2x00pci_register_multiread,
1082 .register_multiwrite = rt2x00pci_register_multiwrite, 1016 .register_multiwrite = rt2x00pci_register_multiwrite,
1083 .regbusy_read = rt2x00pci_regbusy_read, 1017 .regbusy_read = rt2x00pci_regbusy_read,
1018 .read_eeprom = rt2800pci_read_eeprom,
1019 .hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
1084 .drv_write_firmware = rt2800pci_write_firmware, 1020 .drv_write_firmware = rt2800pci_write_firmware,
1085 .drv_init_registers = rt2800pci_init_registers, 1021 .drv_init_registers = rt2800pci_init_registers,
1086 .drv_get_txwi = rt2800pci_get_txwi, 1022 .drv_get_txwi = rt2800pci_get_txwi,
@@ -1093,7 +1029,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1093 .tbtt_tasklet = rt2800pci_tbtt_tasklet, 1029 .tbtt_tasklet = rt2800pci_tbtt_tasklet,
1094 .rxdone_tasklet = rt2800pci_rxdone_tasklet, 1030 .rxdone_tasklet = rt2800pci_rxdone_tasklet,
1095 .autowake_tasklet = rt2800pci_autowake_tasklet, 1031 .autowake_tasklet = rt2800pci_autowake_tasklet,
1096 .probe_hw = rt2800pci_probe_hw, 1032 .probe_hw = rt2800_probe_hw,
1097 .get_firmware_name = rt2800pci_get_firmware_name, 1033 .get_firmware_name = rt2800pci_get_firmware_name,
1098 .check_firmware = rt2800_check_firmware, 1034 .check_firmware = rt2800_check_firmware,
1099 .load_firmware = rt2800_load_firmware, 1035 .load_firmware = rt2800_load_firmware,
@@ -1152,7 +1088,6 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
1152static const struct rt2x00_ops rt2800pci_ops = { 1088static const struct rt2x00_ops rt2800pci_ops = {
1153 .name = KBUILD_MODNAME, 1089 .name = KBUILD_MODNAME,
1154 .drv_data_size = sizeof(struct rt2800_drv_data), 1090 .drv_data_size = sizeof(struct rt2800_drv_data),
1155 .max_sta_intf = 1,
1156 .max_ap_intf = 8, 1091 .max_ap_intf = 8,
1157 .eeprom_size = EEPROM_SIZE, 1092 .eeprom_size = EEPROM_SIZE,
1158 .rf_size = RF_SIZE, 1093 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6b4226b71618..c9e9370eb789 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -49,6 +49,11 @@ static bool modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51 51
52static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
53{
54 return modparam_nohwcrypt;
55}
56
52/* 57/*
53 * Queue handlers. 58 * Queue handlers.
54 */ 59 */
@@ -730,73 +735,27 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
730/* 735/*
731 * Device probe functions. 736 * Device probe functions.
732 */ 737 */
733static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) 738static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
734{ 739{
735 if (rt2800_efuse_detect(rt2x00dev)) 740 if (rt2800_efuse_detect(rt2x00dev))
736 rt2800_read_eeprom_efuse(rt2x00dev); 741 rt2800_read_eeprom_efuse(rt2x00dev);
737 else 742 else
738 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, 743 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
739 EEPROM_SIZE); 744 EEPROM_SIZE);
740
741 return rt2800_validate_eeprom(rt2x00dev);
742} 745}
743 746
744static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) 747static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
745{ 748{
746 int retval; 749 int retval;
747 u32 reg;
748 750
749 /* 751 retval = rt2800_probe_hw(rt2x00dev);
750 * Allocate eeprom data.
751 */
752 retval = rt2800usb_validate_eeprom(rt2x00dev);
753 if (retval) 752 if (retval)
754 return retval; 753 return retval;
755 754
756 retval = rt2800_init_eeprom(rt2x00dev);
757 if (retval)
758 return retval;
759
760 /*
761 * Enable rfkill polling by setting GPIO direction of the
762 * rfkill switch GPIO pin correctly.
763 */
764 rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
765 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
766 rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
767
768 /*
769 * Initialize hw specifications.
770 */
771 retval = rt2800_probe_hw_mode(rt2x00dev);
772 if (retval)
773 return retval;
774
775 /*
776 * This device has multiple filters for control frames
777 * and has a separate filter for PS Poll frames.
778 */
779 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
780 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
781
782 /*
783 * This device requires firmware.
784 */
785 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
786 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
787 if (!modparam_nohwcrypt)
788 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
789 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
790 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
791 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
792 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
793
794 rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout,
795
796 /* 755 /*
797 * Set the rssi offset. 756 * Set txstatus timer function.
798 */ 757 */
799 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; 758 rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
800 759
801 /* 760 /*
802 * Overwrite TX done handler 761 * Overwrite TX done handler
@@ -842,6 +801,8 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
842 .register_multiread = rt2x00usb_register_multiread, 801 .register_multiread = rt2x00usb_register_multiread,
843 .register_multiwrite = rt2x00usb_register_multiwrite, 802 .register_multiwrite = rt2x00usb_register_multiwrite,
844 .regbusy_read = rt2x00usb_regbusy_read, 803 .regbusy_read = rt2x00usb_regbusy_read,
804 .read_eeprom = rt2800usb_read_eeprom,
805 .hwcrypt_disabled = rt2800usb_hwcrypt_disabled,
845 .drv_write_firmware = rt2800usb_write_firmware, 806 .drv_write_firmware = rt2800usb_write_firmware,
846 .drv_init_registers = rt2800usb_init_registers, 807 .drv_init_registers = rt2800usb_init_registers,
847 .drv_get_txwi = rt2800usb_get_txwi, 808 .drv_get_txwi = rt2800usb_get_txwi,
@@ -909,7 +870,6 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
909static const struct rt2x00_ops rt2800usb_ops = { 870static const struct rt2x00_ops rt2800usb_ops = {
910 .name = KBUILD_MODNAME, 871 .name = KBUILD_MODNAME,
911 .drv_data_size = sizeof(struct rt2800_drv_data), 872 .drv_data_size = sizeof(struct rt2800_drv_data),
912 .max_sta_intf = 1,
913 .max_ap_intf = 8, 873 .max_ap_intf = 8,
914 .eeprom_size = EEPROM_SIZE, 874 .eeprom_size = EEPROM_SIZE,
915 .rf_size = RF_SIZE, 875 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8afb546c2b2d..0751b35ef6dc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -188,6 +188,7 @@ struct rt2x00_chip {
188#define RT3071 0x3071 188#define RT3071 0x3071
189#define RT3090 0x3090 /* 2.4GHz PCIe */ 189#define RT3090 0x3090 /* 2.4GHz PCIe */
190#define RT3290 0x3290 190#define RT3290 0x3290
191#define RT3352 0x3352 /* WSOC */
191#define RT3390 0x3390 192#define RT3390 0x3390
192#define RT3572 0x3572 193#define RT3572 0x3572
193#define RT3593 0x3593 194#define RT3593 0x3593
@@ -655,7 +656,6 @@ struct rt2x00lib_ops {
655struct rt2x00_ops { 656struct rt2x00_ops {
656 const char *name; 657 const char *name;
657 const unsigned int drv_data_size; 658 const unsigned int drv_data_size;
658 const unsigned int max_sta_intf;
659 const unsigned int max_ap_intf; 659 const unsigned int max_ap_intf;
660 const unsigned int eeprom_size; 660 const unsigned int eeprom_size;
661 const unsigned int rf_size; 661 const unsigned int rf_size;
@@ -741,6 +741,14 @@ enum rt2x00_capability_flags {
741}; 741};
742 742
743/* 743/*
744 * Interface combinations
745 */
746enum {
747 IF_COMB_AP = 0,
748 NUM_IF_COMB,
749};
750
751/*
744 * rt2x00 device structure. 752 * rt2x00 device structure.
745 */ 753 */
746struct rt2x00_dev { 754struct rt2x00_dev {
@@ -867,6 +875,12 @@ struct rt2x00_dev {
867 unsigned int intf_beaconing; 875 unsigned int intf_beaconing;
868 876
869 /* 877 /*
878 * Interface combinations
879 */
880 struct ieee80211_iface_limit if_limits_ap;
881 struct ieee80211_iface_combination if_combinations[NUM_IF_COMB];
882
883 /*
870 * Link quality 884 * Link quality
871 */ 885 */
872 struct link link; 886 struct link link;
@@ -1287,7 +1301,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
1287/* 1301/*
1288 * mac80211 handlers. 1302 * mac80211 handlers.
1289 */ 1303 */
1290void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 1304void rt2x00mac_tx(struct ieee80211_hw *hw,
1305 struct ieee80211_tx_control *control,
1306 struct sk_buff *skb);
1291int rt2x00mac_start(struct ieee80211_hw *hw); 1307int rt2x00mac_start(struct ieee80211_hw *hw);
1292void rt2x00mac_stop(struct ieee80211_hw *hw); 1308void rt2x00mac_stop(struct ieee80211_hw *hw);
1293int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1309int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 3f07e36f462b..69097d1faeb6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
194 */ 194 */
195 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 195 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
196 while (skb) { 196 while (skb) {
197 rt2x00mac_tx(rt2x00dev->hw, skb); 197 rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
198 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 198 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
199 } 199 }
200} 200}
@@ -1118,6 +1118,34 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1118 rt2x00dev->intf_associated = 0; 1118 rt2x00dev->intf_associated = 0;
1119} 1119}
1120 1120
1121static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1122{
1123 struct ieee80211_iface_limit *if_limit;
1124 struct ieee80211_iface_combination *if_combination;
1125
1126 /*
1127 * Build up AP interface limits structure.
1128 */
1129 if_limit = &rt2x00dev->if_limits_ap;
1130 if_limit->max = rt2x00dev->ops->max_ap_intf;
1131 if_limit->types = BIT(NL80211_IFTYPE_AP);
1132
1133 /*
1134 * Build up AP interface combinations structure.
1135 */
1136 if_combination = &rt2x00dev->if_combinations[IF_COMB_AP];
1137 if_combination->limits = if_limit;
1138 if_combination->n_limits = 1;
1139 if_combination->max_interfaces = if_limit->max;
1140 if_combination->num_different_channels = 1;
1141
1142 /*
1143 * Finally, specify the possible combinations to mac80211.
1144 */
1145 rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations;
1146 rt2x00dev->hw->wiphy->n_iface_combinations = 1;
1147}
1148
1121/* 1149/*
1122 * driver allocation handlers. 1150 * driver allocation handlers.
1123 */ 1151 */
@@ -1126,6 +1154,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1126 int retval = -ENOMEM; 1154 int retval = -ENOMEM;
1127 1155
1128 /* 1156 /*
1157 * Set possible interface combinations.
1158 */
1159 rt2x00lib_set_if_combinations(rt2x00dev);
1160
1161 /*
1129 * Allocate the driver data memory, if necessary. 1162 * Allocate the driver data memory, if necessary.
1130 */ 1163 */
1131 if (rt2x00dev->ops->drv_data_size > 0) { 1164 if (rt2x00dev->ops->drv_data_size > 0) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 4ff26c2159bf..98a9e48f8e4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
99 return retval; 99 return retval;
100} 100}
101 101
102void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 102void rt2x00mac_tx(struct ieee80211_hw *hw,
103 struct ieee80211_tx_control *control,
104 struct sk_buff *skb)
103{ 105{
104 struct rt2x00_dev *rt2x00dev = hw->priv; 106 struct rt2x00_dev *rt2x00dev = hw->priv;
105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 107 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -212,46 +214,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
212 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) 214 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
213 return -ENODEV; 215 return -ENODEV;
214 216
215 switch (vif->type) {
216 case NL80211_IFTYPE_AP:
217 /*
218 * We don't support mixed combinations of
219 * sta and ap interfaces.
220 */
221 if (rt2x00dev->intf_sta_count)
222 return -ENOBUFS;
223
224 /*
225 * Check if we exceeded the maximum amount
226 * of supported interfaces.
227 */
228 if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
229 return -ENOBUFS;
230
231 break;
232 case NL80211_IFTYPE_STATION:
233 case NL80211_IFTYPE_ADHOC:
234 case NL80211_IFTYPE_MESH_POINT:
235 case NL80211_IFTYPE_WDS:
236 /*
237 * We don't support mixed combinations of
238 * sta and ap interfaces.
239 */
240 if (rt2x00dev->intf_ap_count)
241 return -ENOBUFS;
242
243 /*
244 * Check if we exceeded the maximum amount
245 * of supported interfaces.
246 */
247 if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
248 return -ENOBUFS;
249
250 break;
251 default:
252 return -EINVAL;
253 }
254
255 /* 217 /*
256 * Loop through all beacon queues to find a free 218 * Loop through all beacon queues to find a free
257 * entry. Since there are as much beacon entries 219 * entry. Since there are as much beacon entries
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index f7e74a0a7759..e488b944a034 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
315static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, 315static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
316 struct sk_buff *skb, 316 struct sk_buff *skb,
317 struct txentry_desc *txdesc, 317 struct txentry_desc *txdesc,
318 struct ieee80211_sta *sta,
318 const struct rt2x00_rate *hwrate) 319 const struct rt2x00_rate *hwrate)
319{ 320{
320 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 321 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
322 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 323 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
323 struct rt2x00_sta *sta_priv = NULL; 324 struct rt2x00_sta *sta_priv = NULL;
324 325
325 if (tx_info->control.sta) { 326 if (sta) {
326 txdesc->u.ht.mpdu_density = 327 txdesc->u.ht.mpdu_density =
327 tx_info->control.sta->ht_cap.ampdu_density; 328 sta->ht_cap.ampdu_density;
328 329
329 sta_priv = sta_to_rt2x00_sta(tx_info->control.sta); 330 sta_priv = sta_to_rt2x00_sta(sta);
330 txdesc->u.ht.wcid = sta_priv->wcid; 331 txdesc->u.ht.wcid = sta_priv->wcid;
331 } 332 }
332 333
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
341 * MIMO PS should be set to 1 for STA's using dynamic SM PS 342 * MIMO PS should be set to 1 for STA's using dynamic SM PS
342 * when using more then one tx stream (>MCS7). 343 * when using more then one tx stream (>MCS7).
343 */ 344 */
344 if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && 345 if (sta && txdesc->u.ht.mcs > 7 &&
345 ((tx_info->control.sta->ht_cap.cap & 346 ((sta->ht_cap.cap &
346 IEEE80211_HT_CAP_SM_PS) >> 347 IEEE80211_HT_CAP_SM_PS) >>
347 IEEE80211_HT_CAP_SM_PS_SHIFT) == 348 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
348 WLAN_HT_CAP_SM_PS_DYNAMIC) 349 WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
409 410
410static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 411static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
411 struct sk_buff *skb, 412 struct sk_buff *skb,
412 struct txentry_desc *txdesc) 413 struct txentry_desc *txdesc,
414 struct ieee80211_sta *sta)
413{ 415{
414 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 416 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
415 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
503 505
504 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) 506 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
505 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, 507 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
506 hwrate); 508 sta, hwrate);
507 else 509 else
508 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, 510 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
509 hwrate); 511 hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
595 * after that we are free to use the skb->cb array 597 * after that we are free to use the skb->cb array
596 * for our information. 598 * for our information.
597 */ 599 */
598 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc); 600 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
599 601
600 /* 602 /*
601 * All information is retrieved from the skb->cb array, 603 * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
740 * after that we are free to use the skb->cb array 742 * after that we are free to use the skb->cb array
741 * for our information. 743 * for our information.
742 */ 744 */
743 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc); 745 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
744 746
745 /* 747 /*
746 * Fill in skb descriptor 748 * Fill in skb descriptor
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b8ec96163922..d6582a2fa353 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -243,7 +243,7 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
243 u32 reg; 243 u32 reg;
244 244
245 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 245 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
246 return rt2x00_get_field32(reg, MAC_CSR13_BIT5); 246 return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
247} 247}
248 248
249#ifdef CONFIG_RT2X00_LIB_LEDS 249#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -715,11 +715,11 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
715 715
716 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 716 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
717 717
718 rt2x00_set_field32(&reg, MAC_CSR13_BIT4, p1); 718 rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
719 rt2x00_set_field32(&reg, MAC_CSR13_BIT12, 0); 719 rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
720 720
721 rt2x00_set_field32(&reg, MAC_CSR13_BIT3, !p2); 721 rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0);
722 rt2x00_set_field32(&reg, MAC_CSR13_BIT11, 0); 722 rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2);
723 723
724 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); 724 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
725} 725}
@@ -2855,7 +2855,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2855 * rfkill switch GPIO pin correctly. 2855 * rfkill switch GPIO pin correctly.
2856 */ 2856 */
2857 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 2857 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
2858 rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1); 2858 rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
2859 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); 2859 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
2860 2860
2861 /* 2861 /*
@@ -3045,7 +3045,6 @@ static const struct data_queue_desc rt61pci_queue_bcn = {
3045 3045
3046static const struct rt2x00_ops rt61pci_ops = { 3046static const struct rt2x00_ops rt61pci_ops = {
3047 .name = KBUILD_MODNAME, 3047 .name = KBUILD_MODNAME,
3048 .max_sta_intf = 1,
3049 .max_ap_intf = 4, 3048 .max_ap_intf = 4,
3050 .eeprom_size = EEPROM_SIZE, 3049 .eeprom_size = EEPROM_SIZE,
3051 .rf_size = RF_SIZE, 3050 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8f3da5a56766..9bc6b6044e34 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -357,22 +357,22 @@ struct hw_pairwise_ta_entry {
357 357
358/* 358/*
359 * MAC_CSR13: GPIO. 359 * MAC_CSR13: GPIO.
360 * MAC_CSR13_VALx: GPIO value
361 * MAC_CSR13_DIRx: GPIO direction: 0 = output; 1 = input
360 */ 362 */
361#define MAC_CSR13 0x3034 363#define MAC_CSR13 0x3034
362#define MAC_CSR13_BIT0 FIELD32(0x00000001) 364#define MAC_CSR13_VAL0 FIELD32(0x00000001)
363#define MAC_CSR13_BIT1 FIELD32(0x00000002) 365#define MAC_CSR13_VAL1 FIELD32(0x00000002)
364#define MAC_CSR13_BIT2 FIELD32(0x00000004) 366#define MAC_CSR13_VAL2 FIELD32(0x00000004)
365#define MAC_CSR13_BIT3 FIELD32(0x00000008) 367#define MAC_CSR13_VAL3 FIELD32(0x00000008)
366#define MAC_CSR13_BIT4 FIELD32(0x00000010) 368#define MAC_CSR13_VAL4 FIELD32(0x00000010)
367#define MAC_CSR13_BIT5 FIELD32(0x00000020) 369#define MAC_CSR13_VAL5 FIELD32(0x00000020)
368#define MAC_CSR13_BIT6 FIELD32(0x00000040) 370#define MAC_CSR13_DIR0 FIELD32(0x00000100)
369#define MAC_CSR13_BIT7 FIELD32(0x00000080) 371#define MAC_CSR13_DIR1 FIELD32(0x00000200)
370#define MAC_CSR13_BIT8 FIELD32(0x00000100) 372#define MAC_CSR13_DIR2 FIELD32(0x00000400)
371#define MAC_CSR13_BIT9 FIELD32(0x00000200) 373#define MAC_CSR13_DIR3 FIELD32(0x00000800)
372#define MAC_CSR13_BIT10 FIELD32(0x00000400) 374#define MAC_CSR13_DIR4 FIELD32(0x00001000)
373#define MAC_CSR13_BIT11 FIELD32(0x00000800) 375#define MAC_CSR13_DIR5 FIELD32(0x00002000)
374#define MAC_CSR13_BIT12 FIELD32(0x00001000)
375#define MAC_CSR13_BIT13 FIELD32(0x00002000)
376 376
377/* 377/*
378 * MAC_CSR14: LED control register. 378 * MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 248436c13ce0..e5eb43b3eee7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -189,7 +189,7 @@ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
189 u32 reg; 189 u32 reg;
190 190
191 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg); 191 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
192 return rt2x00_get_field32(reg, MAC_CSR13_BIT7); 192 return rt2x00_get_field32(reg, MAC_CSR13_VAL7);
193} 193}
194 194
195#ifdef CONFIG_RT2X00_LIB_LEDS 195#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2195,7 +2195,7 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2195 * rfkill switch GPIO pin correctly. 2195 * rfkill switch GPIO pin correctly.
2196 */ 2196 */
2197 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg); 2197 rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
2198 rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0); 2198 rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0);
2199 rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); 2199 rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
2200 2200
2201 /* 2201 /*
@@ -2382,7 +2382,6 @@ static const struct data_queue_desc rt73usb_queue_bcn = {
2382 2382
2383static const struct rt2x00_ops rt73usb_ops = { 2383static const struct rt2x00_ops rt73usb_ops = {
2384 .name = KBUILD_MODNAME, 2384 .name = KBUILD_MODNAME,
2385 .max_sta_intf = 1,
2386 .max_ap_intf = 4, 2385 .max_ap_intf = 4,
2387 .eeprom_size = EEPROM_SIZE, 2386 .eeprom_size = EEPROM_SIZE,
2388 .rf_size = RF_SIZE, 2387 .rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index df1cc116b83b..7577e0ba3877 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -267,24 +267,26 @@ struct hw_pairwise_ta_entry {
267 267
268/* 268/*
269 * MAC_CSR13: GPIO. 269 * MAC_CSR13: GPIO.
270 * MAC_CSR13_VALx: GPIO value
271 * MAC_CSR13_DIRx: GPIO direction: 0 = input; 1 = output
270 */ 272 */
271#define MAC_CSR13 0x3034 273#define MAC_CSR13 0x3034
272#define MAC_CSR13_BIT0 FIELD32(0x00000001) 274#define MAC_CSR13_VAL0 FIELD32(0x00000001)
273#define MAC_CSR13_BIT1 FIELD32(0x00000002) 275#define MAC_CSR13_VAL1 FIELD32(0x00000002)
274#define MAC_CSR13_BIT2 FIELD32(0x00000004) 276#define MAC_CSR13_VAL2 FIELD32(0x00000004)
275#define MAC_CSR13_BIT3 FIELD32(0x00000008) 277#define MAC_CSR13_VAL3 FIELD32(0x00000008)
276#define MAC_CSR13_BIT4 FIELD32(0x00000010) 278#define MAC_CSR13_VAL4 FIELD32(0x00000010)
277#define MAC_CSR13_BIT5 FIELD32(0x00000020) 279#define MAC_CSR13_VAL5 FIELD32(0x00000020)
278#define MAC_CSR13_BIT6 FIELD32(0x00000040) 280#define MAC_CSR13_VAL6 FIELD32(0x00000040)
279#define MAC_CSR13_BIT7 FIELD32(0x00000080) 281#define MAC_CSR13_VAL7 FIELD32(0x00000080)
280#define MAC_CSR13_BIT8 FIELD32(0x00000100) 282#define MAC_CSR13_DIR0 FIELD32(0x00000100)
281#define MAC_CSR13_BIT9 FIELD32(0x00000200) 283#define MAC_CSR13_DIR1 FIELD32(0x00000200)
282#define MAC_CSR13_BIT10 FIELD32(0x00000400) 284#define MAC_CSR13_DIR2 FIELD32(0x00000400)
283#define MAC_CSR13_BIT11 FIELD32(0x00000800) 285#define MAC_CSR13_DIR3 FIELD32(0x00000800)
284#define MAC_CSR13_BIT12 FIELD32(0x00001000) 286#define MAC_CSR13_DIR4 FIELD32(0x00001000)
285#define MAC_CSR13_BIT13 FIELD32(0x00002000) 287#define MAC_CSR13_DIR5 FIELD32(0x00002000)
286#define MAC_CSR13_BIT14 FIELD32(0x00004000) 288#define MAC_CSR13_DIR6 FIELD32(0x00004000)
287#define MAC_CSR13_BIT15 FIELD32(0x00008000) 289#define MAC_CSR13_DIR7 FIELD32(0x00008000)
288 290
289/* 291/*
290 * MAC_CSR14: LED control register. 292 * MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index aceaf689f737..021d83e1b1d3 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
244 return IRQ_HANDLED; 244 return IRQ_HANDLED;
245} 245}
246 246
247static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 247static void rtl8180_tx(struct ieee80211_hw *dev,
248 struct ieee80211_tx_control *control,
249 struct sk_buff *skb)
248{ 250{
249 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
250 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 252 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
710 /* TODO: use actual beacon queue */ 712 /* TODO: use actual beacon queue */
711 skb_set_queue_mapping(skb, 0); 713 skb_set_queue_mapping(skb, 0);
712 714
713 rtl8180_tx(dev, skb); 715 rtl8180_tx(dev, NULL, skb);
714 716
715resched: 717resched:
716 /* 718 /*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 533024095c43..7811b6315973 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
228 } 228 }
229} 229}
230 230
231static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 231static void rtl8187_tx(struct ieee80211_hw *dev,
232 struct ieee80211_tx_control *control,
233 struct sk_buff *skb)
232{ 234{
233 struct rtl8187_priv *priv = dev->priv; 235 struct rtl8187_priv *priv = dev->priv;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 236 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
1076 /* TODO: use actual beacon queue */ 1078 /* TODO: use actual beacon queue */
1077 skb_set_queue_mapping(skb, 0); 1079 skb_set_queue_mapping(skb, 0);
1078 1080
1079 rtl8187_tx(dev, skb); 1081 rtl8187_tx(dev, NULL, skb);
1080 1082
1081resched: 1083resched:
1082 /* 1084 /*
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index cefac6a43601..6b28e92d1d21 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,6 +1,6 @@
1config RTL8192CE 1config RTL8192CE
2 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" 2 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
3 depends on MAC80211 && PCI && EXPERIMENTAL 3 depends on MAC80211 && PCI
4 select FW_LOADER 4 select FW_LOADER
5 select RTLWIFI 5 select RTLWIFI
6 select RTL8192C_COMMON 6 select RTL8192C_COMMON
@@ -12,7 +12,7 @@ config RTL8192CE
12 12
13config RTL8192SE 13config RTL8192SE
14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" 14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
15 depends on MAC80211 && EXPERIMENTAL && PCI 15 depends on MAC80211 && PCI
16 select FW_LOADER 16 select FW_LOADER
17 select RTLWIFI 17 select RTLWIFI
18 ---help--- 18 ---help---
@@ -23,7 +23,7 @@ config RTL8192SE
23 23
24config RTL8192DE 24config RTL8192DE
25 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" 25 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
26 depends on MAC80211 && EXPERIMENTAL && PCI 26 depends on MAC80211 && PCI
27 select FW_LOADER 27 select FW_LOADER
28 select RTLWIFI 28 select RTLWIFI
29 ---help--- 29 ---help---
@@ -34,7 +34,7 @@ config RTL8192DE
34 34
35config RTL8192CU 35config RTL8192CU
36 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" 36 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
37 depends on MAC80211 && USB && EXPERIMENTAL 37 depends on MAC80211 && USB
38 select FW_LOADER 38 select FW_LOADER
39 select RTLWIFI 39 select RTLWIFI
40 select RTL8192C_COMMON 40 select RTL8192C_COMMON
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 942e56b77b60..59381fe8ed06 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
1341 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); 1341 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
1342 1342
1343 info->control.rates[0].idx = 0; 1343 info->control.rates[0].idx = 0;
1344 info->control.sta = sta;
1345 info->band = hw->conf.channel->band; 1344 info->band = hw->conf.channel->band;
1346 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 1345 rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
1347 } 1346 }
1348err_free: 1347err_free:
1349 return 0; 1348 return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a18ad2a98938..a7c0e52869ba 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
124 mutex_unlock(&rtlpriv->locks.conf_mutex); 124 mutex_unlock(&rtlpriv->locks.conf_mutex);
125} 125}
126 126
127static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 127static void rtl_op_tx(struct ieee80211_hw *hw,
128 struct ieee80211_tx_control *control,
129 struct sk_buff *skb)
128{ 130{
129 struct rtl_priv *rtlpriv = rtl_priv(hw); 131 struct rtl_priv *rtlpriv = rtl_priv(hw);
130 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 132 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
138 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) 140 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
139 goto err_free; 141 goto err_free;
140 142
141 if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) 143 if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
142 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 144 rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
143 145
144 return; 146 return;
145 147
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5983631a1b1a..abc306b502ac 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -502,7 +502,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
502 _rtl_update_earlymode_info(hw, skb, 502 _rtl_update_earlymode_info(hw, skb,
503 &tcb_desc, tid); 503 &tcb_desc, tid);
504 504
505 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 505 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
506 } 506 }
507 } 507 }
508} 508}
@@ -927,7 +927,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
927 info = IEEE80211_SKB_CB(pskb); 927 info = IEEE80211_SKB_CB(pskb);
928 pdesc = &ring->desc[0]; 928 pdesc = &ring->desc[0];
929 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 929 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
930 info, pskb, BEACON_QUEUE, &tcb_desc); 930 info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
931 931
932 __skb_queue_tail(&ring->queue, pskb); 932 __skb_queue_tail(&ring->queue, pskb);
933 933
@@ -1303,11 +1303,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1303} 1303}
1304 1304
1305static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, 1305static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1306 struct ieee80211_sta *sta,
1306 struct sk_buff *skb) 1307 struct sk_buff *skb)
1307{ 1308{
1308 struct rtl_priv *rtlpriv = rtl_priv(hw); 1309 struct rtl_priv *rtlpriv = rtl_priv(hw);
1309 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1310 struct ieee80211_sta *sta = info->control.sta;
1311 struct rtl_sta_info *sta_entry = NULL; 1310 struct rtl_sta_info *sta_entry = NULL;
1312 u8 tid = rtl_get_tid(skb); 1311 u8 tid = rtl_get_tid(skb);
1313 1312
@@ -1335,13 +1334,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1335 return true; 1334 return true;
1336} 1335}
1337 1336
1338static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 1337static int rtl_pci_tx(struct ieee80211_hw *hw,
1339 struct rtl_tcb_desc *ptcb_desc) 1338 struct ieee80211_sta *sta,
1339 struct sk_buff *skb,
1340 struct rtl_tcb_desc *ptcb_desc)
1340{ 1341{
1341 struct rtl_priv *rtlpriv = rtl_priv(hw); 1342 struct rtl_priv *rtlpriv = rtl_priv(hw);
1342 struct rtl_sta_info *sta_entry = NULL; 1343 struct rtl_sta_info *sta_entry = NULL;
1343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1344 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1344 struct ieee80211_sta *sta = info->control.sta;
1345 struct rtl8192_tx_ring *ring; 1345 struct rtl8192_tx_ring *ring;
1346 struct rtl_tx_desc *pdesc; 1346 struct rtl_tx_desc *pdesc;
1347 u8 idx; 1347 u8 idx;
@@ -1416,7 +1416,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1416 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 1416 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1417 1417
1418 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, 1418 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1419 info, skb, hw_queue, ptcb_desc); 1419 info, sta, skb, hw_queue, ptcb_desc);
1420 1420
1421 __skb_queue_tail(&ring->queue, skb); 1421 __skb_queue_tail(&ring->queue, skb);
1422 1422
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index a45afda8259c..1ca4e25c143b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -167,7 +167,7 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
168 dm_digtable->cur_igvalue = 0x20; 168 dm_digtable->cur_igvalue = 0x20;
169 dm_digtable->pre_igvalue = 0x0; 169 dm_digtable->pre_igvalue = 0x0;
170 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 170 dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT; 171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -190,7 +190,7 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
190 long rssi_val_min = 0; 190 long rssi_val_min = 0;
191 191
192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) && 192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
193 (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) { 193 (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) 194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
195 rssi_val_min = 195 rssi_val_min =
196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > 196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -199,8 +199,8 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
200 else 200 else
201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
202 } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT || 202 } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
203 dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) { 203 dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
205 } else if (dm_digtable->curmultista_connectstate == 205 } else if (dm_digtable->curmultista_connectstate ==
206 DIG_MULTISTA_CONNECT) { 206 DIG_MULTISTA_CONNECT) {
@@ -334,7 +334,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
334 multi_sta = true; 334 multi_sta = true;
335 335
336 if (!multi_sta || 336 if (!multi_sta ||
337 dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) { 337 dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
338 initialized = false; 338 initialized = false;
339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
340 return; 340 return;
@@ -378,15 +378,15 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
379 379
380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
381 "presta_connectstate = %x, cursta_connectctate = %x\n", 381 "presta_connectstate = %x, cursta_connectstate = %x\n",
382 dm_digtable->presta_connectstate, 382 dm_digtable->presta_connectstate,
383 dm_digtable->cursta_connectctate); 383 dm_digtable->cursta_connectstate);
384 384
385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate 385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
386 || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT 386 || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
387 || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) { 387 || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
388 388
389 if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) { 389 if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
390 dm_digtable->rssi_val_min = 390 dm_digtable->rssi_val_min =
391 rtl92c_dm_initial_gain_min_pwdb(hw); 391 rtl92c_dm_initial_gain_min_pwdb(hw);
392 rtl92c_dm_ctrl_initgain_by_rssi(hw); 392 rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -407,7 +407,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
409 409
410 if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) { 410 if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); 411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
412 412
413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +484,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
484 return; 484 return;
485 485
486 if (mac->link_state >= MAC80211_LINKED) 486 if (mac->link_state >= MAC80211_LINKED)
487 dm_digtable->cursta_connectctate = DIG_STA_CONNECT; 487 dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
488 else 488 else
489 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 489 dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
490 490
491 rtl92c_dm_initial_gain_sta(hw); 491 rtl92c_dm_initial_gain_sta(hw);
492 rtl92c_dm_initial_gain_multi_sta(hw); 492 rtl92c_dm_initial_gain_multi_sta(hw);
493 rtl92c_dm_cck_packet_detection_thresh(hw); 493 rtl92c_dm_cck_packet_detection_thresh(hw);
494 494
495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate; 495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
496 496
497} 497}
498 498
@@ -1214,18 +1214,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1214 "PreState = %d, CurState = %d\n", 1214 "PreState = %d, CurState = %d\n",
1215 p_ra->pre_ratr_state, p_ra->ratr_state); 1215 p_ra->pre_ratr_state, p_ra->ratr_state);
1216 1216
1217 /* Only the PCI card uses sta in the update rate table 1217 rcu_read_lock();
1218 * callback routine */ 1218 sta = ieee80211_find_sta(mac->vif, mac->bssid);
1219 if (rtlhal->interface == INTF_PCI) {
1220 rcu_read_lock();
1221 sta = ieee80211_find_sta(mac->vif, mac->bssid);
1222 }
1223 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 1219 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
1224 p_ra->ratr_state); 1220 p_ra->ratr_state);
1225 1221
1226 p_ra->pre_ratr_state = p_ra->ratr_state; 1222 p_ra->pre_ratr_state = p_ra->ratr_state;
1227 if (rtlhal->interface == INTF_PCI) 1223 rcu_read_unlock();
1228 rcu_read_unlock();
1229 } 1224 }
1230 } 1225 }
1231} 1226}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 8a7b864faca3..883f23ae9519 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -577,8 +577,7 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
577 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 577 ring = &rtlpci->tx_ring[BEACON_QUEUE];
578 578
579 pskb = __skb_dequeue(&ring->queue); 579 pskb = __skb_dequeue(&ring->queue);
580 if (pskb) 580 kfree_skb(pskb);
581 kfree_skb(pskb);
582 581
583 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 582 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
584 583
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index dd4bb0950a57..86d73b32d995 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -1914,8 +1914,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1914 } 1914 }
1915 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1915 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1916 "ratr_bitmap :%x\n", ratr_bitmap); 1916 "ratr_bitmap :%x\n", ratr_bitmap);
1917 *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | 1917 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
1918 (ratr_index << 28)); 1918 (ratr_index << 28);
1919 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 1919 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
1920 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1920 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1921 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", 1921 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 7d8f96405f42..ea2e1bd847c8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -344,7 +344,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
344 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 344 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
345}; 345};
346 346
347DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { 347static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
348 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)}, 348 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
349 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)}, 349 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
350 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)}, 350 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52166640f167..390d6d4fcaa0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
596 596
597void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 597void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
598 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 598 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
599 struct ieee80211_tx_info *info, struct sk_buff *skb, 599 struct ieee80211_tx_info *info,
600 struct ieee80211_sta *sta,
601 struct sk_buff *skb,
600 u8 hw_queue, struct rtl_tcb_desc *tcb_desc) 602 u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
601{ 603{
602 struct rtl_priv *rtlpriv = rtl_priv(hw); 604 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
604 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 606 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 607 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
606 bool defaultadapter = true; 608 bool defaultadapter = true;
607 struct ieee80211_sta *sta;
608 u8 *pdesc = pdesc_tx; 609 u8 *pdesc = pdesc_tx;
609 u16 seq_number; 610 u16 seq_number;
610 __le16 fc = hdr->frame_control; 611 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index c4adb9777365..a7cdd514cb2e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -713,6 +713,7 @@ struct rx_desc_92c {
713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
714 struct ieee80211_hdr *hdr, 714 struct ieee80211_hdr *hdr,
715 u8 *pdesc, struct ieee80211_tx_info *info, 715 u8 *pdesc, struct ieee80211_tx_info *info,
716 struct ieee80211_sta *sta,
716 struct sk_buff *skb, u8 hw_queue, 717 struct sk_buff *skb, u8 hw_queue,
717 struct rtl_tcb_desc *ptcb_desc); 718 struct rtl_tcb_desc *ptcb_desc);
718bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, 719bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 2e6eb356a93e..6e66f04c363f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,12 +491,14 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); 491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
492 for (index = 0; index < 16; index++) 492 for (index = 0; index < 16; index++)
493 checksum = checksum ^ (*(ptr + index)); 493 checksum = checksum ^ (*(ptr + index));
494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum)); 494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
495} 495}
496 496
497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
498 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 498 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
499 struct ieee80211_tx_info *info, struct sk_buff *skb, 499 struct ieee80211_tx_info *info,
500 struct ieee80211_sta *sta,
501 struct sk_buff *skb,
500 u8 queue_index, 502 u8 queue_index,
501 struct rtl_tcb_desc *tcb_desc) 503 struct rtl_tcb_desc *tcb_desc)
502{ 504{
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 506 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 507 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
506 bool defaultadapter = true; 508 bool defaultadapter = true;
507 struct ieee80211_sta *sta = info->control.sta = info->control.sta;
508 u8 *qc = ieee80211_get_qos_ctl(hdr); 509 u8 *qc = ieee80211_get_qos_ctl(hdr);
509 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
510 u16 seq_number; 511 u16 seq_number;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 332b06e78b00..725c53accc58 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
420 struct sk_buff_head *); 420 struct sk_buff_head *);
421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
422 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 422 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
423 struct ieee80211_tx_info *info, struct sk_buff *skb, 423 struct ieee80211_tx_info *info,
424 struct ieee80211_sta *sta,
425 struct sk_buff *skb,
424 u8 queue_index, 426 u8 queue_index,
425 struct rtl_tcb_desc *tcb_desc); 427 struct rtl_tcb_desc *tcb_desc);
426void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, 428void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index c0201ed69dd7..ed868c396c25 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -164,7 +164,7 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
165 de_digtable->cur_igvalue = 0x20; 165 de_digtable->cur_igvalue = 0x20;
166 de_digtable->pre_igvalue = 0x0; 166 de_digtable->pre_igvalue = 0x0;
167 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 167 de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT; 168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -310,7 +310,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
310 struct dig_t *de_digtable = &rtlpriv->dm_digtable; 310 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
311 unsigned long flag = 0; 311 unsigned long flag = 0;
312 312
313 if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) { 313 if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25) 315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
316 de_digtable->cur_cck_pd_state = 316 de_digtable->cur_cck_pd_state =
@@ -342,7 +342,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; 342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
343 } 343 }
344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", 344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
345 de_digtable->cursta_connectctate == DIG_STA_CONNECT ? 345 de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); 346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", 347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -428,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); 428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
429 /* Decide the current status and if modify initial gain or not */ 429 /* Decide the current status and if modify initial gain or not */
430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) 430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
431 de_digtable->cursta_connectctate = DIG_STA_CONNECT; 431 de_digtable->cursta_connectstate = DIG_STA_CONNECT;
432 else 432 else
433 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT; 433 de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
434 434
435 /* adjust initial gain according to false alarm counter */ 435 /* adjust initial gain according to false alarm counter */
436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) 436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index eb22dccc418b..23177076b97f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -570,8 +570,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
570 570
571 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 571 ring = &rtlpci->tx_ring[BEACON_QUEUE];
572 pskb = __skb_dequeue(&ring->queue); 572 pskb = __skb_dequeue(&ring->queue);
573 if (pskb) 573 kfree_skb(pskb);
574 kfree_skb(pskb);
575 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 574 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
576 pdesc = &ring->desc[idx]; 575 pdesc = &ring->desc[idx];
577 /* discard output from call below */ 576 /* discard output from call below */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 442031256bce..db0086062d05 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1314,7 +1314,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
1314 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; 1314 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
1315 1315
1316 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n"); 1316 RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
1317 /*----Restore RFENV control type----*/ ; 1317 /*----Restore RFENV control type----*/
1318 switch (rfpath) { 1318 switch (rfpath) {
1319 case RF90_PATH_A: 1319 case RF90_PATH_A:
1320 case RF90_PATH_C: 1320 case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index f80690d82c11..4686f340b9d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
551 551
552void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 552void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
553 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 553 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
554 struct ieee80211_tx_info *info, struct sk_buff *skb, 554 struct ieee80211_tx_info *info,
555 struct ieee80211_sta *sta,
556 struct sk_buff *skb,
555 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 557 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
556{ 558{
557 struct rtl_priv *rtlpriv = rtl_priv(hw); 559 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
559 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 561 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 562 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 563 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
562 struct ieee80211_sta *sta = info->control.sta;
563 u8 *pdesc = pdesc_tx; 564 u8 *pdesc = pdesc_tx;
564 u16 seq_number; 565 u16 seq_number;
565 __le16 fc = hdr->frame_control; 566 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 057a52431b00..c1b5dfb79d53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -730,6 +730,7 @@ struct rx_desc_92d {
730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
731 struct ieee80211_hdr *hdr, 731 struct ieee80211_hdr *hdr,
732 u8 *pdesc, struct ieee80211_tx_info *info, 732 u8 *pdesc, struct ieee80211_tx_info *info,
733 struct ieee80211_sta *sta,
733 struct sk_buff *skb, u8 hw_queue, 734 struct sk_buff *skb, u8 hw_queue,
734 struct rtl_tcb_desc *ptcb_desc); 735 struct rtl_tcb_desc *ptcb_desc);
735bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, 736bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36d1cb3aef8a..e3cf4c02122a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
591 591
592void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, 592void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
593 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 593 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
594 struct ieee80211_tx_info *info, struct sk_buff *skb, 594 struct ieee80211_tx_info *info,
595 struct ieee80211_sta *sta,
596 struct sk_buff *skb,
595 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 597 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
596{ 598{
597 struct rtl_priv *rtlpriv = rtl_priv(hw); 599 struct rtl_priv *rtlpriv = rtl_priv(hw);
598 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 600 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 601 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 602 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
601 struct ieee80211_sta *sta = info->control.sta;
602 u8 *pdesc = pdesc_tx; 603 u8 *pdesc = pdesc_tx;
603 u16 seq_number; 604 u16 seq_number;
604 __le16 fc = hdr->frame_control; 605 __le16 fc = hdr->frame_control;
@@ -755,7 +756,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
755 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); 756 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
756 757
757 /* DOWRD 8 */ 758 /* DOWRD 8 */
758 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 759 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
759 760
760 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); 761 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
761} 762}
@@ -785,7 +786,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
785 /* 92SE need not to set TX packet size when firmware download */ 786 /* 92SE need not to set TX packet size when firmware download */
786 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len)); 787 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
787 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); 788 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
788 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 789 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
789 790
790 wmb(); 791 wmb();
791 SET_TX_DESC_OWN(pdesc, 1); 792 SET_TX_DESC_OWN(pdesc, 1);
@@ -804,7 +805,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
804 SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq); 805 SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
805 806
806 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); 807 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
807 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 808 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
808 809
809 wmb(); 810 wmb();
810 SET_TX_DESC_OWN(pdesc, 1); 811 SET_TX_DESC_OWN(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 011e7b0695f2..64dd66f287c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -31,6 +31,7 @@
31 31
32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, 32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
33 u8 *pdesc, struct ieee80211_tx_info *info, 33 u8 *pdesc, struct ieee80211_tx_info *info,
34 struct ieee80211_sta *sta,
34 struct sk_buff *skb, u8 hw_queue, 35 struct sk_buff *skb, u8 hw_queue,
35 struct rtl_tcb_desc *ptcb_desc); 36 struct rtl_tcb_desc *ptcb_desc);
36void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, 37void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index aa970fc18a21..030beb45d8b0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -120,7 +120,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
120 120
121 if (status < 0 && count++ < 4) 121 if (status < 0 && count++ < 4)
122 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n", 122 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
123 value, status, le32_to_cpu(*(u32 *)pdata)); 123 value, status, *(u32 *)pdata);
124 return status; 124 return status;
125} 125}
126 126
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
848 _rtl_submit_tx_urb(hw, _urb); 848 _rtl_submit_tx_urb(hw, _urb);
849} 849}
850 850
851static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, 851static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
852 u16 hw_queue) 852 struct ieee80211_sta *sta,
853 struct sk_buff *skb,
854 u16 hw_queue)
853{ 855{
854 struct rtl_priv *rtlpriv = rtl_priv(hw); 856 struct rtl_priv *rtlpriv = rtl_priv(hw);
855 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 857 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
891 seq_number += 1; 893 seq_number += 1;
892 seq_number <<= 4; 894 seq_number <<= 4;
893 } 895 }
894 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb, 896 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
895 hw_queue, &tcb_desc); 897 hw_queue, &tcb_desc);
896 if (!ieee80211_has_morefrags(hdr->frame_control)) { 898 if (!ieee80211_has_morefrags(hdr->frame_control)) {
897 if (qc) 899 if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
901 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 903 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
902} 904}
903 905
904static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 906static int rtl_usb_tx(struct ieee80211_hw *hw,
907 struct ieee80211_sta *sta,
908 struct sk_buff *skb,
905 struct rtl_tcb_desc *dummy) 909 struct rtl_tcb_desc *dummy)
906{ 910{
907 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); 911 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
913 if (unlikely(is_hal_stop(rtlhal))) 917 if (unlikely(is_hal_stop(rtlhal)))
914 goto err_free; 918 goto err_free;
915 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); 919 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
916 _rtl_usb_tx_preprocess(hw, skb, hw_queue); 920 _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
917 _rtl_usb_transmit(hw, skb, hw_queue); 921 _rtl_usb_transmit(hw, skb, hw_queue);
918 return NETDEV_TX_OK; 922 return NETDEV_TX_OK;
919 923
@@ -923,6 +927,7 @@ err_free:
923} 927}
924 928
925static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, 929static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
930 struct ieee80211_sta *sta,
926 struct sk_buff *skb) 931 struct sk_buff *skb)
927{ 932{
928 return false; 933 return false;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cdaa21f29710..f1b6bc693b0a 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
122 EEPROM_BOOT_EFUSE, 122 EEPROM_BOOT_EFUSE,
123}; 123};
124 124
125enum rtl_status { 125enum ttl_status {
126 RTL_STATUS_INTERFACE_START = 0, 126 RTL_STATUS_INTERFACE_START = 0,
127}; 127};
128 128
@@ -135,7 +135,7 @@ enum hardware_type {
135 HARDWARE_TYPE_RTL8192CU, 135 HARDWARE_TYPE_RTL8192CU,
136 HARDWARE_TYPE_RTL8192DE, 136 HARDWARE_TYPE_RTL8192DE,
137 HARDWARE_TYPE_RTL8192DU, 137 HARDWARE_TYPE_RTL8192DU,
138 HARDWARE_TYPE_RTL8723E, 138 HARDWARE_TYPE_RTL8723AE,
139 HARDWARE_TYPE_RTL8723U, 139 HARDWARE_TYPE_RTL8723U,
140 140
141 /* keep it last */ 141 /* keep it last */
@@ -389,6 +389,7 @@ enum rt_enc_alg {
389 RSERVED_ENCRYPTION = 3, 389 RSERVED_ENCRYPTION = 3,
390 AESCCMP_ENCRYPTION = 4, 390 AESCCMP_ENCRYPTION = 4,
391 WEP104_ENCRYPTION = 5, 391 WEP104_ENCRYPTION = 5,
392 AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
392}; 393};
393 394
394enum rtl_hal_state { 395enum rtl_hal_state {
@@ -873,6 +874,7 @@ struct rtl_phy {
873 u32 adda_backup[16]; 874 u32 adda_backup[16];
874 u32 iqk_mac_backup[IQK_MAC_REG_NUM]; 875 u32 iqk_mac_backup[IQK_MAC_REG_NUM];
875 u32 iqk_bb_backup[10]; 876 u32 iqk_bb_backup[10];
877 bool iqk_initialized;
876 878
877 /* Dual mac */ 879 /* Dual mac */
878 bool need_iqk; 880 bool need_iqk;
@@ -910,6 +912,8 @@ struct rtl_phy {
910#define RTL_AGG_OPERATIONAL 3 912#define RTL_AGG_OPERATIONAL 3
911#define RTL_AGG_OFF 0 913#define RTL_AGG_OFF 0
912#define RTL_AGG_ON 1 914#define RTL_AGG_ON 1
915#define RTL_RX_AGG_START 1
916#define RTL_RX_AGG_STOP 0
913#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2 917#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
914#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3 918#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3
915 919
@@ -920,6 +924,7 @@ struct rtl_ht_agg {
920 u64 bitmap; 924 u64 bitmap;
921 u32 rate_n_flags; 925 u32 rate_n_flags;
922 u8 agg_state; 926 u8 agg_state;
927 u8 rx_agg_state;
923}; 928};
924 929
925struct rtl_tid_data { 930struct rtl_tid_data {
@@ -927,11 +932,19 @@ struct rtl_tid_data {
927 struct rtl_ht_agg agg; 932 struct rtl_ht_agg agg;
928}; 933};
929 934
935struct rssi_sta {
936 long undecorated_smoothed_pwdb;
937};
938
930struct rtl_sta_info { 939struct rtl_sta_info {
940 struct list_head list;
931 u8 ratr_index; 941 u8 ratr_index;
932 u8 wireless_mode; 942 u8 wireless_mode;
933 u8 mimo_ps; 943 u8 mimo_ps;
934 struct rtl_tid_data tids[MAX_TID_COUNT]; 944 struct rtl_tid_data tids[MAX_TID_COUNT];
945
946 /* just used for ap adhoc or mesh*/
947 struct rssi_sta rssi_stat;
935} __packed; 948} __packed;
936 949
937struct rtl_priv; 950struct rtl_priv;
@@ -1034,6 +1047,11 @@ struct rtl_mac {
1034struct rtl_hal { 1047struct rtl_hal {
1035 struct ieee80211_hw *hw; 1048 struct ieee80211_hw *hw;
1036 1049
1050 bool up_first_time;
1051 bool first_init;
1052 bool being_init_adapter;
1053 bool bbrf_ready;
1054
1037 enum intf_type interface; 1055 enum intf_type interface;
1038 u16 hw_type; /*92c or 92d or 92s and so on */ 1056 u16 hw_type; /*92c or 92d or 92s and so on */
1039 u8 ic_class; 1057 u8 ic_class;
@@ -1048,6 +1066,7 @@ struct rtl_hal {
1048 u16 fw_subversion; 1066 u16 fw_subversion;
1049 bool h2c_setinprogress; 1067 bool h2c_setinprogress;
1050 u8 last_hmeboxnum; 1068 u8 last_hmeboxnum;
1069 bool fw_ready;
1051 /*Reserve page start offset except beacon in TxQ. */ 1070 /*Reserve page start offset except beacon in TxQ. */
1052 u8 fw_rsvdpage_startoffset; 1071 u8 fw_rsvdpage_startoffset;
1053 u8 h2c_txcmd_seq; 1072 u8 h2c_txcmd_seq;
@@ -1083,6 +1102,8 @@ struct rtl_hal {
1083 bool load_imrandiqk_setting_for2g; 1102 bool load_imrandiqk_setting_for2g;
1084 1103
1085 bool disable_amsdu_8k; 1104 bool disable_amsdu_8k;
1105 bool master_of_dmsp;
1106 bool slave_of_dmsp;
1086}; 1107};
1087 1108
1088struct rtl_security { 1109struct rtl_security {
@@ -1144,6 +1165,9 @@ struct rtl_dm {
1144 bool disable_tx_int; 1165 bool disable_tx_int;
1145 char ofdm_index[2]; 1166 char ofdm_index[2];
1146 char cck_index; 1167 char cck_index;
1168
1169 /* DMSP */
1170 bool supp_phymode_switch;
1147}; 1171};
1148 1172
1149#define EFUSE_MAX_LOGICAL_SIZE 256 1173#define EFUSE_MAX_LOGICAL_SIZE 256
@@ -1337,6 +1361,10 @@ struct rtl_stats {
1337}; 1361};
1338 1362
1339struct rt_link_detect { 1363struct rt_link_detect {
1364 /* count for roaming */
1365 u32 bcn_rx_inperiod;
1366 u32 roam_times;
1367
1340 u32 num_tx_in4period[4]; 1368 u32 num_tx_in4period[4];
1341 u32 num_rx_in4period[4]; 1369 u32 num_rx_in4period[4];
1342 1370
@@ -1344,6 +1372,8 @@ struct rt_link_detect {
1344 u32 num_rx_inperiod; 1372 u32 num_rx_inperiod;
1345 1373
1346 bool busytraffic; 1374 bool busytraffic;
1375 bool tx_busy_traffic;
1376 bool rx_busy_traffic;
1347 bool higher_busytraffic; 1377 bool higher_busytraffic;
1348 bool higher_busyrxtraffic; 1378 bool higher_busyrxtraffic;
1349 1379
@@ -1418,6 +1448,7 @@ struct rtl_hal_ops {
1418 void (*fill_tx_desc) (struct ieee80211_hw *hw, 1448 void (*fill_tx_desc) (struct ieee80211_hw *hw,
1419 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1449 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1420 struct ieee80211_tx_info *info, 1450 struct ieee80211_tx_info *info,
1451 struct ieee80211_sta *sta,
1421 struct sk_buff *skb, u8 hw_queue, 1452 struct sk_buff *skb, u8 hw_queue,
1422 struct rtl_tcb_desc *ptcb_desc); 1453 struct rtl_tcb_desc *ptcb_desc);
1423 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc, 1454 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1454,7 +1485,12 @@ struct rtl_hal_ops {
1454 u32 regaddr, u32 bitmask); 1485 u32 regaddr, u32 bitmask);
1455 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1486 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1456 u32 regaddr, u32 bitmask, u32 data); 1487 u32 regaddr, u32 bitmask, u32 data);
1488 void (*allow_all_destaddr)(struct ieee80211_hw *hw,
1489 bool allow_all_da, bool write_into_reg);
1457 void (*linked_set_reg) (struct ieee80211_hw *hw); 1490 void (*linked_set_reg) (struct ieee80211_hw *hw);
1491 void (*check_switch_to_dmdp) (struct ieee80211_hw *hw);
1492 void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
1493 void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
1458 bool (*phy_rf6052_config) (struct ieee80211_hw *hw); 1494 bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
1459 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw, 1495 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
1460 u8 *powerlevel); 1496 u8 *powerlevel);
@@ -1474,12 +1510,18 @@ struct rtl_intf_ops {
1474 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); 1510 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
1475 int (*adapter_start) (struct ieee80211_hw *hw); 1511 int (*adapter_start) (struct ieee80211_hw *hw);
1476 void (*adapter_stop) (struct ieee80211_hw *hw); 1512 void (*adapter_stop) (struct ieee80211_hw *hw);
1513 bool (*check_buddy_priv)(struct ieee80211_hw *hw,
1514 struct rtl_priv **buddy_priv);
1477 1515
1478 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb, 1516 int (*adapter_tx) (struct ieee80211_hw *hw,
1479 struct rtl_tcb_desc *ptcb_desc); 1517 struct ieee80211_sta *sta,
1518 struct sk_buff *skb,
1519 struct rtl_tcb_desc *ptcb_desc);
1480 void (*flush)(struct ieee80211_hw *hw, bool drop); 1520 void (*flush)(struct ieee80211_hw *hw, bool drop);
1481 int (*reset_trx_ring) (struct ieee80211_hw *hw); 1521 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1482 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); 1522 bool (*waitq_insert) (struct ieee80211_hw *hw,
1523 struct ieee80211_sta *sta,
1524 struct sk_buff *skb);
1483 1525
1484 /*pci */ 1526 /*pci */
1485 void (*disable_aspm) (struct ieee80211_hw *hw); 1527 void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1554,11 +1596,16 @@ struct rtl_locks {
1554 spinlock_t h2c_lock; 1596 spinlock_t h2c_lock;
1555 spinlock_t rf_ps_lock; 1597 spinlock_t rf_ps_lock;
1556 spinlock_t rf_lock; 1598 spinlock_t rf_lock;
1599 spinlock_t lps_lock;
1557 spinlock_t waitq_lock; 1600 spinlock_t waitq_lock;
1601 spinlock_t entry_list_lock;
1558 spinlock_t usb_lock; 1602 spinlock_t usb_lock;
1559 1603
1560 /*Dual mac*/ 1604 /*Dual mac*/
1561 spinlock_t cck_and_rw_pagea_lock; 1605 spinlock_t cck_and_rw_pagea_lock;
1606
1607 /*Easy concurrent*/
1608 spinlock_t check_sendpkt_lock;
1562}; 1609};
1563 1610
1564struct rtl_works { 1611struct rtl_works {
@@ -1566,6 +1613,7 @@ struct rtl_works {
1566 1613
1567 /*timer */ 1614 /*timer */
1568 struct timer_list watchdog_timer; 1615 struct timer_list watchdog_timer;
1616 struct timer_list dualmac_easyconcurrent_retrytimer;
1569 1617
1570 /*task */ 1618 /*task */
1571 struct tasklet_struct irq_tasklet; 1619 struct tasklet_struct irq_tasklet;
@@ -1593,6 +1641,31 @@ struct rtl_debug {
1593 char proc_name[20]; 1641 char proc_name[20];
1594}; 1642};
1595 1643
1644#define MIMO_PS_STATIC 0
1645#define MIMO_PS_DYNAMIC 1
1646#define MIMO_PS_NOLIMIT 3
1647
1648struct rtl_dualmac_easy_concurrent_ctl {
1649 enum band_type currentbandtype_backfordmdp;
1650 bool close_bbandrf_for_dmsp;
1651 bool change_to_dmdp;
1652 bool change_to_dmsp;
1653 bool switch_in_process;
1654};
1655
1656struct rtl_dmsp_ctl {
1657 bool activescan_for_slaveofdmsp;
1658 bool scan_for_anothermac_fordmsp;
1659 bool scan_for_itself_fordmsp;
1660 bool writedig_for_anothermacofdmsp;
1661 u32 curdigvalue_for_anothermacofdmsp;
1662 bool changecckpdstate_for_anothermacofdmsp;
1663 u8 curcckpdstate_for_anothermacofdmsp;
1664 bool changetxhighpowerlvl_for_anothermacofdmsp;
1665 u8 curtxhighlvl_for_anothermacofdmsp;
1666 long rssivalmin_for_anothermacofdmsp;
1667};
1668
1596struct ps_t { 1669struct ps_t {
1597 u8 pre_ccastate; 1670 u8 pre_ccastate;
1598 u8 cur_ccasate; 1671 u8 cur_ccasate;
@@ -1619,7 +1692,7 @@ struct dig_t {
1619 u8 dig_twoport_algorithm; 1692 u8 dig_twoport_algorithm;
1620 u8 dig_dbgmode; 1693 u8 dig_dbgmode;
1621 u8 dig_slgorithm_switch; 1694 u8 dig_slgorithm_switch;
1622 u8 cursta_connectctate; 1695 u8 cursta_connectstate;
1623 u8 presta_connectstate; 1696 u8 presta_connectstate;
1624 u8 curmultista_connectstate; 1697 u8 curmultista_connectstate;
1625 char backoff_val; 1698 char backoff_val;
@@ -1652,8 +1725,20 @@ struct dig_t {
1652 char backoffval_range_min; 1725 char backoffval_range_min;
1653}; 1726};
1654 1727
1728struct rtl_global_var {
1729 /* from this list we can get
1730 * other adapter's rtl_priv */
1731 struct list_head glb_priv_list;
1732 spinlock_t glb_list_lock;
1733};
1734
1655struct rtl_priv { 1735struct rtl_priv {
1656 struct completion firmware_loading_complete; 1736 struct completion firmware_loading_complete;
1737 struct list_head list;
1738 struct rtl_priv *buddy_priv;
1739 struct rtl_global_var *glb_var;
1740 struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
1741 struct rtl_dmsp_ctl dmsp_ctl;
1657 struct rtl_locks locks; 1742 struct rtl_locks locks;
1658 struct rtl_works works; 1743 struct rtl_works works;
1659 struct rtl_mac mac80211; 1744 struct rtl_mac mac80211;
@@ -1674,6 +1759,9 @@ struct rtl_priv {
1674 1759
1675 struct rtl_rate_priv *rate_priv; 1760 struct rtl_rate_priv *rate_priv;
1676 1761
1762 /* sta entry list for ap adhoc or mesh */
1763 struct list_head entry_list;
1764
1677 struct rtl_debug dbg; 1765 struct rtl_debug dbg;
1678 int max_fw_size; 1766 int max_fw_size;
1679 1767
@@ -1815,9 +1903,9 @@ struct bt_coexist_info {
1815 EF1BYTE(*((u8 *)(_ptr))) 1903 EF1BYTE(*((u8 *)(_ptr)))
1816/* Read le16 data from memory and convert to host ordering */ 1904/* Read le16 data from memory and convert to host ordering */
1817#define READEF2BYTE(_ptr) \ 1905#define READEF2BYTE(_ptr) \
1818 EF2BYTE(*((u16 *)(_ptr))) 1906 EF2BYTE(*(_ptr))
1819#define READEF4BYTE(_ptr) \ 1907#define READEF4BYTE(_ptr) \
1820 EF4BYTE(*((u32 *)(_ptr))) 1908 EF4BYTE(*(_ptr))
1821 1909
1822/* Write data to memory */ 1910/* Write data to memory */
1823#define WRITEEF1BYTE(_ptr, _val) \ 1911#define WRITEEF1BYTE(_ptr, _val) \
@@ -1826,7 +1914,7 @@ struct bt_coexist_info {
1826#define WRITEEF2BYTE(_ptr, _val) \ 1914#define WRITEEF2BYTE(_ptr, _val) \
1827 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1915 (*((u16 *)(_ptr))) = EF2BYTE(_val)
1828#define WRITEEF4BYTE(_ptr, _val) \ 1916#define WRITEEF4BYTE(_ptr, _val) \
1829 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1917 (*((u32 *)(_ptr))) = EF2BYTE(_val)
1830 1918
1831/* Create a bit mask 1919/* Create a bit mask
1832 * Examples: 1920 * Examples:
@@ -1859,9 +1947,9 @@ struct bt_coexist_info {
1859 * 4-byte pointer in little-endian system. 1947 * 4-byte pointer in little-endian system.
1860 */ 1948 */
1861#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \ 1949#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
1862 (EF4BYTE(*((u32 *)(__pstart)))) 1950 (EF4BYTE(*((__le32 *)(__pstart))))
1863#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \ 1951#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
1864 (EF2BYTE(*((u16 *)(__pstart)))) 1952 (EF2BYTE(*((__le16 *)(__pstart))))
1865#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ 1953#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
1866 (EF1BYTE(*((u8 *)(__pstart)))) 1954 (EF1BYTE(*((u8 *)(__pstart))))
1867 1955
@@ -1908,13 +1996,13 @@ value to host byte ordering.*/
1908 * Set subfield of little-endian 4-byte value to specified value. 1996 * Set subfield of little-endian 4-byte value to specified value.
1909 */ 1997 */
1910#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1998#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
1911 *((u32 *)(__pstart)) = EF4BYTE \ 1999 *((u32 *)(__pstart)) = \
1912 ( \ 2000 ( \
1913 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \ 2001 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
1914 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \ 2002 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
1915 ); 2003 );
1916#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \ 2004#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
1917 *((u16 *)(__pstart)) = EF2BYTE \ 2005 *((u16 *)(__pstart)) = \
1918 ( \ 2006 ( \
1919 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \ 2007 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
1920 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \ 2008 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
@@ -2100,4 +2188,11 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
2100 return ieee80211_find_sta(vif, bssid); 2188 return ieee80211_find_sta(vif, bssid);
2101} 2189}
2102 2190
2191static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
2192 u8 *mac_addr)
2193{
2194 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2195 return ieee80211_find_sta(mac->vif, mac_addr);
2196}
2197
2103#endif 2198#endif
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3118c425bcf1..441cbccbd381 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -354,7 +354,9 @@ out:
354 return ret; 354 return ret;
355} 355}
356 356
357static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 357static void wl1251_op_tx(struct ieee80211_hw *hw,
358 struct ieee80211_tx_control *control,
359 struct sk_buff *skb)
358{ 360{
359 struct wl1251 *wl = hw->priv; 361 struct wl1251 *wl = hw->priv;
360 unsigned long flags; 362 unsigned long flags;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index f429fc110cb0..dadf1dbb002a 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -32,7 +32,6 @@
32#include "../wlcore/acx.h" 32#include "../wlcore/acx.h"
33#include "../wlcore/tx.h" 33#include "../wlcore/tx.h"
34#include "../wlcore/rx.h" 34#include "../wlcore/rx.h"
35#include "../wlcore/io.h"
36#include "../wlcore/boot.h" 35#include "../wlcore/boot.h"
37 36
38#include "wl12xx.h" 37#include "wl12xx.h"
@@ -1185,9 +1184,16 @@ static int wl12xx_enable_interrupts(struct wl1271 *wl)
1185 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, 1184 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
1186 WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK)); 1185 WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
1187 if (ret < 0) 1186 if (ret < 0)
1188 goto out; 1187 goto disable_interrupts;
1189 1188
1190 ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL); 1189 ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
1190 if (ret < 0)
1191 goto disable_interrupts;
1192
1193 return ret;
1194
1195disable_interrupts:
1196 wlcore_disable_interrupts(wl);
1191 1197
1192out: 1198out:
1193 return ret; 1199 return ret;
@@ -1583,7 +1589,10 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1583 return wlcore_set_key(wl, cmd, vif, sta, key_conf); 1589 return wlcore_set_key(wl, cmd, vif, sta, key_conf);
1584} 1590}
1585 1591
1592static int wl12xx_setup(struct wl1271 *wl);
1593
1586static struct wlcore_ops wl12xx_ops = { 1594static struct wlcore_ops wl12xx_ops = {
1595 .setup = wl12xx_setup,
1587 .identify_chip = wl12xx_identify_chip, 1596 .identify_chip = wl12xx_identify_chip,
1588 .identify_fw = wl12xx_identify_fw, 1597 .identify_fw = wl12xx_identify_fw,
1589 .boot = wl12xx_boot, 1598 .boot = wl12xx_boot,
@@ -1624,26 +1633,15 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
1624 }, 1633 },
1625}; 1634};
1626 1635
1627static int __devinit wl12xx_probe(struct platform_device *pdev) 1636static int wl12xx_setup(struct wl1271 *wl)
1628{ 1637{
1629 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 1638 struct wl12xx_priv *priv = wl->priv;
1630 struct wl1271 *wl; 1639 struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
1631 struct ieee80211_hw *hw;
1632 struct wl12xx_priv *priv;
1633
1634 hw = wlcore_alloc_hw(sizeof(*priv));
1635 if (IS_ERR(hw)) {
1636 wl1271_error("can't allocate hw");
1637 return PTR_ERR(hw);
1638 }
1639 1640
1640 wl = hw->priv;
1641 priv = wl->priv;
1642 wl->ops = &wl12xx_ops;
1643 wl->ptable = wl12xx_ptable;
1644 wl->rtable = wl12xx_rtable; 1641 wl->rtable = wl12xx_rtable;
1645 wl->num_tx_desc = 16; 1642 wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
1646 wl->num_rx_desc = 8; 1643 wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
1644 wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
1647 wl->band_rate_to_idx = wl12xx_band_rate_to_idx; 1645 wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
1648 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX; 1646 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
1649 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0; 1647 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1695,7 +1693,36 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
1695 wl1271_error("Invalid tcxo parameter %s", tcxo_param); 1693 wl1271_error("Invalid tcxo parameter %s", tcxo_param);
1696 } 1694 }
1697 1695
1698 return wlcore_probe(wl, pdev); 1696 return 0;
1697}
1698
1699static int __devinit wl12xx_probe(struct platform_device *pdev)
1700{
1701 struct wl1271 *wl;
1702 struct ieee80211_hw *hw;
1703 int ret;
1704
1705 hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
1706 WL12XX_AGGR_BUFFER_SIZE);
1707 if (IS_ERR(hw)) {
1708 wl1271_error("can't allocate hw");
1709 ret = PTR_ERR(hw);
1710 goto out;
1711 }
1712
1713 wl = hw->priv;
1714 wl->ops = &wl12xx_ops;
1715 wl->ptable = wl12xx_ptable;
1716 ret = wlcore_probe(wl, pdev);
1717 if (ret)
1718 goto out_free;
1719
1720 return ret;
1721
1722out_free:
1723 wlcore_free_hw(wl);
1724out:
1725 return ret;
1699} 1726}
1700 1727
1701static const struct platform_device_id wl12xx_id_table[] __devinitconst = { 1728static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
@@ -1714,17 +1741,7 @@ static struct platform_driver wl12xx_driver = {
1714 } 1741 }
1715}; 1742};
1716 1743
1717static int __init wl12xx_init(void) 1744module_platform_driver(wl12xx_driver);
1718{
1719 return platform_driver_register(&wl12xx_driver);
1720}
1721module_init(wl12xx_init);
1722
1723static void __exit wl12xx_exit(void)
1724{
1725 platform_driver_unregister(&wl12xx_driver);
1726}
1727module_exit(wl12xx_exit);
1728 1745
1729module_param_named(fref, fref_param, charp, 0); 1746module_param_named(fref, fref_param, charp, 0);
1730MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52"); 1747MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 26990fb4edea..7182bbf6625d 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
38#define WL128X_SUBTYPE_VER 2 38#define WL128X_SUBTYPE_VER 2
39#define WL128X_MINOR_VER 115 39#define WL128X_MINOR_VER 115
40 40
41#define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
42
43#define WL12XX_NUM_TX_DESCRIPTORS 16
44#define WL12XX_NUM_RX_DESCRIPTORS 8
45
46#define WL12XX_NUM_MAC_ADDRESSES 2
47
41struct wl127x_rx_mem_pool_addr { 48struct wl127x_rx_mem_pool_addr {
42 u32 addr; 49 u32 addr;
43 u32 addr_extra; 50 u32 addr_extra;
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 3ce6f1039af3..7f1669cdea09 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -220,7 +220,7 @@ static ssize_t clear_fw_stats_write(struct file *file,
220 220
221 mutex_lock(&wl->mutex); 221 mutex_lock(&wl->mutex);
222 222
223 if (wl->state == WL1271_STATE_OFF) 223 if (unlikely(wl->state != WLCORE_STATE_ON))
224 goto out; 224 goto out;
225 225
226 ret = wl18xx_acx_clear_statistics(wl); 226 ret = wl18xx_acx_clear_statistics(wl);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 69042bb9a097..a39682a7c25f 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -30,7 +30,6 @@
30#include "../wlcore/acx.h" 30#include "../wlcore/acx.h"
31#include "../wlcore/tx.h" 31#include "../wlcore/tx.h"
32#include "../wlcore/rx.h" 32#include "../wlcore/rx.h"
33#include "../wlcore/io.h"
34#include "../wlcore/boot.h" 33#include "../wlcore/boot.h"
35 34
36#include "reg.h" 35#include "reg.h"
@@ -46,7 +45,6 @@
46static char *ht_mode_param = NULL; 45static char *ht_mode_param = NULL;
47static char *board_type_param = NULL; 46static char *board_type_param = NULL;
48static bool checksum_param = false; 47static bool checksum_param = false;
49static bool enable_11a_param = true;
50static int num_rx_desc_param = -1; 48static int num_rx_desc_param = -1;
51 49
52/* phy paramters */ 50/* phy paramters */
@@ -416,7 +414,7 @@ static struct wlcore_conf wl18xx_conf = {
416 .snr_threshold = 0, 414 .snr_threshold = 0,
417 }, 415 },
418 .ht = { 416 .ht = {
419 .rx_ba_win_size = 10, 417 .rx_ba_win_size = 32,
420 .tx_ba_win_size = 64, 418 .tx_ba_win_size = 64,
421 .inactivity_timeout = 10000, 419 .inactivity_timeout = 10000,
422 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP, 420 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
@@ -506,8 +504,8 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
506 .rdl = 0x01, 504 .rdl = 0x01,
507 .auto_detect = 0x00, 505 .auto_detect = 0x00,
508 .dedicated_fem = FEM_NONE, 506 .dedicated_fem = FEM_NONE,
509 .low_band_component = COMPONENT_2_WAY_SWITCH, 507 .low_band_component = COMPONENT_3_WAY_SWITCH,
510 .low_band_component_type = 0x06, 508 .low_band_component_type = 0x04,
511 .high_band_component = COMPONENT_2_WAY_SWITCH, 509 .high_band_component = COMPONENT_2_WAY_SWITCH,
512 .high_band_component_type = 0x09, 510 .high_band_component_type = 0x09,
513 .tcxo_ldo_voltage = 0x00, 511 .tcxo_ldo_voltage = 0x00,
@@ -813,6 +811,13 @@ static int wl18xx_enable_interrupts(struct wl1271 *wl)
813 811
814 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, 812 ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
815 WL1271_ACX_INTR_ALL & ~intr_mask); 813 WL1271_ACX_INTR_ALL & ~intr_mask);
814 if (ret < 0)
815 goto disable_interrupts;
816
817 return ret;
818
819disable_interrupts:
820 wlcore_disable_interrupts(wl);
816 821
817out: 822out:
818 return ret; 823 return ret;
@@ -1203,6 +1208,12 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
1203 struct wl18xx_static_data_priv *static_data_priv = 1208 struct wl18xx_static_data_priv *static_data_priv =
1204 (struct wl18xx_static_data_priv *) static_data->priv; 1209 (struct wl18xx_static_data_priv *) static_data->priv;
1205 1210
1211 strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
1212 sizeof(wl->chip.phy_fw_ver_str));
1213
1214 /* make sure the string is NULL-terminated */
1215 wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0';
1216
1206 wl1271_info("PHY firmware version: %s", static_data_priv->phy_version); 1217 wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
1207 1218
1208 return 0; 1219 return 0;
@@ -1241,13 +1252,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1241 if (!change_spare) 1252 if (!change_spare)
1242 return wlcore_set_key(wl, cmd, vif, sta, key_conf); 1253 return wlcore_set_key(wl, cmd, vif, sta, key_conf);
1243 1254
1244 /*
1245 * stop the queues and flush to ensure the next packets are
1246 * in sync with FW spare block accounting
1247 */
1248 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
1249 wl1271_tx_flush(wl);
1250
1251 ret = wlcore_set_key(wl, cmd, vif, sta, key_conf); 1255 ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
1252 if (ret < 0) 1256 if (ret < 0)
1253 goto out; 1257 goto out;
@@ -1270,7 +1274,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
1270 } 1274 }
1271 1275
1272out: 1276out:
1273 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
1274 return ret; 1277 return ret;
1275} 1278}
1276 1279
@@ -1293,7 +1296,10 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
1293 return buf_offset; 1296 return buf_offset;
1294} 1297}
1295 1298
1299static int wl18xx_setup(struct wl1271 *wl);
1300
1296static struct wlcore_ops wl18xx_ops = { 1301static struct wlcore_ops wl18xx_ops = {
1302 .setup = wl18xx_setup,
1297 .identify_chip = wl18xx_identify_chip, 1303 .identify_chip = wl18xx_identify_chip,
1298 .boot = wl18xx_boot, 1304 .boot = wl18xx_boot,
1299 .plt_init = wl18xx_plt_init, 1305 .plt_init = wl18xx_plt_init,
@@ -1374,27 +1380,15 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
1374 }, 1380 },
1375}; 1381};
1376 1382
1377static int __devinit wl18xx_probe(struct platform_device *pdev) 1383static int wl18xx_setup(struct wl1271 *wl)
1378{ 1384{
1379 struct wl1271 *wl; 1385 struct wl18xx_priv *priv = wl->priv;
1380 struct ieee80211_hw *hw;
1381 struct wl18xx_priv *priv;
1382 int ret; 1386 int ret;
1383 1387
1384 hw = wlcore_alloc_hw(sizeof(*priv));
1385 if (IS_ERR(hw)) {
1386 wl1271_error("can't allocate hw");
1387 ret = PTR_ERR(hw);
1388 goto out;
1389 }
1390
1391 wl = hw->priv;
1392 priv = wl->priv;
1393 wl->ops = &wl18xx_ops;
1394 wl->ptable = wl18xx_ptable;
1395 wl->rtable = wl18xx_rtable; 1388 wl->rtable = wl18xx_rtable;
1396 wl->num_tx_desc = 32; 1389 wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
1397 wl->num_rx_desc = 32; 1390 wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
1391 wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
1398 wl->band_rate_to_idx = wl18xx_band_rate_to_idx; 1392 wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
1399 wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX; 1393 wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
1400 wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0; 1394 wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1405,9 +1399,9 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1405 if (num_rx_desc_param != -1) 1399 if (num_rx_desc_param != -1)
1406 wl->num_rx_desc = num_rx_desc_param; 1400 wl->num_rx_desc = num_rx_desc_param;
1407 1401
1408 ret = wl18xx_conf_init(wl, &pdev->dev); 1402 ret = wl18xx_conf_init(wl, wl->dev);
1409 if (ret < 0) 1403 if (ret < 0)
1410 goto out_free; 1404 return ret;
1411 1405
1412 /* If the module param is set, update it in conf */ 1406 /* If the module param is set, update it in conf */
1413 if (board_type_param) { 1407 if (board_type_param) {
@@ -1424,27 +1418,14 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1424 } else { 1418 } else {
1425 wl1271_error("invalid board type '%s'", 1419 wl1271_error("invalid board type '%s'",
1426 board_type_param); 1420 board_type_param);
1427 ret = -EINVAL; 1421 return -EINVAL;
1428 goto out_free;
1429 } 1422 }
1430 } 1423 }
1431 1424
1432 /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */ 1425 if (priv->conf.phy.board_type >= NUM_BOARD_TYPES) {
1433 switch (priv->conf.phy.board_type) {
1434 case BOARD_TYPE_HDK_18XX:
1435 case BOARD_TYPE_COM8_18XX:
1436 priv->conf.phy.low_band_component_type = 0x06;
1437 break;
1438 case BOARD_TYPE_FPGA_18XX:
1439 case BOARD_TYPE_DVP_18XX:
1440 case BOARD_TYPE_EVB_18XX:
1441 priv->conf.phy.low_band_component_type = 0x05;
1442 break;
1443 default:
1444 wl1271_error("invalid board type '%d'", 1426 wl1271_error("invalid board type '%d'",
1445 priv->conf.phy.board_type); 1427 priv->conf.phy.board_type);
1446 ret = -EINVAL; 1428 return -EINVAL;
1447 goto out_free;
1448 } 1429 }
1449 1430
1450 if (low_band_component_param != -1) 1431 if (low_band_component_param != -1)
@@ -1476,22 +1457,21 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1476 priv->conf.ht.mode = HT_MODE_SISO20; 1457 priv->conf.ht.mode = HT_MODE_SISO20;
1477 else { 1458 else {
1478 wl1271_error("invalid ht_mode '%s'", ht_mode_param); 1459 wl1271_error("invalid ht_mode '%s'", ht_mode_param);
1479 ret = -EINVAL; 1460 return -EINVAL;
1480 goto out_free;
1481 } 1461 }
1482 } 1462 }
1483 1463
1484 if (priv->conf.ht.mode == HT_MODE_DEFAULT) { 1464 if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
1485 /* 1465 /*
1486 * Only support mimo with multiple antennas. Fall back to 1466 * Only support mimo with multiple antennas. Fall back to
1487 * siso20. 1467 * siso40.
1488 */ 1468 */
1489 if (wl18xx_is_mimo_supported(wl)) 1469 if (wl18xx_is_mimo_supported(wl))
1490 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, 1470 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
1491 &wl18xx_mimo_ht_cap_2ghz); 1471 &wl18xx_mimo_ht_cap_2ghz);
1492 else 1472 else
1493 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, 1473 wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
1494 &wl18xx_siso20_ht_cap); 1474 &wl18xx_siso40_ht_cap_2ghz);
1495 1475
1496 /* 5Ghz is always wide */ 1476 /* 5Ghz is always wide */
1497 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, 1477 wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
@@ -1513,9 +1493,34 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
1513 wl18xx_ops.init_vif = NULL; 1493 wl18xx_ops.init_vif = NULL;
1514 } 1494 }
1515 1495
1516 wl->enable_11a = enable_11a_param; 1496 /* Enable 11a Band only if we have 5G antennas */
1497 wl->enable_11a = (priv->conf.phy.number_of_assembled_ant5 != 0);
1498
1499 return 0;
1500}
1501
1502static int __devinit wl18xx_probe(struct platform_device *pdev)
1503{
1504 struct wl1271 *wl;
1505 struct ieee80211_hw *hw;
1506 int ret;
1507
1508 hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
1509 WL18XX_AGGR_BUFFER_SIZE);
1510 if (IS_ERR(hw)) {
1511 wl1271_error("can't allocate hw");
1512 ret = PTR_ERR(hw);
1513 goto out;
1514 }
1515
1516 wl = hw->priv;
1517 wl->ops = &wl18xx_ops;
1518 wl->ptable = wl18xx_ptable;
1519 ret = wlcore_probe(wl, pdev);
1520 if (ret)
1521 goto out_free;
1517 1522
1518 return wlcore_probe(wl, pdev); 1523 return ret;
1519 1524
1520out_free: 1525out_free:
1521 wlcore_free_hw(wl); 1526 wlcore_free_hw(wl);
@@ -1539,18 +1544,7 @@ static struct platform_driver wl18xx_driver = {
1539 } 1544 }
1540}; 1545};
1541 1546
1542static int __init wl18xx_init(void) 1547module_platform_driver(wl18xx_driver);
1543{
1544 return platform_driver_register(&wl18xx_driver);
1545}
1546module_init(wl18xx_init);
1547
1548static void __exit wl18xx_exit(void)
1549{
1550 platform_driver_unregister(&wl18xx_driver);
1551}
1552module_exit(wl18xx_exit);
1553
1554module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR); 1548module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
1555MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20"); 1549MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
1556 1550
@@ -1561,9 +1555,6 @@ MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
1561module_param_named(checksum, checksum_param, bool, S_IRUSR); 1555module_param_named(checksum, checksum_param, bool, S_IRUSR);
1562MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)"); 1556MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
1563 1557
1564module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
1565MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
1566
1567module_param_named(dc2dc, dc2dc_param, int, S_IRUSR); 1558module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
1568MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)"); 1559MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
1569 1560
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 6452396fa1d4..96a1e438d677 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -33,6 +33,13 @@
33 33
34#define WL18XX_CMD_MAX_SIZE 740 34#define WL18XX_CMD_MAX_SIZE 740
35 35
36#define WL18XX_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
37
38#define WL18XX_NUM_TX_DESCRIPTORS 32
39#define WL18XX_NUM_RX_DESCRIPTORS 32
40
41#define WL18XX_NUM_MAC_ADDRESSES 3
42
36struct wl18xx_priv { 43struct wl18xx_priv {
37 /* buffer for sending commands to FW */ 44 /* buffer for sending commands to FW */
38 u8 cmd_buf[WL18XX_CMD_MAX_SIZE]; 45 u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 20e1bd923832..eaef3f41b252 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -59,6 +59,9 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
59 u16 status; 59 u16 status;
60 u16 poll_count = 0; 60 u16 poll_count = 0;
61 61
62 if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
63 return -EIO;
64
62 cmd = buf; 65 cmd = buf;
63 cmd->id = cpu_to_le16(id); 66 cmd->id = cpu_to_le16(id);
64 cmd->status = 0; 67 cmd->status = 0;
@@ -990,7 +993,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
990 993
991 ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV, 994 ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
992 skb->data, skb->len, 995 skb->data, skb->len,
993 CMD_TEMPL_KLV_IDX_NULL_DATA, 996 wlvif->sta.klv_template_id,
994 wlvif->basic_rate); 997 wlvif->basic_rate);
995 998
996out: 999out:
@@ -1785,10 +1788,17 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1785 wlvif->bss_type == BSS_TYPE_IBSS))) 1788 wlvif->bss_type == BSS_TYPE_IBSS)))
1786 return -EINVAL; 1789 return -EINVAL;
1787 1790
1788 ret = wl12xx_cmd_role_start_dev(wl, wlvif); 1791 ret = wl12xx_cmd_role_enable(wl,
1792 wl12xx_wlvif_to_vif(wlvif)->addr,
1793 WL1271_ROLE_DEVICE,
1794 &wlvif->dev_role_id);
1789 if (ret < 0) 1795 if (ret < 0)
1790 goto out; 1796 goto out;
1791 1797
1798 ret = wl12xx_cmd_role_start_dev(wl, wlvif);
1799 if (ret < 0)
1800 goto out_disable;
1801
1792 ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id); 1802 ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
1793 if (ret < 0) 1803 if (ret < 0)
1794 goto out_stop; 1804 goto out_stop;
@@ -1797,6 +1807,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1797 1807
1798out_stop: 1808out_stop:
1799 wl12xx_cmd_role_stop_dev(wl, wlvif); 1809 wl12xx_cmd_role_stop_dev(wl, wlvif);
1810out_disable:
1811 wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
1800out: 1812out:
1801 return ret; 1813 return ret;
1802} 1814}
@@ -1824,6 +1836,11 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1824 ret = wl12xx_cmd_role_stop_dev(wl, wlvif); 1836 ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
1825 if (ret < 0) 1837 if (ret < 0)
1826 goto out; 1838 goto out;
1839
1840 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
1841 if (ret < 0)
1842 goto out;
1843
1827out: 1844out:
1828 return ret; 1845 return ret;
1829} 1846}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 4ef0b095f0d6..2409f3d71f63 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -157,11 +157,6 @@ enum wl1271_commands {
157 157
158#define MAX_CMD_PARAMS 572 158#define MAX_CMD_PARAMS 572
159 159
160enum {
161 CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
162 CMD_TEMPL_KLV_IDX_MAX = 4
163};
164
165enum cmd_templ { 160enum cmd_templ {
166 CMD_TEMPL_NULL_DATA = 0, 161 CMD_TEMPL_NULL_DATA = 0,
167 CMD_TEMPL_BEACON, 162 CMD_TEMPL_BEACON,
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index d77224f2ac6b..9e40760bafe1 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -412,8 +412,7 @@ struct conf_rx_settings {
412#define CONF_TX_RATE_RETRY_LIMIT 10 412#define CONF_TX_RATE_RETRY_LIMIT 10
413 413
414/* basic rates for p2p operations (probe req/resp, etc.) */ 414/* basic rates for p2p operations (probe req/resp, etc.) */
415#define CONF_TX_RATE_MASK_BASIC_P2P (CONF_HW_BIT_RATE_6MBPS | \ 415#define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS
416 CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS)
417 416
418/* 417/*
419 * Rates supported for data packets when operating as AP. Note the absence 418 * Rates supported for data packets when operating as AP. Note the absence
diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
index 6b800b3cbea5..db4bf5a68ce2 100644
--- a/drivers/net/wireless/ti/wlcore/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -28,7 +28,7 @@
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/printk.h> 29#include <linux/printk.h>
30 30
31#define DRIVER_NAME "wl12xx" 31#define DRIVER_NAME "wlcore"
32#define DRIVER_PREFIX DRIVER_NAME ": " 32#define DRIVER_PREFIX DRIVER_NAME ": "
33 33
34enum { 34enum {
@@ -73,11 +73,21 @@ extern u32 wl12xx_debug_level;
73#define wl1271_info(fmt, arg...) \ 73#define wl1271_info(fmt, arg...) \
74 pr_info(DRIVER_PREFIX fmt "\n", ##arg) 74 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
75 75
76/* define the debug macro differently if dynamic debug is supported */
77#if defined(CONFIG_DYNAMIC_DEBUG)
76#define wl1271_debug(level, fmt, arg...) \ 78#define wl1271_debug(level, fmt, arg...) \
77 do { \ 79 do { \
78 if (level & wl12xx_debug_level) \ 80 if (unlikely(level & wl12xx_debug_level)) \
79 pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ 81 dynamic_pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
82 } while (0)
83#else
84#define wl1271_debug(level, fmt, arg...) \
85 do { \
86 if (unlikely(level & wl12xx_debug_level)) \
87 printk(KERN_DEBUG pr_fmt(DRIVER_PREFIX fmt "\n"), \
88 ##arg); \
80 } while (0) 89 } while (0)
90#endif
81 91
82/* TODO: use pr_debug_hex_dump when it becomes available */ 92/* TODO: use pr_debug_hex_dump when it becomes available */
83#define wl1271_dump(level, prefix, buf, len) \ 93#define wl1271_dump(level, prefix, buf, len) \
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 80dbc5304fac..c86bb00c2488 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -62,11 +62,14 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
62 62
63 mutex_lock(&wl->mutex); 63 mutex_lock(&wl->mutex);
64 64
65 if (unlikely(wl->state != WLCORE_STATE_ON))
66 goto out;
67
65 ret = wl1271_ps_elp_wakeup(wl); 68 ret = wl1271_ps_elp_wakeup(wl);
66 if (ret < 0) 69 if (ret < 0)
67 goto out; 70 goto out;
68 71
69 if (wl->state == WL1271_STATE_ON && !wl->plt && 72 if (!wl->plt &&
70 time_after(jiffies, wl->stats.fw_stats_update + 73 time_after(jiffies, wl->stats.fw_stats_update +
71 msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) { 74 msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
72 wl1271_acx_statistics(wl, wl->stats.fw_stats); 75 wl1271_acx_statistics(wl, wl->stats.fw_stats);
@@ -286,7 +289,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
286 289
287 wl->conf.conn.dynamic_ps_timeout = value; 290 wl->conf.conn.dynamic_ps_timeout = value;
288 291
289 if (wl->state == WL1271_STATE_OFF) 292 if (unlikely(wl->state != WLCORE_STATE_ON))
290 goto out; 293 goto out;
291 294
292 ret = wl1271_ps_elp_wakeup(wl); 295 ret = wl1271_ps_elp_wakeup(wl);
@@ -353,7 +356,7 @@ static ssize_t forced_ps_write(struct file *file,
353 356
354 wl->conf.conn.forced_ps = value; 357 wl->conf.conn.forced_ps = value;
355 358
356 if (wl->state == WL1271_STATE_OFF) 359 if (unlikely(wl->state != WLCORE_STATE_ON))
357 goto out; 360 goto out;
358 361
359 ret = wl1271_ps_elp_wakeup(wl); 362 ret = wl1271_ps_elp_wakeup(wl);
@@ -486,6 +489,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
486 DRIVER_STATE_PRINT_HEX(platform_quirks); 489 DRIVER_STATE_PRINT_HEX(platform_quirks);
487 DRIVER_STATE_PRINT_HEX(chip.id); 490 DRIVER_STATE_PRINT_HEX(chip.id);
488 DRIVER_STATE_PRINT_STR(chip.fw_ver_str); 491 DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
492 DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
489 DRIVER_STATE_PRINT_INT(sched_scanning); 493 DRIVER_STATE_PRINT_INT(sched_scanning);
490 494
491#undef DRIVER_STATE_PRINT_INT 495#undef DRIVER_STATE_PRINT_INT
@@ -999,7 +1003,7 @@ static ssize_t sleep_auth_write(struct file *file,
999 1003
1000 wl->conf.conn.sta_sleep_auth = value; 1004 wl->conf.conn.sta_sleep_auth = value;
1001 1005
1002 if (wl->state == WL1271_STATE_OFF) { 1006 if (unlikely(wl->state != WLCORE_STATE_ON)) {
1003 /* this will show up on "read" in case we are off */ 1007 /* this will show up on "read" in case we are off */
1004 wl->sleep_auth = value; 1008 wl->sleep_auth = value;
1005 goto out; 1009 goto out;
@@ -1060,14 +1064,16 @@ static ssize_t dev_mem_read(struct file *file,
1060 1064
1061 mutex_lock(&wl->mutex); 1065 mutex_lock(&wl->mutex);
1062 1066
1063 if (wl->state == WL1271_STATE_OFF) { 1067 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
1064 ret = -EFAULT; 1068 ret = -EFAULT;
1065 goto skip_read; 1069 goto skip_read;
1066 } 1070 }
1067 1071
1068 ret = wl1271_ps_elp_wakeup(wl); 1072 /*
1069 if (ret < 0) 1073 * Don't fail if elp_wakeup returns an error, so the device's memory
1070 goto skip_read; 1074 * could be read even if the FW crashed
1075 */
1076 wl1271_ps_elp_wakeup(wl);
1071 1077
1072 /* store current partition and switch partition */ 1078 /* store current partition and switch partition */
1073 memcpy(&old_part, &wl->curr_part, sizeof(old_part)); 1079 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1145,14 +1151,16 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
1145 1151
1146 mutex_lock(&wl->mutex); 1152 mutex_lock(&wl->mutex);
1147 1153
1148 if (wl->state == WL1271_STATE_OFF) { 1154 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
1149 ret = -EFAULT; 1155 ret = -EFAULT;
1150 goto skip_write; 1156 goto skip_write;
1151 } 1157 }
1152 1158
1153 ret = wl1271_ps_elp_wakeup(wl); 1159 /*
1154 if (ret < 0) 1160 * Don't fail if elp_wakeup returns an error, so the device's memory
1155 goto skip_write; 1161 * could be read even if the FW crashed
1162 */
1163 wl1271_ps_elp_wakeup(wl);
1156 1164
1157 /* store current partition and switch partition */ 1165 /* store current partition and switch partition */
1158 memcpy(&old_part, &wl->curr_part, sizeof(old_part)); 1166 memcpy(&old_part, &wl->curr_part, sizeof(old_part));
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index a3c867786df8..32d157f62f31 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -141,7 +141,7 @@ int wl1271_init_templates_config(struct wl1271 *wl)
141 if (ret < 0) 141 if (ret < 0)
142 return ret; 142 return ret;
143 143
144 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 144 for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) {
145 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, 145 ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
146 CMD_TEMPL_KLV, NULL, 146 CMD_TEMPL_KLV, NULL,
147 sizeof(struct ieee80211_qos_hdr), 147 sizeof(struct ieee80211_qos_hdr),
@@ -371,15 +371,7 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
371 struct ieee80211_vif *vif) 371 struct ieee80211_vif *vif)
372{ 372{
373 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 373 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
374 int ret, i; 374 int ret;
375
376 /* disable all keep-alive templates */
377 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
378 ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
379 ACX_KEEP_ALIVE_TPL_INVALID);
380 if (ret < 0)
381 return ret;
382 }
383 375
384 /* disable the keep-alive feature */ 376 /* disable the keep-alive feature */
385 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); 377 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 259149f36fae..f48530fec14f 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -64,7 +64,7 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
64 return -EIO; 64 return -EIO;
65 65
66 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed); 66 ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
67 if (ret && wl->state != WL1271_STATE_OFF) 67 if (ret && wl->state != WLCORE_STATE_OFF)
68 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags); 68 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
69 69
70 return ret; 70 return ret;
@@ -80,7 +80,7 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
80 return -EIO; 80 return -EIO;
81 81
82 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed); 82 ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
83 if (ret && wl->state != WL1271_STATE_OFF) 83 if (ret && wl->state != WLCORE_STATE_OFF)
84 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags); 84 set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
85 85
86 return ret; 86 return ret;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 72548609f711..25530c8760cb 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -248,7 +248,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
248 248
249 mutex_lock(&wl->mutex); 249 mutex_lock(&wl->mutex);
250 250
251 if (unlikely(wl->state == WL1271_STATE_OFF)) 251 if (unlikely(wl->state != WLCORE_STATE_ON))
252 goto out; 252 goto out;
253 253
254 /* Tx went out in the meantime - everything is ok */ 254 /* Tx went out in the meantime - everything is ok */
@@ -512,7 +512,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
512 512
513 wl1271_debug(DEBUG_IRQ, "IRQ work"); 513 wl1271_debug(DEBUG_IRQ, "IRQ work");
514 514
515 if (unlikely(wl->state == WL1271_STATE_OFF)) 515 if (unlikely(wl->state != WLCORE_STATE_ON))
516 goto out; 516 goto out;
517 517
518 ret = wl1271_ps_elp_wakeup(wl); 518 ret = wl1271_ps_elp_wakeup(wl);
@@ -696,7 +696,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
696 * we can't call wl12xx_get_vif_count() here because 696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value 697 * wl->mutex is taken, so use the cached last_vif_count value
698 */ 698 */
699 if (wl->last_vif_count > 1) { 699 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
700 fw_type = WL12XX_FW_TYPE_MULTI; 700 fw_type = WL12XX_FW_TYPE_MULTI;
701 fw_name = wl->mr_fw_name; 701 fw_name = wl->mr_fw_name;
702 } else { 702 } else {
@@ -744,38 +744,14 @@ out:
744 return ret; 744 return ret;
745} 745}
746 746
747static void wl1271_fetch_nvs(struct wl1271 *wl)
748{
749 const struct firmware *fw;
750 int ret;
751
752 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
753
754 if (ret < 0) {
755 wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
756 WL12XX_NVS_NAME, ret);
757 return;
758 }
759
760 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
761
762 if (!wl->nvs) {
763 wl1271_error("could not allocate memory for the nvs file");
764 goto out;
765 }
766
767 wl->nvs_len = fw->size;
768
769out:
770 release_firmware(fw);
771}
772
773void wl12xx_queue_recovery_work(struct wl1271 *wl) 747void wl12xx_queue_recovery_work(struct wl1271 *wl)
774{ 748{
775 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 749 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
776 750
777 /* Avoid a recursive recovery */ 751 /* Avoid a recursive recovery */
778 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { 752 if (wl->state == WLCORE_STATE_ON) {
753 wl->state = WLCORE_STATE_RESTARTING;
754 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
779 wlcore_disable_interrupts_nosync(wl); 755 wlcore_disable_interrupts_nosync(wl);
780 ieee80211_queue_work(wl->hw, &wl->recovery_work); 756 ieee80211_queue_work(wl->hw, &wl->recovery_work);
781 } 757 }
@@ -913,7 +889,7 @@ static void wl1271_recovery_work(struct work_struct *work)
913 889
914 mutex_lock(&wl->mutex); 890 mutex_lock(&wl->mutex);
915 891
916 if (wl->state != WL1271_STATE_ON || wl->plt) 892 if (wl->state == WLCORE_STATE_OFF || wl->plt)
917 goto out_unlock; 893 goto out_unlock;
918 894
919 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { 895 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1081,7 +1057,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1081 1057
1082 wl1271_notice("power up"); 1058 wl1271_notice("power up");
1083 1059
1084 if (wl->state != WL1271_STATE_OFF) { 1060 if (wl->state != WLCORE_STATE_OFF) {
1085 wl1271_error("cannot go into PLT state because not " 1061 wl1271_error("cannot go into PLT state because not "
1086 "in off state: %d", wl->state); 1062 "in off state: %d", wl->state);
1087 ret = -EBUSY; 1063 ret = -EBUSY;
@@ -1102,7 +1078,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1102 if (ret < 0) 1078 if (ret < 0)
1103 goto power_off; 1079 goto power_off;
1104 1080
1105 wl->state = WL1271_STATE_ON; 1081 wl->state = WLCORE_STATE_ON;
1106 wl1271_notice("firmware booted in PLT mode %s (%s)", 1082 wl1271_notice("firmware booted in PLT mode %s (%s)",
1107 PLT_MODE[plt_mode], 1083 PLT_MODE[plt_mode],
1108 wl->chip.fw_ver_str); 1084 wl->chip.fw_ver_str);
@@ -1171,7 +1147,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1171 wl1271_power_off(wl); 1147 wl1271_power_off(wl);
1172 wl->flags = 0; 1148 wl->flags = 0;
1173 wl->sleep_auth = WL1271_PSM_ILLEGAL; 1149 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1174 wl->state = WL1271_STATE_OFF; 1150 wl->state = WLCORE_STATE_OFF;
1175 wl->plt = false; 1151 wl->plt = false;
1176 wl->plt_mode = PLT_OFF; 1152 wl->plt_mode = PLT_OFF;
1177 wl->rx_counter = 0; 1153 wl->rx_counter = 0;
@@ -1181,7 +1157,9 @@ out:
1181 return ret; 1157 return ret;
1182} 1158}
1183 1159
1184static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1160static void wl1271_op_tx(struct ieee80211_hw *hw,
1161 struct ieee80211_tx_control *control,
1162 struct sk_buff *skb)
1185{ 1163{
1186 struct wl1271 *wl = hw->priv; 1164 struct wl1271 *wl = hw->priv;
1187 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1165 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1175,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1197 mapping = skb_get_queue_mapping(skb); 1175 mapping = skb_get_queue_mapping(skb);
1198 q = wl1271_tx_get_queue(mapping); 1176 q = wl1271_tx_get_queue(mapping);
1199 1177
1200 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); 1178 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1201 1179
1202 spin_lock_irqsave(&wl->wl_lock, flags); 1180 spin_lock_irqsave(&wl->wl_lock, flags);
1203 1181
@@ -1600,12 +1578,6 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1600 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1578 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1601 goto out; 1579 goto out;
1602 1580
1603 if ((wl->conf.conn.suspend_wake_up_event ==
1604 wl->conf.conn.wake_up_event) &&
1605 (wl->conf.conn.suspend_listen_interval ==
1606 wl->conf.conn.listen_interval))
1607 goto out;
1608
1609 ret = wl1271_ps_elp_wakeup(wl); 1581 ret = wl1271_ps_elp_wakeup(wl);
1610 if (ret < 0) 1582 if (ret < 0)
1611 goto out; 1583 goto out;
@@ -1614,6 +1586,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1614 if (ret < 0) 1586 if (ret < 0)
1615 goto out_sleep; 1587 goto out_sleep;
1616 1588
1589 if ((wl->conf.conn.suspend_wake_up_event ==
1590 wl->conf.conn.wake_up_event) &&
1591 (wl->conf.conn.suspend_listen_interval ==
1592 wl->conf.conn.listen_interval))
1593 goto out_sleep;
1594
1617 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1595 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1618 wl->conf.conn.suspend_wake_up_event, 1596 wl->conf.conn.suspend_wake_up_event,
1619 wl->conf.conn.suspend_listen_interval); 1597 wl->conf.conn.suspend_listen_interval);
@@ -1669,11 +1647,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1669 if ((!is_ap) && (!is_sta)) 1647 if ((!is_ap) && (!is_sta))
1670 return; 1648 return;
1671 1649
1672 if (is_sta && 1650 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1673 ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval)))
1677 return; 1651 return;
1678 1652
1679 ret = wl1271_ps_elp_wakeup(wl); 1653 ret = wl1271_ps_elp_wakeup(wl);
@@ -1683,6 +1657,12 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1683 if (is_sta) { 1657 if (is_sta) {
1684 wl1271_configure_wowlan(wl, NULL); 1658 wl1271_configure_wowlan(wl, NULL);
1685 1659
1660 if ((wl->conf.conn.suspend_wake_up_event ==
1661 wl->conf.conn.wake_up_event) &&
1662 (wl->conf.conn.suspend_listen_interval ==
1663 wl->conf.conn.listen_interval))
1664 goto out_sleep;
1665
1686 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1666 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1687 wl->conf.conn.wake_up_event, 1667 wl->conf.conn.wake_up_event,
1688 wl->conf.conn.listen_interval); 1668 wl->conf.conn.listen_interval);
@@ -1695,6 +1675,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1695 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); 1675 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1696 } 1676 }
1697 1677
1678out_sleep:
1698 wl1271_ps_elp_sleep(wl); 1679 wl1271_ps_elp_sleep(wl);
1699} 1680}
1700 1681
@@ -1831,7 +1812,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1831{ 1812{
1832 int i; 1813 int i;
1833 1814
1834 if (wl->state == WL1271_STATE_OFF) { 1815 if (wl->state == WLCORE_STATE_OFF) {
1835 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, 1816 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1836 &wl->flags)) 1817 &wl->flags))
1837 wlcore_enable_interrupts(wl); 1818 wlcore_enable_interrupts(wl);
@@ -1843,7 +1824,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1843 * this must be before the cancel_work calls below, so that the work 1824 * this must be before the cancel_work calls below, so that the work
1844 * functions don't perform further work. 1825 * functions don't perform further work.
1845 */ 1826 */
1846 wl->state = WL1271_STATE_OFF; 1827 wl->state = WLCORE_STATE_OFF;
1847 1828
1848 /* 1829 /*
1849 * Use the nosync variant to disable interrupts, so the mutex could be 1830 * Use the nosync variant to disable interrupts, so the mutex could be
@@ -1854,6 +1835,8 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
1854 mutex_unlock(&wl->mutex); 1835 mutex_unlock(&wl->mutex);
1855 1836
1856 wlcore_synchronize_interrupts(wl); 1837 wlcore_synchronize_interrupts(wl);
1838 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1839 cancel_work_sync(&wl->recovery_work);
1857 wl1271_flush_deferred_work(wl); 1840 wl1271_flush_deferred_work(wl);
1858 cancel_delayed_work_sync(&wl->scan_complete_work); 1841 cancel_delayed_work_sync(&wl->scan_complete_work);
1859 cancel_work_sync(&wl->netstack_work); 1842 cancel_work_sync(&wl->netstack_work);
@@ -1956,6 +1939,27 @@ static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1956 *idx = WL12XX_MAX_RATE_POLICIES; 1939 *idx = WL12XX_MAX_RATE_POLICIES;
1957} 1940}
1958 1941
1942static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
1943{
1944 u8 policy = find_first_zero_bit(wl->klv_templates_map,
1945 WLCORE_MAX_KLV_TEMPLATES);
1946 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
1947 return -EBUSY;
1948
1949 __set_bit(policy, wl->klv_templates_map);
1950 *idx = policy;
1951 return 0;
1952}
1953
1954static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
1955{
1956 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
1957 return;
1958
1959 __clear_bit(*idx, wl->klv_templates_map);
1960 *idx = WLCORE_MAX_KLV_TEMPLATES;
1961}
1962
1959static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1963static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1960{ 1964{
1961 switch (wlvif->bss_type) { 1965 switch (wlvif->bss_type) {
@@ -2020,6 +2024,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2020 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2024 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2021 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2025 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2022 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2026 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2027 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2023 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 2028 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2024 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; 2029 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2025 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; 2030 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -2096,7 +2101,7 @@ irq_disable:
2096 /* Unlocking the mutex in the middle of handling is 2101 /* Unlocking the mutex in the middle of handling is
2097 inherently unsafe. In this case we deem it safe to do, 2102 inherently unsafe. In this case we deem it safe to do,
2098 because we need to let any possibly pending IRQ out of 2103 because we need to let any possibly pending IRQ out of
2099 the system (and while we are WL1271_STATE_OFF the IRQ 2104 the system (and while we are WLCORE_STATE_OFF the IRQ
2100 work function will not do anything.) Also, any other 2105 work function will not do anything.) Also, any other
2101 possible concurrent operations will fail due to the 2106 possible concurrent operations will fail due to the
2102 current state, hence the wl1271 struct should be safe. */ 2107 current state, hence the wl1271 struct should be safe. */
@@ -2131,7 +2136,7 @@ power_off:
2131 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", 2136 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2132 wl->enable_11a ? "" : "not "); 2137 wl->enable_11a ? "" : "not ");
2133 2138
2134 wl->state = WL1271_STATE_ON; 2139 wl->state = WLCORE_STATE_ON;
2135out: 2140out:
2136 return booted; 2141 return booted;
2137} 2142}
@@ -2165,7 +2170,11 @@ static bool wl12xx_need_fw_change(struct wl1271 *wl,
2165 wl->last_vif_count = vif_count; 2170 wl->last_vif_count = vif_count;
2166 2171
2167 /* no need for fw change if the device is OFF */ 2172 /* no need for fw change if the device is OFF */
2168 if (wl->state == WL1271_STATE_OFF) 2173 if (wl->state == WLCORE_STATE_OFF)
2174 return false;
2175
2176 /* no need for fw change if a single fw is used */
2177 if (!wl->mr_fw_name)
2169 return false; 2178 return false;
2170 2179
2171 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL) 2180 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
@@ -2247,7 +2256,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2247 * TODO: after the nvs issue will be solved, move this block 2256 * TODO: after the nvs issue will be solved, move this block
2248 * to start(), and make sure here the driver is ON. 2257 * to start(), and make sure here the driver is ON.
2249 */ 2258 */
2250 if (wl->state == WL1271_STATE_OFF) { 2259 if (wl->state == WLCORE_STATE_OFF) {
2251 /* 2260 /*
2252 * we still need this in order to configure the fw 2261 * we still need this in order to configure the fw
2253 * while uploading the nvs 2262 * while uploading the nvs
@@ -2261,21 +2270,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2261 } 2270 }
2262 } 2271 }
2263 2272
2264 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2265 wlvif->bss_type == BSS_TYPE_IBSS) {
2266 /*
2267 * The device role is a special role used for
2268 * rx and tx frames prior to association (as
2269 * the STA role can get packets only from
2270 * its associated bssid)
2271 */
2272 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2273 WL1271_ROLE_DEVICE,
2274 &wlvif->dev_role_id);
2275 if (ret < 0)
2276 goto out;
2277 }
2278
2279 ret = wl12xx_cmd_role_enable(wl, vif->addr, 2273 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2280 role_type, &wlvif->role_id); 2274 role_type, &wlvif->role_id);
2281 if (ret < 0) 2275 if (ret < 0)
@@ -2314,7 +2308,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2314 return; 2308 return;
2315 2309
2316 /* because of hardware recovery, we may get here twice */ 2310 /* because of hardware recovery, we may get here twice */
2317 if (wl->state != WL1271_STATE_ON) 2311 if (wl->state == WLCORE_STATE_OFF)
2318 return; 2312 return;
2319 2313
2320 wl1271_info("down"); 2314 wl1271_info("down");
@@ -2344,10 +2338,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2344 wlvif->bss_type == BSS_TYPE_IBSS) { 2338 wlvif->bss_type == BSS_TYPE_IBSS) {
2345 if (wl12xx_dev_role_started(wlvif)) 2339 if (wl12xx_dev_role_started(wlvif))
2346 wl12xx_stop_dev(wl, wlvif); 2340 wl12xx_stop_dev(wl, wlvif);
2347
2348 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2349 if (ret < 0)
2350 goto deinit;
2351 } 2341 }
2352 2342
2353 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); 2343 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
@@ -2366,6 +2356,7 @@ deinit:
2366 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2356 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2367 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2357 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2368 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2358 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2359 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2369 } else { 2360 } else {
2370 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2361 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2371 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; 2362 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
@@ -2430,12 +2421,11 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2421 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2431 struct wl12xx_vif *iter; 2422 struct wl12xx_vif *iter;
2432 struct vif_counter_data vif_count; 2423 struct vif_counter_data vif_count;
2433 bool cancel_recovery = true;
2434 2424
2435 wl12xx_get_vif_count(hw, vif, &vif_count); 2425 wl12xx_get_vif_count(hw, vif, &vif_count);
2436 mutex_lock(&wl->mutex); 2426 mutex_lock(&wl->mutex);
2437 2427
2438 if (wl->state == WL1271_STATE_OFF || 2428 if (wl->state == WLCORE_STATE_OFF ||
2439 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 2429 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2440 goto out; 2430 goto out;
2441 2431
@@ -2455,12 +2445,9 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2455 wl12xx_force_active_psm(wl); 2445 wl12xx_force_active_psm(wl);
2456 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 2446 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2457 wl12xx_queue_recovery_work(wl); 2447 wl12xx_queue_recovery_work(wl);
2458 cancel_recovery = false;
2459 } 2448 }
2460out: 2449out:
2461 mutex_unlock(&wl->mutex); 2450 mutex_unlock(&wl->mutex);
2462 if (cancel_recovery)
2463 cancel_work_sync(&wl->recovery_work);
2464} 2451}
2465 2452
2466static int wl12xx_op_change_interface(struct ieee80211_hw *hw, 2453static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
@@ -2534,7 +2521,7 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2534 goto out; 2521 goto out;
2535 2522
2536 ret = wl1271_acx_keep_alive_config(wl, wlvif, 2523 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2537 CMD_TEMPL_KLV_IDX_NULL_DATA, 2524 wlvif->sta.klv_template_id,
2538 ACX_KEEP_ALIVE_TPL_VALID); 2525 ACX_KEEP_ALIVE_TPL_VALID);
2539 if (ret < 0) 2526 if (ret < 0)
2540 goto out; 2527 goto out;
@@ -2554,6 +2541,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2554 ieee80211_chswitch_done(vif, false); 2541 ieee80211_chswitch_done(vif, false);
2555 } 2542 }
2556 2543
2544 /* invalidate keep-alive template */
2545 wl1271_acx_keep_alive_config(wl, wlvif,
2546 wlvif->sta.klv_template_id,
2547 ACX_KEEP_ALIVE_TPL_INVALID);
2548
2557 /* to stop listening to a channel, we disconnect */ 2549 /* to stop listening to a channel, we disconnect */
2558 ret = wl12xx_cmd_role_stop_sta(wl, wlvif); 2550 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2559 if (ret < 0) 2551 if (ret < 0)
@@ -2594,11 +2586,6 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2594 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 2586 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2595 if (ret < 0) 2587 if (ret < 0)
2596 goto out; 2588 goto out;
2597 ret = wl1271_acx_keep_alive_config(
2598 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2599 ACX_KEEP_ALIVE_TPL_INVALID);
2600 if (ret < 0)
2601 goto out;
2602 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 2589 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2603 } else { 2590 } else {
2604 /* The current firmware only supports sched_scan in idle */ 2591 /* The current firmware only supports sched_scan in idle */
@@ -2770,7 +2757,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2770 if (changed & IEEE80211_CONF_CHANGE_POWER) 2757 if (changed & IEEE80211_CONF_CHANGE_POWER)
2771 wl->power_level = conf->power_level; 2758 wl->power_level = conf->power_level;
2772 2759
2773 if (unlikely(wl->state == WL1271_STATE_OFF)) 2760 if (unlikely(wl->state != WLCORE_STATE_ON))
2774 goto out; 2761 goto out;
2775 2762
2776 ret = wl1271_ps_elp_wakeup(wl); 2763 ret = wl1271_ps_elp_wakeup(wl);
@@ -2804,10 +2791,6 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2804{ 2791{
2805 struct wl1271_filter_params *fp; 2792 struct wl1271_filter_params *fp;
2806 struct netdev_hw_addr *ha; 2793 struct netdev_hw_addr *ha;
2807 struct wl1271 *wl = hw->priv;
2808
2809 if (unlikely(wl->state == WL1271_STATE_OFF))
2810 return 0;
2811 2794
2812 fp = kzalloc(sizeof(*fp), GFP_ATOMIC); 2795 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2813 if (!fp) { 2796 if (!fp) {
@@ -2856,7 +2839,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2856 *total &= WL1271_SUPPORTED_FILTERS; 2839 *total &= WL1271_SUPPORTED_FILTERS;
2857 changed &= WL1271_SUPPORTED_FILTERS; 2840 changed &= WL1271_SUPPORTED_FILTERS;
2858 2841
2859 if (unlikely(wl->state == WL1271_STATE_OFF)) 2842 if (unlikely(wl->state != WLCORE_STATE_ON))
2860 goto out; 2843 goto out;
2861 2844
2862 ret = wl1271_ps_elp_wakeup(wl); 2845 ret = wl1271_ps_elp_wakeup(wl);
@@ -3080,8 +3063,45 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3080 struct ieee80211_key_conf *key_conf) 3063 struct ieee80211_key_conf *key_conf)
3081{ 3064{
3082 struct wl1271 *wl = hw->priv; 3065 struct wl1271 *wl = hw->priv;
3066 int ret;
3067 bool might_change_spare =
3068 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3069 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3083 3070
3084 return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); 3071 if (might_change_spare) {
3072 /*
3073 * stop the queues and flush to ensure the next packets are
3074 * in sync with FW spare block accounting
3075 */
3076 mutex_lock(&wl->mutex);
3077 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3078 mutex_unlock(&wl->mutex);
3079
3080 wl1271_tx_flush(wl);
3081 }
3082
3083 mutex_lock(&wl->mutex);
3084
3085 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3086 ret = -EAGAIN;
3087 goto out_wake_queues;
3088 }
3089
3090 ret = wl1271_ps_elp_wakeup(wl);
3091 if (ret < 0)
3092 goto out_wake_queues;
3093
3094 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3095
3096 wl1271_ps_elp_sleep(wl);
3097
3098out_wake_queues:
3099 if (might_change_spare)
3100 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3101
3102 mutex_unlock(&wl->mutex);
3103
3104 return ret;
3085} 3105}
3086 3106
3087int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 3107int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
@@ -3103,17 +3123,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3103 key_conf->keylen, key_conf->flags); 3123 key_conf->keylen, key_conf->flags);
3104 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 3124 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3105 3125
3106 mutex_lock(&wl->mutex);
3107
3108 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3109 ret = -EAGAIN;
3110 goto out_unlock;
3111 }
3112
3113 ret = wl1271_ps_elp_wakeup(wl);
3114 if (ret < 0)
3115 goto out_unlock;
3116
3117 switch (key_conf->cipher) { 3126 switch (key_conf->cipher) {
3118 case WLAN_CIPHER_SUITE_WEP40: 3127 case WLAN_CIPHER_SUITE_WEP40:
3119 case WLAN_CIPHER_SUITE_WEP104: 3128 case WLAN_CIPHER_SUITE_WEP104:
@@ -3143,8 +3152,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3143 default: 3152 default:
3144 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 3153 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3145 3154
3146 ret = -EOPNOTSUPP; 3155 return -EOPNOTSUPP;
3147 goto out_sleep;
3148 } 3156 }
3149 3157
3150 switch (cmd) { 3158 switch (cmd) {
@@ -3155,7 +3163,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3155 tx_seq_32, tx_seq_16, sta); 3163 tx_seq_32, tx_seq_16, sta);
3156 if (ret < 0) { 3164 if (ret < 0) {
3157 wl1271_error("Could not add or replace key"); 3165 wl1271_error("Could not add or replace key");
3158 goto out_sleep; 3166 return ret;
3159 } 3167 }
3160 3168
3161 /* 3169 /*
@@ -3169,7 +3177,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3169 ret = wl1271_cmd_build_arp_rsp(wl, wlvif); 3177 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3170 if (ret < 0) { 3178 if (ret < 0) {
3171 wl1271_warning("build arp rsp failed: %d", ret); 3179 wl1271_warning("build arp rsp failed: %d", ret);
3172 goto out_sleep; 3180 return ret;
3173 } 3181 }
3174 } 3182 }
3175 break; 3183 break;
@@ -3181,22 +3189,15 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3181 0, 0, sta); 3189 0, 0, sta);
3182 if (ret < 0) { 3190 if (ret < 0) {
3183 wl1271_error("Could not remove key"); 3191 wl1271_error("Could not remove key");
3184 goto out_sleep; 3192 return ret;
3185 } 3193 }
3186 break; 3194 break;
3187 3195
3188 default: 3196 default:
3189 wl1271_error("Unsupported key cmd 0x%x", cmd); 3197 wl1271_error("Unsupported key cmd 0x%x", cmd);
3190 ret = -EOPNOTSUPP; 3198 return -EOPNOTSUPP;
3191 break;
3192 } 3199 }
3193 3200
3194out_sleep:
3195 wl1271_ps_elp_sleep(wl);
3196
3197out_unlock:
3198 mutex_unlock(&wl->mutex);
3199
3200 return ret; 3201 return ret;
3201} 3202}
3202EXPORT_SYMBOL_GPL(wlcore_set_key); 3203EXPORT_SYMBOL_GPL(wlcore_set_key);
@@ -3219,7 +3220,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3219 3220
3220 mutex_lock(&wl->mutex); 3221 mutex_lock(&wl->mutex);
3221 3222
3222 if (wl->state == WL1271_STATE_OFF) { 3223 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3223 /* 3224 /*
3224 * We cannot return -EBUSY here because cfg80211 will expect 3225 * We cannot return -EBUSY here because cfg80211 will expect
3225 * a call to ieee80211_scan_completed if we do - in this case 3226 * a call to ieee80211_scan_completed if we do - in this case
@@ -3259,7 +3260,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3259 3260
3260 mutex_lock(&wl->mutex); 3261 mutex_lock(&wl->mutex);
3261 3262
3262 if (wl->state == WL1271_STATE_OFF) 3263 if (unlikely(wl->state != WLCORE_STATE_ON))
3263 goto out; 3264 goto out;
3264 3265
3265 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 3266 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -3308,7 +3309,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3308 3309
3309 mutex_lock(&wl->mutex); 3310 mutex_lock(&wl->mutex);
3310 3311
3311 if (wl->state == WL1271_STATE_OFF) { 3312 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3312 ret = -EAGAIN; 3313 ret = -EAGAIN;
3313 goto out; 3314 goto out;
3314 } 3315 }
@@ -3345,7 +3346,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3345 3346
3346 mutex_lock(&wl->mutex); 3347 mutex_lock(&wl->mutex);
3347 3348
3348 if (wl->state == WL1271_STATE_OFF) 3349 if (unlikely(wl->state != WLCORE_STATE_ON))
3349 goto out; 3350 goto out;
3350 3351
3351 ret = wl1271_ps_elp_wakeup(wl); 3352 ret = wl1271_ps_elp_wakeup(wl);
@@ -3366,7 +3367,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3366 3367
3367 mutex_lock(&wl->mutex); 3368 mutex_lock(&wl->mutex);
3368 3369
3369 if (unlikely(wl->state == WL1271_STATE_OFF)) { 3370 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3370 ret = -EAGAIN; 3371 ret = -EAGAIN;
3371 goto out; 3372 goto out;
3372 } 3373 }
@@ -3395,7 +3396,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3395 3396
3396 mutex_lock(&wl->mutex); 3397 mutex_lock(&wl->mutex);
3397 3398
3398 if (unlikely(wl->state == WL1271_STATE_OFF)) { 3399 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3399 ret = -EAGAIN; 3400 ret = -EAGAIN;
3400 goto out; 3401 goto out;
3401 } 3402 }
@@ -4171,7 +4172,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4171 4172
4172 mutex_lock(&wl->mutex); 4173 mutex_lock(&wl->mutex);
4173 4174
4174 if (unlikely(wl->state == WL1271_STATE_OFF)) 4175 if (unlikely(wl->state != WLCORE_STATE_ON))
4175 goto out; 4176 goto out;
4176 4177
4177 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) 4178 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
@@ -4255,7 +4256,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4255 4256
4256 mutex_lock(&wl->mutex); 4257 mutex_lock(&wl->mutex);
4257 4258
4258 if (unlikely(wl->state == WL1271_STATE_OFF)) 4259 if (unlikely(wl->state != WLCORE_STATE_ON))
4259 goto out; 4260 goto out;
4260 4261
4261 ret = wl1271_ps_elp_wakeup(wl); 4262 ret = wl1271_ps_elp_wakeup(wl);
@@ -4454,7 +4455,7 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4454 4455
4455 mutex_lock(&wl->mutex); 4456 mutex_lock(&wl->mutex);
4456 4457
4457 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4458 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4458 ret = -EBUSY; 4459 ret = -EBUSY;
4459 goto out; 4460 goto out;
4460 } 4461 }
@@ -4493,7 +4494,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4493 4494
4494 mutex_lock(&wl->mutex); 4495 mutex_lock(&wl->mutex);
4495 4496
4496 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4497 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4497 ret = -EAGAIN; 4498 ret = -EAGAIN;
4498 goto out; 4499 goto out;
4499 } 4500 }
@@ -4611,7 +4612,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4611 mask->control[i].legacy, 4612 mask->control[i].legacy,
4612 i); 4613 i);
4613 4614
4614 if (unlikely(wl->state == WL1271_STATE_OFF)) 4615 if (unlikely(wl->state != WLCORE_STATE_ON))
4615 goto out; 4616 goto out;
4616 4617
4617 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 4618 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
@@ -4647,12 +4648,14 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4647 4648
4648 mutex_lock(&wl->mutex); 4649 mutex_lock(&wl->mutex);
4649 4650
4650 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4651 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4651 wl12xx_for_each_wlvif_sta(wl, wlvif) { 4652 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4652 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 4653 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4653 ieee80211_chswitch_done(vif, false); 4654 ieee80211_chswitch_done(vif, false);
4654 } 4655 }
4655 goto out; 4656 goto out;
4657 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4658 goto out;
4656 } 4659 }
4657 4660
4658 ret = wl1271_ps_elp_wakeup(wl); 4661 ret = wl1271_ps_elp_wakeup(wl);
@@ -4687,7 +4690,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4687 4690
4688 mutex_lock(&wl->mutex); 4691 mutex_lock(&wl->mutex);
4689 4692
4690 if (unlikely(wl->state == WL1271_STATE_OFF)) 4693 if (unlikely(wl->state != WLCORE_STATE_ON))
4691 goto out; 4694 goto out;
4692 4695
4693 /* packets are considered pending if in the TX queue or the FW */ 4696 /* packets are considered pending if in the TX queue or the FW */
@@ -4936,7 +4939,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4936 4939
4937 wl->sg_enabled = res; 4940 wl->sg_enabled = res;
4938 4941
4939 if (wl->state == WL1271_STATE_OFF) 4942 if (unlikely(wl->state != WLCORE_STATE_ON))
4940 goto out; 4943 goto out;
4941 4944
4942 ret = wl1271_ps_elp_wakeup(wl); 4945 ret = wl1271_ps_elp_wakeup(wl);
@@ -5054,7 +5057,7 @@ static void wl1271_connection_loss_work(struct work_struct *work)
5054 5057
5055 mutex_lock(&wl->mutex); 5058 mutex_lock(&wl->mutex);
5056 5059
5057 if (unlikely(wl->state == WL1271_STATE_OFF)) 5060 if (unlikely(wl->state != WLCORE_STATE_ON))
5058 goto out; 5061 goto out;
5059 5062
5060 /* Call mac80211 connection loss */ 5063 /* Call mac80211 connection loss */
@@ -5068,18 +5071,17 @@ out:
5068 mutex_unlock(&wl->mutex); 5071 mutex_unlock(&wl->mutex);
5069} 5072}
5070 5073
5071static void wl12xx_derive_mac_addresses(struct wl1271 *wl, 5074static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5072 u32 oui, u32 nic, int n)
5073{ 5075{
5074 int i; 5076 int i;
5075 5077
5076 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d", 5078 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5077 oui, nic, n); 5079 oui, nic);
5078 5080
5079 if (nic + n - 1 > 0xffffff) 5081 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5080 wl1271_warning("NIC part of the MAC address wraps around!"); 5082 wl1271_warning("NIC part of the MAC address wraps around!");
5081 5083
5082 for (i = 0; i < n; i++) { 5084 for (i = 0; i < wl->num_mac_addr; i++) {
5083 wl->addresses[i].addr[0] = (u8)(oui >> 16); 5085 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5084 wl->addresses[i].addr[1] = (u8)(oui >> 8); 5086 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5085 wl->addresses[i].addr[2] = (u8) oui; 5087 wl->addresses[i].addr[2] = (u8) oui;
@@ -5089,7 +5091,22 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5089 nic++; 5091 nic++;
5090 } 5092 }
5091 5093
5092 wl->hw->wiphy->n_addresses = n; 5094 /* we may be one address short at the most */
5095 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5096
5097 /*
5098 * turn on the LAA bit in the first address and use it as
5099 * the last address.
5100 */
5101 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5102 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5103 memcpy(&wl->addresses[idx], &wl->addresses[0],
5104 sizeof(wl->addresses[0]));
5105 /* LAA bit */
5106 wl->addresses[idx].addr[2] |= BIT(1);
5107 }
5108
5109 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5093 wl->hw->wiphy->addresses = wl->addresses; 5110 wl->hw->wiphy->addresses = wl->addresses;
5094} 5111}
5095 5112
@@ -5128,8 +5145,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
5128 if (wl->mac80211_registered) 5145 if (wl->mac80211_registered)
5129 return 0; 5146 return 0;
5130 5147
5131 wl1271_fetch_nvs(wl); 5148 if (wl->nvs_len >= 12) {
5132 if (wl->nvs != NULL) {
5133 /* NOTE: The wl->nvs->nvs element must be first, in 5149 /* NOTE: The wl->nvs->nvs element must be first, in
5134 * order to simplify the casting, we assume it is at 5150 * order to simplify the casting, we assume it is at
5135 * the beginning of the wl->nvs structure. 5151 * the beginning of the wl->nvs structure.
@@ -5149,7 +5165,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
5149 nic_addr = wl->fuse_nic_addr + 1; 5165 nic_addr = wl->fuse_nic_addr + 1;
5150 } 5166 }
5151 5167
5152 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2); 5168 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5153 5169
5154 ret = ieee80211_register_hw(wl->hw); 5170 ret = ieee80211_register_hw(wl->hw);
5155 if (ret < 0) { 5171 if (ret < 0) {
@@ -5179,7 +5195,7 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
5179 5195
5180static const struct ieee80211_iface_limit wlcore_iface_limits[] = { 5196static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5181 { 5197 {
5182 .max = 2, 5198 .max = 3,
5183 .types = BIT(NL80211_IFTYPE_STATION), 5199 .types = BIT(NL80211_IFTYPE_STATION),
5184 }, 5200 },
5185 { 5201 {
@@ -5194,7 +5210,7 @@ static const struct ieee80211_iface_combination
5194wlcore_iface_combinations[] = { 5210wlcore_iface_combinations[] = {
5195 { 5211 {
5196 .num_different_channels = 1, 5212 .num_different_channels = 1,
5197 .max_interfaces = 2, 5213 .max_interfaces = 3,
5198 .limits = wlcore_iface_limits, 5214 .limits = wlcore_iface_limits,
5199 .n_limits = ARRAY_SIZE(wlcore_iface_limits), 5215 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5200 }, 5216 },
@@ -5310,7 +5326,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5310 5326
5311#define WL1271_DEFAULT_CHANNEL 0 5327#define WL1271_DEFAULT_CHANNEL 0
5312 5328
5313struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size) 5329struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
5314{ 5330{
5315 struct ieee80211_hw *hw; 5331 struct ieee80211_hw *hw;
5316 struct wl1271 *wl; 5332 struct wl1271 *wl;
@@ -5390,17 +5406,19 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5390 5406
5391 spin_lock_init(&wl->wl_lock); 5407 spin_lock_init(&wl->wl_lock);
5392 5408
5393 wl->state = WL1271_STATE_OFF; 5409 wl->state = WLCORE_STATE_OFF;
5394 wl->fw_type = WL12XX_FW_TYPE_NONE; 5410 wl->fw_type = WL12XX_FW_TYPE_NONE;
5395 mutex_init(&wl->mutex); 5411 mutex_init(&wl->mutex);
5396 mutex_init(&wl->flush_mutex); 5412 mutex_init(&wl->flush_mutex);
5413 init_completion(&wl->nvs_loading_complete);
5397 5414
5398 order = get_order(WL1271_AGGR_BUFFER_SIZE); 5415 order = get_order(aggr_buf_size);
5399 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 5416 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5400 if (!wl->aggr_buf) { 5417 if (!wl->aggr_buf) {
5401 ret = -ENOMEM; 5418 ret = -ENOMEM;
5402 goto err_wq; 5419 goto err_wq;
5403 } 5420 }
5421 wl->aggr_buf_size = aggr_buf_size;
5404 5422
5405 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); 5423 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5406 if (!wl->dummy_packet) { 5424 if (!wl->dummy_packet) {
@@ -5463,8 +5481,7 @@ int wlcore_free_hw(struct wl1271 *wl)
5463 device_remove_file(wl->dev, &dev_attr_bt_coex_state); 5481 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5464 free_page((unsigned long)wl->fwlog); 5482 free_page((unsigned long)wl->fwlog);
5465 dev_kfree_skb(wl->dummy_packet); 5483 dev_kfree_skb(wl->dummy_packet);
5466 free_pages((unsigned long)wl->aggr_buf, 5484 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5467 get_order(WL1271_AGGR_BUFFER_SIZE));
5468 5485
5469 wl1271_debugfs_exit(wl); 5486 wl1271_debugfs_exit(wl);
5470 5487
@@ -5514,17 +5531,32 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5514 return IRQ_WAKE_THREAD; 5531 return IRQ_WAKE_THREAD;
5515} 5532}
5516 5533
5517int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) 5534static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5518{ 5535{
5536 struct wl1271 *wl = context;
5537 struct platform_device *pdev = wl->pdev;
5519 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 5538 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5520 unsigned long irqflags; 5539 unsigned long irqflags;
5521 int ret; 5540 int ret;
5522 5541
5523 if (!wl->ops || !wl->ptable) { 5542 if (fw) {
5524 ret = -EINVAL; 5543 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5525 goto out_free_hw; 5544 if (!wl->nvs) {
5545 wl1271_error("Could not allocate nvs data");
5546 goto out;
5547 }
5548 wl->nvs_len = fw->size;
5549 } else {
5550 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5551 WL12XX_NVS_NAME);
5552 wl->nvs = NULL;
5553 wl->nvs_len = 0;
5526 } 5554 }
5527 5555
5556 ret = wl->ops->setup(wl);
5557 if (ret < 0)
5558 goto out_free_nvs;
5559
5528 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS); 5560 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5529 5561
5530 /* adjust some runtime configuration parameters */ 5562 /* adjust some runtime configuration parameters */
@@ -5533,11 +5565,8 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5533 wl->irq = platform_get_irq(pdev, 0); 5565 wl->irq = platform_get_irq(pdev, 0);
5534 wl->platform_quirks = pdata->platform_quirks; 5566 wl->platform_quirks = pdata->platform_quirks;
5535 wl->set_power = pdata->set_power; 5567 wl->set_power = pdata->set_power;
5536 wl->dev = &pdev->dev;
5537 wl->if_ops = pdata->ops; 5568 wl->if_ops = pdata->ops;
5538 5569
5539 platform_set_drvdata(pdev, wl);
5540
5541 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 5570 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5542 irqflags = IRQF_TRIGGER_RISING; 5571 irqflags = IRQF_TRIGGER_RISING;
5543 else 5572 else
@@ -5548,7 +5577,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5548 pdev->name, wl); 5577 pdev->name, wl);
5549 if (ret < 0) { 5578 if (ret < 0) {
5550 wl1271_error("request_irq() failed: %d", ret); 5579 wl1271_error("request_irq() failed: %d", ret);
5551 goto out_free_hw; 5580 goto out_free_nvs;
5552 } 5581 }
5553 5582
5554#ifdef CONFIG_PM 5583#ifdef CONFIG_PM
@@ -5607,6 +5636,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5607 goto out_hw_pg_ver; 5636 goto out_hw_pg_ver;
5608 } 5637 }
5609 5638
5639 wl->initialized = true;
5610 goto out; 5640 goto out;
5611 5641
5612out_hw_pg_ver: 5642out_hw_pg_ver:
@@ -5621,10 +5651,33 @@ out_unreg:
5621out_irq: 5651out_irq:
5622 free_irq(wl->irq, wl); 5652 free_irq(wl->irq, wl);
5623 5653
5624out_free_hw: 5654out_free_nvs:
5625 wlcore_free_hw(wl); 5655 kfree(wl->nvs);
5626 5656
5627out: 5657out:
5658 release_firmware(fw);
5659 complete_all(&wl->nvs_loading_complete);
5660}
5661
5662int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5663{
5664 int ret;
5665
5666 if (!wl->ops || !wl->ptable)
5667 return -EINVAL;
5668
5669 wl->dev = &pdev->dev;
5670 wl->pdev = pdev;
5671 platform_set_drvdata(pdev, wl);
5672
5673 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
5674 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
5675 wl, wlcore_nvs_cb);
5676 if (ret < 0) {
5677 wl1271_error("request_firmware_nowait failed: %d", ret);
5678 complete_all(&wl->nvs_loading_complete);
5679 }
5680
5628 return ret; 5681 return ret;
5629} 5682}
5630EXPORT_SYMBOL_GPL(wlcore_probe); 5683EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -5633,6 +5686,10 @@ int __devexit wlcore_remove(struct platform_device *pdev)
5633{ 5686{
5634 struct wl1271 *wl = platform_get_drvdata(pdev); 5687 struct wl1271 *wl = platform_get_drvdata(pdev);
5635 5688
5689 wait_for_completion(&wl->nvs_loading_complete);
5690 if (!wl->initialized)
5691 return 0;
5692
5636 if (wl->irq_wake_enabled) { 5693 if (wl->irq_wake_enabled) {
5637 device_init_wakeup(wl->dev, 0); 5694 device_init_wakeup(wl->dev, 0);
5638 disable_irq_wake(wl->irq); 5695 disable_irq_wake(wl->irq);
@@ -5663,3 +5720,4 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5663MODULE_LICENSE("GPL"); 5720MODULE_LICENSE("GPL");
5664MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 5721MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5665MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 5722MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
5723MODULE_FIRMWARE(WL12XX_NVS_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 46d36fd30eba..4d1414a673fb 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,7 +28,7 @@
28 28
29#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
30 30
31#define ELP_ENTRY_DELAY 5 31#define ELP_ENTRY_DELAY 30
32 32
33void wl1271_elp_work(struct work_struct *work) 33void wl1271_elp_work(struct work_struct *work)
34{ 34{
@@ -44,7 +44,7 @@ void wl1271_elp_work(struct work_struct *work)
44 44
45 mutex_lock(&wl->mutex); 45 mutex_lock(&wl->mutex);
46 46
47 if (unlikely(wl->state == WL1271_STATE_OFF)) 47 if (unlikely(wl->state != WLCORE_STATE_ON))
48 goto out; 48 goto out;
49 49
50 /* our work might have been already cancelled */ 50 /* our work might have been already cancelled */
@@ -98,11 +98,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
98 return; 98 return;
99 } 99 }
100 100
101 if (wl->conf.conn.forced_ps) 101 timeout = ELP_ENTRY_DELAY;
102 timeout = ELP_ENTRY_DELAY;
103 else
104 timeout = wl->conf.conn.dynamic_ps_timeout;
105
106 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 102 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
107 msecs_to_jiffies(timeout)); 103 msecs_to_jiffies(timeout));
108} 104}
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index f55e2f9e7ac5..9ee0ec6fd1db 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -221,7 +221,7 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
221 pkt_len = wlcore_rx_get_buf_size(wl, des); 221 pkt_len = wlcore_rx_get_buf_size(wl, des);
222 align_pkt_len = wlcore_rx_get_align_buf_size(wl, 222 align_pkt_len = wlcore_rx_get_align_buf_size(wl,
223 pkt_len); 223 pkt_len);
224 if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE) 224 if (buf_size + align_pkt_len > wl->aggr_buf_size)
225 break; 225 break;
226 buf_size += align_pkt_len; 226 buf_size += align_pkt_len;
227 rx_counter++; 227 rx_counter++;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index dbeca1bfbb2c..d00501493dfe 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -46,7 +46,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
46 46
47 mutex_lock(&wl->mutex); 47 mutex_lock(&wl->mutex);
48 48
49 if (wl->state == WL1271_STATE_OFF) 49 if (unlikely(wl->state != WLCORE_STATE_ON))
50 goto out; 50 goto out;
51 51
52 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 52 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -184,11 +184,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
184 if (passive) 184 if (passive)
185 scan_options |= WL1271_SCAN_OPT_PASSIVE; 185 scan_options |= WL1271_SCAN_OPT_PASSIVE;
186 186
187 if (wlvif->bss_type == BSS_TYPE_AP_BSS || 187 cmd->params.role_id = wlvif->role_id;
188 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
189 cmd->params.role_id = wlvif->role_id;
190 else
191 cmd->params.role_id = wlvif->dev_role_id;
192 188
193 if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { 189 if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
194 ret = -EINVAL; 190 ret = -EINVAL;
@@ -593,7 +589,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
593 goto out; 589 goto out;
594 } 590 }
595 591
596 cmd->role_id = wlvif->dev_role_id; 592 cmd->role_id = wlvif->role_id;
597 if (!n_match_ssids) { 593 if (!n_match_ssids) {
598 /* No filter, with ssids */ 594 /* No filter, with ssids */
599 type = SCAN_SSID_FILTER_DISABLED; 595 type = SCAN_SSID_FILTER_DISABLED;
@@ -683,7 +679,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
683 if (!cfg) 679 if (!cfg)
684 return -ENOMEM; 680 return -ENOMEM;
685 681
686 cfg->role_id = wlvif->dev_role_id; 682 cfg->role_id = wlvif->role_id;
687 cfg->rssi_threshold = c->rssi_threshold; 683 cfg->rssi_threshold = c->rssi_threshold;
688 cfg->snr_threshold = c->snr_threshold; 684 cfg->snr_threshold = c->snr_threshold;
689 cfg->n_probe_reqs = c->num_probe_reqs; 685 cfg->n_probe_reqs = c->num_probe_reqs;
@@ -718,7 +714,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
718 if (!force_passive && cfg->active[0]) { 714 if (!force_passive && cfg->active[0]) {
719 u8 band = IEEE80211_BAND_2GHZ; 715 u8 band = IEEE80211_BAND_2GHZ;
720 ret = wl12xx_cmd_build_probe_req(wl, wlvif, 716 ret = wl12xx_cmd_build_probe_req(wl, wlvif,
721 wlvif->dev_role_id, band, 717 wlvif->role_id, band,
722 req->ssids[0].ssid, 718 req->ssids[0].ssid,
723 req->ssids[0].ssid_len, 719 req->ssids[0].ssid_len,
724 ies->ie[band], 720 ies->ie[band],
@@ -732,7 +728,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
732 if (!force_passive && cfg->active[1]) { 728 if (!force_passive && cfg->active[1]) {
733 u8 band = IEEE80211_BAND_5GHZ; 729 u8 band = IEEE80211_BAND_5GHZ;
734 ret = wl12xx_cmd_build_probe_req(wl, wlvif, 730 ret = wl12xx_cmd_build_probe_req(wl, wlvif,
735 wlvif->dev_role_id, band, 731 wlvif->role_id, band,
736 req->ssids[0].ssid, 732 req->ssids[0].ssid,
737 req->ssids[0].ssid_len, 733 req->ssids[0].ssid_len,
738 ies->ie[band], 734 ies->ie[band],
@@ -774,7 +770,7 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
774 if (!start) 770 if (!start)
775 return -ENOMEM; 771 return -ENOMEM;
776 772
777 start->role_id = wlvif->dev_role_id; 773 start->role_id = wlvif->role_id;
778 start->tag = WL1271_SCAN_DEFAULT_TAG; 774 start->tag = WL1271_SCAN_DEFAULT_TAG;
779 775
780 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start, 776 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -810,7 +806,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
810 return; 806 return;
811 } 807 }
812 808
813 stop->role_id = wlvif->dev_role_id; 809 stop->role_id = wlvif->role_id;
814 stop->tag = WL1271_SCAN_DEFAULT_TAG; 810 stop->tag = WL1271_SCAN_DEFAULT_TAG;
815 811
816 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop, 812 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 8da4ed243ebc..a519bc3adec1 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -66,7 +66,13 @@
66/* HW limitation: maximum possible chunk size is 4095 bytes */ 66/* HW limitation: maximum possible chunk size is 4095 bytes */
67#define WSPI_MAX_CHUNK_SIZE 4092 67#define WSPI_MAX_CHUNK_SIZE 4092
68 68
69#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) 69/*
70 * only support SPI for 12xx - this code should be reworked when 18xx
71 * support is introduced
72 */
73#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
74
75#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
70 76
71struct wl12xx_spi_glue { 77struct wl12xx_spi_glue {
72 struct device *dev; 78 struct device *dev;
@@ -271,7 +277,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
271 u32 chunk_len; 277 u32 chunk_len;
272 int i; 278 int i;
273 279
274 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE); 280 WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
275 281
276 spi_message_init(&m); 282 spi_message_init(&m);
277 memset(t, 0, sizeof(t)); 283 memset(t, 0, sizeof(t));
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 49e5ee1525c9..f3442762d884 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -92,7 +92,7 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
92 92
93 mutex_lock(&wl->mutex); 93 mutex_lock(&wl->mutex);
94 94
95 if (wl->state == WL1271_STATE_OFF) { 95 if (unlikely(wl->state != WLCORE_STATE_ON)) {
96 ret = -EINVAL; 96 ret = -EINVAL;
97 goto out; 97 goto out;
98 } 98 }
@@ -164,7 +164,7 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
164 164
165 mutex_lock(&wl->mutex); 165 mutex_lock(&wl->mutex);
166 166
167 if (wl->state == WL1271_STATE_OFF) { 167 if (unlikely(wl->state != WLCORE_STATE_ON)) {
168 ret = -EINVAL; 168 ret = -EINVAL;
169 goto out; 169 goto out;
170 } 170 }
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0081f746482..a90d3cd09408 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
130} 130}
131EXPORT_SYMBOL(wl12xx_is_dummy_packet); 131EXPORT_SYMBOL(wl12xx_is_dummy_packet);
132 132
133u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, 133static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
134 struct sk_buff *skb) 134 struct sk_buff *skb, struct ieee80211_sta *sta)
135{ 135{
136 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); 136 if (sta) {
137
138 if (control->control.sta) {
139 struct wl1271_station *wl_sta; 137 struct wl1271_station *wl_sta;
140 138
141 wl_sta = (struct wl1271_station *) 139 wl_sta = (struct wl1271_station *)sta->drv_priv;
142 control->control.sta->drv_priv;
143 return wl_sta->hlid; 140 return wl_sta->hlid;
144 } else { 141 } else {
145 struct ieee80211_hdr *hdr; 142 struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
156} 153}
157 154
158u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 155u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
159 struct sk_buff *skb) 156 struct sk_buff *skb, struct ieee80211_sta *sta)
160{ 157{
161 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
162 159
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
164 return wl->system_hlid; 161 return wl->system_hlid;
165 162
166 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 163 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
167 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); 164 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
168 165
169 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || 166 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
170 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && 167 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -196,7 +193,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
196 int id, ret = -EBUSY, ac; 193 int id, ret = -EBUSY, ac;
197 u32 spare_blocks; 194 u32 spare_blocks;
198 195
199 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 196 if (buf_offset + total_len > wl->aggr_buf_size)
200 return -EAGAIN; 197 return -EAGAIN;
201 198
202 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem); 199 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
@@ -322,8 +319,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
322 if (hlid == wlvif->ap.global_hlid) 319 if (hlid == wlvif->ap.global_hlid)
323 rate_idx = wlvif->ap.mgmt_rate_idx; 320 rate_idx = wlvif->ap.mgmt_rate_idx;
324 else if (hlid == wlvif->ap.bcast_hlid || 321 else if (hlid == wlvif->ap.bcast_hlid ||
325 skb->protocol == cpu_to_be16(ETH_P_PAE)) 322 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
326 /* send AP bcast and EAPOLs using the min basic rate */ 323 !ieee80211_is_data(frame_control))
324 /*
325 * send non-data, bcast and EAPOLs using the
326 * min basic rate
327 */
327 rate_idx = wlvif->ap.bcast_rate_idx; 328 rate_idx = wlvif->ap.bcast_rate_idx;
328 else 329 else
329 rate_idx = wlvif->ap.ucast_rate_idx[ac]; 330 rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -344,13 +345,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
344 345
345/* caller must hold wl->mutex */ 346/* caller must hold wl->mutex */
346static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, 347static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
347 struct sk_buff *skb, u32 buf_offset) 348 struct sk_buff *skb, u32 buf_offset, u8 hlid)
348{ 349{
349 struct ieee80211_tx_info *info; 350 struct ieee80211_tx_info *info;
350 u32 extra = 0; 351 u32 extra = 0;
351 int ret = 0; 352 int ret = 0;
352 u32 total_len; 353 u32 total_len;
353 u8 hlid;
354 bool is_dummy; 354 bool is_dummy;
355 bool is_gem = false; 355 bool is_gem = false;
356 356
@@ -359,9 +359,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
359 return -EINVAL; 359 return -EINVAL;
360 } 360 }
361 361
362 if (hlid == WL12XX_INVALID_LINK_ID) {
363 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
364 return -EINVAL;
365 }
366
362 info = IEEE80211_SKB_CB(skb); 367 info = IEEE80211_SKB_CB(skb);
363 368
364 /* TODO: handle dummy packets on multi-vifs */
365 is_dummy = wl12xx_is_dummy_packet(wl, skb); 369 is_dummy = wl12xx_is_dummy_packet(wl, skb);
366 370
367 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && 371 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +390,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
386 390
387 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM); 391 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
388 } 392 }
389 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
390 if (hlid == WL12XX_INVALID_LINK_ID) {
391 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
392 return -EINVAL;
393 }
394 393
395 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, 394 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
396 is_gem); 395 is_gem);
@@ -517,7 +516,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
517} 516}
518 517
519static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, 518static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
520 struct wl12xx_vif *wlvif) 519 struct wl12xx_vif *wlvif,
520 u8 *hlid)
521{ 521{
522 struct sk_buff *skb = NULL; 522 struct sk_buff *skb = NULL;
523 int i, h, start_hlid; 523 int i, h, start_hlid;
@@ -544,10 +544,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
544 if (!skb) 544 if (!skb)
545 wlvif->last_tx_hlid = 0; 545 wlvif->last_tx_hlid = 0;
546 546
547 *hlid = wlvif->last_tx_hlid;
547 return skb; 548 return skb;
548} 549}
549 550
550static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 551static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
551{ 552{
552 unsigned long flags; 553 unsigned long flags;
553 struct wl12xx_vif *wlvif = wl->last_wlvif; 554 struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +557,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
556 /* continue from last wlvif (round robin) */ 557 /* continue from last wlvif (round robin) */
557 if (wlvif) { 558 if (wlvif) {
558 wl12xx_for_each_wlvif_continue(wl, wlvif) { 559 wl12xx_for_each_wlvif_continue(wl, wlvif) {
559 skb = wl12xx_vif_skb_dequeue(wl, wlvif); 560 skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
560 if (skb) { 561 if (skb) {
561 wl->last_wlvif = wlvif; 562 wl->last_wlvif = wlvif;
562 break; 563 break;
@@ -565,13 +566,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
565 } 566 }
566 567
567 /* dequeue from the system HLID before the restarting wlvif list */ 568 /* dequeue from the system HLID before the restarting wlvif list */
568 if (!skb) 569 if (!skb) {
569 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); 570 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
571 *hlid = wl->system_hlid;
572 }
570 573
571 /* do a new pass over the wlvif list */ 574 /* do a new pass over the wlvif list */
572 if (!skb) { 575 if (!skb) {
573 wl12xx_for_each_wlvif(wl, wlvif) { 576 wl12xx_for_each_wlvif(wl, wlvif) {
574 skb = wl12xx_vif_skb_dequeue(wl, wlvif); 577 skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
575 if (skb) { 578 if (skb) {
576 wl->last_wlvif = wlvif; 579 wl->last_wlvif = wlvif;
577 break; 580 break;
@@ -591,6 +594,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
591 int q; 594 int q;
592 595
593 skb = wl->dummy_packet; 596 skb = wl->dummy_packet;
597 *hlid = wl->system_hlid;
594 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 598 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
595 spin_lock_irqsave(&wl->wl_lock, flags); 599 spin_lock_irqsave(&wl->wl_lock, flags);
596 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); 600 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +606,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
602} 606}
603 607
604static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, 608static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
605 struct sk_buff *skb) 609 struct sk_buff *skb, u8 hlid)
606{ 610{
607 unsigned long flags; 611 unsigned long flags;
608 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 612 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +614,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
610 if (wl12xx_is_dummy_packet(wl, skb)) { 614 if (wl12xx_is_dummy_packet(wl, skb)) {
611 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 615 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
612 } else { 616 } else {
613 u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
614 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 617 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
615 618
616 /* make sure we dequeue the same packet next time */ 619 /* make sure we dequeue the same packet next time */
@@ -686,26 +689,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
686 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 689 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
687 int ret = 0; 690 int ret = 0;
688 int bus_ret = 0; 691 int bus_ret = 0;
692 u8 hlid;
689 693
690 if (unlikely(wl->state == WL1271_STATE_OFF)) 694 if (unlikely(wl->state != WLCORE_STATE_ON))
691 return 0; 695 return 0;
692 696
693 while ((skb = wl1271_skb_dequeue(wl))) { 697 while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
694 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 698 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
695 bool has_data = false; 699 bool has_data = false;
696 700
697 wlvif = NULL; 701 wlvif = NULL;
698 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) 702 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
699 wlvif = wl12xx_vif_to_data(info->control.vif); 703 wlvif = wl12xx_vif_to_data(info->control.vif);
704 else
705 hlid = wl->system_hlid;
700 706
701 has_data = wlvif && wl1271_tx_is_data_present(skb); 707 has_data = wlvif && wl1271_tx_is_data_present(skb);
702 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); 708 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
709 hlid);
703 if (ret == -EAGAIN) { 710 if (ret == -EAGAIN) {
704 /* 711 /*
705 * Aggregation buffer is full. 712 * Aggregation buffer is full.
706 * Flush buffer and try again. 713 * Flush buffer and try again.
707 */ 714 */
708 wl1271_skb_queue_head(wl, wlvif, skb); 715 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
709 716
710 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, 717 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
711 last_len); 718 last_len);
@@ -722,7 +729,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
722 * Firmware buffer is full. 729 * Firmware buffer is full.
723 * Queue back last skb, and stop aggregating. 730 * Queue back last skb, and stop aggregating.
724 */ 731 */
725 wl1271_skb_queue_head(wl, wlvif, skb); 732 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
726 /* No work left, avoid scheduling redundant tx work */ 733 /* No work left, avoid scheduling redundant tx work */
727 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 734 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
728 goto out_ack; 735 goto out_ack;
@@ -732,7 +739,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
732 * fw still expects dummy packet, 739 * fw still expects dummy packet,
733 * so re-enqueue it 740 * so re-enqueue it
734 */ 741 */
735 wl1271_skb_queue_head(wl, wlvif, skb); 742 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
736 else 743 else
737 ieee80211_free_txskb(wl->hw, skb); 744 ieee80211_free_txskb(wl->hw, skb);
738 goto out_ack; 745 goto out_ack;
@@ -1069,39 +1076,54 @@ void wl12xx_tx_reset(struct wl1271 *wl)
1069/* caller must *NOT* hold wl->mutex */ 1076/* caller must *NOT* hold wl->mutex */
1070void wl1271_tx_flush(struct wl1271 *wl) 1077void wl1271_tx_flush(struct wl1271 *wl)
1071{ 1078{
1072 unsigned long timeout; 1079 unsigned long timeout, start_time;
1073 int i; 1080 int i;
1074 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); 1081 start_time = jiffies;
1082 timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
1075 1083
1076 /* only one flush should be in progress, for consistent queue state */ 1084 /* only one flush should be in progress, for consistent queue state */
1077 mutex_lock(&wl->flush_mutex); 1085 mutex_lock(&wl->flush_mutex);
1078 1086
1087 mutex_lock(&wl->mutex);
1088 if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
1089 mutex_unlock(&wl->mutex);
1090 goto out;
1091 }
1092
1079 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); 1093 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1080 1094
1081 while (!time_after(jiffies, timeout)) { 1095 while (!time_after(jiffies, timeout)) {
1082 mutex_lock(&wl->mutex); 1096 wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
1083 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
1084 wl->tx_frames_cnt, 1097 wl->tx_frames_cnt,
1085 wl1271_tx_total_queue_count(wl)); 1098 wl1271_tx_total_queue_count(wl));
1099
1100 /* force Tx and give the driver some time to flush data */
1101 mutex_unlock(&wl->mutex);
1102 if (wl1271_tx_total_queue_count(wl))
1103 wl1271_tx_work(&wl->tx_work);
1104 msleep(20);
1105 mutex_lock(&wl->mutex);
1106
1086 if ((wl->tx_frames_cnt == 0) && 1107 if ((wl->tx_frames_cnt == 0) &&
1087 (wl1271_tx_total_queue_count(wl) == 0)) { 1108 (wl1271_tx_total_queue_count(wl) == 0)) {
1088 mutex_unlock(&wl->mutex); 1109 wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
1089 goto out; 1110 jiffies_to_msecs(jiffies - start_time));
1111 goto out_wake;
1090 } 1112 }
1091 mutex_unlock(&wl->mutex);
1092 msleep(1);
1093 } 1113 }
1094 1114
1095 wl1271_warning("Unable to flush all TX buffers, timed out."); 1115 wl1271_warning("Unable to flush all TX buffers, "
1116 "timed out (timeout %d ms",
1117 WL1271_TX_FLUSH_TIMEOUT / 1000);
1096 1118
1097 /* forcibly flush all Tx buffers on our queues */ 1119 /* forcibly flush all Tx buffers on our queues */
1098 mutex_lock(&wl->mutex);
1099 for (i = 0; i < WL12XX_MAX_LINKS; i++) 1120 for (i = 0; i < WL12XX_MAX_LINKS; i++)
1100 wl1271_tx_reset_link_queues(wl, i); 1121 wl1271_tx_reset_link_queues(wl, i);
1101 mutex_unlock(&wl->mutex);
1102 1122
1103out: 1123out_wake:
1104 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); 1124 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
1125 mutex_unlock(&wl->mutex);
1126out:
1105 mutex_unlock(&wl->flush_mutex); 1127 mutex_unlock(&wl->flush_mutex);
1106} 1128}
1107EXPORT_SYMBOL_GPL(wl1271_tx_flush); 1129EXPORT_SYMBOL_GPL(wl1271_tx_flush);
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 1e939b016155..349520d8b724 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
244 enum ieee80211_band rate_band); 244 enum ieee80211_band rate_band);
245u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); 245u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
246u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
247 struct sk_buff *skb);
248u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 246u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
249 struct sk_buff *skb); 247 struct sk_buff *skb, struct ieee80211_sta *sta);
250void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); 248void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
251void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 249void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
252bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); 250bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0ce7a8ebbd46..68584aa0f2b0 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -31,12 +31,19 @@
31/* The maximum number of Tx descriptors in all chip families */ 31/* The maximum number of Tx descriptors in all chip families */
32#define WLCORE_MAX_TX_DESCRIPTORS 32 32#define WLCORE_MAX_TX_DESCRIPTORS 32
33 33
34/*
35 * We always allocate this number of mac addresses. If we don't
36 * have enough allocated addresses, the LAA bit is used
37 */
38#define WLCORE_NUM_MAC_ADDRESSES 3
39
34/* forward declaration */ 40/* forward declaration */
35struct wl1271_tx_hw_descr; 41struct wl1271_tx_hw_descr;
36enum wl_rx_buf_align; 42enum wl_rx_buf_align;
37struct wl1271_rx_descriptor; 43struct wl1271_rx_descriptor;
38 44
39struct wlcore_ops { 45struct wlcore_ops {
46 int (*setup)(struct wl1271 *wl);
40 int (*identify_chip)(struct wl1271 *wl); 47 int (*identify_chip)(struct wl1271 *wl);
41 int (*identify_fw)(struct wl1271 *wl); 48 int (*identify_fw)(struct wl1271 *wl);
42 int (*boot)(struct wl1271 *wl); 49 int (*boot)(struct wl1271 *wl);
@@ -139,10 +146,12 @@ struct wl1271_stats {
139}; 146};
140 147
141struct wl1271 { 148struct wl1271 {
149 bool initialized;
142 struct ieee80211_hw *hw; 150 struct ieee80211_hw *hw;
143 bool mac80211_registered; 151 bool mac80211_registered;
144 152
145 struct device *dev; 153 struct device *dev;
154 struct platform_device *pdev;
146 155
147 void *if_priv; 156 void *if_priv;
148 157
@@ -153,7 +162,7 @@ struct wl1271 {
153 162
154 spinlock_t wl_lock; 163 spinlock_t wl_lock;
155 164
156 enum wl1271_state state; 165 enum wlcore_state state;
157 enum wl12xx_fw_type fw_type; 166 enum wl12xx_fw_type fw_type;
158 bool plt; 167 bool plt;
159 enum plt_mode plt_mode; 168 enum plt_mode plt_mode;
@@ -181,7 +190,7 @@ struct wl1271 {
181 u32 fuse_nic_addr; 190 u32 fuse_nic_addr;
182 191
183 /* we have up to 2 MAC addresses */ 192 /* we have up to 2 MAC addresses */
184 struct mac_address addresses[2]; 193 struct mac_address addresses[WLCORE_NUM_MAC_ADDRESSES];
185 int channel; 194 int channel;
186 u8 system_hlid; 195 u8 system_hlid;
187 196
@@ -190,6 +199,8 @@ struct wl1271 {
190 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; 199 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
191 unsigned long rate_policies_map[ 200 unsigned long rate_policies_map[
192 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; 201 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
202 unsigned long klv_templates_map[
203 BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
193 204
194 struct list_head wlvif_list; 205 struct list_head wlvif_list;
195 206
@@ -237,6 +248,7 @@ struct wl1271 {
237 248
238 /* Intermediate buffer, used for packet aggregation */ 249 /* Intermediate buffer, used for packet aggregation */
239 u8 *aggr_buf; 250 u8 *aggr_buf;
251 u32 aggr_buf_size;
240 252
241 /* Reusable dummy packet template */ 253 /* Reusable dummy packet template */
242 struct sk_buff *dummy_packet; 254 struct sk_buff *dummy_packet;
@@ -393,13 +405,18 @@ struct wl1271 {
393 /* sleep auth value currently configured to FW */ 405 /* sleep auth value currently configured to FW */
394 int sleep_auth; 406 int sleep_auth;
395 407
408 /* the number of allocated MAC addresses in this chip */
409 int num_mac_addr;
410
396 /* the minimum FW version required for the driver to work */ 411 /* the minimum FW version required for the driver to work */
397 unsigned int min_fw_ver[NUM_FW_VER]; 412 unsigned int min_fw_ver[NUM_FW_VER];
413
414 struct completion nvs_loading_complete;
398}; 415};
399 416
400int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); 417int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
401int __devexit wlcore_remove(struct platform_device *pdev); 418int __devexit wlcore_remove(struct platform_device *pdev);
402struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size); 419struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
403int wlcore_free_hw(struct wl1271 *wl); 420int wlcore_free_hw(struct wl1271 *wl);
404int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 421int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
405 struct ieee80211_vif *vif, 422 struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index c0505635bb00..6678d4b18611 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -66,6 +66,7 @@
66#define WLCORE_NUM_BANDS 2 66#define WLCORE_NUM_BANDS 2
67 67
68#define WL12XX_MAX_RATE_POLICIES 16 68#define WL12XX_MAX_RATE_POLICIES 16
69#define WLCORE_MAX_KLV_TEMPLATES 4
69 70
70/* Defined by FW as 0. Will not be freed or allocated. */ 71/* Defined by FW as 0. Will not be freed or allocated. */
71#define WL12XX_SYSTEM_HLID 0 72#define WL12XX_SYSTEM_HLID 0
@@ -83,11 +84,10 @@
83#define WL1271_AP_BSS_INDEX 0 84#define WL1271_AP_BSS_INDEX 0
84#define WL1271_AP_DEF_BEACON_EXP 20 85#define WL1271_AP_DEF_BEACON_EXP 20
85 86
86#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE) 87enum wlcore_state {
87 88 WLCORE_STATE_OFF,
88enum wl1271_state { 89 WLCORE_STATE_RESTARTING,
89 WL1271_STATE_OFF, 90 WLCORE_STATE_ON,
90 WL1271_STATE_ON,
91}; 91};
92 92
93enum wl12xx_fw_type { 93enum wl12xx_fw_type {
@@ -124,6 +124,7 @@ struct wl1271_chip {
124 u32 id; 124 u32 id;
125 char fw_ver_str[ETHTOOL_BUSINFO_LEN]; 125 char fw_ver_str[ETHTOOL_BUSINFO_LEN];
126 unsigned int fw_ver[NUM_FW_VER]; 126 unsigned int fw_ver[NUM_FW_VER];
127 char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN];
127}; 128};
128 129
129#define NUM_TX_QUEUES 4 130#define NUM_TX_QUEUES 4
@@ -337,6 +338,8 @@ struct wl12xx_vif {
337 u8 ap_rate_idx; 338 u8 ap_rate_idx;
338 u8 p2p_rate_idx; 339 u8 p2p_rate_idx;
339 340
341 u8 klv_template_id;
342
340 bool qos; 343 bool qos;
341 } sta; 344 } sta;
342 struct { 345 struct {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 00f6e69c1dcd..730186d0449b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1520,13 +1520,12 @@ static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
1520 union iwreq_data *wrqu, char *extra) 1520 union iwreq_data *wrqu, char *extra)
1521{ 1521{
1522 struct wl3501_card *this = netdev_priv(dev); 1522 struct wl3501_card *this = netdev_priv(dev);
1523 static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
1524 int rc = -EINVAL; 1523 int rc = -EINVAL;
1525 1524
1526 /* FIXME: we support other ARPHRDs...*/ 1525 /* FIXME: we support other ARPHRDs...*/
1527 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 1526 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
1528 goto out; 1527 goto out;
1529 if (!memcmp(bcast, wrqu->ap_addr.sa_data, ETH_ALEN)) { 1528 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) {
1530 /* FIXME: rescan? */ 1529 /* FIXME: rescan? */
1531 } else 1530 } else
1532 memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 1531 memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c9e2660e1263..114364b5d466 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
937 * control block of the skbuff will be initialized. If necessary the incoming 937 * control block of the skbuff will be initialized. If necessary the incoming
938 * mac80211 queues will be stopped. 938 * mac80211 queues will be stopped.
939 */ 939 */
940static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 940static void zd_op_tx(struct ieee80211_hw *hw,
941 struct ieee80211_tx_control *control,
942 struct sk_buff *skb)
941{ 943{
942 struct zd_mac *mac = zd_hw_mac(hw); 944 struct zd_mac *mac = zd_hw_mac(hw);
943 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 945 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
1176 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif); 1178 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
1177 if (!skb) 1179 if (!skb)
1178 break; 1180 break;
1179 zd_op_tx(mac->hw, skb); 1181 zd_op_tx(mac->hw, NULL, skb);
1180 } 1182 }
1181 1183
1182 /* 1184 /*
@@ -1399,7 +1401,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
1399 1401
1400 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 1402 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1401 IEEE80211_HW_SIGNAL_UNSPEC | 1403 IEEE80211_HW_SIGNAL_UNSPEC |
1402 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1404 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1405 IEEE80211_HW_MFP_CAPABLE;
1403 1406
1404 hw->wiphy->interface_modes = 1407 hw->wiphy->interface_modes =
1405 BIT(NL80211_IFTYPE_MESH_POINT) | 1408 BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 650f79a1f2bd..c934fe8583f5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
1712 break; 1712 break;
1713 1713
1714 case XenbusStateConnected: 1714 case XenbusStateConnected:
1715 netif_notify_peers(netdev); 1715 netdev_notify_peers(netdev);
1716 break; 1716 break;
1717 1717
1718 case XenbusStateClosing: 1718 case XenbusStateClosing:
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 3b20b73ee649..ec857676c39f 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -5,21 +5,9 @@
5menu "Near Field Communication (NFC) devices" 5menu "Near Field Communication (NFC) devices"
6 depends on NFC 6 depends on NFC
7 7
8config PN544_NFC
9 tristate "PN544 NFC driver"
10 depends on I2C
11 select CRC_CCITT
12 default n
13 ---help---
14 Say yes if you want PN544 Near Field Communication driver.
15 This is for i2c connected version. If unsure, say N here.
16
17 To compile this driver as a module, choose m here. The module will
18 be called pn544.
19
20config PN544_HCI_NFC 8config PN544_HCI_NFC
21 tristate "HCI PN544 NFC driver" 9 tristate "HCI PN544 NFC driver"
22 depends on I2C && NFC_SHDLC 10 depends on I2C && NFC_HCI && NFC_SHDLC
23 select CRC_CCITT 11 select CRC_CCITT
24 default n 12 default n
25 ---help--- 13 ---help---
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 473e44cef612..bf05831fdf09 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,7 +2,6 @@
2# Makefile for nfc devices 2# Makefile for nfc devices
3# 3#
4 4
5obj-$(CONFIG_PN544_NFC) += pn544.o
6obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o 5obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o
7obj-$(CONFIG_NFC_PN533) += pn533.o 6obj-$(CONFIG_NFC_PN533) += pn533.o
8obj-$(CONFIG_NFC_WILINK) += nfcwilink.o 7obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index e7fd4938f9bc..50b1ee41afc6 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -352,8 +352,6 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
352 struct nfcwilink *drv = priv_data; 352 struct nfcwilink *drv = priv_data;
353 int rc; 353 int rc;
354 354
355 nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
356
357 if (!skb) 355 if (!skb)
358 return -EFAULT; 356 return -EFAULT;
359 357
@@ -362,6 +360,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
362 return -EFAULT; 360 return -EFAULT;
363 } 361 }
364 362
363 nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
364
365 /* strip the ST header 365 /* strip the ST header
366 (apart for the chnl byte, which is not received in the hdr) */ 366 (apart for the chnl byte, which is not received in the hdr) */
367 skb_pull(skb, (NFCWILINK_HDR_LEN-1)); 367 skb_pull(skb, (NFCWILINK_HDR_LEN-1));
@@ -604,21 +604,7 @@ static struct platform_driver nfcwilink_driver = {
604 }, 604 },
605}; 605};
606 606
607/* ------- Module Init/Exit interfaces ------ */ 607module_platform_driver(nfcwilink_driver);
608static int __init nfcwilink_init(void)
609{
610 printk(KERN_INFO "NFC Driver for TI WiLink");
611
612 return platform_driver_register(&nfcwilink_driver);
613}
614
615static void __exit nfcwilink_exit(void)
616{
617 platform_driver_unregister(&nfcwilink_driver);
618}
619
620module_init(nfcwilink_init);
621module_exit(nfcwilink_exit);
622 608
623/* ------ Module Info ------ */ 609/* ------ Module Info ------ */
624 610
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index d606f52fec84..97c440a8cd61 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -356,6 +356,7 @@ struct pn533 {
356 356
357 struct workqueue_struct *wq; 357 struct workqueue_struct *wq;
358 struct work_struct cmd_work; 358 struct work_struct cmd_work;
359 struct work_struct cmd_complete_work;
359 struct work_struct poll_work; 360 struct work_struct poll_work;
360 struct work_struct mi_work; 361 struct work_struct mi_work;
361 struct work_struct tg_work; 362 struct work_struct tg_work;
@@ -383,6 +384,19 @@ struct pn533 {
383 u8 tgt_mode; 384 u8 tgt_mode;
384 385
385 u32 device_type; 386 u32 device_type;
387
388 struct list_head cmd_queue;
389 u8 cmd_pending;
390};
391
392struct pn533_cmd {
393 struct list_head queue;
394 struct pn533_frame *out_frame;
395 struct pn533_frame *in_frame;
396 int in_frame_len;
397 pn533_cmd_complete_t cmd_complete;
398 void *arg;
399 gfp_t flags;
386}; 400};
387 401
388struct pn533_frame { 402struct pn533_frame {
@@ -487,7 +501,7 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
487 501
488static void pn533_wq_cmd_complete(struct work_struct *work) 502static void pn533_wq_cmd_complete(struct work_struct *work)
489{ 503{
490 struct pn533 *dev = container_of(work, struct pn533, cmd_work); 504 struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
491 struct pn533_frame *in_frame; 505 struct pn533_frame *in_frame;
492 int rc; 506 int rc;
493 507
@@ -502,7 +516,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
502 PN533_FRAME_CMD_PARAMS_LEN(in_frame)); 516 PN533_FRAME_CMD_PARAMS_LEN(in_frame));
503 517
504 if (rc != -EINPROGRESS) 518 if (rc != -EINPROGRESS)
505 mutex_unlock(&dev->cmd_lock); 519 queue_work(dev->wq, &dev->cmd_work);
506} 520}
507 521
508static void pn533_recv_response(struct urb *urb) 522static void pn533_recv_response(struct urb *urb)
@@ -550,7 +564,7 @@ static void pn533_recv_response(struct urb *urb)
550 dev->wq_in_frame = in_frame; 564 dev->wq_in_frame = in_frame;
551 565
552sched_wq: 566sched_wq:
553 queue_work(dev->wq, &dev->cmd_work); 567 queue_work(dev->wq, &dev->cmd_complete_work);
554} 568}
555 569
556static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) 570static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -606,7 +620,7 @@ static void pn533_recv_ack(struct urb *urb)
606 620
607sched_wq: 621sched_wq:
608 dev->wq_in_frame = NULL; 622 dev->wq_in_frame = NULL;
609 queue_work(dev->wq, &dev->cmd_work); 623 queue_work(dev->wq, &dev->cmd_complete_work);
610} 624}
611 625
612static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) 626static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -669,6 +683,31 @@ error:
669 return rc; 683 return rc;
670} 684}
671 685
686static void pn533_wq_cmd(struct work_struct *work)
687{
688 struct pn533 *dev = container_of(work, struct pn533, cmd_work);
689 struct pn533_cmd *cmd;
690
691 mutex_lock(&dev->cmd_lock);
692
693 if (list_empty(&dev->cmd_queue)) {
694 dev->cmd_pending = 0;
695 mutex_unlock(&dev->cmd_lock);
696 return;
697 }
698
699 cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
700
701 mutex_unlock(&dev->cmd_lock);
702
703 __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
704 cmd->in_frame_len, cmd->cmd_complete,
705 cmd->arg, cmd->flags);
706
707 list_del(&cmd->queue);
708 kfree(cmd);
709}
710
672static int pn533_send_cmd_frame_async(struct pn533 *dev, 711static int pn533_send_cmd_frame_async(struct pn533 *dev,
673 struct pn533_frame *out_frame, 712 struct pn533_frame *out_frame,
674 struct pn533_frame *in_frame, 713 struct pn533_frame *in_frame,
@@ -676,21 +715,44 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
676 pn533_cmd_complete_t cmd_complete, 715 pn533_cmd_complete_t cmd_complete,
677 void *arg, gfp_t flags) 716 void *arg, gfp_t flags)
678{ 717{
679 int rc; 718 struct pn533_cmd *cmd;
719 int rc = 0;
680 720
681 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 721 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
682 722
683 if (!mutex_trylock(&dev->cmd_lock)) 723 mutex_lock(&dev->cmd_lock);
684 return -EBUSY;
685 724
686 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame, 725 if (!dev->cmd_pending) {
687 in_frame_len, cmd_complete, arg, flags); 726 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
688 if (rc) 727 in_frame_len, cmd_complete,
689 goto error; 728 arg, flags);
729 if (!rc)
730 dev->cmd_pending = 1;
690 731
691 return 0; 732 goto unlock;
692error: 733 }
734
735 nfc_dev_dbg(&dev->interface->dev, "%s Queueing command", __func__);
736
737 cmd = kzalloc(sizeof(struct pn533_cmd), flags);
738 if (!cmd) {
739 rc = -ENOMEM;
740 goto unlock;
741 }
742
743 INIT_LIST_HEAD(&cmd->queue);
744 cmd->out_frame = out_frame;
745 cmd->in_frame = in_frame;
746 cmd->in_frame_len = in_frame_len;
747 cmd->cmd_complete = cmd_complete;
748 cmd->arg = arg;
749 cmd->flags = flags;
750
751 list_add_tail(&cmd->queue, &dev->cmd_queue);
752
753unlock:
693 mutex_unlock(&dev->cmd_lock); 754 mutex_unlock(&dev->cmd_lock);
755
694 return rc; 756 return rc;
695} 757}
696 758
@@ -1305,8 +1367,6 @@ static void pn533_listen_mode_timer(unsigned long data)
1305 1367
1306 dev->cancel_listen = 1; 1368 dev->cancel_listen = 1;
1307 1369
1308 mutex_unlock(&dev->cmd_lock);
1309
1310 pn533_poll_next_mod(dev); 1370 pn533_poll_next_mod(dev);
1311 1371
1312 queue_work(dev->wq, &dev->poll_work); 1372 queue_work(dev->wq, &dev->poll_work);
@@ -2131,7 +2191,7 @@ error_cmd:
2131 2191
2132 kfree(arg); 2192 kfree(arg);
2133 2193
2134 mutex_unlock(&dev->cmd_lock); 2194 queue_work(dev->wq, &dev->cmd_work);
2135} 2195}
2136 2196
2137static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 2197static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -2330,13 +2390,12 @@ static int pn533_probe(struct usb_interface *interface,
2330 NULL, 0, 2390 NULL, 0,
2331 pn533_send_complete, dev); 2391 pn533_send_complete, dev);
2332 2392
2333 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete); 2393 INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
2394 INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
2334 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv); 2395 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
2335 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data); 2396 INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
2336 INIT_WORK(&dev->poll_work, pn533_wq_poll); 2397 INIT_WORK(&dev->poll_work, pn533_wq_poll);
2337 dev->wq = alloc_workqueue("pn533", 2398 dev->wq = alloc_ordered_workqueue("pn533", 0);
2338 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
2339 1);
2340 if (dev->wq == NULL) 2399 if (dev->wq == NULL)
2341 goto error; 2400 goto error;
2342 2401
@@ -2346,6 +2405,8 @@ static int pn533_probe(struct usb_interface *interface,
2346 2405
2347 skb_queue_head_init(&dev->resp_q); 2406 skb_queue_head_init(&dev->resp_q);
2348 2407
2408 INIT_LIST_HEAD(&dev->cmd_queue);
2409
2349 usb_set_intfdata(interface, dev); 2410 usb_set_intfdata(interface, dev);
2350 2411
2351 pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION); 2412 pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
@@ -2417,6 +2478,7 @@ error:
2417static void pn533_disconnect(struct usb_interface *interface) 2478static void pn533_disconnect(struct usb_interface *interface)
2418{ 2479{
2419 struct pn533 *dev; 2480 struct pn533 *dev;
2481 struct pn533_cmd *cmd, *n;
2420 2482
2421 dev = usb_get_intfdata(interface); 2483 dev = usb_get_intfdata(interface);
2422 usb_set_intfdata(interface, NULL); 2484 usb_set_intfdata(interface, NULL);
@@ -2433,6 +2495,11 @@ static void pn533_disconnect(struct usb_interface *interface)
2433 2495
2434 del_timer(&dev->listen_timer); 2496 del_timer(&dev->listen_timer);
2435 2497
2498 list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
2499 list_del(&cmd->queue);
2500 kfree(cmd);
2501 }
2502
2436 kfree(dev->in_frame); 2503 kfree(dev->in_frame);
2437 usb_free_urb(dev->in_urb); 2504 usb_free_urb(dev->in_urb);
2438 kfree(dev->out_frame); 2505 kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
deleted file mode 100644
index 724f65d8f9e4..000000000000
--- a/drivers/nfc/pn544.c
+++ /dev/null
@@ -1,893 +0,0 @@
1/*
2 * Driver for the PN544 NFC chip.
3 *
4 * Copyright (C) Nokia Corporation
5 *
6 * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
7 * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/completion.h>
24#include <linux/crc-ccitt.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/miscdevice.h>
29#include <linux/module.h>
30#include <linux/mutex.h>
31#include <linux/nfc/pn544.h>
32#include <linux/poll.h>
33#include <linux/regulator/consumer.h>
34#include <linux/serial_core.h> /* for TCGETS */
35#include <linux/slab.h>
36
37#define DRIVER_CARD "PN544 NFC"
38#define DRIVER_DESC "NFC driver for PN544"
39
40static struct i2c_device_id pn544_id_table[] = {
41 { PN544_DRIVER_NAME, 0 },
42 { }
43};
44MODULE_DEVICE_TABLE(i2c, pn544_id_table);
45
46#define HCI_MODE 0
47#define FW_MODE 1
48
49enum pn544_state {
50 PN544_ST_COLD,
51 PN544_ST_FW_READY,
52 PN544_ST_READY,
53};
54
55enum pn544_irq {
56 PN544_NONE,
57 PN544_INT,
58};
59
60struct pn544_info {
61 struct miscdevice miscdev;
62 struct i2c_client *i2c_dev;
63 struct regulator_bulk_data regs[3];
64
65 enum pn544_state state;
66 wait_queue_head_t read_wait;
67 loff_t read_offset;
68 enum pn544_irq read_irq;
69 struct mutex read_mutex; /* Serialize read_irq access */
70 struct mutex mutex; /* Serialize info struct access */
71 u8 *buf;
72 size_t buflen;
73};
74
75static const char reg_vdd_io[] = "Vdd_IO";
76static const char reg_vbat[] = "VBat";
77static const char reg_vsim[] = "VSim";
78
79/* sysfs interface */
80static ssize_t pn544_test(struct device *dev,
81 struct device_attribute *attr, char *buf)
82{
83 struct pn544_info *info = dev_get_drvdata(dev);
84 struct i2c_client *client = info->i2c_dev;
85 struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
86
87 return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
88}
89
90static int pn544_enable(struct pn544_info *info, int mode)
91{
92 struct pn544_nfc_platform_data *pdata;
93 struct i2c_client *client = info->i2c_dev;
94
95 int r;
96
97 r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
98 if (r < 0)
99 return r;
100
101 pdata = client->dev.platform_data;
102 info->read_irq = PN544_NONE;
103 if (pdata->enable)
104 pdata->enable(mode);
105
106 if (mode) {
107 info->state = PN544_ST_FW_READY;
108 dev_dbg(&client->dev, "now in FW-mode\n");
109 } else {
110 info->state = PN544_ST_READY;
111 dev_dbg(&client->dev, "now in HCI-mode\n");
112 }
113
114 usleep_range(10000, 15000);
115
116 return 0;
117}
118
119static void pn544_disable(struct pn544_info *info)
120{
121 struct pn544_nfc_platform_data *pdata;
122 struct i2c_client *client = info->i2c_dev;
123
124 pdata = client->dev.platform_data;
125 if (pdata->disable)
126 pdata->disable();
127
128 info->state = PN544_ST_COLD;
129
130 dev_dbg(&client->dev, "Now in OFF-mode\n");
131
132 msleep(PN544_RESETVEN_TIME);
133
134 info->read_irq = PN544_NONE;
135 regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
136}
137
138static int check_crc(u8 *buf, int buflen)
139{
140 u8 len;
141 u16 crc;
142
143 len = buf[0] + 1;
144 if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
145 pr_err(PN544_DRIVER_NAME
146 ": CRC; corrupt packet len %u (%d)\n", len, buflen);
147 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
148 16, 2, buf, buflen, false);
149 return -EPERM;
150 }
151 crc = crc_ccitt(0xffff, buf, len - 2);
152 crc = ~crc;
153
154 if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
155 pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
156 crc, buf[len-1], buf[len-2]);
157
158 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
159 16, 2, buf, buflen, false);
160 return -EPERM;
161 }
162 return 0;
163}
164
165static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
166{
167 int r;
168
169 if (len < 4 || len != (buf[0] + 1)) {
170 dev_err(&client->dev, "%s: Illegal message length: %d\n",
171 __func__, len);
172 return -EINVAL;
173 }
174
175 if (check_crc(buf, len))
176 return -EINVAL;
177
178 usleep_range(3000, 6000);
179
180 r = i2c_master_send(client, buf, len);
181 dev_dbg(&client->dev, "send: %d\n", r);
182
183 if (r == -EREMOTEIO) { /* Retry, chip was in standby */
184 usleep_range(6000, 10000);
185 r = i2c_master_send(client, buf, len);
186 dev_dbg(&client->dev, "send2: %d\n", r);
187 }
188
189 if (r != len)
190 return -EREMOTEIO;
191
192 return r;
193}
194
195static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
196{
197 int r;
198 u8 len;
199
200 /*
201 * You could read a packet in one go, but then you'd need to read
202 * max size and rest would be 0xff fill, so we do split reads.
203 */
204 r = i2c_master_recv(client, &len, 1);
205 dev_dbg(&client->dev, "recv1: %d\n", r);
206
207 if (r != 1)
208 return -EREMOTEIO;
209
210 if (len < PN544_LLC_HCI_OVERHEAD)
211 len = PN544_LLC_HCI_OVERHEAD;
212 else if (len > (PN544_MSG_MAX_SIZE - 1))
213 len = PN544_MSG_MAX_SIZE - 1;
214
215 if (1 + len > buflen) /* len+(data+crc16) */
216 return -EMSGSIZE;
217
218 buf[0] = len;
219
220 r = i2c_master_recv(client, buf + 1, len);
221 dev_dbg(&client->dev, "recv2: %d\n", r);
222
223 if (r != len)
224 return -EREMOTEIO;
225
226 usleep_range(3000, 6000);
227
228 return r + 1;
229}
230
231static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
232{
233 int r;
234
235 dev_dbg(&client->dev, "%s\n", __func__);
236
237 if (len < PN544_FW_HEADER_SIZE ||
238 (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
239 return -EINVAL;
240
241 r = i2c_master_send(client, buf, len);
242 dev_dbg(&client->dev, "fw send: %d\n", r);
243
244 if (r == -EREMOTEIO) { /* Retry, chip was in standby */
245 usleep_range(6000, 10000);
246 r = i2c_master_send(client, buf, len);
247 dev_dbg(&client->dev, "fw send2: %d\n", r);
248 }
249
250 if (r != len)
251 return -EREMOTEIO;
252
253 return r;
254}
255
256static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
257{
258 int r, len;
259
260 if (buflen < PN544_FW_HEADER_SIZE)
261 return -EINVAL;
262
263 r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
264 dev_dbg(&client->dev, "FW recv1: %d\n", r);
265
266 if (r < 0)
267 return r;
268
269 if (r < PN544_FW_HEADER_SIZE)
270 return -EINVAL;
271
272 len = (buf[1] << 8) + buf[2];
273 if (len == 0) /* just header, no additional data */
274 return r;
275
276 if (len > buflen - PN544_FW_HEADER_SIZE)
277 return -EMSGSIZE;
278
279 r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
280 dev_dbg(&client->dev, "fw recv2: %d\n", r);
281
282 if (r != len)
283 return -EINVAL;
284
285 return r + PN544_FW_HEADER_SIZE;
286}
287
288static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
289{
290 struct pn544_info *info = dev_id;
291 struct i2c_client *client = info->i2c_dev;
292
293 BUG_ON(!info);
294 BUG_ON(irq != info->i2c_dev->irq);
295
296 dev_dbg(&client->dev, "IRQ\n");
297
298 mutex_lock(&info->read_mutex);
299 info->read_irq = PN544_INT;
300 mutex_unlock(&info->read_mutex);
301
302 wake_up_interruptible(&info->read_wait);
303
304 return IRQ_HANDLED;
305}
306
307static enum pn544_irq pn544_irq_state(struct pn544_info *info)
308{
309 enum pn544_irq irq;
310
311 mutex_lock(&info->read_mutex);
312 irq = info->read_irq;
313 mutex_unlock(&info->read_mutex);
314 /*
315 * XXX: should we check GPIO-line status directly?
316 * return pdata->irq_status() ? PN544_INT : PN544_NONE;
317 */
318
319 return irq;
320}
321
322static ssize_t pn544_read(struct file *file, char __user *buf,
323 size_t count, loff_t *offset)
324{
325 struct pn544_info *info = container_of(file->private_data,
326 struct pn544_info, miscdev);
327 struct i2c_client *client = info->i2c_dev;
328 enum pn544_irq irq;
329 size_t len;
330 int r = 0;
331
332 dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
333 info, count);
334
335 mutex_lock(&info->mutex);
336
337 if (info->state == PN544_ST_COLD) {
338 r = -ENODEV;
339 goto out;
340 }
341
342 irq = pn544_irq_state(info);
343 if (irq == PN544_NONE) {
344 if (file->f_flags & O_NONBLOCK) {
345 r = -EAGAIN;
346 goto out;
347 }
348
349 if (wait_event_interruptible(info->read_wait,
350 (info->read_irq == PN544_INT))) {
351 r = -ERESTARTSYS;
352 goto out;
353 }
354 }
355
356 if (info->state == PN544_ST_FW_READY) {
357 len = min(count, info->buflen);
358
359 mutex_lock(&info->read_mutex);
360 r = pn544_fw_read(info->i2c_dev, info->buf, len);
361 info->read_irq = PN544_NONE;
362 mutex_unlock(&info->read_mutex);
363
364 if (r < 0) {
365 dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
366 goto out;
367 }
368
369 print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
370 16, 2, info->buf, r, false);
371
372 *offset += r;
373 if (copy_to_user(buf, info->buf, r)) {
374 r = -EFAULT;
375 goto out;
376 }
377 } else {
378 len = min(count, info->buflen);
379
380 mutex_lock(&info->read_mutex);
381 r = pn544_i2c_read(info->i2c_dev, info->buf, len);
382 info->read_irq = PN544_NONE;
383 mutex_unlock(&info->read_mutex);
384
385 if (r < 0) {
386 dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
387 goto out;
388 }
389 print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
390 16, 2, info->buf, r, false);
391
392 *offset += r;
393 if (copy_to_user(buf, info->buf, r)) {
394 r = -EFAULT;
395 goto out;
396 }
397 }
398
399out:
400 mutex_unlock(&info->mutex);
401
402 return r;
403}
404
405static unsigned int pn544_poll(struct file *file, poll_table *wait)
406{
407 struct pn544_info *info = container_of(file->private_data,
408 struct pn544_info, miscdev);
409 struct i2c_client *client = info->i2c_dev;
410 int r = 0;
411
412 dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
413
414 mutex_lock(&info->mutex);
415
416 if (info->state == PN544_ST_COLD) {
417 r = -ENODEV;
418 goto out;
419 }
420
421 poll_wait(file, &info->read_wait, wait);
422
423 if (pn544_irq_state(info) == PN544_INT) {
424 r = POLLIN | POLLRDNORM;
425 goto out;
426 }
427out:
428 mutex_unlock(&info->mutex);
429
430 return r;
431}
432
433static ssize_t pn544_write(struct file *file, const char __user *buf,
434 size_t count, loff_t *ppos)
435{
436 struct pn544_info *info = container_of(file->private_data,
437 struct pn544_info, miscdev);
438 struct i2c_client *client = info->i2c_dev;
439 ssize_t len;
440 int r;
441
442 dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
443 info, count);
444
445 mutex_lock(&info->mutex);
446
447 if (info->state == PN544_ST_COLD) {
448 r = -ENODEV;
449 goto out;
450 }
451
452 /*
453 * XXX: should we detect rset-writes and clean possible
454 * read_irq state
455 */
456 if (info->state == PN544_ST_FW_READY) {
457 size_t fw_len;
458
459 if (count < PN544_FW_HEADER_SIZE) {
460 r = -EINVAL;
461 goto out;
462 }
463
464 len = min(count, info->buflen);
465 if (copy_from_user(info->buf, buf, len)) {
466 r = -EFAULT;
467 goto out;
468 }
469
470 print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
471 16, 2, info->buf, len, false);
472
473 fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
474 info->buf[2];
475
476 if (len > fw_len) /* 1 msg at a time */
477 len = fw_len;
478
479 r = pn544_fw_write(info->i2c_dev, info->buf, len);
480 } else {
481 if (count < PN544_LLC_MIN_SIZE) {
482 r = -EINVAL;
483 goto out;
484 }
485
486 len = min(count, info->buflen);
487 if (copy_from_user(info->buf, buf, len)) {
488 r = -EFAULT;
489 goto out;
490 }
491
492 print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
493 16, 2, info->buf, len, false);
494
495 if (len > (info->buf[0] + 1)) /* 1 msg at a time */
496 len = info->buf[0] + 1;
497
498 r = pn544_i2c_write(info->i2c_dev, info->buf, len);
499 }
500out:
501 mutex_unlock(&info->mutex);
502
503 return r;
504
505}
506
507static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
508{
509 struct pn544_info *info = container_of(file->private_data,
510 struct pn544_info, miscdev);
511 struct i2c_client *client = info->i2c_dev;
512 struct pn544_nfc_platform_data *pdata;
513 unsigned int val;
514 int r = 0;
515
516 dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
517
518 mutex_lock(&info->mutex);
519
520 if (info->state == PN544_ST_COLD) {
521 r = -ENODEV;
522 goto out;
523 }
524
525 pdata = info->i2c_dev->dev.platform_data;
526 switch (cmd) {
527 case PN544_GET_FW_MODE:
528 dev_dbg(&client->dev, "%s: PN544_GET_FW_MODE\n", __func__);
529
530 val = (info->state == PN544_ST_FW_READY);
531 if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
532 r = -EFAULT;
533 goto out;
534 }
535
536 break;
537
538 case PN544_SET_FW_MODE:
539 dev_dbg(&client->dev, "%s: PN544_SET_FW_MODE\n", __func__);
540
541 if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
542 r = -EFAULT;
543 goto out;
544 }
545
546 if (val) {
547 if (info->state == PN544_ST_FW_READY)
548 break;
549
550 pn544_disable(info);
551 r = pn544_enable(info, FW_MODE);
552 if (r < 0)
553 goto out;
554 } else {
555 if (info->state == PN544_ST_READY)
556 break;
557 pn544_disable(info);
558 r = pn544_enable(info, HCI_MODE);
559 if (r < 0)
560 goto out;
561 }
562 file->f_pos = info->read_offset;
563 break;
564
565 case TCGETS:
566 dev_dbg(&client->dev, "%s: TCGETS\n", __func__);
567
568 r = -ENOIOCTLCMD;
569 break;
570
571 default:
572 dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
573 r = -ENOIOCTLCMD;
574 break;
575 }
576
577out:
578 mutex_unlock(&info->mutex);
579
580 return r;
581}
582
583static int pn544_open(struct inode *inode, struct file *file)
584{
585 struct pn544_info *info = container_of(file->private_data,
586 struct pn544_info, miscdev);
587 struct i2c_client *client = info->i2c_dev;
588 int r = 0;
589
590 dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
591 info, info->i2c_dev);
592
593 mutex_lock(&info->mutex);
594
595 /*
596 * Only 1 at a time.
597 * XXX: maybe user (counter) would work better
598 */
599 if (info->state != PN544_ST_COLD) {
600 r = -EBUSY;
601 goto out;
602 }
603
604 file->f_pos = info->read_offset;
605 r = pn544_enable(info, HCI_MODE);
606
607out:
608 mutex_unlock(&info->mutex);
609 return r;
610}
611
612static int pn544_close(struct inode *inode, struct file *file)
613{
614 struct pn544_info *info = container_of(file->private_data,
615 struct pn544_info, miscdev);
616 struct i2c_client *client = info->i2c_dev;
617
618 dev_dbg(&client->dev, "%s: info: %p, client %p\n",
619 __func__, info, info->i2c_dev);
620
621 mutex_lock(&info->mutex);
622 pn544_disable(info);
623 mutex_unlock(&info->mutex);
624
625 return 0;
626}
627
628static const struct file_operations pn544_fops = {
629 .owner = THIS_MODULE,
630 .llseek = no_llseek,
631 .read = pn544_read,
632 .write = pn544_write,
633 .poll = pn544_poll,
634 .open = pn544_open,
635 .release = pn544_close,
636 .unlocked_ioctl = pn544_ioctl,
637};
638
639#ifdef CONFIG_PM
640static int pn544_suspend(struct device *dev)
641{
642 struct i2c_client *client = to_i2c_client(dev);
643 struct pn544_info *info;
644 int r = 0;
645
646 dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
647
648 info = i2c_get_clientdata(client);
649 dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
650 info, client);
651
652 mutex_lock(&info->mutex);
653
654 switch (info->state) {
655 case PN544_ST_FW_READY:
656 /* Do not suspend while upgrading FW, please! */
657 r = -EPERM;
658 break;
659
660 case PN544_ST_READY:
661 /*
662 * CHECK: Device should be in standby-mode. No way to check?
663 * Allowing low power mode for the regulator is potentially
664 * dangerous if pn544 does not go to suspension.
665 */
666 break;
667
668 case PN544_ST_COLD:
669 break;
670 };
671
672 mutex_unlock(&info->mutex);
673 return r;
674}
675
676static int pn544_resume(struct device *dev)
677{
678 struct i2c_client *client = to_i2c_client(dev);
679 struct pn544_info *info = i2c_get_clientdata(client);
680 int r = 0;
681
682 dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
683 info, client);
684
685 mutex_lock(&info->mutex);
686
687 switch (info->state) {
688 case PN544_ST_READY:
689 /*
690 * CHECK: If regulator low power mode is allowed in
691 * pn544_suspend, we should go back to normal mode
692 * here.
693 */
694 break;
695
696 case PN544_ST_COLD:
697 break;
698
699 case PN544_ST_FW_READY:
700 break;
701 };
702
703 mutex_unlock(&info->mutex);
704
705 return r;
706}
707
708static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
709#endif
710
711static struct device_attribute pn544_attr =
712 __ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
713
714static int __devinit pn544_probe(struct i2c_client *client,
715 const struct i2c_device_id *id)
716{
717 struct pn544_info *info;
718 struct pn544_nfc_platform_data *pdata;
719 int r = 0;
720
721 dev_dbg(&client->dev, "%s\n", __func__);
722 dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
723
724 /* private data allocation */
725 info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
726 if (!info) {
727 dev_err(&client->dev,
728 "Cannot allocate memory for pn544_info.\n");
729 r = -ENOMEM;
730 goto err_info_alloc;
731 }
732
733 info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
734 info->buf = kzalloc(info->buflen, GFP_KERNEL);
735 if (!info->buf) {
736 dev_err(&client->dev,
737 "Cannot allocate memory for pn544_info->buf.\n");
738 r = -ENOMEM;
739 goto err_buf_alloc;
740 }
741
742 info->regs[0].supply = reg_vdd_io;
743 info->regs[1].supply = reg_vbat;
744 info->regs[2].supply = reg_vsim;
745 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
746 info->regs);
747 if (r < 0)
748 goto err_kmalloc;
749
750 info->i2c_dev = client;
751 info->state = PN544_ST_COLD;
752 info->read_irq = PN544_NONE;
753 mutex_init(&info->read_mutex);
754 mutex_init(&info->mutex);
755 init_waitqueue_head(&info->read_wait);
756 i2c_set_clientdata(client, info);
757 pdata = client->dev.platform_data;
758 if (!pdata) {
759 dev_err(&client->dev, "No platform data\n");
760 r = -EINVAL;
761 goto err_reg;
762 }
763
764 if (!pdata->request_resources) {
765 dev_err(&client->dev, "request_resources() missing\n");
766 r = -EINVAL;
767 goto err_reg;
768 }
769
770 r = pdata->request_resources(client);
771 if (r) {
772 dev_err(&client->dev, "Cannot get platform resources\n");
773 goto err_reg;
774 }
775
776 r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
777 IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
778 info);
779 if (r < 0) {
780 dev_err(&client->dev, "Unable to register IRQ handler\n");
781 goto err_res;
782 }
783
784 /* If we don't have the test we don't need the sysfs file */
785 if (pdata->test) {
786 r = device_create_file(&client->dev, &pn544_attr);
787 if (r) {
788 dev_err(&client->dev,
789 "sysfs registration failed, error %d\n", r);
790 goto err_irq;
791 }
792 }
793
794 info->miscdev.minor = MISC_DYNAMIC_MINOR;
795 info->miscdev.name = PN544_DRIVER_NAME;
796 info->miscdev.fops = &pn544_fops;
797 info->miscdev.parent = &client->dev;
798 r = misc_register(&info->miscdev);
799 if (r < 0) {
800 dev_err(&client->dev, "Device registration failed\n");
801 goto err_sysfs;
802 }
803
804 dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
805 __func__, info, pdata, client);
806
807 return 0;
808
809err_sysfs:
810 if (pdata->test)
811 device_remove_file(&client->dev, &pn544_attr);
812err_irq:
813 free_irq(client->irq, info);
814err_res:
815 if (pdata->free_resources)
816 pdata->free_resources();
817err_reg:
818 regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
819err_kmalloc:
820 kfree(info->buf);
821err_buf_alloc:
822 kfree(info);
823err_info_alloc:
824 return r;
825}
826
827static __devexit int pn544_remove(struct i2c_client *client)
828{
829 struct pn544_info *info = i2c_get_clientdata(client);
830 struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
831
832 dev_dbg(&client->dev, "%s\n", __func__);
833
834 misc_deregister(&info->miscdev);
835 if (pdata->test)
836 device_remove_file(&client->dev, &pn544_attr);
837
838 if (info->state != PN544_ST_COLD) {
839 if (pdata->disable)
840 pdata->disable();
841
842 info->read_irq = PN544_NONE;
843 }
844
845 free_irq(client->irq, info);
846 if (pdata->free_resources)
847 pdata->free_resources();
848
849 regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
850 kfree(info->buf);
851 kfree(info);
852
853 return 0;
854}
855
856static struct i2c_driver pn544_driver = {
857 .driver = {
858 .name = PN544_DRIVER_NAME,
859#ifdef CONFIG_PM
860 .pm = &pn544_pm_ops,
861#endif
862 },
863 .probe = pn544_probe,
864 .id_table = pn544_id_table,
865 .remove = __devexit_p(pn544_remove),
866};
867
868static int __init pn544_init(void)
869{
870 int r;
871
872 pr_debug(DRIVER_DESC ": %s\n", __func__);
873
874 r = i2c_add_driver(&pn544_driver);
875 if (r) {
876 pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
877 return r;
878 }
879
880 return 0;
881}
882
883static void __exit pn544_exit(void)
884{
885 i2c_del_driver(&pn544_driver);
886 pr_info(DRIVER_DESC ", Exiting.\n");
887}
888
889module_init(pn544_init);
890module_exit(pn544_exit);
891
892MODULE_LICENSE("GPL");
893MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index aa71807189ba..c9c8570273ab 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -29,7 +29,7 @@
29 29
30#include <linux/nfc.h> 30#include <linux/nfc.h>
31#include <net/nfc/hci.h> 31#include <net/nfc/hci.h>
32#include <net/nfc/shdlc.h> 32#include <net/nfc/llc.h>
33 33
34#include <linux/nfc/pn544.h> 34#include <linux/nfc/pn544.h>
35 35
@@ -128,10 +128,12 @@ static struct nfc_hci_gate pn544_gates[] = {
128 128
129/* Largest headroom needed for outgoing custom commands */ 129/* Largest headroom needed for outgoing custom commands */
130#define PN544_CMDS_HEADROOM 2 130#define PN544_CMDS_HEADROOM 2
131#define PN544_FRAME_HEADROOM 1
132#define PN544_FRAME_TAILROOM 2
131 133
132struct pn544_hci_info { 134struct pn544_hci_info {
133 struct i2c_client *i2c_dev; 135 struct i2c_client *i2c_dev;
134 struct nfc_shdlc *shdlc; 136 struct nfc_hci_dev *hdev;
135 137
136 enum pn544_state state; 138 enum pn544_state state;
137 139
@@ -146,6 +148,9 @@ struct pn544_hci_info {
146 * < 0 if hardware error occured (e.g. i2c err) 148 * < 0 if hardware error occured (e.g. i2c err)
147 * and prevents normal operation. 149 * and prevents normal operation.
148 */ 150 */
151 int async_cb_type;
152 data_exchange_cb_t async_cb;
153 void *async_cb_context;
149}; 154};
150 155
151static void pn544_hci_platform_init(struct pn544_hci_info *info) 156static void pn544_hci_platform_init(struct pn544_hci_info *info)
@@ -230,8 +235,12 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
230 r = i2c_master_send(client, buf, len); 235 r = i2c_master_send(client, buf, len);
231 } 236 }
232 237
233 if (r >= 0 && r != len) 238 if (r >= 0) {
234 r = -EREMOTEIO; 239 if (r != len)
240 return -EREMOTEIO;
241 else
242 return 0;
243 }
235 244
236 return r; 245 return r;
237} 246}
@@ -341,13 +350,16 @@ flush:
341static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id) 350static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
342{ 351{
343 struct pn544_hci_info *info = dev_id; 352 struct pn544_hci_info *info = dev_id;
344 struct i2c_client *client = info->i2c_dev; 353 struct i2c_client *client;
345 struct sk_buff *skb = NULL; 354 struct sk_buff *skb = NULL;
346 int r; 355 int r;
347 356
348 BUG_ON(!info); 357 if (!info || irq != info->i2c_dev->irq) {
349 BUG_ON(irq != info->i2c_dev->irq); 358 WARN_ON_ONCE(1);
359 return IRQ_NONE;
360 }
350 361
362 client = info->i2c_dev;
351 dev_dbg(&client->dev, "IRQ\n"); 363 dev_dbg(&client->dev, "IRQ\n");
352 364
353 if (info->hard_fault != 0) 365 if (info->hard_fault != 0)
@@ -357,21 +369,21 @@ static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
357 if (r == -EREMOTEIO) { 369 if (r == -EREMOTEIO) {
358 info->hard_fault = r; 370 info->hard_fault = r;
359 371
360 nfc_shdlc_recv_frame(info->shdlc, NULL); 372 nfc_hci_recv_frame(info->hdev, NULL);
361 373
362 return IRQ_HANDLED; 374 return IRQ_HANDLED;
363 } else if ((r == -ENOMEM) || (r == -EBADMSG)) { 375 } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
364 return IRQ_HANDLED; 376 return IRQ_HANDLED;
365 } 377 }
366 378
367 nfc_shdlc_recv_frame(info->shdlc, skb); 379 nfc_hci_recv_frame(info->hdev, skb);
368 380
369 return IRQ_HANDLED; 381 return IRQ_HANDLED;
370} 382}
371 383
372static int pn544_hci_open(struct nfc_shdlc *shdlc) 384static int pn544_hci_open(struct nfc_hci_dev *hdev)
373{ 385{
374 struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc); 386 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
375 int r = 0; 387 int r = 0;
376 388
377 mutex_lock(&info->info_lock); 389 mutex_lock(&info->info_lock);
@@ -391,9 +403,9 @@ out:
391 return r; 403 return r;
392} 404}
393 405
394static void pn544_hci_close(struct nfc_shdlc *shdlc) 406static void pn544_hci_close(struct nfc_hci_dev *hdev)
395{ 407{
396 struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc); 408 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
397 409
398 mutex_lock(&info->info_lock); 410 mutex_lock(&info->info_lock);
399 411
@@ -408,9 +420,8 @@ out:
408 mutex_unlock(&info->info_lock); 420 mutex_unlock(&info->info_lock);
409} 421}
410 422
411static int pn544_hci_ready(struct nfc_shdlc *shdlc) 423static int pn544_hci_ready(struct nfc_hci_dev *hdev)
412{ 424{
413 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
414 struct sk_buff *skb; 425 struct sk_buff *skb;
415 static struct hw_config { 426 static struct hw_config {
416 u8 adr[2]; 427 u8 adr[2];
@@ -576,21 +587,45 @@ static int pn544_hci_ready(struct nfc_shdlc *shdlc)
576 return 0; 587 return 0;
577} 588}
578 589
579static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb) 590static void pn544_hci_add_len_crc(struct sk_buff *skb)
580{ 591{
581 struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc); 592 u16 crc;
593 int len;
594
595 len = skb->len + 2;
596 *skb_push(skb, 1) = len;
597
598 crc = crc_ccitt(0xffff, skb->data, skb->len);
599 crc = ~crc;
600 *skb_put(skb, 1) = crc & 0xff;
601 *skb_put(skb, 1) = crc >> 8;
602}
603
604static void pn544_hci_remove_len_crc(struct sk_buff *skb)
605{
606 skb_pull(skb, PN544_FRAME_HEADROOM);
607 skb_trim(skb, PN544_FRAME_TAILROOM);
608}
609
610static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
611{
612 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
582 struct i2c_client *client = info->i2c_dev; 613 struct i2c_client *client = info->i2c_dev;
614 int r;
583 615
584 if (info->hard_fault != 0) 616 if (info->hard_fault != 0)
585 return info->hard_fault; 617 return info->hard_fault;
586 618
587 return pn544_hci_i2c_write(client, skb->data, skb->len); 619 pn544_hci_add_len_crc(skb);
620 r = pn544_hci_i2c_write(client, skb->data, skb->len);
621 pn544_hci_remove_len_crc(skb);
622
623 return r;
588} 624}
589 625
590static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, 626static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
591 u32 im_protocols, u32 tm_protocols) 627 u32 im_protocols, u32 tm_protocols)
592{ 628{
593 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
594 u8 phases = 0; 629 u8 phases = 0;
595 int r; 630 int r;
596 u8 duration[2]; 631 u8 duration[2];
@@ -641,7 +676,7 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
641 return r; 676 return r;
642} 677}
643 678
644static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate, 679static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
645 struct nfc_target *target) 680 struct nfc_target *target)
646{ 681{
647 switch (gate) { 682 switch (gate) {
@@ -659,11 +694,10 @@ static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
659 return 0; 694 return 0;
660} 695}
661 696
662static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc, 697static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
663 u8 gate, 698 u8 gate,
664 struct nfc_target *target) 699 struct nfc_target *target)
665{ 700{
666 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
667 struct sk_buff *uid_skb; 701 struct sk_buff *uid_skb;
668 int r = 0; 702 int r = 0;
669 703
@@ -704,6 +738,26 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
704 return r; 738 return r;
705} 739}
706 740
741#define PN544_CB_TYPE_READER_F 1
742
743static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
744 int err)
745{
746 struct pn544_hci_info *info = context;
747
748 switch (info->async_cb_type) {
749 case PN544_CB_TYPE_READER_F:
750 if (err == 0)
751 skb_pull(skb, 1);
752 info->async_cb(info->async_cb_context, skb, err);
753 break;
754 default:
755 if (err == 0)
756 kfree_skb(skb);
757 break;
758 }
759}
760
707#define MIFARE_CMD_AUTH_KEY_A 0x60 761#define MIFARE_CMD_AUTH_KEY_A 0x60
708#define MIFARE_CMD_AUTH_KEY_B 0x61 762#define MIFARE_CMD_AUTH_KEY_B 0x61
709#define MIFARE_CMD_HEADER 2 763#define MIFARE_CMD_HEADER 2
@@ -715,13 +769,12 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
715 * <= 0: driver handled the data exchange 769 * <= 0: driver handled the data exchange
716 * 1: driver doesn't especially handle, please do standard processing 770 * 1: driver doesn't especially handle, please do standard processing
717 */ 771 */
718static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc, 772static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
719 struct nfc_target *target, 773 struct nfc_target *target,
720 struct sk_buff *skb, 774 struct sk_buff *skb, data_exchange_cb_t cb,
721 struct sk_buff **res_skb) 775 void *cb_context)
722{ 776{
723 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc); 777 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
724 int r;
725 778
726 pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__, 779 pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
727 target->hci_reader_gate); 780 target->hci_reader_gate);
@@ -746,41 +799,43 @@ static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
746 memcpy(data, uid, MIFARE_UID_LEN); 799 memcpy(data, uid, MIFARE_UID_LEN);
747 } 800 }
748 801
749 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 802 return nfc_hci_send_cmd_async(hdev,
750 PN544_MIFARE_CMD, 803 target->hci_reader_gate,
751 skb->data, skb->len, res_skb); 804 PN544_MIFARE_CMD,
805 skb->data, skb->len,
806 cb, cb_context);
752 } else 807 } else
753 return 1; 808 return 1;
754 case PN544_RF_READER_F_GATE: 809 case PN544_RF_READER_F_GATE:
755 *skb_push(skb, 1) = 0; 810 *skb_push(skb, 1) = 0;
756 *skb_push(skb, 1) = 0; 811 *skb_push(skb, 1) = 0;
757 812
758 r = nfc_hci_send_cmd(hdev, target->hci_reader_gate, 813 info->async_cb_type = PN544_CB_TYPE_READER_F;
759 PN544_FELICA_RAW, 814 info->async_cb = cb;
760 skb->data, skb->len, res_skb); 815 info->async_cb_context = cb_context;
761 if (r == 0) 816
762 skb_pull(*res_skb, 1); 817 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
763 return r; 818 PN544_FELICA_RAW, skb->data,
819 skb->len,
820 pn544_hci_data_exchange_cb, info);
764 case PN544_RF_READER_JEWEL_GATE: 821 case PN544_RF_READER_JEWEL_GATE:
765 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 822 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
766 PN544_JEWEL_RAW_CMD, 823 PN544_JEWEL_RAW_CMD, skb->data,
767 skb->data, skb->len, res_skb); 824 skb->len, cb, cb_context);
768 default: 825 default:
769 return 1; 826 return 1;
770 } 827 }
771} 828}
772 829
773static int pn544_hci_check_presence(struct nfc_shdlc *shdlc, 830static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
774 struct nfc_target *target) 831 struct nfc_target *target)
775{ 832{
776 struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
777
778 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 833 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
779 PN544_RF_READER_CMD_PRESENCE_CHECK, 834 PN544_RF_READER_CMD_PRESENCE_CHECK,
780 NULL, 0, NULL); 835 NULL, 0, NULL);
781} 836}
782 837
783static struct nfc_shdlc_ops pn544_shdlc_ops = { 838static struct nfc_hci_ops pn544_hci_ops = {
784 .open = pn544_hci_open, 839 .open = pn544_hci_open,
785 .close = pn544_hci_close, 840 .close = pn544_hci_close,
786 .hci_ready = pn544_hci_ready, 841 .hci_ready = pn544_hci_ready,
@@ -848,8 +903,8 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
848 pn544_hci_platform_init(info); 903 pn544_hci_platform_init(info);
849 904
850 r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn, 905 r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
851 IRQF_TRIGGER_RISING, PN544_HCI_DRIVER_NAME, 906 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
852 info); 907 PN544_HCI_DRIVER_NAME, info);
853 if (r < 0) { 908 if (r < 0) {
854 dev_err(&client->dev, "Unable to register IRQ handler\n"); 909 dev_err(&client->dev, "Unable to register IRQ handler\n");
855 goto err_rti; 910 goto err_rti;
@@ -872,22 +927,30 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
872 NFC_PROTO_ISO14443_B_MASK | 927 NFC_PROTO_ISO14443_B_MASK |
873 NFC_PROTO_NFC_DEP_MASK; 928 NFC_PROTO_NFC_DEP_MASK;
874 929
875 info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops, 930 info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
876 &init_data, protocols, 931 protocols, LLC_SHDLC_NAME,
877 PN544_CMDS_HEADROOM, 0, 932 PN544_FRAME_HEADROOM +
878 PN544_HCI_LLC_MAX_PAYLOAD, 933 PN544_CMDS_HEADROOM,
879 dev_name(&client->dev)); 934 PN544_FRAME_TAILROOM,
880 if (!info->shdlc) { 935 PN544_HCI_LLC_MAX_PAYLOAD);
881 dev_err(&client->dev, "Cannot allocate nfc shdlc.\n"); 936 if (!info->hdev) {
937 dev_err(&client->dev, "Cannot allocate nfc hdev.\n");
882 r = -ENOMEM; 938 r = -ENOMEM;
883 goto err_allocshdlc; 939 goto err_alloc_hdev;
884 } 940 }
885 941
886 nfc_shdlc_set_clientdata(info->shdlc, info); 942 nfc_hci_set_clientdata(info->hdev, info);
943
944 r = nfc_hci_register_device(info->hdev);
945 if (r)
946 goto err_regdev;
887 947
888 return 0; 948 return 0;
889 949
890err_allocshdlc: 950err_regdev:
951 nfc_hci_free_device(info->hdev);
952
953err_alloc_hdev:
891 free_irq(client->irq, info); 954 free_irq(client->irq, info);
892 955
893err_rti: 956err_rti:
@@ -908,7 +971,7 @@ static __devexit int pn544_hci_remove(struct i2c_client *client)
908 971
909 dev_dbg(&client->dev, "%s\n", __func__); 972 dev_dbg(&client->dev, "%s\n", __func__);
910 973
911 nfc_shdlc_free(info->shdlc); 974 nfc_hci_free_device(info->hdev);
912 975
913 if (info->state != PN544_ST_COLD) { 976 if (info->state != PN544_ST_COLD) {
914 if (pdata->disable) 977 if (pdata->disable)
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 1e528b539a07..79f4bce061bd 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -143,10 +143,12 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
143 kt = timespec_to_ktime(ts); 143 kt = timespec_to_ktime(ts);
144 delta = ktime_to_ns(kt); 144 delta = ktime_to_ns(kt);
145 err = ops->adjtime(ops, delta); 145 err = ops->adjtime(ops, delta);
146
147 } else if (tx->modes & ADJ_FREQUENCY) { 146 } else if (tx->modes & ADJ_FREQUENCY) {
148
149 err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq)); 147 err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
148 ptp->dialed_frequency = tx->freq;
149 } else if (tx->modes == 0) {
150 tx->freq = ptp->dialed_frequency;
151 err = 0;
150 } 152 }
151 153
152 return err; 154 return err;
@@ -180,7 +182,8 @@ static void delete_ptp_clock(struct posix_clock *pc)
180 182
181/* public interface */ 183/* public interface */
182 184
183struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info) 185struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
186 struct device *parent)
184{ 187{
185 struct ptp_clock *ptp; 188 struct ptp_clock *ptp;
186 int err = 0, index, major = MAJOR(ptp_devt); 189 int err = 0, index, major = MAJOR(ptp_devt);
@@ -213,7 +216,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
213 init_waitqueue_head(&ptp->tsev_wq); 216 init_waitqueue_head(&ptp->tsev_wq);
214 217
215 /* Create a new device in our class. */ 218 /* Create a new device in our class. */
216 ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp, 219 ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
217 "ptp%d", ptp->index); 220 "ptp%d", ptp->index);
218 if (IS_ERR(ptp->dev)) 221 if (IS_ERR(ptp->dev))
219 goto no_device; 222 goto no_device;
@@ -300,6 +303,11 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
300 pps_get_ts(&evt); 303 pps_get_ts(&evt);
301 pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL); 304 pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
302 break; 305 break;
306
307 case PTP_CLOCK_PPSUSR:
308 pps_event(ptp->pps_source, &event->pps_times,
309 PTP_PPS_EVENT, NULL);
310 break;
303 } 311 }
304} 312}
305EXPORT_SYMBOL(ptp_clock_event); 313EXPORT_SYMBOL(ptp_clock_event);
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index e03c40692b00..d49b85164fd2 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -298,7 +298,7 @@ static int __init ptp_ixp_init(void)
298 298
299 ixp_clock.caps = ptp_ixp_caps; 299 ixp_clock.caps = ptp_ixp_caps;
300 300
301 ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps); 301 ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps, NULL);
302 302
303 if (IS_ERR(ixp_clock.ptp_clock)) 303 if (IS_ERR(ixp_clock.ptp_clock))
304 return PTR_ERR(ixp_clock.ptp_clock); 304 return PTR_ERR(ixp_clock.ptp_clock);
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 3a9c17eced10..e624e4dd2abb 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -627,7 +627,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
627 } 627 }
628 628
629 chip->caps = ptp_pch_caps; 629 chip->caps = ptp_pch_caps;
630 chip->ptp_clock = ptp_clock_register(&chip->caps); 630 chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
631 631
632 if (IS_ERR(chip->ptp_clock)) 632 if (IS_ERR(chip->ptp_clock))
633 return PTR_ERR(chip->ptp_clock); 633 return PTR_ERR(chip->ptp_clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 4d5b5082c3b1..69d32070cc65 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -45,6 +45,7 @@ struct ptp_clock {
45 dev_t devid; 45 dev_t devid;
46 int index; /* index into clocks.map */ 46 int index; /* index into clocks.map */
47 struct pps_device *pps_source; 47 struct pps_device *pps_source;
48 long dialed_frequency; /* remembers the frequency adjustment */
48 struct timestamp_event_queue tsevq; /* simple fifo for time stamps */ 49 struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
49 struct mutex tsevq_mux; /* one process at a time reading the fifo */ 50 struct mutex tsevq_mux; /* one process at a time reading the fifo */
50 wait_queue_head_t tsev_wq; 51 wait_queue_head_t tsev_wq;
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index d4ade9e92fbb..fb92524d24ef 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1523,7 +1523,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1523 goto done; 1523 goto done;
1524 default: 1524 default:
1525 break; 1525 break;
1526 }; 1526 }
1527 1527
1528 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) 1528 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1529 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); 1529 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 5227e5734a9d..98ea9cc6f1aa 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1454,7 +1454,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1454 ch_fsm_len, GFP_KERNEL); 1454 ch_fsm_len, GFP_KERNEL);
1455 } 1455 }
1456 if (ch->fsm == NULL) 1456 if (ch->fsm == NULL)
1457 goto free_return; 1457 goto nomem_return;
1458 1458
1459 fsm_newstate(ch->fsm, CTC_STATE_IDLE); 1459 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
1460 1460
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a3adf4b1c60d..2ca0f1dd7a00 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -282,7 +282,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
282 282
283 LCS_DBF_TEXT(3, setup, "iwritccw"); 283 LCS_DBF_TEXT(3, setup, "iwritccw");
284 /* Setup write ccws. */ 284 /* Setup write ccws. */
285 memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1); 285 memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
286 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { 286 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
287 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; 287 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
288 card->write.ccws[cnt].count = 0; 288 card->write.ccws[cnt].count = 0;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cf6da7fafe54..3e25d3150456 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -489,7 +489,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
489 atomic_set(&reply->refcnt, 1); 489 atomic_set(&reply->refcnt, 1);
490 atomic_set(&reply->received, 0); 490 atomic_set(&reply->received, 0);
491 reply->card = card; 491 reply->card = card;
492 }; 492 }
493 return reply; 493 return reply;
494} 494}
495 495
@@ -1257,7 +1257,30 @@ static void qeth_clean_channel(struct qeth_channel *channel)
1257 kfree(channel->iob[cnt].data); 1257 kfree(channel->iob[cnt].data);
1258} 1258}
1259 1259
1260static void qeth_get_channel_path_desc(struct qeth_card *card) 1260static void qeth_set_single_write_queues(struct qeth_card *card)
1261{
1262 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1263 (card->qdio.no_out_queues == 4))
1264 qeth_free_qdio_buffers(card);
1265
1266 card->qdio.no_out_queues = 1;
1267 if (card->qdio.default_out_queue != 0)
1268 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1269
1270 card->qdio.default_out_queue = 0;
1271}
1272
1273static void qeth_set_multiple_write_queues(struct qeth_card *card)
1274{
1275 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1276 (card->qdio.no_out_queues == 1)) {
1277 qeth_free_qdio_buffers(card);
1278 card->qdio.default_out_queue = 2;
1279 }
1280 card->qdio.no_out_queues = 4;
1281}
1282
1283static void qeth_update_from_chp_desc(struct qeth_card *card)
1261{ 1284{
1262 struct ccw_device *ccwdev; 1285 struct ccw_device *ccwdev;
1263 struct channelPath_dsc { 1286 struct channelPath_dsc {
@@ -1274,38 +1297,23 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
1274 QETH_DBF_TEXT(SETUP, 2, "chp_desc"); 1297 QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1275 1298
1276 ccwdev = card->data.ccwdev; 1299 ccwdev = card->data.ccwdev;
1277 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); 1300 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1278 if (chp_dsc != NULL) { 1301 if (!chp_dsc)
1279 if (card->info.type != QETH_CARD_TYPE_IQD) { 1302 goto out;
1280 /* CHPP field bit 6 == 1 -> single queue */ 1303
1281 if ((chp_dsc->chpp & 0x02) == 0x02) { 1304 card->info.func_level = 0x4100 + chp_dsc->desc;
1282 if ((atomic_read(&card->qdio.state) != 1305 if (card->info.type == QETH_CARD_TYPE_IQD)
1283 QETH_QDIO_UNINITIALIZED) && 1306 goto out;
1284 (card->qdio.no_out_queues == 4)) 1307
1285 /* change from 4 to 1 outbound queues */ 1308 /* CHPP field bit 6 == 1 -> single queue */
1286 qeth_free_qdio_buffers(card); 1309 if ((chp_dsc->chpp & 0x02) == 0x02)
1287 card->qdio.no_out_queues = 1; 1310 qeth_set_single_write_queues(card);
1288 if (card->qdio.default_out_queue != 0) 1311 else
1289 dev_info(&card->gdev->dev, 1312 qeth_set_multiple_write_queues(card);
1290 "Priority Queueing not supported\n"); 1313out:
1291 card->qdio.default_out_queue = 0; 1314 kfree(chp_dsc);
1292 } else {
1293 if ((atomic_read(&card->qdio.state) !=
1294 QETH_QDIO_UNINITIALIZED) &&
1295 (card->qdio.no_out_queues == 1)) {
1296 /* change from 1 to 4 outbound queues */
1297 qeth_free_qdio_buffers(card);
1298 card->qdio.default_out_queue = 2;
1299 }
1300 card->qdio.no_out_queues = 4;
1301 }
1302 }
1303 card->info.func_level = 0x4100 + chp_dsc->desc;
1304 kfree(chp_dsc);
1305 }
1306 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1315 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1307 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1316 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1308 return;
1309} 1317}
1310 1318
1311static void qeth_init_qdio_info(struct qeth_card *card) 1319static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1473,7 +1481,7 @@ static int qeth_determine_card_type(struct qeth_card *card)
1473 card->qdio.no_in_queues = 1; 1481 card->qdio.no_in_queues = 1;
1474 card->info.is_multicast_different = 1482 card->info.is_multicast_different =
1475 known_devices[i][QETH_MULTICAST_IND]; 1483 known_devices[i][QETH_MULTICAST_IND];
1476 qeth_get_channel_path_desc(card); 1484 qeth_update_from_chp_desc(card);
1477 return 0; 1485 return 0;
1478 } 1486 }
1479 i++; 1487 i++;
@@ -2029,7 +2037,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2029 if (time_after(jiffies, timeout)) 2037 if (time_after(jiffies, timeout))
2030 goto time_err; 2038 goto time_err;
2031 cpu_relax(); 2039 cpu_relax();
2032 }; 2040 }
2033 } 2041 }
2034 2042
2035 if (reply->rc == -EIO) 2043 if (reply->rc == -EIO)
@@ -4735,7 +4743,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
4735 4743
4736 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 4744 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
4737 atomic_set(&card->force_alloc_skb, 0); 4745 atomic_set(&card->force_alloc_skb, 0);
4738 qeth_get_channel_path_desc(card); 4746 qeth_update_from_chp_desc(card);
4739retry: 4747retry:
4740 if (retries) 4748 if (retries)
4741 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 4749 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c5f03fa70fba..4cd310cb5bdf 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -794,6 +794,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
794 rc = -EEXIST; 794 rc = -EEXIST;
795 spin_unlock_irqrestore(&card->ip_lock, flags); 795 spin_unlock_irqrestore(&card->ip_lock, flags);
796 if (rc) { 796 if (rc) {
797 kfree(ipaddr);
797 return rc; 798 return rc;
798 } 799 }
799 if (!qeth_l3_add_ip(card, ipaddr)) 800 if (!qeth_l3_add_ip(card, ipaddr))
@@ -858,6 +859,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
858 rc = -EEXIST; 859 rc = -EEXIST;
859 spin_unlock_irqrestore(&card->ip_lock, flags); 860 spin_unlock_irqrestore(&card->ip_lock, flags);
860 if (rc) { 861 if (rc) {
862 kfree(ipaddr);
861 return rc; 863 return rc;
862 } 864 }
863 if (!qeth_l3_add_ip(card, ipaddr)) 865 if (!qeth_l3_add_ip(card, ipaddr))
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 8818dd681c19..65123a21b97e 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -33,40 +33,6 @@
33struct sock *scsi_nl_sock = NULL; 33struct sock *scsi_nl_sock = NULL;
34EXPORT_SYMBOL_GPL(scsi_nl_sock); 34EXPORT_SYMBOL_GPL(scsi_nl_sock);
35 35
36static DEFINE_SPINLOCK(scsi_nl_lock);
37static struct list_head scsi_nl_drivers;
38
39static u32 scsi_nl_state;
40#define STATE_EHANDLER_BSY 0x00000001
41
42struct scsi_nl_transport {
43 int (*msg_handler)(struct sk_buff *);
44 void (*event_handler)(struct notifier_block *, unsigned long, void *);
45 unsigned int refcnt;
46 int flags;
47};
48
49/* flags values (bit flags) */
50#define HANDLER_DELETING 0x1
51
52static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
53 { {NULL, }, };
54
55
56struct scsi_nl_drvr {
57 struct list_head next;
58 int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
59 u32 len, u32 pid);
60 void (*devt_handler)(struct notifier_block *nb,
61 unsigned long event, void *notify_ptr);
62 struct scsi_host_template *hostt;
63 u64 vendor_id;
64 unsigned int refcnt;
65 int flags;
66};
67
68
69
70/** 36/**
71 * scsi_nl_rcv_msg - Receive message handler. 37 * scsi_nl_rcv_msg - Receive message handler.
72 * @skb: socket receive buffer 38 * @skb: socket receive buffer
@@ -81,7 +47,6 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
81{ 47{
82 struct nlmsghdr *nlh; 48 struct nlmsghdr *nlh;
83 struct scsi_nl_hdr *hdr; 49 struct scsi_nl_hdr *hdr;
84 unsigned long flags;
85 u32 rlen; 50 u32 rlen;
86 int err, tport; 51 int err, tport;
87 52
@@ -126,22 +91,24 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
126 /* 91 /*
127 * Deliver message to the appropriate transport 92 * Deliver message to the appropriate transport
128 */ 93 */
129 spin_lock_irqsave(&scsi_nl_lock, flags);
130
131 tport = hdr->transport; 94 tport = hdr->transport;
132 if ((tport < SCSI_NL_MAX_TRANSPORTS) && 95 if (tport == SCSI_NL_TRANSPORT) {
133 !(transports[tport].flags & HANDLER_DELETING) && 96 switch (hdr->msgtype) {
134 (transports[tport].msg_handler)) { 97 case SCSI_NL_SHOST_VENDOR:
135 transports[tport].refcnt++; 98 /* Locate the driver that corresponds to the message */
136 spin_unlock_irqrestore(&scsi_nl_lock, flags); 99 err = -ESRCH;
137 err = transports[tport].msg_handler(skb); 100 break;
138 spin_lock_irqsave(&scsi_nl_lock, flags); 101 default:
139 transports[tport].refcnt--; 102 err = -EBADR;
140 } else 103 break;
104 }
105 if (err)
106 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
107 __func__, hdr->msgtype, err);
108 }
109 else
141 err = -ENOENT; 110 err = -ENOENT;
142 111
143 spin_unlock_irqrestore(&scsi_nl_lock, flags);
144
145next_msg: 112next_msg:
146 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) 113 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
147 netlink_ack(skb, nlh, err); 114 netlink_ack(skb, nlh, err);
@@ -150,333 +117,6 @@ next_msg:
150 } 117 }
151} 118}
152 119
153
154/**
155 * scsi_nl_rcv_event - Event handler for a netlink socket.
156 * @this: event notifier block
157 * @event: event type
158 * @ptr: event payload
159 *
160 **/
161static int
162scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
163{
164 struct netlink_notify *n = ptr;
165 struct scsi_nl_drvr *driver;
166 unsigned long flags;
167 int tport;
168
169 if (n->protocol != NETLINK_SCSITRANSPORT)
170 return NOTIFY_DONE;
171
172 spin_lock_irqsave(&scsi_nl_lock, flags);
173 scsi_nl_state |= STATE_EHANDLER_BSY;
174
175 /*
176 * Pass event on to any transports that may be listening
177 */
178 for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
179 if (!(transports[tport].flags & HANDLER_DELETING) &&
180 (transports[tport].event_handler)) {
181 spin_unlock_irqrestore(&scsi_nl_lock, flags);
182 transports[tport].event_handler(this, event, ptr);
183 spin_lock_irqsave(&scsi_nl_lock, flags);
184 }
185 }
186
187 /*
188 * Pass event on to any drivers that may be listening
189 */
190 list_for_each_entry(driver, &scsi_nl_drivers, next) {
191 if (!(driver->flags & HANDLER_DELETING) &&
192 (driver->devt_handler)) {
193 spin_unlock_irqrestore(&scsi_nl_lock, flags);
194 driver->devt_handler(this, event, ptr);
195 spin_lock_irqsave(&scsi_nl_lock, flags);
196 }
197 }
198
199 scsi_nl_state &= ~STATE_EHANDLER_BSY;
200 spin_unlock_irqrestore(&scsi_nl_lock, flags);
201
202 return NOTIFY_DONE;
203}
204
205static struct notifier_block scsi_netlink_notifier = {
206 .notifier_call = scsi_nl_rcv_event,
207};
208
209
210/*
211 * GENERIC SCSI transport receive and event handlers
212 */
213
214/**
215 * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
216 * @skb: socket receive buffer
217 **/
218static int
219scsi_generic_msg_handler(struct sk_buff *skb)
220{
221 struct nlmsghdr *nlh = nlmsg_hdr(skb);
222 struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
223 struct scsi_nl_drvr *driver;
224 struct Scsi_Host *shost;
225 unsigned long flags;
226 int err = 0, match, pid;
227
228 pid = NETLINK_CREDS(skb)->pid;
229
230 switch (snlh->msgtype) {
231 case SCSI_NL_SHOST_VENDOR:
232 {
233 struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
234
235 /* Locate the driver that corresponds to the message */
236 spin_lock_irqsave(&scsi_nl_lock, flags);
237 match = 0;
238 list_for_each_entry(driver, &scsi_nl_drivers, next) {
239 if (driver->vendor_id == msg->vendor_id) {
240 match = 1;
241 break;
242 }
243 }
244
245 if ((!match) || (!driver->dmsg_handler)) {
246 spin_unlock_irqrestore(&scsi_nl_lock, flags);
247 err = -ESRCH;
248 goto rcv_exit;
249 }
250
251 if (driver->flags & HANDLER_DELETING) {
252 spin_unlock_irqrestore(&scsi_nl_lock, flags);
253 err = -ESHUTDOWN;
254 goto rcv_exit;
255 }
256
257 driver->refcnt++;
258 spin_unlock_irqrestore(&scsi_nl_lock, flags);
259
260
261 /* if successful, scsi_host_lookup takes a shost reference */
262 shost = scsi_host_lookup(msg->host_no);
263 if (!shost) {
264 err = -ENODEV;
265 goto driver_exit;
266 }
267
268 /* is this host owned by the vendor ? */
269 if (shost->hostt != driver->hostt) {
270 err = -EINVAL;
271 goto vendormsg_put;
272 }
273
274 /* pass message on to the driver */
275 err = driver->dmsg_handler(shost, (void *)&msg[1],
276 msg->vmsg_datalen, pid);
277
278vendormsg_put:
279 /* release reference by scsi_host_lookup */
280 scsi_host_put(shost);
281
282driver_exit:
283 /* release our own reference on the registration object */
284 spin_lock_irqsave(&scsi_nl_lock, flags);
285 driver->refcnt--;
286 spin_unlock_irqrestore(&scsi_nl_lock, flags);
287 break;
288 }
289
290 default:
291 err = -EBADR;
292 break;
293 }
294
295rcv_exit:
296 if (err)
297 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
298 __func__, snlh->msgtype, err);
299 return err;
300}
301
302
303/**
304 * scsi_nl_add_transport -
305 * Registers message and event handlers for a transport. Enables
306 * receipt of netlink messages and events to a transport.
307 *
308 * @tport: transport registering handlers
309 * @msg_handler: receive message handler callback
310 * @event_handler: receive event handler callback
311 **/
312int
313scsi_nl_add_transport(u8 tport,
314 int (*msg_handler)(struct sk_buff *),
315 void (*event_handler)(struct notifier_block *, unsigned long, void *))
316{
317 unsigned long flags;
318 int err = 0;
319
320 if (tport >= SCSI_NL_MAX_TRANSPORTS)
321 return -EINVAL;
322
323 spin_lock_irqsave(&scsi_nl_lock, flags);
324
325 if (scsi_nl_state & STATE_EHANDLER_BSY) {
326 spin_unlock_irqrestore(&scsi_nl_lock, flags);
327 msleep(1);
328 spin_lock_irqsave(&scsi_nl_lock, flags);
329 }
330
331 if (transports[tport].msg_handler || transports[tport].event_handler) {
332 err = -EALREADY;
333 goto register_out;
334 }
335
336 transports[tport].msg_handler = msg_handler;
337 transports[tport].event_handler = event_handler;
338 transports[tport].flags = 0;
339 transports[tport].refcnt = 0;
340
341register_out:
342 spin_unlock_irqrestore(&scsi_nl_lock, flags);
343
344 return err;
345}
346EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
347
348
349/**
350 * scsi_nl_remove_transport -
351 * Disable transport receiption of messages and events
352 *
353 * @tport: transport deregistering handlers
354 *
355 **/
356void
357scsi_nl_remove_transport(u8 tport)
358{
359 unsigned long flags;
360
361 spin_lock_irqsave(&scsi_nl_lock, flags);
362 if (scsi_nl_state & STATE_EHANDLER_BSY) {
363 spin_unlock_irqrestore(&scsi_nl_lock, flags);
364 msleep(1);
365 spin_lock_irqsave(&scsi_nl_lock, flags);
366 }
367
368 if (tport < SCSI_NL_MAX_TRANSPORTS) {
369 transports[tport].flags |= HANDLER_DELETING;
370
371 while (transports[tport].refcnt != 0) {
372 spin_unlock_irqrestore(&scsi_nl_lock, flags);
373 schedule_timeout_uninterruptible(HZ/4);
374 spin_lock_irqsave(&scsi_nl_lock, flags);
375 }
376 transports[tport].msg_handler = NULL;
377 transports[tport].event_handler = NULL;
378 transports[tport].flags = 0;
379 }
380
381 spin_unlock_irqrestore(&scsi_nl_lock, flags);
382
383 return;
384}
385EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
386
387
388/**
389 * scsi_nl_add_driver -
390 * A driver is registering its interfaces for SCSI netlink messages
391 *
392 * @vendor_id: A unique identification value for the driver.
393 * @hostt: address of the driver's host template. Used
394 * to verify an shost is bound to the driver
395 * @nlmsg_handler: receive message handler callback
396 * @nlevt_handler: receive event handler callback
397 *
398 * Returns:
399 * 0 on Success
400 * error result otherwise
401 **/
402int
403scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
404 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
405 u32 len, u32 pid),
406 void (*nlevt_handler)(struct notifier_block *nb,
407 unsigned long event, void *notify_ptr))
408{
409 struct scsi_nl_drvr *driver;
410 unsigned long flags;
411
412 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
413 if (unlikely(!driver)) {
414 printk(KERN_ERR "%s: allocation failure\n", __func__);
415 return -ENOMEM;
416 }
417
418 driver->dmsg_handler = nlmsg_handler;
419 driver->devt_handler = nlevt_handler;
420 driver->hostt = hostt;
421 driver->vendor_id = vendor_id;
422
423 spin_lock_irqsave(&scsi_nl_lock, flags);
424 if (scsi_nl_state & STATE_EHANDLER_BSY) {
425 spin_unlock_irqrestore(&scsi_nl_lock, flags);
426 msleep(1);
427 spin_lock_irqsave(&scsi_nl_lock, flags);
428 }
429 list_add_tail(&driver->next, &scsi_nl_drivers);
430 spin_unlock_irqrestore(&scsi_nl_lock, flags);
431
432 return 0;
433}
434EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
435
436
437/**
438 * scsi_nl_remove_driver -
439 * An driver is unregistering with the SCSI netlink messages
440 *
441 * @vendor_id: The unique identification value for the driver.
442 **/
443void
444scsi_nl_remove_driver(u64 vendor_id)
445{
446 struct scsi_nl_drvr *driver;
447 unsigned long flags;
448
449 spin_lock_irqsave(&scsi_nl_lock, flags);
450 if (scsi_nl_state & STATE_EHANDLER_BSY) {
451 spin_unlock_irqrestore(&scsi_nl_lock, flags);
452 msleep(1);
453 spin_lock_irqsave(&scsi_nl_lock, flags);
454 }
455
456 list_for_each_entry(driver, &scsi_nl_drivers, next) {
457 if (driver->vendor_id == vendor_id) {
458 driver->flags |= HANDLER_DELETING;
459 while (driver->refcnt != 0) {
460 spin_unlock_irqrestore(&scsi_nl_lock, flags);
461 schedule_timeout_uninterruptible(HZ/4);
462 spin_lock_irqsave(&scsi_nl_lock, flags);
463 }
464 list_del(&driver->next);
465 kfree(driver);
466 spin_unlock_irqrestore(&scsi_nl_lock, flags);
467 return;
468 }
469 }
470
471 spin_unlock_irqrestore(&scsi_nl_lock, flags);
472
473 printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
474 __func__, (unsigned long long)vendor_id);
475 return;
476}
477EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
478
479
480/** 120/**
481 * scsi_netlink_init - Called by SCSI subsystem to initialize 121 * scsi_netlink_init - Called by SCSI subsystem to initialize
482 * the SCSI transport netlink interface 122 * the SCSI transport netlink interface
@@ -485,36 +125,19 @@ EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
485void 125void
486scsi_netlink_init(void) 126scsi_netlink_init(void)
487{ 127{
488 int error;
489 struct netlink_kernel_cfg cfg = { 128 struct netlink_kernel_cfg cfg = {
490 .input = scsi_nl_rcv_msg, 129 .input = scsi_nl_rcv_msg,
491 .groups = SCSI_NL_GRP_CNT, 130 .groups = SCSI_NL_GRP_CNT,
492 }; 131 };
493 132
494 INIT_LIST_HEAD(&scsi_nl_drivers);
495
496 error = netlink_register_notifier(&scsi_netlink_notifier);
497 if (error) {
498 printk(KERN_ERR "%s: register of event handler failed - %d\n",
499 __func__, error);
500 return;
501 }
502
503 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, 133 scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
504 THIS_MODULE, &cfg); 134 &cfg);
505 if (!scsi_nl_sock) { 135 if (!scsi_nl_sock) {
506 printk(KERN_ERR "%s: register of receive handler failed\n", 136 printk(KERN_ERR "%s: register of receive handler failed\n",
507 __func__); 137 __func__);
508 netlink_unregister_notifier(&scsi_netlink_notifier);
509 return; 138 return;
510 } 139 }
511 140
512 /* Register the entry points for the generic SCSI transport */
513 error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
514 scsi_generic_msg_handler, NULL);
515 if (error)
516 printk(KERN_ERR "%s: register of GENERIC transport handler"
517 " failed - %d\n", __func__, error);
518 return; 141 return;
519} 142}
520 143
@@ -526,158 +149,10 @@ scsi_netlink_init(void)
526void 149void
527scsi_netlink_exit(void) 150scsi_netlink_exit(void)
528{ 151{
529 scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
530
531 if (scsi_nl_sock) { 152 if (scsi_nl_sock) {
532 netlink_kernel_release(scsi_nl_sock); 153 netlink_kernel_release(scsi_nl_sock);
533 netlink_unregister_notifier(&scsi_netlink_notifier);
534 } 154 }
535 155
536 return; 156 return;
537} 157}
538 158
539
540/*
541 * Exported Interfaces
542 */
543
544/**
545 * scsi_nl_send_transport_msg -
546 * Generic function to send a single message from a SCSI transport to
547 * a single process
548 *
549 * @pid: receiving pid
550 * @hdr: message payload
551 *
552 **/
553void
554scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
555{
556 struct sk_buff *skb;
557 struct nlmsghdr *nlh;
558 const char *fn;
559 char *datab;
560 u32 len, skblen;
561 int err;
562
563 if (!scsi_nl_sock) {
564 err = -ENOENT;
565 fn = "netlink socket";
566 goto msg_fail;
567 }
568
569 len = NLMSG_SPACE(hdr->msglen);
570 skblen = NLMSG_SPACE(len);
571
572 skb = alloc_skb(skblen, GFP_KERNEL);
573 if (!skb) {
574 err = -ENOBUFS;
575 fn = "alloc_skb";
576 goto msg_fail;
577 }
578
579 nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
580 if (!nlh) {
581 err = -ENOBUFS;
582 fn = "nlmsg_put";
583 goto msg_fail_skb;
584 }
585 datab = NLMSG_DATA(nlh);
586 memcpy(datab, hdr, hdr->msglen);
587
588 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
589 if (err < 0) {
590 fn = "nlmsg_unicast";
591 /* nlmsg_unicast already kfree_skb'd */
592 goto msg_fail;
593 }
594
595 return;
596
597msg_fail_skb:
598 kfree_skb(skb);
599msg_fail:
600 printk(KERN_WARNING
601 "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
602 "msglen %d: %s : err %d\n",
603 __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
604 fn, err);
605 return;
606}
607EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
608
609
610/**
611 * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
612 * to a specific process id.
613 *
614 * @pid: process id of the receiver
615 * @host_no: host # sending the message
616 * @vendor_id: unique identifier for the driver's vendor
617 * @data_len: amount, in bytes, of vendor unique payload data
618 * @data_buf: pointer to vendor unique data buffer
619 *
620 * Returns:
621 * 0 on successful return
622 * otherwise, failing error code
623 *
624 * Notes:
625 * This routine assumes no locks are held on entry.
626 */
627int
628scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
629 char *data_buf, u32 data_len)
630{
631 struct sk_buff *skb;
632 struct nlmsghdr *nlh;
633 struct scsi_nl_host_vendor_msg *msg;
634 u32 len, skblen;
635 int err;
636
637 if (!scsi_nl_sock) {
638 err = -ENOENT;
639 goto send_vendor_fail;
640 }
641
642 len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
643 skblen = NLMSG_SPACE(len);
644
645 skb = alloc_skb(skblen, GFP_KERNEL);
646 if (!skb) {
647 err = -ENOBUFS;
648 goto send_vendor_fail;
649 }
650
651 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
652 skblen - sizeof(*nlh), 0);
653 if (!nlh) {
654 err = -ENOBUFS;
655 goto send_vendor_fail_skb;
656 }
657 msg = NLMSG_DATA(nlh);
658
659 INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
660 SCSI_NL_SHOST_VENDOR, len);
661 msg->vendor_id = vendor_id;
662 msg->host_no = host_no;
663 msg->vmsg_datalen = data_len; /* bytes */
664 memcpy(&msg[1], data_buf, data_len);
665
666 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
667 if (err)
668 /* nlmsg_multicast already kfree_skb'd */
669 goto send_vendor_fail;
670
671 return 0;
672
673send_vendor_fail_skb:
674 kfree_skb(skb);
675send_vendor_fail:
676 printk(KERN_WARNING
677 "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
678 __func__, host_no, err);
679 return err;
680}
681EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
682
683
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fa1dfaa83e32..31969f2e13ce 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2119,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2119 switch (nlh->nlmsg_type) { 2119 switch (nlh->nlmsg_type) {
2120 case ISCSI_UEVENT_CREATE_SESSION: 2120 case ISCSI_UEVENT_CREATE_SESSION:
2121 err = iscsi_if_create_session(priv, ep, ev, 2121 err = iscsi_if_create_session(priv, ep, ev,
2122 NETLINK_CB(skb).pid, 2122 NETLINK_CB(skb).portid,
2123 ev->u.c_session.initial_cmdsn, 2123 ev->u.c_session.initial_cmdsn,
2124 ev->u.c_session.cmds_max, 2124 ev->u.c_session.cmds_max,
2125 ev->u.c_session.queue_depth); 2125 ev->u.c_session.queue_depth);
@@ -2132,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2132 } 2132 }
2133 2133
2134 err = iscsi_if_create_session(priv, ep, ev, 2134 err = iscsi_if_create_session(priv, ep, ev,
2135 NETLINK_CB(skb).pid, 2135 NETLINK_CB(skb).portid,
2136 ev->u.c_bound_session.initial_cmdsn, 2136 ev->u.c_bound_session.initial_cmdsn,
2137 ev->u.c_bound_session.cmds_max, 2137 ev->u.c_bound_session.cmds_max,
2138 ev->u.c_bound_session.queue_depth); 2138 ev->u.c_bound_session.queue_depth);
@@ -2969,8 +2969,7 @@ static __init int iscsi_transport_init(void)
2969 if (err) 2969 if (err)
2970 goto unregister_conn_class; 2970 goto unregister_conn_class;
2971 2971
2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 2972 nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
2973 THIS_MODULE, &cfg);
2974 if (!nls) { 2973 if (!nls) {
2975 err = -ENOBUFS; 2974 err = -ENOBUFS;
2976 goto unregister_session_class; 2975 goto unregister_session_class;
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 7e2ddc042f5b..c6250867a95d 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -190,16 +190,30 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
190{ 190{
191 struct ssb_bus *bus = mcore->dev->bus; 191 struct ssb_bus *bus = mcore->dev->bus;
192 192
193 mcore->flash_buswidth = 2; 193 /* When there is no chipcommon on the bus there is 4MB flash */
194 if (bus->chipco.dev) { 194 if (!bus->chipco.dev) {
195 mcore->flash_window = 0x1c000000; 195 mcore->flash_buswidth = 2;
196 mcore->flash_window_size = 0x02000000; 196 mcore->flash_window = SSB_FLASH1;
197 mcore->flash_window_size = SSB_FLASH1_SZ;
198 return;
199 }
200
201 /* There is ChipCommon, so use it to read info about flash */
202 switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
203 case SSB_CHIPCO_FLASHT_STSER:
204 case SSB_CHIPCO_FLASHT_ATSER:
205 pr_err("Serial flash not supported\n");
206 break;
207 case SSB_CHIPCO_FLASHT_PARA:
208 pr_debug("Found parallel flash\n");
209 mcore->flash_window = SSB_FLASH2;
210 mcore->flash_window_size = SSB_FLASH2_SZ;
197 if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) 211 if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
198 & SSB_CHIPCO_CFG_DS16) == 0) 212 & SSB_CHIPCO_CFG_DS16) == 0)
199 mcore->flash_buswidth = 1; 213 mcore->flash_buswidth = 1;
200 } else { 214 else
201 mcore->flash_window = 0x1fc00000; 215 mcore->flash_buswidth = 2;
202 mcore->flash_window_size = 0x00400000; 216 break;
203 } 217 }
204} 218}
205 219
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 3abb31df8f28..20d0aec52e72 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -95,7 +95,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
95 init_MUTEX(&netlink_mutex); 95 init_MUTEX(&netlink_mutex);
96#endif 96#endif
97 97
98 sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg); 98 sock = netlink_kernel_create(&init_net, unit, &cfg);
99 99
100 if (sock) 100 if (sock)
101 rcv_cb = cb; 101 rcv_cb = cb;
@@ -135,7 +135,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
135 } 135 }
136 memcpy(nlmsg_data(nlh), msg, len); 136 memcpy(nlmsg_data(nlh), msg, len);
137 137
138 NETLINK_CB(skb).pid = 0; 138 NETLINK_CB(skb).portid = 0;
139 NETLINK_CB(skb).dst_group = 0; 139 NETLINK_CB(skb).dst_group = 0;
140 140
141 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC); 141 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 0ca857ac473e..48aa1361903e 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -119,7 +119,9 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
119 *total_flags = new_flags; 119 *total_flags = new_flags;
120} 120}
121 121
122static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 122static void wbsoft_tx(struct ieee80211_hw *dev,
123 struct ieee80211_tx_control *control,
124 struct sk_buff *skb)
123{ 125{
124 struct wbsoft_priv *priv = dev->priv; 126 struct wbsoft_priv *priv = dev->priv;
125 127
diff --git a/firmware/Makefile b/firmware/Makefile
index fdc9ff045ef8..eeb14030d8a2 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -42,7 +42,6 @@ fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
42fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin 42fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
43fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \ 43fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
44 cxgb3/t3c_psram-1.1.0.bin \ 44 cxgb3/t3c_psram-1.1.0.bin \
45 cxgb3/t3fw-7.10.0.bin \
46 cxgb3/ael2005_opt_edc.bin \ 45 cxgb3/ael2005_opt_edc.bin \
47 cxgb3/ael2005_twx_edc.bin \ 46 cxgb3/ael2005_twx_edc.bin \
48 cxgb3/ael2020_twx_edc.bin 47 cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
deleted file mode 100644
index 96399d87bd35..000000000000
--- a/firmware/cxgb3/t3fw-7.10.0.bin.ihex
+++ /dev/null
@@ -1,1935 +0,0 @@
1:1000000060007400200380002003700000001000D6
2:1000100000002000E100028400070000E1000288E7
3:1000200000010000E0000000E00000A0010000006E
4:1000300044444440E3000183200200002001E0002A
5:100040002001FF101FFFD0001FFFC000E300043C91
6:100050000200000020006C841FFFC2A020006CCCB6
7:100060001FFFC2A420006D0C1FFFC2A820006D80DE
8:100070001FFFC2AC200003C0C00000E43100EA3121
9:1000800000A13100A03103020002ED306E2A05000C
10:10009000ED3100020002160012FFDBC03014FFDA5F
11:1000A000D30FD30FD30F03431F244C107249F0D347
12:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
13:1000C000D30FD30F03431F244C107249F0D30FD327
14:1000D0000FD30F14FFCE03421F14FFCB03421F1296
15:1000E000FFCCC0302D37302D37342D37382D373CED
16:1000F000233D017233ED00020012FFC4C0302F37E0
17:10010000002F37102F37202F3730233D017233ED6A
18:1001100000020012FFBEC0302737002737102737F4
19:1001200020273730233D017233ED03020012FFB95F
20:1001300013FFBA0C0200932012FFB913FFB90C028F
21:1001400000932012FFB8C0319320822012FFB71312
22:10015000FFB7932012FFB715FFB316FFB6C030D715
23:100160002005660160001B00000000000000000088
24:10017000043605000200D30FD30F05330C6E3B1479
25:100180000747140704437631E604360505330C6F40
26:100190003BED00020012FFA615FFA3230A00D720A3
27:1001A000070443043E0505330C0747146F3BF00377
28:1001B000020012FFA1C03014FFA1D30FD30FD30F41
29:1001C0009340B4447249F2D30FD30FD30F14FF9B63
30:1001D000834014FF9B834012FF9B230A0014FF9A65
31:1001E000D30FD30FD30F9340B4447249F2D30FD33C
32:1001F0000FD30F14FF95834012FF95C92F832084DE
33:10020000218522BC22743B0F8650B4559630B433FE
34:100210007433F463FFE60000653FE1655FDE12FFC3
35:100220007C230A0028374028374428374828374C91
36:10023000233D017233ED03020000020012FF7AC079
37:1002400032032E0503020012FF7813FF819320C0B2
38:1002500011014931004831010200C00014FF7E0441
39:10026000D23115FF7D945014FF7D04D33115FF7CEE
40:10027000945014FF7C04D43115FF7C24560014FFE5
41:100280007B04D53115FF7B24560010FF7A03000054
42:10029000000000000000000000000000000000005E
43:1002A000000000000000000000000000000000004E
44:1002B000000000000000000000000000000000003E
45:1002C000000000000000000000000000000000002E
46:1002D000000000000000000000000000000000001E
47:1002E000000000000000000000000000000000000E
48:1002F00000000000000000000000000000000000FE
49:1003000000000000000000000000000000000000ED
50:1003100000000000000000000000000000000000DD
51:1003200000000000000000000000000000000000CD
52:1003300000000000000000000000000000000000BD
53:1003400000000000000000000000000000000000AD
54:10035000000000000000000000000000000000009D
55:10036000000000000000000000000000000000008D
56:10037000000000000000000000000000000000007D
57:10038000000000000000000000000000000000006D
58:10039000000000000000000000000000000000005D
59:1003A000000000000000000000000000000000004D
60:1003B000000000000000000000000000000000003D
61:1003C000000000000000000000000000000000002D
62:1003D000000000000000000000000000000000001D
63:1003E000000000000000000000000000000000000D
64:1003F00000000000000000000000000000000000FD
65:1004000000000000000000000000000000000000EC
66:1004100000000000000000000000000000000000DC
67:1004200063FFFC000000000000000000000000006E
68:100430000000000000000000000000001FFC0000A1
69:100440001FFC0000E30005C81FFC00001FFC0000AB
70:10045000E30005C81FFC00001FFC0000E30005C806
71:100460001FFFC0001FFFC000E30005C81FFFC00042
72:100470001FFFC018E30005C81FFFC0181FFFC018EA
73:10048000E30005E01FFFC0181FFFC294E30005E072
74:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
75:1004A0001FFFC59CE300085C200000002000016ADB
76:1004B000E3000B582000018020000180E3000CC401
77:1004C0002000020020000203E3000CC42000021CF4
78:1004D00020000220E3000CC8200002202000022699
79:1004E000E3000CCC2000023C20000240E3000CD4CE
80:1004F0002000024020000249E3000CD82000024CFA
81:1005000020000250E3000CE42000025020000259B9
82:10051000E3000CE82000025C20000260E3000CF421
83:100520002000026020000269E3000CF82000026C49
84:1005300020000270E3000D04200002702000027908
85:10054000E3000D082000028C2000028CE3000D1453
86:100550002000029020000293E3000D14200002AC62
87:10056000200002B0E3000D18200002D0200002F2AB
88:10057000E3000D1C200003B0200003B0E3000D4099
89:10058000200003B0200003B0E3000D40200003B0C2
90:10059000200003B0E3000D40200003B0200003B0B2
91:1005A000E3000D40200003B020006EA4E3000D40E6
92:1005B00020006EA420006EA4E30078340000000048
93:1005C00000000000000000001FFC00001FFC0000F5
94:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
95:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
96:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
97:10060000003FFFFF8040000010000000080FFFFFC8
98:100610001FFFC27D000FFFFF804FFFFF8000000023
99:1006200000000880B000000560500000600000007D
100:1006300040000011350000004100000010000001E2
101:100640002000000000001000400000000500000035
102:10065000800000190400000000000800E100020012
103:1006600010000005806000007000000020000009FC
104:10067000001FF8008000001EA0000000F80000002D
105:1006800007FFFFFF080000001800000001008001C4
106:10069000420000001FFFC22D1FFFC0EC00010080C0
107:1006A000604000001A0000000C0000001000000A6A
108:1006B000000030000001000080000018FC00000075
109:1006C0008000000100004000600008008000001C65
110:1006D0008000001A030000008000040004030403EB
111:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
112:1006F000FFFFF000000016D00000FFF7A50000008B
113:100700001FFFC4C01FFFC4710001000800000B20C0
114:10071000202FFF801FFFC46500002C00FFFEFFF8A4
115:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
116:100730000000FFEF010011001FFFC3E21FFFC5A073
117:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
118:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
119:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
120:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
121:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
122:10079000000100817FFFFFFFE1000600000027103D
123:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
124:1007B0000003D0901FFFC5742B5063802B507980AD
125:1007C0002B5090802B50A6801FFFC4790100110F81
126:1007D000202FFE0020300080202FFF000000FFFFB0
127:1007E0000001FFF82B50B2002B50B208000100109E
128:1007F0002B50B1802B50B2802B50BA000001001159
129:100800002B50BD282B50BC802B50BDA020300000A9
130:10081000DFFFFE005000000200C0000002000000E8
131:10082000FFFFF7F41FFFC07C000FF800044000003A
132:10083000001000000C4000001C400000E00000A080
133:100840001FFFC5501FFD00081FFFC5641FFFC578AF
134:100850001FFFC58CE1000690E10006EC00000000DF
135:100860000000000000000000000000000100000087
136:100870000000000000000000000000002010004008
137:10088000201000402010004020140080200C0000A8
138:10089000200C0000200C00002010004020140080DC
139:1008A0002014008020140080201800C0201C0100AB
140:1008B000201C0100201C010020200140201800C045
141:1008C000201800C0201800C0201C0100201800C003
142:1008D000201800C0201800C0201C0100202001406A
143:1008E00020200140202001402020094020200940F4
144:1008F000202009402020094020240980FFFFFFFF1D
145:10090000FFFFFFFFFFFFFFFF0000000000000000EF
146:1009100000000000000000000000000020005588DA
147:1009200020005458200055882000558820005394FA
148:100930002000539420005394200051D4200051D41F
149:10094000200051CC2000513820004FE020004DC045
150:1009500020004B94000000000000000020005558CB
151:1009600020005424200054C8200054C82000527C89
152:100970002000527C2000527C2000527C2000527CBF
153:10098000200051C42000527C20004F0020004D70F8
154:1009900020004B40000000000000000020000BF091
155:1009A00020003ADC200004C02000473020000BE883
156:1009B000200041F4200003F0200046F020004B1CF2
157:1009C00020003F0020003E1C20003A58200038E85C
158:1009D00020003658200031B820003C7820002DD06F
159:1009E0002000286420006828200023F0200020D068
160:1009F0002000207C20001D68200018602000158841
161:100A000020000E5420000C3420001134200013204C
162:100A1000200043EC20003EB420000BF8200004C06E
163:100A200000000000000000000000000000000000C6
164:100A300000000000000000000000000000000000B6
165:100A400000000000000000000000000000000000A6
166:100A50000000000000000000000000000000000096
167:100A60000000000000000000000000000000000086
168:100A70000000000000000000000000000000000076
169:100A80000000000000000000000000000000000066
170:100A90000000000000000000000000000000000056
171:100AA0003264000000000000326400006400640052
172:100AB00064006400640064006400640000000000DE
173:100AC0000000000000000000000000000000000026
174:100AD0000000000000000000000000000000000016
175:100AE0000000000000000000000000000000000006
176:100AF00000000000000000000000000000000000F6
177:100B000000000000000010000000000000000000D5
178:100B100000000000000000000000000000001000C5
179:100B200000000000000000000000000000000000C5
180:100B300000432380000000000000000000000000CF
181:100B400000000000000000000000000000000000A5
182:100B50000000000000000000005C94015D94025E53
183:100B600094035F94004300000000000000000000B8
184:100B70000000000000000000000000000000000075
185:100B80000000000000000000000000000000000065
186:100B90000000000000000000005C90015D90025E1B
187:100BA00090035F9000530000000000000000000070
188:100BB0000000000000000000000000000000000035
189:100BC0000000000000000000000000000000000025
190:100BD0000000000000000000009C94001D90019D9A
191:100BE00094029E94039F94040894050994060A9421
192:100BF000070B94004300000000000000000000000C
193:100C000000000000000000000000000000000000E4
194:100C10000000000000000000009C90019D90029EDA
195:100C200090071D90039F90047890057990067A9024
196:100C3000077B90005300000000000000000000004F
197:100C400000000000000000000000000000000000A4
198:100C5000000000000000000000DC94001D9001DD99
199:100C60009402DE9403DF940404940505940606942C
200:100C70000707940808940909940A0A940B0B940036
201:100C80004300000000000000000000000000000021
202:100C9000000000000000000000DC9001DD9002DE9A
203:100CA000900B1D9003DF9004B49005B59006B690AC
204:100CB00007B79008B89009B9900ABA900BBB90009A
205:100CC0005300000063FFFC0020006C6010FFFF0A6F
206:100CD0000000000020006C8400D23110FFFE0A00EA
207:100CE0000000000020006CCC00D33110FFFE0A0091
208:100CF0000000000020006D0C00D43110FFFE0A003F
209:100D00000000000020006D8000D53110FFFE0A00B9
210:100D10000000000063FFFC00E00000A012FFF7826B
211:100D200020028257C82163FFFC12FFF303E830045E
212:100D3000EE3005C03093209421952263FFFC000023
213:100D40001FFFD000000400201FFFC5A01FFFC6909A
214:100D5000200A0011FFFB13FFFB03E631010200161E
215:100D6000FFFA17FFFAD30F776B069060B4667763CC
216:100D7000F85415F3541AA50F140063FFF90000008E
217:100D80006C1004C020D10F006C1004C0C71AEF060D
218:100D9000D830BC2BD72085720D4211837105450BCD
219:100DA000957202330C2376017B3B04233D0893713B
220:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
221:100DC000088202280A01038E380E0E42C8EE29A6B8
222:100DD0007E6D4A0500208800308C8271D10FC0F0F2
223:100DE000028F387FC0EA63FFE400C0F1C050037E89
224:100DF0000CA2EE0E3D1208820203F538050542CB27
225:100E00005729A67E2FDC100F4F366DFA050020887B
226:100E100000308CBC75C03008E208280A0105833810
227:100E2000030342C93E29A67E0D480CD30F6D8A05E7
228:100E300000208800B08C8271D10FC05008F5387541
229:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
230:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
231:100E6000221DC0D07B81352920060BB702299CFAB0
232:100E7000655008282072288CFF2824726491642A07
233:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
234:100E90003FC1CE7CA13669AC336000370029200603
235:100EA000D7D0299CFACC57282072288CFF2824728E
236:100EB0006491392AD0000CA80C6481680EA90C64D6
237:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
238:100ED0000F2D25028A32C0900A6E5065E5B529248F
239:100EE00067090F4765F5B12C200C1FEEB30CCE112E
240:100EF000AFEE29E286B44879830260058219EEAF2D
241:100F000009C90A2992A36890078F2009FF0C65F58B
242:100F10006E2FE28564F56865559628221D7B810554
243:100F2000D9B060000200C0908B9417EEA50B881416
244:100F300087740B0B47A87718EEA309BB100877023C
245:100F400097F018EEA117EEA208A8010B8802074738
246:100F5000021BEE9E97F10B880298F22790232B90AC
247:100F60002204781006BB1007471208BB0228902104
248:100F70000777100C88100788020B880217EE968BF3
249:100F80003307BB0187340B880298F3979997F48B4A
250:100F90009587399BF588968B3898F688979BF897B4
251:100FA000F998F717EE8D28E28507C7082D74CF084A
252:100FB000480B28E68565550F2B221E28221D7B89AC
253:100FC000022B0A0064BF052CB00728B000DA200607
254:100FD000880A28824CC0D10B8000DBA065AFE76394
255:100FE000FEEA0000292072659E946004E72A2072C0
256:100FF00065AEBF6004DE00002EB0032C2067D4E095
257:1010000065C1058A328C330AFF500C4554BC5564C7
258:10101000F4EB19EE72882A09A90109880C64821F71
259:10102000C0926000DD2ED0032A2067D4E065A0D8EE
260:101030008A328B330AFC500B4554BC5564C4BE192C
261:10104000EE67882A09A9017989D50BEA5064A4E3DF
262:101050000CEE11C0F02F16132E16168AE78CE82A14
263:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
264:101070006583468837DBC0AE89991E789B022BCCEE
265:10108000012B161B29120E2B0A0029161A7FC307E3
266:101090007FC9027EAB01C0B165B49D8B352F0A00BC
267:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
268:1010B00012162B1619005104C0C100CC1A2CCCFFFB
269:1010C0002C16170CFC132C16182B121A2A121BDCC8
270:1010D000505819B6C0D0C0902E5CF42C12172812AC
271:1010E000182F121B2A121A08FF010CAA01883407B4
272:1010F0004C0AAB8B2812192BC6162F86082A860994
273:101100002E74102924672E70038975B1EA2A74039E
274:10111000B09909490C659DB42B20672D250265B354
275:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
276:10113000B00728B000DA2006880A28824CC0D10BFC
277:101140008000DBA065AFE763FD8289BAB199659045
278:101150009788341CEE2398BA8F331EEE1C0F4F5421
279:101160002FB42C8D2A8A320EDD020CAC017DC966AB
280:101170000A49516F92608A3375A65B2CB0130AED51
281:10118000510DCD010D0D410C0C417DC9492EB01200
282:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
283:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
284:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
285:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
286:1011D000EB01C0B164B161C091292467C020D10F77
287:1011E00000008ADAB1AA64A0C02C20672D25026510
288:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
289:1012000065D28A0A4E516FE202600281C0902924A1
290:1012100067090F4765F2F828221D7B89022B0A0017
291:1012200064BCA92CB00728B000DA2006880A2882FE
292:101230004CC0D10B8000DBA065AFE763FC8E0000E3
293:101240000CE9506492ED0CEF11C080281611AFBF6D
294:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
295:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
296:1012700089991C789B022CEC012C161B29120C2C32
297:101280000A0029161A7AE3077AE9027FBB01C0C176
298:1012900065C2A58B352C0A002A0A007AE30564E1B1
299:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
300:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
301:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
302:1012D000920263FF018A330AAB5064BEF92CD0132B
303:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
304:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
305:101300008A362FD2097CA3077AC9027EFB01C0B1BD
306:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
307:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
308:10133000027DEB01C0C165CE9DC090292467C0200D
309:10134000D10F88378C3698140CE90C29161408F83C
310:101350000C981D78FB07281214B088281614891DD4
311:101360009F159B16C0F02B121429161A2B161B8BD7
312:10137000147AE30B7AE90688158E1678EB01C0F132
313:1013800065F1BA29121A2F12118A352E121B9A1AD8
314:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
315:1013A000881AC0F098107AE30A7EA9052A12017AF9
316:1013B0008B01C0F164F08160018389368B37991706
317:1013C0000BE80C981F09C90C29161578EB07281291
318:1013D00015B088281615D9C09A199E188A1F2E1282
319:1013E000152A161A2E161BDAC0C0E08C177F930B35
320:1013F0007FA90688188F1978FB01C0E165E13E29B5
321:10140000121A2F12138A352E121B9A1BAFEE2F12AF
322:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
323:1014200098127AE30A7EA9052A12037A8B01C0F189
324:1014300065F10A2E12162E16192A121B005104C02D
325:10144000E100EE1AB0EE2E16170EFF132F16180F2E
326:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
327:10146000AA2A161B2C161A63FC5E00007FB30263C7
328:10147000FE3163FE2B7EB30263FC3063FC2A000066
329:101480006450C0DA20DBC058168AC020D10FC0914A
330:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
331:1014A0000A80C09A2924682C7007581575D2A0D1DB
332:1014B0000F03470B18ED4DDB70A8287873022B7DC6
333:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
334:1014D00063FAE4000029221D2D25027B9901C0B08A
335:1014E000C9B62CB00728B000DA2006880A28824C3A
336:1014F000C0D10B8000DBA065AFE7C020D10FC09149
337:1015000063FBFF00022A0258024C0AA202060000F6
338:10151000022A025802490AA202060000DB70DA2001
339:10152000C0D12E0A80C09E2924682C7007581554FB
340:10153000C020D10FC09463FBC9C09663FBC4C096A2
341:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
342:10155000C2A02AB4002C200C63FF27008D358CB765
343:101560007DCB0263FDD263FC6D8F358ED77FEB029E
344:1015700063FDC563FC6000006C1004C020D10F0047
345:101580006C1004C020D10F006C10042B221E2822E6
346:101590001DC0A0C0942924062A25027B8901DBA056
347:1015A000C9B913ED04DA2028B0002CB00703880A6B
348:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
349:1015C0000F0000006C10042C20062A210268C805B8
350:1015D00028CCF965812E0A094C6591048F30C1B879
351:1015E0000F8F147FB00528212365812716ECF3297E
352:1015F000629E6F98026000F819ECEF2992266890BD
353:10160000078A2009AA0C65A0E72A629D64A0E12B45
354:10161000200C0CB911A6992D92866FD9026000DBBF
355:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
356:101630000C65E0C7279285C0E06470BF1DECEC68C4
357:10164000434E1CECEB8A2B0CAA029A708920089955
358:10165000110D99029971882A98748F329F752821EB
359:1016600004088811987718ECDC0CBF11A6FF2DF246
360:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
361:1016800074DB40580E81D2A0D10FC020D10F0000D2
362:101690000029CCF96490B12C20668931B1CC0C0CB6
363:1016A000472C24666EC60260008509F85065807F6D
364:1016B0001CECD18A2B0F08400B881008AA020CAA38
365:1016C000029A7089200899110D99029971883398AE
366:1016D000738C329C728A2A9A748934997563FF7D5F
367:1016E00000CC57DA20DB30DC4058155FC020D10F2A
368:1016F00000DA20C0B65815EE63FFE500DA20581571
369:10170000EC63FFDC00DA20DB30DC40DD5058167A79
370:10171000D2A0D10FC858DA20DB305814C72A2102D2
371:1017200065AFBDC09409A90229250263FFB200007C
372:101730002B21045814731DECADC0E02E24668F30AD
373:101740002B200C0F8F1463FF66292138C088798302
374:101750001F8C310CFC5064CF562B2104C0C0581490
375:10176000681DECA2C0E08F302B200C0F8F1463FF9C
376:101770003E2C20662B2104B1CC0C0C472C2466583F
377:1017800014601DEC9AC0E02E24668F302B200C0FC5
378:101790008F1463FF1A0000006C1004C0B7C0A116BC
379:1017A000EC9615EC88D720D840B822C04005350209
380:1017B0009671957002A438040442C94B1AEC7B1947
381:1017C000EC7C29A67EC140D30F6D4A0500808800BD
382:1017D000208C220A88A272D10FC05008A53875B09B
383:1017E000E363FFD76C10069313941129200665520A
384:1017F00088C0716898052A9CF965A29816EC6F2933
385:1018000021028A1309094C6590CD8AA00A6A512ADF
386:10181000ACFD65A0C2CC5FDB30DA208C115815120C
387:10182000C0519A13C7BF9BA98E132EE20968E060CE
388:101830002F629E1DEC606FF8026000842DD2266836
389:10184000D0052F22007DF9782C629DC79064C0706E
390:101850009C108A132B200C2AA0200CBD11A6DD0A97
391:101860004F14BFA809880129D286AF88288C09792E
392:101870008B591FEC520FBF0A2FF2A368F0052822E4
393:10188000007F894729D285D4906590756000430018
394:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
395:1018A000BF0A6E96102FF2A368F00488207F890586
396:1018B00029D285659165DA2058157DC95C6001FFE4
397:1018C00000DA20C0B658157A60000C00C09063FFA3
398:1018D000B50000DA205815766551E48D138C11DBC4
399:1018E000D08DD0022A020D6D515813E39A1364A1D2
400:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
401:10190000FD00C091C0F12820062C2066288CF9A784
402:10191000CC0C0C472C24666FC6098D138DD170DE5C
403:1019200002290A00099D02648159C9D38A102B211A
404:10193000045813F38A13C0B02B24662EA2092AA0E0
405:10194000200E28141CEC298D1315EC1DC1700A778C
406:101950003685562DDC28AC2C9C12DED0A8557CD3C5
407:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
408:10197000D4A028200CB455C0D02B0A882F0A800C84
409:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
410:1019900012DEF0AC882D84CF28120229120378F3CE
411:1019A000022EFDF8289020D3E007880CC1700808AB
412:1019B00047289420087736657FAB891313EC10898C
413:1019C00090C0F47797491BEC0EC1CA2821048513F7
414:1019D000099E4006EE11875304881185520E880235
415:1019E0000C88029BA09FA18F2B9DA598A497A795DB
416:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
417:101A00001106CC082BC2852DE4CF2BBC202BC6851C
418:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
419:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
420:101A300088201EEBE38F138EE48FF40888110A8848
421:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
422:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
423:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
424:101A70002F24722C2502C020D10F871387700707EF
425:101A80004763FD6E282138C099798B0263FE9ADD89
426:101A9000F063FE9500DA20DB308C11DD505815968E
427:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
428:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
429:101AC000C020D10F6C1006292102C0D07597102AB2
430:101AD00032047FA70A8B357FBF052D25020DD90261
431:101AE000090C4C65C18216EBB41EEBB228629EC095
432:101AF000FA78F30260018829E2266890078A2009B3
433:101B0000AA0C65A17A2A629DDFA064A1772B200C24
434:101B10000CBC11A6CC29C286C08C79830260015707
435:101B200019EBA709B90A2992A368900788200988A8
436:101B30000C65814327C2851CEBA964713A89310980
437:101B40008B140CBB016FB11D2C20669F10B1CC0C07
438:101B50000C472C24666EC60260014009FF5065F1F7
439:101B60003A8A102AAC188934C0C47F973C18EBA974
440:101B70001BEBA88F359C719B708B209D7408BB025A
441:101B80009B72C08298751BEBA40F08409B730F8853
442:101B90001198777FF70B2F2102284A0008FF022FA8
443:101BA0002502C0B4600004000000C0B07E97048F1E
444:101BB000362F25227D970488372825217C9736C02B
445:101BC000F1C0900AF9382F3C200909426490861927
446:101BD000EB7618EB7728967E00F08800A08C00F05A
447:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
448:101BF0002AAC182A669D89307797388F338A321835
449:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
450:101C10008498E1882B9DE59AE69FE71AEB78099F67
451:101C20004006FF110FCC020A880298E2C1FC0FCCDB
452:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
453:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
454:101C5000CF5CC020D10FC081C0900F8938C0877978
455:101C6000880263FF7263FF6600CC57DA20DB30DC4A
456:101C7000405813FDC020D10FDA2058148D63FFE8BF
457:101C8000C0A063FE82DA20C0B658148963FFD90071
458:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
459:101CA000045813171EEB44C0D02D246663FEB10008
460:101CB0006C1006D62019EB3F1EEB4128610217EB92
461:101CC0003E08084C65805F8A300A6A5169A3572B29
462:101CD000729E6EB83F2A922668A0048C607AC9343E
463:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
464:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
465:101D00002C160068E0052F62007EF91522D285CFDF
466:101D10002560000D00DA60C0B6581465C85A60012D
467:101D20000F00DA60581462655106DC40DB308D30FC
468:101D3000DA600D6D515812D0D3A064A0F384A1C015
469:101D40005104044763FF6D00C0B02C60668931B157
470:101D5000CC0C0C472C64666FC60270960A2B61048B
471:101D60005812E7C0B02B64666550B42A3C10C0E737
472:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
473:101D8000EB0A18EB0B28967E8D106DDA0500A08803
474:101D900000C08CC0A089301DEB1A77975388328C15
475:101DA000108F3302CE0BC02492E12261049DE00427
476:101DB00022118D6B9BE59FE798E61FEB1009984079
477:101DC0000688110822020FDD02C18D9DE208220261
478:101DD00092E4B4C22E600C1FEB000CE811A7882C13
479:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
480:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
481:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
482:101E1000C0F00ADF387FE80263FF6C63FF600000F8
483:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
484:101E300063C020D10F0000006C10042920062A2264
485:101E40001EC0392C221D232468C0307AC107DDA0B2
486:101E5000600004000000C0D06E9738C08F2E0A804A
487:101E60002B2014C0962924060EBB022E21022B24FF
488:101E7000147E8004232502DE307AC10EC8ABDBD08D
489:101E8000DA202C0A00580B062E21020E0F4CC8FE39
490:101E90006000690068956528210208084C65805C2F
491:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
492:101EB0002668B0048C207BC95329A29D1FEAC16407
493:101EC000904A9390C0C31DEAD52B21049D9608BB70
494:101ED000110CBB029B979B911CEAD2C08523E4A204
495:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
496:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
497:101F00000F8EF912EAC82E2689C020D10FDA20C020
498:101F1000B65813E7C020D10F6C10062A2006941083
499:101F200068A80528ACF965825029210209094C6589
500:101F3000920ACC5FDB30DA208C1058134BC051D39F
501:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
502:101F50008F3A16EA99B1FB64B13128629E6F88020C
503:101F60006001ED294C332992266890078A2009AA3E
504:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
505:101F8000B7110677082972867983026001CD0CB9F2
506:101F90000A2992A36890082C220009CC0C65C1BBC9
507:101FA0002772856471B5282006288CF96481E52C98
508:101FB00020668931B1CC0C0C472C24666EC60260B9
509:101FC00001A109F85065819B2A21048CE488361E02
510:101FD000EA7D088914A9CC08084709881019EA92F3
511:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
512:101FF000CC029C722E302C293013283012049910F8
513:102000000688100CEE109F740EAE0209880208EECE
514:10201000029E738C3704AA119C758938C0F4997696
515:102020008839C0C1987718EA828E359C7B9E780EDD
516:102030008E1408EE029E7A8E301CEA7177E73088A3
517:102040003289339C7C9F7D0E9C4006CC118F2B29BE
518:1020500076132D76112876120CAA0218EA68C1C9E7
519:102060000CAA022A761008FF029F7EC0AA60000117
520:10207000C0A6A4BC0CB911A6992892852DC4CF087E
521:10208000A80B289685655100C020D10F2B200C0C81
522:10209000B7110677082A72860CB90A6FA902600187
523:1020A000182992A36890082A220009AA0C65A109A0
524:1020B0002A728564A1032C203D0C2C4064C08C8CBA
525:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
526:1020D0001464C0777CF374283013C0FC78F86CC0AB
527:1020E00090292467090C4765C0D719EA4718EA45C3
528:1020F0008F208C3508FF110C8C1408FF0288E49F98
529:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
530:102110008809880298A218EA3DA4BC2F72852DC4B4
531:10212000CF2FFC102F76852F210229207208FF0265
532:10213000B2992924722F2502C020D10F00CC57DA82
533:1021400020DB308C105812C8C020D10FC09163FF23
534:102150008FDA20C0B658135663FFE100DA20581317
535:102160005463FFD82B21045811E61EEA152B200CCE
536:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
537:10218000DD505813DDD2A0D10F2A2C748B10580BC0
538:10219000BED2A0D10F292138C08879832E8C310C72
539:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
540:1021B000EA048F3A2B200C63FE0DDA2058133C639F
541:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
542:1021D0002104B1CC0C0C472C24665811C91EE9F817
543:1021E0002B200CC0D02D24668F3A63FDDA0000004E
544:1021F0006C10089514C061C1B0D9402A203DC04080
545:102200000BAA010A64382A200629160568A8052C9D
546:10221000ACF965C33F1DE9EA6440052F120464F27E
547:10222000A02621021EE9E606064C6562E615E9E2F3
548:102230006440D98A352930039A130A990C6490CCEA
549:102240002C200C8B139C100CCC11A5CC9C112CC2F7
550:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
551:10226000A368E0098620D30F0E660C6562C2881150
552:102270002882856482BA891364905EDA80D9308CB2
553:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
554:102290007FB718B88A293C10853608C6110E660229
555:1022A0009681058514A5D50F550295800418146DE7
556:1022B0008927889608CB110888140EBB02A8D82954
557:1022C0009C200F88029BA198A088929BA308881449
558:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
559:1022E000131EE9BD86118D10286285AEDD08FF0B37
560:1022F0002CD4CF2821022F66858B352A207209889D
561:1023000002ABAA2825022A2472C020D10F29529E8E
562:1023100018E9A96F980260020B28822668800829B4
563:10232000220008990C6591FC2A529DC1CE9A126434
564:10233000A1F22B200C2620060CB8110588082D824E
565:10234000860EBE0A7DC3026002052EE2A368E00885
566:102350002F22000EFF0C65F1F6288285D780DE80E3
567:102360006482009816266CF96462012C206688311C
568:102370002CCC010C0C472C24666EC6026001BC08F4
569:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
570:10239000048B2D2830102F211D0C88100BFB090AEF
571:1023A00088020988020CBB026441529B709D71989F
572:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
573:1023C0007FD714273C10BCE92632168C3996E69C40
574:1023D000E78A37B4382AE6080B131464304A2A8295
575:1023E0001686799A9696978C778A7D9C982B821779
576:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
577:10240000086DB9218BC996A52692162AAC18B899E1
578:102410009BA196A08BC786CD9BA22B921596A49BC1
579:10242000A386CB2CCC2026A605C0346BD4200D3B34
580:102430000C0DD8090E880A7FB705C0909988BC8812
581:10244000C0900B1A126DAA069988998B288C18C017
582:10245000D01BE97A1CE97916E96EB1FF2A211C2309
583:10246000E6130F0F4F26E6122F251D7FA906C0F099
584:10247000C08028251D05F6111AE9678F202BE61567
585:102480002CE6162DE61726E6180AFA022AE6142983
586:102490002006299CF96490F829200C8D14C0801A1C
587:1024A000E94E0C9C11AA99A5CCDA202BC285289460
588:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
589:1024C000D10F8A356FA546D8308BD56DA90C8A8679
590:1024D0000A8A14CBA77AB335288C10C080282467C9
591:1024E000080B4765B10BDA20DB302C12055811DEE2
592:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
593:102500006461059B709D719872C04D63FEA4C0818B
594:1025100063FFC9008814CC87DA20DB308C15581192
595:10252000D2C020D10FDA20C0B658126163FFE40098
596:1025300000DA208B1058125E63FFD8009E178A12B3
597:102540002B21045810EF8E17C09029246663FE34A7
598:10255000C08063FE06DA20DB308C15DD505812E6B1
599:10256000D2A0D10FDA2058125263FFA7002B2138D6
600:10257000C0A87BAB026001048C310CFC5064CE041B
601:102580008A122B2104C0C098175810DD8E1763FDE6
602:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
603:1025A00028206A7F87050826416460A3C09016E949
604:1025B000141CE9232A200723E61BB1AA0CFD0226DE
605:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
606:1025D0001C8B260A0A472BE6208B282AE53E2BE691
607:1025E000212924072820062A2064688346B44463EE
608:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
609:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
610:1026100032162B76129D712D761328761489960A20
611:102620002A14AA990C9902997069ED71C14663FD4B
612:102630008100000064AFB51DE8E22C20168DD20A9F
613:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
614:102650002B21046EB81E2C2066B8CC0C0C472C2401
615:1026600066C9C09E178A125810A68E17C0348F20D4
616:10267000C0D02D2466C06826240663FF2E8A122B44
617:1026800021042C20669817B1CC0C0C472C246658DA
618:10269000109C8E178716C0D02D246663FCE68D35FE
619:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
620:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
621:1026C0001027D63006460127D6320A440117E8DF24
622:1026D00024D631A74727D63324F21596B794B68D62
623:1026E000C3BCBB9DB58D35299C107D83C22F211D98
624:1026F000C14663FD330000006C1006292006289CAB
625:10270000F86582BF2921022B200C09094C6590E154
626:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
627:102720000260028C19E8A609B90A2992A3689007E9
628:102730008C2009CC0C65C27829A2856492722D6226
629:102740009E1AE89C6FD80260026E2AA22629160102
630:1027500068A0082B22000ABB0C65B25C29629DC1EF
631:102760008C6492542A21200A806099102C203CC746
632:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
633:10278000260DBD112DDC1C0D0D410EDD038E27B174
634:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
635:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
636:1027B000A16000093E01073EB1780987390B770A0D
637:1027C00077EB0260020A2C2123282121B1CC0C0CCA
638:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
639:1027E000DB30581095292102CC96C0E80E9E022EAF
640:1027F0002502CC57DA20DB30DC4058111BC020D139
641:102800000F2C20668931B1CC0C0C472C24666EC687
642:10281000026001D309FD5065D1CD2F0A012E301180
643:1028200029221464E01128221B090C4400C1040071
644:10283000FA1A0A880228261B2E3010C0A0C0B094B5
645:102840001295131CE85F88302CC022088D147787FE
646:1028500004C0F10CFA38C041C0F225203CC0840805
647:1028600058010F5F010F4B3805354007BB10C0F012
648:10287000084F3808FF100FBB0228ECFEC0F0084FCD
649:1028800038842B0BA8100AFF102A21200F88020B76
650:10289000880208440218E86E8F1108440228212596
651:1028A0000A2A140828140488110A88022A21049488
652:1028B000F08B2004E41008BB1104BB02C04A04BB27
653:1028C000029BF1842A08AB110BEB0294F40A541119
654:1028D0000B44020555100D1B4094F707BB100B5518
655:1028E00002085502C08195F68433C05094F3B19428
656:1028F0008B3295F898F99BF2C080C1BC24261499BC
657:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
658:10291000FE883998FF853525F6108436851324F610
659:10292000118B3784122BF612C0B064C07E893077C9
660:1029300097438D3288332E30108F111CE83109995E
661:10294000400699112CF614C0C42CF6158C2B2DF6CC
662:102950001A28F61B2BF61904A81109880208EE02A2
663:1029600019E827C18008EE0209C90229F6162EF6D9
664:1029700018C09E600001C09A2F200C18E8170CFEAA
665:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
666:1029900085C87F8A268929A7AA9A260A990C090937
667:1029A00048292525655050C020D10F00C09A63FFEB
668:1029B000C6DA2058113F63FE38DA20C0B658113C01
669:1029C00063FE2E0068973C2B9CFD64BE24C020D182
670:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
671:1029E000DC3865CDE063FE098A102B2104580FC442
672:1029F000C0B02B246663FE21DB402A2C745809A248
673:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
674:102A100020D10F006C1004290A801EE80E1FE80E5A
675:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
676:102A3000029ED19CD0C051C07013E80A14E8091856
677:102A4000E8072AB285A82804240A234691A986B853
678:102A5000AA2AB685A98827849F25649FD10F0000E4
679:102A60006C100AD630283010292006288CF9648290
680:102A70009B68980B2A9CF965A1B2022A02580FABF9
681:102A800089371BE7CFC89164520E2A21020A0C4CE9
682:102A900065C2588D3019E7C874D7052E212365E229
683:102AA0009E2F929E1AE7C46FF8026002532AA22654
684:102AB00068A0082C22000ACC0C65C2442A929D64AE
685:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
686:102AD00018E7BC64B0052880217B8B432B200C18A1
687:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
688:102AF0002EE2A368E0052F22007EF9372CC2859CC8
689:102B00001864C2332B212F87660B7B360B790C6F31
690:102B10009D266ED2462C203D7BC740CE5560001EC0
691:102B20002A200CC1B28C205811229A1864A2458D1B
692:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
693:102B4000E06000022E60030EDB0C6EB20EDC700C37
694:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
695:102B6000C1C82D21205810BC8C268B279A160CBB6F
696:102B70000C7AB3348F18896399F3886298F28E6562
697:102B80009EF82D60108A189D1768D729C0D09DA97E
698:102B90002C22182B22139CAB9BAA97A58E667E73C2
699:102BA00002600097CF5860001FDA208B1658108201
700:102BB00065A13863FFBDC081C0908F18C0A29AF98B
701:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
702:102BD0001026C051D6A0C0C02BA0102CA4039B1758
703:102BE0002C1208022A02066B02DF702D60038E177A
704:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
705:102C0000188C148B16ACAC2C64038A268929ABAAC9
706:102C10000A990C9A26886609094829252507880CEF
707:102C200098662F2218A7FF2F261863FE96DA20DB5E
708:102C300030DC40DD50581130D2A0D10FC0302C20F4
709:102C4000668961B1CC0C0C472C24666EC60260000C
710:102C5000D2C03009FD5065D0CA8E6764E0696470E7
711:102C600066DB608C18DF70DA202D60038E170CDDB8
712:102C7000119E10AD6D2DDC201EE7755800F923263E
713:102C800018DA208B16DC402F2213DD50B1FF2F26DF
714:102C900013580FC5D2A0D10F0028203D0848406529
715:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
716:102CB00014CACF7CD32D2AAC10C090292467090DEB
717:102CC0004764DDC5600092002C1208066B022D6C73
718:102CD00020077F028E17DA209E101EE75C58007DC9
719:102CE00063FF9A00C09163FFD1000000655081DA54
720:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
721:102D00000FDA20C0B658106A63FFE000006F95022A
722:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
723:102D2000D2A0D10F8A152B2104580EF52324662832
724:102D30006010981763FF2100DA2058105D63FFAB25
725:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
726:102D50009409A90229250263FF91DB30DC40DD5094
727:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
728:102D70000FC020D10FDA202B200C58107263FF6B8C
729:102D80006C1004282006C062288CF8658125C0508C
730:102D9000C7DF2B221BC0E12A206B29212300A104BD
731:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
732:102DB0002A246B04E4390DCC030CBB012B261B64C5
733:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
734:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
735:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
736:102DF000E71E64B09B8C2B2421040DCC029CB08870
737:102E000020C0C50888110C880298B1882A0844118E
738:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
739:102E20000E9E0825E4CF2DDC282DA6852921020938
740:102E3000094C68941A689820C9402A210265A00BA1
741:102E40002A221E2B221D7AB10265A079C020D10F43
742:102E50002C212365CFDE6000082E21212D21237E29
743:102E6000DBD52B221E2F221D2525027BF901C0B0A8
744:102E700064BFC413E6D02CB00728B000DA20038862
745:102E80000A28824CC0D10B8000DBA065AFE763FF4E
746:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
747:102EA000A08B2008BB1106BB029BA1893499A263A9
748:102EB000FF790000262468DA20DB30DC40DD505842
749:102EC000108ED2A0D10FDA202B200C580FF9C02081
750:102ED000D10F00006C1006073D14C080DC30DB40D1
751:102EE000DA20C047C02123BC3003283808084277C5
752:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
753:102F0000D30F6DDA0500508800308CC0E0C020255A
754:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
755:102F2000220F8940941077F704C081048238C0F1E1
756:102F30000B2810C044C02204540104FD3802520181
757:102F400002FE3808DD10821C07EE100E6E020EDD48
758:102F500002242CFEC0E004FE380AEE100E88020D9A
759:102F600088028DAB1EE69B08D8020E880298B0C07E
760:102F7000E80428100E5E0184A025A125084411084C
761:102F80004402052514045511043402C0810E8E3903
762:102F900094B18FAA84109FB475660C26A11FC0F24D
763:102FA000062614600009000026A120C0F20626149F
764:102FB0000565020F770107873905E61007781008C5
765:102FC000660206550295B625A1040AE611085811B5
766:102FD00008280208660296B7C060644056649053A1
767:102FE000067E11C0F489C288C30B340B96459847FE
768:102FF000994618E6829F410459110E99021FE680F6
769:10300000020E4708D80298420E99029F40C1E00E76
770:10301000990299442FA00CB4380CF91114E66F1ED4
771:10302000E666A4FFAE992E928526F4CF0E880B2873
772:103030009685D10F2BA00C1FE6601CE6670CBE1115
773:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
774:10305000D10FC08005283878480263FEA263FE962F
775:103060006C1006C0C06570F18830C03008871477D6
776:103070008712C0B0C0A619E652299022C030CC9762
777:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
778:103090008225203C0B3F109712831CC070085801FA
779:1030A0000D5D01089738C0800B98380777100488A9
780:1030B00010086802087702C0800D98382D3CFE0881
781:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
782:1030D0000CB8100FDD02053B400EDD029D4089203B
783:1030E000043D100899110D99022D210409A9020827
784:1030F000DD119941872A05B9100D3D020ABB110D5A
785:10310000BB02087702974428212587120828140457
786:103110008811071E4007EE100E99027566092621D8
787:103120001F062614600006002621200626140868C3
788:10313000029B47098802984629200CD2C0C0800C07
789:103140009E111BE6251FE61CAB99AFEE2DE28528EC
790:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
791:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
792:10317000981008770C9FD898D989538F5299119934
793:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
794:1031900088108D1108E70C9751AD8DD7F078DB01C1
795:1031A000B1F79D5397528830C03008871408884083
796:1031B000648ED565BEC963FEBC0000006C1004D7E8
797:1031C00020B03A8820C0308221CAA0742B1E2972F8
798:1031D000046D080FC980C9918575B133A2527A3B3D
799:1031E0000B742B0863FFE900649FECD10FD240D130
800:1031F0000F0000006C100AD6302E3027D950DA406C
801:1032000015E5F02430269A1529160464E00264932B
802:10321000732920062A9CF865A3CE2A2102270A04D6
803:103220000A0B4C65B3978C3074C7052D212365D4E8
804:10323000A0C0A62B0A032C2200580F3664A3B9178E
805:10324000E5DE8E389A1664E3BA2F6027285021C92C
806:10325000F37E8311C2B08C202A200C580F55D7A0C2
807:10326000CDA16004A200C2B08C202A200C580F29E6
808:10327000D7A064A4862F212E8B680FBF360FB90C00
809:103280006F9D54296027D5B06E920528203D7B8F15
810:103290004CDA20DB50C1C42D211F580EEF8B269A2B
811:1032A000189A1989272AAC380B990C7A9353896399
812:1032B000C08099738F6298789F728E659E798D67B2
813:1032C0009D7B8C6695759C7A8E687E53026000B1FA
814:1032D0008B1465B050600038DBF063FFA5008A14E2
815:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
816:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
817:10330000FFE2DA208B18580EAC65A2B163FF9E0075
818:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
819:103320002D16042CA403DC70DA20DB60DF502D6046
820:1033300003C0E09E109D171EE5B90CDD110D6D0850
821:103340002DDC285BFF478E668F678817AF5FA8A8C4
822:1033500028640375FB01B1EE8A189E669F67892673
823:103360008829AA9909880C99268E6808084805EECC
824:103370000C28252515E5939E6865EECC63FEE600D6
825:103380000000C9432F21232B21212FFC010F0F4FB8
826:103390002F25237FBB026003142C20668961B1CCEA
827:1033A0000C0C472C24666EC60260022809FD50658D
828:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
829:1033C00020DB601EE5AB2D6003C08098100CDD1182
830:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
831:1033E0008C202A200C580ECB0AA70265A00FC0B073
832:1033F0002C22002A200C580EC7D7A064AFEFDA2089
833:10340000C1BCC1C82D21208F188E268929AFEE9E00
834:10341000260E990C090948292525580E8FC090C001
835:1034200050C0C288609A191EE566C0A12EE022082D
836:103430008F14778704C0810E8938C0800B93102DBC
837:10344000203C2921200CDC0104DB010929140BA8F4
838:10345000380CA5380D3D401CE57E8B2B08881007E5
839:1034600055100855020533022821250F154003BBCE
840:10347000020CBB0207551005D3100828140ADD11F1
841:103480000488110988020533022921040833029BAC
842:1034900070C0808A201BE57708AA110BAA029A71D6
843:1034A000C0A1852A9376957408931103DD020ADD85
844:1034B000029D778C63C1DC9C738B6298789A799BB0
845:1034C00072232214C0C0B1352526149C7B9D7593B0
846:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
847:1034E00025621B957F2362172376102D62182D7697
848:1034F000112C62192C761264E0B98E6077E73DC01A
849:10350000FE13E53E1DE53FC1818A628B6304951180
850:103510000E9C4006CC110C5502247615085502C0AD
851:10352000802D76148D2B2B761B2A761A287619255A
852:10353000761803DD022D76166000030000C0FA2E17
853:10354000200C19E52518E51CA9E90CEE11A8EEC020
854:10355000802DE2852894CF0DFD0B2DE685DA208B9A
855:10356000198C158D14580D90D2A0D10FDC70DF503E
856:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
857:103580005563FE53002B203D0B4B4065BC826FE51D
858:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
859:1035A000F3162AAC10C090292467090F4764FC6009
860:1035B00060015F00C0FA63FF85C09163FFE8881473
861:1035C000658168DA20DB608C15580DA7C020C0909B
862:1035D00029A403D10F8A162B2104580CC9C0A02A94
863:1035E00024668E6863FDCA00002B9CF965B0FDDA85
864:1035F00020580CCE63FC220000DA20C0B6580E2CF6
865:1036000063FFBA002B200C0CBE11A7EE2DE286C181
866:10361000C27DC30260011819E4E909B90A2992A31D
867:103620006890082A220009AA0C65A10326E2856495
868:1036300060FD2C20668931B1CC0C0C472C24666FC0
869:10364000C60270960C8A162B2104580CADC0D02DE2
870:1036500024668E3077E74D1CE4E91BE4E98F32885D
871:1036600033C0A42D21040E994006991104DD1109DF
872:10367000DD029A61C19009DD029B60C0908B2B9D99
873:10368000649F66986799650CBB029B6228200C1AA0
874:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
875:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
876:1036B0008B142C2523C8B7022A02066B02580CDE95
877:1036C0002A210265AEF7C0D80DAD022D250263FE9A
878:1036D000EC008E14C8E8DA20DB30580CD72A21021F
879:1036E00065AEDA07AF022F250263FED100DA20DBD8
880:1036F000308C158D14580E80D2A0D10FDA202B20DB
881:103700000C580DEB63FEB600DA202B200C580E0D82
882:1037100063FEAADA20DB308C152D12042E0A8028D5
883:103720000A00282468580CD663FAE500C020D10F9F
884:10373000DA20580DDF8914CD92DA20DB308C155851
885:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
886:103750000F2A2C748B1558064CD2A0D10F000000F4
887:103760006C100E28210224160108084C6583A91F3D
888:10377000E49229F29E6F98026003AD1EE48E29E266
889:10378000266890082A220009AA0C65A39B24F29DB2
890:103790006443952A31160A4B412B240BB4BB0B0B07
891:1037A000472B240C0CB611AF66286286C1CC78C3B7
892:1037B0000260037F19E48209B90A2992A36890077D
893:1037C0008C2009CC0C65C36B276285647365293135
894:1037D00009C0D02D24668C3599139C2A88369C14F8
895:1037E000982B8E3798159E169E2C8C38C0E10C5C59
896:1037F000149C179C2D88392925042E251D28251C4D
897:103800002C3028C0822C243C2930290C0C4708C8B5
898:103810000129243D29311598189912090841089960
899:103820000C299CEC29251F7EC725921C8212282A70
900:1038300000082060991B01023E00093EB128098260
901:1038400039891B0E221102990C821C29251F821C0A
902:10385000941D951E24211F15E4880451609A10C1FF
903:10386000802B1610252014961F05054301063E00E7
904:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
905:10388000441C893D2E26132E26142E26152E246B1D
906:1038900025241406D61CC05025261825261B2524B1
907:1038A000672524682832112525232525242525254B
908:1038B00025252C2925222D25202B252124252E26A2
909:1038C000252F14E46F16E46D1BE45298192D211C6A
910:1038D000C08498719B70892095759577957F967CAB
911:1038E000967E98799B7894731BE46714E4680C388F
912:1038F000400288100C064015E464016610947D9B1C
913:1039000074841D1BE444086602957B18E431851E0F
914:103910000B99029972997A0866022B121096768694
915:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
916:1039300000A10400E81A7D8B0263FFEE891AC0E043
917:10394000961F1DE43E2B1610951E941D28203D2920
918:10395000761A297612C040C051C0B22D76130806DF
919:10396000408D170B8801065E380AEE101BE44A08EA
920:103970005438B0A609661188140B44102B761B042A
921:10398000EE028B1614E44308DA1406EE020D8810DA
922:103990002A761E86131AE41C04EE020D66110866D0
923:1039A000022E76160D14141EE41A0D44110BD814B1
924:1039B0000866020A44022E76182E76102476172600
925:1039C000761FC084287619287611C76F0C24400F03
926:1039D00044111CE3FB26761D26761C2676152676DA
927:1039E000148A262676242676252976222E762028E5
928:1039F00076218E1888150DB91016E4278BC70D880F
929:103A0000110E5E39ADBB851904EE022676230988B6
930:103A100002861F89102876260A04480544110505E8
931:103A2000480E551105440204EE02851E841D2E76B3
932:103A3000272820069B2D29246A2E31172B12102EA1
933:103A40002538CC83C0D02D2407C0D7090840648016
934:103A50008E9A290928416480AA64E0B42D2406C006
935:103A60009809E9362D0AA02A628501C404ADAA2D61
936:103A700021042A668508DD11883F8E3E2732100812
937:103A8000EA1800C40408E8180088110ECE5308771D
938:103A900002C08308DD029D4118E401090D4E9840E3
939:103AA00088209A4397449D4517E3FE1DE3CB058884
940:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
941:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
942:103AD00097CA28A4A268711C655060C020D10F004D
943:103AE0002D2406C080C09809E9360E893863FF731B
944:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
945:103B0000D600000065EF54C098C0D82D240663FF8E
946:103B1000522D2406C09063FF4ACC57DA20DB308C4C
947:103B200011580C51C020D10F00DA20C0B6580CE05B
948:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
949:103B400011580551D2A0D10F6C10062820068A33D7
950:103B50006F8202600161C05013E39729210216E3CE
951:103B600096699204252502D9502C20159A2814E331
952:103B7000948F2627200B0AFE0C0477092B712064F2
953:103B8000E1398E428D436FBC0260016F00E104B0E9
954:103B9000C800881A08A80808D80298272B200668A9
955:103BA000B32ECE972B221E2C221D0111027BC901A0
956:103BB000C0B064B0172CB00728B000DA2003880A20
957:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
958:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
959:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
960:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
961:103C0000007E894F29C2851EE37E6490461FE38CA7
962:103C10009E90C084989128200A95930F88029892CC
963:103C20008E200FEE029E942F200788262F950A984B
964:103C3000969A972E200625240768E3432921022A15
965:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
966:103C500063FF4E002E2065CBEDC082282465C9F697
967:103C600005E4310002002A62821BE36D2941020B48
968:103C7000AA022A668209E43129210263FF23000097
969:103C800064DFB88F422E201600F1040DEE0C00EE1A
970:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
971:103CA000B0293221283223B4992936217989A92BC8
972:103CB00032222B362163FFA0C020D10F9F2725245D
973:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
974:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
975:103CE000D0868E290EAE0C66E089C0F128205A28B5
976:103CF0008CFE08CF3865FEE863FF580000E00493AF
977:103D000010C0810AF30C038339C78F08D80308A8B1
978:103D10000108F80C080819A83303C80CA8B82875BE
979:103D200020030B472B24158310CBB700E104B0BC54
980:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
981:103D4000990209094F29250263FE50002D206A0DB2
982:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
983:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
984:103D7000FF79000028221F658E2763FF6E25240629
985:103D800029210263FE1B00006C10066571332B4C69
986:103D900018C0C7293C18C0A1C08009A8380808422B
987:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
988:103DB0000F6DAA0500B08800908C8940C0A00988CA
989:103DC000471FE32B080B47094C50090D5304DD1026
990:103DD000B4CC04CC100D5D029D310CBB029B30882D
991:103DE000438E2098350FEE029E328D26D850A6DDE8
992:103DF0009D268E40C0900E5E5064E0971CE3111E1D
993:103E0000E300038B0BC0F49FB19EB02D200A99B341
994:103E10000CDD029DB28F200CFF029FB48E262D2058
995:103E2000079EB68C282DB50A9CB72924072F20069B
996:103E30002B206469F339CBB61DE2E22320168DD224
997:103E40000B330C00D10400331AB48DA3C393292281
998:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
999:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
1000:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
1001:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
1002:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
1003:103EA000AFEEACBB22B28529E4CF02820B22B685ED
1004:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
1005:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
1006:103ED00018DD50580A9B8940C08063FEE3066E02DD
1007:103EE000022A02DB30DC40DD505800049A10DB501F
1008:103EF000DA70580465881063FEF700006C100692B3
1009:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
1010:103F10000BD9A07DA30229ADF875C302600084C04F
1011:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
1012:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
1013:103F4000D8F000808800708C879068B124B2227706
1014:103F5000D3278891C0D0CB879890279C1000708879
1015:103F600000F08C9D91CB6FC08108BB0375CB36638D
1016:103F7000FFB4B1222EEC1863FFD485920D770C8626
1017:103F8000939790A6D67D6B01B1559693959260005C
1018:103F900016B3CC2D9C188810D9D078D3C729DDF85A
1019:103FA00063FFC100C0238A421BE2C000CD322D4412
1020:103FB000029B3092318942854379A1051EE2BC0EF5
1021:103FC000550187121BE2AB897095350B9902993226
1022:103FD00088420A880C98428676A6A696768F44AFC9
1023:103FE000AF9F44D10F0000006C10089311D63088A9
1024:103FF00030C0910863510808470598389812282165
1025:1040000002293CFD08084C6581656591628A630A56
1026:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
1027:104020005ACCC42D0A022D245A7FE0026002158961
1028:104030002888261FE29F09880C65820F2E200B0F0F
1029:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
1030:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
1031:1040600080084837B88DB488981089601AE2557B6B
1032:1040700096218B622AA0219C147BA3179D132A20D2
1033:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
1034:104090006001C4002E200C1BE2480CEA110BAA0898
1035:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
1036:1040B000F0052822007F892C2BA28564B0AA876294
1037:1040C0008826DE700C7936097A0C6FAD1C8F279B21
1038:1040D0001508FF0C77F3197E7B729D139C149B15BA
1039:1040E000CF56600025C0B063FFD0D79063FFDD00DE
1040:1040F000009D139C14DA20DB70580B2F8B158C1449
1041:104100008D1365A06A8E6263FFCC00DA208B11DC10
1042:1041100040580AD5D6A08B15C051DE70DA20DC607D
1043:10412000DD405BFF768D138C14D9A02E200C1BE292
1044:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
1045:104140002EF4CF0B990B29A68563FF1D00DA20DC26
1046:1041500060DD40DE708912282007DF50A9882824FE
1047:10416000075BFF09D2A0D10F00DBE0DA20580B502B
1048:104170006550EF2A20140A3A4065A0EBDB60DC4072
1049:10418000DD30022A025809BCD6A064A0D584A183E0
1050:10419000A00404470305479512036351C05163FE11
1051:1041A0005C2C2006D30F28CCFD6480A568C704C012
1052:1041B000932924062C2006C0B18D641FE2019D279F
1053:1041C0009D289D298FF29D2600F10400BB1A00F066
1054:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
1055:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
1056:1041F000BB36C0E20B0B470EBB372B241618E1F978
1057:104200000A09450D0B422B240B29240AB4BE2E2487
1058:104210000C7D88572920162FCCFDB09D0A5C520DCD
1059:10422000CC362C246465FDEC0C0C4764CDE618E11B
1060:10423000E48E2888820C9F0C00810400FF1AAFEEE8
1061:104240009E2963FDCF1CE21163FE13001CE20B6389
1062:10425000FE0C8D6563FFA500DA202B200C580B396E
1063:10426000645F0FC020D10F00C020D10FC09329245C
1064:1042700016C09363FFA000006C1004C06017E1CD6E
1065:104280001DE1D0C3812931012A300829240A78A1EF
1066:1042900008C3B27BA172D260D10FC0C16550512654
1067:1042A00025022AD0202F200B290AFB2B20142E2098
1068:1042B0001526241509BB010DFF0928F1202B241414
1069:1042C000A8EE2EF52064A0A92B221E28221D011184
1070:1042D000027B8901DB6064B0172CB00728B000DADC
1071:1042E0002007880A28824CC0D10B8000DBA065AF74
1072:1042F000E7DB30DC40DD50DA205800DE29210209FE
1073:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
1074:10431000372ED02064E02D022A02033B02DC40DD70
1075:10432000505800D4D2A0D10F2B2014B0BB2B241492
1076:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
1077:1043400002D2A0D10FC020D10F2E200669E2C126D3
1078:1043500024062B221E2F221D29200B2820150D9903
1079:10436000092A9120262415AA882895207BF14960E6
1080:104370000048B0BB2B24140B0A4164A0627CB70236
1081:104380002C25022B221E2C221DD30F7BC901C0B06D
1082:10439000C9B62CB00728B000DA2007880A28824C5A
1083:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
1084:1043B000262406D2A0D10F0000DB601DE18164BF7E
1085:1043C0004F2CB00728B000DA2007880A28824CC09A
1086:1043D000D10B8000DBA065AFE71DE17963FF310001
1087:1043E00026240663FF9C00006C1004282006260A81
1088:1043F000046F856364502A2920147D9724022A02C1
1089:10440000DB30DC40DD50580019292102090A4CC874
1090:10441000A2C020D10FC0B10B9B022B2502C020D11E
1091:104420000F00022A02033B022C0A015800D1C9AA3C
1092:10443000DA20DB30DC40580A0C29A011D3A07E978B
1093:10444000082C0AFD0C9C012CA411C0512D2014062F
1094:10445000DD022D241463FFA4DA20DB30DC40DD50C4
1095:10446000C0E0580987D2A0D10F0000006C100616DA
1096:10447000E1521CE152655157C0E117E14E2821027B
1097:104480002D220008084C6580932B32000B695129BE
1098:104490009CFD6590872A629E6EA84C2A722668A0B1
1099:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
1100:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
1101:1044C0002FF2A368F0052822007F89072DD285D31B
1102:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
1103:1044E00020580883600035002D21041BE1757DBB39
1104:1044F00024DA20C0B658087ECA546001030B2B5042
1105:104500002B240BB4BB0B0B472B240C63FFA0DA202E
1106:10451000580A67600006DA20C0B6580A656550E0A0
1107:10452000DC40DB302D3200022A020D6D515808D2DA
1108:104530001CE123D3A064A0C8C05184A18EA00404B0
1109:10454000470E0E4763FF3500002B2104C08B8931D5
1110:10455000C070DF7009F950098F386EB8172C2066CB
1111:10456000AECC0C0C472C24667CFB099D105808E44B
1112:104570008D1027246694D11EE126B8DC9ED06550AC
1113:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
1114:10459000F119E10518E10728967EB04BD30F6DBAEB
1115:1045A0000500A08800C08C2C200CC0201DE10B0C45
1116:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
1117:1045C000F685D10FC0800AB83878D0CD63FFC1001E
1118:1045D0008E300E0E4763FEA12A2C742B0A01044D67
1119:1045E000025808D72F200C12E0FC0CF911A699A252
1120:1045F000FF27F4CF289285D2A008480B289685D1B2
1121:104600000FC020D10F0000006C1004C060CB55DB40
1122:1046100030DC40055D02022A025BFF942921020979
1123:10462000084CC882D2A0D10F2B2014B0BB2B24146D
1124:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
1125:10464000D2A0D10F0000022A02033B02066C02C076
1126:10465000D0C7F72E201428310126250228240A0F5E
1127:10466000EE012E241458010E63FFA300262406D267
1128:10467000A0D10F006C1006282102D62008084C6536
1129:10468000809D2B200C12E0CC0CB811A2882A8286C7
1130:10469000B5497A930260009719E0C909B90A2992CD
1131:1046A000A36890082A620009AA0C65A08228828566
1132:1046B0001CE0D46480799C80B887B14B9B819B10AF
1133:1046C000655074C0A7D970280A01C0D0078D380D75
1134:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
1135:1046E0000F6D4A0500808800908C2E3008C0A00015
1136:1046F000EE322E740028600C19E0B80C8D11A2DD8A
1137:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
1138:10471000D685D10FC0F0038F387FA0C063FFB400EF
1139:10472000CC582A6C74DB30DC4058080BC020D10F09
1140:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
1141:104740007058087F2E30088B1000EE322E7400282F
1142:10475000600C19E0A10C8D11A2DDA988C0202CD21B
1143:10476000852284CFD2A00CBC0B2CD685D10F0000A3
1144:104770006C1004292014282006B19929241468817A
1145:1047800024C0AF2C0A012B21022C24067BA004C0DC
1146:10479000D02D2502022A02033B02044C02C0D0584D
1147:1047A00000C0D2A0D10FC020D10F00006C1004298E
1148:1047B0003101C2B429240A2A3011C28378A16C7B4A
1149:1047C000A1696450472C2006C0686FC562CA572D86
1150:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
1151:1047E000292102090E4CC8E2C020D10FC0F10F9F51
1152:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
1153:10480000DC28201406880228241463FFC7292015F9
1154:104810001BE06C2A200BC0C09C240BAA092BA120F2
1155:104820002C2415AB9929A52063FF9900C020D10F36
1156:10483000DA20DB30DC40DD50C0E0580891D2A0D156
1157:104840000F0000006C1004CB5513E06725221F0DEC
1158:10485000461106550CA32326221E25261F06440BAF
1159:1048600024261E734B1DC852D240D10F280A80C087
1160:104870004024261FA82828261E28261DD240D10FF6
1161:10488000C020D10F244DF824261E63FFD80000005D
1162:104890006C1004D620282006C0706E85026000D4FB
1163:1048A0001DE04E19E04612E0442A8CFC64A1302B36
1164:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
1165:1048C000B8110288082E828609B90A7EC3026000E8
1166:1048D0009A2992A368900509AA0C65A08E28828562
1167:1048E000648088B8891BE04A94819B80655155C0DB
1168:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
1169:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
1170:104910000500808800908CC0A029600C0C9C11A21E
1171:10492000CC2BC285AD990B4B0B2BC6852860062777
1172:1049300094CF6881222D6015D2A0C9D2C0E22E6426
1173:1049400006D10F00C0F008AF387FB0BD63FFB100E3
1174:10495000276406D2A0D10F00D2A0D10F00CC57DA25
1175:1049600060DB30DC405808C0C020D10FDA60580945
1176:104970005063FFE80028221E29221DD30F789901D9
1177:10498000C080C1D6C1C11BE018C122AB6B6480429C
1178:1049900078913F2A80000CAE0C64E0BB02AF0C643F
1179:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
1180:1049B000E864E0A32FACE764F09D2EACE664E097DA
1181:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
1182:1049D00065AFBC28612308D739D97060007B00001F
1183:1049E0002B600C0CB811A2882C82862A0A087CAB9A
1184:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
1185:104A00002A828564A0691FDFFE276504C0E3C0C455
1186:104A10002E64069CA11CE02B9FA02E600A97A30C7D
1187:104A2000EE029EA28F600CFF029FA42E60147AEF0C
1188:104A30004627A417ADBC2F828527C4CF2FFC202F7B
1189:104A4000868563FE692A6C74C0B1DC90DD4058072E
1190:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
1191:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
1192:104A7000DA6058090F63FEE4290A0129A4170DBF63
1193:104A8000082E828527F4CF2EEC202E868564500BCD
1194:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
1195:104AA0006C10062B221E28221D93107B8901C0B09A
1196:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
1197:104AC0000747010E4E01AD2D9E11C0402E0A146401
1198:104AD000B06E6D084428221D7B81652AB0007EA13E
1199:104AE0003B7FA1477B51207CA14968A91768AA1484
1200:104AF00073A111C09F79A10CC18B78A107C1AE2908
1201:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
1202:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
1203:104B200089116987BB649FB863FFDC00647FB4634D
1204:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
1205:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
1206:104B5000107CB1217AB901C0B0C9B913DF96DA204F
1207:104B600028B0002CB00703880A28824CC0D10B80E3
1208:104B700000DBA065AFE7D240D10F8910659FD463F9
1209:104B8000FFF300006C1008C0D0C8598C30292102F6
1210:104B90000C0C4760000C8E300E1E5065E19E2921E2
1211:104BA00002C0C116DF85090B4C65B0908A300A6ED1
1212:104BB0005168E3026000852F629E1BDF7E6EF85312
1213:104BC0002BB22668B0052E22007BE94727629DB7ED
1214:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
1215:104BE000F2869E12798B4117DF7507B70A2772A3E9
1216:104BF000687004882077893029F285DF90D7906526
1217:104C000090652A210419DFAE7A9B22DA205806B873
1218:104C1000600029002C21041BDFAA7CBB18DA20C00D
1219:104C2000B65806B3C95860014CC09063FFCCDA2077
1220:104C300058089F600006DA20C0B658089D655135B7
1221:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
1222:104C5000D3A064A120292102C05184A18CA0040406
1223:104C6000470C0C4763FF3E00C09B8831DBD008F83F
1224:104C700050089B3828210498116E8823282066ACA0
1225:104C80008C0C0C472C24667CBB159F139E148A1039
1226:104C90008B1158071B8E148F13C0D02D24668A30B9
1227:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
1228:104CB0000827FC106550A4B83ADF70C051C08007C7
1229:104CC000583808084264806718DF3819DF392986A8
1230:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
1231:104CE000A08930B4E37F9628C0F207E90B2C940822
1232:104CF0009B909F912F200C12DF380CF811A6882969
1233:104D00008285A2FF2DF4CFD2A009330B238685D153
1234:104D10000F22200C891218DF300C2B11A6BBA82201
1235:104D20002D24CF2CB285D2A00C990B29B685D10F9A
1236:104D3000C087C0900A593879809663FF8ADB30DAE1
1237:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
1238:104D500065AE4D2D2502C09063FE45009E142A2CA1
1239:104D600074C0B1DC70DD405806F68E14C0D01BDF75
1240:104D700028C1C863FF6AC020D10F00006C1006284C
1241:104D8000210217DF0D08084C65824929729E6F9831
1242:104D90000260025019DF082A922668A0078B200AB9
1243:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
1244:104DB000C0602B3008C0F164B0712E0AFFB0B86437
1245:104DC00081512DBCFE64D0F364505C2A2C74044BDA
1246:104DD000025800AD0AA2020600000000001ADF0817
1247:104DE0002C20076EBB0260022218DEFE13DF081BB8
1248:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
1249:104E000099D223B08026B480B13308330293D318EB
1250:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
1251:104E2000EC0B2CD685655FA2C020D10F2B21048806
1252:104E300031DE6008F85008CE386EB8102C2066B10C
1253:104E4000CC0C0C472C24667CEB026001AF2E30109A
1254:104E50002930112C301300993200CB3264E1452AFD
1255:104E600030141EDF1A00AA3278CF050E9C092BC41D
1256:104E70007F1CDF1766A0050E98092A8480B4A71846
1257:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
1258:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
1259:104EA0003303AC882A848B2CD03627848C03CC0126
1260:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
1261:104EC000FB11A7BB2DB286C0987D9302600121190A
1262:104ED000DEBB09F90A2992A36890082D220009DD9A
1263:104EE0000C65D10C2DB285DE6064D10488312B2194
1264:104EF0000408F85008CE386FB80263FEDF2C206635
1265:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
1266:104F100060013100293108292504283014B0886443
1267:104F200080A62B31092B240AC0812B30162FD423C5
1268:104F30002B240BB4BC2C240C8D378B36292504DE96
1269:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
1270:104F50009C1101C4048F380DBE1800C4040DB8188C
1271:104F600000881108FF02C08308CC0218DECC9CA187
1272:104F700098A018DECB8C209EA39FA405CC110BCF4C
1273:104F800053C1E09EA50CFF0208FF029FA218DE8914
1274:104F90002624662C729D2684A22CCC182C769D6328
1275:104FA000FE250000002D30121CDECD00DA3278DF45
1276:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
1277:104FC0002A301100AA3263FEEC2E240A2B31099BF1
1278:104FD0002B63FF5300CC57DA20DB30DC405807222C
1279:104FE000C020D10F00DA20C0B65807B163FFE5003A
1280:104FF00000DBF0DA205807AE63FFD9000058064006
1281:105000001DDE70C0F126246663FE41008B20280A55
1282:10501000FFB1CE23200A2C21040E0E472E24077840
1283:1050200031359AD02CD50A96D319DEA62ED416C0C7
1284:105030008398D1C0E309B80298D409390299D226DD
1285:10504000240763FDC958062E8D102624662B2104E3
1286:105050002F200C63FD86000008B81119DE6808EEE9
1287:1050600002882B9ED59AD0C0EF09880298D204C935
1288:10507000110E990299D4C0E49ED163FFC1000000D3
1289:105080006C1004C020D10F006C100485210D381164
1290:1050900014DE478622A42408660C962205330B935F
1291:1050A00021743B13C862D230D10FC030BC29992182
1292:1050B00099209322D230D10F233DF8932163FFE34F
1293:1050C0006C100AD620941817DE3CD930B8389819DD
1294:1050D0009914655256C0E1D2E02E61021DDE390EF0
1295:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
1296:1050F000F1558EE129D0230E8F5077E66B8F181E65
1297:10510000DE78B0FF0FF4110F1F146590CE18DE7516
1298:105110008C60A8CCC0B119DE2728600B09CC0B0D20
1299:10512000880929812028811E2A0A0009880C08BACA
1300:10513000381BDE6B0CA90A2992947B9B0260008CC1
1301:105140002B600C94160CBD11A7DD29D286B84879C6
1302:1051500083026000D219DE1909B80A2882A39817C1
1303:105160006880026000A36000A51ADE5F84180AEE62
1304:1051700001CA981BDE108C192BB0008CC06EB313C3
1305:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
1306:10519000AE6000380C0C5360000900000018DE51AE
1307:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
1308:1051B000880929812028811E2A0A0009880C08BA3A
1309:1051C000380CA90A2992947E930263FF72DA60C0B8
1310:1051D000BA58073764507360026A00001ADDF68C13
1311:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
1312:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
1313:1052000063FFC9000C0C5363FF0989607899182962
1314:10521000D285C9922B729E1DDDE76EB8232DD22652
1315:10522000991369D00B60000DDA60580721600017F0
1316:105230000088607D890A9A1A29729D9C129915CF5F
1317:1052400095DA60C0B658071A6551F98D148C18DBD1
1318:10525000D08DD0066A020D6D51580587D3A09A14DF
1319:1052600064A1E182A085A1B8AF9F1905054702029C
1320:10527000479518C05163FE602B6104C08B8931C013
1321:10528000A009F950098A386EB81F2C6066A2CC0CB0
1322:105290000C472C64667CAB119F119E1B8A15580528
1323:1052A000988E1B8F11C0A02A64669F1164F0E58957
1324:1052B0001388190FFD022E0A006DD9172F810300E4
1325:1052C000908DAEFE0080889F9200908C008088B800
1326:1052D0009900908C65514E8A10851A8B301FDDC85D
1327:1052E000881229600708580A2C82942D61040ECC7C
1328:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
1329:105300005D50A29909094729C48065D0DA2E600C46
1330:10531000C0D01FDDB10CE811AFEEA7882282852D29
1331:10532000E4CF02420B228685D2A0D10F8E300E0E22
1332:105330004763FDA2A29C0C0C472C64077AB6CD8B68
1333:10534000602E600A280AFF08E80C64810E18DDDD73
1334:1053500083168213B33902330B2C34162D350AC051
1335:105360002392319F30C020923308B20208E80292A3
1336:10537000349832C0802864072B600CD2A01CDD96C4
1337:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
1338:105390002DE685D10F8B1888138D30B88C0D8F4773
1339:1053A0000D4950B4990499100D0D5F04DD1009FFEB
1340:1053B000029F800DBB029B8165508D851AB83AC053
1341:1053C000F1C0800CF83808084264806B1BDD771947
1342:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
1343:1053E000C08CC0A063FEF30082138B161DDD8828DD
1344:1053F000600AC0E02EC4800D880202B20B99239F80
1345:1054000020C0D298229D2122600CB2BB0C2D11A786
1346:10541000DD28D28508BB0B18DD702BD685A8222E7F
1347:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
1348:10543000FF168E1B63FEA300C087C0900AF938795F
1349:10544000809263FF86C020D10F9E1B2A6C74C0B16E
1350:105450008D1858053B8E1B851A63FE7E886B821360
1351:10546000891608BE110ECE0202920B9E25B4991E1B
1352:10547000DD639F200E88029822C0EF04D8110E88A9
1353:10548000029824C0E49E21C080D2A02B600C286426
1354:10549000071CDD510CBE11A7EE2DE285ACBB28B474
1355:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
1356:1054B00020D10F006C10048633C071C03060000131
1357:1054C000B13300310400741A0462017460F1D10F29
1358:1054D0006C1004022A02033B025BFFF61CDD391B41
1359:1054E000DD83C79F88B009A903098A019AB0798032
1360:1054F0001EC0F00FE4311DDD300002002BD2821EF1
1361:10550000DD7C2AC1020EBB022BD6820AE431D10F08
1362:1055100028C102C19009880208084F28C50208E482
1363:1055200031D10F006C1004C0C00CE43112DD251A1B
1364:10553000DD2200020029A28218DD701BDD6E26210B
1365:10554000020B990108660129A68226250206E4318C
1366:1055500014DD6B15DD66236A9023261685502426FC
1367:1055600015252617222C50D10F0000006C1008D6EC
1368:10557000102B0A64291AB41ADD0F0D23111CDD103B
1369:105580000F2511B81898130E551118DD5DAC55A8EC
1370:1055900038AA332C80FF2A80FEA933288D01298068
1371:1055A0000108AA112880000CAA02088811098802A3
1372:1055B00008AA1C288C0828160458086814DD010A5B
1373:1055C000A70224411A2A30802B120407AA2858085F
1374:1055D00063B1338B13B4559A6004AC28B4662C566F
1375:1055E0002B7B69E016DD3A9412C050C0D017DCF472
1376:1055F0009D15D370D4102F60802E60829F169E1749
1377:10560000881672891A8D128C402A607F0DCC282B47
1378:105610003A200CAA28580851C0B10ABE372E354886
1379:105620008F1772F91A8D128C402A60810DCC282BAD
1380:105630003A200CAA28580849C0B10ABE372E354A6C
1381:10564000B233B444B1556952B6B466C0508F15B880
1382:1056500077D370B2FF9F156EF899D10F6C1004C00C
1383:1056600021D10F006C1004270A001CDCD31FDCE4DE
1384:105670001EDCE71DDCD01ADD141BDD22C02824B09F
1385:10568000006D2A75AA48288080C09164806100411D
1386:105690000415DCCBC03125503600361A06550105FD
1387:1056A00095390C56110C66082962966E974D0D5966
1388:1056B0000A29922468900812DD0602420872993B7A
1389:1056C00023629512DCC8CB349F300282020E440262
1390:1056D000C092993194329233AD52246295C0902495
1391:1056E0004C1024669524B0002924A0AA42292480C5
1392:1056F000B177B14404044224B400D10FD10FD10FCB
1393:105700006C10041ADCAC2AA00058021C5BFFD50206
1394:105710002A02033B025BFFD11BDCAAC9A12CB10208
1395:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
1396:10573000C0A00AE43118DCA00002002F828219DC2C
1397:10574000B32EB10209FF022F86820EE431D10F0081
1398:105750006C1004C02002E43114DC9A16DC970002BD
1399:1057600000226282234102732F0603E431C020D15C
1400:105770000F19DCE61ADCE52841020A2A0109880132
1401:105780002A668228450208E43115DCDC12DCE125BA
1402:105790004621D10F6C1004292006289CF96480A0B2
1403:1057A0002A9CFD65A0968A288D262F0A087AD9049E
1404:1057B0002B221FC8BD2C206464C0812E22090EAE8E
1405:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
1406:1057D000C28619DC7A78F3026000AD09B90A299211
1407:1057E000A36890082E220009EE0C65E09B29C28573
1408:1057F0001FDC846490929F90C0E41FDC919E9128EE
1409:10580000200AC0E09E930F8802989288200F880299
1410:1058100098942F20079A979D962F950A2E24072853
1411:10582000200629206468833328C28512DC6B288C0B
1412:1058300020A2B22E24CF28C685C020D10FC020D1EF
1413:105840000F2A206A0111020A2A4165AF52DA20C0EC
1414:10585000B05805EA64AFE5C021D10F00649FC81FAE
1415:10586000DC582D20168FF209DD0C00F10400DD1A42
1416:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
1417:105880008C2028C685C020D10FC021D10F00000078
1418:105890006C1004260A001BDC9F15DC4928206517C4
1419:1058A000DC46288CFE6480940C4D110DBD082CD272
1420:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
1421:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
1422:1058D000F52AD6F406E4310002002872822AFAFF83
1423:1058E000004104290A012F510200991A0A9903095B
1424:1058F00088012876820FE4312624652BD2F48E5C51
1425:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
1426:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
1427:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
1428:10593000020B0B4F2B55020BE431D10F00DB30DA99
1429:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
1430:10595000FFA8000006E4310002002F728218DC303C
1431:105960002E510208FF022F76820EE431D10F000083
1432:105970006C1004C03003E43116DC1015DC11000299
1433:105980000024628274472118DC64875C084801287F
1434:105990006682CD7319DC620C2A11AA99229283299E
1435:1059A00092847291038220CC292B51020BE431C0E6
1436:1059B00020D10F001FDC5B2E51020FEE012E55028D
1437:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
1438:1059D000561DD10F6C10061BDBF71EDBF922B00041
1439:1059E0001ADC526F23721DDC39C04818DC511FDCF1
1440:1059F0004FDC10D5C083F000808600508A6D4A4F7E
1441:105A00000F35110D34092440800B560A296294B1D8
1442:105A1000330E55092251480F44110C440A8740099E
1443:105A2000A80C02883622514907883608770CA899B5
1444:105A30002966949740296295874109A80C02883607
1445:105A400007883608770CA899296695974103034281
1446:105A5000B13808084298F0D10F1CDC3613DC372728
1447:105A6000B0002332B5647057C091C0D016DC351534
1448:105A7000DC33C0402AC00003884328C4006D793C51
1449:105A8000004104B14400971A7780148E502FB295CC
1450:105A90002DB695AFEE2EED2006EE369E5060001826
1451:105AA00077A00983509D5023B69560000223B295DC
1452:105AB000223D2006223622B695B455B8BBD10F0040
1453:105AC00003884328C400D10F6C1004C04004E431A3
1454:105AD00015DC1D000200885013DC1CCB815BFFBD70
1455:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
1456:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
1457:105B0000990C0929146000050BA90C092914993076
1458:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
1459:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
1460:105B3000D10F000004E4311EDBA00002002DE28240
1461:105B40002FBAFF2C51020FDD012DE6820CE431D17A
1462:105B50000F0000006C1004D10F0000006C1004C096
1463:105B600020D10F006C100413DBFAC0D103230923EA
1464:105B7000318FC0A06F340260008D19DB8F1BDB906A
1465:105B800017DBF30C2811A8772672832572822CFA72
1466:105B9000FF76514788502E7285255C0425768275E4
1467:105BA000E9052572842576827659292E72842E760F
1468:105BB000822E76830AE431000200239282002104BF
1469:105BC0002FB10200D61A0C66030633012396820F0A
1470:105BD000E43126728325728260000200D8A07659D3
1471:105BE000220AE43100020023928200210400D21A2A
1472:105BF0002FB1020C22030232012296820FE431D22D
1473:105C000080D10F00D280D10FC020D10F6C1004DBE7
1474:105C100030862015DB68280A00282502DA2028B003
1475:105C2000002CB00705880A28824C2D0A010B800041
1476:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
1477:105C4000769101D10F2BA6A3D10F00006C1004C0D8
1478:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
1479:105C60007786758574C0A076516288508E77B4555A
1480:105C7000957475E903857695747659278F769F75A7
1481:105C80009F740AE431000200239282B42E2FB102E5
1482:105C900000E10400D61A0C66030633012396820F36
1483:105CA000E431867583747639280AE4310002002EC7
1484:105CB0009282B42200210424B10200DF1A0CFF03F7
1485:105CC0000FEE012E968204E431D280D10FD8A07657
1486:105CD00051D6D280D10F00006C1004290A801EDB3F
1487:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
1488:105CF000B2850FCC029ED19CD0C051C07013DB592D
1489:105D000014DB5818DB562AB285A82804240A234637
1490:105D100091A986B8AA2AB685A98827849F25649F59
1491:105D2000D10F00006C100419DB8B0C2A11A9A98972
1492:105D300090C484798B761BDB79ABAC2AC2832CC2EE
1493:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
1494:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
1495:105D6000C94AA929299D0129901F68913270A6036B
1496:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
1497:105D8000FFB3D230D10F000013DB7503A3018C31B8
1498:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
1499:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
1500:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
1501:105DC00028300022300708481209880A28824CDC53
1502:105DD000200B80001BDAF90C4A11ABAA29A2840916
1503:105DE000290B29A684D10F006C1004C04118DAF2E7
1504:105DF00017DAF40C2611A727277038A866256286C3
1505:105E0000007104A35500441A75414822628415DBD1
1506:105E10001502320BC922882117DAF10884140744CD
1507:105E200001754905C834C020D10FD10F0809471D9D
1508:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
1509:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
1510:105E50006684D10FC020D10F6C1004DB30C0D01885
1511:105E6000DAD5DA2025300022300708580A28824C7B
1512:105E7000DC200B80008931709E121BDACF0C4A1196
1513:105E8000ABAA29A28409290B29A684D10F09C952DA
1514:105E900068532600910418DACAC0A12F811600AAFF
1515:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
1516:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
1517:105EC0009A0A0A472EF11600A10400881A08EE0269
1518:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
1519:105EE0000B2BC684D10F00006C1004DB30C0D0191E
1520:105EF000DAB1DA2028300022300709880A28824CDB
1521:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
1522:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
1523:105F200016DAA80C2711A626266038A87225228624
1524:105F3000006104A35500441A7541082222840232EC
1525:105F40000BD10F00C020D10F6C100415DB050249E6
1526:105F5000142956112452120208430F8811C07300ED
1527:105F6000810400361A008104C78F00771A0877036E
1528:105F7000074401064402245612D10F006C10066E2D
1529:105F800023026000AC6420A7C0A0851013DADD16E0
1530:105F9000DAF4C040A6AA2BA2AE0B19416490666841
1531:105FA000915D68925268933C2AA2AA283C7F288C73
1532:105FB0007F0A0A4D2980012880002AACF208881146
1533:105FC0000988027589462B3D0129B0002BB00108D4
1534:105FD00099110B99027A9934B8332A2A00B1447284
1535:105FE00049B160004A7FBF0715DADF63FFB90000DF
1536:105FF000253AE863FFB10000253AE863FFA90000F5
1537:10600000250A6463FFA1C05A63FF9C0000705F080B
1538:106010002534FF058C142C34FE70AF0B0A8D142E22
1539:106020003D012AE4012DE400DA405BFD5063FFA747
1540:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
1541:106040001BDACBC080C07160000D00000022A438B4
1542:10605000B1AA299C107B915F26928679C2156E6247
1543:1060600062C0206D080AB12200210400741A764B28
1544:10607000DB63FFEE2292850D6311032514645FCF6D
1545:10608000D650032D436DD9039820B4220644146DD5
1546:106090004922982098219822982398249825982678
1547:1060A000982798289829982A982B982C982D982EDC
1548:1060B000982F222C4063FF971EDA4027E68027E6C0
1549:1060C00081D10F00C02063FF830000006C1004C06A
1550:1060D00062C04112DA3B1ADA3713DA522AA00023DF
1551:1060E000322D19DA9F2BACFE2992AE6EA30260000E
1552:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
1553:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
1554:10611000015805922B211A0ABB28D3A09B50580581
1555:10612000A92B52000ABB082A0A005805A815DA91C3
1556:106130002D21022C3AE80C3C2804DD022D25029C7E
1557:10614000505805A08B50AABBC0A15805A01CDA8AE4
1558:106150002D21020C3C2806DD0213DA882D25029C35
1559:10616000305805988B30AABBC0A25805982A210246
1560:10617000C0B40BAA020A0A4F2A25025805ACD10F57
1561:10618000242423C3CC2C251A63FF760018DA801C44
1562:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
1563:1061A0001FDA7C2D203624F47A24F47E24F4820E27
1564:1061B000DD0124F4862E0AF707552806DD02C07596
1565:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
1566:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
1567:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
1568:1061F0002E0A800D0D4627546C24546B0EDD022DA3
1569:10620000243663FEFC0000006C10042A0A302B0ABE
1570:10621000035BFF4D12DA53C390292616C3A1C0B306
1571:10622000C08A2826175BFF48C03CC3B12B26161A2C
1572:10623000D9E42AA02023261764A079C3A2C0B15BA9
1573:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
1574:10625000C0B12326175BFF3CC28F282616C0FE2F35
1575:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
1576:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
1577:106280002926175BFF31C3C62C2616C1B32A0AA2E2
1578:106290002B2617C0B35BFF2C290AA2292616C1851D
1579:1062A000282617C2FB2F2616C0E72E26171DDA391F
1580:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
1581:1062C0006C10041CDA031BD9ED18DA3317DA341614
1582:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
1583:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
1584:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
1585:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
1586:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
1587:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
1588:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
1589:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
1590:106350008A2E369F2C369E2C369DB1ACC07919D929
1591:10636000D81BDA1413DA121ADA1218DA1314D9D97C
1592:1063700016DA1304F42812DA1204660C040506A2D5
1593:1063800052A858AA5AA3539B3029A50027848AC033
1594:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
1595:1063A0005726361D26361E2E361F16DA0813DA0833
1596:1063B000A65504330C2826C82E75002D54AC2E5437
1597:1063C000AB2E54AA2326E62326E52E26E7D10F007E
1598:1063D0006C100613D99417D9E224723D2232937FB0
1599:1063E0002F0B6D08052832937F8F0263FFF3C0C423
1600:1063F000C0B01AD973C051D94004593929A4206EAC
1601:1064000044020BB502C3281ED96EDDB025E4220577
1602:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
1603:1064200016D9E11DD9ED94102A724517D9AB6DA983
1604:106430004BD450B3557A5B17DF50756B071FD9608B
1605:106440008FF00F5F0C12D9A302F228AE2222D68160
1606:10645000D54013D9A0746B0715D95A855005450C42
1607:10646000035328B145A73FA832A93322369D2236CF
1608:106470009E2436802B369F2BF48B2CF48C14D969F8
1609:1064800024424DC030041414C84C6D0806B13304C6
1610:106490001414C84263FFF20015D947C44000310408
1611:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
1612:1064B00018D95D2B824D29824E29A5202882537A36
1613:1064C000871E2C54008E106FE45D12D93D2F2121C0
1614:1064D0002321202F251F04330C23252023251ED103
1615:1064E0000FC06218D99F88807E87D98910265400F2
1616:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
1617:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
1618:106510002AB1200A1A1403AA0C2AB5202AB5212A66
1619:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
1620:10653000202BC51F03DD0C2DC5202DC51ED10F003E
1621:106540006C100619D91F14D98612D93615D9A3C7CC
1622:106550003FC0E02E56A82E56A92E56AA2E56AB2383
1623:10656000262918D946DB101CD99DC0D42A42452DB6
1624:1065700016012C160000B0890A880C98905BFF94D5
1625:106580002C22E318D90F0C5C149C842B22E48C84FD
1626:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
1627:1065A0002A86062922CD0959142986072F22892FE8
1628:1065B00086095BFF435BFF1423463BC1B01ED90035
1629:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
1630:1065D000E5025804965BFEBD5BFE96C050C0B01647
1631:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
1632:1065F0002DC0306000440000007F9F0FB155091985
1633:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
1634:106610005008580A28822C2B0A000B8000005104D5
1635:10662000D2A0C091C7AF00991A0A99039912CE3827
1636:1066300064206BD3202B20072516032C12022A621C
1637:10664000827CA86318D8DC01110208580A28822C21
1638:10665000DA500B8000D2A0643FD58A310A8A140434
1639:10666000AA01C82A2B22010B8B1404BB017BA9456C
1640:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
1641:1066800019D8CD1AD91488130ADA28DC801DD951FB
1642:1066900009880A28823C0DAA080B8000652F93D335
1643:1066A00020C0B063FF9400007FAF34B155005004A8
1644:1066B0000A091963FF42DAB07B7B081AD8C12AA203
1645:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
1646:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
1647:1066E000645F60C050C0B0C7CE9C1263FF5500000D
1648:1066F0006C100427221EC08008E4311BD8AF0002B2
1649:10670000002AB28219D8AF003104C06100661A298C
1650:1067100091020A6A022AB68209E43115D90C0C38B2
1651:1067200011A8532832822432842A8CFC7841102903
1652:1067300021022A368297A0096902292502D10F0079
1653:106740002B21022C32850B6B022CCCFC2C36829731
1654:10675000C02B2502D10F00006C1004C0E71DD89299
1655:106760001CD8940D4911D7208B228A200B4B0BD2B9
1656:10677000A007A80C9B72288CF4C8346F8E026000AE
1657:10678000A31FD88AA298AF7B78B334C93DC081C01B
1658:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
1659:1067A0000500308800508C887008980878B16DD248
1660:1067B000A09870D10FC0F0038F387FE0DE63FFD860
1661:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
1662:1067D0005002F5380505426450792CD67E0B3612EE
1663:1067E0002F6C100F4F366DFA0500808800208C0644
1664:1067F000440CC081C05003B208237C0C03853805CB
1665:10680000054264505A2CD67ED30F6D4A050020886D
1666:1068100000308CD2A0A798BC889870D10FD2A0BCB1
1667:10682000799970D10FD2302BAD08C0F1C0500BF563
1668:1068300038050542CB542CD67E083F14260A100F8B
1669:10684000660C0646366D6A0500208800B08C8270A2
1670:1068500063FF2D00C05003F53875E08063FF7A00B8
1671:10686000C06002863876E09F63FF9900C05003F550
1672:106870003875E0C463FFBE006C1004D62068520F68
1673:10688000695324DA20DB30DC405800F7D2A0D10F66
1674:10689000DA20DB30DC405800F49A2424240EC02196
1675:1068A00022640FC020D10F00B83BB04C2A2C748951
1676:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
1677:1068C0002D240E2890072D9003A488B088B1DD2DCB
1678:1068D00094032894075BFFA069511DC0E082242A1D
1679:1068E000600F18D8BF2A240329600E8F202924079F
1680:1068F00008FF029F209E64D10FC020D10F0000002E
1681:106900006C1004942319D8B7C0B3083A110BAA022B
1682:10691000992019D8299A2116D827C05028929D2548
1683:1069200064A2288C1828969DD10F00006C100428B2
1684:106930002066C038232406B788282466D10F0000BB
1685:106940006C10060D3C111AD819D820035B0C862256
1686:106950000D55118221AA8902320B928105630C9395
1687:10696000820C550C792B54CB531CD8111DD80FC059
1688:10697000F7A256C031C0A0043A380A0A42769343BF
1689:10698000044302C9AB2CD67ED30F6DBA0500208814
1690:1069900000308C8281A25272917D92818382C83EA6
1691:1069A000D10FC071C06002763876F0DB63FFD5008E
1692:1069B000C020BC89998199809282D10F222DF892B2
1693:1069C0008163FFA219D7FA02860CA9669611D940F5
1694:1069D000063612961006BB0C64A0442CD67E8A1094
1695:1069E000D30F6DAA0500208800908CBC828311C053
1696:1069F000E0A433240A01034E380E0E42CAEC2CD612
1697:106A00007E6DBA0500208800308C821102520CA2E3
1698:106A100082BC22928163FF83BC82928163FF7C00EF
1699:106A2000C06002363876F0B563FFAF00C070024731
1700:106A30003877F0CC63FFC6006C100414D7EBC1525A
1701:106A4000A424CA3128221D73811C292102659016B5
1702:106A50002A300075A912022A02033B022C3007C01B
1703:106A6000D25801D5653FDCD10F2B300703BB0B0B90
1704:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
1705:106A80006C1004292006C0706E9741292102C08F26
1706:106A90002A2014C0B62B240606AA022A24147980C0
1707:106AA000022725022A221E2C221D7AC10EC8ABDA2B
1708:106AB00020DB302C0A00033D025BF7F96450892D7E
1709:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
1710:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
1711:106AE000D7B529A29EC08A798B712BE22668B004A3
1712:106AF0008C207BC96629A29D1FD7B264905D9790B8
1713:106B0000C0C31DD7C62B21049D9608BB110CBB0228
1714:106B10009B919B971CD7C3C08527E4A22BA29D28DD
1715:106B200024068DFA282102B0DD2BBC302BA69D9DBA
1716:106B3000FA0C8802282502C8D2C020D10F8EF91283
1717:106B4000D7B92E2689C020D10F283000688938DABD
1718:106B500020DB30DC4058004463FF6300022A022B34
1719:106B60000A065800D3220A00D10F655010293000C0
1720:106B7000689924022A02033B02DC4058003BC020F3
1721:106B8000D10FD270D10F00002A2C74033B02044CA9
1722:106B9000025BFEF163FF2700DB30DC402A2C745BD4
1723:106BA000FEEEC020D10F00006C1004C83F8926887B
1724:106BB00029A399992609880C080848282525CC522C
1725:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
1726:106BD0006C1004D820D73082220D451105220C926A
1727:106BE0008264207407420B13D771D420A3837323CC
1728:106BF00002242DF8858074514CBC82C0906D08161B
1729:106C000000408800708C773903D720C0918680744B
1730:106C10003901D42074610263FFE2CA98C097C04171
1731:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
1732:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
1733:106C400000208800308C9780D270D10FBC8FC0E0BC
1734:106C50000F4E387E90E263FFD6BC8292819280C054
1735:106C6000209282D10F0000006C1006C0D71CD74EB6
1736:106C70001BD7500D4911D7202E221F28221D0E4E42
1737:106C80000BD280078A0C2E761F2AAC80C8346FAED8
1738:106C9000026000CB2F0A801AD754A29EAA7A7EA344
1739:106CA0003FC93FC0E1C05002E538050542CA552B37
1740:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
1741:106CC000721DAE9E0EA50C645086D2802E761DC01D
1742:106CD00091298403D10FC05003E53875D0D363FFE9
1743:106CE000CD15D741027E0CA5EE643051C0A1250A16
1744:106CF0000002A538033A020505426450922BC67E75
1745:106D00000E35129510255C10054536D30F6D5A05CA
1746:106D100000A08800208CC0A1A3E2C05023FA800309
1747:106D2000730C03A538AF730505426450722BC67E01
1748:106D3000851005450C6D5A0500208800308CD280E6
1749:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
1750:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
1751:106D6000D2302E8D08C0F1C0500EF538050542CB4B
1752:106D7000592BC67E0A3F14C1600F660C064636D3F7
1753:106D80000F6D6A0500208800E08C22721D63FF03EE
1754:106D9000C061C05003653875D80263FF6263FF5C51
1755:106DA000C05002A53875D08763FF8100C06003F62C
1756:106DB0003876D0BF63FFB9006C10042A2015292053
1757:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
1758:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
1759:106DE0000A472A2415CAAF8B438942B0A8009104F0
1760:106DF00000881AA8FF0FBB029B278F260FB80C78BC
1761:106E00003B1AC020D10F0000292102C0A20A99021A
1762:106E1000292502C021D10F008B2763FFDC2BD12055
1763:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
1764:106E30008B438C288F42B0AD00F10400DD1AADCC3D
1765:106E40000CBB029B27DA20B7EB580019C021D10FE9
1766:106E50009F2763FFEF0000006C100428203C643083
1767:106E60004705306000073E01053EB156076539050C
1768:106E70004928C77FA933030641076603B1660606A2
1769:106E800041A6337E871E222125291AFC732B150269
1770:106E9000380C09816000063E01023EB124064239E9
1771:106EA00003220AD10FD230D10FC05163FFC00000BE
1772:106EB0006C100427221EC08008E4311DD6BF0002DA
1773:106EC000002CD2821BD6BF003104C06100661A2B91
1774:106ED000B1020C6C022CD6820BE43119D7440C3A67
1775:106EE00011AA932832829780253282243284B455A5
1776:106EF00025368275410A292102096902292502D114
1777:106F00000F2A21022B32830A6A022B36822A25029B
1778:106F1000D10F00006C100418D6A80C2711087708B0
1779:106F2000267286253C04765B1315D6A405220A2218
1780:106F300022A3682002742904227285D10FC020D1B7
1781:106F40000F0000006C100419D6A727221EC080096C
1782:106F5000770208E4311DD6980002002CD2821BD69D
1783:106F600098003104C06100661A2BB1020C6C022C2F
1784:106F7000D6820BE43119D71D0C3A11AA932832821C
1785:106F80009780253282243284B45525368275410B90
1786:106F90002A21020A6A022A2502D10F002B21022C83
1787:106FA00032830B6B022C36822B2502D10F0000009E
1788:106FB0006C10041BD6810C2A11ABAA29A286B43806
1789:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
1790:106FD000290868B00274B90D299D0129901F6E928D
1791:106FE0000822A285D10FC020D10FC892C020D10F96
1792:106FF000DA205BEE88C020D10F0000006C10041472
1793:10700000D66E28429E19D66B6F88026000BA29920C
1794:10701000266890078A2009AA0C65A0AC2A429DC068
1795:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
1796:10703000C28609B90A7ED30260009A2992A3689099
1797:10704000078D2009DD0C65D08C25C2856450862D06
1798:107050002104C0306ED80D2C2066B8CC0C0C472C07
1799:10706000246665C07B1CD6E218D66B1AD66219D688
1800:10707000731DD667C0E49E519D508F209357935542
1801:1070800099539A569A5408FF021AD6839F5288261B
1802:107090009F5A9E599D58935E9C5D935C9A5B08082D
1803:1070A00048058811985FC0D81FD64C0CB911A49917
1804:1070B000289285AFBF23F4CF288C402896858E2652
1805:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
1806:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
1807:1070E0000FDBD05BFE072324662B200C63FF7500AB
1808:1070F000C72FD10FC72FD10F6C1004C85B292006F2
1809:1071000068941C689607C020D10FC020D10FDA20E8
1810:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
1811:107120002E200C18D6250CEF11A8FF29F286C08856
1812:10713000798B791AD6220AEA0A2AA2A368A0048BBC
1813:10714000207AB96823F2856430621BD62C290A8024
1814:107150002C20682820672D21040B881104DD1108DC
1815:10716000DD020DCC02C0842D4A100DCC021DD624A8
1816:1071700098319D308A2B99379C340BAA02C0C09C51
1817:10718000359C369A322A2C74DB4028F285C0D328ED
1818:107190008C2028F6852C25042D24061FD60FDD40D3
1819:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
1820:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
1821:1071C00024160128ACF86583862B2122C0F22A21DF
1822:1071D00024CC572AAC010A0A4F2A25247ABB026024
1823:1071E000037F2C21020C0C4C65C3192E22158D3205
1824:1071F000C0910EDD0C65D39088381ED5EF64836B8B
1825:107200008C37C0B8C0960CB9399914B49A9A120D3B
1826:10721000991199138F6718D5EAC9FB2880217F83BC
1827:10722000168B142C22002A200C5BFF61D4A064A3CF
1828:10723000B38F6760002800002B200C89120CBA1154
1829:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
1830:10725000A368D00488207D893024A28564436427F4
1831:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
1832:1072700070C1C42D211F5BFEF889268827DDA00977
1833:10728000880C7A8B179A10600006C04063FFCC0010
1834:1072900000DA208B105BFEC88D1065A267C0E09EEF
1835:1072A000488C649C498B658A669B4A9A4B97458FAC
1836:1072B000677F7302600120CD529D10DA20DB302CF5
1837:1072C00012015BFE698D10C051D6A08FA7C0C08A85
1838:1072D00068974D9A4C8869896A984E994F8E6A8A48
1839:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
1840:1072F0000B8E1477B701C0A1C091C08493159D1760
1841:107300009516C0D025203CC030085801089338C0DD
1842:1073100082083310085B010535400B9D3807DD10EE
1843:107320000BAB100E19402A211F07991003DD020D27
1844:10733000BB020553100933020A55112921250A2AD7
1845:10734000140929140499110A99020933028A2B2974
1846:1073500021040BAA021BD6270899110955020855CA
1847:10736000020BAA029A408920881408991109880200
1848:1073700019D5A61DD62109880298418B2A934695D6
1849:107380004783150DBB0285168D179B448A65896658
1850:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
1851:1073A00088268E29AD87972607EE0C0E0E482E25CF
1852:1073B000259B672B200C87131ED5800CB911AE9925
1853:1073C000289285A78828968517D584C090A7BB29C1
1854:1073D000B4CF871863FE3C008C60C0E0C091C0F061
1855:1073E000C034C0B82A210428203C08AA110B8B0104
1856:1073F000038301039F380B9B39C03208FF100388B9
1857:1074000001089E380C881407EE100FEE0203880165
1858:1074100008983905BF1029211F0ABB1107881008D9
1859:10742000FF020BAA0218D57809291403AA022B21FE
1860:107430002583200B2B1404BB110833110FBB020B47
1861:1074400099028B148F2A0B33020833028B2B647042
1862:10745000868868974D984C8769886A9341994697C2
1863:107460004E984FC07077C701C0719A4718D5E30B8B
1864:107470007C100CEC0208F802984418D5E00CBC0211
1865:1074800008CC029C402A200C295CFEC0801FD54AF3
1866:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
1867:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
1868:1074B0002524B1AA2A26156490DBC9D28F262E2254
1869:1074C000090DFF082F26060FEE0C0E0E482E25255F
1870:1074D0006550E4C020D10F00C07093419F4499468D
1871:1074E0009A4777C70A1CD5362CC022C0810C873832
1872:1074F0001CD5C40B781008E80208B8020C88029862
1873:107500004063FF8000CC57DA20DB608C115BFDD636
1874:10751000292102689806689403C020D10F2B221EEF
1875:10752000C0A029221D2A25027B9901C0B064BFE8B2
1876:1075300013D5212CB00728B000DA2003880A28824E
1877:107540004CC0D10B8000DBA065AFE763FFCA000031
1878:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
1879:10756000D10FC16DC19D29252C60000429252CD681
1880:10757000902624672F2468DA20DB308C11DD502E12
1881:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
1882:1075900063FFDD000000C8DF8C268B29ADCC9C2664
1883:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
1884:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
1885:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
1886:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
1887:1075E000252463FF1FDA202B200C5BFE5663FF145B
1888:1075F00012D5858220028257C82163FFFC12D581F3
1889:1076000003E83004EE3005B13093209421952263D5
1890:10761000FFFC000010D57D910092019302940311AC
1891:10762000D554821001EA30A21101F031C04004E4C7
1892:107630001600020011D5768210234A00032202921E
1893:107640001011D540C021921004E4318403830282DA
1894:1076500001810000D23001230000000010D56D919F
1895:107660000092019302940311D543821001EA30A2E3
1896:107670001101F131C04004E41600020011D564820A
1897:107680001013D4E7032202921004E431840383022E
1898:107690008201810000D330013300000010D55E91DB
1899:1076A00000810165104981026510448103CF1F925A
1900:1076B000019302940311D531821001EA30A2110125
1901:1076C000F231C04004E41600020011D550821013BC
1902:1076D000D4CF032202921004E43184038302820196
1903:1076E000C010910391029101810000D43001430048
1904:1076F00012D500C03028374028374428374828376B
1905:107700004C233D017233ED03020063FFFC000000D7
1906:1077100010D542910092019302940311D54082103A
1907:10772000921011D4F28310032202921011D53D124F
1908:10773000D5049210C04004E41600020011D5348232
1909:107740001013D4EB032202921004E4318403830269
1910:107750008201810000D53001530000006C10026EE0
1911:10776000322FD620056F04043F04745B2A05440CB5
1912:1077700000410400331A220A006D490D73630403AB
1913:10778000660CB1220F2211031314736302222C0121
1914:10779000D10FC83BD10F000073630CC021D10F0083
1915:1077A0000000000044495630C020D10F6C10020088
1916:1077B00040046B4C07032318020219D10F0203196E
1917:1077C000C020D10F6C100202EA30D10F6C1002CC35
1918:1077D0002503F03160000F006F220503F1316000D6
1919:1077E000056F230503F231000200D10F6C1002CCAB
1920:1077F0002502F030D10F00006F220402F130D10FCA
1921:107800006F230402F230D10FC020D10F6C1002227E
1922:107810000A20230A006D280E2837402837442837CD
1923:107820004828374C233D01030200D10F6C1002029F
1924:10783000E431D10F0A0000004368656C73696F2062
1925:1078400046572044454255473D3020284275696CD3
1926:1078500074204D6F6E204D61722020382031373AF0
1927:1078600032383A3135205053542032303130206F85
1928:107870006E20636C656F70617472612E61736963F1
1929:1078800064657369676E6572732E636F6D3A2F68F6
1930:107890006F6D652F66656C69782F772F66775F3718
1931:1078A0002E392D6977617270292C205665727369A3
1932:1078B0006F6E2054337878203030372E30612E3080
1933:1078C00030202D20313030373061303010070A0041
1934:0478D0000BDFE8756D
1935:00000001FF
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index ef17e0169da1..60a327863b11 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -14,7 +14,7 @@
14#include "dlm_internal.h" 14#include "dlm_internal.h"
15 15
16static uint32_t dlm_nl_seqnum; 16static uint32_t dlm_nl_seqnum;
17static uint32_t listener_nlpid; 17static uint32_t listener_nlportid;
18 18
19static struct genl_family family = { 19static struct genl_family family = {
20 .id = GENL_ID_GENERATE, 20 .id = GENL_ID_GENERATE,
@@ -64,13 +64,13 @@ static int send_data(struct sk_buff *skb)
64 return rv; 64 return rv;
65 } 65 }
66 66
67 return genlmsg_unicast(&init_net, skb, listener_nlpid); 67 return genlmsg_unicast(&init_net, skb, listener_nlportid);
68} 68}
69 69
70static int user_cmd(struct sk_buff *skb, struct genl_info *info) 70static int user_cmd(struct sk_buff *skb, struct genl_info *info)
71{ 71{
72 listener_nlpid = info->snd_pid; 72 listener_nlportid = info->snd_portid;
73 printk("user_cmd nlpid %u\n", listener_nlpid); 73 printk("user_cmd nlpid %u\n", listener_nlportid);
74 return 0; 74 return 0;
75} 75}
76 76
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index c57e064666e4..7f1c0f00db9b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -192,6 +192,7 @@ header-y += in_route.h
192header-y += sock_diag.h 192header-y += sock_diag.h
193header-y += inet_diag.h 193header-y += inet_diag.h
194header-y += unix_diag.h 194header-y += unix_diag.h
195header-y += packet_diag.h
195header-y += inotify.h 196header-y += inotify.h
196header-y += input.h 197header-y += input.h
197header-y += ioctl.h 198header-y += ioctl.h
@@ -359,6 +360,7 @@ header-y += sysctl.h
359header-y += sysinfo.h 360header-y += sysinfo.h
360header-y += taskstats.h 361header-y += taskstats.h
361header-y += tcp.h 362header-y += tcp.h
363header-y += tcp_metrics.h
362header-y += telephony.h 364header-y += telephony.h
363header-y += termios.h 365header-y += termios.h
364header-y += time.h 366header-y += time.h
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index d323a4b4143c..6ba45d2b99db 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -100,6 +100,7 @@
100#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ 100#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
101#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */ 101#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */
102#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */ 102#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */
103#define BCMA_CC_CHIPST_5357_NAND_BOOT BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
103#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */ 104#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
104#define BCMA_CC_JCMD_START 0x80000000 105#define BCMA_CC_JCMD_START 0x80000000
105#define BCMA_CC_JCMD_BUSY 0x80000000 106#define BCMA_CC_JCMD_BUSY 0x80000000
@@ -266,6 +267,29 @@
266#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004 267#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
267#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1 268#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1
268#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001 269#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001
270/* Block 0x140 - 0x190 registers are chipset specific */
271#define BCMA_CC_4706_FLASHSCFG 0x18C /* Flash struct configuration */
272#define BCMA_CC_4706_FLASHSCFG_MASK 0x000000ff
273#define BCMA_CC_4706_FLASHSCFG_SF1 0x00000001 /* 2nd serial flash present */
274#define BCMA_CC_4706_FLASHSCFG_PF1 0x00000002 /* 2nd parallel flash present */
275#define BCMA_CC_4706_FLASHSCFG_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */
276#define BCMA_CC_4706_FLASHSCFG_NF1 0x00000008 /* 2nd NAND flash present */
277#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK 0x000000f0
278#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */
279#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */
280#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */
281#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */
282#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */
283#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */
284#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */
285/* NAND flash registers for BCM4706 (corerev = 31) */
286#define BCMA_CC_NFLASH_CTL 0x01A0
287#define BCMA_CC_NFLASH_CTL_ERR 0x08000000
288#define BCMA_CC_NFLASH_CONF 0x01A4
289#define BCMA_CC_NFLASH_COL_ADDR 0x01A8
290#define BCMA_CC_NFLASH_ROW_ADDR 0x01AC
291#define BCMA_CC_NFLASH_DATA 0x01B0
292#define BCMA_CC_NFLASH_WAITCNT0 0x01B4
269/* 0x1E0 is defined as shared BCMA_CLKCTLST */ 293/* 0x1E0 is defined as shared BCMA_CLKCTLST */
270#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ 294#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
271#define BCMA_CC_UART0_DATA 0x0300 295#define BCMA_CC_UART0_DATA 0x0300
@@ -325,6 +349,60 @@
325#define BCMA_CC_PLLCTL_ADDR 0x0660 349#define BCMA_CC_PLLCTL_ADDR 0x0660
326#define BCMA_CC_PLLCTL_DATA 0x0664 350#define BCMA_CC_PLLCTL_DATA 0x0664
327#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ 351#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
352/* NAND flash MLC controller registers (corerev >= 38) */
353#define BCMA_CC_NAND_REVISION 0x0C00
354#define BCMA_CC_NAND_CMD_START 0x0C04
355#define BCMA_CC_NAND_CMD_ADDR_X 0x0C08
356#define BCMA_CC_NAND_CMD_ADDR 0x0C0C
357#define BCMA_CC_NAND_CMD_END_ADDR 0x0C10
358#define BCMA_CC_NAND_CS_NAND_SELECT 0x0C14
359#define BCMA_CC_NAND_CS_NAND_XOR 0x0C18
360#define BCMA_CC_NAND_SPARE_RD0 0x0C20
361#define BCMA_CC_NAND_SPARE_RD4 0x0C24
362#define BCMA_CC_NAND_SPARE_RD8 0x0C28
363#define BCMA_CC_NAND_SPARE_RD12 0x0C2C
364#define BCMA_CC_NAND_SPARE_WR0 0x0C30
365#define BCMA_CC_NAND_SPARE_WR4 0x0C34
366#define BCMA_CC_NAND_SPARE_WR8 0x0C38
367#define BCMA_CC_NAND_SPARE_WR12 0x0C3C
368#define BCMA_CC_NAND_ACC_CONTROL 0x0C40
369#define BCMA_CC_NAND_CONFIG 0x0C48
370#define BCMA_CC_NAND_TIMING_1 0x0C50
371#define BCMA_CC_NAND_TIMING_2 0x0C54
372#define BCMA_CC_NAND_SEMAPHORE 0x0C58
373#define BCMA_CC_NAND_DEVID 0x0C60
374#define BCMA_CC_NAND_DEVID_X 0x0C64
375#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68
376#define BCMA_CC_NAND_INTFC_STATUS 0x0C6C
377#define BCMA_CC_NAND_ECC_CORR_ADDR_X 0x0C70
378#define BCMA_CC_NAND_ECC_CORR_ADDR 0x0C74
379#define BCMA_CC_NAND_ECC_UNC_ADDR_X 0x0C78
380#define BCMA_CC_NAND_ECC_UNC_ADDR 0x0C7C
381#define BCMA_CC_NAND_READ_ERROR_COUNT 0x0C80
382#define BCMA_CC_NAND_CORR_STAT_THRESHOLD 0x0C84
383#define BCMA_CC_NAND_READ_ADDR_X 0x0C90
384#define BCMA_CC_NAND_READ_ADDR 0x0C94
385#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X 0x0C98
386#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C
387#define BCMA_CC_NAND_COPY_BACK_ADDR_X 0x0CA0
388#define BCMA_CC_NAND_COPY_BACK_ADDR 0x0CA4
389#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X 0x0CA8
390#define BCMA_CC_NAND_BLOCK_ERASE_ADDR 0x0CAC
391#define BCMA_CC_NAND_INV_READ_ADDR_X 0x0CB0
392#define BCMA_CC_NAND_INV_READ_ADDR 0x0CB4
393#define BCMA_CC_NAND_BLK_WR_PROTECT 0x0CC0
394#define BCMA_CC_NAND_ACC_CONTROL_CS1 0x0CD0
395#define BCMA_CC_NAND_CONFIG_CS1 0x0CD4
396#define BCMA_CC_NAND_TIMING_1_CS1 0x0CD8
397#define BCMA_CC_NAND_TIMING_2_CS1 0x0CDC
398#define BCMA_CC_NAND_SPARE_RD16 0x0D30
399#define BCMA_CC_NAND_SPARE_RD20 0x0D34
400#define BCMA_CC_NAND_SPARE_RD24 0x0D38
401#define BCMA_CC_NAND_SPARE_RD28 0x0D3C
402#define BCMA_CC_NAND_CACHE_ADDR 0x0D40
403#define BCMA_CC_NAND_CACHE_DATA 0x0D44
404#define BCMA_CC_NAND_CTRL_CONFIG 0x0D48
405#define BCMA_CC_NAND_CTRL_STATUS 0x0D4C
328 406
329/* Divider allocation in 4716/47162/5356 */ 407/* Divider allocation in 4716/47162/5356 */
330#define BCMA_CC_PMU5_MAINPLL_CPU 1 408#define BCMA_CC_PMU5_MAINPLL_CPU 1
@@ -415,6 +493,13 @@
415/* 4313 Chip specific ChipControl register bits */ 493/* 4313 Chip specific ChipControl register bits */
416#define BCMA_CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */ 494#define BCMA_CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */
417 495
496/* BCM5357 ChipControl register bits */
497#define BCMA_CHIPCTL_5357_EXTPA BIT(14)
498#define BCMA_CHIPCTL_5357_ANT_MUX_2O3 BIT(15)
499#define BCMA_CHIPCTL_5357_NFLASH BIT(16)
500#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE BIT(18)
501#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE BIT(19)
502
418/* Data for the PMU, if available. 503/* Data for the PMU, if available.
419 * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) 504 * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
420 */ 505 */
@@ -430,6 +515,26 @@ struct bcma_pflash {
430 u32 window_size; 515 u32 window_size;
431}; 516};
432 517
518#ifdef CONFIG_BCMA_SFLASH
519struct bcma_sflash {
520 bool present;
521 u32 window;
522 u32 blocksize;
523 u16 numblocks;
524 u32 size;
525};
526#endif
527
528#ifdef CONFIG_BCMA_NFLASH
529struct mtd_info;
530
531struct bcma_nflash {
532 bool present;
533
534 struct mtd_info *mtd;
535};
536#endif
537
433struct bcma_serial_port { 538struct bcma_serial_port {
434 void *regs; 539 void *regs;
435 unsigned long clockspeed; 540 unsigned long clockspeed;
@@ -450,6 +555,12 @@ struct bcma_drv_cc {
450 struct bcma_chipcommon_pmu pmu; 555 struct bcma_chipcommon_pmu pmu;
451#ifdef CONFIG_BCMA_DRIVER_MIPS 556#ifdef CONFIG_BCMA_DRIVER_MIPS
452 struct bcma_pflash pflash; 557 struct bcma_pflash pflash;
558#ifdef CONFIG_BCMA_SFLASH
559 struct bcma_sflash sflash;
560#endif
561#ifdef CONFIG_BCMA_NFLASH
562 struct bcma_nflash nflash;
563#endif
453 564
454 int nr_serial_ports; 565 int nr_serial_ports;
455 struct bcma_serial_port serial_ports[4]; 566 struct bcma_serial_port serial_ports[4];
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 5a71d5719640..6c9cb93ae3de 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -11,11 +11,13 @@
11#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ 11#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
12#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ 12#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
13#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */ 13#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */
14#define BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
14#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ 15#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */
15#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */ 16#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */
16#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */ 17#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */
17#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */ 18#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */
18#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */ 19#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */
20#define BCMA_CLKCTLST_EXTRESST_SHIFT 24
19/* Is there any BCM4328 on BCMA bus? */ 21/* Is there any BCM4328 on BCMA bus? */
20#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ 22#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */
21#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ 23#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
@@ -83,4 +85,6 @@
83 * (2 ZettaBytes), high 32 bits 85 * (2 ZettaBytes), high 32 bits
84 */ 86 */
85 87
88#define BCMA_SFLASH 0x1c000000
89
86#endif /* LINUX_BCMA_REGS_H_ */ 90#endif /* LINUX_BCMA_REGS_H_ */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index d426336d92d9..b006ba0a9f42 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -151,6 +151,17 @@ static inline void eth_broadcast_addr(u8 *addr)
151} 151}
152 152
153/** 153/**
154 * eth_zero_addr - Assign zero address
155 * @addr: Pointer to a six-byte array containing the Ethernet address
156 *
157 * Assign the zero address to the given address array.
158 */
159static inline void eth_zero_addr(u8 *addr)
160{
161 memset(addr, 0x00, ETH_ALEN);
162}
163
164/**
154 * eth_hw_addr_random - Generate software assigned random Ethernet and 165 * eth_hw_addr_random - Generate software assigned random Ethernet and
155 * set device flag 166 * set device flag
156 * @dev: pointer to net_device structure 167 * @dev: pointer to net_device structure
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 21eff418091b..fcb4f8e60c1c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -45,8 +45,10 @@ struct ethtool_cmd {
45 * bits) in Mbps. Please use 45 * bits) in Mbps. Please use
46 * ethtool_cmd_speed()/_set() to 46 * ethtool_cmd_speed()/_set() to
47 * access it */ 47 * access it */
48 __u8 eth_tp_mdix; 48 __u8 eth_tp_mdix; /* twisted pair MDI-X status */
49 __u8 reserved2; 49 __u8 eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
50 * link should be renegotiated if necessary
51 */
50 __u32 lp_advertising; /* Features the link partner advertises */ 52 __u32 lp_advertising; /* Features the link partner advertises */
51 __u32 reserved[2]; 53 __u32 reserved[2];
52}; 54};
@@ -1229,10 +1231,13 @@ struct ethtool_ops {
1229#define AUTONEG_DISABLE 0x00 1231#define AUTONEG_DISABLE 0x00
1230#define AUTONEG_ENABLE 0x01 1232#define AUTONEG_ENABLE 0x01
1231 1233
1232/* Mode MDI or MDI-X */ 1234/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then
1233#define ETH_TP_MDI_INVALID 0x00 1235 * the driver is required to renegotiate link
1234#define ETH_TP_MDI 0x01 1236 */
1235#define ETH_TP_MDI_X 0x02 1237#define ETH_TP_MDI_INVALID 0x00 /* status: unknown; control: unsupported */
1238#define ETH_TP_MDI 0x01 /* status: MDI; control: force MDI */
1239#define ETH_TP_MDI_X 0x02 /* status: MDI-X; control: force MDI-X */
1240#define ETH_TP_MDI_AUTO 0x03 /* control: auto-select */
1236 1241
1237/* Wake-On-Lan options. */ 1242/* Wake-On-Lan options. */
1238#define WAKE_PHY (1 << 0) 1243#define WAKE_PHY (1 << 0)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 82b01357af8b..2ded090e10f4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -74,6 +74,9 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
74#define BPF_LSH 0x60 74#define BPF_LSH 0x60
75#define BPF_RSH 0x70 75#define BPF_RSH 0x70
76#define BPF_NEG 0x80 76#define BPF_NEG 0x80
77#define BPF_MOD 0x90
78#define BPF_XOR 0xa0
79
77#define BPF_JA 0x00 80#define BPF_JA 0x00
78#define BPF_JEQ 0x10 81#define BPF_JEQ 0x10
79#define BPF_JGT 0x20 82#define BPF_JGT 0x20
@@ -196,10 +199,14 @@ enum {
196 BPF_S_ALU_MUL_K, 199 BPF_S_ALU_MUL_K,
197 BPF_S_ALU_MUL_X, 200 BPF_S_ALU_MUL_X,
198 BPF_S_ALU_DIV_X, 201 BPF_S_ALU_DIV_X,
202 BPF_S_ALU_MOD_K,
203 BPF_S_ALU_MOD_X,
199 BPF_S_ALU_AND_K, 204 BPF_S_ALU_AND_K,
200 BPF_S_ALU_AND_X, 205 BPF_S_ALU_AND_X,
201 BPF_S_ALU_OR_K, 206 BPF_S_ALU_OR_K,
202 BPF_S_ALU_OR_X, 207 BPF_S_ALU_OR_X,
208 BPF_S_ALU_XOR_K,
209 BPF_S_ALU_XOR_X,
203 BPF_S_ALU_LSH_K, 210 BPF_S_ALU_LSH_K,
204 BPF_S_ALU_LSH_X, 211 BPF_S_ALU_LSH_X,
205 BPF_S_ALU_RSH_K, 212 BPF_S_ALU_RSH_K,
diff --git a/include/linux/hash.h b/include/linux/hash.h
index b80506bdd733..24df9e70406f 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -67,4 +67,14 @@ static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
67{ 67{
68 return hash_long((unsigned long)ptr, bits); 68 return hash_long((unsigned long)ptr, bits);
69} 69}
70
71static inline u32 hash32_ptr(const void *ptr)
72{
73 unsigned long val = (unsigned long)ptr;
74
75#if BITS_PER_LONG == 64
76 val ^= (val >> 32);
77#endif
78 return (u32)val;
79}
70#endif /* _LINUX_HASH_H */ 80#endif /* _LINUX_HASH_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index e02fc682bb68..2385119f8bb0 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1935,36 +1935,6 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
1935} 1935}
1936 1936
1937/** 1937/**
1938 * ieee80211_fhss_chan_to_freq - get channel frequency
1939 * @channel: the FHSS channel
1940 *
1941 * Convert IEEE802.11 FHSS channel to frequency (MHz)
1942 * Ref IEEE 802.11-2007 section 14.6
1943 */
1944static inline int ieee80211_fhss_chan_to_freq(int channel)
1945{
1946 if ((channel > 1) && (channel < 96))
1947 return channel + 2400;
1948 else
1949 return -1;
1950}
1951
1952/**
1953 * ieee80211_freq_to_fhss_chan - get channel
1954 * @freq: the channels frequency
1955 *
1956 * Convert frequency (MHz) to IEEE802.11 FHSS channel
1957 * Ref IEEE 802.11-2007 section 14.6
1958 */
1959static inline int ieee80211_freq_to_fhss_chan(int freq)
1960{
1961 if ((freq > 2401) && (freq < 2496))
1962 return freq - 2400;
1963 else
1964 return -1;
1965}
1966
1967/**
1968 * ieee80211_dsss_chan_to_freq - get channel center frequency 1938 * ieee80211_dsss_chan_to_freq - get channel center frequency
1969 * @channel: the DSSS channel 1939 * @channel: the DSSS channel
1970 * 1940 *
@@ -2000,56 +1970,6 @@ static inline int ieee80211_freq_to_dsss_chan(int freq)
2000 return -1; 1970 return -1;
2001} 1971}
2002 1972
2003/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back
2004 * Ref IEEE 802.11-2007 section 18.4.6.2
2005 *
2006 * The channels and frequencies are the same as those defined for DSSS
2007 */
2008#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan)
2009#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq)
2010
2011/* Convert IEEE802.11 ERP channel to frequency (MHz) and back
2012 * Ref IEEE 802.11-2007 section 19.4.2
2013 */
2014#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan)
2015#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq)
2016
2017/**
2018 * ieee80211_ofdm_chan_to_freq - get channel center frequency
2019 * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
2020 * @channel: the OFDM channel
2021 *
2022 * Convert IEEE802.11 OFDM channel to center frequency (MHz)
2023 * Ref IEEE 802.11-2007 section 17.3.8.3.2
2024 */
2025static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel)
2026{
2027 if ((channel > 0) && (channel <= 200) &&
2028 (s_freq >= 4000))
2029 return s_freq + (channel * 5);
2030 else
2031 return -1;
2032}
2033
2034/**
2035 * ieee80211_freq_to_ofdm_channel - get channel
2036 * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
2037 * @freq: the frequency
2038 *
2039 * Convert frequency (MHz) to IEEE802.11 OFDM channel
2040 * Ref IEEE 802.11-2007 section 17.3.8.3.2
2041 *
2042 * This routine selects the channel with the closest center frequency.
2043 */
2044static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq)
2045{
2046 if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) &&
2047 (s_freq >= 4000))
2048 return (freq + 2 - s_freq) / 5;
2049 else
2050 return -1;
2051}
2052
2053/** 1973/**
2054 * ieee80211_tu_to_usec - convert time units (TU) to microseconds 1974 * ieee80211_tu_to_usec - convert time units (TU) to microseconds
2055 * @tu: the TUs 1975 * @tu: the TUs
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index f0e69c6e8208..9adcc29f084a 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -92,6 +92,7 @@
92#define ARPHRD_PHONET 820 /* PhoNet media type */ 92#define ARPHRD_PHONET 820 /* PhoNet media type */
93#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ 93#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
94#define ARPHRD_CAIF 822 /* CAIF media type */ 94#define ARPHRD_CAIF 822 /* CAIF media type */
95#define ARPHRD_IP6GRE 823 /* GRE over IPv6 */
95 96
96#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ 97#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
97#define ARPHRD_NONE 0xFFFE /* zero header length */ 98#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index ac173bd2ab65..e4dad4ddf085 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -272,6 +272,22 @@ enum macvlan_mode {
272 272
273#define MACVLAN_FLAG_NOPROMISC 1 273#define MACVLAN_FLAG_NOPROMISC 1
274 274
275/* VXLAN section */
276enum {
277 IFLA_VXLAN_UNSPEC,
278 IFLA_VXLAN_ID,
279 IFLA_VXLAN_GROUP,
280 IFLA_VXLAN_LINK,
281 IFLA_VXLAN_LOCAL,
282 IFLA_VXLAN_TTL,
283 IFLA_VXLAN_TOS,
284 IFLA_VXLAN_LEARNING,
285 IFLA_VXLAN_AGEING,
286 IFLA_VXLAN_LIMIT,
287 __IFLA_VXLAN_MAX
288};
289#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
290
275/* SR-IOV virtual function management section */ 291/* SR-IOV virtual function management section */
276 292
277enum { 293enum {
@@ -398,4 +414,22 @@ struct ifla_port_vsi {
398 __u8 pad[3]; 414 __u8 pad[3];
399}; 415};
400 416
417
418/* IPoIB section */
419
420enum {
421 IFLA_IPOIB_UNSPEC,
422 IFLA_IPOIB_PKEY,
423 IFLA_IPOIB_MODE,
424 IFLA_IPOIB_UMCAST,
425 __IFLA_IPOIB_MAX
426};
427
428enum {
429 IPOIB_MODE_DATAGRAM = 0, /* using unreliable datagram QPs */
430 IPOIB_MODE_CONNECTED = 1, /* using connected QPs */
431};
432
433#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
434
401#endif /* _LINUX_IF_LINK_H */ 435#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index aa2e167e1ef4..6d88a7f57680 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -67,6 +67,9 @@ struct team_port {
67 struct netpoll *np; 67 struct netpoll *np;
68#endif 68#endif
69 69
70 s32 priority; /* lower number ~ higher priority */
71 u16 queue_id;
72 struct list_head qom_list; /* node in queue override mapping list */
70 long mode_priv[0]; 73 long mode_priv[0];
71}; 74};
72 75
@@ -105,7 +108,7 @@ struct team_mode_ops {
105 bool (*transmit)(struct team *team, struct sk_buff *skb); 108 bool (*transmit)(struct team *team, struct sk_buff *skb);
106 int (*port_enter)(struct team *team, struct team_port *port); 109 int (*port_enter)(struct team *team, struct team_port *port);
107 void (*port_leave)(struct team *team, struct team_port *port); 110 void (*port_leave)(struct team *team, struct team_port *port);
108 void (*port_change_mac)(struct team *team, struct team_port *port); 111 void (*port_change_dev_addr)(struct team *team, struct team_port *port);
109 void (*port_enabled)(struct team *team, struct team_port *port); 112 void (*port_enabled)(struct team *team, struct team_port *port);
110 void (*port_disabled)(struct team *team, struct team_port *port); 113 void (*port_disabled)(struct team *team, struct team_port *port);
111}; 114};
@@ -115,6 +118,7 @@ enum team_option_type {
115 TEAM_OPTION_TYPE_STRING, 118 TEAM_OPTION_TYPE_STRING,
116 TEAM_OPTION_TYPE_BINARY, 119 TEAM_OPTION_TYPE_BINARY,
117 TEAM_OPTION_TYPE_BOOL, 120 TEAM_OPTION_TYPE_BOOL,
121 TEAM_OPTION_TYPE_S32,
118}; 122};
119 123
120struct team_option_inst_info { 124struct team_option_inst_info {
@@ -131,6 +135,7 @@ struct team_gsetter_ctx {
131 u32 len; 135 u32 len;
132 } bin_val; 136 } bin_val;
133 bool bool_val; 137 bool bool_val;
138 s32 s32_val;
134 } data; 139 } data;
135 struct team_option_inst_info *info; 140 struct team_option_inst_info *info;
136}; 141};
@@ -182,6 +187,8 @@ struct team {
182 187
183 const struct team_mode *mode; 188 const struct team_mode *mode;
184 struct team_mode_ops ops; 189 struct team_mode_ops ops;
190 bool queue_override_enabled;
191 struct list_head *qom_lists; /* array of queue override mapping lists */
185 long mode_priv[TEAM_MODE_PRIV_LONGS]; 192 long mode_priv[TEAM_MODE_PRIV_LONGS];
186}; 193};
187 194
@@ -231,7 +238,7 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
231 return NULL; 238 return NULL;
232} 239}
233 240
234extern int team_port_set_team_mac(struct team_port *port); 241extern int team_port_set_team_dev_addr(struct team_port *port);
235extern int team_options_register(struct team *team, 242extern int team_options_register(struct team *team,
236 const struct team_option *option, 243 const struct team_option *option,
237 size_t option_count); 244 size_t option_count);
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 5efff60b6f56..8c5035ac3142 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -75,6 +75,9 @@ enum {
75 IFLA_GRE_TTL, 75 IFLA_GRE_TTL,
76 IFLA_GRE_TOS, 76 IFLA_GRE_TOS,
77 IFLA_GRE_PMTUDISC, 77 IFLA_GRE_PMTUDISC,
78 IFLA_GRE_ENCAP_LIMIT,
79 IFLA_GRE_FLOWINFO,
80 IFLA_GRE_FLAGS,
78 __IFLA_GRE_MAX, 81 __IFLA_GRE_MAX,
79}; 82};
80 83
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a810987cb80e..e6ff12dd717b 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -74,8 +74,6 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
74/* found in socket.c */ 74/* found in socket.c */
75extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); 75extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
76 76
77struct vlan_info;
78
79static inline int is_vlan_dev(struct net_device *dev) 77static inline int is_vlan_dev(struct net_device *dev)
80{ 78{
81 return dev->priv_flags & IFF_802_1Q_VLAN; 79 return dev->priv_flags & IFF_802_1Q_VLAN;
@@ -101,6 +99,8 @@ extern int vlan_vids_add_by_dev(struct net_device *dev,
101 const struct net_device *by_dev); 99 const struct net_device *by_dev);
102extern void vlan_vids_del_by_dev(struct net_device *dev, 100extern void vlan_vids_del_by_dev(struct net_device *dev,
103 const struct net_device *by_dev); 101 const struct net_device *by_dev);
102
103extern bool vlan_uses_dev(const struct net_device *dev);
104#else 104#else
105static inline struct net_device * 105static inline struct net_device *
106__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) 106__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -151,6 +151,11 @@ static inline void vlan_vids_del_by_dev(struct net_device *dev,
151 const struct net_device *by_dev) 151 const struct net_device *by_dev)
152{ 152{
153} 153}
154
155static inline bool vlan_uses_dev(const struct net_device *dev)
156{
157 return false;
158}
154#endif 159#endif
155 160
156/** 161/**
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 67f9ddacb70c..d032780d0ce5 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -104,9 +104,14 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
104#define IN_DEV_ANDCONF(in_dev, attr) \ 104#define IN_DEV_ANDCONF(in_dev, attr) \
105 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \ 105 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
106 IN_DEV_CONF_GET((in_dev), attr)) 106 IN_DEV_CONF_GET((in_dev), attr))
107#define IN_DEV_ORCONF(in_dev, attr) \ 107
108 (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \ 108#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
109 (IPV4_DEVCONF_ALL(net, attr) || \
109 IN_DEV_CONF_GET((in_dev), attr)) 110 IN_DEV_CONF_GET((in_dev), attr))
111
112#define IN_DEV_ORCONF(in_dev, attr) \
113 IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
114
110#define IN_DEV_MAXCONF(in_dev, attr) \ 115#define IN_DEV_MAXCONF(in_dev, attr) \
111 (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \ 116 (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
112 IN_DEV_CONF_GET((in_dev), attr))) 117 IN_DEV_CONF_GET((in_dev), attr)))
@@ -133,6 +138,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
133 IN_DEV_ORCONF((in_dev), \ 138 IN_DEV_ORCONF((in_dev), \
134 PROMOTE_SECONDARIES) 139 PROMOTE_SECONDARIES)
135#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET) 140#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
141#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \
142 IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
136 143
137#define IN_DEV_RX_REDIRECTS(in_dev) \ 144#define IN_DEV_RX_REDIRECTS(in_dev) \
138 ((IN_DEV_FORWARD(in_dev) && \ 145 ((IN_DEV_FORWARD(in_dev) && \
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index bf22b0317902..48af63c9a48d 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -31,4 +31,21 @@ struct ip6_tnl_parm {
31 struct in6_addr raddr; /* remote tunnel end-point address */ 31 struct in6_addr raddr; /* remote tunnel end-point address */
32}; 32};
33 33
34struct ip6_tnl_parm2 {
35 char name[IFNAMSIZ]; /* name of tunnel device */
36 int link; /* ifindex of underlying L2 interface */
37 __u8 proto; /* tunnel protocol */
38 __u8 encap_limit; /* encapsulation limit for tunnel */
39 __u8 hop_limit; /* hop limit for tunnel */
40 __be32 flowinfo; /* traffic class and flowlabel for tunnel */
41 __u32 flags; /* tunnel flags */
42 struct in6_addr laddr; /* local tunnel end-point address */
43 struct in6_addr raddr; /* remote tunnel end-point address */
44
45 __be16 i_flags;
46 __be16 o_flags;
47 __be32 i_key;
48 __be32 o_key;
49};
50
34#endif 51#endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 879db26ec401..0b94e91ed685 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -256,6 +256,7 @@ struct inet6_skb_parm {
256#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 256#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
257 __u16 dsthao; 257 __u16 dsthao;
258#endif 258#endif
259 __u16 frag_max_size;
259 260
260#define IP6SKB_XFRM_TRANSFORMED 1 261#define IP6SKB_XFRM_TRANSFORMED 1
261#define IP6SKB_FORWARDED 2 262#define IP6SKB_FORWARDED 2
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 82680541576d..05e3c2c7a8cf 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -312,7 +312,13 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
312extern unsigned long timeval_to_jiffies(const struct timeval *value); 312extern unsigned long timeval_to_jiffies(const struct timeval *value);
313extern void jiffies_to_timeval(const unsigned long jiffies, 313extern void jiffies_to_timeval(const unsigned long jiffies,
314 struct timeval *value); 314 struct timeval *value);
315
315extern clock_t jiffies_to_clock_t(unsigned long x); 316extern clock_t jiffies_to_clock_t(unsigned long x);
317static inline clock_t jiffies_delta_to_clock_t(long delta)
318{
319 return jiffies_to_clock_t(max(0L, delta));
320}
321
316extern unsigned long clock_t_to_jiffies(unsigned long x); 322extern unsigned long clock_t_to_jiffies(unsigned long x);
317extern u64 jiffies_64_to_clock_t(u64 x); 323extern u64 jiffies_64_to_clock_t(u64 x);
318extern u64 nsec_to_clock_t(u64 x); 324extern u64 nsec_to_clock_t(u64 x);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 7cccafe50e7b..6c406845f7e2 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -377,5 +377,88 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
377extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, 377extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
378 struct mii_ioctl_data *mii_data, int cmd); 378 struct mii_ioctl_data *mii_data, int cmd);
379 379
380/**
381 * mmd_eee_cap_to_ethtool_sup_t
382 * @eee_cap: value of the MMD EEE Capability register
383 *
384 * A small helper function that translates MMD EEE Capability (3.20) bits
385 * to ethtool supported settings.
386 */
387static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
388{
389 u32 supported = 0;
390
391 if (eee_cap & MDIO_EEE_100TX)
392 supported |= SUPPORTED_100baseT_Full;
393 if (eee_cap & MDIO_EEE_1000T)
394 supported |= SUPPORTED_1000baseT_Full;
395 if (eee_cap & MDIO_EEE_10GT)
396 supported |= SUPPORTED_10000baseT_Full;
397 if (eee_cap & MDIO_EEE_1000KX)
398 supported |= SUPPORTED_1000baseKX_Full;
399 if (eee_cap & MDIO_EEE_10GKX4)
400 supported |= SUPPORTED_10000baseKX4_Full;
401 if (eee_cap & MDIO_EEE_10GKR)
402 supported |= SUPPORTED_10000baseKR_Full;
403
404 return supported;
405}
406
407/**
408 * mmd_eee_adv_to_ethtool_adv_t
409 * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
410 *
411 * A small helper function that translates the MMD EEE Advertisment (7.60)
412 * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
413 * settings.
414 */
415static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
416{
417 u32 adv = 0;
418
419 if (eee_adv & MDIO_EEE_100TX)
420 adv |= ADVERTISED_100baseT_Full;
421 if (eee_adv & MDIO_EEE_1000T)
422 adv |= ADVERTISED_1000baseT_Full;
423 if (eee_adv & MDIO_EEE_10GT)
424 adv |= ADVERTISED_10000baseT_Full;
425 if (eee_adv & MDIO_EEE_1000KX)
426 adv |= ADVERTISED_1000baseKX_Full;
427 if (eee_adv & MDIO_EEE_10GKX4)
428 adv |= ADVERTISED_10000baseKX4_Full;
429 if (eee_adv & MDIO_EEE_10GKR)
430 adv |= ADVERTISED_10000baseKR_Full;
431
432 return adv;
433}
434
435/**
436 * ethtool_adv_to_mmd_eee_adv_t
437 * @adv: the ethtool advertisement settings
438 *
439 * A small helper function that translates ethtool advertisement settings
440 * to EEE advertisements for the MMD EEE Advertisement (7.60) and
441 * MMD EEE Link Partner Ability (7.61) registers.
442 */
443static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
444{
445 u16 reg = 0;
446
447 if (adv & ADVERTISED_100baseT_Full)
448 reg |= MDIO_EEE_100TX;
449 if (adv & ADVERTISED_1000baseT_Full)
450 reg |= MDIO_EEE_1000T;
451 if (adv & ADVERTISED_10000baseT_Full)
452 reg |= MDIO_EEE_10GT;
453 if (adv & ADVERTISED_1000baseKX_Full)
454 reg |= MDIO_EEE_1000KX;
455 if (adv & ADVERTISED_10000baseKX4_Full)
456 reg |= MDIO_EEE_10GKX4;
457 if (adv & ADVERTISED_10000baseKR_Full)
458 reg |= MDIO_EEE_10GKR;
459
460 return reg;
461}
462
380#endif /* __KERNEL__ */ 463#endif /* __KERNEL__ */
381#endif /* __LINUX_MDIO_H__ */ 464#endif /* __LINUX_MDIO_H__ */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5f49cc0a107e..01646aa53b0e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -338,18 +338,16 @@ struct napi_struct {
338 338
339 unsigned long state; 339 unsigned long state;
340 int weight; 340 int weight;
341 unsigned int gro_count;
341 int (*poll)(struct napi_struct *, int); 342 int (*poll)(struct napi_struct *, int);
342#ifdef CONFIG_NETPOLL 343#ifdef CONFIG_NETPOLL
343 spinlock_t poll_lock; 344 spinlock_t poll_lock;
344 int poll_owner; 345 int poll_owner;
345#endif 346#endif
346
347 unsigned int gro_count;
348
349 struct net_device *dev; 347 struct net_device *dev;
350 struct list_head dev_list;
351 struct sk_buff *gro_list; 348 struct sk_buff *gro_list;
352 struct sk_buff *skb; 349 struct sk_buff *skb;
350 struct list_head dev_list;
353}; 351};
354 352
355enum { 353enum {
@@ -906,11 +904,12 @@ struct netdev_fcoe_hbainfo {
906 * feature set might be less than what was returned by ndo_fix_features()). 904 * feature set might be less than what was returned by ndo_fix_features()).
907 * Must return >0 or -errno if it changed dev->features itself. 905 * Must return >0 or -errno if it changed dev->features itself.
908 * 906 *
909 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev, 907 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
910 * unsigned char *addr, u16 flags) 908 * struct net_device *dev,
909 * const unsigned char *addr, u16 flags)
911 * Adds an FDB entry to dev for addr. 910 * Adds an FDB entry to dev for addr.
912 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev, 911 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
913 * unsigned char *addr) 912 * const unsigned char *addr)
914 * Deletes the FDB entry from dev coresponding to addr. 913 * Deletes the FDB entry from dev coresponding to addr.
915 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 914 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
916 * struct net_device *dev, int idx) 915 * struct net_device *dev, int idx)
@@ -1016,12 +1015,13 @@ struct net_device_ops {
1016 void (*ndo_neigh_destroy)(struct neighbour *n); 1015 void (*ndo_neigh_destroy)(struct neighbour *n);
1017 1016
1018 int (*ndo_fdb_add)(struct ndmsg *ndm, 1017 int (*ndo_fdb_add)(struct ndmsg *ndm,
1018 struct nlattr *tb[],
1019 struct net_device *dev, 1019 struct net_device *dev,
1020 unsigned char *addr, 1020 const unsigned char *addr,
1021 u16 flags); 1021 u16 flags);
1022 int (*ndo_fdb_del)(struct ndmsg *ndm, 1022 int (*ndo_fdb_del)(struct ndmsg *ndm,
1023 struct net_device *dev, 1023 struct net_device *dev,
1024 unsigned char *addr); 1024 const unsigned char *addr);
1025 int (*ndo_fdb_dump)(struct sk_buff *skb, 1025 int (*ndo_fdb_dump)(struct sk_buff *skb,
1026 struct netlink_callback *cb, 1026 struct netlink_callback *cb,
1027 struct net_device *dev, 1027 struct net_device *dev,
@@ -1322,6 +1322,8 @@ struct net_device {
1322 /* phy device may attach itself for hardware timestamping */ 1322 /* phy device may attach itself for hardware timestamping */
1323 struct phy_device *phydev; 1323 struct phy_device *phydev;
1324 1324
1325 struct lock_class_key *qdisc_tx_busylock;
1326
1325 /* group the device belongs to */ 1327 /* group the device belongs to */
1326 int group; 1328 int group;
1327 1329
@@ -1401,6 +1403,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
1401 f(dev, &dev->_tx[i], arg); 1403 f(dev, &dev->_tx[i], arg);
1402} 1404}
1403 1405
1406extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1407 struct sk_buff *skb);
1408
1404/* 1409/*
1405 * Net namespace inlines 1410 * Net namespace inlines
1406 */ 1411 */
@@ -1553,7 +1558,7 @@ struct packet_type {
1553#define NETDEV_PRE_TYPE_CHANGE 0x000E 1558#define NETDEV_PRE_TYPE_CHANGE 0x000E
1554#define NETDEV_POST_TYPE_CHANGE 0x000F 1559#define NETDEV_POST_TYPE_CHANGE 0x000F
1555#define NETDEV_POST_INIT 0x0010 1560#define NETDEV_POST_INIT 0x0010
1556#define NETDEV_UNREGISTER_BATCH 0x0011 1561#define NETDEV_UNREGISTER_FINAL 0x0011
1557#define NETDEV_RELEASE 0x0012 1562#define NETDEV_RELEASE 0x0012
1558#define NETDEV_NOTIFY_PEERS 0x0013 1563#define NETDEV_NOTIFY_PEERS 0x0013
1559#define NETDEV_JOIN 0x0014 1564#define NETDEV_JOIN 0x0014
@@ -2227,6 +2232,7 @@ static inline void dev_hold(struct net_device *dev)
2227 * kind of lower layer not just hardware media. 2232 * kind of lower layer not just hardware media.
2228 */ 2233 */
2229 2234
2235extern void linkwatch_init_dev(struct net_device *dev);
2230extern void linkwatch_fire_event(struct net_device *dev); 2236extern void linkwatch_fire_event(struct net_device *dev);
2231extern void linkwatch_forget_dev(struct net_device *dev); 2237extern void linkwatch_forget_dev(struct net_device *dev);
2232 2238
@@ -2249,8 +2255,6 @@ extern void netif_carrier_on(struct net_device *dev);
2249 2255
2250extern void netif_carrier_off(struct net_device *dev); 2256extern void netif_carrier_off(struct net_device *dev);
2251 2257
2252extern void netif_notify_peers(struct net_device *dev);
2253
2254/** 2258/**
2255 * netif_dormant_on - mark device as dormant. 2259 * netif_dormant_on - mark device as dormant.
2256 * @dev: network device 2260 * @dev: network device
@@ -2560,9 +2564,9 @@ extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2560extern void __hw_addr_init(struct netdev_hw_addr_list *list); 2564extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2561 2565
2562/* Functions used for device addresses handling */ 2566/* Functions used for device addresses handling */
2563extern int dev_addr_add(struct net_device *dev, unsigned char *addr, 2567extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
2564 unsigned char addr_type); 2568 unsigned char addr_type);
2565extern int dev_addr_del(struct net_device *dev, unsigned char *addr, 2569extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
2566 unsigned char addr_type); 2570 unsigned char addr_type);
2567extern int dev_addr_add_multiple(struct net_device *to_dev, 2571extern int dev_addr_add_multiple(struct net_device *to_dev,
2568 struct net_device *from_dev, 2572 struct net_device *from_dev,
@@ -2574,20 +2578,20 @@ extern void dev_addr_flush(struct net_device *dev);
2574extern int dev_addr_init(struct net_device *dev); 2578extern int dev_addr_init(struct net_device *dev);
2575 2579
2576/* Functions used for unicast addresses handling */ 2580/* Functions used for unicast addresses handling */
2577extern int dev_uc_add(struct net_device *dev, unsigned char *addr); 2581extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2578extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr); 2582extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2579extern int dev_uc_del(struct net_device *dev, unsigned char *addr); 2583extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
2580extern int dev_uc_sync(struct net_device *to, struct net_device *from); 2584extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2581extern void dev_uc_unsync(struct net_device *to, struct net_device *from); 2585extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2582extern void dev_uc_flush(struct net_device *dev); 2586extern void dev_uc_flush(struct net_device *dev);
2583extern void dev_uc_init(struct net_device *dev); 2587extern void dev_uc_init(struct net_device *dev);
2584 2588
2585/* Functions used for multicast addresses handling */ 2589/* Functions used for multicast addresses handling */
2586extern int dev_mc_add(struct net_device *dev, unsigned char *addr); 2590extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2587extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); 2591extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2588extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr); 2592extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2589extern int dev_mc_del(struct net_device *dev, unsigned char *addr); 2593extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2590extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); 2594extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
2591extern int dev_mc_sync(struct net_device *to, struct net_device *from); 2595extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2592extern void dev_mc_unsync(struct net_device *to, struct net_device *from); 2596extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2593extern void dev_mc_flush(struct net_device *dev); 2597extern void dev_mc_flush(struct net_device *dev);
@@ -2599,8 +2603,7 @@ extern void __dev_set_rx_mode(struct net_device *dev);
2599extern int dev_set_promiscuity(struct net_device *dev, int inc); 2603extern int dev_set_promiscuity(struct net_device *dev, int inc);
2600extern int dev_set_allmulti(struct net_device *dev, int inc); 2604extern int dev_set_allmulti(struct net_device *dev, int inc);
2601extern void netdev_state_change(struct net_device *dev); 2605extern void netdev_state_change(struct net_device *dev);
2602extern int netdev_bonding_change(struct net_device *dev, 2606extern void netdev_notify_peers(struct net_device *dev);
2603 unsigned long event);
2604extern void netdev_features_change(struct net_device *dev); 2607extern void netdev_features_change(struct net_device *dev);
2605/* Load a device via the kmod */ 2608/* Load a device via the kmod */
2606extern void dev_load(struct net *net, const char *name); 2609extern void dev_load(struct net *net, const char *name);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index c613cf0d7884..1dcf2a38e51f 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -342,7 +342,7 @@ extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
342extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); 342extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
343 343
344#include <net/flow.h> 344#include <net/flow.h>
345extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); 345extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
346 346
347static inline void 347static inline void
348nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) 348nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
@@ -350,13 +350,11 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
350#ifdef CONFIG_NF_NAT_NEEDED 350#ifdef CONFIG_NF_NAT_NEEDED
351 void (*decodefn)(struct sk_buff *, struct flowi *); 351 void (*decodefn)(struct sk_buff *, struct flowi *);
352 352
353 if (family == AF_INET) { 353 rcu_read_lock();
354 rcu_read_lock(); 354 decodefn = rcu_dereference(nf_nat_decode_session_hook);
355 decodefn = rcu_dereference(ip_nat_decode_session); 355 if (decodefn)
356 if (decodefn) 356 decodefn(skb, fl);
357 decodefn(skb, fl); 357 rcu_read_unlock();
358 rcu_read_unlock();
359 }
360#endif 358#endif
361} 359}
362 360
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 2edc64cab739..528697b3c152 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -190,6 +190,7 @@ enum ip_set_dim {
190 * If changed, new revision of iptables match/target is required. 190 * If changed, new revision of iptables match/target is required.
191 */ 191 */
192 IPSET_DIM_MAX = 6, 192 IPSET_DIM_MAX = 6,
193 IPSET_BIT_RETURN_NOMATCH = 7,
193}; 194};
194 195
195/* Option flags for kernel operations */ 196/* Option flags for kernel operations */
@@ -198,6 +199,7 @@ enum ip_set_kopt {
198 IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE), 199 IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
199 IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO), 200 IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
200 IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE), 201 IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
202 IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH),
201}; 203};
202 204
203#ifdef __KERNEL__ 205#ifdef __KERNEL__
@@ -206,9 +208,15 @@ enum ip_set_kopt {
206#include <linux/netlink.h> 208#include <linux/netlink.h>
207#include <linux/netfilter.h> 209#include <linux/netfilter.h>
208#include <linux/netfilter/x_tables.h> 210#include <linux/netfilter/x_tables.h>
211#include <linux/stringify.h>
209#include <linux/vmalloc.h> 212#include <linux/vmalloc.h>
210#include <net/netlink.h> 213#include <net/netlink.h>
211 214
215#define _IP_SET_MODULE_DESC(a, b, c) \
216 MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
217#define IP_SET_MODULE_DESC(a, b, c) \
218 _IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))
219
212/* Set features */ 220/* Set features */
213enum ip_set_feature { 221enum ip_set_feature {
214 IPSET_TYPE_IP_FLAG = 0, 222 IPSET_TYPE_IP_FLAG = 0,
@@ -223,6 +231,8 @@ enum ip_set_feature {
223 IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG), 231 IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
224 IPSET_TYPE_IFACE_FLAG = 5, 232 IPSET_TYPE_IFACE_FLAG = 5,
225 IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG), 233 IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
234 IPSET_TYPE_NOMATCH_FLAG = 6,
235 IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
226 /* Strictly speaking not a feature, but a flag for dumping: 236 /* Strictly speaking not a feature, but a flag for dumping:
227 * this settype must be dumped last */ 237 * this settype must be dumped last */
228 IPSET_DUMP_LAST_FLAG = 7, 238 IPSET_DUMP_LAST_FLAG = 7,
@@ -249,7 +259,7 @@ struct ip_set_type_variant {
249 * returns negative error code, 259 * returns negative error code,
250 * zero for no match/success to add/delete 260 * zero for no match/success to add/delete
251 * positive for matching element */ 261 * positive for matching element */
252 int (*kadt)(struct ip_set *set, const struct sk_buff * skb, 262 int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
253 const struct xt_action_param *par, 263 const struct xt_action_param *par,
254 enum ipset_adt adt, const struct ip_set_adt_opt *opt); 264 enum ipset_adt adt, const struct ip_set_adt_opt *opt);
255 265
@@ -424,7 +434,8 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
424 return ret; 434 return ret;
425} 435}
426 436
427static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr) 437static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
438 const struct in6_addr *ipaddrptr)
428{ 439{
429 struct nlattr *__nested = ipset_nest_start(skb, type); 440 struct nlattr *__nested = ipset_nest_start(skb, type);
430 int ret; 441 int ret;
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index b114d35aea5e..ef9acd3c8450 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -137,50 +137,59 @@ htable_bits(u32 hashsize)
137#endif 137#endif
138 138
139#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128) 139#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
140#ifdef IP_SET_HASH_WITH_MULTI
141#define NETS_LENGTH(family) (SET_HOST_MASK(family) + 1)
142#else
143#define NETS_LENGTH(family) SET_HOST_MASK(family)
144#endif
140 145
141/* Network cidr size book keeping when the hash stores different 146/* Network cidr size book keeping when the hash stores different
142 * sized networks */ 147 * sized networks */
143static void 148static void
144add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) 149add_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
145{ 150{
146 u8 i; 151 int i, j;
147
148 ++h->nets[cidr-1].nets;
149
150 pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
151 152
152 if (h->nets[cidr-1].nets > 1) 153 /* Add in increasing prefix order, so larger cidr first */
153 return; 154 for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
154 155 if (j != -1)
155 /* New cidr size */ 156 continue;
156 for (i = 0; i < host_mask && h->nets[i].cidr; i++) { 157 else if (h->nets[i].cidr < cidr)
157 /* Add in increasing prefix order, so larger cidr first */ 158 j = i;
158 if (h->nets[i].cidr < cidr) 159 else if (h->nets[i].cidr == cidr) {
159 swap(h->nets[i].cidr, cidr); 160 h->nets[i].nets++;
161 return;
162 }
163 }
164 if (j != -1) {
165 for (; i > j; i--) {
166 h->nets[i].cidr = h->nets[i - 1].cidr;
167 h->nets[i].nets = h->nets[i - 1].nets;
168 }
160 } 169 }
161 if (i < host_mask) 170 h->nets[i].cidr = cidr;
162 h->nets[i].cidr = cidr; 171 h->nets[i].nets = 1;
163} 172}
164 173
165static void 174static void
166del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask) 175del_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
167{ 176{
168 u8 i; 177 u8 i, j;
169
170 --h->nets[cidr-1].nets;
171 178
172 pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets); 179 for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++)
180 ;
181 h->nets[i].nets--;
173 182
174 if (h->nets[cidr-1].nets != 0) 183 if (h->nets[i].nets != 0)
175 return; 184 return;
176 185
177 /* All entries with this cidr size deleted, so cleanup h->cidr[] */ 186 for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) {
178 for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) { 187 h->nets[j].cidr = h->nets[j + 1].cidr;
179 if (h->nets[i].cidr == cidr) 188 h->nets[j].nets = h->nets[j + 1].nets;
180 h->nets[i].cidr = cidr = h->nets[i+1].cidr;
181 } 189 }
182 h->nets[i - 1].cidr = 0;
183} 190}
191#else
192#define NETS_LENGTH(family) 0
184#endif 193#endif
185 194
186/* Destroy the hashtable part of the set */ 195/* Destroy the hashtable part of the set */
@@ -202,14 +211,14 @@ ahash_destroy(struct htable *t)
202 211
203/* Calculate the actual memory size of the set data */ 212/* Calculate the actual memory size of the set data */
204static size_t 213static size_t
205ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask) 214ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 nets_length)
206{ 215{
207 u32 i; 216 u32 i;
208 struct htable *t = h->table; 217 struct htable *t = h->table;
209 size_t memsize = sizeof(*h) 218 size_t memsize = sizeof(*h)
210 + sizeof(*t) 219 + sizeof(*t)
211#ifdef IP_SET_HASH_WITH_NETS 220#ifdef IP_SET_HASH_WITH_NETS
212 + sizeof(struct ip_set_hash_nets) * host_mask 221 + sizeof(struct ip_set_hash_nets) * nets_length
213#endif 222#endif
214 + jhash_size(t->htable_bits) * sizeof(struct hbucket); 223 + jhash_size(t->htable_bits) * sizeof(struct hbucket);
215 224
@@ -238,7 +247,7 @@ ip_set_hash_flush(struct ip_set *set)
238 } 247 }
239#ifdef IP_SET_HASH_WITH_NETS 248#ifdef IP_SET_HASH_WITH_NETS
240 memset(h->nets, 0, sizeof(struct ip_set_hash_nets) 249 memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
241 * SET_HOST_MASK(set->family)); 250 * NETS_LENGTH(set->family));
242#endif 251#endif
243 h->elements = 0; 252 h->elements = 0;
244} 253}
@@ -271,9 +280,6 @@ ip_set_hash_destroy(struct ip_set *set)
271(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ 280(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \
272 & jhash_mask(htable_bits)) 281 & jhash_mask(htable_bits))
273 282
274#define CONCAT(a, b, c) a##b##c
275#define TOKEN(a, b, c) CONCAT(a, b, c)
276
277/* Type/family dependent function prototypes */ 283/* Type/family dependent function prototypes */
278 284
279#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal) 285#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal)
@@ -478,7 +484,7 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
478 } 484 }
479 485
480#ifdef IP_SET_HASH_WITH_NETS 486#ifdef IP_SET_HASH_WITH_NETS
481 add_cidr(h, CIDR(d->cidr), HOST_MASK); 487 add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
482#endif 488#endif
483 h->elements++; 489 h->elements++;
484out: 490out:
@@ -513,7 +519,7 @@ type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
513 n->pos--; 519 n->pos--;
514 h->elements--; 520 h->elements--;
515#ifdef IP_SET_HASH_WITH_NETS 521#ifdef IP_SET_HASH_WITH_NETS
516 del_cidr(h, CIDR(d->cidr), HOST_MASK); 522 del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
517#endif 523#endif
518 if (n->pos + AHASH_INIT_SIZE < n->size) { 524 if (n->pos + AHASH_INIT_SIZE < n->size) {
519 void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) 525 void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -546,10 +552,10 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
546 const struct type_pf_elem *data; 552 const struct type_pf_elem *data;
547 int i, j = 0; 553 int i, j = 0;
548 u32 key, multi = 0; 554 u32 key, multi = 0;
549 u8 host_mask = SET_HOST_MASK(set->family); 555 u8 nets_length = NETS_LENGTH(set->family);
550 556
551 pr_debug("test by nets\n"); 557 pr_debug("test by nets\n");
552 for (; j < host_mask && h->nets[j].cidr && !multi; j++) { 558 for (; j < nets_length && h->nets[j].nets && !multi; j++) {
553 type_pf_data_netmask(d, h->nets[j].cidr); 559 type_pf_data_netmask(d, h->nets[j].cidr);
554 key = HKEY(d, h->initval, t->htable_bits); 560 key = HKEY(d, h->initval, t->htable_bits);
555 n = hbucket(t, key); 561 n = hbucket(t, key);
@@ -604,7 +610,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
604 memsize = ahash_memsize(h, with_timeout(h->timeout) 610 memsize = ahash_memsize(h, with_timeout(h->timeout)
605 ? sizeof(struct type_pf_telem) 611 ? sizeof(struct type_pf_telem)
606 : sizeof(struct type_pf_elem), 612 : sizeof(struct type_pf_elem),
607 set->family == AF_INET ? 32 : 128); 613 NETS_LENGTH(set->family));
608 read_unlock_bh(&set->lock); 614 read_unlock_bh(&set->lock);
609 615
610 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 616 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
@@ -690,7 +696,7 @@ nla_put_failure:
690} 696}
691 697
692static int 698static int
693type_pf_kadt(struct ip_set *set, const struct sk_buff * skb, 699type_pf_kadt(struct ip_set *set, const struct sk_buff *skb,
694 const struct xt_action_param *par, 700 const struct xt_action_param *par,
695 enum ipset_adt adt, const struct ip_set_adt_opt *opt); 701 enum ipset_adt adt, const struct ip_set_adt_opt *opt);
696static int 702static int
@@ -783,7 +789,7 @@ type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
783 789
784/* Delete expired elements from the hashtable */ 790/* Delete expired elements from the hashtable */
785static void 791static void
786type_pf_expire(struct ip_set_hash *h) 792type_pf_expire(struct ip_set_hash *h, u8 nets_length)
787{ 793{
788 struct htable *t = h->table; 794 struct htable *t = h->table;
789 struct hbucket *n; 795 struct hbucket *n;
@@ -798,7 +804,7 @@ type_pf_expire(struct ip_set_hash *h)
798 if (type_pf_data_expired(data)) { 804 if (type_pf_data_expired(data)) {
799 pr_debug("expired %u/%u\n", i, j); 805 pr_debug("expired %u/%u\n", i, j);
800#ifdef IP_SET_HASH_WITH_NETS 806#ifdef IP_SET_HASH_WITH_NETS
801 del_cidr(h, CIDR(data->cidr), HOST_MASK); 807 del_cidr(h, CIDR(data->cidr), nets_length);
802#endif 808#endif
803 if (j != n->pos - 1) 809 if (j != n->pos - 1)
804 /* Not last one */ 810 /* Not last one */
@@ -839,7 +845,7 @@ type_pf_tresize(struct ip_set *set, bool retried)
839 if (!retried) { 845 if (!retried) {
840 i = h->elements; 846 i = h->elements;
841 write_lock_bh(&set->lock); 847 write_lock_bh(&set->lock);
842 type_pf_expire(set->data); 848 type_pf_expire(set->data, NETS_LENGTH(set->family));
843 write_unlock_bh(&set->lock); 849 write_unlock_bh(&set->lock);
844 if (h->elements < i) 850 if (h->elements < i)
845 return 0; 851 return 0;
@@ -904,7 +910,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
904 910
905 if (h->elements >= h->maxelem) 911 if (h->elements >= h->maxelem)
906 /* FIXME: when set is full, we slow down here */ 912 /* FIXME: when set is full, we slow down here */
907 type_pf_expire(h); 913 type_pf_expire(h, NETS_LENGTH(set->family));
908 if (h->elements >= h->maxelem) { 914 if (h->elements >= h->maxelem) {
909 if (net_ratelimit()) 915 if (net_ratelimit())
910 pr_warning("Set %s is full, maxelem %u reached\n", 916 pr_warning("Set %s is full, maxelem %u reached\n",
@@ -933,8 +939,8 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
933 if (j != AHASH_MAX(h) + 1) { 939 if (j != AHASH_MAX(h) + 1) {
934 data = ahash_tdata(n, j); 940 data = ahash_tdata(n, j);
935#ifdef IP_SET_HASH_WITH_NETS 941#ifdef IP_SET_HASH_WITH_NETS
936 del_cidr(h, CIDR(data->cidr), HOST_MASK); 942 del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
937 add_cidr(h, CIDR(d->cidr), HOST_MASK); 943 add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
938#endif 944#endif
939 type_pf_data_copy(data, d); 945 type_pf_data_copy(data, d);
940 type_pf_data_timeout_set(data, timeout); 946 type_pf_data_timeout_set(data, timeout);
@@ -952,7 +958,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
952 } 958 }
953 959
954#ifdef IP_SET_HASH_WITH_NETS 960#ifdef IP_SET_HASH_WITH_NETS
955 add_cidr(h, CIDR(d->cidr), HOST_MASK); 961 add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
956#endif 962#endif
957 h->elements++; 963 h->elements++;
958out: 964out:
@@ -986,7 +992,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
986 n->pos--; 992 n->pos--;
987 h->elements--; 993 h->elements--;
988#ifdef IP_SET_HASH_WITH_NETS 994#ifdef IP_SET_HASH_WITH_NETS
989 del_cidr(h, CIDR(d->cidr), HOST_MASK); 995 del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
990#endif 996#endif
991 if (n->pos + AHASH_INIT_SIZE < n->size) { 997 if (n->pos + AHASH_INIT_SIZE < n->size) {
992 void *tmp = kzalloc((n->size - AHASH_INIT_SIZE) 998 void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -1016,9 +1022,9 @@ type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
1016 struct hbucket *n; 1022 struct hbucket *n;
1017 int i, j = 0; 1023 int i, j = 0;
1018 u32 key, multi = 0; 1024 u32 key, multi = 0;
1019 u8 host_mask = SET_HOST_MASK(set->family); 1025 u8 nets_length = NETS_LENGTH(set->family);
1020 1026
1021 for (; j < host_mask && h->nets[j].cidr && !multi; j++) { 1027 for (; j < nets_length && h->nets[j].nets && !multi; j++) {
1022 type_pf_data_netmask(d, h->nets[j].cidr); 1028 type_pf_data_netmask(d, h->nets[j].cidr);
1023 key = HKEY(d, h->initval, t->htable_bits); 1029 key = HKEY(d, h->initval, t->htable_bits);
1024 n = hbucket(t, key); 1030 n = hbucket(t, key);
@@ -1147,7 +1153,7 @@ type_pf_gc(unsigned long ul_set)
1147 1153
1148 pr_debug("called\n"); 1154 pr_debug("called\n");
1149 write_lock_bh(&set->lock); 1155 write_lock_bh(&set->lock);
1150 type_pf_expire(h); 1156 type_pf_expire(h, NETS_LENGTH(set->family));
1151 write_unlock_bh(&set->lock); 1157 write_unlock_bh(&set->lock);
1152 1158
1153 h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ; 1159 h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h
index 0bb5a6976bf3..4b59a1584959 100644
--- a/include/linux/netfilter/nf_conntrack_amanda.h
+++ b/include/linux/netfilter/nf_conntrack_amanda.h
@@ -4,6 +4,7 @@
4 4
5extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, 5extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
6 enum ip_conntrack_info ctinfo, 6 enum ip_conntrack_info ctinfo,
7 unsigned int protoff,
7 unsigned int matchoff, 8 unsigned int matchoff,
8 unsigned int matchlen, 9 unsigned int matchlen,
9 struct nf_conntrack_expect *exp); 10 struct nf_conntrack_expect *exp);
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
index 3e3aa08980c3..8faf3f792d13 100644
--- a/include/linux/netfilter/nf_conntrack_ftp.h
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -18,13 +18,17 @@ enum nf_ct_ftp_type {
18 18
19#define FTP_PORT 21 19#define FTP_PORT 21
20 20
21#define NF_CT_FTP_SEQ_PICKUP (1 << 0)
22
21#define NUM_SEQ_TO_REMEMBER 2 23#define NUM_SEQ_TO_REMEMBER 2
22/* This structure exists only once per master */ 24/* This structure exists only once per master */
23struct nf_ct_ftp_master { 25struct nf_ct_ftp_master {
24 /* Valid seq positions for cmd matching after newline */ 26 /* Valid seq positions for cmd matching after newline */
25 u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER]; 27 u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
26 /* 0 means seq_match_aft_nl not set */ 28 /* 0 means seq_match_aft_nl not set */
27 int seq_aft_nl_num[IP_CT_DIR_MAX]; 29 u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX];
30 /* pickup sequence tracking, useful for conntrackd */
31 u_int16_t flags[IP_CT_DIR_MAX];
28}; 32};
29 33
30struct nf_conntrack_expect; 34struct nf_conntrack_expect;
@@ -34,6 +38,7 @@ struct nf_conntrack_expect;
34extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, 38extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
35 enum ip_conntrack_info ctinfo, 39 enum ip_conntrack_info ctinfo,
36 enum nf_ct_ftp_type type, 40 enum nf_ct_ftp_type type,
41 unsigned int protoff,
37 unsigned int matchoff, 42 unsigned int matchoff,
38 unsigned int matchlen, 43 unsigned int matchlen,
39 struct nf_conntrack_expect *exp); 44 struct nf_conntrack_expect *exp);
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 26f9226ea72b..f381020eee92 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -36,12 +36,12 @@ extern void nf_conntrack_h245_expect(struct nf_conn *new,
36 struct nf_conntrack_expect *this); 36 struct nf_conntrack_expect *this);
37extern void nf_conntrack_q931_expect(struct nf_conn *new, 37extern void nf_conntrack_q931_expect(struct nf_conn *new,
38 struct nf_conntrack_expect *this); 38 struct nf_conntrack_expect *this);
39extern int (*set_h245_addr_hook) (struct sk_buff *skb, 39extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
40 unsigned char **data, int dataoff, 40 unsigned char **data, int dataoff,
41 H245_TransportAddress *taddr, 41 H245_TransportAddress *taddr,
42 union nf_inet_addr *addr, 42 union nf_inet_addr *addr,
43 __be16 port); 43 __be16 port);
44extern int (*set_h225_addr_hook) (struct sk_buff *skb, 44extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
45 unsigned char **data, int dataoff, 45 unsigned char **data, int dataoff,
46 TransportAddress *taddr, 46 TransportAddress *taddr,
47 union nf_inet_addr *addr, 47 union nf_inet_addr *addr,
@@ -49,40 +49,45 @@ extern int (*set_h225_addr_hook) (struct sk_buff *skb,
49extern int (*set_sig_addr_hook) (struct sk_buff *skb, 49extern int (*set_sig_addr_hook) (struct sk_buff *skb,
50 struct nf_conn *ct, 50 struct nf_conn *ct,
51 enum ip_conntrack_info ctinfo, 51 enum ip_conntrack_info ctinfo,
52 unsigned char **data, 52 unsigned int protoff, unsigned char **data,
53 TransportAddress *taddr, int count); 53 TransportAddress *taddr, int count);
54extern int (*set_ras_addr_hook) (struct sk_buff *skb, 54extern int (*set_ras_addr_hook) (struct sk_buff *skb,
55 struct nf_conn *ct, 55 struct nf_conn *ct,
56 enum ip_conntrack_info ctinfo, 56 enum ip_conntrack_info ctinfo,
57 unsigned char **data, 57 unsigned int protoff, unsigned char **data,
58 TransportAddress *taddr, int count); 58 TransportAddress *taddr, int count);
59extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, 59extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
60 struct nf_conn *ct, 60 struct nf_conn *ct,
61 enum ip_conntrack_info ctinfo, 61 enum ip_conntrack_info ctinfo,
62 unsigned char **data, int dataoff, 62 unsigned int protoff, unsigned char **data,
63 int dataoff,
63 H245_TransportAddress *taddr, 64 H245_TransportAddress *taddr,
64 __be16 port, __be16 rtp_port, 65 __be16 port, __be16 rtp_port,
65 struct nf_conntrack_expect *rtp_exp, 66 struct nf_conntrack_expect *rtp_exp,
66 struct nf_conntrack_expect *rtcp_exp); 67 struct nf_conntrack_expect *rtcp_exp);
67extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, 68extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
68 enum ip_conntrack_info ctinfo, 69 enum ip_conntrack_info ctinfo,
70 unsigned int protoff,
69 unsigned char **data, int dataoff, 71 unsigned char **data, int dataoff,
70 H245_TransportAddress *taddr, __be16 port, 72 H245_TransportAddress *taddr, __be16 port,
71 struct nf_conntrack_expect *exp); 73 struct nf_conntrack_expect *exp);
72extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, 74extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
73 enum ip_conntrack_info ctinfo, 75 enum ip_conntrack_info ctinfo,
76 unsigned int protoff,
74 unsigned char **data, int dataoff, 77 unsigned char **data, int dataoff,
75 TransportAddress *taddr, __be16 port, 78 TransportAddress *taddr, __be16 port,
76 struct nf_conntrack_expect *exp); 79 struct nf_conntrack_expect *exp);
77extern int (*nat_callforwarding_hook) (struct sk_buff *skb, 80extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
78 struct nf_conn *ct, 81 struct nf_conn *ct,
79 enum ip_conntrack_info ctinfo, 82 enum ip_conntrack_info ctinfo,
83 unsigned int protoff,
80 unsigned char **data, int dataoff, 84 unsigned char **data, int dataoff,
81 TransportAddress *taddr, 85 TransportAddress *taddr,
82 __be16 port, 86 __be16 port,
83 struct nf_conntrack_expect *exp); 87 struct nf_conntrack_expect *exp);
84extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, 88extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
85 enum ip_conntrack_info ctinfo, 89 enum ip_conntrack_info ctinfo,
90 unsigned int protoff,
86 unsigned char **data, TransportAddress *taddr, 91 unsigned char **data, TransportAddress *taddr,
87 int idx, __be16 port, 92 int idx, __be16 port,
88 struct nf_conntrack_expect *exp); 93 struct nf_conntrack_expect *exp);
diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h
index 36282bf71b63..4bb9bae67176 100644
--- a/include/linux/netfilter/nf_conntrack_irc.h
+++ b/include/linux/netfilter/nf_conntrack_irc.h
@@ -7,6 +7,7 @@
7 7
8extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, 8extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
9 enum ip_conntrack_info ctinfo, 9 enum ip_conntrack_info ctinfo,
10 unsigned int protoff,
10 unsigned int matchoff, 11 unsigned int matchoff,
11 unsigned int matchlen, 12 unsigned int matchlen,
12 struct nf_conntrack_expect *exp); 13 struct nf_conntrack_expect *exp);
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index 3bbde0c3a8a6..2ab2830316b7 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -303,12 +303,14 @@ struct nf_conntrack_expect;
303extern int 303extern int
304(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, 304(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
305 struct nf_conn *ct, enum ip_conntrack_info ctinfo, 305 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
306 unsigned int protoff,
306 struct PptpControlHeader *ctlh, 307 struct PptpControlHeader *ctlh,
307 union pptp_ctrl_union *pptpReq); 308 union pptp_ctrl_union *pptpReq);
308 309
309extern int 310extern int
310(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, 311(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
311 struct nf_conn *ct, enum ip_conntrack_info ctinfo, 312 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
313 unsigned int protoff,
312 struct PptpControlHeader *ctlh, 314 struct PptpControlHeader *ctlh,
313 union pptp_ctrl_union *pptpReq); 315 union pptp_ctrl_union *pptpReq);
314 316
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 89f2a627f3f0..387bdd02945d 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -37,10 +37,12 @@ struct sdp_media_type {
37struct sip_handler { 37struct sip_handler {
38 const char *method; 38 const char *method;
39 unsigned int len; 39 unsigned int len;
40 int (*request)(struct sk_buff *skb, unsigned int dataoff, 40 int (*request)(struct sk_buff *skb, unsigned int protoff,
41 unsigned int dataoff,
41 const char **dptr, unsigned int *datalen, 42 const char **dptr, unsigned int *datalen,
42 unsigned int cseq); 43 unsigned int cseq);
43 int (*response)(struct sk_buff *skb, unsigned int dataoff, 44 int (*response)(struct sk_buff *skb, unsigned int protoff,
45 unsigned int dataoff,
44 const char **dptr, unsigned int *datalen, 46 const char **dptr, unsigned int *datalen,
45 unsigned int cseq, unsigned int code); 47 unsigned int cseq, unsigned int code);
46}; 48};
@@ -97,19 +99,20 @@ enum sip_header_types {
97enum sdp_header_types { 99enum sdp_header_types {
98 SDP_HDR_UNSPEC, 100 SDP_HDR_UNSPEC,
99 SDP_HDR_VERSION, 101 SDP_HDR_VERSION,
100 SDP_HDR_OWNER_IP4, 102 SDP_HDR_OWNER,
101 SDP_HDR_CONNECTION_IP4, 103 SDP_HDR_CONNECTION,
102 SDP_HDR_OWNER_IP6,
103 SDP_HDR_CONNECTION_IP6,
104 SDP_HDR_MEDIA, 104 SDP_HDR_MEDIA,
105}; 105};
106 106
107extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, 107extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
108 unsigned int protoff,
108 unsigned int dataoff, 109 unsigned int dataoff,
109 const char **dptr, 110 const char **dptr,
110 unsigned int *datalen); 111 unsigned int *datalen);
111extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off); 112extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
113 unsigned int protoff, s16 off);
112extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, 114extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
115 unsigned int protoff,
113 unsigned int dataoff, 116 unsigned int dataoff,
114 const char **dptr, 117 const char **dptr,
115 unsigned int *datalen, 118 unsigned int *datalen,
@@ -117,6 +120,7 @@ extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
117 unsigned int matchoff, 120 unsigned int matchoff,
118 unsigned int matchlen); 121 unsigned int matchlen);
119extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, 122extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
123 unsigned int protoff,
120 unsigned int dataoff, 124 unsigned int dataoff,
121 const char **dptr, 125 const char **dptr,
122 unsigned int *datalen, 126 unsigned int *datalen,
@@ -125,6 +129,7 @@ extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
125 enum sdp_header_types term, 129 enum sdp_header_types term,
126 const union nf_inet_addr *addr); 130 const union nf_inet_addr *addr);
127extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, 131extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
132 unsigned int protoff,
128 unsigned int dataoff, 133 unsigned int dataoff,
129 const char **dptr, 134 const char **dptr,
130 unsigned int *datalen, 135 unsigned int *datalen,
@@ -132,12 +137,14 @@ extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
132 unsigned int matchlen, 137 unsigned int matchlen,
133 u_int16_t port); 138 u_int16_t port);
134extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, 139extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
140 unsigned int protoff,
135 unsigned int dataoff, 141 unsigned int dataoff,
136 const char **dptr, 142 const char **dptr,
137 unsigned int *datalen, 143 unsigned int *datalen,
138 unsigned int sdpoff, 144 unsigned int sdpoff,
139 const union nf_inet_addr *addr); 145 const union nf_inet_addr *addr);
140extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, 146extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
147 unsigned int protoff,
141 unsigned int dataoff, 148 unsigned int dataoff,
142 const char **dptr, 149 const char **dptr,
143 unsigned int *datalen, 150 unsigned int *datalen,
diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h
index 8df2d13730b2..bf0cc373ffb6 100644
--- a/include/linux/netfilter/nf_nat.h
+++ b/include/linux/netfilter/nf_nat.h
@@ -22,4 +22,12 @@ struct nf_nat_ipv4_multi_range_compat {
22 struct nf_nat_ipv4_range range[1]; 22 struct nf_nat_ipv4_range range[1];
23}; 23};
24 24
25struct nf_nat_range {
26 unsigned int flags;
27 union nf_inet_addr min_addr;
28 union nf_inet_addr max_addr;
29 union nf_conntrack_man_proto min_proto;
30 union nf_conntrack_man_proto max_proto;
31};
32
25#endif /* _NETFILTER_NF_NAT_H */ 33#endif /* _NETFILTER_NF_NAT_H */
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index f649f7423ca2..43bfe3e1685b 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -142,9 +142,13 @@ enum ctattr_tstamp {
142 142
143enum ctattr_nat { 143enum ctattr_nat {
144 CTA_NAT_UNSPEC, 144 CTA_NAT_UNSPEC,
145 CTA_NAT_MINIP, 145 CTA_NAT_V4_MINIP,
146 CTA_NAT_MAXIP, 146#define CTA_NAT_MINIP CTA_NAT_V4_MINIP
147 CTA_NAT_V4_MAXIP,
148#define CTA_NAT_MAXIP CTA_NAT_V4_MAXIP
147 CTA_NAT_PROTO, 149 CTA_NAT_PROTO,
150 CTA_NAT_V6_MINIP,
151 CTA_NAT_V6_MAXIP,
148 __CTA_NAT_MAX 152 __CTA_NAT_MAX
149}; 153};
150#define CTA_NAT_MAX (__CTA_NAT_MAX - 1) 154#define CTA_NAT_MAX (__CTA_NAT_MAX - 1)
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 3b1c1360aedf..70ec8c2bc11a 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -44,6 +44,7 @@ enum nfqnl_attr_type {
44 NFQA_PAYLOAD, /* opaque data payload */ 44 NFQA_PAYLOAD, /* opaque data payload */
45 NFQA_CT, /* nf_conntrack_netlink.h */ 45 NFQA_CT, /* nf_conntrack_netlink.h */
46 NFQA_CT_INFO, /* enum ip_conntrack_info */ 46 NFQA_CT_INFO, /* enum ip_conntrack_info */
47 NFQA_CAP_LEN, /* __u32 length of captured packet */
47 48
48 __NFQA_MAX 49 __NFQA_MAX
49}; 50};
diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h
index 7c37fac576c4..095886019396 100644
--- a/include/linux/netfilter/xt_time.h
+++ b/include/linux/netfilter/xt_time.h
@@ -17,6 +17,9 @@ enum {
17 /* Match against local time (instead of UTC) */ 17 /* Match against local time (instead of UTC) */
18 XT_TIME_LOCAL_TZ = 1 << 0, 18 XT_TIME_LOCAL_TZ = 1 << 0,
19 19
20 /* treat timestart > timestop (e.g. 23:00-01:00) as single period */
21 XT_TIME_CONTIGUOUS = 1 << 1,
22
20 /* Shortcuts */ 23 /* Shortcuts */
21 XT_TIME_ALL_MONTHDAYS = 0xFFFFFFFE, 24 XT_TIME_ALL_MONTHDAYS = 0xFFFFFFFE,
22 XT_TIME_ALL_WEEKDAYS = 0xFE, 25 XT_TIME_ALL_WEEKDAYS = 0xFE,
@@ -24,4 +27,6 @@ enum {
24 XT_TIME_MAX_DAYTIME = 24 * 60 * 60 - 1, 27 XT_TIME_MAX_DAYTIME = 24 * 60 * 60 - 1,
25}; 28};
26 29
30#define XT_TIME_ALL_FLAGS (XT_TIME_LOCAL_TZ|XT_TIME_CONTIGUOUS)
31
27#endif /* _XT_TIME_H */ 32#endif /* _XT_TIME_H */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index e2b12801378d..b962dfc695ae 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -79,7 +79,6 @@ enum nf_ip_hook_priorities {
79 79
80#ifdef __KERNEL__ 80#ifdef __KERNEL__
81extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); 81extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
82extern int ip_xfrm_me_harder(struct sk_buff *skb);
83extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, 82extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
84 unsigned int dataoff, u_int8_t protocol); 83 unsigned int dataoff, u_int8_t protocol);
85#endif /*__KERNEL__*/ 84#endif /*__KERNEL__*/
diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild
index bd095bc075e9..b88c0058bf73 100644
--- a/include/linux/netfilter_ipv6/Kbuild
+++ b/include/linux/netfilter_ipv6/Kbuild
@@ -1,6 +1,7 @@
1header-y += ip6_tables.h 1header-y += ip6_tables.h
2header-y += ip6t_HL.h 2header-y += ip6t_HL.h
3header-y += ip6t_LOG.h 3header-y += ip6t_LOG.h
4header-y += ip6t_NPT.h
4header-y += ip6t_REJECT.h 5header-y += ip6t_REJECT.h
5header-y += ip6t_ah.h 6header-y += ip6t_ah.h
6header-y += ip6t_frag.h 7header-y += ip6t_frag.h
diff --git a/include/linux/netfilter_ipv6/ip6t_NPT.h b/include/linux/netfilter_ipv6/ip6t_NPT.h
new file mode 100644
index 000000000000..f763355481b5
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_NPT.h
@@ -0,0 +1,16 @@
1#ifndef __NETFILTER_IP6T_NPT
2#define __NETFILTER_IP6T_NPT
3
4#include <linux/types.h>
5#include <linux/netfilter.h>
6
7struct ip6t_npt_tginfo {
8 union nf_inet_addr src_pfx;
9 union nf_inet_addr dst_pfx;
10 __u8 src_pfx_len;
11 __u8 dst_pfx_len;
12 /* Used internally by the kernel */
13 __sum16 adjustment;
14};
15
16#endif /* __NETFILTER_IP6T_NPT */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index c9fdde2bc73f..f80c56ac4d82 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -153,6 +153,8 @@ struct nlattr {
153 153
154#include <linux/capability.h> 154#include <linux/capability.h>
155#include <linux/skbuff.h> 155#include <linux/skbuff.h>
156#include <linux/export.h>
157#include <net/scm.h>
156 158
157struct net; 159struct net;
158 160
@@ -162,8 +164,8 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
162} 164}
163 165
164struct netlink_skb_parms { 166struct netlink_skb_parms {
165 struct ucred creds; /* Skb credentials */ 167 struct scm_creds creds; /* Skb credentials */
166 __u32 pid; 168 __u32 portid;
167 __u32 dst_group; 169 __u32 dst_group;
168 struct sock *ssk; 170 struct sock *ssk;
169}; 171};
@@ -175,17 +177,27 @@ struct netlink_skb_parms {
175extern void netlink_table_grab(void); 177extern void netlink_table_grab(void);
176extern void netlink_table_ungrab(void); 178extern void netlink_table_ungrab(void);
177 179
180#define NL_CFG_F_NONROOT_RECV (1 << 0)
181#define NL_CFG_F_NONROOT_SEND (1 << 1)
182
178/* optional Netlink kernel configuration parameters */ 183/* optional Netlink kernel configuration parameters */
179struct netlink_kernel_cfg { 184struct netlink_kernel_cfg {
180 unsigned int groups; 185 unsigned int groups;
186 unsigned int flags;
181 void (*input)(struct sk_buff *skb); 187 void (*input)(struct sk_buff *skb);
182 struct mutex *cb_mutex; 188 struct mutex *cb_mutex;
183 void (*bind)(int group); 189 void (*bind)(int group);
184}; 190};
185 191
186extern struct sock *netlink_kernel_create(struct net *net, int unit, 192extern struct sock *__netlink_kernel_create(struct net *net, int unit,
187 struct module *module, 193 struct module *module,
188 struct netlink_kernel_cfg *cfg); 194 struct netlink_kernel_cfg *cfg);
195static inline struct sock *
196netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
197{
198 return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
199}
200
189extern void netlink_kernel_release(struct sock *sk); 201extern void netlink_kernel_release(struct sock *sk);
190extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); 202extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
191extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); 203extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
@@ -193,14 +205,14 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
193extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); 205extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
194extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 206extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
195extern int netlink_has_listeners(struct sock *sk, unsigned int group); 207extern int netlink_has_listeners(struct sock *sk, unsigned int group);
196extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); 208extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
197extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, 209extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
198 __u32 group, gfp_t allocation); 210 __u32 group, gfp_t allocation);
199extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, 211extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
200 __u32 pid, __u32 group, gfp_t allocation, 212 __u32 portid, __u32 group, gfp_t allocation,
201 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), 213 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
202 void *filter_data); 214 void *filter_data);
203extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); 215extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
204extern int netlink_register_notifier(struct notifier_block *nb); 216extern int netlink_register_notifier(struct notifier_block *nb);
205extern int netlink_unregister_notifier(struct notifier_block *nb); 217extern int netlink_unregister_notifier(struct notifier_block *nb);
206 218
@@ -241,12 +253,12 @@ struct netlink_callback {
241 253
242struct netlink_notify { 254struct netlink_notify {
243 struct net *net; 255 struct net *net;
244 int pid; 256 int portid;
245 int protocol; 257 int protocol;
246}; 258};
247 259
248struct nlmsghdr * 260struct nlmsghdr *
249__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags); 261__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
250 262
251struct netlink_dump_control { 263struct netlink_dump_control {
252 int (*dump)(struct sk_buff *skb, struct netlink_callback *); 264 int (*dump)(struct sk_buff *skb, struct netlink_callback *);
@@ -259,11 +271,6 @@ extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
259 const struct nlmsghdr *nlh, 271 const struct nlmsghdr *nlh,
260 struct netlink_dump_control *control); 272 struct netlink_dump_control *control);
261 273
262
263#define NL_NONROOT_RECV 0x1
264#define NL_NONROOT_SEND 0x2
265extern void netlink_set_nonroot(int protocol, unsigned flag);
266
267#endif /* __KERNEL__ */ 274#endif /* __KERNEL__ */
268 275
269#endif /* __LINUX_NETLINK_H */ 276#endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 6189f27e305b..d908d17da56d 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -183,4 +183,15 @@ struct sockaddr_nfc_llcp {
183 183
184#define NFC_HEADER_SIZE 1 184#define NFC_HEADER_SIZE 1
185 185
186/**
187 * Pseudo-header info for raw socket packets
188 * First byte is the adapter index
189 * Second byte contains flags
190 * - 0x01 - Direction (0=RX, 1=TX)
191 * - 0x02-0x80 - Reserved
192 **/
193#define NFC_LLCP_RAW_HEADER_SIZE 2
194#define NFC_LLCP_DIRECTION_RX 0x00
195#define NFC_LLCP_DIRECTION_TX 0x01
196
186#endif /*__LINUX_NFC_H */ 197#endif /*__LINUX_NFC_H */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2f3878806403..7df9b500c804 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -565,6 +565,19 @@
565 * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with 565 * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
566 * %NL80211_ATTR_WIPHY_CHANNEL_TYPE. 566 * %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
567 * 567 *
568 * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
569 * its %NL80211_ATTR_WDEV identifier. It must have been created with
570 * %NL80211_CMD_NEW_INTERFACE previously. After it has been started, the
571 * P2P Device can be used for P2P operations, e.g. remain-on-channel and
572 * public action frame TX.
573 * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by
574 * its %NL80211_ATTR_WDEV identifier.
575 *
576 * @NL80211_CMD_CONN_FAILED: connection request to an AP failed; used to
577 * notify userspace that AP has rejected the connection request from a
578 * station, due to particular reason. %NL80211_ATTR_CONN_FAILED_REASON
579 * is used for this.
580 *
568 * @NL80211_CMD_MAX: highest used command number 581 * @NL80211_CMD_MAX: highest used command number
569 * @__NL80211_CMD_AFTER_LAST: internal use 582 * @__NL80211_CMD_AFTER_LAST: internal use
570 */ 583 */
@@ -708,6 +721,11 @@ enum nl80211_commands {
708 721
709 NL80211_CMD_CH_SWITCH_NOTIFY, 722 NL80211_CMD_CH_SWITCH_NOTIFY,
710 723
724 NL80211_CMD_START_P2P_DEVICE,
725 NL80211_CMD_STOP_P2P_DEVICE,
726
727 NL80211_CMD_CONN_FAILED,
728
711 /* add new commands above here */ 729 /* add new commands above here */
712 730
713 /* used to define NL80211_CMD_MAX below */ 731 /* used to define NL80211_CMD_MAX below */
@@ -1251,6 +1269,10 @@ enum nl80211_commands {
1251 * was used to provide the hint. For the different types of 1269 * was used to provide the hint. For the different types of
1252 * allowed user regulatory hints see nl80211_user_reg_hint_type. 1270 * allowed user regulatory hints see nl80211_user_reg_hint_type.
1253 * 1271 *
1272 * @NL80211_ATTR_CONN_FAILED_REASON: The reason for which AP has rejected
1273 * the connection request from a station. nl80211_connect_failed_reason
1274 * enum has different reasons of connection failure.
1275 *
1254 * @NL80211_ATTR_MAX: highest attribute number currently defined 1276 * @NL80211_ATTR_MAX: highest attribute number currently defined
1255 * @__NL80211_ATTR_AFTER_LAST: internal use 1277 * @__NL80211_ATTR_AFTER_LAST: internal use
1256 */ 1278 */
@@ -1506,6 +1528,8 @@ enum nl80211_attrs {
1506 1528
1507 NL80211_ATTR_USER_REG_HINT_TYPE, 1529 NL80211_ATTR_USER_REG_HINT_TYPE,
1508 1530
1531 NL80211_ATTR_CONN_FAILED_REASON,
1532
1509 /* add attributes here, update the policy in nl80211.c */ 1533 /* add attributes here, update the policy in nl80211.c */
1510 1534
1511 __NL80211_ATTR_AFTER_LAST, 1535 __NL80211_ATTR_AFTER_LAST,
@@ -1575,6 +1599,10 @@ enum nl80211_attrs {
1575 * @NL80211_IFTYPE_MESH_POINT: mesh point 1599 * @NL80211_IFTYPE_MESH_POINT: mesh point
1576 * @NL80211_IFTYPE_P2P_CLIENT: P2P client 1600 * @NL80211_IFTYPE_P2P_CLIENT: P2P client
1577 * @NL80211_IFTYPE_P2P_GO: P2P group owner 1601 * @NL80211_IFTYPE_P2P_GO: P2P group owner
1602 * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev
1603 * and therefore can't be created in the normal ways, use the
1604 * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
1605 * commands to create and destroy one
1578 * @NL80211_IFTYPE_MAX: highest interface type number currently defined 1606 * @NL80211_IFTYPE_MAX: highest interface type number currently defined
1579 * @NUM_NL80211_IFTYPES: number of defined interface types 1607 * @NUM_NL80211_IFTYPES: number of defined interface types
1580 * 1608 *
@@ -1593,6 +1621,7 @@ enum nl80211_iftype {
1593 NL80211_IFTYPE_MESH_POINT, 1621 NL80211_IFTYPE_MESH_POINT,
1594 NL80211_IFTYPE_P2P_CLIENT, 1622 NL80211_IFTYPE_P2P_CLIENT,
1595 NL80211_IFTYPE_P2P_GO, 1623 NL80211_IFTYPE_P2P_GO,
1624 NL80211_IFTYPE_P2P_DEVICE,
1596 1625
1597 /* keep last */ 1626 /* keep last */
1598 NUM_NL80211_IFTYPES, 1627 NUM_NL80211_IFTYPES,
@@ -2994,12 +3023,18 @@ enum nl80211_ap_sme_features {
2994 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested 3023 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
2995 * to work properly to suppport receiving regulatory hints from 3024 * to work properly to suppport receiving regulatory hints from
2996 * cellular base stations. 3025 * cellular base stations.
3026 * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
3027 * P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
3028 * in the interface combinations, even when it's only used for scan
3029 * and remain-on-channel. This could be due to, for example, the
3030 * remain-on-channel implementation requiring a channel context.
2997 */ 3031 */
2998enum nl80211_feature_flags { 3032enum nl80211_feature_flags {
2999 NL80211_FEATURE_SK_TX_STATUS = 1 << 0, 3033 NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
3000 NL80211_FEATURE_HT_IBSS = 1 << 1, 3034 NL80211_FEATURE_HT_IBSS = 1 << 1,
3001 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, 3035 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
3002 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, 3036 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
3037 NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4,
3003}; 3038};
3004 3039
3005/** 3040/**
@@ -3023,4 +3058,15 @@ enum nl80211_probe_resp_offload_support_attr {
3023 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3, 3058 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3,
3024}; 3059};
3025 3060
3061/**
3062 * enum nl80211_connect_failed_reason - connection request failed reasons
3063 * @NL80211_CONN_FAIL_MAX_CLIENTS: Maximum number of clients that can be
3064 * handled by the AP is reached.
3065 * @NL80211_CONN_FAIL_BLOCKED_CLIENT: Client's MAC is in the AP's blocklist.
3066 */
3067enum nl80211_connect_failed_reason {
3068 NL80211_CONN_FAIL_MAX_CLIENTS,
3069 NL80211_CONN_FAIL_BLOCKED_CLIENT,
3070};
3071
3026#endif /* __LINUX_NL80211_H */ 3072#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 912c27a0f7ee..6ef49b803efb 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -12,6 +12,7 @@
12#include <linux/phy.h> 12#include <linux/phy.h>
13#include <linux/of.h> 13#include <linux/of.h>
14 14
15#ifdef CONFIG_OF
15extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); 16extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
16extern struct phy_device *of_phy_find_device(struct device_node *phy_np); 17extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
17extern struct phy_device *of_phy_connect(struct net_device *dev, 18extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -24,4 +25,36 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
24 25
25extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
26 27
28#else /* CONFIG_OF */
29int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
30{
31 return -ENOSYS;
32}
33
34struct phy_device *of_phy_find_device(struct device_node *phy_np)
35{
36 return NULL;
37}
38
39struct phy_device *of_phy_connect(struct net_device *dev,
40 struct device_node *phy_np,
41 void (*hndlr)(struct net_device *),
42 u32 flags, phy_interface_t iface)
43{
44 return NULL;
45}
46
47struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
48 void (*hndlr)(struct net_device *),
49 phy_interface_t iface)
50{
51 return NULL;
52}
53
54struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
55{
56 return NULL;
57}
58#endif /* CONFIG_OF */
59
27#endif /* __LINUX_OF_MDIO_H */ 60#endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/packet_diag.h b/include/linux/packet_diag.h
new file mode 100644
index 000000000000..93f5fa94a431
--- /dev/null
+++ b/include/linux/packet_diag.h
@@ -0,0 +1,72 @@
1#ifndef __PACKET_DIAG_H__
2#define __PACKET_DIAG_H__
3
4#include <linux/types.h>
5
6struct packet_diag_req {
7 __u8 sdiag_family;
8 __u8 sdiag_protocol;
9 __u16 pad;
10 __u32 pdiag_ino;
11 __u32 pdiag_show;
12 __u32 pdiag_cookie[2];
13};
14
15#define PACKET_SHOW_INFO 0x00000001 /* Basic packet_sk information */
16#define PACKET_SHOW_MCLIST 0x00000002 /* A set of packet_diag_mclist-s */
17#define PACKET_SHOW_RING_CFG 0x00000004 /* Rings configuration parameters */
18#define PACKET_SHOW_FANOUT 0x00000008
19
20struct packet_diag_msg {
21 __u8 pdiag_family;
22 __u8 pdiag_type;
23 __u16 pdiag_num;
24
25 __u32 pdiag_ino;
26 __u32 pdiag_cookie[2];
27};
28
29enum {
30 PACKET_DIAG_INFO,
31 PACKET_DIAG_MCLIST,
32 PACKET_DIAG_RX_RING,
33 PACKET_DIAG_TX_RING,
34 PACKET_DIAG_FANOUT,
35
36 PACKET_DIAG_MAX,
37};
38
39struct packet_diag_info {
40 __u32 pdi_index;
41 __u32 pdi_version;
42 __u32 pdi_reserve;
43 __u32 pdi_copy_thresh;
44 __u32 pdi_tstamp;
45 __u32 pdi_flags;
46
47#define PDI_RUNNING 0x1
48#define PDI_AUXDATA 0x2
49#define PDI_ORIGDEV 0x4
50#define PDI_VNETHDR 0x8
51#define PDI_LOSS 0x10
52};
53
54struct packet_diag_mclist {
55 __u32 pdmc_index;
56 __u32 pdmc_count;
57 __u16 pdmc_type;
58 __u16 pdmc_alen;
59 __u8 pdmc_addr[MAX_ADDR_LEN];
60};
61
62struct packet_diag_ring {
63 __u32 pdr_block_size;
64 __u32 pdr_block_nr;
65 __u32 pdr_frame_size;
66 __u32 pdr_frame_nr;
67 __u32 pdr_retire_tmo;
68 __u32 pdr_sizeof_priv;
69 __u32 pdr_features;
70};
71
72#endif
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 94048547f29a..0cc45ae1afd5 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -116,5 +116,14 @@ static inline void pps_get_ts(struct pps_event_time *ts)
116 116
117#endif /* CONFIG_NTP_PPS */ 117#endif /* CONFIG_NTP_PPS */
118 118
119/* Subtract known time delay from PPS event time(s) */
120static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
121{
122 ts->ts_real = timespec_sub(ts->ts_real, delta);
123#ifdef CONFIG_NTP_PPS
124 ts->ts_raw = timespec_sub(ts->ts_raw, delta);
125#endif
126}
127
119#endif /* LINUX_PPS_KERNEL_H */ 128#endif /* LINUX_PPS_KERNEL_H */
120 129
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 945704c2ed65..f2dc6d8fc680 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -21,6 +21,8 @@
21#ifndef _PTP_CLOCK_KERNEL_H_ 21#ifndef _PTP_CLOCK_KERNEL_H_
22#define _PTP_CLOCK_KERNEL_H_ 22#define _PTP_CLOCK_KERNEL_H_
23 23
24#include <linux/device.h>
25#include <linux/pps_kernel.h>
24#include <linux/ptp_clock.h> 26#include <linux/ptp_clock.h>
25 27
26 28
@@ -40,7 +42,9 @@ struct ptp_clock_request {
40 * struct ptp_clock_info - decribes a PTP hardware clock 42 * struct ptp_clock_info - decribes a PTP hardware clock
41 * 43 *
42 * @owner: The clock driver should set to THIS_MODULE. 44 * @owner: The clock driver should set to THIS_MODULE.
43 * @name: A short name to identify the clock. 45 * @name: A short "friendly name" to identify the clock and to
46 * help distinguish PHY based devices from MAC based ones.
47 * The string is not meant to be a unique id.
44 * @max_adj: The maximum possible frequency adjustment, in parts per billon. 48 * @max_adj: The maximum possible frequency adjustment, in parts per billon.
45 * @n_alarm: The number of programmable alarms. 49 * @n_alarm: The number of programmable alarms.
46 * @n_ext_ts: The number of external time stamp channels. 50 * @n_ext_ts: The number of external time stamp channels.
@@ -92,10 +96,12 @@ struct ptp_clock;
92/** 96/**
93 * ptp_clock_register() - register a PTP hardware clock driver 97 * ptp_clock_register() - register a PTP hardware clock driver
94 * 98 *
95 * @info: Structure describing the new clock. 99 * @info: Structure describing the new clock.
100 * @parent: Pointer to the parent device of the new clock.
96 */ 101 */
97 102
98extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info); 103extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
104 struct device *parent);
99 105
100/** 106/**
101 * ptp_clock_unregister() - unregister a PTP hardware clock driver 107 * ptp_clock_unregister() - unregister a PTP hardware clock driver
@@ -110,6 +116,7 @@ enum ptp_clock_events {
110 PTP_CLOCK_ALARM, 116 PTP_CLOCK_ALARM,
111 PTP_CLOCK_EXTTS, 117 PTP_CLOCK_EXTTS,
112 PTP_CLOCK_PPS, 118 PTP_CLOCK_PPS,
119 PTP_CLOCK_PPSUSR,
113}; 120};
114 121
115/** 122/**
@@ -117,13 +124,17 @@ enum ptp_clock_events {
117 * 124 *
118 * @type: One of the ptp_clock_events enumeration values. 125 * @type: One of the ptp_clock_events enumeration values.
119 * @index: Identifies the source of the event. 126 * @index: Identifies the source of the event.
120 * @timestamp: When the event occured. 127 * @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
128 * @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
121 */ 129 */
122 130
123struct ptp_clock_event { 131struct ptp_clock_event {
124 int type; 132 int type;
125 int index; 133 int index;
126 u64 timestamp; 134 union {
135 u64 timestamp;
136 struct pps_event_time pps_times;
137 };
127}; 138};
128 139
129/** 140/**
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 6fdf02737e9d..0ec590bb3611 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -354,6 +354,37 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
354} 354}
355#endif /* RFKILL || RFKILL_MODULE */ 355#endif /* RFKILL || RFKILL_MODULE */
356 356
357
358#ifdef CONFIG_RFKILL_LEDS
359/**
360 * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
361 * This function might return a NULL pointer if registering of the
362 * LED trigger failed. Use this as "default_trigger" for the LED.
363 */
364const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
365
366/**
367 * rfkill_set_led_trigger_name -- set the LED trigger name
368 * @rfkill: rfkill struct
369 * @name: LED trigger name
370 *
371 * This function sets the LED trigger name of the radio LED
372 * trigger that rfkill creates. It is optional, but if called
373 * must be called before rfkill_register() to be effective.
374 */
375void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
376#else
377static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
378{
379 return NULL;
380}
381
382static inline void
383rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
384{
385}
386#endif
387
357#endif /* __KERNEL__ */ 388#endif /* __KERNEL__ */
358 389
359#endif /* RFKILL_H */ 390#endif /* RFKILL_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d23ca6245d54..9c5612f0374b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1526,6 +1526,9 @@ struct task_struct {
1526 * cache last used pipe for splice 1526 * cache last used pipe for splice
1527 */ 1527 */
1528 struct pipe_inode_info *splice_pipe; 1528 struct pipe_inode_info *splice_pipe;
1529
1530 struct page_frag task_frag;
1531
1529#ifdef CONFIG_TASK_DELAY_ACCT 1532#ifdef CONFIG_TASK_DELAY_ACCT
1530 struct task_delay_info *delays; 1533 struct task_delay_info *delays;
1531#endif 1534#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7632c87da2c9..b33a3a1f205e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -846,13 +846,16 @@ static inline int skb_shared(const struct sk_buff *skb)
846 * 846 *
847 * NULL is returned on a memory allocation failure. 847 * NULL is returned on a memory allocation failure.
848 */ 848 */
849static inline struct sk_buff *skb_share_check(struct sk_buff *skb, 849static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
850 gfp_t pri)
851{ 850{
852 might_sleep_if(pri & __GFP_WAIT); 851 might_sleep_if(pri & __GFP_WAIT);
853 if (skb_shared(skb)) { 852 if (skb_shared(skb)) {
854 struct sk_buff *nskb = skb_clone(skb, pri); 853 struct sk_buff *nskb = skb_clone(skb, pri);
855 kfree_skb(skb); 854
855 if (likely(nskb))
856 consume_skb(skb);
857 else
858 kfree_skb(skb);
856 skb = nskb; 859 skb = nskb;
857 } 860 }
858 return skb; 861 return skb;
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 00bc189cb395..fdfba235f9f1 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -18,7 +18,14 @@
18enum 18enum
19{ 19{
20 IPSTATS_MIB_NUM = 0, 20 IPSTATS_MIB_NUM = 0,
21/* frequently written fields in fast path, kept in same cache line */
21 IPSTATS_MIB_INPKTS, /* InReceives */ 22 IPSTATS_MIB_INPKTS, /* InReceives */
23 IPSTATS_MIB_INOCTETS, /* InOctets */
24 IPSTATS_MIB_INDELIVERS, /* InDelivers */
25 IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
26 IPSTATS_MIB_OUTPKTS, /* OutRequests */
27 IPSTATS_MIB_OUTOCTETS, /* OutOctets */
28/* other fields */
22 IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */ 29 IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
23 IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */ 30 IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */
24 IPSTATS_MIB_INNOROUTES, /* InNoRoutes */ 31 IPSTATS_MIB_INNOROUTES, /* InNoRoutes */
@@ -26,9 +33,6 @@ enum
26 IPSTATS_MIB_INUNKNOWNPROTOS, /* InUnknownProtos */ 33 IPSTATS_MIB_INUNKNOWNPROTOS, /* InUnknownProtos */
27 IPSTATS_MIB_INTRUNCATEDPKTS, /* InTruncatedPkts */ 34 IPSTATS_MIB_INTRUNCATEDPKTS, /* InTruncatedPkts */
28 IPSTATS_MIB_INDISCARDS, /* InDiscards */ 35 IPSTATS_MIB_INDISCARDS, /* InDiscards */
29 IPSTATS_MIB_INDELIVERS, /* InDelivers */
30 IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
31 IPSTATS_MIB_OUTPKTS, /* OutRequests */
32 IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */ 36 IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */
33 IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */ 37 IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */
34 IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */ 38 IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */
@@ -42,8 +46,6 @@ enum
42 IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */ 46 IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */
43 IPSTATS_MIB_INBCASTPKTS, /* InBcastPkts */ 47 IPSTATS_MIB_INBCASTPKTS, /* InBcastPkts */
44 IPSTATS_MIB_OUTBCASTPKTS, /* OutBcastPkts */ 48 IPSTATS_MIB_OUTBCASTPKTS, /* OutBcastPkts */
45 IPSTATS_MIB_INOCTETS, /* InOctets */
46 IPSTATS_MIB_OUTOCTETS, /* OutOctets */
47 IPSTATS_MIB_INMCASTOCTETS, /* InMcastOctets */ 49 IPSTATS_MIB_INMCASTOCTETS, /* InMcastOctets */
48 IPSTATS_MIB_OUTMCASTOCTETS, /* OutMcastOctets */ 50 IPSTATS_MIB_OUTMCASTOCTETS, /* OutMcastOctets */
49 IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */ 51 IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */
@@ -239,6 +241,10 @@ enum
239 LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */ 241 LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */
240 LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */ 242 LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */
241 LINUX_MIB_TCPFASTOPENACTIVE, /* TCPFastOpenActive */ 243 LINUX_MIB_TCPFASTOPENACTIVE, /* TCPFastOpenActive */
244 LINUX_MIB_TCPFASTOPENPASSIVE, /* TCPFastOpenPassive*/
245 LINUX_MIB_TCPFASTOPENPASSIVEFAIL, /* TCPFastOpenPassiveFail */
246 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
247 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
242 __LINUX_MIB_MAX 248 __LINUX_MIB_MAX
243}; 249};
244 250
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 1a6b0045b06b..c2b02a5c86ae 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -504,7 +504,9 @@
504#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */ 504#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */
505#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */ 505#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */
506#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */ 506#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */
507#define SSB_CHIPCO_FLASHCTL_ST_RSIG 0x03AB /* Read Electronic Signature */ 507#define SSB_CHIPCO_FLASHCTL_ST_RES 0x03AB /* Read Electronic Signature */
508#define SSB_CHIPCO_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */
509#define SSB_CHIPCO_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */
508 510
509/* Status register bits for ST flashes */ 511/* Status register bits for ST flashes */
510#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */ 512#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index b69bdb1e08b6..a1547ea3920d 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -76,7 +76,6 @@
76/* Platfrom data for platform device structure's platform_data field */ 76/* Platfrom data for platform device structure's platform_data field */
77 77
78struct stmmac_mdio_bus_data { 78struct stmmac_mdio_bus_data {
79 int bus_id;
80 int (*phy_reset)(void *priv); 79 int (*phy_reset)(void *priv);
81 unsigned int phy_mask; 80 unsigned int phy_mask;
82 int *irqs; 81 int *irqs;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index eb125a4c30b3..67c789ae719c 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -110,6 +110,7 @@ enum {
110#define TCP_REPAIR_QUEUE 20 110#define TCP_REPAIR_QUEUE 20
111#define TCP_QUEUE_SEQ 21 111#define TCP_QUEUE_SEQ 21
112#define TCP_REPAIR_OPTIONS 22 112#define TCP_REPAIR_OPTIONS 22
113#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
113 114
114struct tcp_repair_opt { 115struct tcp_repair_opt {
115 __u32 opt_code; 116 __u32 opt_code;
@@ -246,6 +247,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
246/* TCP Fast Open */ 247/* TCP Fast Open */
247#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */ 248#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */
248#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */ 249#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */
250#define TCP_FASTOPEN_COOKIE_SIZE 8 /* the size employed by this impl. */
249 251
250/* TCP Fast Open Cookie as stored in memory */ 252/* TCP Fast Open Cookie as stored in memory */
251struct tcp_fastopen_cookie { 253struct tcp_fastopen_cookie {
@@ -312,9 +314,14 @@ struct tcp_request_sock {
312 /* Only used by TCP MD5 Signature so far. */ 314 /* Only used by TCP MD5 Signature so far. */
313 const struct tcp_request_sock_ops *af_specific; 315 const struct tcp_request_sock_ops *af_specific;
314#endif 316#endif
317 struct sock *listener; /* needed for TFO */
315 u32 rcv_isn; 318 u32 rcv_isn;
316 u32 snt_isn; 319 u32 snt_isn;
317 u32 snt_synack; /* synack sent time */ 320 u32 snt_synack; /* synack sent time */
321 u32 rcv_nxt; /* the ack # by SYNACK. For
322 * FastOpen it's the seq#
323 * after data-in-SYN.
324 */
318}; 325};
319 326
320static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) 327static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -505,14 +512,18 @@ struct tcp_sock {
505 struct tcp_md5sig_info __rcu *md5sig_info; 512 struct tcp_md5sig_info __rcu *md5sig_info;
506#endif 513#endif
507 514
508/* TCP fastopen related information */
509 struct tcp_fastopen_request *fastopen_req;
510
511 /* When the cookie options are generated and exchanged, then this 515 /* When the cookie options are generated and exchanged, then this
512 * object holds a reference to them (cookie_values->kref). Also 516 * object holds a reference to them (cookie_values->kref). Also
513 * contains related tcp_cookie_transactions fields. 517 * contains related tcp_cookie_transactions fields.
514 */ 518 */
515 struct tcp_cookie_values *cookie_values; 519 struct tcp_cookie_values *cookie_values;
520
521/* TCP fastopen related information */
522 struct tcp_fastopen_request *fastopen_req;
523 /* fastopen_rsk points to request_sock that resulted in this big
524 * socket. Used to retransmit SYNACKs etc.
525 */
526 struct request_sock *fastopen_rsk;
516}; 527};
517 528
518enum tsq_flags { 529enum tsq_flags {
@@ -552,6 +563,38 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
552 return (struct tcp_timewait_sock *)sk; 563 return (struct tcp_timewait_sock *)sk;
553} 564}
554 565
566static inline bool tcp_passive_fastopen(const struct sock *sk)
567{
568 return (sk->sk_state == TCP_SYN_RECV &&
569 tcp_sk(sk)->fastopen_rsk != NULL);
570}
571
572static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
573{
574 return foc->len != -1;
575}
576
577extern void tcp_sock_destruct(struct sock *sk);
578
579static inline int fastopen_init_queue(struct sock *sk, int backlog)
580{
581 struct request_sock_queue *queue =
582 &inet_csk(sk)->icsk_accept_queue;
583
584 if (queue->fastopenq == NULL) {
585 queue->fastopenq = kzalloc(
586 sizeof(struct fastopen_queue),
587 sk->sk_allocation);
588 if (queue->fastopenq == NULL)
589 return -ENOMEM;
590
591 sk->sk_destruct = tcp_sock_destruct;
592 spin_lock_init(&queue->fastopenq->lock);
593 }
594 queue->fastopenq->max_qlen = backlog;
595 return 0;
596}
597
555#endif /* __KERNEL__ */ 598#endif /* __KERNEL__ */
556 599
557#endif /* _LINUX_TCP_H */ 600#endif /* _LINUX_TCP_H */
diff --git a/include/linux/tcp_metrics.h b/include/linux/tcp_metrics.h
new file mode 100644
index 000000000000..cb5157b55f32
--- /dev/null
+++ b/include/linux/tcp_metrics.h
@@ -0,0 +1,54 @@
1/* tcp_metrics.h - TCP Metrics Interface */
2
3#ifndef _LINUX_TCP_METRICS_H
4#define _LINUX_TCP_METRICS_H
5
6#include <linux/types.h>
7
8/* NETLINK_GENERIC related info
9 */
10#define TCP_METRICS_GENL_NAME "tcp_metrics"
11#define TCP_METRICS_GENL_VERSION 0x1
12
13enum tcp_metric_index {
14 TCP_METRIC_RTT,
15 TCP_METRIC_RTTVAR,
16 TCP_METRIC_SSTHRESH,
17 TCP_METRIC_CWND,
18 TCP_METRIC_REORDERING,
19
20 /* Always last. */
21 __TCP_METRIC_MAX,
22};
23
24#define TCP_METRIC_MAX (__TCP_METRIC_MAX - 1)
25
26enum {
27 TCP_METRICS_ATTR_UNSPEC,
28 TCP_METRICS_ATTR_ADDR_IPV4, /* u32 */
29 TCP_METRICS_ATTR_ADDR_IPV6, /* binary */
30 TCP_METRICS_ATTR_AGE, /* msecs */
31 TCP_METRICS_ATTR_TW_TSVAL, /* u32, raw, rcv tsval */
32 TCP_METRICS_ATTR_TW_TS_STAMP, /* s32, sec age */
33 TCP_METRICS_ATTR_VALS, /* nested +1, u32 */
34 TCP_METRICS_ATTR_FOPEN_MSS, /* u16 */
35 TCP_METRICS_ATTR_FOPEN_SYN_DROPS, /* u16, count of drops */
36 TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS, /* msecs age */
37 TCP_METRICS_ATTR_FOPEN_COOKIE, /* binary */
38
39 __TCP_METRICS_ATTR_MAX,
40};
41
42#define TCP_METRICS_ATTR_MAX (__TCP_METRICS_ATTR_MAX - 1)
43
44enum {
45 TCP_METRICS_CMD_UNSPEC,
46 TCP_METRICS_CMD_GET,
47 TCP_METRICS_CMD_DEL,
48
49 __TCP_METRICS_CMD_MAX,
50};
51
52#define TCP_METRICS_CMD_MAX (__TCP_METRICS_CMD_MAX - 1)
53
54#endif /* _LINUX_TCP_METRICS_H */
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index c98928420100..0b1e3f218a36 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -89,8 +89,8 @@
89 89
90#define TIPC_CMD_GET_REMOTE_MNG 0x4003 /* tx none, rx unsigned */ 90#define TIPC_CMD_GET_REMOTE_MNG 0x4003 /* tx none, rx unsigned */
91#define TIPC_CMD_GET_MAX_PORTS 0x4004 /* tx none, rx unsigned */ 91#define TIPC_CMD_GET_MAX_PORTS 0x4004 /* tx none, rx unsigned */
92#define TIPC_CMD_GET_MAX_PUBL 0x4005 /* tx none, rx unsigned */ 92#define TIPC_CMD_GET_MAX_PUBL 0x4005 /* obsoleted */
93#define TIPC_CMD_GET_MAX_SUBSCR 0x4006 /* tx none, rx unsigned */ 93#define TIPC_CMD_GET_MAX_SUBSCR 0x4006 /* obsoleted */
94#define TIPC_CMD_GET_MAX_ZONES 0x4007 /* obsoleted */ 94#define TIPC_CMD_GET_MAX_ZONES 0x4007 /* obsoleted */
95#define TIPC_CMD_GET_MAX_CLUSTERS 0x4008 /* obsoleted */ 95#define TIPC_CMD_GET_MAX_CLUSTERS 0x4008 /* obsoleted */
96#define TIPC_CMD_GET_MAX_NODES 0x4009 /* obsoleted */ 96#define TIPC_CMD_GET_MAX_NODES 0x4009 /* obsoleted */
@@ -115,8 +115,8 @@
115#define TIPC_CMD_SET_NODE_ADDR 0x8001 /* tx net_addr, rx none */ 115#define TIPC_CMD_SET_NODE_ADDR 0x8001 /* tx net_addr, rx none */
116#define TIPC_CMD_SET_REMOTE_MNG 0x8003 /* tx unsigned, rx none */ 116#define TIPC_CMD_SET_REMOTE_MNG 0x8003 /* tx unsigned, rx none */
117#define TIPC_CMD_SET_MAX_PORTS 0x8004 /* tx unsigned, rx none */ 117#define TIPC_CMD_SET_MAX_PORTS 0x8004 /* tx unsigned, rx none */
118#define TIPC_CMD_SET_MAX_PUBL 0x8005 /* tx unsigned, rx none */ 118#define TIPC_CMD_SET_MAX_PUBL 0x8005 /* obsoleted */
119#define TIPC_CMD_SET_MAX_SUBSCR 0x8006 /* tx unsigned, rx none */ 119#define TIPC_CMD_SET_MAX_SUBSCR 0x8006 /* obsoleted */
120#define TIPC_CMD_SET_MAX_ZONES 0x8007 /* obsoleted */ 120#define TIPC_CMD_SET_MAX_ZONES 0x8007 /* obsoleted */
121#define TIPC_CMD_SET_MAX_CLUSTERS 0x8008 /* obsoleted */ 121#define TIPC_CMD_SET_MAX_CLUSTERS 0x8008 /* obsoleted */
122#define TIPC_CMD_SET_MAX_NODES 0x8009 /* obsoleted */ 122#define TIPC_CMD_SET_MAX_NODES 0x8009 /* obsoleted */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 089a09d001d1..9e63e76b20e7 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -78,7 +78,7 @@ extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
78 int strict); 78 int strict);
79 79
80extern int ipv6_dev_get_saddr(struct net *net, 80extern int ipv6_dev_get_saddr(struct net *net,
81 struct net_device *dev, 81 const struct net_device *dev,
82 const struct in6_addr *daddr, 82 const struct in6_addr *daddr,
83 unsigned int srcprefs, 83 unsigned int srcprefs,
84 struct in6_addr *saddr); 84 struct in6_addr *saddr);
diff --git a/include/net/arp.h b/include/net/arp.h
index 7f7df93f37cd..b630dae03411 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -3,6 +3,7 @@
3#define _ARP_H 3#define _ARP_H
4 4
5#include <linux/if_arp.h> 5#include <linux/if_arp.h>
6#include <linux/hash.h>
6#include <net/neighbour.h> 7#include <net/neighbour.h>
7 8
8 9
@@ -10,7 +11,7 @@ extern struct neigh_table arp_tbl;
10 11
11static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd) 12static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
12{ 13{
13 u32 val = key ^ dev->ifindex; 14 u32 val = key ^ hash32_ptr(dev);
14 15
15 return val * hash_rnd; 16 return val * hash_rnd;
16} 17}
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 565d4bee1e49..ede036977ae8 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/poll.h> 28#include <linux/poll.h>
29#include <net/sock.h> 29#include <net/sock.h>
30#include <linux/seq_file.h>
30 31
31#ifndef AF_BLUETOOTH 32#ifndef AF_BLUETOOTH
32#define AF_BLUETOOTH 31 33#define AF_BLUETOOTH 31
@@ -202,6 +203,10 @@ enum {
202struct bt_sock_list { 203struct bt_sock_list {
203 struct hlist_head head; 204 struct hlist_head head;
204 rwlock_t lock; 205 rwlock_t lock;
206#ifdef CONFIG_PROC_FS
207 struct file_operations fops;
208 int (* custom_seq_show)(struct seq_file *, void *);
209#endif
205}; 210};
206 211
207int bt_sock_register(int proto, const struct net_proto_family *ops); 212int bt_sock_register(int proto, const struct net_proto_family *ops);
@@ -292,6 +297,11 @@ extern void hci_sock_cleanup(void);
292extern int bt_sysfs_init(void); 297extern int bt_sysfs_init(void);
293extern void bt_sysfs_cleanup(void); 298extern void bt_sysfs_cleanup(void);
294 299
300extern int bt_procfs_init(struct module* module, struct net *net, const char *name,
301 struct bt_sock_list* sk_list,
302 int (* seq_show)(struct seq_file *, void *));
303extern void bt_procfs_cleanup(struct net *net, const char *name);
304
295extern struct dentry *bt_debugfs; 305extern struct dentry *bt_debugfs;
296 306
297int l2cap_init(void); 307int l2cap_init(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ccd723e0f783..76b2b6bdcf36 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -62,6 +62,15 @@
62/* First BR/EDR Controller shall have ID = 0 */ 62/* First BR/EDR Controller shall have ID = 0 */
63#define HCI_BREDR_ID 0 63#define HCI_BREDR_ID 0
64 64
65/* AMP controller status */
66#define AMP_CTRL_POWERED_DOWN 0x00
67#define AMP_CTRL_BLUETOOTH_ONLY 0x01
68#define AMP_CTRL_NO_CAPACITY 0x02
69#define AMP_CTRL_LOW_CAPACITY 0x03
70#define AMP_CTRL_MEDIUM_CAPACITY 0x04
71#define AMP_CTRL_HIGH_CAPACITY 0x05
72#define AMP_CTRL_FULL_CAPACITY 0x06
73
65/* HCI device quirks */ 74/* HCI device quirks */
66enum { 75enum {
67 HCI_QUIRK_RESET_ON_CLOSE, 76 HCI_QUIRK_RESET_ON_CLOSE,
@@ -293,8 +302,11 @@ enum {
293 302
294/* ---- HCI Error Codes ---- */ 303/* ---- HCI Error Codes ---- */
295#define HCI_ERROR_AUTH_FAILURE 0x05 304#define HCI_ERROR_AUTH_FAILURE 0x05
305#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
296#define HCI_ERROR_REJ_BAD_ADDR 0x0f 306#define HCI_ERROR_REJ_BAD_ADDR 0x0f
297#define HCI_ERROR_REMOTE_USER_TERM 0x13 307#define HCI_ERROR_REMOTE_USER_TERM 0x13
308#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
309#define HCI_ERROR_REMOTE_POWER_OFF 0x15
298#define HCI_ERROR_LOCAL_HOST_TERM 0x16 310#define HCI_ERROR_LOCAL_HOST_TERM 0x16
299#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 311#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
300 312
@@ -1237,6 +1249,24 @@ struct hci_ev_simple_pair_complete {
1237 bdaddr_t bdaddr; 1249 bdaddr_t bdaddr;
1238} __packed; 1250} __packed;
1239 1251
1252#define HCI_EV_USER_PASSKEY_NOTIFY 0x3b
1253struct hci_ev_user_passkey_notify {
1254 bdaddr_t bdaddr;
1255 __le32 passkey;
1256} __packed;
1257
1258#define HCI_KEYPRESS_STARTED 0
1259#define HCI_KEYPRESS_ENTERED 1
1260#define HCI_KEYPRESS_ERASED 2
1261#define HCI_KEYPRESS_CLEARED 3
1262#define HCI_KEYPRESS_COMPLETED 4
1263
1264#define HCI_EV_KEYPRESS_NOTIFY 0x3c
1265struct hci_ev_keypress_notify {
1266 bdaddr_t bdaddr;
1267 __u8 type;
1268} __packed;
1269
1240#define HCI_EV_REMOTE_HOST_FEATURES 0x3d 1270#define HCI_EV_REMOTE_HOST_FEATURES 0x3d
1241struct hci_ev_remote_host_features { 1271struct hci_ev_remote_host_features {
1242 bdaddr_t bdaddr; 1272 bdaddr_t bdaddr;
@@ -1295,6 +1325,8 @@ struct hci_ev_num_comp_blocks {
1295} __packed; 1325} __packed;
1296 1326
1297/* Low energy meta events */ 1327/* Low energy meta events */
1328#define LE_CONN_ROLE_MASTER 0x00
1329
1298#define HCI_EV_LE_CONN_COMPLETE 0x01 1330#define HCI_EV_LE_CONN_COMPLETE 0x01
1299struct hci_ev_le_conn_complete { 1331struct hci_ev_le_conn_complete {
1300 __u8 status; 1332 __u8 status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 475b8c04ba52..e7d454609881 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -115,12 +115,6 @@ struct oob_data {
115 u8 randomizer[16]; 115 u8 randomizer[16];
116}; 116};
117 117
118struct adv_entry {
119 struct list_head list;
120 bdaddr_t bdaddr;
121 u8 bdaddr_type;
122};
123
124struct le_scan_params { 118struct le_scan_params {
125 u8 type; 119 u8 type;
126 u16 interval; 120 u16 interval;
@@ -309,6 +303,8 @@ struct hci_conn {
309 __u8 pin_length; 303 __u8 pin_length;
310 __u8 enc_key_size; 304 __u8 enc_key_size;
311 __u8 io_capability; 305 __u8 io_capability;
306 __u32 passkey_notify;
307 __u8 passkey_entered;
312 __u16 disc_timeout; 308 __u16 disc_timeout;
313 unsigned long flags; 309 unsigned long flags;
314 310
@@ -356,16 +352,16 @@ extern rwlock_t hci_cb_list_lock;
356 352
357/* ----- HCI interface to upper protocols ----- */ 353/* ----- HCI interface to upper protocols ----- */
358extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); 354extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
359extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status); 355extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
360extern int l2cap_disconn_ind(struct hci_conn *hcon); 356extern int l2cap_disconn_ind(struct hci_conn *hcon);
361extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); 357extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
362extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); 358extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
363extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, 359extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
364 u16 flags); 360 u16 flags);
365 361
366extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); 362extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
367extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); 363extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
368extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason); 364extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
369extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); 365extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
370 366
371/* ----- Inquiry cache ----- */ 367/* ----- Inquiry cache ----- */
@@ -434,15 +430,6 @@ static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
434 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 430 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
435} 431}
436 432
437static inline void hci_conn_hash_init(struct hci_dev *hdev)
438{
439 struct hci_conn_hash *h = &hdev->conn_hash;
440 INIT_LIST_HEAD(&h->list);
441 h->acl_num = 0;
442 h->sco_num = 0;
443 h->le_num = 0;
444}
445
446static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) 433static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
447{ 434{
448 struct hci_conn_hash *h = &hdev->conn_hash; 435 struct hci_conn_hash *h = &hdev->conn_hash;
@@ -557,9 +544,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
557 return NULL; 544 return NULL;
558} 545}
559 546
560void hci_acl_connect(struct hci_conn *conn);
561void hci_acl_disconn(struct hci_conn *conn, __u8 reason); 547void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
562void hci_add_sco(struct hci_conn *conn, __u16 handle);
563void hci_setup_sync(struct hci_conn *conn, __u16 handle); 548void hci_setup_sync(struct hci_conn *conn, __u16 handle);
564void hci_sco_setup(struct hci_conn *conn, __u8 status); 549void hci_sco_setup(struct hci_conn *conn, __u8 status);
565 550
@@ -569,7 +554,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev);
569void hci_conn_check_pending(struct hci_dev *hdev); 554void hci_conn_check_pending(struct hci_dev *hdev);
570 555
571struct hci_chan *hci_chan_create(struct hci_conn *conn); 556struct hci_chan *hci_chan_create(struct hci_conn *conn);
572int hci_chan_del(struct hci_chan *chan); 557void hci_chan_del(struct hci_chan *chan);
573void hci_chan_list_flush(struct hci_conn *conn); 558void hci_chan_list_flush(struct hci_conn *conn);
574 559
575struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, 560struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -587,8 +572,7 @@ void hci_conn_put_device(struct hci_conn *conn);
587 572
588static inline void hci_conn_hold(struct hci_conn *conn) 573static inline void hci_conn_hold(struct hci_conn *conn)
589{ 574{
590 BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt), 575 BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
591 atomic_read(&conn->refcnt) + 1);
592 576
593 atomic_inc(&conn->refcnt); 577 atomic_inc(&conn->refcnt);
594 cancel_delayed_work(&conn->disc_work); 578 cancel_delayed_work(&conn->disc_work);
@@ -596,8 +580,7 @@ static inline void hci_conn_hold(struct hci_conn *conn)
596 580
597static inline void hci_conn_put(struct hci_conn *conn) 581static inline void hci_conn_put(struct hci_conn *conn)
598{ 582{
599 BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt), 583 BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
600 atomic_read(&conn->refcnt) - 1);
601 584
602 if (atomic_dec_and_test(&conn->refcnt)) { 585 if (atomic_dec_and_test(&conn->refcnt)) {
603 unsigned long timeo; 586 unsigned long timeo;
@@ -622,11 +605,17 @@ static inline void hci_conn_put(struct hci_conn *conn)
622/* ----- HCI Devices ----- */ 605/* ----- HCI Devices ----- */
623static inline void hci_dev_put(struct hci_dev *d) 606static inline void hci_dev_put(struct hci_dev *d)
624{ 607{
608 BT_DBG("%s orig refcnt %d", d->name,
609 atomic_read(&d->dev.kobj.kref.refcount));
610
625 put_device(&d->dev); 611 put_device(&d->dev);
626} 612}
627 613
628static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) 614static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
629{ 615{
616 BT_DBG("%s orig refcnt %d", d->name,
617 atomic_read(&d->dev.kobj.kref.refcount));
618
630 get_device(&d->dev); 619 get_device(&d->dev);
631 return d; 620 return d;
632} 621}
@@ -1012,7 +1001,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1012 u8 addr_type, u32 flags, u8 *name, u8 name_len, 1001 u8 addr_type, u32 flags, u8 *name, u8 name_len,
1013 u8 *dev_class); 1002 u8 *dev_class);
1014int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, 1003int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
1015 u8 link_type, u8 addr_type); 1004 u8 link_type, u8 addr_type, u8 reason);
1016int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, 1005int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
1017 u8 link_type, u8 addr_type, u8 status); 1006 u8 link_type, u8 addr_type, u8 status);
1018int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1007int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -1035,6 +1024,9 @@ int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1035 u8 link_type, u8 addr_type, u8 status); 1024 u8 link_type, u8 addr_type, u8 status);
1036int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 1025int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1037 u8 link_type, u8 addr_type, u8 status); 1026 u8 link_type, u8 addr_type, u8 status);
1027int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
1028 u8 link_type, u8 addr_type, u32 passkey,
1029 u8 entered);
1038int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1030int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1039 u8 addr_type, u8 status); 1031 u8 addr_type, u8 status);
1040int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); 1032int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
@@ -1056,7 +1048,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
1056int mgmt_interleaved_discovery(struct hci_dev *hdev); 1048int mgmt_interleaved_discovery(struct hci_dev *hdev);
1057int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 1049int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1058int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 1050int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1059 1051bool mgmt_valid_hdev(struct hci_dev *hdev);
1060int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent); 1052int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
1061 1053
1062/* HCI info for socket */ 1054/* HCI info for socket */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index a7679f8913d2..7ed8e356425a 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -433,11 +433,10 @@ struct l2cap_chan {
433 struct sock *sk; 433 struct sock *sk;
434 434
435 struct l2cap_conn *conn; 435 struct l2cap_conn *conn;
436 struct kref kref;
436 437
437 __u8 state; 438 __u8 state;
438 439
439 atomic_t refcnt;
440
441 __le16 psm; 440 __le16 psm;
442 __u16 dcid; 441 __u16 dcid;
443 __u16 scid; 442 __u16 scid;
@@ -671,20 +670,8 @@ enum {
671 L2CAP_EV_RECV_FRAME, 670 L2CAP_EV_RECV_FRAME,
672}; 671};
673 672
674static inline void l2cap_chan_hold(struct l2cap_chan *c) 673void l2cap_chan_hold(struct l2cap_chan *c);
675{ 674void l2cap_chan_put(struct l2cap_chan *c);
676 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
677
678 atomic_inc(&c->refcnt);
679}
680
681static inline void l2cap_chan_put(struct l2cap_chan *c)
682{
683 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
684
685 if (atomic_dec_and_test(&c->refcnt))
686 kfree(c);
687}
688 675
689static inline void l2cap_chan_lock(struct l2cap_chan *chan) 676static inline void l2cap_chan_lock(struct l2cap_chan *chan)
690{ 677{
@@ -771,7 +758,6 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
771 758
772struct l2cap_chan *l2cap_chan_create(void); 759struct l2cap_chan *l2cap_chan_create(void);
773void l2cap_chan_close(struct l2cap_chan *chan, int reason); 760void l2cap_chan_close(struct l2cap_chan *chan, int reason);
774void l2cap_chan_destroy(struct l2cap_chan *chan);
775int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, 761int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
776 bdaddr_t *dst, u8 dst_type); 762 bdaddr_t *dst, u8 dst_type);
777int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 763int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 4348ee8bda69..22980a7c3873 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -405,7 +405,16 @@ struct mgmt_ev_device_connected {
405 __u8 eir[0]; 405 __u8 eir[0];
406} __packed; 406} __packed;
407 407
408#define MGMT_DEV_DISCONN_UNKNOWN 0x00
409#define MGMT_DEV_DISCONN_TIMEOUT 0x01
410#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
411#define MGMT_DEV_DISCONN_REMOTE 0x03
412
408#define MGMT_EV_DEVICE_DISCONNECTED 0x000C 413#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
414struct mgmt_ev_device_disconnected {
415 struct mgmt_addr_info addr;
416 __u8 reason;
417} __packed;
409 418
410#define MGMT_EV_CONNECT_FAILED 0x000D 419#define MGMT_EV_CONNECT_FAILED 0x000D
411struct mgmt_ev_connect_failed { 420struct mgmt_ev_connect_failed {
@@ -469,3 +478,10 @@ struct mgmt_ev_device_unblocked {
469struct mgmt_ev_device_unpaired { 478struct mgmt_ev_device_unpaired {
470 struct mgmt_addr_info addr; 479 struct mgmt_addr_info addr;
471} __packed; 480} __packed;
481
482#define MGMT_EV_PASSKEY_NOTIFY 0x0017
483struct mgmt_ev_passkey_notify {
484 struct mgmt_addr_info addr;
485 __le32 passkey;
486 __u8 entered;
487} __packed;
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 8b27927b2a55..f8ba07f3e5fa 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -108,8 +108,8 @@ struct smp_cmd_security_req {
108#define SMP_CONFIRM_FAILED 0x04 108#define SMP_CONFIRM_FAILED 0x04
109#define SMP_PAIRING_NOTSUPP 0x05 109#define SMP_PAIRING_NOTSUPP 0x05
110#define SMP_ENC_KEY_SIZE 0x06 110#define SMP_ENC_KEY_SIZE 0x06
111#define SMP_CMD_NOTSUPP 0x07 111#define SMP_CMD_NOTSUPP 0x07
112#define SMP_UNSPECIFIED 0x08 112#define SMP_UNSPECIFIED 0x08
113#define SMP_REPEATED_ATTEMPTS 0x09 113#define SMP_REPEATED_ATTEMPTS 0x09
114 114
115#define SMP_MIN_ENC_KEY_SIZE 7 115#define SMP_MIN_ENC_KEY_SIZE 7
@@ -123,8 +123,8 @@ struct smp_chan {
123 struct l2cap_conn *conn; 123 struct l2cap_conn *conn;
124 u8 preq[7]; /* SMP Pairing Request */ 124 u8 preq[7]; /* SMP Pairing Request */
125 u8 prsp[7]; /* SMP Pairing Response */ 125 u8 prsp[7]; /* SMP Pairing Response */
126 u8 prnd[16]; /* SMP Pairing Random (local) */ 126 u8 prnd[16]; /* SMP Pairing Random (local) */
127 u8 rrnd[16]; /* SMP Pairing Random (remote) */ 127 u8 rrnd[16]; /* SMP Pairing Random (remote) */
128 u8 pcnf[16]; /* SMP Pairing Confirm */ 128 u8 pcnf[16]; /* SMP Pairing Confirm */
129 u8 tk[16]; /* SMP Temporary Key */ 129 u8 tk[16]; /* SMP Temporary Key */
130 u8 enc_key_size; 130 u8 enc_key_size;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3d254e10ff30..1b4989082244 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -245,6 +245,7 @@ struct ieee80211_sta_vht_cap {
245 * rates" IE, i.e. CCK rates first, then OFDM. 245 * rates" IE, i.e. CCK rates first, then OFDM.
246 * @n_bitrates: Number of bitrates in @bitrates 246 * @n_bitrates: Number of bitrates in @bitrates
247 * @ht_cap: HT capabilities in this band 247 * @ht_cap: HT capabilities in this band
248 * @vht_cap: VHT capabilities in this band
248 */ 249 */
249struct ieee80211_supported_band { 250struct ieee80211_supported_band {
250 struct ieee80211_channel *channels; 251 struct ieee80211_channel *channels;
@@ -1439,7 +1440,8 @@ struct cfg80211_gtk_rekey_data {
1439 * @add_virtual_intf: create a new virtual interface with the given name, 1440 * @add_virtual_intf: create a new virtual interface with the given name,
1440 * must set the struct wireless_dev's iftype. Beware: You must create 1441 * must set the struct wireless_dev's iftype. Beware: You must create
1441 * the new netdev in the wiphy's network namespace! Returns the struct 1442 * the new netdev in the wiphy's network namespace! Returns the struct
1442 * wireless_dev, or an ERR_PTR. 1443 * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
1444 * also set the address member in the wdev.
1443 * 1445 *
1444 * @del_virtual_intf: remove the virtual interface 1446 * @del_virtual_intf: remove the virtual interface
1445 * 1447 *
@@ -1578,9 +1580,7 @@ struct cfg80211_gtk_rekey_data {
1578 * @set_cqm_txe_config: Configure connection quality monitor TX error 1580 * @set_cqm_txe_config: Configure connection quality monitor TX error
1579 * thresholds. 1581 * thresholds.
1580 * @sched_scan_start: Tell the driver to start a scheduled scan. 1582 * @sched_scan_start: Tell the driver to start a scheduled scan.
1581 * @sched_scan_stop: Tell the driver to stop an ongoing scheduled 1583 * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan.
1582 * scan. The driver_initiated flag specifies whether the driver
1583 * itself has informed that the scan has stopped.
1584 * 1584 *
1585 * @mgmt_frame_register: Notify driver that a management frame type was 1585 * @mgmt_frame_register: Notify driver that a management frame type was
1586 * registered. Note that this callback may not sleep, and cannot run 1586 * registered. Note that this callback may not sleep, and cannot run
@@ -1618,6 +1618,9 @@ struct cfg80211_gtk_rekey_data {
1618 * @get_channel: Get the current operating channel for the virtual interface. 1618 * @get_channel: Get the current operating channel for the virtual interface.
1619 * For monitor interfaces, it should return %NULL unless there's a single 1619 * For monitor interfaces, it should return %NULL unless there's a single
1620 * current monitoring channel. 1620 * current monitoring channel.
1621 *
1622 * @start_p2p_device: Start the given P2P device.
1623 * @stop_p2p_device: Stop the given P2P device.
1621 */ 1624 */
1622struct cfg80211_ops { 1625struct cfg80211_ops {
1623 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 1626 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1625,7 +1628,7 @@ struct cfg80211_ops {
1625 void (*set_wakeup)(struct wiphy *wiphy, bool enabled); 1628 void (*set_wakeup)(struct wiphy *wiphy, bool enabled);
1626 1629
1627 struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy, 1630 struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
1628 char *name, 1631 const char *name,
1629 enum nl80211_iftype type, 1632 enum nl80211_iftype type,
1630 u32 *flags, 1633 u32 *flags,
1631 struct vif_params *params); 1634 struct vif_params *params);
@@ -1834,6 +1837,11 @@ struct cfg80211_ops {
1834 (*get_channel)(struct wiphy *wiphy, 1837 (*get_channel)(struct wiphy *wiphy,
1835 struct wireless_dev *wdev, 1838 struct wireless_dev *wdev,
1836 enum nl80211_channel_type *type); 1839 enum nl80211_channel_type *type);
1840
1841 int (*start_p2p_device)(struct wiphy *wiphy,
1842 struct wireless_dev *wdev);
1843 void (*stop_p2p_device)(struct wiphy *wiphy,
1844 struct wireless_dev *wdev);
1837}; 1845};
1838 1846
1839/* 1847/*
@@ -2397,6 +2405,8 @@ struct cfg80211_cached_keys;
2397 * @cleanup_work: work struct used for cleanup that can't be done directly 2405 * @cleanup_work: work struct used for cleanup that can't be done directly
2398 * @beacon_interval: beacon interval used on this device for transmitting 2406 * @beacon_interval: beacon interval used on this device for transmitting
2399 * beacons, 0 when not valid 2407 * beacons, 0 when not valid
2408 * @address: The address for this device, valid only if @netdev is %NULL
2409 * @p2p_started: true if this is a P2P Device that has been started
2400 */ 2410 */
2401struct wireless_dev { 2411struct wireless_dev {
2402 struct wiphy *wiphy; 2412 struct wiphy *wiphy;
@@ -2415,7 +2425,9 @@ struct wireless_dev {
2415 2425
2416 struct work_struct cleanup_work; 2426 struct work_struct cleanup_work;
2417 2427
2418 bool use_4addr; 2428 bool use_4addr, p2p_started;
2429
2430 u8 address[ETH_ALEN] __aligned(sizeof(u16));
2419 2431
2420 /* currently used for IBSS and SME - might be rearranged later */ 2432 /* currently used for IBSS and SME - might be rearranged later */
2421 u8 ssid[IEEE80211_MAX_SSID_LEN]; 2433 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -2445,7 +2457,7 @@ struct wireless_dev {
2445 2457
2446 int beacon_interval; 2458 int beacon_interval;
2447 2459
2448 u32 ap_unexpected_nlpid; 2460 u32 ap_unexpected_nlportid;
2449 2461
2450#ifdef CONFIG_CFG80211_WEXT 2462#ifdef CONFIG_CFG80211_WEXT
2451 /* wext data */ 2463 /* wext data */
@@ -2463,6 +2475,13 @@ struct wireless_dev {
2463#endif 2475#endif
2464}; 2476};
2465 2477
2478static inline u8 *wdev_address(struct wireless_dev *wdev)
2479{
2480 if (wdev->netdev)
2481 return wdev->netdev->dev_addr;
2482 return wdev->address;
2483}
2484
2466/** 2485/**
2467 * wdev_priv - return wiphy priv from wireless_dev 2486 * wdev_priv - return wiphy priv from wireless_dev
2468 * 2487 *
@@ -3342,6 +3361,25 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
3342void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp); 3361void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
3343 3362
3344/** 3363/**
3364 * cfg80211_conn_failed - connection request failed notification
3365 *
3366 * @dev: the netdev
3367 * @mac_addr: the station's address
3368 * @reason: the reason for connection failure
3369 * @gfp: allocation flags
3370 *
3371 * Whenever a station tries to connect to an AP and if the station
3372 * could not connect to the AP as the AP has rejected the connection
3373 * for some reasons, this function is called.
3374 *
3375 * The reason for connection failure can be any of the value from
3376 * nl80211_connect_failed_reason enum
3377 */
3378void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
3379 enum nl80211_connect_failed_reason reason,
3380 gfp_t gfp);
3381
3382/**
3345 * cfg80211_rx_mgmt - notification of received, unprocessed management frame 3383 * cfg80211_rx_mgmt - notification of received, unprocessed management frame
3346 * @wdev: wireless device receiving the frame 3384 * @wdev: wireless device receiving the frame
3347 * @freq: Frequency on which the frame was received in MHz 3385 * @freq: Frequency on which the frame was received in MHz
@@ -3530,6 +3568,22 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
3530 */ 3568 */
3531u32 cfg80211_calculate_bitrate(struct rate_info *rate); 3569u32 cfg80211_calculate_bitrate(struct rate_info *rate);
3532 3570
3571/**
3572 * cfg80211_unregister_wdev - remove the given wdev
3573 * @wdev: struct wireless_dev to remove
3574 *
3575 * Call this function only for wdevs that have no netdev assigned,
3576 * e.g. P2P Devices. It removes the device from the list so that
3577 * it can no longer be used. It is necessary to call this function
3578 * even when cfg80211 requests the removal of the interface by
3579 * calling the del_virtual_intf() callback. The function must also
3580 * be called when the driver wishes to unregister the wdev, e.g.
3581 * when the device is unbound from the driver.
3582 *
3583 * Requires the RTNL to be held.
3584 */
3585void cfg80211_unregister_wdev(struct wireless_dev *wdev);
3586
3533/* Logging, debugging and troubleshooting/diagnostic helpers. */ 3587/* Logging, debugging and troubleshooting/diagnostic helpers. */
3534 3588
3535/* wiphy_printk helpers, similar to dev_printk */ 3589/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index ba55d8b8c87c..600d1d705bb8 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -109,6 +109,9 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
109struct sk_buff; 109struct sk_buff;
110extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, 110extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
111 __be32 from, __be32 to, int pseudohdr); 111 __be32 from, __be32 to, int pseudohdr);
112extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
113 const __be32 *from, const __be32 *to,
114 int pseudohdr);
112 115
113static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, 116static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
114 __be16 from, __be16 to, 117 __be16 from, __be16 to,
diff --git a/include/net/dst.h b/include/net/dst.h
index 621e3513ef5e..9a7881066fb3 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -396,11 +396,15 @@ static inline void dst_confirm(struct dst_entry *dst)
396static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, 396static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
397 struct sk_buff *skb) 397 struct sk_buff *skb)
398{ 398{
399 struct hh_cache *hh; 399 const struct hh_cache *hh;
400
401 if (dst->pending_confirm) {
402 unsigned long now = jiffies;
400 403
401 if (unlikely(dst->pending_confirm)) {
402 n->confirmed = jiffies;
403 dst->pending_confirm = 0; 404 dst->pending_confirm = 0;
405 /* avoid dirtying neighbour */
406 if (n->confirmed != now)
407 n->confirmed = now;
404 } 408 }
405 409
406 hh = &n->hh; 410 hh = &n->hh;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 48905cd3884c..bdfbe68c1c3b 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -65,7 +65,7 @@ struct genl_family {
65/** 65/**
66 * struct genl_info - receiving information 66 * struct genl_info - receiving information
67 * @snd_seq: sending sequence number 67 * @snd_seq: sending sequence number
68 * @snd_pid: netlink pid of sender 68 * @snd_portid: netlink portid of sender
69 * @nlhdr: netlink message header 69 * @nlhdr: netlink message header
70 * @genlhdr: generic netlink message header 70 * @genlhdr: generic netlink message header
71 * @userhdr: user specific header 71 * @userhdr: user specific header
@@ -75,7 +75,7 @@ struct genl_family {
75 */ 75 */
76struct genl_info { 76struct genl_info {
77 u32 snd_seq; 77 u32 snd_seq;
78 u32 snd_pid; 78 u32 snd_portid;
79 struct nlmsghdr * nlhdr; 79 struct nlmsghdr * nlhdr;
80 struct genlmsghdr * genlhdr; 80 struct genlmsghdr * genlhdr;
81 void * userhdr; 81 void * userhdr;
@@ -130,10 +130,10 @@ extern int genl_register_mc_group(struct genl_family *family,
130 struct genl_multicast_group *grp); 130 struct genl_multicast_group *grp);
131extern void genl_unregister_mc_group(struct genl_family *family, 131extern void genl_unregister_mc_group(struct genl_family *family,
132 struct genl_multicast_group *grp); 132 struct genl_multicast_group *grp);
133extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, 133extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
134 u32 group, struct nlmsghdr *nlh, gfp_t flags); 134 u32 group, struct nlmsghdr *nlh, gfp_t flags);
135 135
136void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, 136void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
137 struct genl_family *family, int flags, u8 cmd); 137 struct genl_family *family, int flags, u8 cmd);
138 138
139/** 139/**
@@ -183,7 +183,7 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
183 struct genl_family *family, 183 struct genl_family *family,
184 int flags, u8 cmd) 184 int flags, u8 cmd)
185{ 185{
186 return genlmsg_put(skb, info->snd_pid, info->snd_seq, family, 186 return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
187 flags, cmd); 187 flags, cmd);
188} 188}
189 189
@@ -212,49 +212,49 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
212 * genlmsg_multicast_netns - multicast a netlink message to a specific netns 212 * genlmsg_multicast_netns - multicast a netlink message to a specific netns
213 * @net: the net namespace 213 * @net: the net namespace
214 * @skb: netlink message as socket buffer 214 * @skb: netlink message as socket buffer
215 * @pid: own netlink pid to avoid sending to yourself 215 * @portid: own netlink portid to avoid sending to yourself
216 * @group: multicast group id 216 * @group: multicast group id
217 * @flags: allocation flags 217 * @flags: allocation flags
218 */ 218 */
219static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb, 219static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb,
220 u32 pid, unsigned int group, gfp_t flags) 220 u32 portid, unsigned int group, gfp_t flags)
221{ 221{
222 return nlmsg_multicast(net->genl_sock, skb, pid, group, flags); 222 return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
223} 223}
224 224
225/** 225/**
226 * genlmsg_multicast - multicast a netlink message to the default netns 226 * genlmsg_multicast - multicast a netlink message to the default netns
227 * @skb: netlink message as socket buffer 227 * @skb: netlink message as socket buffer
228 * @pid: own netlink pid to avoid sending to yourself 228 * @portid: own netlink portid to avoid sending to yourself
229 * @group: multicast group id 229 * @group: multicast group id
230 * @flags: allocation flags 230 * @flags: allocation flags
231 */ 231 */
232static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid, 232static inline int genlmsg_multicast(struct sk_buff *skb, u32 portid,
233 unsigned int group, gfp_t flags) 233 unsigned int group, gfp_t flags)
234{ 234{
235 return genlmsg_multicast_netns(&init_net, skb, pid, group, flags); 235 return genlmsg_multicast_netns(&init_net, skb, portid, group, flags);
236} 236}
237 237
238/** 238/**
239 * genlmsg_multicast_allns - multicast a netlink message to all net namespaces 239 * genlmsg_multicast_allns - multicast a netlink message to all net namespaces
240 * @skb: netlink message as socket buffer 240 * @skb: netlink message as socket buffer
241 * @pid: own netlink pid to avoid sending to yourself 241 * @portid: own netlink portid to avoid sending to yourself
242 * @group: multicast group id 242 * @group: multicast group id
243 * @flags: allocation flags 243 * @flags: allocation flags
244 * 244 *
245 * This function must hold the RTNL or rcu_read_lock(). 245 * This function must hold the RTNL or rcu_read_lock().
246 */ 246 */
247int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, 247int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid,
248 unsigned int group, gfp_t flags); 248 unsigned int group, gfp_t flags);
249 249
250/** 250/**
251 * genlmsg_unicast - unicast a netlink message 251 * genlmsg_unicast - unicast a netlink message
252 * @skb: netlink message as socket buffer 252 * @skb: netlink message as socket buffer
253 * @pid: netlink pid of the destination socket 253 * @portid: netlink portid of the destination socket
254 */ 254 */
255static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid) 255static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
256{ 256{
257 return nlmsg_unicast(net->genl_sock, skb, pid); 257 return nlmsg_unicast(net->genl_sock, skb, portid);
258} 258}
259 259
260/** 260/**
@@ -264,7 +264,7 @@ static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
264 */ 264 */
265static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info) 265static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
266{ 266{
267 return genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); 267 return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
268} 268}
269 269
270/** 270/**
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
new file mode 100644
index 000000000000..4fd8a4b4b7ee
--- /dev/null
+++ b/include/net/gro_cells.h
@@ -0,0 +1,103 @@
1#ifndef _NET_GRO_CELLS_H
2#define _NET_GRO_CELLS_H
3
4#include <linux/skbuff.h>
5#include <linux/slab.h>
6#include <linux/netdevice.h>
7
8struct gro_cell {
9 struct sk_buff_head napi_skbs;
10 struct napi_struct napi;
11} ____cacheline_aligned_in_smp;
12
13struct gro_cells {
14 unsigned int gro_cells_mask;
15 struct gro_cell *cells;
16};
17
18static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
19{
20 unsigned long flags;
21 struct gro_cell *cell = gcells->cells;
22 struct net_device *dev = skb->dev;
23
24 if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
25 netif_rx(skb);
26 return;
27 }
28
29 if (skb_rx_queue_recorded(skb))
30 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
31
32 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
33 atomic_long_inc(&dev->rx_dropped);
34 kfree_skb(skb);
35 return;
36 }
37
38 spin_lock_irqsave(&cell->napi_skbs.lock, flags);
39
40 __skb_queue_tail(&cell->napi_skbs, skb);
41 if (skb_queue_len(&cell->napi_skbs) == 1)
42 napi_schedule(&cell->napi);
43
44 spin_unlock_irqrestore(&cell->napi_skbs.lock, flags);
45}
46
47static inline int gro_cell_poll(struct napi_struct *napi, int budget)
48{
49 struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
50 struct sk_buff *skb;
51 int work_done = 0;
52
53 while (work_done < budget) {
54 skb = skb_dequeue(&cell->napi_skbs);
55 if (!skb)
56 break;
57
58 napi_gro_receive(napi, skb);
59 work_done++;
60 }
61
62 if (work_done < budget)
63 napi_complete(napi);
64 return work_done;
65}
66
67static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
68{
69 int i;
70
71 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
72 gcells->cells = kcalloc(sizeof(struct gro_cell),
73 gcells->gro_cells_mask + 1,
74 GFP_KERNEL);
75 if (!gcells->cells)
76 return -ENOMEM;
77
78 for (i = 0; i <= gcells->gro_cells_mask; i++) {
79 struct gro_cell *cell = gcells->cells + i;
80
81 skb_queue_head_init(&cell->napi_skbs);
82 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
83 napi_enable(&cell->napi);
84 }
85 return 0;
86}
87
88static inline void gro_cells_destroy(struct gro_cells *gcells)
89{
90 struct gro_cell *cell = gcells->cells;
91 int i;
92
93 if (!cell)
94 return;
95 for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
96 netif_napi_del(&cell->napi);
97 skb_queue_purge(&cell->napi_skbs);
98 }
99 kfree(gcells->cells);
100 gcells->cells = NULL;
101}
102
103#endif
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 71392545d0a1..7f0df133d119 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -183,6 +183,9 @@ struct ieee80211_radiotap_header {
183 * Contains a bitmap of known fields/flags, the flags, and 183 * Contains a bitmap of known fields/flags, the flags, and
184 * the MCS index. 184 * the MCS index.
185 * 185 *
186 * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless
187 *
188 * Contains the AMPDU information for the subframe.
186 */ 189 */
187enum ieee80211_radiotap_type { 190enum ieee80211_radiotap_type {
188 IEEE80211_RADIOTAP_TSFT = 0, 191 IEEE80211_RADIOTAP_TSFT = 0,
@@ -205,6 +208,7 @@ enum ieee80211_radiotap_type {
205 IEEE80211_RADIOTAP_DATA_RETRIES = 17, 208 IEEE80211_RADIOTAP_DATA_RETRIES = 17,
206 209
207 IEEE80211_RADIOTAP_MCS = 19, 210 IEEE80211_RADIOTAP_MCS = 19,
211 IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
208 212
209 /* valid in every it_present bitmap, even vendor namespaces */ 213 /* valid in every it_present bitmap, even vendor namespaces */
210 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, 214 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -270,6 +274,13 @@ enum ieee80211_radiotap_type {
270#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 274#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08
271#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 275#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
272 276
277/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
278#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001
279#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002
280#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004
281#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008
282#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010
283#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020
273 284
274/* helpers */ 285/* helpers */
275static inline int ieee80211_get_radiotap_len(unsigned char *data) 286static inline int ieee80211_get_radiotap_len(unsigned char *data)
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 2fa14691869c..aab73757bc4d 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -15,6 +15,8 @@ enum {
15 INET_ECN_MASK = 3, 15 INET_ECN_MASK = 3,
16}; 16};
17 17
18extern int sysctl_tunnel_ecn_log;
19
18static inline int INET_ECN_is_ce(__u8 dsfield) 20static inline int INET_ECN_is_ce(__u8 dsfield)
19{ 21{
20 return (dsfield & INET_ECN_MASK) == INET_ECN_CE; 22 return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
@@ -145,4 +147,78 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
145 return 0; 147 return 0;
146} 148}
147 149
150/*
151 * RFC 6080 4.2
152 * To decapsulate the inner header at the tunnel egress, a compliant
153 * tunnel egress MUST set the outgoing ECN field to the codepoint at the
154 * intersection of the appropriate arriving inner header (row) and outer
155 * header (column) in Figure 4
156 *
157 * +---------+------------------------------------------------+
158 * |Arriving | Arriving Outer Header |
159 * | Inner +---------+------------+------------+------------+
160 * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
161 * +---------+---------+------------+------------+------------+
162 * | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
163 * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE |
164 * | ECT(1) | ECT(1) | ECT(1) (!) | ECT(1) | CE |
165 * | CE | CE | CE | CE(!!!)| CE |
166 * +---------+---------+------------+------------+------------+
167 *
168 * Figure 4: New IP in IP Decapsulation Behaviour
169 *
170 * returns 0 on success
171 * 1 if something is broken and should be logged (!!! above)
172 * 2 if packet should be dropped
173 */
174static inline int INET_ECN_decapsulate(struct sk_buff *skb,
175 __u8 outer, __u8 inner)
176{
177 if (INET_ECN_is_not_ect(inner)) {
178 switch (outer & INET_ECN_MASK) {
179 case INET_ECN_NOT_ECT:
180 return 0;
181 case INET_ECN_ECT_0:
182 case INET_ECN_ECT_1:
183 return 1;
184 case INET_ECN_CE:
185 return 2;
186 }
187 }
188
189 if (INET_ECN_is_ce(outer))
190 INET_ECN_set_ce(skb);
191
192 return 0;
193}
194
195static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
196 struct sk_buff *skb)
197{
198 __u8 inner;
199
200 if (skb->protocol == htons(ETH_P_IP))
201 inner = ip_hdr(skb)->tos;
202 else if (skb->protocol == htons(ETH_P_IPV6))
203 inner = ipv6_get_dsfield(ipv6_hdr(skb));
204 else
205 return 0;
206
207 return INET_ECN_decapsulate(skb, oiph->tos, inner);
208}
209
210static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
211 struct sk_buff *skb)
212{
213 __u8 inner;
214
215 if (skb->protocol == htons(ETH_P_IP))
216 inner = ip_hdr(skb)->tos;
217 else if (skb->protocol == htons(ETH_P_IPV6))
218 inner = ipv6_get_dsfield(ipv6_hdr(skb));
219 else
220 return 0;
221
222 return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
223}
148#endif 224#endif
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 2431cf83aeca..32786a044718 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -29,6 +29,8 @@ struct inet_frag_queue {
29#define INET_FRAG_COMPLETE 4 29#define INET_FRAG_COMPLETE 4
30#define INET_FRAG_FIRST_IN 2 30#define INET_FRAG_FIRST_IN 2
31#define INET_FRAG_LAST_IN 1 31#define INET_FRAG_LAST_IN 1
32
33 u16 max_size;
32}; 34};
33 35
34#define INETFRAGS_HASHSZ 64 36#define INETFRAGS_HASHSZ 64
@@ -59,7 +61,7 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
59void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); 61void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
60void inet_frag_destroy(struct inet_frag_queue *q, 62void inet_frag_destroy(struct inet_frag_queue *q,
61 struct inet_frags *f, int *work); 63 struct inet_frags *f, int *work);
62int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f); 64int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
63struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, 65struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
64 struct inet_frags *f, void *key, unsigned int hash) 66 struct inet_frags *f, void *key, unsigned int hash)
65 __releases(&f->lock); 67 __releases(&f->lock);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 613cfa401672..256c1ed2d69a 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -101,10 +101,8 @@ struct inet_cork {
101 __be32 addr; 101 __be32 addr;
102 struct ip_options *opt; 102 struct ip_options *opt;
103 unsigned int fragsize; 103 unsigned int fragsize;
104 struct dst_entry *dst;
105 int length; /* Total length of all frames */ 104 int length; /* Total length of all frames */
106 struct page *page; 105 struct dst_entry *dst;
107 u32 off;
108 u8 tx_flags; 106 u8 tx_flags;
109}; 107};
110 108
diff --git a/include/net/ip.h b/include/net/ip.h
index 5a5d84d3d2c6..0707fb9551aa 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -42,6 +42,8 @@ struct inet_skb_parm {
42#define IPSKB_XFRM_TRANSFORMED 4 42#define IPSKB_XFRM_TRANSFORMED 4
43#define IPSKB_FRAG_COMPLETE 8 43#define IPSKB_FRAG_COMPLETE 8
44#define IPSKB_REROUTED 16 44#define IPSKB_REROUTED 16
45
46 u16 frag_max_size;
45}; 47};
46 48
47static inline unsigned int ip_hdrlen(const struct sk_buff *skb) 49static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9fc7114159e8..8a2a203eb15d 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -37,6 +37,7 @@ struct fib6_config {
37 int fc_ifindex; 37 int fc_ifindex;
38 u32 fc_flags; 38 u32 fc_flags;
39 u32 fc_protocol; 39 u32 fc_protocol;
40 u32 fc_type; /* only 8 bits are used */
40 41
41 struct in6_addr fc_dst; 42 struct in6_addr fc_dst;
42 struct in6_addr fc_src; 43 struct in6_addr fc_src;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 358fb86f57eb..e03047f7090b 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -5,6 +5,8 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/ip6_tunnel.h> 6#include <linux/ip6_tunnel.h>
7 7
8#define IP6TUNNEL_ERR_TIMEO (30*HZ)
9
8/* capable of sending packets */ 10/* capable of sending packets */
9#define IP6_TNL_F_CAP_XMIT 0x10000 11#define IP6_TNL_F_CAP_XMIT 0x10000
10/* capable of receiving packets */ 12/* capable of receiving packets */
@@ -12,15 +14,40 @@
12/* determine capability on a per-packet basis */ 14/* determine capability on a per-packet basis */
13#define IP6_TNL_F_CAP_PER_PACKET 0x40000 15#define IP6_TNL_F_CAP_PER_PACKET 0x40000
14 16
15/* IPv6 tunnel */ 17struct __ip6_tnl_parm {
18 char name[IFNAMSIZ]; /* name of tunnel device */
19 int link; /* ifindex of underlying L2 interface */
20 __u8 proto; /* tunnel protocol */
21 __u8 encap_limit; /* encapsulation limit for tunnel */
22 __u8 hop_limit; /* hop limit for tunnel */
23 __be32 flowinfo; /* traffic class and flowlabel for tunnel */
24 __u32 flags; /* tunnel flags */
25 struct in6_addr laddr; /* local tunnel end-point address */
26 struct in6_addr raddr; /* remote tunnel end-point address */
27
28 __be16 i_flags;
29 __be16 o_flags;
30 __be32 i_key;
31 __be32 o_key;
32};
16 33
34/* IPv6 tunnel */
17struct ip6_tnl { 35struct ip6_tnl {
18 struct ip6_tnl __rcu *next; /* next tunnel in list */ 36 struct ip6_tnl __rcu *next; /* next tunnel in list */
19 struct net_device *dev; /* virtual device associated with tunnel */ 37 struct net_device *dev; /* virtual device associated with tunnel */
20 struct ip6_tnl_parm parms; /* tunnel configuration parameters */ 38 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
21 struct flowi fl; /* flowi template for xmit */ 39 struct flowi fl; /* flowi template for xmit */
22 struct dst_entry *dst_cache; /* cached dst */ 40 struct dst_entry *dst_cache; /* cached dst */
23 u32 dst_cookie; 41 u32 dst_cookie;
42
43 int err_count;
44 unsigned long err_time;
45
46 /* These fields used only by GRE */
47 __u32 i_seqno; /* The last seen seqno */
48 __u32 o_seqno; /* The last output seqno */
49 int hlen; /* Precalculated GRE header length */
50 int mlink;
24}; 51};
25 52
26/* Tunnel encapsulation limit destination sub-option */ 53/* Tunnel encapsulation limit destination sub-option */
@@ -31,4 +58,14 @@ struct ipv6_tlv_tnl_enc_lim {
31 __u8 encap_limit; /* tunnel encapsulation limit */ 58 __u8 encap_limit; /* tunnel encapsulation limit */
32} __packed; 59} __packed;
33 60
61struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
62void ip6_tnl_dst_reset(struct ip6_tnl *t);
63void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
64int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
65 const struct in6_addr *raddr);
66int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
67__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
68__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
69 const struct in6_addr *raddr);
70
34#endif 71#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 95374d1696a1..ee75ccdf5188 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -808,8 +808,6 @@ struct netns_ipvs {
808 struct list_head rs_table[IP_VS_RTAB_SIZE]; 808 struct list_head rs_table[IP_VS_RTAB_SIZE];
809 /* ip_vs_app */ 809 /* ip_vs_app */
810 struct list_head app_list; 810 struct list_head app_list;
811 /* ip_vs_ftp */
812 struct ip_vs_app *ftp_app;
813 /* ip_vs_proto */ 811 /* ip_vs_proto */
814 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ 812 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
815 struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; 813 struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
@@ -890,6 +888,7 @@ struct netns_ipvs {
890 unsigned int sysctl_sync_refresh_period; 888 unsigned int sysctl_sync_refresh_period;
891 int sysctl_sync_retries; 889 int sysctl_sync_retries;
892 int sysctl_nat_icmp_send; 890 int sysctl_nat_icmp_send;
891 int sysctl_pmtu_disc;
893 892
894 /* ip_vs_lblc */ 893 /* ip_vs_lblc */
895 int sysctl_lblc_expiration; 894 int sysctl_lblc_expiration;
@@ -976,6 +975,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
976 return ipvs->sysctl_sync_sock_size; 975 return ipvs->sysctl_sync_sock_size;
977} 976}
978 977
978static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
979{
980 return ipvs->sysctl_pmtu_disc;
981}
982
979#else 983#else
980 984
981static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) 985static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1018,6 +1022,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
1018 return 0; 1022 return 0;
1019} 1023}
1020 1024
1025static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1026{
1027 return 1;
1028}
1029
1021#endif 1030#endif
1022 1031
1023/* 1032/*
@@ -1179,7 +1188,8 @@ extern void ip_vs_service_net_cleanup(struct net *net);
1179 * (from ip_vs_app.c) 1188 * (from ip_vs_app.c)
1180 */ 1189 */
1181#define IP_VS_APP_MAX_PORTS 8 1190#define IP_VS_APP_MAX_PORTS 8
1182extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app); 1191extern struct ip_vs_app *register_ip_vs_app(struct net *net,
1192 struct ip_vs_app *app);
1183extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app); 1193extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
1184extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1194extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1185extern void ip_vs_unbind_app(struct ip_vs_conn *cp); 1195extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
diff --git a/include/net/ipip.h b/include/net/ipip.h
index a93cf6d7e94b..ddc077c51f32 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -2,6 +2,7 @@
2#define __NET_IPIP_H 1 2#define __NET_IPIP_H 1
3 3
4#include <linux/if_tunnel.h> 4#include <linux/if_tunnel.h>
5#include <net/gro_cells.h>
5#include <net/ip.h> 6#include <net/ip.h>
6 7
7/* Keep error state on tunnel for 30 sec */ 8/* Keep error state on tunnel for 30 sec */
@@ -36,6 +37,8 @@ struct ip_tunnel {
36#endif 37#endif
37 struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */ 38 struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
38 unsigned int prl_count; /* # of entries in PRL */ 39 unsigned int prl_count; /* # of entries in PRL */
40
41 struct gro_cells gro_cells;
39}; 42};
40 43
41struct ip_tunnel_prl_entry { 44struct ip_tunnel_prl_entry {
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index c8a202436e01..979bf6c13141 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -34,6 +34,7 @@
34#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */ 34#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
35#define NEXTHDR_ROUTING 43 /* Routing header. */ 35#define NEXTHDR_ROUTING 43 /* Routing header. */
36#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */ 36#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */
37#define NEXTHDR_GRE 47 /* GRE header. */
37#define NEXTHDR_ESP 50 /* Encapsulating security payload. */ 38#define NEXTHDR_ESP 50 /* Encapsulating security payload. */
38#define NEXTHDR_AUTH 51 /* Authentication header. */ 39#define NEXTHDR_AUTH 51 /* Authentication header. */
39#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */ 40#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
@@ -270,8 +271,17 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
270 271
271extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); 272extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
272 273
273int ip6_frag_nqueues(struct net *net); 274#if IS_ENABLED(CONFIG_IPV6)
274int ip6_frag_mem(struct net *net); 275static inline int ip6_frag_nqueues(struct net *net)
276{
277 return net->ipv6.frags.nqueues;
278}
279
280static inline int ip6_frag_mem(struct net *net)
281{
282 return atomic_read(&net->ipv6.frags.mem);
283}
284#endif
275 285
276#define IPV6_FRAG_HIGH_THRESH (256 * 1024) /* 262144 */ 286#define IPV6_FRAG_HIGH_THRESH (256 * 1024) /* 262144 */
277#define IPV6_FRAG_LOW_THRESH (192 * 1024) /* 196608 */ 287#define IPV6_FRAG_LOW_THRESH (192 * 1024) /* 196608 */
@@ -410,6 +420,25 @@ struct ip6_create_arg {
410void ip6_frag_init(struct inet_frag_queue *q, void *a); 420void ip6_frag_init(struct inet_frag_queue *q, void *a);
411bool ip6_frag_match(struct inet_frag_queue *q, void *a); 421bool ip6_frag_match(struct inet_frag_queue *q, void *a);
412 422
423/*
424 * Equivalent of ipv4 struct ip
425 */
426struct frag_queue {
427 struct inet_frag_queue q;
428
429 __be32 id; /* fragment id */
430 u32 user;
431 struct in6_addr saddr;
432 struct in6_addr daddr;
433
434 int iif;
435 unsigned int csum;
436 __u16 nhoffset;
437};
438
439void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
440 struct inet_frags *frags);
441
413static inline bool ipv6_addr_any(const struct in6_addr *a) 442static inline bool ipv6_addr_any(const struct in6_addr *a)
414{ 443{
415#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 444#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
diff --git a/include/net/llc.h b/include/net/llc.h
index f2d0fc570527..9e7d7f08ef77 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -151,7 +151,6 @@ extern int sysctl_llc2_ack_timeout;
151extern int sysctl_llc2_busy_timeout; 151extern int sysctl_llc2_busy_timeout;
152extern int sysctl_llc2_p_timeout; 152extern int sysctl_llc2_p_timeout;
153extern int sysctl_llc2_rej_timeout; 153extern int sysctl_llc2_rej_timeout;
154extern int sysctl_llc_station_ack_timeout;
155#else 154#else
156#define llc_sysctl_init() (0) 155#define llc_sysctl_init() (0)
157#define llc_sysctl_exit() do { } while(0) 156#define llc_sysctl_exit() do { } while(0)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index bb86aa6f98dd..82558c8decf8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -171,6 +171,7 @@ struct ieee80211_low_level_stats {
171 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. 171 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
172 * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode) 172 * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
173 * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode) 173 * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
174 * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
174 */ 175 */
175enum ieee80211_bss_change { 176enum ieee80211_bss_change {
176 BSS_CHANGED_ASSOC = 1<<0, 177 BSS_CHANGED_ASSOC = 1<<0,
@@ -190,6 +191,7 @@ enum ieee80211_bss_change {
190 BSS_CHANGED_IDLE = 1<<14, 191 BSS_CHANGED_IDLE = 1<<14,
191 BSS_CHANGED_SSID = 1<<15, 192 BSS_CHANGED_SSID = 1<<15,
192 BSS_CHANGED_AP_PROBE_RESP = 1<<16, 193 BSS_CHANGED_AP_PROBE_RESP = 1<<16,
194 BSS_CHANGED_PS = 1<<17,
193 195
194 /* when adding here, make sure to change ieee80211_reconfig */ 196 /* when adding here, make sure to change ieee80211_reconfig */
195}; 197};
@@ -266,6 +268,8 @@ enum ieee80211_rssi_event {
266 * @idle: This interface is idle. There's also a global idle flag in the 268 * @idle: This interface is idle. There's also a global idle flag in the
267 * hardware config which may be more appropriate depending on what 269 * hardware config which may be more appropriate depending on what
268 * your driver/device needs to do. 270 * your driver/device needs to do.
271 * @ps: power-save mode (STA only). This flag is NOT affected by
272 * offchannel/dynamic_ps operations.
269 * @ssid: The SSID of the current vif. Only valid in AP-mode. 273 * @ssid: The SSID of the current vif. Only valid in AP-mode.
270 * @ssid_len: Length of SSID given in @ssid. 274 * @ssid_len: Length of SSID given in @ssid.
271 * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode. 275 * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
@@ -296,6 +300,7 @@ struct ieee80211_bss_conf {
296 bool arp_filter_enabled; 300 bool arp_filter_enabled;
297 bool qos; 301 bool qos;
298 bool idle; 302 bool idle;
303 bool ps;
299 u8 ssid[IEEE80211_MAX_SSID_LEN]; 304 u8 ssid[IEEE80211_MAX_SSID_LEN];
300 size_t ssid_len; 305 size_t ssid_len;
301 bool hidden_ssid; 306 bool hidden_ssid;
@@ -522,9 +527,6 @@ struct ieee80211_tx_rate {
522 * (2) driver internal use (if applicable) 527 * (2) driver internal use (if applicable)
523 * (3) TX status information - driver tells mac80211 what happened 528 * (3) TX status information - driver tells mac80211 what happened
524 * 529 *
525 * The TX control's sta pointer is only valid during the ->tx call,
526 * it may be NULL.
527 *
528 * @flags: transmit info flags, defined above 530 * @flags: transmit info flags, defined above
529 * @band: the band to transmit on (use for checking for races) 531 * @band: the band to transmit on (use for checking for races)
530 * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC 532 * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
@@ -555,6 +557,7 @@ struct ieee80211_tx_info {
555 struct ieee80211_tx_rate rates[ 557 struct ieee80211_tx_rate rates[
556 IEEE80211_TX_MAX_RATES]; 558 IEEE80211_TX_MAX_RATES];
557 s8 rts_cts_rate_idx; 559 s8 rts_cts_rate_idx;
560 /* 3 bytes free */
558 }; 561 };
559 /* only needed before rate control */ 562 /* only needed before rate control */
560 unsigned long jiffies; 563 unsigned long jiffies;
@@ -562,7 +565,7 @@ struct ieee80211_tx_info {
562 /* NB: vif can be NULL for injected frames */ 565 /* NB: vif can be NULL for injected frames */
563 struct ieee80211_vif *vif; 566 struct ieee80211_vif *vif;
564 struct ieee80211_key_conf *hw_key; 567 struct ieee80211_key_conf *hw_key;
565 struct ieee80211_sta *sta; 568 /* 8 bytes free */
566 } control; 569 } control;
567 struct { 570 struct {
568 struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; 571 struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -673,21 +676,41 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
673 * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if 676 * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
674 * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT 677 * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
675 * to hw.radiotap_mcs_details to advertise that fact 678 * to hw.radiotap_mcs_details to advertise that fact
679 * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
680 * number (@ampdu_reference) must be populated and be a distinct number for
681 * each A-MPDU
682 * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
683 * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
684 * monitoring purposes only
685 * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
686 * subframes of a single A-MPDU
687 * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
688 * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
689 * on this subframe
690 * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
691 * is stored in the @ampdu_delimiter_crc field)
676 */ 692 */
677enum mac80211_rx_flags { 693enum mac80211_rx_flags {
678 RX_FLAG_MMIC_ERROR = 1<<0, 694 RX_FLAG_MMIC_ERROR = BIT(0),
679 RX_FLAG_DECRYPTED = 1<<1, 695 RX_FLAG_DECRYPTED = BIT(1),
680 RX_FLAG_MMIC_STRIPPED = 1<<3, 696 RX_FLAG_MMIC_STRIPPED = BIT(3),
681 RX_FLAG_IV_STRIPPED = 1<<4, 697 RX_FLAG_IV_STRIPPED = BIT(4),
682 RX_FLAG_FAILED_FCS_CRC = 1<<5, 698 RX_FLAG_FAILED_FCS_CRC = BIT(5),
683 RX_FLAG_FAILED_PLCP_CRC = 1<<6, 699 RX_FLAG_FAILED_PLCP_CRC = BIT(6),
684 RX_FLAG_MACTIME_MPDU = 1<<7, 700 RX_FLAG_MACTIME_MPDU = BIT(7),
685 RX_FLAG_SHORTPRE = 1<<8, 701 RX_FLAG_SHORTPRE = BIT(8),
686 RX_FLAG_HT = 1<<9, 702 RX_FLAG_HT = BIT(9),
687 RX_FLAG_40MHZ = 1<<10, 703 RX_FLAG_40MHZ = BIT(10),
688 RX_FLAG_SHORT_GI = 1<<11, 704 RX_FLAG_SHORT_GI = BIT(11),
689 RX_FLAG_NO_SIGNAL_VAL = 1<<12, 705 RX_FLAG_NO_SIGNAL_VAL = BIT(12),
690 RX_FLAG_HT_GF = 1<<13, 706 RX_FLAG_HT_GF = BIT(13),
707 RX_FLAG_AMPDU_DETAILS = BIT(14),
708 RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15),
709 RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16),
710 RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
711 RX_FLAG_AMPDU_IS_LAST = BIT(18),
712 RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
713 RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20),
691}; 714};
692 715
693/** 716/**
@@ -711,17 +734,22 @@ enum mac80211_rx_flags {
711 * HT rates are use (RX_FLAG_HT) 734 * HT rates are use (RX_FLAG_HT)
712 * @flag: %RX_FLAG_* 735 * @flag: %RX_FLAG_*
713 * @rx_flags: internal RX flags for mac80211 736 * @rx_flags: internal RX flags for mac80211
737 * @ampdu_reference: A-MPDU reference number, must be a different value for
738 * each A-MPDU but the same for each subframe within one A-MPDU
739 * @ampdu_delimiter_crc: A-MPDU delimiter CRC
714 */ 740 */
715struct ieee80211_rx_status { 741struct ieee80211_rx_status {
716 u64 mactime; 742 u64 mactime;
717 u32 device_timestamp; 743 u32 device_timestamp;
718 u16 flag; 744 u32 ampdu_reference;
745 u32 flag;
719 u16 freq; 746 u16 freq;
720 u8 rate_idx; 747 u8 rate_idx;
721 u8 rx_flags; 748 u8 rx_flags;
722 u8 band; 749 u8 band;
723 u8 antenna; 750 u8 antenna;
724 s8 signal; 751 s8 signal;
752 u8 ampdu_delimiter_crc;
725}; 753};
726 754
727/** 755/**
@@ -945,21 +973,29 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
945 * generation in software. 973 * generation in software.
946 * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates 974 * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
947 * that the key is pairwise rather then a shared key. 975 * that the key is pairwise rather then a shared key.
948 * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a 976 * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
949 * CCMP key if it requires CCMP encryption of management frames (MFP) to 977 * CCMP key if it requires CCMP encryption of management frames (MFP) to
950 * be done in software. 978 * be done in software.
951 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver 979 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
952 * if space should be prepared for the IV, but the IV 980 * if space should be prepared for the IV, but the IV
953 * itself should not be generated. Do not set together with 981 * itself should not be generated. Do not set together with
954 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. 982 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
983 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
984 * management frames. The flag can help drivers that have a hardware
985 * crypto implementation that doesn't deal with management frames
986 * properly by allowing them to not upload the keys to hardware and
987 * fall back to software crypto. Note that this flag deals only with
988 * RX, if your crypto engine can't deal with TX you can also set the
989 * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
955 */ 990 */
956enum ieee80211_key_flags { 991enum ieee80211_key_flags {
957 IEEE80211_KEY_FLAG_WMM_STA = 1<<0, 992 IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
958 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, 993 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1,
959 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, 994 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
960 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, 995 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
961 IEEE80211_KEY_FLAG_SW_MGMT = 1<<4, 996 IEEE80211_KEY_FLAG_SW_MGMT_TX = 1<<4,
962 IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5, 997 IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
998 IEEE80211_KEY_FLAG_RX_MGMT = 1<<6,
963}; 999};
964 1000
965/** 1001/**
@@ -1074,6 +1110,16 @@ enum sta_notify_cmd {
1074}; 1110};
1075 1111
1076/** 1112/**
1113 * struct ieee80211_tx_control - TX control data
1114 *
1115 * @sta: station table entry, this sta pointer may be NULL and
1116 * it is not allowed to copy the pointer, due to RCU.
1117 */
1118struct ieee80211_tx_control {
1119 struct ieee80211_sta *sta;
1120};
1121
1122/**
1077 * enum ieee80211_hw_flags - hardware flags 1123 * enum ieee80211_hw_flags - hardware flags
1078 * 1124 *
1079 * These flags are used to indicate hardware capabilities to 1125 * These flags are used to indicate hardware capabilities to
@@ -1203,6 +1249,10 @@ enum sta_notify_cmd {
1203 * queue mapping in order to use different queues (not just one per AC) 1249 * queue mapping in order to use different queues (not just one per AC)
1204 * for different virtual interfaces. See the doc section on HW queue 1250 * for different virtual interfaces. See the doc section on HW queue
1205 * control for more details. 1251 * control for more details.
1252 *
1253 * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
1254 * P2P Interface. This will be honoured even if more than one interface
1255 * is supported.
1206 */ 1256 */
1207enum ieee80211_hw_flags { 1257enum ieee80211_hw_flags {
1208 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 1258 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1230,6 +1280,7 @@ enum ieee80211_hw_flags {
1230 IEEE80211_HW_AP_LINK_PS = 1<<22, 1280 IEEE80211_HW_AP_LINK_PS = 1<<22,
1231 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, 1281 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
1232 IEEE80211_HW_SCAN_WHILE_IDLE = 1<<24, 1282 IEEE80211_HW_SCAN_WHILE_IDLE = 1<<24,
1283 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
1233}; 1284};
1234 1285
1235/** 1286/**
@@ -1884,10 +1935,14 @@ enum ieee80211_frame_release_type {
1884 * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit 1935 * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
1885 * to this station changed. 1936 * to this station changed.
1886 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed. 1937 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
1938 * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
1939 * changed (in IBSS mode) due to discovering more information about
1940 * the peer.
1887 */ 1941 */
1888enum ieee80211_rate_control_changed { 1942enum ieee80211_rate_control_changed {
1889 IEEE80211_RC_BW_CHANGED = BIT(0), 1943 IEEE80211_RC_BW_CHANGED = BIT(0),
1890 IEEE80211_RC_SMPS_CHANGED = BIT(1), 1944 IEEE80211_RC_SMPS_CHANGED = BIT(1),
1945 IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
1891}; 1946};
1892 1947
1893/** 1948/**
@@ -2264,7 +2319,9 @@ enum ieee80211_rate_control_changed {
2264 * The callback is optional and can (should!) sleep. 2319 * The callback is optional and can (should!) sleep.
2265 */ 2320 */
2266struct ieee80211_ops { 2321struct ieee80211_ops {
2267 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 2322 void (*tx)(struct ieee80211_hw *hw,
2323 struct ieee80211_tx_control *control,
2324 struct sk_buff *skb);
2268 int (*start)(struct ieee80211_hw *hw); 2325 int (*start)(struct ieee80211_hw *hw);
2269 void (*stop)(struct ieee80211_hw *hw); 2326 void (*stop)(struct ieee80211_hw *hw);
2270#ifdef CONFIG_PM 2327#ifdef CONFIG_PM
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 96a3b5c03e37..980d263765cf 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -49,6 +49,7 @@ enum {
49#include <linux/types.h> 49#include <linux/types.h>
50#include <linux/if_arp.h> 50#include <linux/if_arp.h>
51#include <linux/netdevice.h> 51#include <linux/netdevice.h>
52#include <linux/hash.h>
52 53
53#include <net/neighbour.h> 54#include <net/neighbour.h>
54 55
@@ -134,7 +135,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
134{ 135{
135 const u32 *p32 = pkey; 136 const u32 *p32 = pkey;
136 137
137 return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) + 138 return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
138 (p32[1] * hash_rnd[1]) + 139 (p32[1] * hash_rnd[1]) +
139 (p32[2] * hash_rnd[2]) + 140 (p32[2] * hash_rnd[2]) +
140 (p32[3] * hash_rnd[3])); 141 (p32[3] * hash_rnd[3]));
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 344d8988842a..0dab173e27da 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -334,18 +334,22 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
334} 334}
335#endif 335#endif
336 336
337static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) 337static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
338{ 338{
339 unsigned int seq; 339 unsigned int seq;
340 int hh_len; 340 int hh_len;
341 341
342 do { 342 do {
343 int hh_alen;
344
345 seq = read_seqbegin(&hh->hh_lock); 343 seq = read_seqbegin(&hh->hh_lock);
346 hh_len = hh->hh_len; 344 hh_len = hh->hh_len;
347 hh_alen = HH_DATA_ALIGN(hh_len); 345 if (likely(hh_len <= HH_DATA_MOD)) {
348 memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); 346 /* this is inlined by gcc */
347 memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
348 } else {
349 int hh_alen = HH_DATA_ALIGN(hh_len);
350
351 memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
352 }
349 } while (read_seqretry(&hh->hh_lock, seq)); 353 } while (read_seqretry(&hh->hh_lock, seq));
350 354
351 skb_push(skb, hh_len); 355 skb_push(skb, hh_len);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fd87963a0ea5..4faf6612ecac 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -15,6 +15,7 @@
15#include <net/netns/packet.h> 15#include <net/netns/packet.h>
16#include <net/netns/ipv4.h> 16#include <net/netns/ipv4.h>
17#include <net/netns/ipv6.h> 17#include <net/netns/ipv6.h>
18#include <net/netns/sctp.h>
18#include <net/netns/dccp.h> 19#include <net/netns/dccp.h>
19#include <net/netns/x_tables.h> 20#include <net/netns/x_tables.h>
20#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 21#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -66,6 +67,7 @@ struct net {
66 struct hlist_head *dev_name_head; 67 struct hlist_head *dev_name_head;
67 struct hlist_head *dev_index_head; 68 struct hlist_head *dev_index_head;
68 unsigned int dev_base_seq; /* protected by rtnl_mutex */ 69 unsigned int dev_base_seq; /* protected by rtnl_mutex */
70 int ifindex;
69 71
70 /* core fib_rules */ 72 /* core fib_rules */
71 struct list_head rules_ops; 73 struct list_head rules_ops;
@@ -80,6 +82,9 @@ struct net {
80#if IS_ENABLED(CONFIG_IPV6) 82#if IS_ENABLED(CONFIG_IPV6)
81 struct netns_ipv6 ipv6; 83 struct netns_ipv6 ipv6;
82#endif 84#endif
85#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
86 struct netns_sctp sctp;
87#endif
83#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) 88#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
84 struct netns_dccp dccp; 89 struct netns_dccp dccp;
85#endif 90#endif
@@ -88,6 +93,9 @@ struct net {
88#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 93#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
89 struct netns_ct ct; 94 struct netns_ct ct;
90#endif 95#endif
96#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
97 struct netns_nf_frag nf_frag;
98#endif
91 struct sock *nfnl; 99 struct sock *nfnl;
92 struct sock *nfnl_stash; 100 struct sock *nfnl_stash;
93#endif 101#endif
@@ -105,6 +113,13 @@ struct net {
105 atomic_t rt_genid; 113 atomic_t rt_genid;
106}; 114};
107 115
116/*
117 * ifindex generation is per-net namespace, and loopback is
118 * always the 1st device in ns (see net_dev_init), thus any
119 * loopback device should get ifindex 1
120 */
121
122#define LOOPBACK_IFINDEX 1
108 123
109#include <linux/seq_file_net.h> 124#include <linux/seq_file_net.h>
110 125
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4a045cda9c60..5654d292efd4 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -17,7 +17,7 @@ struct nf_conntrack_ecache {
17 unsigned long missed; /* missed events */ 17 unsigned long missed; /* missed events */
18 u16 ctmask; /* bitmask of ct events to be delivered */ 18 u16 ctmask; /* bitmask of ct events to be delivered */
19 u16 expmask; /* bitmask of expect events to be delivered */ 19 u16 expmask; /* bitmask of expect events to be delivered */
20 u32 pid; /* netlink pid of destroyer */ 20 u32 portid; /* netlink portid of destroyer */
21 struct timer_list timeout; 21 struct timer_list timeout;
22}; 22};
23 23
@@ -60,7 +60,7 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
60/* This structure is passed to event handler */ 60/* This structure is passed to event handler */
61struct nf_ct_event { 61struct nf_ct_event {
62 struct nf_conn *ct; 62 struct nf_conn *ct;
63 u32 pid; 63 u32 portid;
64 int report; 64 int report;
65}; 65};
66 66
@@ -92,7 +92,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
92static inline int 92static inline int
93nf_conntrack_eventmask_report(unsigned int eventmask, 93nf_conntrack_eventmask_report(unsigned int eventmask,
94 struct nf_conn *ct, 94 struct nf_conn *ct,
95 u32 pid, 95 u32 portid,
96 int report) 96 int report)
97{ 97{
98 int ret = 0; 98 int ret = 0;
@@ -112,11 +112,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
112 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { 112 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
113 struct nf_ct_event item = { 113 struct nf_ct_event item = {
114 .ct = ct, 114 .ct = ct,
115 .pid = e->pid ? e->pid : pid, 115 .portid = e->portid ? e->portid : portid,
116 .report = report 116 .report = report
117 }; 117 };
118 /* This is a resent of a destroy event? If so, skip missed */ 118 /* This is a resent of a destroy event? If so, skip missed */
119 unsigned long missed = e->pid ? 0 : e->missed; 119 unsigned long missed = e->portid ? 0 : e->missed;
120 120
121 if (!((eventmask | missed) & e->ctmask)) 121 if (!((eventmask | missed) & e->ctmask))
122 goto out_unlock; 122 goto out_unlock;
@@ -126,11 +126,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
126 spin_lock_bh(&ct->lock); 126 spin_lock_bh(&ct->lock);
127 if (ret < 0) { 127 if (ret < 0) {
128 /* This is a destroy event that has been 128 /* This is a destroy event that has been
129 * triggered by a process, we store the PID 129 * triggered by a process, we store the PORTID
130 * to include it in the retransmission. */ 130 * to include it in the retransmission. */
131 if (eventmask & (1 << IPCT_DESTROY) && 131 if (eventmask & (1 << IPCT_DESTROY) &&
132 e->pid == 0 && pid != 0) 132 e->portid == 0 && portid != 0)
133 e->pid = pid; 133 e->portid = portid;
134 else 134 else
135 e->missed |= eventmask; 135 e->missed |= eventmask;
136 } else 136 } else
@@ -145,9 +145,9 @@ out_unlock:
145 145
146static inline int 146static inline int
147nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, 147nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
148 u32 pid, int report) 148 u32 portid, int report)
149{ 149{
150 return nf_conntrack_eventmask_report(1 << event, ct, pid, report); 150 return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
151} 151}
152 152
153static inline int 153static inline int
@@ -158,7 +158,7 @@ nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
158 158
159struct nf_exp_event { 159struct nf_exp_event {
160 struct nf_conntrack_expect *exp; 160 struct nf_conntrack_expect *exp;
161 u32 pid; 161 u32 portid;
162 int report; 162 int report;
163}; 163};
164 164
@@ -172,7 +172,7 @@ extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_even
172static inline void 172static inline void
173nf_ct_expect_event_report(enum ip_conntrack_expect_events event, 173nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
174 struct nf_conntrack_expect *exp, 174 struct nf_conntrack_expect *exp,
175 u32 pid, 175 u32 portid,
176 int report) 176 int report)
177{ 177{
178 struct net *net = nf_ct_exp_net(exp); 178 struct net *net = nf_ct_exp_net(exp);
@@ -191,7 +191,7 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
191 if (e->expmask & (1 << event)) { 191 if (e->expmask & (1 << event)) {
192 struct nf_exp_event item = { 192 struct nf_exp_event item = {
193 .exp = exp, 193 .exp = exp,
194 .pid = pid, 194 .portid = portid,
195 .report = report 195 .report = report
196 }; 196 };
197 notify->fcn(1 << event, &item); 197 notify->fcn(1 << event, &item);
@@ -216,20 +216,20 @@ static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
216 struct nf_conn *ct) {} 216 struct nf_conn *ct) {}
217static inline int nf_conntrack_eventmask_report(unsigned int eventmask, 217static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
218 struct nf_conn *ct, 218 struct nf_conn *ct,
219 u32 pid, 219 u32 portid,
220 int report) { return 0; } 220 int report) { return 0; }
221static inline int nf_conntrack_event(enum ip_conntrack_events event, 221static inline int nf_conntrack_event(enum ip_conntrack_events event,
222 struct nf_conn *ct) { return 0; } 222 struct nf_conn *ct) { return 0; }
223static inline int nf_conntrack_event_report(enum ip_conntrack_events event, 223static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
224 struct nf_conn *ct, 224 struct nf_conn *ct,
225 u32 pid, 225 u32 portid,
226 int report) { return 0; } 226 int report) { return 0; }
227static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} 227static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
228static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, 228static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
229 struct nf_conntrack_expect *exp) {} 229 struct nf_conntrack_expect *exp) {}
230static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, 230static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
231 struct nf_conntrack_expect *exp, 231 struct nf_conntrack_expect *exp,
232 u32 pid, 232 u32 portid,
233 int report) {} 233 int report) {}
234 234
235static inline int nf_conntrack_ecache_init(struct net *net) 235static inline int nf_conntrack_ecache_init(struct net *net)
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 983f00263243..cc13f377a705 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -43,7 +43,7 @@ struct nf_conntrack_expect {
43 unsigned int class; 43 unsigned int class;
44 44
45#ifdef CONFIG_NF_NAT_NEEDED 45#ifdef CONFIG_NF_NAT_NEEDED
46 __be32 saved_ip; 46 union nf_inet_addr saved_addr;
47 /* This is the original per-proto part, used to map the 47 /* This is the original per-proto part, used to map the
48 * expected connection the way the recipient expects. */ 48 * expected connection the way the recipient expects. */
49 union nf_conntrack_man_proto saved_proto; 49 union nf_conntrack_man_proto saved_proto;
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 34ec89f8dbf9..e41e472d08f2 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -55,6 +55,26 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
55#endif 55#endif
56}; 56};
57 57
58static inline unsigned int *
59nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
60 struct nf_conntrack_l4proto *l4proto)
61{
62#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
63 struct nf_conn_timeout *timeout_ext;
64 unsigned int *timeouts;
65
66 timeout_ext = nf_ct_timeout_find(ct);
67 if (timeout_ext)
68 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
69 else
70 timeouts = l4proto->get_timeouts(net);
71
72 return timeouts;
73#else
74 return l4proto->get_timeouts(net);
75#endif
76}
77
58#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 78#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
59extern int nf_conntrack_timeout_init(struct net *net); 79extern int nf_conntrack_timeout_init(struct net *net);
60extern void nf_conntrack_timeout_fini(struct net *net); 80extern void nf_conntrack_timeout_fini(struct net *net);
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index b4de990b55f1..bd8eea720f2e 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -43,14 +43,16 @@ struct nf_conn_nat {
43 struct nf_conn *ct; 43 struct nf_conn *ct;
44 union nf_conntrack_nat_help help; 44 union nf_conntrack_nat_help help;
45#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ 45#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
46 defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) 46 defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
47 defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
48 defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
47 int masq_index; 49 int masq_index;
48#endif 50#endif
49}; 51};
50 52
51/* Set up the info structure to map into this range. */ 53/* Set up the info structure to map into this range. */
52extern unsigned int nf_nat_setup_info(struct nf_conn *ct, 54extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
53 const struct nf_nat_ipv4_range *range, 55 const struct nf_nat_range *range,
54 enum nf_nat_manip_type maniptype); 56 enum nf_nat_manip_type maniptype);
55 57
56/* Is this tuple already taken? (not by us)*/ 58/* Is this tuple already taken? (not by us)*/
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index b13d8d18d595..972e1e47ec79 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -12,10 +12,7 @@ extern unsigned int nf_nat_packet(struct nf_conn *ct,
12 unsigned int hooknum, 12 unsigned int hooknum,
13 struct sk_buff *skb); 13 struct sk_buff *skb);
14 14
15extern int nf_nat_icmp_reply_translation(struct nf_conn *ct, 15extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
16 enum ip_conntrack_info ctinfo,
17 unsigned int hooknum,
18 struct sk_buff *skb);
19 16
20static inline int nf_nat_initialized(struct nf_conn *ct, 17static inline int nf_nat_initialized(struct nf_conn *ct,
21 enum nf_nat_manip_type manip) 18 enum nf_nat_manip_type manip)
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 7d8fb7b46c44..b4d6bfc2af03 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -10,6 +10,7 @@ struct sk_buff;
10extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, 10extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
11 struct nf_conn *ct, 11 struct nf_conn *ct,
12 enum ip_conntrack_info ctinfo, 12 enum ip_conntrack_info ctinfo,
13 unsigned int protoff,
13 unsigned int match_offset, 14 unsigned int match_offset,
14 unsigned int match_len, 15 unsigned int match_len,
15 const char *rep_buffer, 16 const char *rep_buffer,
@@ -18,12 +19,13 @@ extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
18static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb, 19static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
19 struct nf_conn *ct, 20 struct nf_conn *ct,
20 enum ip_conntrack_info ctinfo, 21 enum ip_conntrack_info ctinfo,
22 unsigned int protoff,
21 unsigned int match_offset, 23 unsigned int match_offset,
22 unsigned int match_len, 24 unsigned int match_len,
23 const char *rep_buffer, 25 const char *rep_buffer,
24 unsigned int rep_len) 26 unsigned int rep_len)
25{ 27{
26 return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 28 return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
27 match_offset, match_len, 29 match_offset, match_len,
28 rep_buffer, rep_len, true); 30 rep_buffer, rep_len, true);
29} 31}
@@ -31,6 +33,7 @@ static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
31extern int nf_nat_mangle_udp_packet(struct sk_buff *skb, 33extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
32 struct nf_conn *ct, 34 struct nf_conn *ct,
33 enum ip_conntrack_info ctinfo, 35 enum ip_conntrack_info ctinfo,
36 unsigned int protoff,
34 unsigned int match_offset, 37 unsigned int match_offset,
35 unsigned int match_len, 38 unsigned int match_len,
36 const char *rep_buffer, 39 const char *rep_buffer,
@@ -41,10 +44,12 @@ extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
41 __be32 seq, s16 off); 44 __be32 seq, s16 off);
42extern int nf_nat_seq_adjust(struct sk_buff *skb, 45extern int nf_nat_seq_adjust(struct sk_buff *skb,
43 struct nf_conn *ct, 46 struct nf_conn *ct,
44 enum ip_conntrack_info ctinfo); 47 enum ip_conntrack_info ctinfo,
48 unsigned int protoff);
45extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, 49extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
46 struct nf_conn *ct, 50 struct nf_conn *ct,
47 enum ip_conntrack_info ctinfo); 51 enum ip_conntrack_info ctinfo,
52 unsigned int protoff);
48 53
49/* Setup NAT on this expected conntrack so it follows master, but goes 54/* Setup NAT on this expected conntrack so it follows master, but goes
50 * to port ct->master->saved_proto. */ 55 * to port ct->master->saved_proto. */
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
new file mode 100644
index 000000000000..bd3b97e02c82
--- /dev/null
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -0,0 +1,52 @@
1#ifndef _NF_NAT_L3PROTO_H
2#define _NF_NAT_L3PROTO_H
3
4struct nf_nat_l4proto;
5struct nf_nat_l3proto {
6 u8 l3proto;
7
8 bool (*in_range)(const struct nf_conntrack_tuple *t,
9 const struct nf_nat_range *range);
10
11 u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
12
13 bool (*manip_pkt)(struct sk_buff *skb,
14 unsigned int iphdroff,
15 const struct nf_nat_l4proto *l4proto,
16 const struct nf_conntrack_tuple *target,
17 enum nf_nat_manip_type maniptype);
18
19 void (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
20 __sum16 *check,
21 const struct nf_conntrack_tuple *t,
22 enum nf_nat_manip_type maniptype);
23
24 void (*csum_recalc)(struct sk_buff *skb, u8 proto,
25 void *data, __sum16 *check,
26 int datalen, int oldlen);
27
28 void (*decode_session)(struct sk_buff *skb,
29 const struct nf_conn *ct,
30 enum ip_conntrack_dir dir,
31 unsigned long statusbit,
32 struct flowi *fl);
33
34 int (*nlattr_to_range)(struct nlattr *tb[],
35 struct nf_nat_range *range);
36};
37
38extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
39extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
40extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
41
42extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
43 struct nf_conn *ct,
44 enum ip_conntrack_info ctinfo,
45 unsigned int hooknum);
46extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
47 struct nf_conn *ct,
48 enum ip_conntrack_info ctinfo,
49 unsigned int hooknum,
50 unsigned int hdrlen);
51
52#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
new file mode 100644
index 000000000000..24feb68d1bcc
--- /dev/null
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -0,0 +1,72 @@
1/* Header for use in defining a given protocol. */
2#ifndef _NF_NAT_L4PROTO_H
3#define _NF_NAT_L4PROTO_H
4#include <net/netfilter/nf_nat.h>
5#include <linux/netfilter/nfnetlink_conntrack.h>
6
7struct nf_nat_range;
8struct nf_nat_l3proto;
9
10struct nf_nat_l4proto {
11 /* Protocol number. */
12 u8 l4proto;
13
14 /* Translate a packet to the target according to manip type.
15 * Return true if succeeded.
16 */
17 bool (*manip_pkt)(struct sk_buff *skb,
18 const struct nf_nat_l3proto *l3proto,
19 unsigned int iphdroff, unsigned int hdroff,
20 const struct nf_conntrack_tuple *tuple,
21 enum nf_nat_manip_type maniptype);
22
23 /* Is the manipable part of the tuple between min and max incl? */
24 bool (*in_range)(const struct nf_conntrack_tuple *tuple,
25 enum nf_nat_manip_type maniptype,
26 const union nf_conntrack_man_proto *min,
27 const union nf_conntrack_man_proto *max);
28
29 /* Alter the per-proto part of the tuple (depending on
30 * maniptype), to give a unique tuple in the given range if
31 * possible. Per-protocol part of tuple is initialized to the
32 * incoming packet.
33 */
34 void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
35 struct nf_conntrack_tuple *tuple,
36 const struct nf_nat_range *range,
37 enum nf_nat_manip_type maniptype,
38 const struct nf_conn *ct);
39
40 int (*nlattr_to_range)(struct nlattr *tb[],
41 struct nf_nat_range *range);
42};
43
44/* Protocol registration. */
45extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
46extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
47
48extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
49
50/* Built-in protocols. */
51extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
52extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
53extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
54extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
55extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
56
57extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
58 enum nf_nat_manip_type maniptype,
59 const union nf_conntrack_man_proto *min,
60 const union nf_conntrack_man_proto *max);
61
62extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
63 struct nf_conntrack_tuple *tuple,
64 const struct nf_nat_range *range,
65 enum nf_nat_manip_type maniptype,
66 const struct nf_conn *ct,
67 u16 *rover);
68
69extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
70 struct nf_nat_range *range);
71
72#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
deleted file mode 100644
index 7b0b51165f70..000000000000
--- a/include/net/netfilter/nf_nat_protocol.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/* Header for use in defining a given protocol. */
2#ifndef _NF_NAT_PROTOCOL_H
3#define _NF_NAT_PROTOCOL_H
4#include <net/netfilter/nf_nat.h>
5#include <linux/netfilter/nfnetlink_conntrack.h>
6
7struct nf_nat_ipv4_range;
8
9struct nf_nat_protocol {
10 /* Protocol number. */
11 unsigned int protonum;
12
13 /* Translate a packet to the target according to manip type.
14 Return true if succeeded. */
15 bool (*manip_pkt)(struct sk_buff *skb,
16 unsigned int iphdroff,
17 const struct nf_conntrack_tuple *tuple,
18 enum nf_nat_manip_type maniptype);
19
20 /* Is the manipable part of the tuple between min and max incl? */
21 bool (*in_range)(const struct nf_conntrack_tuple *tuple,
22 enum nf_nat_manip_type maniptype,
23 const union nf_conntrack_man_proto *min,
24 const union nf_conntrack_man_proto *max);
25
26 /* Alter the per-proto part of the tuple (depending on
27 maniptype), to give a unique tuple in the given range if
28 possible. Per-protocol part of tuple is initialized to the
29 incoming packet. */
30 void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
31 const struct nf_nat_ipv4_range *range,
32 enum nf_nat_manip_type maniptype,
33 const struct nf_conn *ct);
34
35 int (*nlattr_to_range)(struct nlattr *tb[],
36 struct nf_nat_ipv4_range *range);
37};
38
39/* Protocol registration. */
40extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto);
41extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto);
42
43/* Built-in protocols. */
44extern const struct nf_nat_protocol nf_nat_protocol_tcp;
45extern const struct nf_nat_protocol nf_nat_protocol_udp;
46extern const struct nf_nat_protocol nf_nat_protocol_icmp;
47extern const struct nf_nat_protocol nf_nat_unknown_protocol;
48
49extern int init_protocols(void) __init;
50extern void cleanup_protocols(void);
51extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);
52
53extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
54 enum nf_nat_manip_type maniptype,
55 const union nf_conntrack_man_proto *min,
56 const union nf_conntrack_man_proto *max);
57
58extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
59 const struct nf_nat_ipv4_range *range,
60 enum nf_nat_manip_type maniptype,
61 const struct nf_conn *ct,
62 u_int16_t *rover);
63
64extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
65 struct nf_nat_ipv4_range *range);
66
67#endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h
deleted file mode 100644
index 2890bdc4cd92..000000000000
--- a/include/net/netfilter/nf_nat_rule.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef _NF_NAT_RULE_H
2#define _NF_NAT_RULE_H
3#include <net/netfilter/nf_conntrack.h>
4#include <net/netfilter/nf_nat.h>
5#include <linux/netfilter_ipv4/ip_tables.h>
6
7extern int nf_nat_rule_init(void) __init;
8extern void nf_nat_rule_cleanup(void);
9extern int nf_nat_rule_find(struct sk_buff *skb,
10 unsigned int hooknum,
11 const struct net_device *in,
12 const struct net_device *out,
13 struct nf_conn *ct);
14
15#endif /* _NF_NAT_RULE_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 785f37a3b44e..9690b0f6698a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,6 +98,10 @@
98 * nla_put_u16(skb, type, value) add u16 attribute to skb 98 * nla_put_u16(skb, type, value) add u16 attribute to skb
99 * nla_put_u32(skb, type, value) add u32 attribute to skb 99 * nla_put_u32(skb, type, value) add u32 attribute to skb
100 * nla_put_u64(skb, type, value) add u64 attribute to skb 100 * nla_put_u64(skb, type, value) add u64 attribute to skb
101 * nla_put_s8(skb, type, value) add s8 attribute to skb
102 * nla_put_s16(skb, type, value) add s16 attribute to skb
103 * nla_put_s32(skb, type, value) add s32 attribute to skb
104 * nla_put_s64(skb, type, value) add s64 attribute to skb
101 * nla_put_string(skb, type, str) add string attribute to skb 105 * nla_put_string(skb, type, str) add string attribute to skb
102 * nla_put_flag(skb, type) add flag attribute to skb 106 * nla_put_flag(skb, type) add flag attribute to skb
103 * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb 107 * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
@@ -121,6 +125,10 @@
121 * nla_get_u16(nla) get payload for a u16 attribute 125 * nla_get_u16(nla) get payload for a u16 attribute
122 * nla_get_u32(nla) get payload for a u32 attribute 126 * nla_get_u32(nla) get payload for a u32 attribute
123 * nla_get_u64(nla) get payload for a u64 attribute 127 * nla_get_u64(nla) get payload for a u64 attribute
128 * nla_get_s8(nla) get payload for a s8 attribute
129 * nla_get_s16(nla) get payload for a s16 attribute
130 * nla_get_s32(nla) get payload for a s32 attribute
131 * nla_get_s64(nla) get payload for a s64 attribute
124 * nla_get_flag(nla) return 1 if flag is true 132 * nla_get_flag(nla) return 1 if flag is true
125 * nla_get_msecs(nla) get payload for a msecs attribute 133 * nla_get_msecs(nla) get payload for a msecs attribute
126 * 134 *
@@ -160,6 +168,10 @@ enum {
160 NLA_NESTED_COMPAT, 168 NLA_NESTED_COMPAT,
161 NLA_NUL_STRING, 169 NLA_NUL_STRING,
162 NLA_BINARY, 170 NLA_BINARY,
171 NLA_S8,
172 NLA_S16,
173 NLA_S32,
174 NLA_S64,
163 __NLA_TYPE_MAX, 175 __NLA_TYPE_MAX,
164}; 176};
165 177
@@ -183,6 +195,8 @@ enum {
183 * NLA_NESTED_COMPAT Minimum length of structure payload 195 * NLA_NESTED_COMPAT Minimum length of structure payload
184 * NLA_U8, NLA_U16, 196 * NLA_U8, NLA_U16,
185 * NLA_U32, NLA_U64, 197 * NLA_U32, NLA_U64,
198 * NLA_S8, NLA_S16,
199 * NLA_S32, NLA_S64,
186 * NLA_MSECS Leaving the length field zero will verify the 200 * NLA_MSECS Leaving the length field zero will verify the
187 * given type fits, using it verifies minimum length 201 * given type fits, using it verifies minimum length
188 * just like "All other" 202 * just like "All other"
@@ -203,19 +217,19 @@ struct nla_policy {
203/** 217/**
204 * struct nl_info - netlink source information 218 * struct nl_info - netlink source information
205 * @nlh: Netlink message header of original request 219 * @nlh: Netlink message header of original request
206 * @pid: Netlink PID of requesting application 220 * @portid: Netlink PORTID of requesting application
207 */ 221 */
208struct nl_info { 222struct nl_info {
209 struct nlmsghdr *nlh; 223 struct nlmsghdr *nlh;
210 struct net *nl_net; 224 struct net *nl_net;
211 u32 pid; 225 u32 portid;
212}; 226};
213 227
214extern int netlink_rcv_skb(struct sk_buff *skb, 228extern int netlink_rcv_skb(struct sk_buff *skb,
215 int (*cb)(struct sk_buff *, 229 int (*cb)(struct sk_buff *,
216 struct nlmsghdr *)); 230 struct nlmsghdr *));
217extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb, 231extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
218 u32 pid, unsigned int group, int report, 232 u32 portid, unsigned int group, int report,
219 gfp_t flags); 233 gfp_t flags);
220 234
221extern int nla_validate(const struct nlattr *head, 235extern int nla_validate(const struct nlattr *head,
@@ -430,7 +444,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
430/** 444/**
431 * nlmsg_put - Add a new netlink message to an skb 445 * nlmsg_put - Add a new netlink message to an skb
432 * @skb: socket buffer to store message in 446 * @skb: socket buffer to store message in
433 * @pid: netlink process id 447 * @portid: netlink process id
434 * @seq: sequence number of message 448 * @seq: sequence number of message
435 * @type: message type 449 * @type: message type
436 * @payload: length of message payload 450 * @payload: length of message payload
@@ -439,13 +453,13 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
439 * Returns NULL if the tailroom of the skb is insufficient to store 453 * Returns NULL if the tailroom of the skb is insufficient to store
440 * the message header and payload. 454 * the message header and payload.
441 */ 455 */
442static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, 456static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
443 int type, int payload, int flags) 457 int type, int payload, int flags)
444{ 458{
445 if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload))) 459 if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
446 return NULL; 460 return NULL;
447 461
448 return __nlmsg_put(skb, pid, seq, type, payload, flags); 462 return __nlmsg_put(skb, portid, seq, type, payload, flags);
449} 463}
450 464
451/** 465/**
@@ -464,7 +478,7 @@ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
464 int type, int payload, 478 int type, int payload,
465 int flags) 479 int flags)
466{ 480{
467 return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 481 return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
468 type, payload, flags); 482 type, payload, flags);
469} 483}
470 484
@@ -549,18 +563,18 @@ static inline void nlmsg_free(struct sk_buff *skb)
549 * nlmsg_multicast - multicast a netlink message 563 * nlmsg_multicast - multicast a netlink message
550 * @sk: netlink socket to spread messages to 564 * @sk: netlink socket to spread messages to
551 * @skb: netlink message as socket buffer 565 * @skb: netlink message as socket buffer
552 * @pid: own netlink pid to avoid sending to yourself 566 * @portid: own netlink portid to avoid sending to yourself
553 * @group: multicast group id 567 * @group: multicast group id
554 * @flags: allocation flags 568 * @flags: allocation flags
555 */ 569 */
556static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, 570static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
557 u32 pid, unsigned int group, gfp_t flags) 571 u32 portid, unsigned int group, gfp_t flags)
558{ 572{
559 int err; 573 int err;
560 574
561 NETLINK_CB(skb).dst_group = group; 575 NETLINK_CB(skb).dst_group = group;
562 576
563 err = netlink_broadcast(sk, skb, pid, group, flags); 577 err = netlink_broadcast(sk, skb, portid, group, flags);
564 if (err > 0) 578 if (err > 0)
565 err = 0; 579 err = 0;
566 580
@@ -571,13 +585,13 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
571 * nlmsg_unicast - unicast a netlink message 585 * nlmsg_unicast - unicast a netlink message
572 * @sk: netlink socket to spread message to 586 * @sk: netlink socket to spread message to
573 * @skb: netlink message as socket buffer 587 * @skb: netlink message as socket buffer
574 * @pid: netlink pid of the destination socket 588 * @portid: netlink portid of the destination socket
575 */ 589 */
576static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid) 590static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
577{ 591{
578 int err; 592 int err;
579 593
580 err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT); 594 err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
581 if (err > 0) 595 if (err > 0)
582 err = 0; 596 err = 0;
583 597
@@ -879,6 +893,50 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
879} 893}
880 894
881/** 895/**
896 * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
897 * @skb: socket buffer to add attribute to
898 * @attrtype: attribute type
899 * @value: numeric value
900 */
901static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
902{
903 return nla_put(skb, attrtype, sizeof(s8), &value);
904}
905
906/**
907 * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
908 * @skb: socket buffer to add attribute to
909 * @attrtype: attribute type
910 * @value: numeric value
911 */
912static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
913{
914 return nla_put(skb, attrtype, sizeof(s16), &value);
915}
916
917/**
918 * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
919 * @skb: socket buffer to add attribute to
920 * @attrtype: attribute type
921 * @value: numeric value
922 */
923static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
924{
925 return nla_put(skb, attrtype, sizeof(s32), &value);
926}
927
928/**
929 * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
930 * @skb: socket buffer to add attribute to
931 * @attrtype: attribute type
932 * @value: numeric value
933 */
934static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
935{
936 return nla_put(skb, attrtype, sizeof(s64), &value);
937}
938
939/**
882 * nla_put_string - Add a string netlink attribute to a socket buffer 940 * nla_put_string - Add a string netlink attribute to a socket buffer
883 * @skb: socket buffer to add attribute to 941 * @skb: socket buffer to add attribute to
884 * @attrtype: attribute type 942 * @attrtype: attribute type
@@ -994,6 +1052,46 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
994} 1052}
995 1053
996/** 1054/**
1055 * nla_get_s32 - return payload of s32 attribute
1056 * @nla: s32 netlink attribute
1057 */
1058static inline s32 nla_get_s32(const struct nlattr *nla)
1059{
1060 return *(s32 *) nla_data(nla);
1061}
1062
1063/**
1064 * nla_get_s16 - return payload of s16 attribute
1065 * @nla: s16 netlink attribute
1066 */
1067static inline s16 nla_get_s16(const struct nlattr *nla)
1068{
1069 return *(s16 *) nla_data(nla);
1070}
1071
1072/**
1073 * nla_get_s8 - return payload of s8 attribute
1074 * @nla: s8 netlink attribute
1075 */
1076static inline s8 nla_get_s8(const struct nlattr *nla)
1077{
1078 return *(s8 *) nla_data(nla);
1079}
1080
1081/**
1082 * nla_get_s64 - return payload of s64 attribute
1083 * @nla: s64 netlink attribute
1084 */
1085static inline s64 nla_get_s64(const struct nlattr *nla)
1086{
1087 s64 tmp;
1088
1089 nla_memcpy(&tmp, nla, sizeof(tmp));
1090
1091 return tmp;
1092}
1093
1094/**
997 * nla_get_flag - return payload of flag attribute 1095 * nla_get_flag - return payload of flag attribute
998 * @nla: flag netlink attribute 1096 * @nla: flag netlink attribute
999 */ 1097 */
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 3aecdc7a84fb..a1d83cc8bf85 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -83,6 +83,10 @@ struct netns_ct {
83 int sysctl_auto_assign_helper; 83 int sysctl_auto_assign_helper;
84 bool auto_assign_helper_warned; 84 bool auto_assign_helper_warned;
85 struct nf_ip_net nf_ct_proto; 85 struct nf_ip_net nf_ct_proto;
86#ifdef CONFIG_NF_NAT_NEEDED
87 struct hlist_head *nat_bysource;
88 unsigned int nat_htable_size;
89#endif
86#ifdef CONFIG_SYSCTL 90#ifdef CONFIG_SYSCTL
87 struct ctl_table_header *sysctl_header; 91 struct ctl_table_header *sysctl_header;
88 struct ctl_table_header *acct_sysctl_header; 92 struct ctl_table_header *acct_sysctl_header;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 69e50c789d96..2ae2b8372cfd 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -52,8 +52,6 @@ struct netns_ipv4 {
52 struct xt_table *iptable_security; 52 struct xt_table *iptable_security;
53#endif 53#endif
54 struct xt_table *nat_table; 54 struct xt_table *nat_table;
55 struct hlist_head *nat_bysource;
56 unsigned int nat_htable_size;
57#endif 55#endif
58 56
59 int sysctl_icmp_echo_ignore_all; 57 int sysctl_icmp_echo_ignore_all;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index df0a5456a3fd..214cb0a53359 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -42,6 +42,7 @@ struct netns_ipv6 {
42#ifdef CONFIG_SECURITY 42#ifdef CONFIG_SECURITY
43 struct xt_table *ip6table_security; 43 struct xt_table *ip6table_security;
44#endif 44#endif
45 struct xt_table *ip6table_nat;
45#endif 46#endif
46 struct rt6_info *ip6_null_entry; 47 struct rt6_info *ip6_null_entry;
47 struct rt6_statistics *rt6_stats; 48 struct rt6_statistics *rt6_stats;
@@ -70,4 +71,12 @@ struct netns_ipv6 {
70#endif 71#endif
71#endif 72#endif
72}; 73};
74
75#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
76struct netns_nf_frag {
77 struct netns_sysctl_ipv6 sysctl;
78 struct netns_frags frags;
79};
80#endif
81
73#endif 82#endif
diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h
index cb4e894c0f8d..17ec2b95c062 100644
--- a/include/net/netns/packet.h
+++ b/include/net/netns/packet.h
@@ -5,10 +5,10 @@
5#define __NETNS_PACKET_H__ 5#define __NETNS_PACKET_H__
6 6
7#include <linux/rculist.h> 7#include <linux/rculist.h>
8#include <linux/spinlock.h> 8#include <linux/mutex.h>
9 9
10struct netns_packet { 10struct netns_packet {
11 spinlock_t sklist_lock; 11 struct mutex sklist_lock;
12 struct hlist_head sklist; 12 struct hlist_head sklist;
13}; 13};
14 14
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644
index 000000000000..5e5eb1f9f14b
--- /dev/null
+++ b/include/net/netns/sctp.h
@@ -0,0 +1,131 @@
1#ifndef __NETNS_SCTP_H__
2#define __NETNS_SCTP_H__
3
4struct sock;
5struct proc_dir_entry;
6struct sctp_mib;
7struct ctl_table_header;
8
9struct netns_sctp {
10 DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
11
12#ifdef CONFIG_PROC_FS
13 struct proc_dir_entry *proc_net_sctp;
14#endif
15#ifdef CONFIG_SYSCTL
16 struct ctl_table_header *sysctl_header;
17#endif
18 /* This is the global socket data structure used for responding to
19 * the Out-of-the-blue (OOTB) packets. A control sock will be created
20 * for this socket at the initialization time.
21 */
22 struct sock *ctl_sock;
23
24 /* This is the global local address list.
25 * We actively maintain this complete list of addresses on
26 * the system by catching address add/delete events.
27 *
28 * It is a list of sctp_sockaddr_entry.
29 */
30 struct list_head local_addr_list;
31 struct list_head addr_waitq;
32 struct timer_list addr_wq_timer;
33 struct list_head auto_asconf_splist;
34 spinlock_t addr_wq_lock;
35
36 /* Lock that protects the local_addr_list writers */
37 spinlock_t local_addr_lock;
38
39 /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
40 *
41 * The following protocol parameters are RECOMMENDED:
42 *
43 * RTO.Initial - 3 seconds
44 * RTO.Min - 1 second
45 * RTO.Max - 60 seconds
46 * RTO.Alpha - 1/8 (3 when converted to right shifts.)
47 * RTO.Beta - 1/4 (2 when converted to right shifts.)
48 */
49 unsigned int rto_initial;
50 unsigned int rto_min;
51 unsigned int rto_max;
52
53 /* Note: rto_alpha and rto_beta are really defined as inverse
54 * powers of two to facilitate integer operations.
55 */
56 int rto_alpha;
57 int rto_beta;
58
59 /* Max.Burst - 4 */
60 int max_burst;
61
62 /* Whether Cookie Preservative is enabled(1) or not(0) */
63 int cookie_preserve_enable;
64
65 /* Valid.Cookie.Life - 60 seconds */
66 unsigned int valid_cookie_life;
67
68 /* Delayed SACK timeout 200ms default*/
69 unsigned int sack_timeout;
70
71 /* HB.interval - 30 seconds */
72 unsigned int hb_interval;
73
74 /* Association.Max.Retrans - 10 attempts
75 * Path.Max.Retrans - 5 attempts (per destination address)
76 * Max.Init.Retransmits - 8 attempts
77 */
78 int max_retrans_association;
79 int max_retrans_path;
80 int max_retrans_init;
81 /* Potentially-Failed.Max.Retrans sysctl value
82 * taken from:
83 * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
84 */
85 int pf_retrans;
86
87 /*
88 * Policy for preforming sctp/socket accounting
89 * 0 - do socket level accounting, all assocs share sk_sndbuf
90 * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
91 */
92 int sndbuf_policy;
93
94 /*
95 * Policy for preforming sctp/socket accounting
96 * 0 - do socket level accounting, all assocs share sk_rcvbuf
97 * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
98 */
99 int rcvbuf_policy;
100
101 int default_auto_asconf;
102
103 /* Flag to indicate if addip is enabled. */
104 int addip_enable;
105 int addip_noauth;
106
107 /* Flag to indicate if PR-SCTP is enabled. */
108 int prsctp_enable;
109
110 /* Flag to idicate if SCTP-AUTH is enabled */
111 int auth_enable;
112
113 /*
114 * Policy to control SCTP IPv4 address scoping
115 * 0 - Disable IPv4 address scoping
116 * 1 - Enable IPv4 address scoping
117 * 2 - Selectively allow only IPv4 private addresses
118 * 3 - Selectively allow only IPv4 link local address
119 */
120 int scope_policy;
121
122 /* Threshold for rwnd update SACKS. Receive buffer shifted this many
123 * bits is an indicator of when to send and window update SACK.
124 */
125 int rwnd_upd_shift;
126
127 /* Threshold for autoclose timeout, in seconds. */
128 unsigned long max_autoclose;
129};
130
131#endif /* __NETNS_SCTP_H__ */
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index f5169b04f082..e900072950cb 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -30,6 +30,11 @@ struct nfc_hci_ops {
30 int (*open) (struct nfc_hci_dev *hdev); 30 int (*open) (struct nfc_hci_dev *hdev);
31 void (*close) (struct nfc_hci_dev *hdev); 31 void (*close) (struct nfc_hci_dev *hdev);
32 int (*hci_ready) (struct nfc_hci_dev *hdev); 32 int (*hci_ready) (struct nfc_hci_dev *hdev);
33 /*
34 * xmit must always send the complete buffer before
35 * returning. Returned result must be 0 for success
36 * or negative for failure.
37 */
33 int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb); 38 int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
34 int (*start_poll) (struct nfc_hci_dev *hdev, 39 int (*start_poll) (struct nfc_hci_dev *hdev,
35 u32 im_protocols, u32 tm_protocols); 40 u32 im_protocols, u32 tm_protocols);
@@ -38,8 +43,8 @@ struct nfc_hci_ops {
38 int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate, 43 int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
39 struct nfc_target *target); 44 struct nfc_target *target);
40 int (*data_exchange) (struct nfc_hci_dev *hdev, 45 int (*data_exchange) (struct nfc_hci_dev *hdev,
41 struct nfc_target *target, 46 struct nfc_target *target, struct sk_buff *skb,
42 struct sk_buff *skb, struct sk_buff **res_skb); 47 data_exchange_cb_t cb, void *cb_context);
43 int (*check_presence)(struct nfc_hci_dev *hdev, 48 int (*check_presence)(struct nfc_hci_dev *hdev,
44 struct nfc_target *target); 49 struct nfc_target *target);
45}; 50};
@@ -74,7 +79,6 @@ struct nfc_hci_dev {
74 79
75 struct list_head msg_tx_queue; 80 struct list_head msg_tx_queue;
76 81
77 struct workqueue_struct *msg_tx_wq;
78 struct work_struct msg_tx_work; 82 struct work_struct msg_tx_work;
79 83
80 struct timer_list cmd_timer; 84 struct timer_list cmd_timer;
@@ -82,13 +86,14 @@ struct nfc_hci_dev {
82 86
83 struct sk_buff_head rx_hcp_frags; 87 struct sk_buff_head rx_hcp_frags;
84 88
85 struct workqueue_struct *msg_rx_wq;
86 struct work_struct msg_rx_work; 89 struct work_struct msg_rx_work;
87 90
88 struct sk_buff_head msg_rx_queue; 91 struct sk_buff_head msg_rx_queue;
89 92
90 struct nfc_hci_ops *ops; 93 struct nfc_hci_ops *ops;
91 94
95 struct nfc_llc *llc;
96
92 struct nfc_hci_init_data init_data; 97 struct nfc_hci_init_data init_data;
93 98
94 void *clientdata; 99 void *clientdata;
@@ -105,12 +110,17 @@ struct nfc_hci_dev {
105 u8 hw_mpw; 110 u8 hw_mpw;
106 u8 hw_software; 111 u8 hw_software;
107 u8 hw_bsid; 112 u8 hw_bsid;
113
114 int async_cb_type;
115 data_exchange_cb_t async_cb;
116 void *async_cb_context;
108}; 117};
109 118
110/* hci device allocation */ 119/* hci device allocation */
111struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, 120struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
112 struct nfc_hci_init_data *init_data, 121 struct nfc_hci_init_data *init_data,
113 u32 protocols, 122 u32 protocols,
123 const char *llc_name,
114 int tx_headroom, 124 int tx_headroom,
115 int tx_tailroom, 125 int tx_tailroom,
116 int max_link_payload); 126 int max_link_payload);
@@ -202,6 +212,9 @@ int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
202 const u8 *param, size_t param_len); 212 const u8 *param, size_t param_len);
203int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, 213int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
204 const u8 *param, size_t param_len, struct sk_buff **skb); 214 const u8 *param, size_t param_len, struct sk_buff **skb);
215int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
216 const u8 *param, size_t param_len,
217 data_exchange_cb_t cb, void *cb_context);
205int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, 218int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
206 const u8 *param, size_t param_len); 219 const u8 *param, size_t param_len);
207int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, 220int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h
new file mode 100644
index 000000000000..400ab7ae749d
--- /dev/null
+++ b/include/net/nfc/llc.h
@@ -0,0 +1,54 @@
1/*
2 * Link Layer Control manager public interface
3 *
4 * Copyright (C) 2012 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef __NFC_LLC_H_
22#define __NFC_LLC_H_
23
24#include <net/nfc/hci.h>
25#include <linux/skbuff.h>
26
27#define LLC_NOP_NAME "nop"
28#define LLC_SHDLC_NAME "shdlc"
29
30typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
31typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
32typedef void (*llc_failure_t) (struct nfc_hci_dev *hdev, int err);
33
34struct nfc_llc;
35
36struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
37 xmit_to_drv_t xmit_to_drv,
38 rcv_to_hci_t rcv_to_hci, int tx_headroom,
39 int tx_tailroom, llc_failure_t llc_failure);
40void nfc_llc_free(struct nfc_llc *llc);
41
42void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
43 int *rx_tailroom);
44
45
46int nfc_llc_start(struct nfc_llc *llc);
47int nfc_llc_stop(struct nfc_llc *llc);
48void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb);
49int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb);
50
51int nfc_llc_init(void);
52void nfc_llc_exit(void);
53
54#endif /* __NFC_LLC_H_ */
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 276094b91d7c..88785e5c6b2c 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -32,6 +32,7 @@
32#define NCI_MAX_NUM_MAPPING_CONFIGS 10 32#define NCI_MAX_NUM_MAPPING_CONFIGS 10
33#define NCI_MAX_NUM_RF_CONFIGS 10 33#define NCI_MAX_NUM_RF_CONFIGS 10
34#define NCI_MAX_NUM_CONN 10 34#define NCI_MAX_NUM_CONN 10
35#define NCI_MAX_PARAM_LEN 251
35 36
36/* NCI Status Codes */ 37/* NCI Status Codes */
37#define NCI_STATUS_OK 0x00 38#define NCI_STATUS_OK 0x00
@@ -102,6 +103,9 @@
102#define NCI_RF_INTERFACE_ISO_DEP 0x02 103#define NCI_RF_INTERFACE_ISO_DEP 0x02
103#define NCI_RF_INTERFACE_NFC_DEP 0x03 104#define NCI_RF_INTERFACE_NFC_DEP 0x03
104 105
106/* NCI Configuration Parameter Tags */
107#define NCI_PN_ATR_REQ_GEN_BYTES 0x29
108
105/* NCI Reset types */ 109/* NCI Reset types */
106#define NCI_RESET_TYPE_KEEP_CONFIG 0x00 110#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
107#define NCI_RESET_TYPE_RESET_CONFIG 0x01 111#define NCI_RESET_TYPE_RESET_CONFIG 0x01
@@ -188,6 +192,18 @@ struct nci_core_reset_cmd {
188 192
189#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01) 193#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
190 194
195#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
196struct set_config_param {
197 __u8 id;
198 __u8 len;
199 __u8 val[NCI_MAX_PARAM_LEN];
200} __packed;
201
202struct nci_core_set_config_cmd {
203 __u8 num_params;
204 struct set_config_param param; /* support 1 param per cmd is enough */
205} __packed;
206
191#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) 207#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
192struct disc_map_config { 208struct disc_map_config {
193 __u8 rf_protocol; 209 __u8 rf_protocol;
@@ -252,6 +268,13 @@ struct nci_core_init_rsp_2 {
252 __le32 manufact_specific_info; 268 __le32 manufact_specific_info;
253} __packed; 269} __packed;
254 270
271#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
272struct nci_core_set_config_rsp {
273 __u8 status;
274 __u8 num_params;
275 __u8 params_id[0]; /* variable size array */
276} __packed;
277
255#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) 278#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
256 279
257#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) 280#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -328,6 +351,11 @@ struct activation_params_nfcb_poll_iso_dep {
328 __u8 attrib_res[50]; 351 __u8 attrib_res[50];
329}; 352};
330 353
354struct activation_params_poll_nfc_dep {
355 __u8 atr_res_len;
356 __u8 atr_res[63];
357};
358
331struct nci_rf_intf_activated_ntf { 359struct nci_rf_intf_activated_ntf {
332 __u8 rf_discovery_id; 360 __u8 rf_discovery_id;
333 __u8 rf_interface; 361 __u8 rf_interface;
@@ -351,6 +379,7 @@ struct nci_rf_intf_activated_ntf {
351 union { 379 union {
352 struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep; 380 struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
353 struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep; 381 struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
382 struct activation_params_poll_nfc_dep poll_nfc_dep;
354 } activation_params; 383 } activation_params;
355 384
356} __packed; 385} __packed;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index feba74027ff8..d705d8674949 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -54,6 +54,7 @@ enum nci_state {
54/* NCI timeouts */ 54/* NCI timeouts */
55#define NCI_RESET_TIMEOUT 5000 55#define NCI_RESET_TIMEOUT 5000
56#define NCI_INIT_TIMEOUT 5000 56#define NCI_INIT_TIMEOUT 5000
57#define NCI_SET_CONFIG_TIMEOUT 5000
57#define NCI_RF_DISC_TIMEOUT 5000 58#define NCI_RF_DISC_TIMEOUT 5000
58#define NCI_RF_DISC_SELECT_TIMEOUT 5000 59#define NCI_RF_DISC_SELECT_TIMEOUT 5000
59#define NCI_RF_DEACTIVATE_TIMEOUT 30000 60#define NCI_RF_DEACTIVATE_TIMEOUT 30000
@@ -137,6 +138,10 @@ struct nci_dev {
137 data_exchange_cb_t data_exchange_cb; 138 data_exchange_cb_t data_exchange_cb;
138 void *data_exchange_cb_context; 139 void *data_exchange_cb_context;
139 struct sk_buff *rx_data_reassembly; 140 struct sk_buff *rx_data_reassembly;
141
142 /* stored during intf_activated_ntf */
143 __u8 remote_gb[NFC_MAX_GT_LEN];
144 __u8 remote_gb_len;
140}; 145};
141 146
142/* ----- NCI Devices ----- */ 147/* ----- NCI Devices ----- */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 6431f5e39022..f05b10682c9d 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -72,6 +72,7 @@ struct nfc_ops {
72 72
73#define NFC_TARGET_IDX_ANY -1 73#define NFC_TARGET_IDX_ANY -1
74#define NFC_MAX_GT_LEN 48 74#define NFC_MAX_GT_LEN 48
75#define NFC_ATR_RES_GT_OFFSET 15
75 76
76struct nfc_target { 77struct nfc_target {
77 u32 idx; 78 u32 idx;
@@ -89,7 +90,7 @@ struct nfc_target {
89}; 90};
90 91
91struct nfc_genl_data { 92struct nfc_genl_data {
92 u32 poll_req_pid; 93 u32 poll_req_portid;
93 struct mutex genl_data_mutex; 94 struct mutex genl_data_mutex;
94}; 95};
95 96
@@ -112,7 +113,6 @@ struct nfc_dev {
112 int tx_tailroom; 113 int tx_tailroom;
113 114
114 struct timer_list check_pres_timer; 115 struct timer_list check_pres_timer;
115 struct workqueue_struct *check_pres_wq;
116 struct work_struct check_pres_work; 116 struct work_struct check_pres_work;
117 117
118 struct nfc_ops *ops; 118 struct nfc_ops *ops;
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
deleted file mode 100644
index 35e930d2f638..000000000000
--- a/include/net/nfc/shdlc.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef __NFC_SHDLC_H
21#define __NFC_SHDLC_H
22
23struct nfc_shdlc;
24
25struct nfc_shdlc_ops {
26 int (*open) (struct nfc_shdlc *shdlc);
27 void (*close) (struct nfc_shdlc *shdlc);
28 int (*hci_ready) (struct nfc_shdlc *shdlc);
29 int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
30 int (*start_poll) (struct nfc_shdlc *shdlc,
31 u32 im_protocols, u32 tm_protocols);
32 int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
33 struct nfc_target *target);
34 int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
35 struct nfc_target *target);
36 int (*data_exchange) (struct nfc_shdlc *shdlc,
37 struct nfc_target *target,
38 struct sk_buff *skb, struct sk_buff **res_skb);
39 int (*check_presence)(struct nfc_shdlc *shdlc,
40 struct nfc_target *target);
41};
42
43enum shdlc_state {
44 SHDLC_DISCONNECTED = 0,
45 SHDLC_CONNECTING = 1,
46 SHDLC_NEGOCIATING = 2,
47 SHDLC_CONNECTED = 3
48};
49
50struct nfc_shdlc {
51 struct mutex state_mutex;
52 enum shdlc_state state;
53 int hard_fault;
54
55 struct nfc_hci_dev *hdev;
56
57 wait_queue_head_t *connect_wq;
58 int connect_tries;
59 int connect_result;
60 struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
61
62 u8 w; /* window size */
63 bool srej_support;
64
65 struct timer_list t1_timer; /* send ack timeout */
66 bool t1_active;
67
68 struct timer_list t2_timer; /* guard/retransmit timeout */
69 bool t2_active;
70
71 int ns; /* next seq num for send */
72 int nr; /* next expected seq num for receive */
73 int dnr; /* oldest sent unacked seq num */
74
75 struct sk_buff_head rcv_q;
76
77 struct sk_buff_head send_q;
78 bool rnr; /* other side is not ready to receive */
79
80 struct sk_buff_head ack_pending_q;
81
82 struct workqueue_struct *sm_wq;
83 struct work_struct sm_work;
84
85 struct nfc_shdlc_ops *ops;
86
87 int client_headroom;
88 int client_tailroom;
89
90 void *clientdata;
91};
92
93void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb);
94
95struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
96 struct nfc_hci_init_data *init_data,
97 u32 protocols,
98 int tx_headroom, int tx_tailroom,
99 int max_link_payload, const char *devname);
100
101void nfc_shdlc_free(struct nfc_shdlc *shdlc);
102
103void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata);
104void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc);
105struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc);
106
107#endif /* __NFC_SHDLC_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 4c0766e201e3..b01d8dd9ee7c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -106,6 +106,34 @@ struct listen_sock {
106 struct request_sock *syn_table[0]; 106 struct request_sock *syn_table[0];
107}; 107};
108 108
109/*
110 * For a TCP Fast Open listener -
111 * lock - protects the access to all the reqsk, which is co-owned by
112 * the listener and the child socket.
113 * qlen - pending TFO requests (still in TCP_SYN_RECV).
114 * max_qlen - max TFO reqs allowed before TFO is disabled.
115 *
116 * XXX (TFO) - ideally these fields can be made as part of "listen_sock"
117 * structure above. But there is some implementation difficulty due to
118 * listen_sock being part of request_sock_queue hence will be freed when
119 * a listener is stopped. But TFO related fields may continue to be
120 * accessed even after a listener is closed, until its sk_refcnt drops
121 * to 0 implying no more outstanding TFO reqs. One solution is to keep
122 * listen_opt around until sk_refcnt drops to 0. But there is some other
123 * complexity that needs to be resolved. E.g., a listener can be disabled
124 * temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
125 */
126struct fastopen_queue {
127 struct request_sock *rskq_rst_head; /* Keep track of past TFO */
128 struct request_sock *rskq_rst_tail; /* requests that caused RST.
129 * This is part of the defense
130 * against spoofing attack.
131 */
132 spinlock_t lock;
133 int qlen; /* # of pending (TCP_SYN_RECV) reqs */
134 int max_qlen; /* != 0 iff TFO is currently enabled */
135};
136
109/** struct request_sock_queue - queue of request_socks 137/** struct request_sock_queue - queue of request_socks
110 * 138 *
111 * @rskq_accept_head - FIFO head of established children 139 * @rskq_accept_head - FIFO head of established children
@@ -129,6 +157,12 @@ struct request_sock_queue {
129 u8 rskq_defer_accept; 157 u8 rskq_defer_accept;
130 /* 3 bytes hole, try to pack */ 158 /* 3 bytes hole, try to pack */
131 struct listen_sock *listen_opt; 159 struct listen_sock *listen_opt;
160 struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
161 * enabled on this listener. Check
162 * max_qlen != 0 in fastopen_queue
163 * to determine if TFO is enabled
164 * right at this moment.
165 */
132}; 166};
133 167
134extern int reqsk_queue_alloc(struct request_sock_queue *queue, 168extern int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
136 170
137extern void __reqsk_queue_destroy(struct request_sock_queue *queue); 171extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
138extern void reqsk_queue_destroy(struct request_sock_queue *queue); 172extern void reqsk_queue_destroy(struct request_sock_queue *queue);
173extern void reqsk_fastopen_remove(struct sock *sk,
174 struct request_sock *req, bool reset);
139 175
140static inline struct request_sock * 176static inline struct request_sock *
141 reqsk_queue_yank_acceptq(struct request_sock_queue *queue) 177 reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
@@ -190,19 +226,6 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
190 return req; 226 return req;
191} 227}
192 228
193static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
194 struct sock *parent)
195{
196 struct request_sock *req = reqsk_queue_remove(queue);
197 struct sock *child = req->sk;
198
199 WARN_ON(child == NULL);
200
201 sk_acceptq_removed(parent);
202 __reqsk_free(req);
203 return child;
204}
205
206static inline int reqsk_queue_removed(struct request_sock_queue *queue, 229static inline int reqsk_queue_removed(struct request_sock_queue *queue,
207 struct request_sock *req) 230 struct request_sock *req)
208{ 231{
diff --git a/include/net/scm.h b/include/net/scm.h
index 7dc0854f0b38..975cca01048b 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -12,6 +12,12 @@
12 */ 12 */
13#define SCM_MAX_FD 253 13#define SCM_MAX_FD 253
14 14
15struct scm_creds {
16 u32 pid;
17 kuid_t uid;
18 kgid_t gid;
19};
20
15struct scm_fp_list { 21struct scm_fp_list {
16 short count; 22 short count;
17 short max; 23 short max;
@@ -22,7 +28,7 @@ struct scm_cookie {
22 struct pid *pid; /* Skb credentials */ 28 struct pid *pid; /* Skb credentials */
23 const struct cred *cred; 29 const struct cred *cred;
24 struct scm_fp_list *fp; /* Passed files */ 30 struct scm_fp_list *fp; /* Passed files */
25 struct ucred creds; /* Skb credentials */ 31 struct scm_creds creds; /* Skb credentials */
26#ifdef CONFIG_SECURITY_NETWORK 32#ifdef CONFIG_SECURITY_NETWORK
27 u32 secid; /* Passed security ID */ 33 u32 secid; /* Passed security ID */
28#endif 34#endif
@@ -49,7 +55,9 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
49{ 55{
50 scm->pid = get_pid(pid); 56 scm->pid = get_pid(pid);
51 scm->cred = cred ? get_cred(cred) : NULL; 57 scm->cred = cred ? get_cred(cred) : NULL;
52 cred_to_ucred(pid, cred, &scm->creds); 58 scm->creds.pid = pid_vnr(pid);
59 scm->creds.uid = cred ? cred->euid : INVALID_UID;
60 scm->creds.gid = cred ? cred->egid : INVALID_GID;
53} 61}
54 62
55static __inline__ void scm_destroy_cred(struct scm_cookie *scm) 63static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
@@ -65,7 +73,7 @@ static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
65static __inline__ void scm_destroy(struct scm_cookie *scm) 73static __inline__ void scm_destroy(struct scm_cookie *scm)
66{ 74{
67 scm_destroy_cred(scm); 75 scm_destroy_cred(scm);
68 if (scm && scm->fp) 76 if (scm->fp)
69 __scm_destroy(scm); 77 __scm_destroy(scm);
70} 78}
71 79
@@ -112,8 +120,15 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
112 return; 120 return;
113 } 121 }
114 122
115 if (test_bit(SOCK_PASSCRED, &sock->flags)) 123 if (test_bit(SOCK_PASSCRED, &sock->flags)) {
116 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); 124 struct user_namespace *current_ns = current_user_ns();
125 struct ucred ucreds = {
126 .pid = scm->creds.pid,
127 .uid = from_kuid_munged(current_ns, scm->creds.uid),
128 .gid = from_kgid_munged(current_ns, scm->creds.gid),
129 };
130 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
131 }
117 132
118 scm_destroy_cred(scm); 133 scm_destroy_cred(scm);
119 134
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index ff499640528b..9c6414f553f9 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -114,13 +114,12 @@
114/* 114/*
115 * sctp/protocol.c 115 * sctp/protocol.c
116 */ 116 */
117extern struct sock *sctp_get_ctl_sock(void); 117extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
118extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
119 sctp_scope_t, gfp_t gfp, 118 sctp_scope_t, gfp_t gfp,
120 int flags); 119 int flags);
121extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); 120extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
122extern int sctp_register_pf(struct sctp_pf *, sa_family_t); 121extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
123extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int); 122extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
124 123
125/* 124/*
126 * sctp/socket.c 125 * sctp/socket.c
@@ -140,12 +139,12 @@ extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
140/* 139/*
141 * sctp/primitive.c 140 * sctp/primitive.c
142 */ 141 */
143int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg); 142int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
144int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg); 143int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
145int sctp_primitive_ABORT(struct sctp_association *, void *arg); 144int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
146int sctp_primitive_SEND(struct sctp_association *, void *arg); 145int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
147int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg); 146int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
148int sctp_primitive_ASCONF(struct sctp_association *, void *arg); 147int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
149 148
150/* 149/*
151 * sctp/input.c 150 * sctp/input.c
@@ -156,7 +155,7 @@ void sctp_hash_established(struct sctp_association *);
156void sctp_unhash_established(struct sctp_association *); 155void sctp_unhash_established(struct sctp_association *);
157void sctp_hash_endpoint(struct sctp_endpoint *); 156void sctp_hash_endpoint(struct sctp_endpoint *);
158void sctp_unhash_endpoint(struct sctp_endpoint *); 157void sctp_unhash_endpoint(struct sctp_endpoint *);
159struct sock *sctp_err_lookup(int family, struct sk_buff *, 158struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
160 struct sctphdr *, struct sctp_association **, 159 struct sctphdr *, struct sctp_association **,
161 struct sctp_transport **); 160 struct sctp_transport **);
162void sctp_err_finish(struct sock *, struct sctp_association *); 161void sctp_err_finish(struct sock *, struct sctp_association *);
@@ -173,14 +172,14 @@ void sctp_backlog_migrate(struct sctp_association *assoc,
173/* 172/*
174 * sctp/proc.c 173 * sctp/proc.c
175 */ 174 */
176int sctp_snmp_proc_init(void); 175int sctp_snmp_proc_init(struct net *net);
177void sctp_snmp_proc_exit(void); 176void sctp_snmp_proc_exit(struct net *net);
178int sctp_eps_proc_init(void); 177int sctp_eps_proc_init(struct net *net);
179void sctp_eps_proc_exit(void); 178void sctp_eps_proc_exit(struct net *net);
180int sctp_assocs_proc_init(void); 179int sctp_assocs_proc_init(struct net *net);
181void sctp_assocs_proc_exit(void); 180void sctp_assocs_proc_exit(struct net *net);
182int sctp_remaddr_proc_init(void); 181int sctp_remaddr_proc_init(struct net *net);
183void sctp_remaddr_proc_exit(void); 182void sctp_remaddr_proc_exit(struct net *net);
184 183
185 184
186/* 185/*
@@ -222,11 +221,10 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
222#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk) 221#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk)
223 222
224/* SCTP SNMP MIB stats handlers */ 223/* SCTP SNMP MIB stats handlers */
225DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics); 224#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
226#define SCTP_INC_STATS(field) SNMP_INC_STATS(sctp_statistics, field) 225#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
227#define SCTP_INC_STATS_BH(field) SNMP_INC_STATS_BH(sctp_statistics, field) 226#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
228#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field) 227#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
229#define SCTP_DEC_STATS(field) SNMP_DEC_STATS(sctp_statistics, field)
230 228
231#endif /* !TEST_FRAME */ 229#endif /* !TEST_FRAME */
232 230
@@ -361,25 +359,29 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
361#define SCTP_DBG_OBJCNT_ENTRY(name) \ 359#define SCTP_DBG_OBJCNT_ENTRY(name) \
362{.label= #name, .counter= &sctp_dbg_objcnt_## name} 360{.label= #name, .counter= &sctp_dbg_objcnt_## name}
363 361
364void sctp_dbg_objcnt_init(void); 362void sctp_dbg_objcnt_init(struct net *);
365void sctp_dbg_objcnt_exit(void); 363void sctp_dbg_objcnt_exit(struct net *);
366 364
367#else 365#else
368 366
369#define SCTP_DBG_OBJCNT_INC(name) 367#define SCTP_DBG_OBJCNT_INC(name)
370#define SCTP_DBG_OBJCNT_DEC(name) 368#define SCTP_DBG_OBJCNT_DEC(name)
371 369
372static inline void sctp_dbg_objcnt_init(void) { return; } 370static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
373static inline void sctp_dbg_objcnt_exit(void) { return; } 371static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
374 372
375#endif /* CONFIG_SCTP_DBG_OBJCOUNT */ 373#endif /* CONFIG_SCTP_DBG_OBJCOUNT */
376 374
377#if defined CONFIG_SYSCTL 375#if defined CONFIG_SYSCTL
378void sctp_sysctl_register(void); 376void sctp_sysctl_register(void);
379void sctp_sysctl_unregister(void); 377void sctp_sysctl_unregister(void);
378int sctp_sysctl_net_register(struct net *net);
379void sctp_sysctl_net_unregister(struct net *net);
380#else 380#else
381static inline void sctp_sysctl_register(void) { return; } 381static inline void sctp_sysctl_register(void) { return; }
382static inline void sctp_sysctl_unregister(void) { return; } 382static inline void sctp_sysctl_unregister(void) { return; }
383static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
384static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
383#endif 385#endif
384 386
385/* Size of Supported Address Parameter for 'x' address types. */ 387/* Size of Supported Address Parameter for 'x' address types. */
@@ -586,7 +588,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
586 588
587extern struct proto sctp_prot; 589extern struct proto sctp_prot;
588extern struct proto sctpv6_prot; 590extern struct proto sctpv6_prot;
589extern struct proc_dir_entry *proc_net_sctp;
590void sctp_put_port(struct sock *sk); 591void sctp_put_port(struct sock *sk);
591 592
592extern struct idr sctp_assocs_id; 593extern struct idr sctp_assocs_id;
@@ -632,21 +633,21 @@ static inline int sctp_sanity_check(void)
632 633
633/* Warning: The following hash functions assume a power of two 'size'. */ 634/* Warning: The following hash functions assume a power of two 'size'. */
634/* This is the hash function for the SCTP port hash table. */ 635/* This is the hash function for the SCTP port hash table. */
635static inline int sctp_phashfn(__u16 lport) 636static inline int sctp_phashfn(struct net *net, __u16 lport)
636{ 637{
637 return lport & (sctp_port_hashsize - 1); 638 return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
638} 639}
639 640
640/* This is the hash function for the endpoint hash table. */ 641/* This is the hash function for the endpoint hash table. */
641static inline int sctp_ep_hashfn(__u16 lport) 642static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
642{ 643{
643 return lport & (sctp_ep_hashsize - 1); 644 return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
644} 645}
645 646
646/* This is the hash function for the association hash table. */ 647/* This is the hash function for the association hash table. */
647static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport) 648static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
648{ 649{
649 int h = (lport << 16) + rport; 650 int h = (lport << 16) + rport + net_hash_mix(net);
650 h ^= h>>8; 651 h ^= h>>8;
651 return h & (sctp_assoc_hashsize - 1); 652 return h & (sctp_assoc_hashsize - 1);
652} 653}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 9148632b8204..b5887e1677e4 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -77,7 +77,8 @@ typedef struct {
77 int action; 77 int action;
78} sctp_sm_command_t; 78} sctp_sm_command_t;
79 79
80typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *, 80typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
81 const struct sctp_endpoint *,
81 const struct sctp_association *, 82 const struct sctp_association *,
82 const sctp_subtype_t type, 83 const sctp_subtype_t type,
83 void *arg, 84 void *arg,
@@ -178,7 +179,8 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
178 179
179/* Prototypes for utility support functions. */ 180/* Prototypes for utility support functions. */
180__u8 sctp_get_chunk_type(struct sctp_chunk *chunk); 181__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
181const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t, 182const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
183 sctp_event_t,
182 sctp_state_t, 184 sctp_state_t,
183 sctp_subtype_t); 185 sctp_subtype_t);
184int sctp_chunk_iif(const struct sctp_chunk *); 186int sctp_chunk_iif(const struct sctp_chunk *);
@@ -268,7 +270,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *);
268 270
269/* Prototypes for statetable processing. */ 271/* Prototypes for statetable processing. */
270 272
271int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, 273int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
272 sctp_state_t state, 274 sctp_state_t state,
273 struct sctp_endpoint *, 275 struct sctp_endpoint *,
274 struct sctp_association *asoc, 276 struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index fc5e60016e37..0fef00f5d3ce 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -102,6 +102,7 @@ struct sctp_bind_bucket {
102 unsigned short fastreuse; 102 unsigned short fastreuse;
103 struct hlist_node node; 103 struct hlist_node node;
104 struct hlist_head owner; 104 struct hlist_head owner;
105 struct net *net;
105}; 106};
106 107
107struct sctp_bind_hashbucket { 108struct sctp_bind_hashbucket {
@@ -118,69 +119,6 @@ struct sctp_hashbucket {
118 119
119/* The SCTP globals structure. */ 120/* The SCTP globals structure. */
120extern struct sctp_globals { 121extern struct sctp_globals {
121 /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
122 *
123 * The following protocol parameters are RECOMMENDED:
124 *
125 * RTO.Initial - 3 seconds
126 * RTO.Min - 1 second
127 * RTO.Max - 60 seconds
128 * RTO.Alpha - 1/8 (3 when converted to right shifts.)
129 * RTO.Beta - 1/4 (2 when converted to right shifts.)
130 */
131 unsigned int rto_initial;
132 unsigned int rto_min;
133 unsigned int rto_max;
134
135 /* Note: rto_alpha and rto_beta are really defined as inverse
136 * powers of two to facilitate integer operations.
137 */
138 int rto_alpha;
139 int rto_beta;
140
141 /* Max.Burst - 4 */
142 int max_burst;
143
144 /* Whether Cookie Preservative is enabled(1) or not(0) */
145 int cookie_preserve_enable;
146
147 /* Valid.Cookie.Life - 60 seconds */
148 unsigned int valid_cookie_life;
149
150 /* Delayed SACK timeout 200ms default*/
151 unsigned int sack_timeout;
152
153 /* HB.interval - 30 seconds */
154 unsigned int hb_interval;
155
156 /* Association.Max.Retrans - 10 attempts
157 * Path.Max.Retrans - 5 attempts (per destination address)
158 * Max.Init.Retransmits - 8 attempts
159 */
160 int max_retrans_association;
161 int max_retrans_path;
162 int max_retrans_init;
163
164 /* Potentially-Failed.Max.Retrans sysctl value
165 * taken from:
166 * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
167 */
168 int pf_retrans;
169
170 /*
171 * Policy for preforming sctp/socket accounting
172 * 0 - do socket level accounting, all assocs share sk_sndbuf
173 * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
174 */
175 int sndbuf_policy;
176
177 /*
178 * Policy for preforming sctp/socket accounting
179 * 0 - do socket level accounting, all assocs share sk_rcvbuf
180 * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
181 */
182 int rcvbuf_policy;
183
184 /* The following variables are implementation specific. */ 122 /* The following variables are implementation specific. */
185 123
186 /* Default initialization values to be applied to new associations. */ 124 /* Default initialization values to be applied to new associations. */
@@ -204,70 +142,11 @@ extern struct sctp_globals {
204 int port_hashsize; 142 int port_hashsize;
205 struct sctp_bind_hashbucket *port_hashtable; 143 struct sctp_bind_hashbucket *port_hashtable;
206 144
207 /* This is the global local address list.
208 * We actively maintain this complete list of addresses on
209 * the system by catching address add/delete events.
210 *
211 * It is a list of sctp_sockaddr_entry.
212 */
213 struct list_head local_addr_list;
214 int default_auto_asconf;
215 struct list_head addr_waitq;
216 struct timer_list addr_wq_timer;
217 struct list_head auto_asconf_splist;
218 spinlock_t addr_wq_lock;
219
220 /* Lock that protects the local_addr_list writers */
221 spinlock_t addr_list_lock;
222
223 /* Flag to indicate if addip is enabled. */
224 int addip_enable;
225 int addip_noauth_enable;
226
227 /* Flag to indicate if PR-SCTP is enabled. */
228 int prsctp_enable;
229
230 /* Flag to idicate if SCTP-AUTH is enabled */
231 int auth_enable;
232
233 /*
234 * Policy to control SCTP IPv4 address scoping
235 * 0 - Disable IPv4 address scoping
236 * 1 - Enable IPv4 address scoping
237 * 2 - Selectively allow only IPv4 private addresses
238 * 3 - Selectively allow only IPv4 link local address
239 */
240 int ipv4_scope_policy;
241
242 /* Flag to indicate whether computing and verifying checksum 145 /* Flag to indicate whether computing and verifying checksum
243 * is disabled. */ 146 * is disabled. */
244 bool checksum_disable; 147 bool checksum_disable;
245
246 /* Threshold for rwnd update SACKS. Receive buffer shifted this many
247 * bits is an indicator of when to send and window update SACK.
248 */
249 int rwnd_update_shift;
250
251 /* Threshold for autoclose timeout, in seconds. */
252 unsigned long max_autoclose;
253} sctp_globals; 148} sctp_globals;
254 149
255#define sctp_rto_initial (sctp_globals.rto_initial)
256#define sctp_rto_min (sctp_globals.rto_min)
257#define sctp_rto_max (sctp_globals.rto_max)
258#define sctp_rto_alpha (sctp_globals.rto_alpha)
259#define sctp_rto_beta (sctp_globals.rto_beta)
260#define sctp_max_burst (sctp_globals.max_burst)
261#define sctp_valid_cookie_life (sctp_globals.valid_cookie_life)
262#define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable)
263#define sctp_max_retrans_association (sctp_globals.max_retrans_association)
264#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
265#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy)
266#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
267#define sctp_pf_retrans (sctp_globals.pf_retrans)
268#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
269#define sctp_sack_timeout (sctp_globals.sack_timeout)
270#define sctp_hb_interval (sctp_globals.hb_interval)
271#define sctp_max_instreams (sctp_globals.max_instreams) 150#define sctp_max_instreams (sctp_globals.max_instreams)
272#define sctp_max_outstreams (sctp_globals.max_outstreams) 151#define sctp_max_outstreams (sctp_globals.max_outstreams)
273#define sctp_address_families (sctp_globals.address_families) 152#define sctp_address_families (sctp_globals.address_families)
@@ -277,21 +156,7 @@ extern struct sctp_globals {
277#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable) 156#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
278#define sctp_port_hashsize (sctp_globals.port_hashsize) 157#define sctp_port_hashsize (sctp_globals.port_hashsize)
279#define sctp_port_hashtable (sctp_globals.port_hashtable) 158#define sctp_port_hashtable (sctp_globals.port_hashtable)
280#define sctp_local_addr_list (sctp_globals.local_addr_list)
281#define sctp_local_addr_lock (sctp_globals.addr_list_lock)
282#define sctp_auto_asconf_splist (sctp_globals.auto_asconf_splist)
283#define sctp_addr_waitq (sctp_globals.addr_waitq)
284#define sctp_addr_wq_timer (sctp_globals.addr_wq_timer)
285#define sctp_addr_wq_lock (sctp_globals.addr_wq_lock)
286#define sctp_default_auto_asconf (sctp_globals.default_auto_asconf)
287#define sctp_scope_policy (sctp_globals.ipv4_scope_policy)
288#define sctp_addip_enable (sctp_globals.addip_enable)
289#define sctp_addip_noauth (sctp_globals.addip_noauth_enable)
290#define sctp_prsctp_enable (sctp_globals.prsctp_enable)
291#define sctp_auth_enable (sctp_globals.auth_enable)
292#define sctp_checksum_disable (sctp_globals.checksum_disable) 159#define sctp_checksum_disable (sctp_globals.checksum_disable)
293#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
294#define sctp_max_autoclose (sctp_globals.max_autoclose)
295 160
296/* SCTP Socket type: UDP or TCP style. */ 161/* SCTP Socket type: UDP or TCP style. */
297typedef enum { 162typedef enum {
@@ -1085,7 +950,7 @@ struct sctp_transport {
1085 __u64 hb_nonce; 950 __u64 hb_nonce;
1086}; 951};
1087 952
1088struct sctp_transport *sctp_transport_new(const union sctp_addr *, 953struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
1089 gfp_t); 954 gfp_t);
1090void sctp_transport_set_owner(struct sctp_transport *, 955void sctp_transport_set_owner(struct sctp_transport *,
1091 struct sctp_association *); 956 struct sctp_association *);
@@ -1240,7 +1105,7 @@ struct sctp_bind_addr {
1240 1105
1241void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port); 1106void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
1242void sctp_bind_addr_free(struct sctp_bind_addr *); 1107void sctp_bind_addr_free(struct sctp_bind_addr *);
1243int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 1108int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
1244 const struct sctp_bind_addr *src, 1109 const struct sctp_bind_addr *src,
1245 sctp_scope_t scope, gfp_t gfp, 1110 sctp_scope_t scope, gfp_t gfp,
1246 int flags); 1111 int flags);
@@ -1267,7 +1132,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
1267 __u16 port, gfp_t gfp); 1132 __u16 port, gfp_t gfp);
1268 1133
1269sctp_scope_t sctp_scope(const union sctp_addr *); 1134sctp_scope_t sctp_scope(const union sctp_addr *);
1270int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); 1135int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
1271int sctp_is_any(struct sock *sk, const union sctp_addr *addr); 1136int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
1272int sctp_addr_is_valid(const union sctp_addr *addr); 1137int sctp_addr_is_valid(const union sctp_addr *addr);
1273int sctp_is_ep_boundall(struct sock *sk); 1138int sctp_is_ep_boundall(struct sock *sk);
@@ -1425,13 +1290,13 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
1425int sctp_endpoint_is_peeled_off(struct sctp_endpoint *, 1290int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
1426 const union sctp_addr *); 1291 const union sctp_addr *);
1427struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *, 1292struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
1428 const union sctp_addr *); 1293 struct net *, const union sctp_addr *);
1429int sctp_has_association(const union sctp_addr *laddr, 1294int sctp_has_association(struct net *net, const union sctp_addr *laddr,
1430 const union sctp_addr *paddr); 1295 const union sctp_addr *paddr);
1431 1296
1432int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t, 1297int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
1433 sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, 1298 sctp_cid_t, sctp_init_chunk_t *peer_init,
1434 struct sctp_chunk **err_chunk); 1299 struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
1435int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk, 1300int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
1436 const union sctp_addr *peer, 1301 const union sctp_addr *peer,
1437 sctp_init_chunk_t *init, gfp_t gfp); 1302 sctp_init_chunk_t *init, gfp_t gfp);
@@ -2013,6 +1878,7 @@ void sctp_assoc_control_transport(struct sctp_association *,
2013 sctp_transport_cmd_t, sctp_sn_error_t); 1878 sctp_transport_cmd_t, sctp_sn_error_t);
2014struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32); 1879struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
2015struct sctp_transport *sctp_assoc_is_match(struct sctp_association *, 1880struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
1881 struct net *,
2016 const union sctp_addr *, 1882 const union sctp_addr *,
2017 const union sctp_addr *); 1883 const union sctp_addr *);
2018void sctp_assoc_migrate(struct sctp_association *, struct sock *); 1884void sctp_assoc_migrate(struct sctp_association *, struct sock *);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 0147b901e79c..71596261fa99 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -154,13 +154,15 @@ struct linux_xfrm_mib {
154 */ 154 */
155#define SNMP_UPD_PO_STATS(mib, basefield, addend) \ 155#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
156 do { \ 156 do { \
157 this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \ 157 __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs; \
158 this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \ 158 this_cpu_inc(ptr[basefield##PKTS]); \
159 this_cpu_add(ptr[basefield##OCTETS], addend); \
159 } while (0) 160 } while (0)
160#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 161#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
161 do { \ 162 do { \
162 __this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \ 163 __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs; \
163 __this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \ 164 __this_cpu_inc(ptr[basefield##PKTS]); \
165 __this_cpu_add(ptr[basefield##OCTETS], addend); \
164 } while (0) 166 } while (0)
165 167
166 168
diff --git a/include/net/sock.h b/include/net/sock.h
index 0d7e9834d9be..c945fba4f543 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -247,8 +247,7 @@ struct cg_proto;
247 * @sk_stamp: time stamp of last packet received 247 * @sk_stamp: time stamp of last packet received
248 * @sk_socket: Identd and reporting IO signals 248 * @sk_socket: Identd and reporting IO signals
249 * @sk_user_data: RPC layer private data 249 * @sk_user_data: RPC layer private data
250 * @sk_sndmsg_page: cached page for sendmsg 250 * @sk_frag: cached page frag
251 * @sk_sndmsg_off: cached offset for sendmsg
252 * @sk_peek_off: current peek_offset value 251 * @sk_peek_off: current peek_offset value
253 * @sk_send_head: front of stuff to transmit 252 * @sk_send_head: front of stuff to transmit
254 * @sk_security: used by security modules 253 * @sk_security: used by security modules
@@ -362,9 +361,8 @@ struct sock {
362 ktime_t sk_stamp; 361 ktime_t sk_stamp;
363 struct socket *sk_socket; 362 struct socket *sk_socket;
364 void *sk_user_data; 363 void *sk_user_data;
365 struct page *sk_sndmsg_page; 364 struct page_frag sk_frag;
366 struct sk_buff *sk_send_head; 365 struct sk_buff *sk_send_head;
367 __u32 sk_sndmsg_off;
368 __s32 sk_peek_off; 366 __s32 sk_peek_off;
369 int sk_write_pending; 367 int sk_write_pending;
370#ifdef CONFIG_SECURITY 368#ifdef CONFIG_SECURITY
@@ -2026,18 +2024,23 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2026 2024
2027struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); 2025struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
2028 2026
2029static inline struct page *sk_stream_alloc_page(struct sock *sk) 2027/**
2028 * sk_page_frag - return an appropriate page_frag
2029 * @sk: socket
2030 *
2031 * If socket allocation mode allows current thread to sleep, it means its
2032 * safe to use the per task page_frag instead of the per socket one.
2033 */
2034static inline struct page_frag *sk_page_frag(struct sock *sk)
2030{ 2035{
2031 struct page *page = NULL; 2036 if (sk->sk_allocation & __GFP_WAIT)
2037 return &current->task_frag;
2032 2038
2033 page = alloc_pages(sk->sk_allocation, 0); 2039 return &sk->sk_frag;
2034 if (!page) {
2035 sk_enter_memory_pressure(sk);
2036 sk_stream_moderate_sndbuf(sk);
2037 }
2038 return page;
2039} 2040}
2040 2041
2042extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
2043
2041/* 2044/*
2042 * Default write policy as shown to user space via poll/select/SIGIO 2045 * Default write policy as shown to user space via poll/select/SIGIO
2043 */ 2046 */
@@ -2218,8 +2221,6 @@ extern int net_msg_warn;
2218extern __u32 sysctl_wmem_max; 2221extern __u32 sysctl_wmem_max;
2219extern __u32 sysctl_rmem_max; 2222extern __u32 sysctl_rmem_max;
2220 2223
2221extern void sk_init(void);
2222
2223extern int sysctl_optmem_max; 2224extern int sysctl_optmem_max;
2224 2225
2225extern __u32 sysctl_wmem_default; 2226extern __u32 sysctl_wmem_default;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9a0021d16d91..6feeccd83dd7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -98,11 +98,21 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
98 * 15 is ~13-30min depending on RTO. 98 * 15 is ~13-30min depending on RTO.
99 */ 99 */
100 100
101#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a 101#define TCP_SYN_RETRIES 6 /* This is how many retries are done
102 * connection: ~180sec is RFC minimum */ 102 * when active opening a connection.
103 * RFC1122 says the minimum retry MUST
104 * be at least 180secs. Nevertheless
105 * this value is corresponding to
106 * 63secs of retransmission with the
107 * current initial RTO.
108 */
103 109
104#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a 110#define TCP_SYNACK_RETRIES 5 /* This is how may retries are done
105 * connection: ~180sec is RFC minimum */ 111 * when passive opening a connection.
112 * This is corresponding to 31secs of
113 * retransmission with the current
114 * initial RTO.
115 */
106 116
107#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT 117#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
108 * state, about 60 seconds */ 118 * state, about 60 seconds */
@@ -214,8 +224,24 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
214 224
215/* Bit Flags for sysctl_tcp_fastopen */ 225/* Bit Flags for sysctl_tcp_fastopen */
216#define TFO_CLIENT_ENABLE 1 226#define TFO_CLIENT_ENABLE 1
227#define TFO_SERVER_ENABLE 2
217#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */ 228#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
218 229
230/* Process SYN data but skip cookie validation */
231#define TFO_SERVER_COOKIE_NOT_CHKED 0x100
232/* Accept SYN data w/o any cookie option */
233#define TFO_SERVER_COOKIE_NOT_REQD 0x200
234
235/* Force enable TFO on all listeners, i.e., not requiring the
236 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
237 */
238#define TFO_SERVER_WO_SOCKOPT1 0x400
239#define TFO_SERVER_WO_SOCKOPT2 0x800
240/* Always create TFO child sockets on a TFO listener even when
241 * cookie/data not present. (For testing purpose!)
242 */
243#define TFO_SERVER_ALWAYS 0x1000
244
219extern struct inet_timewait_death_row tcp_death_row; 245extern struct inet_timewait_death_row tcp_death_row;
220 246
221/* sysctl variables for tcp */ 247/* sysctl variables for tcp */
@@ -398,7 +424,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *
398 const struct tcphdr *th); 424 const struct tcphdr *th);
399extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, 425extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
400 struct request_sock *req, 426 struct request_sock *req,
401 struct request_sock **prev); 427 struct request_sock **prev,
428 bool fastopen);
402extern int tcp_child_process(struct sock *parent, struct sock *child, 429extern int tcp_child_process(struct sock *parent, struct sock *child,
403 struct sk_buff *skb); 430 struct sk_buff *skb);
404extern bool tcp_use_frto(struct sock *sk); 431extern bool tcp_use_frto(struct sock *sk);
@@ -411,12 +438,6 @@ extern void tcp_metrics_init(void);
411extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check); 438extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
412extern bool tcp_remember_stamp(struct sock *sk); 439extern bool tcp_remember_stamp(struct sock *sk);
413extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw); 440extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
414extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
415 struct tcp_fastopen_cookie *cookie,
416 int *syn_loss, unsigned long *last_syn_loss);
417extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
418 struct tcp_fastopen_cookie *cookie,
419 bool syn_lost);
420extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); 441extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
421extern void tcp_disable_fack(struct tcp_sock *tp); 442extern void tcp_disable_fack(struct tcp_sock *tp);
422extern void tcp_close(struct sock *sk, long timeout); 443extern void tcp_close(struct sock *sk, long timeout);
@@ -458,7 +479,8 @@ extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
458extern int tcp_connect(struct sock *sk); 479extern int tcp_connect(struct sock *sk);
459extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 480extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
460 struct request_sock *req, 481 struct request_sock *req,
461 struct request_values *rvp); 482 struct request_values *rvp,
483 struct tcp_fastopen_cookie *foc);
462extern int tcp_disconnect(struct sock *sk, int flags); 484extern int tcp_disconnect(struct sock *sk, int flags);
463 485
464void tcp_connect_init(struct sock *sk); 486void tcp_connect_init(struct sock *sk);
@@ -527,6 +549,7 @@ extern void tcp_send_delayed_ack(struct sock *sk);
527extern void tcp_cwnd_application_limited(struct sock *sk); 549extern void tcp_cwnd_application_limited(struct sock *sk);
528extern void tcp_resume_early_retransmit(struct sock *sk); 550extern void tcp_resume_early_retransmit(struct sock *sk);
529extern void tcp_rearm_rto(struct sock *sk); 551extern void tcp_rearm_rto(struct sock *sk);
552extern void tcp_reset(struct sock *sk);
530 553
531/* tcp_timer.c */ 554/* tcp_timer.c */
532extern void tcp_init_xmit_timers(struct sock *); 555extern void tcp_init_xmit_timers(struct sock *);
@@ -576,6 +599,7 @@ extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
576extern int tcp_mss_to_mtu(struct sock *sk, int mss); 599extern int tcp_mss_to_mtu(struct sock *sk, int mss);
577extern void tcp_mtup_init(struct sock *sk); 600extern void tcp_mtup_init(struct sock *sk);
578extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); 601extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
602extern void tcp_init_buffer_space(struct sock *sk);
579 603
580static inline void tcp_bound_rto(const struct sock *sk) 604static inline void tcp_bound_rto(const struct sock *sk)
581{ 605{
@@ -889,15 +913,21 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
889 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; 913 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
890} 914}
891 915
916static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
917{
918 return (TCPF_CA_CWR | TCPF_CA_Recovery) &
919 (1 << inet_csk(sk)->icsk_ca_state);
920}
921
892/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. 922/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
893 * The exception is rate halving phase, when cwnd is decreasing towards 923 * The exception is cwnd reduction phase, when cwnd is decreasing towards
894 * ssthresh. 924 * ssthresh.
895 */ 925 */
896static inline __u32 tcp_current_ssthresh(const struct sock *sk) 926static inline __u32 tcp_current_ssthresh(const struct sock *sk)
897{ 927{
898 const struct tcp_sock *tp = tcp_sk(sk); 928 const struct tcp_sock *tp = tcp_sk(sk);
899 929
900 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery)) 930 if (tcp_in_cwnd_reduction(sk))
901 return tp->snd_ssthresh; 931 return tp->snd_ssthresh;
902 else 932 else
903 return max(tp->snd_ssthresh, 933 return max(tp->snd_ssthresh,
@@ -1094,6 +1124,8 @@ static inline void tcp_openreq_init(struct request_sock *req,
1094 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ 1124 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
1095 req->cookie_ts = 0; 1125 req->cookie_ts = 0;
1096 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; 1126 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1127 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1128 tcp_rsk(req)->snt_synack = 0;
1097 req->mss = rx_opt->mss_clamp; 1129 req->mss = rx_opt->mss_clamp;
1098 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; 1130 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1099 ireq->tstamp_ok = rx_opt->tstamp_ok; 1131 ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1106,6 +1138,15 @@ static inline void tcp_openreq_init(struct request_sock *req,
1106 ireq->loc_port = tcp_hdr(skb)->dest; 1138 ireq->loc_port = tcp_hdr(skb)->dest;
1107} 1139}
1108 1140
1141/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
1142static inline void tcp_synack_rtt_meas(struct sock *sk,
1143 struct request_sock *req)
1144{
1145 if (tcp_rsk(req)->snt_synack)
1146 tcp_valid_rtt_meas(sk,
1147 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1148}
1149
1109extern void tcp_enter_memory_pressure(struct sock *sk); 1150extern void tcp_enter_memory_pressure(struct sock *sk);
1110 1151
1111static inline int keepalive_intvl_when(const struct tcp_sock *tp) 1152static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1298,15 +1339,34 @@ extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff
1298extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1339extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1299 const struct tcp_md5sig_key *key); 1340 const struct tcp_md5sig_key *key);
1300 1341
1342/* From tcp_fastopen.c */
1343extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1344 struct tcp_fastopen_cookie *cookie,
1345 int *syn_loss, unsigned long *last_syn_loss);
1346extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1347 struct tcp_fastopen_cookie *cookie,
1348 bool syn_lost);
1301struct tcp_fastopen_request { 1349struct tcp_fastopen_request {
1302 /* Fast Open cookie. Size 0 means a cookie request */ 1350 /* Fast Open cookie. Size 0 means a cookie request */
1303 struct tcp_fastopen_cookie cookie; 1351 struct tcp_fastopen_cookie cookie;
1304 struct msghdr *data; /* data in MSG_FASTOPEN */ 1352 struct msghdr *data; /* data in MSG_FASTOPEN */
1305 u16 copied; /* queued in tcp_connect() */ 1353 u16 copied; /* queued in tcp_connect() */
1306}; 1354};
1307
1308void tcp_free_fastopen_req(struct tcp_sock *tp); 1355void tcp_free_fastopen_req(struct tcp_sock *tp);
1309 1356
1357extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1358int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1359void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
1360
1361#define TCP_FASTOPEN_KEY_LENGTH 16
1362
1363/* Fastopen key context */
1364struct tcp_fastopen_context {
1365 struct crypto_cipher __rcu *tfm;
1366 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
1367 struct rcu_head rcu;
1368};
1369
1310/* write queue abstraction */ 1370/* write queue abstraction */
1311static inline void tcp_write_queue_purge(struct sock *sk) 1371static inline void tcp_write_queue_purge(struct sock *sk)
1312{ 1372{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 411d83c9821d..6f0ba01afe73 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -263,7 +263,7 @@ struct km_event {
263 } data; 263 } data;
264 264
265 u32 seq; 265 u32 seq;
266 u32 pid; 266 u32 portid;
267 u32 event; 267 u32 event;
268 struct net *net; 268 struct net *net;
269}; 269};
@@ -313,7 +313,7 @@ extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
313 313
314struct xfrm_tmpl; 314struct xfrm_tmpl;
315extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); 315extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
316extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid); 316extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
317extern int __xfrm_state_delete(struct xfrm_state *x); 317extern int __xfrm_state_delete(struct xfrm_state *x);
318 318
319struct xfrm_state_afinfo { 319struct xfrm_state_afinfo {
@@ -576,7 +576,7 @@ struct xfrm_mgr {
576 struct list_head list; 576 struct list_head list;
577 char *id; 577 char *id;
578 int (*notify)(struct xfrm_state *x, const struct km_event *c); 578 int (*notify)(struct xfrm_state *x, const struct km_event *c);
579 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir); 579 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
580 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir); 580 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
581 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); 581 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
582 int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c); 582 int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
@@ -1558,7 +1558,7 @@ extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1558#endif 1558#endif
1559 1559
1560extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); 1560extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1561extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid); 1561extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1562extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr); 1562extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1563 1563
1564extern void xfrm_input_init(void); 1564extern void xfrm_input_init(void);
diff --git a/include/scsi/scsi_netlink.h b/include/scsi/scsi_netlink.h
index 5cb20ccb1956..62b4edab15d3 100644
--- a/include/scsi/scsi_netlink.h
+++ b/include/scsi/scsi_netlink.h
@@ -119,29 +119,5 @@ struct scsi_nl_host_vendor_msg {
119 (hdr)->msglen = mlen; \ 119 (hdr)->msglen = mlen; \
120 } 120 }
121 121
122
123#ifdef __KERNEL__
124
125#include <scsi/scsi_host.h>
126
127/* Exported Kernel Interfaces */
128int scsi_nl_add_transport(u8 tport,
129 int (*msg_handler)(struct sk_buff *),
130 void (*event_handler)(struct notifier_block *, unsigned long, void *));
131void scsi_nl_remove_transport(u8 tport);
132
133int scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
134 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
135 u32 len, u32 pid),
136 void (*nlevt_handler)(struct notifier_block *nb,
137 unsigned long event, void *notify_ptr));
138void scsi_nl_remove_driver(u64 vendor_id);
139
140void scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr);
141int scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
142 char *data_buf, u32 data_len);
143
144#endif /* __KERNEL__ */
145
146#endif /* SCSI_NETLINK_H */ 122#endif /* SCSI_NETLINK_H */
147 123
diff --git a/kernel/audit.c b/kernel/audit.c
index 511488a7bc71..4d0ceede3319 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -88,11 +88,11 @@ static int audit_failure = AUDIT_FAIL_PRINTK;
88 88
89/* 89/*
90 * If audit records are to be written to the netlink socket, audit_pid 90 * If audit records are to be written to the netlink socket, audit_pid
91 * contains the pid of the auditd process and audit_nlk_pid contains 91 * contains the pid of the auditd process and audit_nlk_portid contains
92 * the pid to use to send netlink messages to that process. 92 * the portid to use to send netlink messages to that process.
93 */ 93 */
94int audit_pid; 94int audit_pid;
95static int audit_nlk_pid; 95static int audit_nlk_portid;
96 96
97/* If audit_rate_limit is non-zero, limit the rate of sending audit records 97/* If audit_rate_limit is non-zero, limit the rate of sending audit records
98 * to that number per second. This prevents DoS attacks, but results in 98 * to that number per second. This prevents DoS attacks, but results in
@@ -402,7 +402,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
402 int err; 402 int err;
403 /* take a reference in case we can't send it and we want to hold it */ 403 /* take a reference in case we can't send it and we want to hold it */
404 skb_get(skb); 404 skb_get(skb);
405 err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0); 405 err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
406 if (err < 0) { 406 if (err < 0) {
407 BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ 407 BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
408 printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid); 408 printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
@@ -679,7 +679,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
679 status_set.backlog_limit = audit_backlog_limit; 679 status_set.backlog_limit = audit_backlog_limit;
680 status_set.lost = atomic_read(&audit_lost); 680 status_set.lost = atomic_read(&audit_lost);
681 status_set.backlog = skb_queue_len(&audit_skb_queue); 681 status_set.backlog = skb_queue_len(&audit_skb_queue);
682 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, 682 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
683 &status_set, sizeof(status_set)); 683 &status_set, sizeof(status_set));
684 break; 684 break;
685 case AUDIT_SET: 685 case AUDIT_SET:
@@ -707,7 +707,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
707 sessionid, sid, 1); 707 sessionid, sid, 1);
708 708
709 audit_pid = new_pid; 709 audit_pid = new_pid;
710 audit_nlk_pid = NETLINK_CB(skb).pid; 710 audit_nlk_portid = NETLINK_CB(skb).portid;
711 } 711 }
712 if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { 712 if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
713 err = audit_set_rate_limit(status_get->rate_limit, 713 err = audit_set_rate_limit(status_get->rate_limit,
@@ -750,7 +750,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
750 size--; 750 size--;
751 audit_log_n_untrustedstring(ab, data, size); 751 audit_log_n_untrustedstring(ab, data, size);
752 } 752 }
753 audit_set_pid(ab, NETLINK_CB(skb).pid); 753 audit_set_pid(ab, NETLINK_CB(skb).portid);
754 audit_log_end(ab); 754 audit_log_end(ab);
755 } 755 }
756 break; 756 break;
@@ -769,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
769 } 769 }
770 /* fallthrough */ 770 /* fallthrough */
771 case AUDIT_LIST: 771 case AUDIT_LIST:
772 err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, 772 err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
773 seq, data, nlmsg_len(nlh), 773 seq, data, nlmsg_len(nlh),
774 loginuid, sessionid, sid); 774 loginuid, sessionid, sid);
775 break; 775 break;
@@ -788,7 +788,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
788 } 788 }
789 /* fallthrough */ 789 /* fallthrough */
790 case AUDIT_LIST_RULES: 790 case AUDIT_LIST_RULES:
791 err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid, 791 err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
792 seq, data, nlmsg_len(nlh), 792 seq, data, nlmsg_len(nlh),
793 loginuid, sessionid, sid); 793 loginuid, sessionid, sid);
794 break; 794 break;
@@ -859,7 +859,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
859 memcpy(sig_data->ctx, ctx, len); 859 memcpy(sig_data->ctx, ctx, len);
860 security_release_secctx(ctx, len); 860 security_release_secctx(ctx, len);
861 } 861 }
862 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, 862 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
863 0, 0, sig_data, sizeof(*sig_data) + len); 863 0, 0, sig_data, sizeof(*sig_data) + len);
864 kfree(sig_data); 864 kfree(sig_data);
865 break; 865 break;
@@ -871,7 +871,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
871 s.enabled = tsk->signal->audit_tty != 0; 871 s.enabled = tsk->signal->audit_tty != 0;
872 spin_unlock_irq(&tsk->sighand->siglock); 872 spin_unlock_irq(&tsk->sighand->siglock);
873 873
874 audit_send_reply(NETLINK_CB(skb).pid, seq, 874 audit_send_reply(NETLINK_CB(skb).portid, seq,
875 AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); 875 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
876 break; 876 break;
877 } 877 }
@@ -946,8 +946,7 @@ static int __init audit_init(void)
946 946
947 printk(KERN_INFO "audit: initializing netlink socket (%s)\n", 947 printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
948 audit_default ? "enabled" : "disabled"); 948 audit_default ? "enabled" : "disabled");
949 audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 949 audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, &cfg);
950 THIS_MODULE, &cfg);
951 if (!audit_sock) 950 if (!audit_sock)
952 audit_panic("cannot initialize netlink socket"); 951 audit_panic("cannot initialize netlink socket");
953 else 952 else
diff --git a/kernel/exit.c b/kernel/exit.c
index f65345f9e5bb..42f25952edd9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1046,6 +1046,9 @@ void do_exit(long code)
1046 if (tsk->splice_pipe) 1046 if (tsk->splice_pipe)
1047 __free_pipe_info(tsk->splice_pipe); 1047 __free_pipe_info(tsk->splice_pipe);
1048 1048
1049 if (tsk->task_frag.page)
1050 put_page(tsk->task_frag.page);
1051
1049 validate_creds_for_do_exit(tsk); 1052 validate_creds_for_do_exit(tsk);
1050 1053
1051 preempt_disable(); 1054 preempt_disable();
diff --git a/kernel/fork.c b/kernel/fork.c
index 5a0e74d89a5a..a2b1efc20928 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -330,6 +330,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
330 tsk->btrace_seq = 0; 330 tsk->btrace_seq = 0;
331#endif 331#endif
332 tsk->splice_pipe = NULL; 332 tsk->splice_pipe = NULL;
333 tsk->task_frag.page = NULL;
333 334
334 account_kernel_stack(ti, 1); 335 account_kernel_stack(ti, 1);
335 336
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 3880df2acf05..5eab1f3edfa5 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -476,7 +476,7 @@ static int cmd_attr_register_cpumask(struct genl_info *info)
476 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); 476 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
477 if (rc < 0) 477 if (rc < 0)
478 goto out; 478 goto out;
479 rc = add_del_listener(info->snd_pid, mask, REGISTER); 479 rc = add_del_listener(info->snd_portid, mask, REGISTER);
480out: 480out:
481 free_cpumask_var(mask); 481 free_cpumask_var(mask);
482 return rc; 482 return rc;
@@ -492,7 +492,7 @@ static int cmd_attr_deregister_cpumask(struct genl_info *info)
492 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); 492 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
493 if (rc < 0) 493 if (rc < 0)
494 goto out; 494 goto out;
495 rc = add_del_listener(info->snd_pid, mask, DEREGISTER); 495 rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
496out: 496out:
497 free_cpumask_var(mask); 497 free_cpumask_var(mask);
498 return rc; 498 return rc;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 0401d2916d9f..52e5abbc41db 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
375 struct uevent_sock *ue_sk; 375 struct uevent_sock *ue_sk;
376 struct netlink_kernel_cfg cfg = { 376 struct netlink_kernel_cfg cfg = {
377 .groups = 1, 377 .groups = 1,
378 .flags = NL_CFG_F_NONROOT_RECV,
378 }; 379 };
379 380
380 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); 381 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
381 if (!ue_sk) 382 if (!ue_sk)
382 return -ENOMEM; 383 return -ENOMEM;
383 384
384 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, 385 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
385 THIS_MODULE, &cfg);
386 if (!ue_sk->sk) { 386 if (!ue_sk->sk) {
387 printk(KERN_ERR 387 printk(KERN_ERR
388 "kobject_uevent: unable to create netlink socket!\n"); 388 "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
422 422
423static int __init kobject_uevent_init(void) 423static int __init kobject_uevent_init(void)
424{ 424{
425 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
426 return register_pernet_subsys(&uevent_net_ops); 425 return register_pernet_subsys(&uevent_net_ops);
427} 426}
428 427
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..18eca7809b08 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
22 [NLA_U64] = sizeof(u64), 22 [NLA_U64] = sizeof(u64),
23 [NLA_MSECS] = sizeof(u64), 23 [NLA_MSECS] = sizeof(u64),
24 [NLA_NESTED] = NLA_HDRLEN, 24 [NLA_NESTED] = NLA_HDRLEN,
25 [NLA_S8] = sizeof(s8),
26 [NLA_S16] = sizeof(s16),
27 [NLA_S32] = sizeof(s32),
28 [NLA_S64] = sizeof(s64),
25}; 29};
26 30
27static int validate_nla(const struct nlattr *nla, int maxtype, 31static int validate_nla(const struct nlattr *nla, int maxtype,
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8ca533c95de0..b258da88f675 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -368,3 +368,9 @@ void vlan_vids_del_by_dev(struct net_device *dev,
368 vlan_vid_del(dev, vid_info->vid); 368 vlan_vid_del(dev, vid_info->vid);
369} 369}
370EXPORT_SYMBOL(vlan_vids_del_by_dev); 370EXPORT_SYMBOL(vlan_vids_del_by_dev);
371
372bool vlan_uses_dev(const struct net_device *dev)
373{
374 return rtnl_dereference(dev->vlan_info) ? true : false;
375}
376EXPORT_SYMBOL(vlan_uses_dev);
diff --git a/net/Kconfig b/net/Kconfig
index 245831bec09a..30b48f523135 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -52,6 +52,8 @@ source "net/iucv/Kconfig"
52 52
53config INET 53config INET
54 bool "TCP/IP networking" 54 bool "TCP/IP networking"
55 select CRYPTO
56 select CRYPTO_AES
55 ---help--- 57 ---help---
56 These are the protocols used on the Internet and on most local 58 These are the protocols used on the Internet and on most local
57 Ethernets. It is highly recommended to say Y here (this will enlarge 59 Ethernets. It is highly recommended to say Y here (this will enlarge
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 23f45ce6f351..0447d5d0b639 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -432,7 +432,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
432 size = dev->ops->ioctl(dev, cmd, buf); 432 size = dev->ops->ioctl(dev, cmd, buf);
433 } 433 }
434 if (size < 0) { 434 if (size < 0) {
435 error = (size == -ENOIOCTLCMD ? -EINVAL : size); 435 error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
436 goto done; 436 goto done;
437 } 437 }
438 } 438 }
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 469daabd90c7..b02b75dae3a8 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -166,13 +166,15 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
166 int16_t buff_pos; 166 int16_t buff_pos;
167 struct batadv_ogm_packet *batadv_ogm_packet; 167 struct batadv_ogm_packet *batadv_ogm_packet;
168 struct sk_buff *skb; 168 struct sk_buff *skb;
169 uint8_t *packet_pos;
169 170
170 if (hard_iface->if_status != BATADV_IF_ACTIVE) 171 if (hard_iface->if_status != BATADV_IF_ACTIVE)
171 return; 172 return;
172 173
173 packet_num = 0; 174 packet_num = 0;
174 buff_pos = 0; 175 buff_pos = 0;
175 batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; 176 packet_pos = forw_packet->skb->data;
177 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
176 178
177 /* adjust all flags and log packets */ 179 /* adjust all flags and log packets */
178 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 180 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
181 /* we might have aggregated direct link packets with an 183 /* we might have aggregated direct link packets with an
182 * ordinary base packet 184 * ordinary base packet
183 */ 185 */
184 if ((forw_packet->direct_link_flags & (1 << packet_num)) && 186 if (forw_packet->direct_link_flags & BIT(packet_num) &&
185 (forw_packet->if_incoming == hard_iface)) 187 forw_packet->if_incoming == hard_iface)
186 batadv_ogm_packet->flags |= BATADV_DIRECTLINK; 188 batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
187 else 189 else
188 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK; 190 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
189 191
190 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? 192 if (packet_num > 0 || !forw_packet->own)
191 "Sending own" : 193 fwd_str = "Forwarding";
192 "Forwarding")); 194 else
195 fwd_str = "Sending own";
196
193 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 197 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
194 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", 198 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
195 fwd_str, (packet_num > 0 ? "aggregated " : ""), 199 fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
204 buff_pos += BATADV_OGM_HLEN; 208 buff_pos += BATADV_OGM_HLEN;
205 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes); 209 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
206 packet_num++; 210 packet_num++;
207 batadv_ogm_packet = (struct batadv_ogm_packet *) 211 packet_pos = forw_packet->skb->data + buff_pos;
208 (forw_packet->skb->data + buff_pos); 212 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
209 } 213 }
210 214
211 /* create clone because function is called more than once */ 215 /* create clone because function is called more than once */
@@ -227,9 +231,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
227 struct batadv_hard_iface *primary_if = NULL; 231 struct batadv_hard_iface *primary_if = NULL;
228 struct batadv_ogm_packet *batadv_ogm_packet; 232 struct batadv_ogm_packet *batadv_ogm_packet;
229 unsigned char directlink; 233 unsigned char directlink;
234 uint8_t *packet_pos;
230 235
231 batadv_ogm_packet = (struct batadv_ogm_packet *) 236 packet_pos = forw_packet->skb->data;
232 (forw_packet->skb->data); 237 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
233 directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0); 238 directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
234 239
235 if (!forw_packet->if_incoming) { 240 if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
454 int packet_len, bool direct_link) 459 int packet_len, bool direct_link)
455{ 460{
456 unsigned char *skb_buff; 461 unsigned char *skb_buff;
462 unsigned long new_direct_link_flag;
457 463
458 skb_buff = skb_put(forw_packet_aggr->skb, packet_len); 464 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
459 memcpy(skb_buff, packet_buff, packet_len); 465 memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
461 forw_packet_aggr->num_packets++; 467 forw_packet_aggr->num_packets++;
462 468
463 /* save packet direct link flag status */ 469 /* save packet direct link flag status */
464 if (direct_link) 470 if (direct_link) {
465 forw_packet_aggr->direct_link_flags |= 471 new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
466 (1 << forw_packet_aggr->num_packets); 472 forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
473 }
467} 474}
468 475
469static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, 476static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
586 struct batadv_ogm_packet *batadv_ogm_packet; 593 struct batadv_ogm_packet *batadv_ogm_packet;
587 struct batadv_hard_iface *primary_if; 594 struct batadv_hard_iface *primary_if;
588 int vis_server, tt_num_changes = 0; 595 int vis_server, tt_num_changes = 0;
596 uint32_t seqno;
597 uint8_t bandwidth;
589 598
590 vis_server = atomic_read(&bat_priv->vis_mode); 599 vis_server = atomic_read(&bat_priv->vis_mode);
591 primary_if = batadv_primary_if_get_selected(bat_priv); 600 primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
599 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff; 608 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
600 609
601 /* change sequence number to network order */ 610 /* change sequence number to network order */
602 batadv_ogm_packet->seqno = 611 seqno = (uint32_t)atomic_read(&hard_iface->seqno);
603 htonl((uint32_t)atomic_read(&hard_iface->seqno)); 612 batadv_ogm_packet->seqno = htonl(seqno);
604 atomic_inc(&hard_iface->seqno); 613 atomic_inc(&hard_iface->seqno);
605 614
606 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); 615 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
607 batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc); 616 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
608 if (tt_num_changes >= 0) 617 if (tt_num_changes >= 0)
609 batadv_ogm_packet->tt_num_changes = tt_num_changes; 618 batadv_ogm_packet->tt_num_changes = tt_num_changes;
610 619
@@ -613,12 +622,13 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
613 else 622 else
614 batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER; 623 batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
615 624
616 if ((hard_iface == primary_if) && 625 if (hard_iface == primary_if &&
617 (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER)) 626 atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
618 batadv_ogm_packet->gw_flags = 627 bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
619 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 628 batadv_ogm_packet->gw_flags = bandwidth;
620 else 629 } else {
621 batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS; 630 batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
631 }
622 632
623 batadv_slide_own_bcast_window(hard_iface); 633 batadv_slide_own_bcast_window(hard_iface);
624 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, 634 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -645,6 +655,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
645 int if_num; 655 int if_num;
646 uint8_t sum_orig, sum_neigh; 656 uint8_t sum_orig, sum_neigh;
647 uint8_t *neigh_addr; 657 uint8_t *neigh_addr;
658 uint8_t tq_avg;
648 659
649 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 660 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
650 "update_originator(): Searching and updating originator entry of received packet\n"); 661 "update_originator(): Searching and updating originator entry of received packet\n");
@@ -668,8 +679,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
668 spin_lock_bh(&tmp_neigh_node->lq_update_lock); 679 spin_lock_bh(&tmp_neigh_node->lq_update_lock);
669 batadv_ring_buffer_set(tmp_neigh_node->tq_recv, 680 batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
670 &tmp_neigh_node->tq_index, 0); 681 &tmp_neigh_node->tq_index, 0);
671 tmp_neigh_node->tq_avg = 682 tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
672 batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); 683 tmp_neigh_node->tq_avg = tq_avg;
673 spin_unlock_bh(&tmp_neigh_node->lq_update_lock); 684 spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
674 } 685 }
675 686
@@ -836,8 +847,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
836 spin_unlock_bh(&orig_node->ogm_cnt_lock); 847 spin_unlock_bh(&orig_node->ogm_cnt_lock);
837 848
838 /* pay attention to not get a value bigger than 100 % */ 849 /* pay attention to not get a value bigger than 100 % */
839 total_count = (orig_eq_count > neigh_rq_count ? 850 if (orig_eq_count > neigh_rq_count)
840 neigh_rq_count : orig_eq_count); 851 total_count = neigh_rq_count;
852 else
853 total_count = orig_eq_count;
841 854
842 /* if we have too few packets (too less data) we set tq_own to zero 855 /* if we have too few packets (too less data) we set tq_own to zero
843 * if we receive too few packets it is not considered bidirectional 856 * if we receive too few packets it is not considered bidirectional
@@ -911,6 +924,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
911 int set_mark, ret = -1; 924 int set_mark, ret = -1;
912 uint32_t seqno = ntohl(batadv_ogm_packet->seqno); 925 uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
913 uint8_t *neigh_addr; 926 uint8_t *neigh_addr;
927 uint8_t packet_count;
914 928
915 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); 929 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
916 if (!orig_node) 930 if (!orig_node)
@@ -945,9 +959,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
945 tmp_neigh_node->real_bits, 959 tmp_neigh_node->real_bits,
946 seq_diff, set_mark); 960 seq_diff, set_mark);
947 961
948 tmp_neigh_node->real_packet_count = 962 packet_count = bitmap_weight(tmp_neigh_node->real_bits,
949 bitmap_weight(tmp_neigh_node->real_bits, 963 BATADV_TQ_LOCAL_WINDOW_SIZE);
950 BATADV_TQ_LOCAL_WINDOW_SIZE); 964 tmp_neigh_node->real_packet_count = packet_count;
951 } 965 }
952 rcu_read_unlock(); 966 rcu_read_unlock();
953 967
@@ -1164,9 +1178,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1164 /* if sender is a direct neighbor the sender mac equals 1178 /* if sender is a direct neighbor the sender mac equals
1165 * originator mac 1179 * originator mac
1166 */ 1180 */
1167 orig_neigh_node = (is_single_hop_neigh ? 1181 if (is_single_hop_neigh)
1168 orig_node : 1182 orig_neigh_node = orig_node;
1169 batadv_get_orig_node(bat_priv, ethhdr->h_source)); 1183 else
1184 orig_neigh_node = batadv_get_orig_node(bat_priv,
1185 ethhdr->h_source);
1186
1170 if (!orig_neigh_node) 1187 if (!orig_neigh_node)
1171 goto out; 1188 goto out;
1172 1189
@@ -1252,6 +1269,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1252 int buff_pos = 0, packet_len; 1269 int buff_pos = 0, packet_len;
1253 unsigned char *tt_buff, *packet_buff; 1270 unsigned char *tt_buff, *packet_buff;
1254 bool ret; 1271 bool ret;
1272 uint8_t *packet_pos;
1255 1273
1256 ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN); 1274 ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
1257 if (!ret) 1275 if (!ret)
@@ -1282,8 +1300,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1282 buff_pos += BATADV_OGM_HLEN; 1300 buff_pos += BATADV_OGM_HLEN;
1283 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes); 1301 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
1284 1302
1285 batadv_ogm_packet = (struct batadv_ogm_packet *) 1303 packet_pos = packet_buff + buff_pos;
1286 (packet_buff + buff_pos); 1304 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
1287 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, 1305 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
1288 batadv_ogm_packet->tt_num_changes)); 1306 batadv_ogm_packet->tt_num_changes));
1289 1307
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6705d35b17ce..0a9084ad19a6 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
133static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, 133static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
134 struct batadv_claim *data) 134 struct batadv_claim *data)
135{ 135{
136 struct batadv_hashtable *hash = bat_priv->claim_hash; 136 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
137 struct hlist_head *head; 137 struct hlist_head *head;
138 struct hlist_node *node; 138 struct hlist_node *node;
139 struct batadv_claim *claim; 139 struct batadv_claim *claim;
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw *
174batadv_backbone_hash_find(struct batadv_priv *bat_priv, 174batadv_backbone_hash_find(struct batadv_priv *bat_priv,
175 uint8_t *addr, short vid) 175 uint8_t *addr, short vid)
176{ 176{
177 struct batadv_hashtable *hash = bat_priv->backbone_hash; 177 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
178 struct hlist_head *head; 178 struct hlist_head *head;
179 struct hlist_node *node; 179 struct hlist_node *node;
180 struct batadv_backbone_gw search_entry, *backbone_gw; 180 struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
218 int i; 218 int i;
219 spinlock_t *list_lock; /* protects write access to the hash lists */ 219 spinlock_t *list_lock; /* protects write access to the hash lists */
220 220
221 hash = backbone_gw->bat_priv->claim_hash; 221 hash = backbone_gw->bat_priv->bla.claim_hash;
222 if (!hash) 222 if (!hash)
223 return; 223 return;
224 224
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
265 if (!primary_if) 265 if (!primary_if)
266 return; 266 return;
267 267
268 memcpy(&local_claim_dest, &bat_priv->claim_dest, 268 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
269 sizeof(local_claim_dest)); 269 sizeof(local_claim_dest));
270 local_claim_dest.type = claimtype; 270 local_claim_dest.type = claimtype;
271 271
@@ -281,7 +281,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
281 NULL, 281 NULL,
282 /* Ethernet SRC/HW SRC: originator mac */ 282 /* Ethernet SRC/HW SRC: originator mac */
283 primary_if->net_dev->dev_addr, 283 primary_if->net_dev->dev_addr,
284 /* HW DST: FF:43:05:XX:00:00 284 /* HW DST: FF:43:05:XX:YY:YY
285 * with XX = claim type 285 * with XX = claim type
286 * and YY:YY = group id 286 * and YY:YY = group id
287 */ 287 */
@@ -295,7 +295,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
295 295
296 /* now we pretend that the client would have sent this ... */ 296 /* now we pretend that the client would have sent this ... */
297 switch (claimtype) { 297 switch (claimtype) {
298 case BATADV_CLAIM_TYPE_ADD: 298 case BATADV_CLAIM_TYPE_CLAIM:
299 /* normal claim frame 299 /* normal claim frame
300 * set Ethernet SRC to the clients mac 300 * set Ethernet SRC to the clients mac
301 */ 301 */
@@ -303,7 +303,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
303 batadv_dbg(BATADV_DBG_BLA, bat_priv, 303 batadv_dbg(BATADV_DBG_BLA, bat_priv,
304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); 304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
305 break; 305 break;
306 case BATADV_CLAIM_TYPE_DEL: 306 case BATADV_CLAIM_TYPE_UNCLAIM:
307 /* unclaim frame 307 /* unclaim frame
308 * set HW SRC to the clients mac 308 * set HW SRC to the clients mac
309 */ 309 */
@@ -323,7 +323,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
323 break; 323 break;
324 case BATADV_CLAIM_TYPE_REQUEST: 324 case BATADV_CLAIM_TYPE_REQUEST:
325 /* request frame 325 /* request frame
326 * set HW SRC to the special mac containg the crc 326 * set HW SRC and header destination to the receiving backbone
327 * gws mac
327 */ 328 */
328 memcpy(hw_src, mac, ETH_ALEN); 329 memcpy(hw_src, mac, ETH_ALEN);
329 memcpy(ethhdr->h_dest, mac, ETH_ALEN); 330 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
339 340
340 skb_reset_mac_header(skb); 341 skb_reset_mac_header(skb);
341 skb->protocol = eth_type_trans(skb, soft_iface); 342 skb->protocol = eth_type_trans(skb, soft_iface);
342 bat_priv->stats.rx_packets++; 343 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
343 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; 344 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
345 skb->len + ETH_HLEN);
344 soft_iface->last_rx = jiffies; 346 soft_iface->last_rx = jiffies;
345 347
346 netif_rx(skb); 348 netif_rx(skb);
@@ -389,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
389 /* one for the hash, one for returning */ 391 /* one for the hash, one for returning */
390 atomic_set(&entry->refcount, 2); 392 atomic_set(&entry->refcount, 2);
391 393
392 hash_added = batadv_hash_add(bat_priv->backbone_hash, 394 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
393 batadv_compare_backbone_gw, 395 batadv_compare_backbone_gw,
394 batadv_choose_backbone_gw, entry, 396 batadv_choose_backbone_gw, entry,
395 &entry->hash_entry); 397 &entry->hash_entry);
@@ -456,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
456 if (!backbone_gw) 458 if (!backbone_gw)
457 return; 459 return;
458 460
459 hash = bat_priv->claim_hash; 461 hash = bat_priv->bla.claim_hash;
460 for (i = 0; i < hash->size; i++) { 462 for (i = 0; i < hash->size; i++) {
461 head = &hash->table[i]; 463 head = &hash->table[i];
462 464
@@ -467,7 +469,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
467 continue; 469 continue;
468 470
469 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid, 471 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
470 BATADV_CLAIM_TYPE_ADD); 472 BATADV_CLAIM_TYPE_CLAIM);
471 } 473 }
472 rcu_read_unlock(); 474 rcu_read_unlock();
473 } 475 }
@@ -497,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
497 499
498 /* no local broadcasts should be sent or received, for now. */ 500 /* no local broadcasts should be sent or received, for now. */
499 if (!atomic_read(&backbone_gw->request_sent)) { 501 if (!atomic_read(&backbone_gw->request_sent)) {
500 atomic_inc(&backbone_gw->bat_priv->bla_num_requests); 502 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
501 atomic_set(&backbone_gw->request_sent, 1); 503 atomic_set(&backbone_gw->request_sent, 1);
502 } 504 }
503} 505}
@@ -557,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
557 batadv_dbg(BATADV_DBG_BLA, bat_priv, 559 batadv_dbg(BATADV_DBG_BLA, bat_priv,
558 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", 560 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
559 mac, vid); 561 mac, vid);
560 hash_added = batadv_hash_add(bat_priv->claim_hash, 562 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
561 batadv_compare_claim, 563 batadv_compare_claim,
562 batadv_choose_claim, claim, 564 batadv_choose_claim, claim,
563 &claim->hash_entry); 565 &claim->hash_entry);
@@ -577,8 +579,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
577 "bla_add_claim(): changing ownership for %pM, vid %d\n", 579 "bla_add_claim(): changing ownership for %pM, vid %d\n",
578 mac, vid); 580 mac, vid);
579 581
580 claim->backbone_gw->crc ^= 582 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
581 crc16(0, claim->addr, ETH_ALEN);
582 batadv_backbone_gw_free_ref(claim->backbone_gw); 583 batadv_backbone_gw_free_ref(claim->backbone_gw);
583 584
584 } 585 }
@@ -610,7 +611,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
610 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", 611 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
611 mac, vid); 612 mac, vid);
612 613
613 batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim, 614 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
614 batadv_choose_claim, claim); 615 batadv_choose_claim, claim);
615 batadv_claim_free_ref(claim); /* reference from the hash is gone */ 616 batadv_claim_free_ref(claim); /* reference from the hash is gone */
616 617
@@ -657,7 +658,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
657 * we can allow traffic again. 658 * we can allow traffic again.
658 */ 659 */
659 if (atomic_read(&backbone_gw->request_sent)) { 660 if (atomic_read(&backbone_gw->request_sent)) {
660 atomic_dec(&backbone_gw->bat_priv->bla_num_requests); 661 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
661 atomic_set(&backbone_gw->request_sent, 0); 662 atomic_set(&backbone_gw->request_sent, 0);
662 } 663 }
663 } 664 }
@@ -702,7 +703,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
702 if (primary_if && batadv_compare_eth(backbone_addr, 703 if (primary_if && batadv_compare_eth(backbone_addr,
703 primary_if->net_dev->dev_addr)) 704 primary_if->net_dev->dev_addr))
704 batadv_bla_send_claim(bat_priv, claim_addr, vid, 705 batadv_bla_send_claim(bat_priv, claim_addr, vid,
705 BATADV_CLAIM_TYPE_DEL); 706 BATADV_CLAIM_TYPE_UNCLAIM);
706 707
707 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid); 708 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
708 709
@@ -738,7 +739,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
738 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); 739 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
739 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) 740 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
740 batadv_bla_send_claim(bat_priv, claim_addr, vid, 741 batadv_bla_send_claim(bat_priv, claim_addr, vid,
741 BATADV_CLAIM_TYPE_ADD); 742 BATADV_CLAIM_TYPE_CLAIM);
742 743
743 /* TODO: we could call something like tt_local_del() here. */ 744 /* TODO: we could call something like tt_local_del() here. */
744 745
@@ -772,7 +773,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
772 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; 773 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
773 774
774 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 775 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
775 bla_dst_own = &bat_priv->claim_dest; 776 bla_dst_own = &bat_priv->bla.claim_dest;
776 777
777 /* check if it is a claim packet in general */ 778 /* check if it is a claim packet in general */
778 if (memcmp(bla_dst->magic, bla_dst_own->magic, 779 if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
783 * otherwise assume it is in the hw_src 784 * otherwise assume it is in the hw_src
784 */ 785 */
785 switch (bla_dst->type) { 786 switch (bla_dst->type) {
786 case BATADV_CLAIM_TYPE_ADD: 787 case BATADV_CLAIM_TYPE_CLAIM:
787 backbone_addr = hw_src; 788 backbone_addr = hw_src;
788 break; 789 break;
789 case BATADV_CLAIM_TYPE_REQUEST: 790 case BATADV_CLAIM_TYPE_REQUEST:
790 case BATADV_CLAIM_TYPE_ANNOUNCE: 791 case BATADV_CLAIM_TYPE_ANNOUNCE:
791 case BATADV_CLAIM_TYPE_DEL: 792 case BATADV_CLAIM_TYPE_UNCLAIM:
792 backbone_addr = ethhdr->h_source; 793 backbone_addr = ethhdr->h_source;
793 break; 794 break;
794 default: 795 default:
@@ -904,12 +905,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
904 905
905 /* check for the different types of claim frames ... */ 906 /* check for the different types of claim frames ... */
906 switch (bla_dst->type) { 907 switch (bla_dst->type) {
907 case BATADV_CLAIM_TYPE_ADD: 908 case BATADV_CLAIM_TYPE_CLAIM:
908 if (batadv_handle_claim(bat_priv, primary_if, hw_src, 909 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
909 ethhdr->h_source, vid)) 910 ethhdr->h_source, vid))
910 return 1; 911 return 1;
911 break; 912 break;
912 case BATADV_CLAIM_TYPE_DEL: 913 case BATADV_CLAIM_TYPE_UNCLAIM:
913 if (batadv_handle_unclaim(bat_priv, primary_if, 914 if (batadv_handle_unclaim(bat_priv, primary_if,
914 ethhdr->h_source, hw_src, vid)) 915 ethhdr->h_source, hw_src, vid))
915 return 1; 916 return 1;
@@ -945,7 +946,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
945 spinlock_t *list_lock; /* protects write access to the hash lists */ 946 spinlock_t *list_lock; /* protects write access to the hash lists */
946 int i; 947 int i;
947 948
948 hash = bat_priv->backbone_hash; 949 hash = bat_priv->bla.backbone_hash;
949 if (!hash) 950 if (!hash)
950 return; 951 return;
951 952
@@ -969,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
969purge_now: 970purge_now:
970 /* don't wait for the pending request anymore */ 971 /* don't wait for the pending request anymore */
971 if (atomic_read(&backbone_gw->request_sent)) 972 if (atomic_read(&backbone_gw->request_sent))
972 atomic_dec(&bat_priv->bla_num_requests); 973 atomic_dec(&bat_priv->bla.num_requests);
973 974
974 batadv_bla_del_backbone_claims(backbone_gw); 975 batadv_bla_del_backbone_claims(backbone_gw);
975 976
@@ -999,7 +1000,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
999 struct batadv_hashtable *hash; 1000 struct batadv_hashtable *hash;
1000 int i; 1001 int i;
1001 1002
1002 hash = bat_priv->claim_hash; 1003 hash = bat_priv->bla.claim_hash;
1003 if (!hash) 1004 if (!hash)
1004 return; 1005 return;
1005 1006
@@ -1046,11 +1047,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1046 struct hlist_node *node; 1047 struct hlist_node *node;
1047 struct hlist_head *head; 1048 struct hlist_head *head;
1048 struct batadv_hashtable *hash; 1049 struct batadv_hashtable *hash;
1050 __be16 group;
1049 int i; 1051 int i;
1050 1052
1051 /* reset bridge loop avoidance group id */ 1053 /* reset bridge loop avoidance group id */
1052 bat_priv->claim_dest.group = 1054 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1053 htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1055 bat_priv->bla.claim_dest.group = group;
1054 1056
1055 if (!oldif) { 1057 if (!oldif) {
1056 batadv_bla_purge_claims(bat_priv, NULL, 1); 1058 batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1058 return; 1060 return;
1059 } 1061 }
1060 1062
1061 hash = bat_priv->backbone_hash; 1063 hash = bat_priv->bla.backbone_hash;
1062 if (!hash) 1064 if (!hash)
1063 return; 1065 return;
1064 1066
@@ -1088,8 +1090,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1088/* (re)start the timer */ 1090/* (re)start the timer */
1089static void batadv_bla_start_timer(struct batadv_priv *bat_priv) 1091static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
1090{ 1092{
1091 INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work); 1093 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1092 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work, 1094 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1093 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); 1095 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1094} 1096}
1095 1097
@@ -1099,9 +1101,9 @@ static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
1099 */ 1101 */
1100static void batadv_bla_periodic_work(struct work_struct *work) 1102static void batadv_bla_periodic_work(struct work_struct *work)
1101{ 1103{
1102 struct delayed_work *delayed_work = 1104 struct delayed_work *delayed_work;
1103 container_of(work, struct delayed_work, work);
1104 struct batadv_priv *bat_priv; 1105 struct batadv_priv *bat_priv;
1106 struct batadv_priv_bla *priv_bla;
1105 struct hlist_node *node; 1107 struct hlist_node *node;
1106 struct hlist_head *head; 1108 struct hlist_head *head;
1107 struct batadv_backbone_gw *backbone_gw; 1109 struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1109 struct batadv_hard_iface *primary_if; 1111 struct batadv_hard_iface *primary_if;
1110 int i; 1112 int i;
1111 1113
1112 bat_priv = container_of(delayed_work, struct batadv_priv, bla_work); 1114 delayed_work = container_of(work, struct delayed_work, work);
1115 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1116 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1113 primary_if = batadv_primary_if_get_selected(bat_priv); 1117 primary_if = batadv_primary_if_get_selected(bat_priv);
1114 if (!primary_if) 1118 if (!primary_if)
1115 goto out; 1119 goto out;
@@ -1120,7 +1124,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1120 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1124 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1121 goto out; 1125 goto out;
1122 1126
1123 hash = bat_priv->backbone_hash; 1127 hash = bat_priv->bla.backbone_hash;
1124 if (!hash) 1128 if (!hash)
1125 goto out; 1129 goto out;
1126 1130
@@ -1160,40 +1164,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
1160 int i; 1164 int i;
1161 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; 1165 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1162 struct batadv_hard_iface *primary_if; 1166 struct batadv_hard_iface *primary_if;
1167 uint16_t crc;
1168 unsigned long entrytime;
1163 1169
1164 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); 1170 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1165 1171
1166 /* setting claim destination address */ 1172 /* setting claim destination address */
1167 memcpy(&bat_priv->claim_dest.magic, claim_dest, 3); 1173 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1168 bat_priv->claim_dest.type = 0; 1174 bat_priv->bla.claim_dest.type = 0;
1169 primary_if = batadv_primary_if_get_selected(bat_priv); 1175 primary_if = batadv_primary_if_get_selected(bat_priv);
1170 if (primary_if) { 1176 if (primary_if) {
1171 bat_priv->claim_dest.group = 1177 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1172 htons(crc16(0, primary_if->net_dev->dev_addr, 1178 bat_priv->bla.claim_dest.group = htons(crc);
1173 ETH_ALEN));
1174 batadv_hardif_free_ref(primary_if); 1179 batadv_hardif_free_ref(primary_if);
1175 } else { 1180 } else {
1176 bat_priv->claim_dest.group = 0; /* will be set later */ 1181 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1177 } 1182 }
1178 1183
1179 /* initialize the duplicate list */ 1184 /* initialize the duplicate list */
1185 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1180 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) 1186 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1181 bat_priv->bcast_duplist[i].entrytime = 1187 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1182 jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT); 1188 bat_priv->bla.bcast_duplist_curr = 0;
1183 bat_priv->bcast_duplist_curr = 0;
1184 1189
1185 if (bat_priv->claim_hash) 1190 if (bat_priv->bla.claim_hash)
1186 return 0; 1191 return 0;
1187 1192
1188 bat_priv->claim_hash = batadv_hash_new(128); 1193 bat_priv->bla.claim_hash = batadv_hash_new(128);
1189 bat_priv->backbone_hash = batadv_hash_new(32); 1194 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1190 1195
1191 if (!bat_priv->claim_hash || !bat_priv->backbone_hash) 1196 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1192 return -ENOMEM; 1197 return -ENOMEM;
1193 1198
1194 batadv_hash_set_lock_class(bat_priv->claim_hash, 1199 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1195 &batadv_claim_hash_lock_class_key); 1200 &batadv_claim_hash_lock_class_key);
1196 batadv_hash_set_lock_class(bat_priv->backbone_hash, 1201 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1197 &batadv_backbone_hash_lock_class_key); 1202 &batadv_backbone_hash_lock_class_key);
1198 1203
1199 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); 1204 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1234,8 +1239,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1234 crc = crc16(0, content, length); 1239 crc = crc16(0, content, length);
1235 1240
1236 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) { 1241 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1237 curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE; 1242 curr = (bat_priv->bla.bcast_duplist_curr + i);
1238 entry = &bat_priv->bcast_duplist[curr]; 1243 curr %= BATADV_DUPLIST_SIZE;
1244 entry = &bat_priv->bla.bcast_duplist[curr];
1239 1245
1240 /* we can stop searching if the entry is too old ; 1246 /* we can stop searching if the entry is too old ;
1241 * later entries will be even older 1247 * later entries will be even older
@@ -1256,13 +1262,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1256 return 1; 1262 return 1;
1257 } 1263 }
1258 /* not found, add a new entry (overwrite the oldest entry) */ 1264 /* not found, add a new entry (overwrite the oldest entry) */
1259 curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); 1265 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1260 curr %= BATADV_DUPLIST_SIZE; 1266 curr %= BATADV_DUPLIST_SIZE;
1261 entry = &bat_priv->bcast_duplist[curr]; 1267 entry = &bat_priv->bla.bcast_duplist[curr];
1262 entry->crc = crc; 1268 entry->crc = crc;
1263 entry->entrytime = jiffies; 1269 entry->entrytime = jiffies;
1264 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); 1270 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
1265 bat_priv->bcast_duplist_curr = curr; 1271 bat_priv->bla.bcast_duplist_curr = curr;
1266 1272
1267 /* allow it, its the first occurence. */ 1273 /* allow it, its the first occurence. */
1268 return 0; 1274 return 0;
@@ -1279,7 +1285,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1279 */ 1285 */
1280int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) 1286int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1281{ 1287{
1282 struct batadv_hashtable *hash = bat_priv->backbone_hash; 1288 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1283 struct hlist_head *head; 1289 struct hlist_head *head;
1284 struct hlist_node *node; 1290 struct hlist_node *node;
1285 struct batadv_backbone_gw *backbone_gw; 1291 struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1339 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr))) 1345 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1340 return 0; 1346 return 0;
1341 1347
1342 vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) + 1348 vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
1343 hdr_size);
1344 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 1349 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1345 } 1350 }
1346 1351
@@ -1359,18 +1364,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
1359{ 1364{
1360 struct batadv_hard_iface *primary_if; 1365 struct batadv_hard_iface *primary_if;
1361 1366
1362 cancel_delayed_work_sync(&bat_priv->bla_work); 1367 cancel_delayed_work_sync(&bat_priv->bla.work);
1363 primary_if = batadv_primary_if_get_selected(bat_priv); 1368 primary_if = batadv_primary_if_get_selected(bat_priv);
1364 1369
1365 if (bat_priv->claim_hash) { 1370 if (bat_priv->bla.claim_hash) {
1366 batadv_bla_purge_claims(bat_priv, primary_if, 1); 1371 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1367 batadv_hash_destroy(bat_priv->claim_hash); 1372 batadv_hash_destroy(bat_priv->bla.claim_hash);
1368 bat_priv->claim_hash = NULL; 1373 bat_priv->bla.claim_hash = NULL;
1369 } 1374 }
1370 if (bat_priv->backbone_hash) { 1375 if (bat_priv->bla.backbone_hash) {
1371 batadv_bla_purge_backbone_gw(bat_priv, 1); 1376 batadv_bla_purge_backbone_gw(bat_priv, 1);
1372 batadv_hash_destroy(bat_priv->backbone_hash); 1377 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1373 bat_priv->backbone_hash = NULL; 1378 bat_priv->bla.backbone_hash = NULL;
1374 } 1379 }
1375 if (primary_if) 1380 if (primary_if)
1376 batadv_hardif_free_ref(primary_if); 1381 batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
1409 goto allow; 1414 goto allow;
1410 1415
1411 1416
1412 if (unlikely(atomic_read(&bat_priv->bla_num_requests))) 1417 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1413 /* don't allow broadcasts while requests are in flight */ 1418 /* don't allow broadcasts while requests are in flight */
1414 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) 1419 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1415 goto handled; 1420 goto handled;
@@ -1508,7 +1513,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
1508 1513
1509 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1514 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1510 1515
1511 if (unlikely(atomic_read(&bat_priv->bla_num_requests))) 1516 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1512 /* don't allow broadcasts while requests are in flight */ 1517 /* don't allow broadcasts while requests are in flight */
1513 if (is_multicast_ether_addr(ethhdr->h_dest)) 1518 if (is_multicast_ether_addr(ethhdr->h_dest))
1514 goto handled; 1519 goto handled;
@@ -1564,7 +1569,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1564{ 1569{
1565 struct net_device *net_dev = (struct net_device *)seq->private; 1570 struct net_device *net_dev = (struct net_device *)seq->private;
1566 struct batadv_priv *bat_priv = netdev_priv(net_dev); 1571 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1567 struct batadv_hashtable *hash = bat_priv->claim_hash; 1572 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1568 struct batadv_claim *claim; 1573 struct batadv_claim *claim;
1569 struct batadv_hard_iface *primary_if; 1574 struct batadv_hard_iface *primary_if;
1570 struct hlist_node *node; 1575 struct hlist_node *node;
@@ -1593,7 +1598,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1593 seq_printf(seq, 1598 seq_printf(seq,
1594 "Claims announced for the mesh %s (orig %pM, group id %04x)\n", 1599 "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
1595 net_dev->name, primary_addr, 1600 net_dev->name, primary_addr,
1596 ntohs(bat_priv->claim_dest.group)); 1601 ntohs(bat_priv->bla.claim_dest.group));
1597 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", 1602 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1598 "Client", "VID", "Originator", "CRC"); 1603 "Client", "VID", "Originator", "CRC");
1599 for (i = 0; i < hash->size; i++) { 1604 for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@ out:
1616 batadv_hardif_free_ref(primary_if); 1621 batadv_hardif_free_ref(primary_if);
1617 return ret; 1622 return ret;
1618} 1623}
1624
1625int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1626{
1627 struct net_device *net_dev = (struct net_device *)seq->private;
1628 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1629 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1630 struct batadv_backbone_gw *backbone_gw;
1631 struct batadv_hard_iface *primary_if;
1632 struct hlist_node *node;
1633 struct hlist_head *head;
1634 int secs, msecs;
1635 uint32_t i;
1636 bool is_own;
1637 int ret = 0;
1638 uint8_t *primary_addr;
1639
1640 primary_if = batadv_primary_if_get_selected(bat_priv);
1641 if (!primary_if) {
1642 ret = seq_printf(seq,
1643 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1644 net_dev->name);
1645 goto out;
1646 }
1647
1648 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1649 ret = seq_printf(seq,
1650 "BATMAN mesh %s disabled - primary interface not active\n",
1651 net_dev->name);
1652 goto out;
1653 }
1654
1655 primary_addr = primary_if->net_dev->dev_addr;
1656 seq_printf(seq,
1657 "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
1658 net_dev->name, primary_addr,
1659 ntohs(bat_priv->bla.claim_dest.group));
1660 seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n",
1661 "Originator", "VID", "last seen", "CRC");
1662 for (i = 0; i < hash->size; i++) {
1663 head = &hash->table[i];
1664
1665 rcu_read_lock();
1666 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1667 msecs = jiffies_to_msecs(jiffies -
1668 backbone_gw->lasttime);
1669 secs = msecs / 1000;
1670 msecs = msecs % 1000;
1671
1672 is_own = batadv_compare_eth(backbone_gw->orig,
1673 primary_addr);
1674 if (is_own)
1675 continue;
1676
1677 seq_printf(seq,
1678 " * %pM on % 5d % 4i.%03is (%04x)\n",
1679 backbone_gw->orig, backbone_gw->vid,
1680 secs, msecs, backbone_gw->crc);
1681 }
1682 rcu_read_unlock();
1683 }
1684out:
1685 if (primary_if)
1686 batadv_hardif_free_ref(primary_if);
1687 return ret;
1688}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 563cfbf94a7f..789cb73bde67 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -27,6 +27,8 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
27int batadv_bla_is_backbone_gw(struct sk_buff *skb, 27int batadv_bla_is_backbone_gw(struct sk_buff *skb,
28 struct batadv_orig_node *orig_node, int hdr_size); 28 struct batadv_orig_node *orig_node, int hdr_size);
29int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); 29int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
30int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
31 void *offset);
30int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig); 32int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
31int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, 33int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
32 struct batadv_bcast_packet *bcast_packet, 34 struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
41#else /* ifdef CONFIG_BATMAN_ADV_BLA */ 43#else /* ifdef CONFIG_BATMAN_ADV_BLA */
42 44
43static inline int batadv_bla_rx(struct batadv_priv *bat_priv, 45static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
44 struct sk_buff *skb, short vid, 46 struct sk_buff *skb, short vid, bool is_bcast)
45 bool is_bcast)
46{ 47{
47 return 0; 48 return 0;
48} 49}
@@ -66,6 +67,12 @@ static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
66 return 0; 67 return 0;
67} 68}
68 69
70static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
71 void *offset)
72{
73 return 0;
74}
75
69static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, 76static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
70 uint8_t *orig) 77 uint8_t *orig)
71{ 78{
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 34fbb1667bcd..391d4fb2026f 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -267,6 +267,15 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
267 return single_open(file, batadv_bla_claim_table_seq_print_text, 267 return single_open(file, batadv_bla_claim_table_seq_print_text,
268 net_dev); 268 net_dev);
269} 269}
270
271static int batadv_bla_backbone_table_open(struct inode *inode,
272 struct file *file)
273{
274 struct net_device *net_dev = (struct net_device *)inode->i_private;
275 return single_open(file, batadv_bla_backbone_table_seq_print_text,
276 net_dev);
277}
278
270#endif 279#endif
271 280
272static int batadv_transtable_local_open(struct inode *inode, struct file *file) 281static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@ static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
305 batadv_transtable_global_open); 314 batadv_transtable_global_open);
306#ifdef CONFIG_BATMAN_ADV_BLA 315#ifdef CONFIG_BATMAN_ADV_BLA
307static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open); 316static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
317static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
318 batadv_bla_backbone_table_open);
308#endif 319#endif
309static BATADV_DEBUGINFO(transtable_local, S_IRUGO, 320static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
310 batadv_transtable_local_open); 321 batadv_transtable_local_open);
@@ -316,6 +327,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
316 &batadv_debuginfo_transtable_global, 327 &batadv_debuginfo_transtable_global,
317#ifdef CONFIG_BATMAN_ADV_BLA 328#ifdef CONFIG_BATMAN_ADV_BLA
318 &batadv_debuginfo_bla_claim_table, 329 &batadv_debuginfo_bla_claim_table,
330 &batadv_debuginfo_bla_backbone_table,
319#endif 331#endif
320 &batadv_debuginfo_transtable_local, 332 &batadv_debuginfo_transtable_local,
321 &batadv_debuginfo_vis_data, 333 &batadv_debuginfo_vis_data,
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index fc866f2e4528..15d67abc10a4 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
48 struct batadv_gw_node *gw_node; 48 struct batadv_gw_node *gw_node;
49 49
50 rcu_read_lock(); 50 rcu_read_lock();
51 gw_node = rcu_dereference(bat_priv->curr_gw); 51 gw_node = rcu_dereference(bat_priv->gw.curr_gw);
52 if (!gw_node) 52 if (!gw_node)
53 goto out; 53 goto out;
54 54
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
91{ 91{
92 struct batadv_gw_node *curr_gw_node; 92 struct batadv_gw_node *curr_gw_node;
93 93
94 spin_lock_bh(&bat_priv->gw_list_lock); 94 spin_lock_bh(&bat_priv->gw.list_lock);
95 95
96 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) 96 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
97 new_gw_node = NULL; 97 new_gw_node = NULL;
98 98
99 curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1); 99 curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
100 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 100 rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
101 101
102 if (curr_gw_node) 102 if (curr_gw_node)
103 batadv_gw_node_free_ref(curr_gw_node); 103 batadv_gw_node_free_ref(curr_gw_node);
104 104
105 spin_unlock_bh(&bat_priv->gw_list_lock); 105 spin_unlock_bh(&bat_priv->gw.list_lock);
106} 106}
107 107
108void batadv_gw_deselect(struct batadv_priv *bat_priv) 108void batadv_gw_deselect(struct batadv_priv *bat_priv)
109{ 109{
110 atomic_set(&bat_priv->gw_reselect, 1); 110 atomic_set(&bat_priv->gw.reselect, 1);
111} 111}
112 112
113static struct batadv_gw_node * 113static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
117 struct hlist_node *node; 117 struct hlist_node *node;
118 struct batadv_gw_node *gw_node, *curr_gw = NULL; 118 struct batadv_gw_node *gw_node, *curr_gw = NULL;
119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
120 uint32_t gw_divisor;
120 uint8_t max_tq = 0; 121 uint8_t max_tq = 0;
121 int down, up; 122 int down, up;
123 uint8_t tq_avg;
122 struct batadv_orig_node *orig_node; 124 struct batadv_orig_node *orig_node;
123 125
126 gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
127 gw_divisor *= 64;
128
124 rcu_read_lock(); 129 rcu_read_lock();
125 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 130 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
126 if (gw_node->deleted) 131 if (gw_node->deleted)
127 continue; 132 continue;
128 133
@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
134 if (!atomic_inc_not_zero(&gw_node->refcount)) 139 if (!atomic_inc_not_zero(&gw_node->refcount))
135 goto next; 140 goto next;
136 141
142 tq_avg = router->tq_avg;
143
137 switch (atomic_read(&bat_priv->gw_sel_class)) { 144 switch (atomic_read(&bat_priv->gw_sel_class)) {
138 case 1: /* fast connection */ 145 case 1: /* fast connection */
139 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags, 146 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
140 &down, &up); 147 &down, &up);
141 148
142 tmp_gw_factor = (router->tq_avg * router->tq_avg * 149 tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
143 down * 100 * 100) / 150 tmp_gw_factor /= gw_divisor;
144 (BATADV_TQ_LOCAL_WINDOW_SIZE *
145 BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
146 151
147 if ((tmp_gw_factor > max_gw_factor) || 152 if ((tmp_gw_factor > max_gw_factor) ||
148 ((tmp_gw_factor == max_gw_factor) && 153 ((tmp_gw_factor == max_gw_factor) &&
149 (router->tq_avg > max_tq))) { 154 (tq_avg > max_tq))) {
150 if (curr_gw) 155 if (curr_gw)
151 batadv_gw_node_free_ref(curr_gw); 156 batadv_gw_node_free_ref(curr_gw);
152 curr_gw = gw_node; 157 curr_gw = gw_node;
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
161 * soon as a better gateway appears which has 166 * soon as a better gateway appears which has
162 * $routing_class more tq points) 167 * $routing_class more tq points)
163 */ 168 */
164 if (router->tq_avg > max_tq) { 169 if (tq_avg > max_tq) {
165 if (curr_gw) 170 if (curr_gw)
166 batadv_gw_node_free_ref(curr_gw); 171 batadv_gw_node_free_ref(curr_gw);
167 curr_gw = gw_node; 172 curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
170 break; 175 break;
171 } 176 }
172 177
173 if (router->tq_avg > max_tq) 178 if (tq_avg > max_tq)
174 max_tq = router->tq_avg; 179 max_tq = tq_avg;
175 180
176 if (tmp_gw_factor > max_gw_factor) 181 if (tmp_gw_factor > max_gw_factor)
177 max_gw_factor = tmp_gw_factor; 182 max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
202 207
203 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 208 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
204 209
205 if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw) 210 if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
206 goto out; 211 goto out;
207 212
208 next_gw = batadv_gw_get_best_gw_node(bat_priv); 213 next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
321 gw_node->orig_node = orig_node; 326 gw_node->orig_node = orig_node;
322 atomic_set(&gw_node->refcount, 1); 327 atomic_set(&gw_node->refcount, 1);
323 328
324 spin_lock_bh(&bat_priv->gw_list_lock); 329 spin_lock_bh(&bat_priv->gw.list_lock);
325 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); 330 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
326 spin_unlock_bh(&bat_priv->gw_list_lock); 331 spin_unlock_bh(&bat_priv->gw.list_lock);
327 332
328 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up); 333 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
329 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 334 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
350 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 355 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
351 356
352 rcu_read_lock(); 357 rcu_read_lock();
353 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 358 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
354 if (gw_node->orig_node != orig_node) 359 if (gw_node->orig_node != orig_node)
355 continue; 360 continue;
356 361
@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
404 409
405 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 410 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
406 411
407 spin_lock_bh(&bat_priv->gw_list_lock); 412 spin_lock_bh(&bat_priv->gw.list_lock);
408 413
409 hlist_for_each_entry_safe(gw_node, node, node_tmp, 414 hlist_for_each_entry_safe(gw_node, node, node_tmp,
410 &bat_priv->gw_list, list) { 415 &bat_priv->gw.list, list) {
411 if (((!gw_node->deleted) || 416 if (((!gw_node->deleted) ||
412 (time_before(jiffies, gw_node->deleted + timeout))) && 417 (time_before(jiffies, gw_node->deleted + timeout))) &&
413 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) 418 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
420 batadv_gw_node_free_ref(gw_node); 425 batadv_gw_node_free_ref(gw_node);
421 } 426 }
422 427
423 spin_unlock_bh(&bat_priv->gw_list_lock); 428 spin_unlock_bh(&bat_priv->gw.list_lock);
424 429
425 /* gw_deselect() needs to acquire the gw_list_lock */ 430 /* gw_deselect() needs to acquire the gw_list_lock */
426 if (do_deselect) 431 if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
496 primary_if->net_dev->dev_addr, net_dev->name); 501 primary_if->net_dev->dev_addr, net_dev->name);
497 502
498 rcu_read_lock(); 503 rcu_read_lock();
499 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 504 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
500 if (gw_node->deleted) 505 if (gw_node->deleted)
501 continue; 506 continue;
502 507
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 282bf6e9353e..d112fd6750b0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
103{ 103{
104 struct batadv_vis_packet *vis_packet; 104 struct batadv_vis_packet *vis_packet;
105 struct batadv_hard_iface *primary_if; 105 struct batadv_hard_iface *primary_if;
106 struct sk_buff *skb;
106 107
107 primary_if = batadv_primary_if_get_selected(bat_priv); 108 primary_if = batadv_primary_if_get_selected(bat_priv);
108 if (!primary_if) 109 if (!primary_if)
109 goto out; 110 goto out;
110 111
111 vis_packet = (struct batadv_vis_packet *) 112 skb = bat_priv->vis.my_info->skb_packet;
112 bat_priv->my_vis_info->skb_packet->data; 113 vis_packet = (struct batadv_vis_packet *)skb->data;
113 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 114 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
114 memcpy(vis_packet->sender_orig, 115 memcpy(vis_packet->sender_orig,
115 primary_if->net_dev->dev_addr, ETH_ALEN); 116 primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
313 hard_iface->if_num = bat_priv->num_ifaces; 314 hard_iface->if_num = bat_priv->num_ifaces;
314 bat_priv->num_ifaces++; 315 bat_priv->num_ifaces++;
315 hard_iface->if_status = BATADV_IF_INACTIVE; 316 hard_iface->if_status = BATADV_IF_INACTIVE;
316 batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces); 317 ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
318 if (ret < 0) {
319 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
320 bat_priv->num_ifaces--;
321 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
322 goto err_dev;
323 }
317 324
318 hard_iface->batman_adv_ptype.type = ethertype; 325 hard_iface->batman_adv_ptype.type = ethertype;
319 hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv; 326 hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 13c88b25ab31..b4aa470bc4a6 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -58,9 +58,6 @@ static int __init batadv_init(void)
58 58
59 batadv_iv_init(); 59 batadv_iv_init();
60 60
61 /* the name should not be longer than 10 chars - see
62 * http://lwn.net/Articles/23634/
63 */
64 batadv_event_workqueue = create_singlethread_workqueue("bat_events"); 61 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
65 62
66 if (!batadv_event_workqueue) 63 if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
97 94
98 spin_lock_init(&bat_priv->forw_bat_list_lock); 95 spin_lock_init(&bat_priv->forw_bat_list_lock);
99 spin_lock_init(&bat_priv->forw_bcast_list_lock); 96 spin_lock_init(&bat_priv->forw_bcast_list_lock);
100 spin_lock_init(&bat_priv->tt_changes_list_lock); 97 spin_lock_init(&bat_priv->tt.changes_list_lock);
101 spin_lock_init(&bat_priv->tt_req_list_lock); 98 spin_lock_init(&bat_priv->tt.req_list_lock);
102 spin_lock_init(&bat_priv->tt_roam_list_lock); 99 spin_lock_init(&bat_priv->tt.roam_list_lock);
103 spin_lock_init(&bat_priv->tt_buff_lock); 100 spin_lock_init(&bat_priv->tt.last_changeset_lock);
104 spin_lock_init(&bat_priv->gw_list_lock); 101 spin_lock_init(&bat_priv->gw.list_lock);
105 spin_lock_init(&bat_priv->vis_hash_lock); 102 spin_lock_init(&bat_priv->vis.hash_lock);
106 spin_lock_init(&bat_priv->vis_list_lock); 103 spin_lock_init(&bat_priv->vis.list_lock);
107 104
108 INIT_HLIST_HEAD(&bat_priv->forw_bat_list); 105 INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
109 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); 106 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
110 INIT_HLIST_HEAD(&bat_priv->gw_list); 107 INIT_HLIST_HEAD(&bat_priv->gw.list);
111 INIT_LIST_HEAD(&bat_priv->tt_changes_list); 108 INIT_LIST_HEAD(&bat_priv->tt.changes_list);
112 INIT_LIST_HEAD(&bat_priv->tt_req_list); 109 INIT_LIST_HEAD(&bat_priv->tt.req_list);
113 INIT_LIST_HEAD(&bat_priv->tt_roam_list); 110 INIT_LIST_HEAD(&bat_priv->tt.roam_list);
114 111
115 ret = batadv_originator_init(bat_priv); 112 ret = batadv_originator_init(bat_priv);
116 if (ret < 0) 113 if (ret < 0)
@@ -131,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
131 if (ret < 0) 128 if (ret < 0)
132 goto err; 129 goto err;
133 130
134 atomic_set(&bat_priv->gw_reselect, 0); 131 atomic_set(&bat_priv->gw.reselect, 0);
135 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); 132 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
136 133
137 return 0; 134 return 0;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 5d8fa0757947..d57b746219de 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
26#define BATADV_DRIVER_DEVICE "batman-adv" 26#define BATADV_DRIVER_DEVICE "batman-adv"
27 27
28#ifndef BATADV_SOURCE_VERSION 28#ifndef BATADV_SOURCE_VERSION
29#define BATADV_SOURCE_VERSION "2012.3.0" 29#define BATADV_SOURCE_VERSION "2012.4.0"
30#endif 30#endif
31 31
32/* B.A.T.M.A.N. parameters */ 32/* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@
41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE 41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
42 */ 42 */
43#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */ 43#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */ 44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */ 45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
46#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
46/* sliding packet range of received originator messages in sequence numbers 47/* sliding packet range of received originator messages in sequence numbers
47 * (should be a multiple of our word size) 48 * (should be a multiple of our word size)
48 */ 49 */
49#define BATADV_TQ_LOCAL_WINDOW_SIZE 64 50#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
50/* miliseconds we have to keep pending tt_req */ 51/* milliseconds we have to keep pending tt_req */
51#define BATADV_TT_REQUEST_TIMEOUT 3000 52#define BATADV_TT_REQUEST_TIMEOUT 3000
52 53
53#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5 54#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
59#define BATADV_TT_OGM_APPEND_MAX 3 60#define BATADV_TT_OGM_APPEND_MAX 3
60 61
61/* Time in which a client can roam at most ROAMING_MAX_COUNT times in 62/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
62 * miliseconds 63 * milliseconds
63 */ 64 */
64#define BATADV_ROAMING_MAX_TIME 20000 65#define BATADV_ROAMING_MAX_TIME 20000
65#define BATADV_ROAMING_MAX_COUNT 5 66#define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@ enum batadv_uev_type {
123/* Append 'batman-adv: ' before kernel messages */ 124/* Append 'batman-adv: ' before kernel messages */
124#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 125#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
125 126
126/* all messages related to routing / flooding / broadcasting / etc */
127enum batadv_dbg_level {
128 BATADV_DBG_BATMAN = 1 << 0,
129 BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
130 BATADV_DBG_TT = 1 << 2, /* translation table operations */
131 BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
132 BATADV_DBG_ALL = 15,
133};
134
135/* Kernel headers */ 127/* Kernel headers */
136 128
137#include <linux/mutex.h> /* mutex */ 129#include <linux/mutex.h> /* mutex */
@@ -173,6 +165,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
173int batadv_algo_select(struct batadv_priv *bat_priv, char *name); 165int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
174int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); 166int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
175 167
168/* all messages related to routing / flooding / broadcasting / etc */
169enum batadv_dbg_level {
170 BATADV_DBG_BATMAN = BIT(0),
171 BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
172 BATADV_DBG_TT = BIT(2), /* translation table operations */
173 BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */
174 BATADV_DBG_ALL = 15,
175};
176
176#ifdef CONFIG_BATMAN_ADV_DEBUG 177#ifdef CONFIG_BATMAN_ADV_DEBUG
177int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) 178int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
178__printf(2, 3); 179__printf(2, 3);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 8d3e55a96adc..2d23a14c220e 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -37,10 +37,10 @@ enum batadv_packettype {
37#define BATADV_COMPAT_VERSION 14 37#define BATADV_COMPAT_VERSION 14
38 38
39enum batadv_iv_flags { 39enum batadv_iv_flags {
40 BATADV_NOT_BEST_NEXT_HOP = 1 << 3, 40 BATADV_NOT_BEST_NEXT_HOP = BIT(3),
41 BATADV_PRIMARIES_FIRST_HOP = 1 << 4, 41 BATADV_PRIMARIES_FIRST_HOP = BIT(4),
42 BATADV_VIS_SERVER = 1 << 5, 42 BATADV_VIS_SERVER = BIT(5),
43 BATADV_DIRECTLINK = 1 << 6, 43 BATADV_DIRECTLINK = BIT(6),
44}; 44};
45 45
46/* ICMP message types */ 46/* ICMP message types */
@@ -60,8 +60,8 @@ enum batadv_vis_packettype {
60 60
61/* fragmentation defines */ 61/* fragmentation defines */
62enum batadv_unicast_frag_flags { 62enum batadv_unicast_frag_flags {
63 BATADV_UNI_FRAG_HEAD = 1 << 0, 63 BATADV_UNI_FRAG_HEAD = BIT(0),
64 BATADV_UNI_FRAG_LARGETAIL = 1 << 1, 64 BATADV_UNI_FRAG_LARGETAIL = BIT(1),
65}; 65};
66 66
67/* TT_QUERY subtypes */ 67/* TT_QUERY subtypes */
@@ -74,26 +74,27 @@ enum batadv_tt_query_packettype {
74 74
75/* TT_QUERY flags */ 75/* TT_QUERY flags */
76enum batadv_tt_query_flags { 76enum batadv_tt_query_flags {
77 BATADV_TT_FULL_TABLE = 1 << 2, 77 BATADV_TT_FULL_TABLE = BIT(2),
78}; 78};
79 79
80/* BATADV_TT_CLIENT flags. 80/* BATADV_TT_CLIENT flags.
81 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to 81 * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
82 * 1 << 15 are used for local computation only 82 * BIT(15) are used for local computation only
83 */ 83 */
84enum batadv_tt_client_flags { 84enum batadv_tt_client_flags {
85 BATADV_TT_CLIENT_DEL = 1 << 0, 85 BATADV_TT_CLIENT_DEL = BIT(0),
86 BATADV_TT_CLIENT_ROAM = 1 << 1, 86 BATADV_TT_CLIENT_ROAM = BIT(1),
87 BATADV_TT_CLIENT_WIFI = 1 << 2, 87 BATADV_TT_CLIENT_WIFI = BIT(2),
88 BATADV_TT_CLIENT_NOPURGE = 1 << 8, 88 BATADV_TT_CLIENT_TEMP = BIT(3),
89 BATADV_TT_CLIENT_NEW = 1 << 9, 89 BATADV_TT_CLIENT_NOPURGE = BIT(8),
90 BATADV_TT_CLIENT_PENDING = 1 << 10, 90 BATADV_TT_CLIENT_NEW = BIT(9),
91 BATADV_TT_CLIENT_PENDING = BIT(10),
91}; 92};
92 93
93/* claim frame types for the bridge loop avoidance */ 94/* claim frame types for the bridge loop avoidance */
94enum batadv_bla_claimframe { 95enum batadv_bla_claimframe {
95 BATADV_CLAIM_TYPE_ADD = 0x00, 96 BATADV_CLAIM_TYPE_CLAIM = 0x00,
96 BATADV_CLAIM_TYPE_DEL = 0x01, 97 BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
97 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02, 98 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
98 BATADV_CLAIM_TYPE_REQUEST = 0x03, 99 BATADV_CLAIM_TYPE_REQUEST = 0x03,
99}; 100};
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bc2b88bbea1f..939fc01371df 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -579,32 +579,45 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
579 return router; 579 return router;
580} 580}
581 581
582int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) 582static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
583{ 583{
584 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
585 struct batadv_tt_query_packet *tt_query;
586 uint16_t tt_size;
587 struct ethhdr *ethhdr; 584 struct ethhdr *ethhdr;
588 char tt_flag;
589 size_t packet_size;
590 585
591 /* drop packet if it has not necessary minimum size */ 586 /* drop packet if it has not necessary minimum size */
592 if (unlikely(!pskb_may_pull(skb, 587 if (unlikely(!pskb_may_pull(skb, hdr_size)))
593 sizeof(struct batadv_tt_query_packet)))) 588 return -1;
594 goto out;
595
596 /* I could need to modify it */
597 if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
598 goto out;
599 589
600 ethhdr = (struct ethhdr *)skb_mac_header(skb); 590 ethhdr = (struct ethhdr *)skb_mac_header(skb);
601 591
602 /* packet with unicast indication but broadcast recipient */ 592 /* packet with unicast indication but broadcast recipient */
603 if (is_broadcast_ether_addr(ethhdr->h_dest)) 593 if (is_broadcast_ether_addr(ethhdr->h_dest))
604 goto out; 594 return -1;
605 595
606 /* packet with broadcast sender address */ 596 /* packet with broadcast sender address */
607 if (is_broadcast_ether_addr(ethhdr->h_source)) 597 if (is_broadcast_ether_addr(ethhdr->h_source))
598 return -1;
599
600 /* not for me */
601 if (!batadv_is_my_mac(ethhdr->h_dest))
602 return -1;
603
604 return 0;
605}
606
607int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
608{
609 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
610 struct batadv_tt_query_packet *tt_query;
611 uint16_t tt_size;
612 int hdr_size = sizeof(*tt_query);
613 char tt_flag;
614 size_t packet_size;
615
616 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
617 return NET_RX_DROP;
618
619 /* I could need to modify it */
620 if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
608 goto out; 621 goto out;
609 622
610 tt_query = (struct batadv_tt_query_packet *)skb->data; 623 tt_query = (struct batadv_tt_query_packet *)skb->data;
@@ -721,7 +734,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
721 * been incremented yet. This flag will make me check all the incoming 734 * been incremented yet. This flag will make me check all the incoming
722 * packets for the correct destination. 735 * packets for the correct destination.
723 */ 736 */
724 bat_priv->tt_poss_change = true; 737 bat_priv->tt.poss_change = true;
725 738
726 batadv_orig_node_free_ref(orig_node); 739 batadv_orig_node_free_ref(orig_node);
727out: 740out:
@@ -819,31 +832,6 @@ err:
819 return NULL; 832 return NULL;
820} 833}
821 834
822static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
823{
824 struct ethhdr *ethhdr;
825
826 /* drop packet if it has not necessary minimum size */
827 if (unlikely(!pskb_may_pull(skb, hdr_size)))
828 return -1;
829
830 ethhdr = (struct ethhdr *)skb_mac_header(skb);
831
832 /* packet with unicast indication but broadcast recipient */
833 if (is_broadcast_ether_addr(ethhdr->h_dest))
834 return -1;
835
836 /* packet with broadcast sender address */
837 if (is_broadcast_ether_addr(ethhdr->h_source))
838 return -1;
839
840 /* not for me */
841 if (!batadv_is_my_mac(ethhdr->h_dest))
842 return -1;
843
844 return 0;
845}
846
847static int batadv_route_unicast_packet(struct sk_buff *skb, 835static int batadv_route_unicast_packet(struct sk_buff *skb,
848 struct batadv_hard_iface *recv_if) 836 struct batadv_hard_iface *recv_if)
849{ 837{
@@ -947,8 +935,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
947 unicast_packet = (struct batadv_unicast_packet *)skb->data; 935 unicast_packet = (struct batadv_unicast_packet *)skb->data;
948 936
949 if (batadv_is_my_mac(unicast_packet->dest)) { 937 if (batadv_is_my_mac(unicast_packet->dest)) {
950 tt_poss_change = bat_priv->tt_poss_change; 938 tt_poss_change = bat_priv->tt.poss_change;
951 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 939 curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
952 } else { 940 } else {
953 orig_node = batadv_orig_hash_find(bat_priv, 941 orig_node = batadv_orig_hash_find(bat_priv,
954 unicast_packet->dest); 942 unicast_packet->dest);
@@ -993,8 +981,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
993 } else { 981 } else {
994 memcpy(unicast_packet->dest, orig_node->orig, 982 memcpy(unicast_packet->dest, orig_node->orig,
995 ETH_ALEN); 983 ETH_ALEN);
996 curr_ttvn = (uint8_t) 984 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
997 atomic_read(&orig_node->last_ttvn);
998 batadv_orig_node_free_ref(orig_node); 985 batadv_orig_node_free_ref(orig_node);
999 } 986 }
1000 987
@@ -1025,8 +1012,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1025 1012
1026 /* packet for me */ 1013 /* packet for me */
1027 if (batadv_is_my_mac(unicast_packet->dest)) { 1014 if (batadv_is_my_mac(unicast_packet->dest)) {
1028 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, 1015 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
1029 hdr_size); 1016 NULL);
1017
1030 return NET_RX_SUCCESS; 1018 return NET_RX_SUCCESS;
1031 } 1019 }
1032 1020
@@ -1063,7 +1051,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
1063 return NET_RX_SUCCESS; 1051 return NET_RX_SUCCESS;
1064 1052
1065 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if, 1053 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
1066 sizeof(struct batadv_unicast_packet)); 1054 sizeof(struct batadv_unicast_packet), NULL);
1067 return NET_RX_SUCCESS; 1055 return NET_RX_SUCCESS;
1068 } 1056 }
1069 1057
@@ -1150,7 +1138,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
1150 goto out; 1138 goto out;
1151 1139
1152 /* broadcast for me */ 1140 /* broadcast for me */
1153 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1141 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
1142 orig_node);
1154 ret = NET_RX_SUCCESS; 1143 ret = NET_RX_SUCCESS;
1155 goto out; 1144 goto out;
1156 1145
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3b4b2daa3b3e..570a8bce0364 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -190,13 +190,13 @@ out:
190static void batadv_send_outstanding_bcast_packet(struct work_struct *work) 190static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
191{ 191{
192 struct batadv_hard_iface *hard_iface; 192 struct batadv_hard_iface *hard_iface;
193 struct delayed_work *delayed_work = 193 struct delayed_work *delayed_work;
194 container_of(work, struct delayed_work, work);
195 struct batadv_forw_packet *forw_packet; 194 struct batadv_forw_packet *forw_packet;
196 struct sk_buff *skb1; 195 struct sk_buff *skb1;
197 struct net_device *soft_iface; 196 struct net_device *soft_iface;
198 struct batadv_priv *bat_priv; 197 struct batadv_priv *bat_priv;
199 198
199 delayed_work = container_of(work, struct delayed_work, work);
200 forw_packet = container_of(delayed_work, struct batadv_forw_packet, 200 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
201 delayed_work); 201 delayed_work);
202 soft_iface = forw_packet->if_incoming->soft_iface; 202 soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@ out:
239 239
240void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) 240void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
241{ 241{
242 struct delayed_work *delayed_work = 242 struct delayed_work *delayed_work;
243 container_of(work, struct delayed_work, work);
244 struct batadv_forw_packet *forw_packet; 243 struct batadv_forw_packet *forw_packet;
245 struct batadv_priv *bat_priv; 244 struct batadv_priv *bat_priv;
246 245
246 delayed_work = container_of(work, struct delayed_work, work);
247 forw_packet = container_of(delayed_work, struct batadv_forw_packet, 247 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
248 delayed_work); 248 delayed_work);
249 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); 249 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 21c53577c8d6..b9a28d2dd3e8 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -93,7 +93,14 @@ static int batadv_interface_release(struct net_device *dev)
93static struct net_device_stats *batadv_interface_stats(struct net_device *dev) 93static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
94{ 94{
95 struct batadv_priv *bat_priv = netdev_priv(dev); 95 struct batadv_priv *bat_priv = netdev_priv(dev);
96 return &bat_priv->stats; 96 struct net_device_stats *stats = &bat_priv->stats;
97
98 stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
99 stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
100 stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
101 stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
102 stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
103 return stats;
97} 104}
98 105
99static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) 106static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
@@ -145,6 +152,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
145 int data_len = skb->len, ret; 152 int data_len = skb->len, ret;
146 short vid __maybe_unused = -1; 153 short vid __maybe_unused = -1;
147 bool do_bcast = false; 154 bool do_bcast = false;
155 uint32_t seqno;
148 156
149 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) 157 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
150 goto dropped; 158 goto dropped;
@@ -226,8 +234,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
226 primary_if->net_dev->dev_addr, ETH_ALEN); 234 primary_if->net_dev->dev_addr, ETH_ALEN);
227 235
228 /* set broadcast sequence number */ 236 /* set broadcast sequence number */
229 bcast_packet->seqno = 237 seqno = atomic_inc_return(&bat_priv->bcast_seqno);
230 htonl(atomic_inc_return(&bat_priv->bcast_seqno)); 238 bcast_packet->seqno = htonl(seqno);
231 239
232 batadv_add_bcast_packet_to_list(bat_priv, skb, 1); 240 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
233 241
@@ -249,14 +257,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
249 goto dropped_freed; 257 goto dropped_freed;
250 } 258 }
251 259
252 bat_priv->stats.tx_packets++; 260 batadv_inc_counter(bat_priv, BATADV_CNT_TX);
253 bat_priv->stats.tx_bytes += data_len; 261 batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
254 goto end; 262 goto end;
255 263
256dropped: 264dropped:
257 kfree_skb(skb); 265 kfree_skb(skb);
258dropped_freed: 266dropped_freed:
259 bat_priv->stats.tx_dropped++; 267 batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
260end: 268end:
261 if (primary_if) 269 if (primary_if)
262 batadv_hardif_free_ref(primary_if); 270 batadv_hardif_free_ref(primary_if);
@@ -265,7 +273,7 @@ end:
265 273
266void batadv_interface_rx(struct net_device *soft_iface, 274void batadv_interface_rx(struct net_device *soft_iface,
267 struct sk_buff *skb, struct batadv_hard_iface *recv_if, 275 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
268 int hdr_size) 276 int hdr_size, struct batadv_orig_node *orig_node)
269{ 277{
270 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 278 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
271 struct ethhdr *ethhdr; 279 struct ethhdr *ethhdr;
@@ -311,11 +319,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
311 319
312 /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ 320 /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
313 321
314 bat_priv->stats.rx_packets++; 322 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
315 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; 323 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
324 skb->len + ETH_HLEN);
316 325
317 soft_iface->last_rx = jiffies; 326 soft_iface->last_rx = jiffies;
318 327
328 if (orig_node)
329 batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
330 ethhdr->h_source);
331
319 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) 332 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
320 goto dropped; 333 goto dropped;
321 334
@@ -382,15 +395,22 @@ struct net_device *batadv_softif_create(const char *name)
382 if (!soft_iface) 395 if (!soft_iface)
383 goto out; 396 goto out;
384 397
398 bat_priv = netdev_priv(soft_iface);
399
400 /* batadv_interface_stats() needs to be available as soon as
401 * register_netdevice() has been called
402 */
403 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
404 if (!bat_priv->bat_counters)
405 goto free_soft_iface;
406
385 ret = register_netdevice(soft_iface); 407 ret = register_netdevice(soft_iface);
386 if (ret < 0) { 408 if (ret < 0) {
387 pr_err("Unable to register the batman interface '%s': %i\n", 409 pr_err("Unable to register the batman interface '%s': %i\n",
388 name, ret); 410 name, ret);
389 goto free_soft_iface; 411 goto free_bat_counters;
390 } 412 }
391 413
392 bat_priv = netdev_priv(soft_iface);
393
394 atomic_set(&bat_priv->aggregated_ogms, 1); 414 atomic_set(&bat_priv->aggregated_ogms, 1);
395 atomic_set(&bat_priv->bonding, 0); 415 atomic_set(&bat_priv->bonding, 0);
396 atomic_set(&bat_priv->bridge_loop_avoidance, 0); 416 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -408,29 +428,26 @@ struct net_device *batadv_softif_create(const char *name)
408 428
409 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 429 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
410 atomic_set(&bat_priv->bcast_seqno, 1); 430 atomic_set(&bat_priv->bcast_seqno, 1);
411 atomic_set(&bat_priv->ttvn, 0); 431 atomic_set(&bat_priv->tt.vn, 0);
412 atomic_set(&bat_priv->tt_local_changes, 0); 432 atomic_set(&bat_priv->tt.local_changes, 0);
413 atomic_set(&bat_priv->tt_ogm_append_cnt, 0); 433 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
414 atomic_set(&bat_priv->bla_num_requests, 0); 434#ifdef CONFIG_BATMAN_ADV_BLA
415 435 atomic_set(&bat_priv->bla.num_requests, 0);
416 bat_priv->tt_buff = NULL; 436#endif
417 bat_priv->tt_buff_len = 0; 437 bat_priv->tt.last_changeset = NULL;
418 bat_priv->tt_poss_change = false; 438 bat_priv->tt.last_changeset_len = 0;
439 bat_priv->tt.poss_change = false;
419 440
420 bat_priv->primary_if = NULL; 441 bat_priv->primary_if = NULL;
421 bat_priv->num_ifaces = 0; 442 bat_priv->num_ifaces = 0;
422 443
423 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
424 if (!bat_priv->bat_counters)
425 goto unreg_soft_iface;
426
427 ret = batadv_algo_select(bat_priv, batadv_routing_algo); 444 ret = batadv_algo_select(bat_priv, batadv_routing_algo);
428 if (ret < 0) 445 if (ret < 0)
429 goto free_bat_counters; 446 goto unreg_soft_iface;
430 447
431 ret = batadv_sysfs_add_meshif(soft_iface); 448 ret = batadv_sysfs_add_meshif(soft_iface);
432 if (ret < 0) 449 if (ret < 0)
433 goto free_bat_counters; 450 goto unreg_soft_iface;
434 451
435 ret = batadv_debugfs_add_meshif(soft_iface); 452 ret = batadv_debugfs_add_meshif(soft_iface);
436 if (ret < 0) 453 if (ret < 0)
@@ -446,12 +463,13 @@ unreg_debugfs:
446 batadv_debugfs_del_meshif(soft_iface); 463 batadv_debugfs_del_meshif(soft_iface);
447unreg_sysfs: 464unreg_sysfs:
448 batadv_sysfs_del_meshif(soft_iface); 465 batadv_sysfs_del_meshif(soft_iface);
449free_bat_counters:
450 free_percpu(bat_priv->bat_counters);
451unreg_soft_iface: 466unreg_soft_iface:
467 free_percpu(bat_priv->bat_counters);
452 unregister_netdevice(soft_iface); 468 unregister_netdevice(soft_iface);
453 return NULL; 469 return NULL;
454 470
471free_bat_counters:
472 free_percpu(bat_priv->bat_counters);
455free_soft_iface: 473free_soft_iface:
456 free_netdev(soft_iface); 474 free_netdev(soft_iface);
457out: 475out:
@@ -521,6 +539,11 @@ static u32 batadv_get_link(struct net_device *dev)
521static const struct { 539static const struct {
522 const char name[ETH_GSTRING_LEN]; 540 const char name[ETH_GSTRING_LEN];
523} batadv_counters_strings[] = { 541} batadv_counters_strings[] = {
542 { "tx" },
543 { "tx_bytes" },
544 { "tx_dropped" },
545 { "rx" },
546 { "rx_bytes" },
524 { "forward" }, 547 { "forward" },
525 { "forward_bytes" }, 548 { "forward_bytes" },
526 { "mgmt_tx" }, 549 { "mgmt_tx" },
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 852c683b06a1..07a08fed28b9 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -21,8 +21,9 @@
21#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ 21#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
22 22
23int batadv_skb_head_push(struct sk_buff *skb, unsigned int len); 23int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
24void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, 24void batadv_interface_rx(struct net_device *soft_iface,
25 struct batadv_hard_iface *recv_if, int hdr_size); 25 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
26 int hdr_size, struct batadv_orig_node *orig_node);
26struct net_device *batadv_softif_create(const char *name); 27struct net_device *batadv_softif_create(const char *name);
27void batadv_softif_destroy(struct net_device *soft_iface); 28void batadv_softif_destroy(struct net_device *soft_iface);
28int batadv_softif_is_valid(const struct net_device *net_dev); 29int batadv_softif_is_valid(const struct net_device *net_dev);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 99dd8f75b3ff..112edd371b2f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -34,6 +34,10 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
34static void batadv_tt_purge(struct work_struct *work); 34static void batadv_tt_purge(struct work_struct *work);
35static void 35static void
36batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry); 36batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
37static void batadv_tt_global_del(struct batadv_priv *bat_priv,
38 struct batadv_orig_node *orig_node,
39 const unsigned char *addr,
40 const char *message, bool roaming);
37 41
38/* returns 1 if they are the same mac addr */ 42/* returns 1 if they are the same mac addr */
39static int batadv_compare_tt(const struct hlist_node *node, const void *data2) 43static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
46 50
47static void batadv_tt_start_timer(struct batadv_priv *bat_priv) 51static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
48{ 52{
49 INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge); 53 INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
50 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work, 54 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
51 msecs_to_jiffies(5000)); 55 msecs_to_jiffies(5000));
52} 56}
53 57
@@ -88,7 +92,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
88 struct batadv_tt_common_entry *tt_common_entry; 92 struct batadv_tt_common_entry *tt_common_entry;
89 struct batadv_tt_local_entry *tt_local_entry = NULL; 93 struct batadv_tt_local_entry *tt_local_entry = NULL;
90 94
91 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data); 95 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
92 if (tt_common_entry) 96 if (tt_common_entry)
93 tt_local_entry = container_of(tt_common_entry, 97 tt_local_entry = container_of(tt_common_entry,
94 struct batadv_tt_local_entry, 98 struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
102 struct batadv_tt_common_entry *tt_common_entry; 106 struct batadv_tt_common_entry *tt_common_entry;
103 struct batadv_tt_global_entry *tt_global_entry = NULL; 107 struct batadv_tt_global_entry *tt_global_entry = NULL;
104 108
105 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data); 109 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
106 if (tt_common_entry) 110 if (tt_common_entry)
107 tt_global_entry = container_of(tt_common_entry, 111 tt_global_entry = container_of(tt_common_entry,
108 struct batadv_tt_global_entry, 112 struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
152static void 156static void
153batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry) 157batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
154{ 158{
159 if (!atomic_dec_and_test(&orig_entry->refcount))
160 return;
155 /* to avoid race conditions, immediately decrease the tt counter */ 161 /* to avoid race conditions, immediately decrease the tt counter */
156 atomic_dec(&orig_entry->orig_node->tt_size); 162 atomic_dec(&orig_entry->orig_node->tt_size);
157 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); 163 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
@@ -175,8 +181,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
175 del_op_requested = flags & BATADV_TT_CLIENT_DEL; 181 del_op_requested = flags & BATADV_TT_CLIENT_DEL;
176 182
177 /* check for ADD+DEL or DEL+ADD events */ 183 /* check for ADD+DEL or DEL+ADD events */
178 spin_lock_bh(&bat_priv->tt_changes_list_lock); 184 spin_lock_bh(&bat_priv->tt.changes_list_lock);
179 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 185 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
180 list) { 186 list) {
181 if (!batadv_compare_eth(entry->change.addr, addr)) 187 if (!batadv_compare_eth(entry->change.addr, addr))
182 continue; 188 continue;
@@ -203,15 +209,15 @@ del:
203 } 209 }
204 210
205 /* track the change in the OGMinterval list */ 211 /* track the change in the OGMinterval list */
206 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); 212 list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
207 213
208unlock: 214unlock:
209 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 215 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
210 216
211 if (event_removed) 217 if (event_removed)
212 atomic_dec(&bat_priv->tt_local_changes); 218 atomic_dec(&bat_priv->tt.local_changes);
213 else 219 else
214 atomic_inc(&bat_priv->tt_local_changes); 220 atomic_inc(&bat_priv->tt.local_changes);
215} 221}
216 222
217int batadv_tt_len(int changes_num) 223int batadv_tt_len(int changes_num)
@@ -221,12 +227,12 @@ int batadv_tt_len(int changes_num)
221 227
222static int batadv_tt_local_init(struct batadv_priv *bat_priv) 228static int batadv_tt_local_init(struct batadv_priv *bat_priv)
223{ 229{
224 if (bat_priv->tt_local_hash) 230 if (bat_priv->tt.local_hash)
225 return 0; 231 return 0;
226 232
227 bat_priv->tt_local_hash = batadv_hash_new(1024); 233 bat_priv->tt.local_hash = batadv_hash_new(1024);
228 234
229 if (!bat_priv->tt_local_hash) 235 if (!bat_priv->tt.local_hash)
230 return -ENOMEM; 236 return -ENOMEM;
231 237
232 return 0; 238 return 0;
@@ -258,7 +264,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
258 264
259 batadv_dbg(BATADV_DBG_TT, bat_priv, 265 batadv_dbg(BATADV_DBG_TT, bat_priv,
260 "Creating new local tt entry: %pM (ttvn: %d)\n", addr, 266 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
261 (uint8_t)atomic_read(&bat_priv->ttvn)); 267 (uint8_t)atomic_read(&bat_priv->tt.vn));
262 268
263 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); 269 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
264 tt_local_entry->common.flags = BATADV_NO_FLAGS; 270 tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -266,6 +272,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
266 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI; 272 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
267 atomic_set(&tt_local_entry->common.refcount, 2); 273 atomic_set(&tt_local_entry->common.refcount, 2);
268 tt_local_entry->last_seen = jiffies; 274 tt_local_entry->last_seen = jiffies;
275 tt_local_entry->common.added_at = tt_local_entry->last_seen;
269 276
270 /* the batman interface mac address should never be purged */ 277 /* the batman interface mac address should never be purged */
271 if (batadv_compare_eth(addr, soft_iface->dev_addr)) 278 if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -277,7 +284,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
277 */ 284 */
278 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; 285 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
279 286
280 hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt, 287 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
281 batadv_choose_orig, 288 batadv_choose_orig,
282 &tt_local_entry->common, 289 &tt_local_entry->common,
283 &tt_local_entry->common.hash_entry); 290 &tt_local_entry->common.hash_entry);
@@ -348,7 +355,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
348 primary_if = batadv_primary_if_get_selected(bat_priv); 355 primary_if = batadv_primary_if_get_selected(bat_priv);
349 356
350 req_len = min_packet_len; 357 req_len = min_packet_len;
351 req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes)); 358 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
352 359
353 /* if we have too many changes for one packet don't send any 360 /* if we have too many changes for one packet don't send any
354 * and wait for the tt table request which will be fragmented 361 * and wait for the tt table request which will be fragmented
@@ -381,10 +388,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
381 if (new_len > 0) 388 if (new_len > 0)
382 tot_changes = new_len / batadv_tt_len(1); 389 tot_changes = new_len / batadv_tt_len(1);
383 390
384 spin_lock_bh(&bat_priv->tt_changes_list_lock); 391 spin_lock_bh(&bat_priv->tt.changes_list_lock);
385 atomic_set(&bat_priv->tt_local_changes, 0); 392 atomic_set(&bat_priv->tt.local_changes, 0);
386 393
387 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 394 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
388 list) { 395 list) {
389 if (count < tot_changes) { 396 if (count < tot_changes) {
390 memcpy(tt_buff + batadv_tt_len(count), 397 memcpy(tt_buff + batadv_tt_len(count),
@@ -394,25 +401,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
394 list_del(&entry->list); 401 list_del(&entry->list);
395 kfree(entry); 402 kfree(entry);
396 } 403 }
397 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 404 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
398 405
399 /* Keep the buffer for possible tt_request */ 406 /* Keep the buffer for possible tt_request */
400 spin_lock_bh(&bat_priv->tt_buff_lock); 407 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
401 kfree(bat_priv->tt_buff); 408 kfree(bat_priv->tt.last_changeset);
402 bat_priv->tt_buff_len = 0; 409 bat_priv->tt.last_changeset_len = 0;
403 bat_priv->tt_buff = NULL; 410 bat_priv->tt.last_changeset = NULL;
404 /* check whether this new OGM has no changes due to size problems */ 411 /* check whether this new OGM has no changes due to size problems */
405 if (new_len > 0) { 412 if (new_len > 0) {
406 /* if kmalloc() fails we will reply with the full table 413 /* if kmalloc() fails we will reply with the full table
407 * instead of providing the diff 414 * instead of providing the diff
408 */ 415 */
409 bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC); 416 bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
410 if (bat_priv->tt_buff) { 417 if (bat_priv->tt.last_changeset) {
411 memcpy(bat_priv->tt_buff, tt_buff, new_len); 418 memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
412 bat_priv->tt_buff_len = new_len; 419 bat_priv->tt.last_changeset_len = new_len;
413 } 420 }
414 } 421 }
415 spin_unlock_bh(&bat_priv->tt_buff_lock); 422 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
416 423
417 return count; 424 return count;
418} 425}
@@ -421,7 +428,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
421{ 428{
422 struct net_device *net_dev = (struct net_device *)seq->private; 429 struct net_device *net_dev = (struct net_device *)seq->private;
423 struct batadv_priv *bat_priv = netdev_priv(net_dev); 430 struct batadv_priv *bat_priv = netdev_priv(net_dev);
424 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 431 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
425 struct batadv_tt_common_entry *tt_common_entry; 432 struct batadv_tt_common_entry *tt_common_entry;
426 struct batadv_hard_iface *primary_if; 433 struct batadv_hard_iface *primary_if;
427 struct hlist_node *node; 434 struct hlist_node *node;
@@ -446,7 +453,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
446 453
447 seq_printf(seq, 454 seq_printf(seq,
448 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", 455 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
449 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); 456 net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
450 457
451 for (i = 0; i < hash->size; i++) { 458 for (i = 0; i < hash->size; i++) {
452 head = &hash->table[i]; 459 head = &hash->table[i];
@@ -544,7 +551,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
544 551
545static void batadv_tt_local_purge(struct batadv_priv *bat_priv) 552static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
546{ 553{
547 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 554 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
548 struct hlist_head *head; 555 struct hlist_head *head;
549 spinlock_t *list_lock; /* protects write access to the hash lists */ 556 spinlock_t *list_lock; /* protects write access to the hash lists */
550 uint32_t i; 557 uint32_t i;
@@ -570,10 +577,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
570 struct hlist_head *head; 577 struct hlist_head *head;
571 uint32_t i; 578 uint32_t i;
572 579
573 if (!bat_priv->tt_local_hash) 580 if (!bat_priv->tt.local_hash)
574 return; 581 return;
575 582
576 hash = bat_priv->tt_local_hash; 583 hash = bat_priv->tt.local_hash;
577 584
578 for (i = 0; i < hash->size; i++) { 585 for (i = 0; i < hash->size; i++) {
579 head = &hash->table[i]; 586 head = &hash->table[i];
@@ -593,17 +600,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
593 600
594 batadv_hash_destroy(hash); 601 batadv_hash_destroy(hash);
595 602
596 bat_priv->tt_local_hash = NULL; 603 bat_priv->tt.local_hash = NULL;
597} 604}
598 605
599static int batadv_tt_global_init(struct batadv_priv *bat_priv) 606static int batadv_tt_global_init(struct batadv_priv *bat_priv)
600{ 607{
601 if (bat_priv->tt_global_hash) 608 if (bat_priv->tt.global_hash)
602 return 0; 609 return 0;
603 610
604 bat_priv->tt_global_hash = batadv_hash_new(1024); 611 bat_priv->tt.global_hash = batadv_hash_new(1024);
605 612
606 if (!bat_priv->tt_global_hash) 613 if (!bat_priv->tt.global_hash)
607 return -ENOMEM; 614 return -ENOMEM;
608 615
609 return 0; 616 return 0;
@@ -613,62 +620,99 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
613{ 620{
614 struct batadv_tt_change_node *entry, *safe; 621 struct batadv_tt_change_node *entry, *safe;
615 622
616 spin_lock_bh(&bat_priv->tt_changes_list_lock); 623 spin_lock_bh(&bat_priv->tt.changes_list_lock);
617 624
618 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 625 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
619 list) { 626 list) {
620 list_del(&entry->list); 627 list_del(&entry->list);
621 kfree(entry); 628 kfree(entry);
622 } 629 }
623 630
624 atomic_set(&bat_priv->tt_local_changes, 0); 631 atomic_set(&bat_priv->tt.local_changes, 0);
625 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 632 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
626} 633}
627 634
628/* find out if an orig_node is already in the list of a tt_global_entry. 635/* retrieves the orig_tt_list_entry belonging to orig_node from the
629 * returns 1 if found, 0 otherwise 636 * batadv_tt_global_entry list
637 *
638 * returns it with an increased refcounter, NULL if not found
630 */ 639 */
631static bool 640static struct batadv_tt_orig_list_entry *
632batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, 641batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
633 const struct batadv_orig_node *orig_node) 642 const struct batadv_orig_node *orig_node)
634{ 643{
635 struct batadv_tt_orig_list_entry *tmp_orig_entry; 644 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
636 const struct hlist_head *head; 645 const struct hlist_head *head;
637 struct hlist_node *node; 646 struct hlist_node *node;
638 bool found = false;
639 647
640 rcu_read_lock(); 648 rcu_read_lock();
641 head = &entry->orig_list; 649 head = &entry->orig_list;
642 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { 650 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
643 if (tmp_orig_entry->orig_node == orig_node) { 651 if (tmp_orig_entry->orig_node != orig_node)
644 found = true; 652 continue;
645 break; 653 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
646 } 654 continue;
655
656 orig_entry = tmp_orig_entry;
657 break;
647 } 658 }
648 rcu_read_unlock(); 659 rcu_read_unlock();
660
661 return orig_entry;
662}
663
664/* find out if an orig_node is already in the list of a tt_global_entry.
665 * returns true if found, false otherwise
666 */
667static bool
668batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
669 const struct batadv_orig_node *orig_node)
670{
671 struct batadv_tt_orig_list_entry *orig_entry;
672 bool found = false;
673
674 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
675 if (orig_entry) {
676 found = true;
677 batadv_tt_orig_list_entry_free_ref(orig_entry);
678 }
679
649 return found; 680 return found;
650} 681}
651 682
652static void 683static void
653batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry, 684batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
654 struct batadv_orig_node *orig_node, int ttvn) 685 struct batadv_orig_node *orig_node, int ttvn)
655{ 686{
656 struct batadv_tt_orig_list_entry *orig_entry; 687 struct batadv_tt_orig_list_entry *orig_entry;
657 688
689 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
690 if (orig_entry) {
691 /* refresh the ttvn: the current value could be a bogus one that
692 * was added during a "temporary client detection"
693 */
694 orig_entry->ttvn = ttvn;
695 goto out;
696 }
697
658 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); 698 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
659 if (!orig_entry) 699 if (!orig_entry)
660 return; 700 goto out;
661 701
662 INIT_HLIST_NODE(&orig_entry->list); 702 INIT_HLIST_NODE(&orig_entry->list);
663 atomic_inc(&orig_node->refcount); 703 atomic_inc(&orig_node->refcount);
664 atomic_inc(&orig_node->tt_size); 704 atomic_inc(&orig_node->tt_size);
665 orig_entry->orig_node = orig_node; 705 orig_entry->orig_node = orig_node;
666 orig_entry->ttvn = ttvn; 706 orig_entry->ttvn = ttvn;
707 atomic_set(&orig_entry->refcount, 2);
667 708
668 spin_lock_bh(&tt_global_entry->list_lock); 709 spin_lock_bh(&tt_global->list_lock);
669 hlist_add_head_rcu(&orig_entry->list, 710 hlist_add_head_rcu(&orig_entry->list,
670 &tt_global_entry->orig_list); 711 &tt_global->orig_list);
671 spin_unlock_bh(&tt_global_entry->list_lock); 712 spin_unlock_bh(&tt_global->list_lock);
713out:
714 if (orig_entry)
715 batadv_tt_orig_list_entry_free_ref(orig_entry);
672} 716}
673 717
674/* caller must hold orig_node refcount */ 718/* caller must hold orig_node refcount */
@@ -695,11 +739,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
695 common->flags = flags; 739 common->flags = flags;
696 tt_global_entry->roam_at = 0; 740 tt_global_entry->roam_at = 0;
697 atomic_set(&common->refcount, 2); 741 atomic_set(&common->refcount, 2);
742 common->added_at = jiffies;
698 743
699 INIT_HLIST_HEAD(&tt_global_entry->orig_list); 744 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
700 spin_lock_init(&tt_global_entry->list_lock); 745 spin_lock_init(&tt_global_entry->list_lock);
701 746
702 hash_added = batadv_hash_add(bat_priv->tt_global_hash, 747 hash_added = batadv_hash_add(bat_priv->tt.global_hash,
703 batadv_compare_tt, 748 batadv_compare_tt,
704 batadv_choose_orig, common, 749 batadv_choose_orig, common,
705 &common->hash_entry); 750 &common->hash_entry);
@@ -709,11 +754,20 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
709 batadv_tt_global_entry_free_ref(tt_global_entry); 754 batadv_tt_global_entry_free_ref(tt_global_entry);
710 goto out_remove; 755 goto out_remove;
711 } 756 }
712
713 batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
714 ttvn);
715 } else { 757 } else {
716 /* there is already a global entry, use this one. */ 758 /* If there is already a global entry, we can use this one for
759 * our processing.
760 * But if we are trying to add a temporary client we can exit
761 * directly because the temporary information should never
762 * override any already known client state (whatever it is)
763 */
764 if (flags & BATADV_TT_CLIENT_TEMP)
765 goto out;
766
767 /* if the client was temporary added before receiving the first
768 * OGM announcing it, we have to clear the TEMP flag
769 */
770 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
717 771
718 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 772 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
719 * one originator left in the list and we previously received a 773 * one originator left in the list and we previously received a
@@ -727,12 +781,9 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
727 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; 781 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
728 tt_global_entry->roam_at = 0; 782 tt_global_entry->roam_at = 0;
729 } 783 }
730
731 if (!batadv_tt_global_entry_has_orig(tt_global_entry,
732 orig_node))
733 batadv_tt_global_add_orig_entry(tt_global_entry,
734 orig_node, ttvn);
735 } 784 }
785 /* add the new orig_entry (if needed) or update it */
786 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
736 787
737 batadv_dbg(BATADV_DBG_TT, bat_priv, 788 batadv_dbg(BATADV_DBG_TT, bat_priv,
738 "Creating new global tt entry: %pM (via %pM)\n", 789 "Creating new global tt entry: %pM (via %pM)\n",
@@ -771,11 +822,12 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
771 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 822 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
772 flags = tt_common_entry->flags; 823 flags = tt_common_entry->flags;
773 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); 824 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
774 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n", 825 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
775 tt_global_entry->common.addr, orig_entry->ttvn, 826 tt_global_entry->common.addr, orig_entry->ttvn,
776 orig_entry->orig_node->orig, last_ttvn, 827 orig_entry->orig_node->orig, last_ttvn,
777 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), 828 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
778 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.')); 829 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
830 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
779 } 831 }
780} 832}
781 833
@@ -783,7 +835,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
783{ 835{
784 struct net_device *net_dev = (struct net_device *)seq->private; 836 struct net_device *net_dev = (struct net_device *)seq->private;
785 struct batadv_priv *bat_priv = netdev_priv(net_dev); 837 struct batadv_priv *bat_priv = netdev_priv(net_dev);
786 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 838 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
787 struct batadv_tt_common_entry *tt_common_entry; 839 struct batadv_tt_common_entry *tt_common_entry;
788 struct batadv_tt_global_entry *tt_global; 840 struct batadv_tt_global_entry *tt_global;
789 struct batadv_hard_iface *primary_if; 841 struct batadv_hard_iface *primary_if;
@@ -884,7 +936,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
884 "Deleting global tt entry %pM: %s\n", 936 "Deleting global tt entry %pM: %s\n",
885 tt_global_entry->common.addr, message); 937 tt_global_entry->common.addr, message);
886 938
887 batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt, 939 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
888 batadv_choose_orig, tt_global_entry->common.addr); 940 batadv_choose_orig, tt_global_entry->common.addr);
889 batadv_tt_global_entry_free_ref(tt_global_entry); 941 batadv_tt_global_entry_free_ref(tt_global_entry);
890 942
@@ -995,7 +1047,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
995 struct batadv_tt_global_entry *tt_global; 1047 struct batadv_tt_global_entry *tt_global;
996 struct batadv_tt_common_entry *tt_common_entry; 1048 struct batadv_tt_common_entry *tt_common_entry;
997 uint32_t i; 1049 uint32_t i;
998 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 1050 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
999 struct hlist_node *node, *safe; 1051 struct hlist_node *node, *safe;
1000 struct hlist_head *head; 1052 struct hlist_head *head;
1001 spinlock_t *list_lock; /* protects write access to the hash lists */ 1053 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1030,49 +1082,63 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1030 orig_node->tt_initialised = false; 1082 orig_node->tt_initialised = false;
1031} 1083}
1032 1084
1033static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv, 1085static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
1034 struct hlist_head *head) 1086 char **msg)
1035{ 1087{
1036 struct batadv_tt_common_entry *tt_common_entry; 1088 bool purge = false;
1037 struct batadv_tt_global_entry *tt_global_entry; 1089 unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
1038 struct hlist_node *node, *node_tmp; 1090 unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
1039
1040 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
1041 hash_entry) {
1042 tt_global_entry = container_of(tt_common_entry,
1043 struct batadv_tt_global_entry,
1044 common);
1045 if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
1046 continue;
1047 if (!batadv_has_timed_out(tt_global_entry->roam_at,
1048 BATADV_TT_CLIENT_ROAM_TIMEOUT))
1049 continue;
1050 1091
1051 batadv_dbg(BATADV_DBG_TT, bat_priv, 1092 if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
1052 "Deleting global tt entry (%pM): Roaming timeout\n", 1093 batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
1053 tt_global_entry->common.addr); 1094 purge = true;
1095 *msg = "Roaming timeout\n";
1096 }
1054 1097
1055 hlist_del_rcu(node); 1098 if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
1056 batadv_tt_global_entry_free_ref(tt_global_entry); 1099 batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
1100 purge = true;
1101 *msg = "Temporary client timeout\n";
1057 } 1102 }
1103
1104 return purge;
1058} 1105}
1059 1106
1060static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv) 1107static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1061{ 1108{
1062 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 1109 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1063 struct hlist_head *head; 1110 struct hlist_head *head;
1111 struct hlist_node *node, *node_tmp;
1064 spinlock_t *list_lock; /* protects write access to the hash lists */ 1112 spinlock_t *list_lock; /* protects write access to the hash lists */
1065 uint32_t i; 1113 uint32_t i;
1114 char *msg = NULL;
1115 struct batadv_tt_common_entry *tt_common;
1116 struct batadv_tt_global_entry *tt_global;
1066 1117
1067 for (i = 0; i < hash->size; i++) { 1118 for (i = 0; i < hash->size; i++) {
1068 head = &hash->table[i]; 1119 head = &hash->table[i];
1069 list_lock = &hash->list_locks[i]; 1120 list_lock = &hash->list_locks[i];
1070 1121
1071 spin_lock_bh(list_lock); 1122 spin_lock_bh(list_lock);
1072 batadv_tt_global_roam_purge_list(bat_priv, head); 1123 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
1124 hash_entry) {
1125 tt_global = container_of(tt_common,
1126 struct batadv_tt_global_entry,
1127 common);
1128
1129 if (!batadv_tt_global_to_purge(tt_global, &msg))
1130 continue;
1131
1132 batadv_dbg(BATADV_DBG_TT, bat_priv,
1133 "Deleting global tt entry (%pM): %s\n",
1134 tt_global->common.addr, msg);
1135
1136 hlist_del_rcu(node);
1137
1138 batadv_tt_global_entry_free_ref(tt_global);
1139 }
1073 spin_unlock_bh(list_lock); 1140 spin_unlock_bh(list_lock);
1074 } 1141 }
1075
1076} 1142}
1077 1143
1078static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) 1144static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
@@ -1085,10 +1151,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1085 struct hlist_head *head; 1151 struct hlist_head *head;
1086 uint32_t i; 1152 uint32_t i;
1087 1153
1088 if (!bat_priv->tt_global_hash) 1154 if (!bat_priv->tt.global_hash)
1089 return; 1155 return;
1090 1156
1091 hash = bat_priv->tt_global_hash; 1157 hash = bat_priv->tt.global_hash;
1092 1158
1093 for (i = 0; i < hash->size; i++) { 1159 for (i = 0; i < hash->size; i++) {
1094 head = &hash->table[i]; 1160 head = &hash->table[i];
@@ -1108,7 +1174,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1108 1174
1109 batadv_hash_destroy(hash); 1175 batadv_hash_destroy(hash);
1110 1176
1111 bat_priv->tt_global_hash = NULL; 1177 bat_priv->tt.global_hash = NULL;
1112} 1178}
1113 1179
1114static bool 1180static bool
@@ -1187,7 +1253,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1187 struct batadv_orig_node *orig_node) 1253 struct batadv_orig_node *orig_node)
1188{ 1254{
1189 uint16_t total = 0, total_one; 1255 uint16_t total = 0, total_one;
1190 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 1256 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1191 struct batadv_tt_common_entry *tt_common; 1257 struct batadv_tt_common_entry *tt_common;
1192 struct batadv_tt_global_entry *tt_global; 1258 struct batadv_tt_global_entry *tt_global;
1193 struct hlist_node *node; 1259 struct hlist_node *node;
@@ -1210,6 +1276,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1210 */ 1276 */
1211 if (tt_common->flags & BATADV_TT_CLIENT_ROAM) 1277 if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1212 continue; 1278 continue;
1279 /* Temporary clients have not been announced yet, so
1280 * they have to be skipped while computing the global
1281 * crc
1282 */
1283 if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
1284 continue;
1213 1285
1214 /* find out if this global entry is announced by this 1286 /* find out if this global entry is announced by this
1215 * originator 1287 * originator
@@ -1234,7 +1306,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1234static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) 1306static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1235{ 1307{
1236 uint16_t total = 0, total_one; 1308 uint16_t total = 0, total_one;
1237 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 1309 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
1238 struct batadv_tt_common_entry *tt_common; 1310 struct batadv_tt_common_entry *tt_common;
1239 struct hlist_node *node; 1311 struct hlist_node *node;
1240 struct hlist_head *head; 1312 struct hlist_head *head;
@@ -1267,14 +1339,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
1267{ 1339{
1268 struct batadv_tt_req_node *node, *safe; 1340 struct batadv_tt_req_node *node, *safe;
1269 1341
1270 spin_lock_bh(&bat_priv->tt_req_list_lock); 1342 spin_lock_bh(&bat_priv->tt.req_list_lock);
1271 1343
1272 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1344 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1273 list_del(&node->list); 1345 list_del(&node->list);
1274 kfree(node); 1346 kfree(node);
1275 } 1347 }
1276 1348
1277 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1349 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1278} 1350}
1279 1351
1280static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, 1352static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1304,15 +1376,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
1304{ 1376{
1305 struct batadv_tt_req_node *node, *safe; 1377 struct batadv_tt_req_node *node, *safe;
1306 1378
1307 spin_lock_bh(&bat_priv->tt_req_list_lock); 1379 spin_lock_bh(&bat_priv->tt.req_list_lock);
1308 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1380 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1309 if (batadv_has_timed_out(node->issued_at, 1381 if (batadv_has_timed_out(node->issued_at,
1310 BATADV_TT_REQUEST_TIMEOUT)) { 1382 BATADV_TT_REQUEST_TIMEOUT)) {
1311 list_del(&node->list); 1383 list_del(&node->list);
1312 kfree(node); 1384 kfree(node);
1313 } 1385 }
1314 } 1386 }
1315 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1387 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1316} 1388}
1317 1389
1318/* returns the pointer to the new tt_req_node struct if no request 1390/* returns the pointer to the new tt_req_node struct if no request
@@ -1324,8 +1396,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1324{ 1396{
1325 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; 1397 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1326 1398
1327 spin_lock_bh(&bat_priv->tt_req_list_lock); 1399 spin_lock_bh(&bat_priv->tt.req_list_lock);
1328 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { 1400 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
1329 if (batadv_compare_eth(tt_req_node_tmp, orig_node) && 1401 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1330 !batadv_has_timed_out(tt_req_node_tmp->issued_at, 1402 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1331 BATADV_TT_REQUEST_TIMEOUT)) 1403 BATADV_TT_REQUEST_TIMEOUT))
@@ -1339,9 +1411,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1339 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN); 1411 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1340 tt_req_node->issued_at = jiffies; 1412 tt_req_node->issued_at = jiffies;
1341 1413
1342 list_add(&tt_req_node->list, &bat_priv->tt_req_list); 1414 list_add(&tt_req_node->list, &bat_priv->tt.req_list);
1343unlock: 1415unlock:
1344 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1416 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1345 return tt_req_node; 1417 return tt_req_node;
1346} 1418}
1347 1419
@@ -1363,7 +1435,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
1363 const struct batadv_tt_global_entry *tt_global_entry; 1435 const struct batadv_tt_global_entry *tt_global_entry;
1364 const struct batadv_orig_node *orig_node = data_ptr; 1436 const struct batadv_orig_node *orig_node = data_ptr;
1365 1437
1366 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM) 1438 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
1439 tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
1367 return 0; 1440 return 0;
1368 1441
1369 tt_global_entry = container_of(tt_common_entry, 1442 tt_global_entry = container_of(tt_common_entry,
@@ -1507,9 +1580,9 @@ out:
1507 if (ret) 1580 if (ret)
1508 kfree_skb(skb); 1581 kfree_skb(skb);
1509 if (ret && tt_req_node) { 1582 if (ret && tt_req_node) {
1510 spin_lock_bh(&bat_priv->tt_req_list_lock); 1583 spin_lock_bh(&bat_priv->tt.req_list_lock);
1511 list_del(&tt_req_node->list); 1584 list_del(&tt_req_node->list);
1512 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1585 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1513 kfree(tt_req_node); 1586 kfree(tt_req_node);
1514 } 1587 }
1515 return ret; 1588 return ret;
@@ -1530,6 +1603,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1530 uint16_t tt_len, tt_tot; 1603 uint16_t tt_len, tt_tot;
1531 struct sk_buff *skb = NULL; 1604 struct sk_buff *skb = NULL;
1532 struct batadv_tt_query_packet *tt_response; 1605 struct batadv_tt_query_packet *tt_response;
1606 uint8_t *packet_pos;
1533 size_t len; 1607 size_t len;
1534 1608
1535 batadv_dbg(BATADV_DBG_TT, bat_priv, 1609 batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1583,8 +1657,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1583 goto unlock; 1657 goto unlock;
1584 1658
1585 skb_reserve(skb, ETH_HLEN); 1659 skb_reserve(skb, ETH_HLEN);
1586 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, 1660 packet_pos = skb_put(skb, len);
1587 len); 1661 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1588 tt_response->ttvn = req_ttvn; 1662 tt_response->ttvn = req_ttvn;
1589 tt_response->tt_data = htons(tt_tot); 1663 tt_response->tt_data = htons(tt_tot);
1590 1664
@@ -1600,7 +1674,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1600 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1674 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1601 1675
1602 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1676 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1603 bat_priv->tt_global_hash, 1677 bat_priv->tt.global_hash,
1604 primary_if, 1678 primary_if,
1605 batadv_tt_global_valid, 1679 batadv_tt_global_valid,
1606 req_dst_orig_node); 1680 req_dst_orig_node);
@@ -1663,6 +1737,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1663 uint16_t tt_len, tt_tot; 1737 uint16_t tt_len, tt_tot;
1664 struct sk_buff *skb = NULL; 1738 struct sk_buff *skb = NULL;
1665 struct batadv_tt_query_packet *tt_response; 1739 struct batadv_tt_query_packet *tt_response;
1740 uint8_t *packet_pos;
1666 size_t len; 1741 size_t len;
1667 1742
1668 batadv_dbg(BATADV_DBG_TT, bat_priv, 1743 batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1671,7 +1746,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1671 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1746 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1672 1747
1673 1748
1674 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 1749 my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1675 req_ttvn = tt_request->ttvn; 1750 req_ttvn = tt_request->ttvn;
1676 1751
1677 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src); 1752 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1690,7 +1765,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1690 * is too big send the whole local translation table 1765 * is too big send the whole local translation table
1691 */ 1766 */
1692 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn || 1767 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
1693 !bat_priv->tt_buff) 1768 !bat_priv->tt.last_changeset)
1694 full_table = true; 1769 full_table = true;
1695 else 1770 else
1696 full_table = false; 1771 full_table = false;
@@ -1699,8 +1774,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1699 * I'll send only one packet with as much TT entries as I can 1774 * I'll send only one packet with as much TT entries as I can
1700 */ 1775 */
1701 if (!full_table) { 1776 if (!full_table) {
1702 spin_lock_bh(&bat_priv->tt_buff_lock); 1777 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
1703 tt_len = bat_priv->tt_buff_len; 1778 tt_len = bat_priv->tt.last_changeset_len;
1704 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1779 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1705 1780
1706 len = sizeof(*tt_response) + tt_len; 1781 len = sizeof(*tt_response) + tt_len;
@@ -1709,22 +1784,22 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1709 goto unlock; 1784 goto unlock;
1710 1785
1711 skb_reserve(skb, ETH_HLEN); 1786 skb_reserve(skb, ETH_HLEN);
1712 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, 1787 packet_pos = skb_put(skb, len);
1713 len); 1788 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1714 tt_response->ttvn = req_ttvn; 1789 tt_response->ttvn = req_ttvn;
1715 tt_response->tt_data = htons(tt_tot); 1790 tt_response->tt_data = htons(tt_tot);
1716 1791
1717 tt_buff = skb->data + sizeof(*tt_response); 1792 tt_buff = skb->data + sizeof(*tt_response);
1718 memcpy(tt_buff, bat_priv->tt_buff, 1793 memcpy(tt_buff, bat_priv->tt.last_changeset,
1719 bat_priv->tt_buff_len); 1794 bat_priv->tt.last_changeset_len);
1720 spin_unlock_bh(&bat_priv->tt_buff_lock); 1795 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1721 } else { 1796 } else {
1722 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt); 1797 tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
1723 tt_len *= sizeof(struct batadv_tt_change); 1798 tt_len *= sizeof(struct batadv_tt_change);
1724 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 1799 ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1725 1800
1726 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1801 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1727 bat_priv->tt_local_hash, 1802 bat_priv->tt.local_hash,
1728 primary_if, 1803 primary_if,
1729 batadv_tt_local_valid_entry, 1804 batadv_tt_local_valid_entry,
1730 NULL); 1805 NULL);
@@ -1756,7 +1831,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1756 goto out; 1831 goto out;
1757 1832
1758unlock: 1833unlock:
1759 spin_unlock_bh(&bat_priv->tt_buff_lock); 1834 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1760out: 1835out:
1761 if (orig_node) 1836 if (orig_node)
1762 batadv_orig_node_free_ref(orig_node); 1837 batadv_orig_node_free_ref(orig_node);
@@ -1909,14 +1984,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1909 } 1984 }
1910 1985
1911 /* Delete the tt_req_node from pending tt_requests list */ 1986 /* Delete the tt_req_node from pending tt_requests list */
1912 spin_lock_bh(&bat_priv->tt_req_list_lock); 1987 spin_lock_bh(&bat_priv->tt.req_list_lock);
1913 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1988 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1914 if (!batadv_compare_eth(node->addr, tt_response->src)) 1989 if (!batadv_compare_eth(node->addr, tt_response->src))
1915 continue; 1990 continue;
1916 list_del(&node->list); 1991 list_del(&node->list);
1917 kfree(node); 1992 kfree(node);
1918 } 1993 }
1919 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1994 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1920 1995
1921 /* Recalculate the CRC for this orig_node and store it */ 1996 /* Recalculate the CRC for this orig_node and store it */
1922 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); 1997 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1950,22 +2025,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
1950{ 2025{
1951 struct batadv_tt_roam_node *node, *safe; 2026 struct batadv_tt_roam_node *node, *safe;
1952 2027
1953 spin_lock_bh(&bat_priv->tt_roam_list_lock); 2028 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1954 2029
1955 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 2030 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
1956 list_del(&node->list); 2031 list_del(&node->list);
1957 kfree(node); 2032 kfree(node);
1958 } 2033 }
1959 2034
1960 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 2035 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
1961} 2036}
1962 2037
1963static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) 2038static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
1964{ 2039{
1965 struct batadv_tt_roam_node *node, *safe; 2040 struct batadv_tt_roam_node *node, *safe;
1966 2041
1967 spin_lock_bh(&bat_priv->tt_roam_list_lock); 2042 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1968 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 2043 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
1969 if (!batadv_has_timed_out(node->first_time, 2044 if (!batadv_has_timed_out(node->first_time,
1970 BATADV_ROAMING_MAX_TIME)) 2045 BATADV_ROAMING_MAX_TIME))
1971 continue; 2046 continue;
@@ -1973,7 +2048,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
1973 list_del(&node->list); 2048 list_del(&node->list);
1974 kfree(node); 2049 kfree(node);
1975 } 2050 }
1976 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 2051 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
1977} 2052}
1978 2053
1979/* This function checks whether the client already reached the 2054/* This function checks whether the client already reached the
@@ -1988,11 +2063,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
1988 struct batadv_tt_roam_node *tt_roam_node; 2063 struct batadv_tt_roam_node *tt_roam_node;
1989 bool ret = false; 2064 bool ret = false;
1990 2065
1991 spin_lock_bh(&bat_priv->tt_roam_list_lock); 2066 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1992 /* The new tt_req will be issued only if I'm not waiting for a 2067 /* The new tt_req will be issued only if I'm not waiting for a
1993 * reply from the same orig_node yet 2068 * reply from the same orig_node yet
1994 */ 2069 */
1995 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { 2070 list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
1996 if (!batadv_compare_eth(tt_roam_node->addr, client)) 2071 if (!batadv_compare_eth(tt_roam_node->addr, client))
1997 continue; 2072 continue;
1998 2073
@@ -2017,12 +2092,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
2017 BATADV_ROAMING_MAX_COUNT - 1); 2092 BATADV_ROAMING_MAX_COUNT - 1);
2018 memcpy(tt_roam_node->addr, client, ETH_ALEN); 2093 memcpy(tt_roam_node->addr, client, ETH_ALEN);
2019 2094
2020 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list); 2095 list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
2021 ret = true; 2096 ret = true;
2022 } 2097 }
2023 2098
2024unlock: 2099unlock:
2025 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 2100 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2026 return ret; 2101 return ret;
2027} 2102}
2028 2103
@@ -2086,13 +2161,15 @@ out:
2086static void batadv_tt_purge(struct work_struct *work) 2161static void batadv_tt_purge(struct work_struct *work)
2087{ 2162{
2088 struct delayed_work *delayed_work; 2163 struct delayed_work *delayed_work;
2164 struct batadv_priv_tt *priv_tt;
2089 struct batadv_priv *bat_priv; 2165 struct batadv_priv *bat_priv;
2090 2166
2091 delayed_work = container_of(work, struct delayed_work, work); 2167 delayed_work = container_of(work, struct delayed_work, work);
2092 bat_priv = container_of(delayed_work, struct batadv_priv, tt_work); 2168 priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
2169 bat_priv = container_of(priv_tt, struct batadv_priv, tt);
2093 2170
2094 batadv_tt_local_purge(bat_priv); 2171 batadv_tt_local_purge(bat_priv);
2095 batadv_tt_global_roam_purge(bat_priv); 2172 batadv_tt_global_purge(bat_priv);
2096 batadv_tt_req_purge(bat_priv); 2173 batadv_tt_req_purge(bat_priv);
2097 batadv_tt_roam_purge(bat_priv); 2174 batadv_tt_roam_purge(bat_priv);
2098 2175
@@ -2101,7 +2178,7 @@ static void batadv_tt_purge(struct work_struct *work)
2101 2178
2102void batadv_tt_free(struct batadv_priv *bat_priv) 2179void batadv_tt_free(struct batadv_priv *bat_priv)
2103{ 2180{
2104 cancel_delayed_work_sync(&bat_priv->tt_work); 2181 cancel_delayed_work_sync(&bat_priv->tt.work);
2105 2182
2106 batadv_tt_local_table_free(bat_priv); 2183 batadv_tt_local_table_free(bat_priv);
2107 batadv_tt_global_table_free(bat_priv); 2184 batadv_tt_global_table_free(bat_priv);
@@ -2109,7 +2186,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
2109 batadv_tt_changes_list_free(bat_priv); 2186 batadv_tt_changes_list_free(bat_priv);
2110 batadv_tt_roam_list_free(bat_priv); 2187 batadv_tt_roam_list_free(bat_priv);
2111 2188
2112 kfree(bat_priv->tt_buff); 2189 kfree(bat_priv->tt.last_changeset);
2113} 2190}
2114 2191
2115/* This function will enable or disable the specified flags for all the entries 2192/* This function will enable or disable the specified flags for all the entries
@@ -2153,7 +2230,7 @@ out:
2153/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ 2230/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2154static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) 2231static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2155{ 2232{
2156 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 2233 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
2157 struct batadv_tt_common_entry *tt_common; 2234 struct batadv_tt_common_entry *tt_common;
2158 struct batadv_tt_local_entry *tt_local; 2235 struct batadv_tt_local_entry *tt_local;
2159 struct hlist_node *node, *node_tmp; 2236 struct hlist_node *node, *node_tmp;
@@ -2178,7 +2255,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2178 "Deleting local tt entry (%pM): pending\n", 2255 "Deleting local tt entry (%pM): pending\n",
2179 tt_common->addr); 2256 tt_common->addr);
2180 2257
2181 atomic_dec(&bat_priv->num_local_tt); 2258 atomic_dec(&bat_priv->tt.local_entry_num);
2182 hlist_del_rcu(node); 2259 hlist_del_rcu(node);
2183 tt_local = container_of(tt_common, 2260 tt_local = container_of(tt_common,
2184 struct batadv_tt_local_entry, 2261 struct batadv_tt_local_entry,
@@ -2196,26 +2273,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2196{ 2273{
2197 uint16_t changed_num = 0; 2274 uint16_t changed_num = 0;
2198 2275
2199 if (atomic_read(&bat_priv->tt_local_changes) < 1) 2276 if (atomic_read(&bat_priv->tt.local_changes) < 1)
2200 return -ENOENT; 2277 return -ENOENT;
2201 2278
2202 changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash, 2279 changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
2203 BATADV_TT_CLIENT_NEW, false); 2280 BATADV_TT_CLIENT_NEW, false);
2204 2281
2205 /* all reset entries have to be counted as local entries */ 2282 /* all reset entries have to be counted as local entries */
2206 atomic_add(changed_num, &bat_priv->num_local_tt); 2283 atomic_add(changed_num, &bat_priv->tt.local_entry_num);
2207 batadv_tt_local_purge_pending_clients(bat_priv); 2284 batadv_tt_local_purge_pending_clients(bat_priv);
2208 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv); 2285 bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
2209 2286
2210 /* Increment the TTVN only once per OGM interval */ 2287 /* Increment the TTVN only once per OGM interval */
2211 atomic_inc(&bat_priv->ttvn); 2288 atomic_inc(&bat_priv->tt.vn);
2212 batadv_dbg(BATADV_DBG_TT, bat_priv, 2289 batadv_dbg(BATADV_DBG_TT, bat_priv,
2213 "Local changes committed, updating to ttvn %u\n", 2290 "Local changes committed, updating to ttvn %u\n",
2214 (uint8_t)atomic_read(&bat_priv->ttvn)); 2291 (uint8_t)atomic_read(&bat_priv->tt.vn));
2215 bat_priv->tt_poss_change = false; 2292 bat_priv->tt.poss_change = false;
2216 2293
2217 /* reset the sending counter */ 2294 /* reset the sending counter */
2218 atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); 2295 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2219 2296
2220 return batadv_tt_changes_fill_buff(bat_priv, packet_buff, 2297 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2221 packet_buff_len, packet_min_len); 2298 packet_buff_len, packet_min_len);
@@ -2235,7 +2312,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2235 2312
2236 /* if the changes have been sent often enough */ 2313 /* if the changes have been sent often enough */
2237 if ((tt_num_changes < 0) && 2314 if ((tt_num_changes < 0) &&
2238 (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) { 2315 (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
2239 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, 2316 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2240 packet_min_len, packet_min_len); 2317 packet_min_len, packet_min_len);
2241 tt_num_changes = 0; 2318 tt_num_changes = 0;
@@ -2366,3 +2443,22 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2366out: 2443out:
2367 return ret; 2444 return ret;
2368} 2445}
2446
2447bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
2448 struct batadv_orig_node *orig_node,
2449 const unsigned char *addr)
2450{
2451 bool ret = false;
2452
2453 if (!batadv_tt_global_add(bat_priv, orig_node, addr,
2454 BATADV_TT_CLIENT_TEMP,
2455 atomic_read(&orig_node->last_ttvn)))
2456 goto out;
2457
2458 batadv_dbg(BATADV_DBG_TT, bat_priv,
2459 "Added temporary global client (addr: %pM orig: %pM)\n",
2460 addr, orig_node->orig);
2461 ret = true;
2462out:
2463 return ret;
2464}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index ffa87355096b..811fffd4760c 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
59 int packet_min_len); 59 int packet_min_len);
60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, 60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
61 uint8_t *addr); 61 uint8_t *addr);
62 62bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
63 struct batadv_orig_node *orig_node,
64 const unsigned char *addr);
63 65
64#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 66#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 12635fd2c3d3..2ed82caacdca 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -145,6 +145,11 @@ struct batadv_bcast_duplist_entry {
145#endif 145#endif
146 146
147enum batadv_counters { 147enum batadv_counters {
148 BATADV_CNT_TX,
149 BATADV_CNT_TX_BYTES,
150 BATADV_CNT_TX_DROPPED,
151 BATADV_CNT_RX,
152 BATADV_CNT_RX_BYTES,
148 BATADV_CNT_FORWARD, 153 BATADV_CNT_FORWARD,
149 BATADV_CNT_FORWARD_BYTES, 154 BATADV_CNT_FORWARD_BYTES,
150 BATADV_CNT_MGMT_TX, 155 BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@ enum batadv_counters {
160 BATADV_CNT_NUM, 165 BATADV_CNT_NUM,
161}; 166};
162 167
168/**
169 * struct batadv_priv_tt - per mesh interface translation table data
170 * @vn: translation table version number
171 * @local_changes: changes registered in an originator interval
172 * @poss_change: Detect an ongoing roaming phase. If true, then this node
173 * received a roaming_adv and has to inspect every packet directed to it to
174 * check whether it still is the true destination or not. This flag will be
175 * reset to false as soon as the this node's ttvn is increased
176 * @changes_list: tracks tt local changes within an originator interval
177 * @req_list: list of pending tt_requests
178 * @local_crc: Checksum of the local table, recomputed before sending a new OGM
179 */
180struct batadv_priv_tt {
181 atomic_t vn;
182 atomic_t ogm_append_cnt;
183 atomic_t local_changes;
184 bool poss_change;
185 struct list_head changes_list;
186 struct batadv_hashtable *local_hash;
187 struct batadv_hashtable *global_hash;
188 struct list_head req_list;
189 struct list_head roam_list;
190 spinlock_t changes_list_lock; /* protects changes */
191 spinlock_t req_list_lock; /* protects req_list */
192 spinlock_t roam_list_lock; /* protects roam_list */
193 atomic_t local_entry_num;
194 uint16_t local_crc;
195 unsigned char *last_changeset;
196 int16_t last_changeset_len;
197 spinlock_t last_changeset_lock; /* protects last_changeset */
198 struct delayed_work work;
199};
200
201#ifdef CONFIG_BATMAN_ADV_BLA
202struct batadv_priv_bla {
203 atomic_t num_requests; /* number of bla requests in flight */
204 struct batadv_hashtable *claim_hash;
205 struct batadv_hashtable *backbone_hash;
206 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
207 int bcast_duplist_curr;
208 struct batadv_bla_claim_dst claim_dest;
209 struct delayed_work work;
210};
211#endif
212
213struct batadv_priv_gw {
214 struct hlist_head list;
215 spinlock_t list_lock; /* protects gw_list and curr_gw */
216 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
217 atomic_t reselect;
218};
219
220struct batadv_priv_vis {
221 struct list_head send_list;
222 struct batadv_hashtable *hash;
223 spinlock_t hash_lock; /* protects hash */
224 spinlock_t list_lock; /* protects info::recv_list */
225 struct delayed_work work;
226 struct batadv_vis_info *my_info;
227};
228
163struct batadv_priv { 229struct batadv_priv {
164 atomic_t mesh_state; 230 atomic_t mesh_state;
165 struct net_device_stats stats; 231 struct net_device_stats stats;
@@ -179,64 +245,24 @@ struct batadv_priv {
179 atomic_t bcast_seqno; 245 atomic_t bcast_seqno;
180 atomic_t bcast_queue_left; 246 atomic_t bcast_queue_left;
181 atomic_t batman_queue_left; 247 atomic_t batman_queue_left;
182 atomic_t ttvn; /* translation table version number */
183 atomic_t tt_ogm_append_cnt;
184 atomic_t tt_local_changes; /* changes registered in a OGM interval */
185 atomic_t bla_num_requests; /* number of bla requests in flight */
186 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
187 * If true, then I received a Roaming_adv and I have to inspect every
188 * packet directed to me to check whether I am still the true
189 * destination or not. This flag will be reset to false as soon as I
190 * increase my TTVN
191 */
192 bool tt_poss_change;
193 char num_ifaces; 248 char num_ifaces;
194 struct batadv_debug_log *debug_log; 249 struct batadv_debug_log *debug_log;
195 struct kobject *mesh_obj; 250 struct kobject *mesh_obj;
196 struct dentry *debug_dir; 251 struct dentry *debug_dir;
197 struct hlist_head forw_bat_list; 252 struct hlist_head forw_bat_list;
198 struct hlist_head forw_bcast_list; 253 struct hlist_head forw_bcast_list;
199 struct hlist_head gw_list;
200 struct list_head tt_changes_list; /* tracks changes in a OGM int */
201 struct list_head vis_send_list;
202 struct batadv_hashtable *orig_hash; 254 struct batadv_hashtable *orig_hash;
203 struct batadv_hashtable *tt_local_hash;
204 struct batadv_hashtable *tt_global_hash;
205#ifdef CONFIG_BATMAN_ADV_BLA
206 struct batadv_hashtable *claim_hash;
207 struct batadv_hashtable *backbone_hash;
208#endif
209 struct list_head tt_req_list; /* list of pending tt_requests */
210 struct list_head tt_roam_list;
211 struct batadv_hashtable *vis_hash;
212#ifdef CONFIG_BATMAN_ADV_BLA
213 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
214 int bcast_duplist_curr;
215 struct batadv_bla_claim_dst claim_dest;
216#endif
217 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 255 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
218 spinlock_t forw_bcast_list_lock; /* protects */ 256 spinlock_t forw_bcast_list_lock; /* protects */
219 spinlock_t tt_changes_list_lock; /* protects tt_changes */
220 spinlock_t tt_req_list_lock; /* protects tt_req_list */
221 spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
222 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
223 spinlock_t vis_hash_lock; /* protects vis_hash */
224 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
225 atomic_t num_local_tt;
226 /* Checksum of the local table, recomputed before sending a new OGM */
227 uint16_t tt_crc;
228 unsigned char *tt_buff;
229 int16_t tt_buff_len;
230 spinlock_t tt_buff_lock; /* protects tt_buff */
231 struct delayed_work tt_work;
232 struct delayed_work orig_work; 257 struct delayed_work orig_work;
233 struct delayed_work vis_work;
234 struct delayed_work bla_work;
235 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
236 atomic_t gw_reselect;
237 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ 258 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
238 struct batadv_vis_info *my_vis_info;
239 struct batadv_algo_ops *bat_algo_ops; 259 struct batadv_algo_ops *bat_algo_ops;
260#ifdef CONFIG_BATMAN_ADV_BLA
261 struct batadv_priv_bla bla;
262#endif
263 struct batadv_priv_gw gw;
264 struct batadv_priv_tt tt;
265 struct batadv_priv_vis vis;
240}; 266};
241 267
242struct batadv_socket_client { 268struct batadv_socket_client {
@@ -258,6 +284,7 @@ struct batadv_tt_common_entry {
258 uint8_t addr[ETH_ALEN]; 284 uint8_t addr[ETH_ALEN];
259 struct hlist_node hash_entry; 285 struct hlist_node hash_entry;
260 uint16_t flags; 286 uint16_t flags;
287 unsigned long added_at;
261 atomic_t refcount; 288 atomic_t refcount;
262 struct rcu_head rcu; 289 struct rcu_head rcu;
263}; 290};
@@ -277,6 +304,7 @@ struct batadv_tt_global_entry {
277struct batadv_tt_orig_list_entry { 304struct batadv_tt_orig_list_entry {
278 struct batadv_orig_node *orig_node; 305 struct batadv_orig_node *orig_node;
279 uint8_t ttvn; 306 uint8_t ttvn;
307 atomic_t refcount;
280 struct rcu_head rcu; 308 struct rcu_head rcu;
281 struct hlist_node list; 309 struct hlist_node list;
282}; 310};
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 00164645b3f7..f39723281ca1 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,6 +39,7 @@ batadv_frag_merge_packet(struct list_head *head,
39 struct batadv_unicast_packet *unicast_packet; 39 struct batadv_unicast_packet *unicast_packet;
40 int hdr_len = sizeof(*unicast_packet); 40 int hdr_len = sizeof(*unicast_packet);
41 int uni_diff = sizeof(*up) - hdr_len; 41 int uni_diff = sizeof(*up) - hdr_len;
42 uint8_t *packet_pos;
42 43
43 up = (struct batadv_unicast_frag_packet *)skb->data; 44 up = (struct batadv_unicast_frag_packet *)skb->data;
44 /* set skb to the first part and tmp_skb to the second part */ 45 /* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@ batadv_frag_merge_packet(struct list_head *head,
65 kfree_skb(tmp_skb); 66 kfree_skb(tmp_skb);
66 67
67 memmove(skb->data + uni_diff, skb->data, hdr_len); 68 memmove(skb->data + uni_diff, skb->data, hdr_len);
68 unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb, 69 packet_pos = skb_pull(skb, uni_diff);
69 uni_diff); 70 unicast_packet = (struct batadv_unicast_packet *)packet_pos;
70 unicast_packet->header.packet_type = BATADV_UNICAST; 71 unicast_packet->header.packet_type = BATADV_UNICAST;
71 72
72 return skb; 73 return skb;
@@ -121,6 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
121{ 122{
122 struct batadv_frag_packet_list_entry *tfp; 123 struct batadv_frag_packet_list_entry *tfp;
123 struct batadv_unicast_frag_packet *tmp_up = NULL; 124 struct batadv_unicast_frag_packet *tmp_up = NULL;
125 int is_head_tmp, is_head;
124 uint16_t search_seqno; 126 uint16_t search_seqno;
125 127
126 if (up->flags & BATADV_UNI_FRAG_HEAD) 128 if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@ batadv_frag_search_packet(struct list_head *head,
128 else 130 else
129 search_seqno = ntohs(up->seqno)-1; 131 search_seqno = ntohs(up->seqno)-1;
130 132
133 is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
134
131 list_for_each_entry(tfp, head, list) { 135 list_for_each_entry(tfp, head, list) {
132 136
133 if (!tfp->skb) 137 if (!tfp->skb)
@@ -139,9 +143,8 @@ batadv_frag_search_packet(struct list_head *head,
139 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data; 143 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
140 144
141 if (tfp->seqno == search_seqno) { 145 if (tfp->seqno == search_seqno) {
142 146 is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
143 if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) != 147 if (is_head_tmp != is_head)
144 (up->flags & BATADV_UNI_FRAG_HEAD))
145 return tfp; 148 return tfp;
146 else 149 else
147 goto mov_tail; 150 goto mov_tail;
@@ -334,8 +337,7 @@ find_router:
334 /* copy the destination for faster routing */ 337 /* copy the destination for faster routing */
335 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 338 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
336 /* set the destination tt version number */ 339 /* set the destination tt version number */
337 unicast_packet->ttvn = 340 unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
338 (uint8_t)atomic_read(&orig_node->last_ttvn);
339 341
340 /* inform the destination node that we are still missing a correct route 342 /* inform the destination node that we are still missing a correct route
341 * for this client. The destination will receive this packet and will 343 * for this client. The destination will receive this packet and will
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 2a2ea0681469..5abd1454fb07 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref)
41 bat_priv = info->bat_priv; 41 bat_priv = info->bat_priv;
42 42
43 list_del_init(&info->send_list); 43 list_del_init(&info->send_list);
44 spin_lock_bh(&bat_priv->vis_list_lock); 44 spin_lock_bh(&bat_priv->vis.list_lock);
45 list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { 45 list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
46 list_del(&entry->list); 46 list_del(&entry->list);
47 kfree(entry); 47 kfree(entry);
48 } 48 }
49 49
50 spin_unlock_bh(&bat_priv->vis_list_lock); 50 spin_unlock_bh(&bat_priv->vis.list_lock);
51 kfree_skb(info->skb_packet); 51 kfree_skb(info->skb_packet);
52 kfree(info); 52 kfree(info);
53} 53}
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
94static struct batadv_vis_info * 94static struct batadv_vis_info *
95batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) 95batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
96{ 96{
97 struct batadv_hashtable *hash = bat_priv->vis_hash; 97 struct batadv_hashtable *hash = bat_priv->vis.hash;
98 struct hlist_head *head; 98 struct hlist_head *head;
99 struct hlist_node *node; 99 struct hlist_node *node;
100 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; 100 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
252 struct hlist_head *head; 252 struct hlist_head *head;
253 struct net_device *net_dev = (struct net_device *)seq->private; 253 struct net_device *net_dev = (struct net_device *)seq->private;
254 struct batadv_priv *bat_priv = netdev_priv(net_dev); 254 struct batadv_priv *bat_priv = netdev_priv(net_dev);
255 struct batadv_hashtable *hash = bat_priv->vis_hash; 255 struct batadv_hashtable *hash = bat_priv->vis.hash;
256 uint32_t i; 256 uint32_t i;
257 int ret = 0; 257 int ret = 0;
258 int vis_server = atomic_read(&bat_priv->vis_mode); 258 int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
264 if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE) 264 if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
265 goto out; 265 goto out;
266 266
267 spin_lock_bh(&bat_priv->vis_hash_lock); 267 spin_lock_bh(&bat_priv->vis.hash_lock);
268 for (i = 0; i < hash->size; i++) { 268 for (i = 0; i < hash->size; i++) {
269 head = &hash->table[i]; 269 head = &hash->table[i];
270 batadv_vis_seq_print_text_bucket(seq, head); 270 batadv_vis_seq_print_text_bucket(seq, head);
271 } 271 }
272 spin_unlock_bh(&bat_priv->vis_hash_lock); 272 spin_unlock_bh(&bat_priv->vis.hash_lock);
273 273
274out: 274out:
275 if (primary_if) 275 if (primary_if)
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv,
285{ 285{
286 if (list_empty(&info->send_list)) { 286 if (list_empty(&info->send_list)) {
287 kref_get(&info->refcount); 287 kref_get(&info->refcount);
288 list_add_tail(&info->send_list, &bat_priv->vis_send_list); 288 list_add_tail(&info->send_list, &bat_priv->vis.send_list);
289 } 289 }
290} 290}
291 291
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv,
311 return; 311 return;
312 312
313 memcpy(entry->mac, mac, ETH_ALEN); 313 memcpy(entry->mac, mac, ETH_ALEN);
314 spin_lock_bh(&bat_priv->vis_list_lock); 314 spin_lock_bh(&bat_priv->vis.list_lock);
315 list_add_tail(&entry->list, recv_list); 315 list_add_tail(&entry->list, recv_list);
316 spin_unlock_bh(&bat_priv->vis_list_lock); 316 spin_unlock_bh(&bat_priv->vis.list_lock);
317} 317}
318 318
319/* returns 1 if this mac is in the recv_list */ 319/* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
323{ 323{
324 const struct batadv_recvlist_node *entry; 324 const struct batadv_recvlist_node *entry;
325 325
326 spin_lock_bh(&bat_priv->vis_list_lock); 326 spin_lock_bh(&bat_priv->vis.list_lock);
327 list_for_each_entry(entry, recv_list, list) { 327 list_for_each_entry(entry, recv_list, list) {
328 if (batadv_compare_eth(entry->mac, mac)) { 328 if (batadv_compare_eth(entry->mac, mac)) {
329 spin_unlock_bh(&bat_priv->vis_list_lock); 329 spin_unlock_bh(&bat_priv->vis.list_lock);
330 return 1; 330 return 1;
331 } 331 }
332 } 332 }
333 spin_unlock_bh(&bat_priv->vis_list_lock); 333 spin_unlock_bh(&bat_priv->vis.list_lock);
334 return 0; 334 return 0;
335} 335}
336 336
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
354 354
355 *is_new = 0; 355 *is_new = 0;
356 /* sanity check */ 356 /* sanity check */
357 if (!bat_priv->vis_hash) 357 if (!bat_priv->vis.hash)
358 return NULL; 358 return NULL;
359 359
360 /* see if the packet is already in vis_hash */ 360 /* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
385 } 385 }
386 } 386 }
387 /* remove old entry */ 387 /* remove old entry */
388 batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp, 388 batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
389 batadv_vis_info_choose, old_info); 389 batadv_vis_info_choose, old_info);
390 batadv_send_list_del(old_info); 390 batadv_send_list_del(old_info);
391 kref_put(&old_info->refcount, batadv_free_info); 391 kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
426 batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); 426 batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
427 427
428 /* try to add it */ 428 /* try to add it */
429 hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp, 429 hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
430 batadv_vis_info_choose, info, 430 batadv_vis_info_choose, info,
431 &info->hash_entry); 431 &info->hash_entry);
432 if (hash_added != 0) { 432 if (hash_added != 0) {
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
449 449
450 make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC); 450 make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
451 451
452 spin_lock_bh(&bat_priv->vis_hash_lock); 452 spin_lock_bh(&bat_priv->vis.hash_lock);
453 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, 453 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
454 &is_new, make_broadcast); 454 &is_new, make_broadcast);
455 if (!info) 455 if (!info)
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
461 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new) 461 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
462 batadv_send_list_add(bat_priv, info); 462 batadv_send_list_add(bat_priv, info);
463end: 463end:
464 spin_unlock_bh(&bat_priv->vis_hash_lock); 464 spin_unlock_bh(&bat_priv->vis.hash_lock);
465} 465}
466 466
467/* handle an incoming client update packet and schedule forward if needed. */ 467/* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
484 batadv_is_my_mac(vis_packet->target_orig)) 484 batadv_is_my_mac(vis_packet->target_orig))
485 are_target = 1; 485 are_target = 1;
486 486
487 spin_lock_bh(&bat_priv->vis_hash_lock); 487 spin_lock_bh(&bat_priv->vis.hash_lock);
488 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, 488 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
489 &is_new, are_target); 489 &is_new, are_target);
490 490
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
505 } 505 }
506 506
507end: 507end:
508 spin_unlock_bh(&bat_priv->vis_hash_lock); 508 spin_unlock_bh(&bat_priv->vis.hash_lock);
509} 509}
510 510
511/* Walk the originators and find the VIS server with the best tq. Set the packet 511/* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
574 struct hlist_head *head; 574 struct hlist_head *head;
575 struct batadv_orig_node *orig_node; 575 struct batadv_orig_node *orig_node;
576 struct batadv_neigh_node *router; 576 struct batadv_neigh_node *router;
577 struct batadv_vis_info *info = bat_priv->my_vis_info; 577 struct batadv_vis_info *info = bat_priv->vis.my_info;
578 struct batadv_vis_packet *packet; 578 struct batadv_vis_packet *packet;
579 struct batadv_vis_info_entry *entry; 579 struct batadv_vis_info_entry *entry;
580 struct batadv_tt_common_entry *tt_common_entry; 580 struct batadv_tt_common_entry *tt_common_entry;
581 uint8_t *packet_pos;
581 int best_tq = -1; 582 int best_tq = -1;
582 uint32_t i; 583 uint32_t i;
583 584
@@ -618,8 +619,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
618 goto next; 619 goto next;
619 620
620 /* fill one entry into buffer. */ 621 /* fill one entry into buffer. */
621 entry = (struct batadv_vis_info_entry *) 622 packet_pos = skb_put(info->skb_packet, sizeof(*entry));
622 skb_put(info->skb_packet, sizeof(*entry)); 623 entry = (struct batadv_vis_info_entry *)packet_pos;
623 memcpy(entry->src, 624 memcpy(entry->src,
624 router->if_incoming->net_dev->dev_addr, 625 router->if_incoming->net_dev->dev_addr,
625 ETH_ALEN); 626 ETH_ALEN);
@@ -636,7 +637,7 @@ next:
636 rcu_read_unlock(); 637 rcu_read_unlock();
637 } 638 }
638 639
639 hash = bat_priv->tt_local_hash; 640 hash = bat_priv->tt.local_hash;
640 641
641 for (i = 0; i < hash->size; i++) { 642 for (i = 0; i < hash->size; i++) {
642 head = &hash->table[i]; 643 head = &hash->table[i];
@@ -644,9 +645,8 @@ next:
644 rcu_read_lock(); 645 rcu_read_lock();
645 hlist_for_each_entry_rcu(tt_common_entry, node, head, 646 hlist_for_each_entry_rcu(tt_common_entry, node, head,
646 hash_entry) { 647 hash_entry) {
647 entry = (struct batadv_vis_info_entry *) 648 packet_pos = skb_put(info->skb_packet, sizeof(*entry));
648 skb_put(info->skb_packet, 649 entry = (struct batadv_vis_info_entry *)packet_pos;
649 sizeof(*entry));
650 memset(entry->src, 0, ETH_ALEN); 650 memset(entry->src, 0, ETH_ALEN);
651 memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN); 651 memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
652 entry->quality = 0; /* 0 means TT */ 652 entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@ unlock:
671static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) 671static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
672{ 672{
673 uint32_t i; 673 uint32_t i;
674 struct batadv_hashtable *hash = bat_priv->vis_hash; 674 struct batadv_hashtable *hash = bat_priv->vis.hash;
675 struct hlist_node *node, *node_tmp; 675 struct hlist_node *node, *node_tmp;
676 struct hlist_head *head; 676 struct hlist_head *head;
677 struct batadv_vis_info *info; 677 struct batadv_vis_info *info;
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
682 hlist_for_each_entry_safe(info, node, node_tmp, 682 hlist_for_each_entry_safe(info, node, node_tmp,
683 head, hash_entry) { 683 head, hash_entry) {
684 /* never purge own data. */ 684 /* never purge own data. */
685 if (info == bat_priv->my_vis_info) 685 if (info == bat_priv->vis.my_info)
686 continue; 686 continue;
687 687
688 if (batadv_has_timed_out(info->first_seen, 688 if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@ out:
814/* called from timer; send (and maybe generate) vis packet. */ 814/* called from timer; send (and maybe generate) vis packet. */
815static void batadv_send_vis_packets(struct work_struct *work) 815static void batadv_send_vis_packets(struct work_struct *work)
816{ 816{
817 struct delayed_work *delayed_work = 817 struct delayed_work *delayed_work;
818 container_of(work, struct delayed_work, work);
819 struct batadv_priv *bat_priv; 818 struct batadv_priv *bat_priv;
819 struct batadv_priv_vis *priv_vis;
820 struct batadv_vis_info *info; 820 struct batadv_vis_info *info;
821 821
822 bat_priv = container_of(delayed_work, struct batadv_priv, vis_work); 822 delayed_work = container_of(work, struct delayed_work, work);
823 spin_lock_bh(&bat_priv->vis_hash_lock); 823 priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
824 bat_priv = container_of(priv_vis, struct batadv_priv, vis);
825 spin_lock_bh(&bat_priv->vis.hash_lock);
824 batadv_purge_vis_packets(bat_priv); 826 batadv_purge_vis_packets(bat_priv);
825 827
826 if (batadv_generate_vis_packet(bat_priv) == 0) { 828 if (batadv_generate_vis_packet(bat_priv) == 0) {
827 /* schedule if generation was successful */ 829 /* schedule if generation was successful */
828 batadv_send_list_add(bat_priv, bat_priv->my_vis_info); 830 batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
829 } 831 }
830 832
831 while (!list_empty(&bat_priv->vis_send_list)) { 833 while (!list_empty(&bat_priv->vis.send_list)) {
832 info = list_first_entry(&bat_priv->vis_send_list, 834 info = list_first_entry(&bat_priv->vis.send_list,
833 typeof(*info), send_list); 835 typeof(*info), send_list);
834 836
835 kref_get(&info->refcount); 837 kref_get(&info->refcount);
836 spin_unlock_bh(&bat_priv->vis_hash_lock); 838 spin_unlock_bh(&bat_priv->vis.hash_lock);
837 839
838 batadv_send_vis_packet(bat_priv, info); 840 batadv_send_vis_packet(bat_priv, info);
839 841
840 spin_lock_bh(&bat_priv->vis_hash_lock); 842 spin_lock_bh(&bat_priv->vis.hash_lock);
841 batadv_send_list_del(info); 843 batadv_send_list_del(info);
842 kref_put(&info->refcount, batadv_free_info); 844 kref_put(&info->refcount, batadv_free_info);
843 } 845 }
844 spin_unlock_bh(&bat_priv->vis_hash_lock); 846 spin_unlock_bh(&bat_priv->vis.hash_lock);
845 batadv_start_vis_timer(bat_priv); 847 batadv_start_vis_timer(bat_priv);
846} 848}
847 849
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
856 unsigned long first_seen; 858 unsigned long first_seen;
857 struct sk_buff *tmp_skb; 859 struct sk_buff *tmp_skb;
858 860
859 if (bat_priv->vis_hash) 861 if (bat_priv->vis.hash)
860 return 0; 862 return 0;
861 863
862 spin_lock_bh(&bat_priv->vis_hash_lock); 864 spin_lock_bh(&bat_priv->vis.hash_lock);
863 865
864 bat_priv->vis_hash = batadv_hash_new(256); 866 bat_priv->vis.hash = batadv_hash_new(256);
865 if (!bat_priv->vis_hash) { 867 if (!bat_priv->vis.hash) {
866 pr_err("Can't initialize vis_hash\n"); 868 pr_err("Can't initialize vis_hash\n");
867 goto err; 869 goto err;
868 } 870 }
869 871
870 bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); 872 bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
871 if (!bat_priv->my_vis_info) 873 if (!bat_priv->vis.my_info)
872 goto err; 874 goto err;
873 875
874 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN; 876 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
875 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len); 877 bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
876 if (!bat_priv->my_vis_info->skb_packet) 878 if (!bat_priv->vis.my_info->skb_packet)
877 goto free_info; 879 goto free_info;
878 880
879 skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); 881 skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
880 tmp_skb = bat_priv->my_vis_info->skb_packet; 882 tmp_skb = bat_priv->vis.my_info->skb_packet;
881 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); 883 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
882 884
883 /* prefill the vis info */ 885 /* prefill the vis info */
884 first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL); 886 first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
885 bat_priv->my_vis_info->first_seen = first_seen; 887 bat_priv->vis.my_info->first_seen = first_seen;
886 INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list); 888 INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
887 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); 889 INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
888 kref_init(&bat_priv->my_vis_info->refcount); 890 kref_init(&bat_priv->vis.my_info->refcount);
889 bat_priv->my_vis_info->bat_priv = bat_priv; 891 bat_priv->vis.my_info->bat_priv = bat_priv;
890 packet->header.version = BATADV_COMPAT_VERSION; 892 packet->header.version = BATADV_COMPAT_VERSION;
891 packet->header.packet_type = BATADV_VIS; 893 packet->header.packet_type = BATADV_VIS;
892 packet->header.ttl = BATADV_TTL; 894 packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
894 packet->reserved = 0; 896 packet->reserved = 0;
895 packet->entries = 0; 897 packet->entries = 0;
896 898
897 INIT_LIST_HEAD(&bat_priv->vis_send_list); 899 INIT_LIST_HEAD(&bat_priv->vis.send_list);
898 900
899 hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp, 901 hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
900 batadv_vis_info_choose, 902 batadv_vis_info_choose,
901 bat_priv->my_vis_info, 903 bat_priv->vis.my_info,
902 &bat_priv->my_vis_info->hash_entry); 904 &bat_priv->vis.my_info->hash_entry);
903 if (hash_added != 0) { 905 if (hash_added != 0) {
904 pr_err("Can't add own vis packet into hash\n"); 906 pr_err("Can't add own vis packet into hash\n");
905 /* not in hash, need to remove it manually. */ 907 /* not in hash, need to remove it manually. */
906 kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info); 908 kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
907 goto err; 909 goto err;
908 } 910 }
909 911
910 spin_unlock_bh(&bat_priv->vis_hash_lock); 912 spin_unlock_bh(&bat_priv->vis.hash_lock);
911 batadv_start_vis_timer(bat_priv); 913 batadv_start_vis_timer(bat_priv);
912 return 0; 914 return 0;
913 915
914free_info: 916free_info:
915 kfree(bat_priv->my_vis_info); 917 kfree(bat_priv->vis.my_info);
916 bat_priv->my_vis_info = NULL; 918 bat_priv->vis.my_info = NULL;
917err: 919err:
918 spin_unlock_bh(&bat_priv->vis_hash_lock); 920 spin_unlock_bh(&bat_priv->vis.hash_lock);
919 batadv_vis_quit(bat_priv); 921 batadv_vis_quit(bat_priv);
920 return -ENOMEM; 922 return -ENOMEM;
921} 923}
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg)
933/* shutdown vis-server */ 935/* shutdown vis-server */
934void batadv_vis_quit(struct batadv_priv *bat_priv) 936void batadv_vis_quit(struct batadv_priv *bat_priv)
935{ 937{
936 if (!bat_priv->vis_hash) 938 if (!bat_priv->vis.hash)
937 return; 939 return;
938 940
939 cancel_delayed_work_sync(&bat_priv->vis_work); 941 cancel_delayed_work_sync(&bat_priv->vis.work);
940 942
941 spin_lock_bh(&bat_priv->vis_hash_lock); 943 spin_lock_bh(&bat_priv->vis.hash_lock);
942 /* properly remove, kill timers ... */ 944 /* properly remove, kill timers ... */
943 batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL); 945 batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
944 bat_priv->vis_hash = NULL; 946 bat_priv->vis.hash = NULL;
945 bat_priv->my_vis_info = NULL; 947 bat_priv->vis.my_info = NULL;
946 spin_unlock_bh(&bat_priv->vis_hash_lock); 948 spin_unlock_bh(&bat_priv->vis.hash_lock);
947} 949}
948 950
949/* schedule packets for (re)transmission */ 951/* schedule packets for (re)transmission */
950static void batadv_start_vis_timer(struct batadv_priv *bat_priv) 952static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
951{ 953{
952 INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets); 954 INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
953 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work, 955 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
954 msecs_to_jiffies(BATADV_VIS_INTERVAL)); 956 msecs_to_jiffies(BATADV_VIS_INTERVAL));
955} 957}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 84e716ed8963..873282fa86da 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -20,7 +20,7 @@
20#ifndef _NET_BATMAN_ADV_VIS_H_ 20#ifndef _NET_BATMAN_ADV_VIS_H_
21#define _NET_BATMAN_ADV_VIS_H_ 21#define _NET_BATMAN_ADV_VIS_H_
22 22
23/* timeout of vis packets in miliseconds */ 23/* timeout of vis packets in milliseconds */
24#define BATADV_VIS_TIMEOUT 200000 24#define BATADV_VIS_TIMEOUT 200000
25 25
26int batadv_vis_seq_print_text(struct seq_file *seq, void *offset); 26int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 4ff0bf3ba9a5..0760d1fed6f0 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -316,7 +316,7 @@ send_rsp:
316static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, 316static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
317 struct a2mp_cmd *hdr) 317 struct a2mp_cmd *hdr)
318{ 318{
319 BT_DBG("ident %d code %d", hdr->ident, hdr->code); 319 BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
320 320
321 skb_pull(skb, le16_to_cpu(hdr->len)); 321 skb_pull(skb, le16_to_cpu(hdr->len));
322 return 0; 322 return 0;
@@ -325,17 +325,19 @@ static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
325/* Handle A2MP signalling */ 325/* Handle A2MP signalling */
326static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) 326static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
327{ 327{
328 struct a2mp_cmd *hdr = (void *) skb->data; 328 struct a2mp_cmd *hdr;
329 struct amp_mgr *mgr = chan->data; 329 struct amp_mgr *mgr = chan->data;
330 int err = 0; 330 int err = 0;
331 331
332 amp_mgr_get(mgr); 332 amp_mgr_get(mgr);
333 333
334 while (skb->len >= sizeof(*hdr)) { 334 while (skb->len >= sizeof(*hdr)) {
335 struct a2mp_cmd *hdr = (void *) skb->data; 335 u16 len;
336 u16 len = le16_to_cpu(hdr->len);
337 336
338 BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len); 337 hdr = (void *) skb->data;
338 len = le16_to_cpu(hdr->len);
339
340 BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
339 341
340 skb_pull(skb, sizeof(*hdr)); 342 skb_pull(skb, sizeof(*hdr));
341 343
@@ -393,7 +395,9 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
393 395
394 if (err) { 396 if (err) {
395 struct a2mp_cmd_rej rej; 397 struct a2mp_cmd_rej rej;
398
396 rej.reason = __constant_cpu_to_le16(0); 399 rej.reason = __constant_cpu_to_le16(0);
400 hdr = (void *) skb->data;
397 401
398 BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); 402 BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
399 403
@@ -412,7 +416,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
412 416
413static void a2mp_chan_close_cb(struct l2cap_chan *chan) 417static void a2mp_chan_close_cb(struct l2cap_chan *chan)
414{ 418{
415 l2cap_chan_destroy(chan); 419 l2cap_chan_put(chan);
416} 420}
417 421
418static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state) 422static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f7db5792ec64..9d49ee6d7219 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -28,6 +28,7 @@
28#include <asm/ioctls.h> 28#include <asm/ioctls.h>
29 29
30#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
31#include <linux/proc_fs.h>
31 32
32#define VERSION "2.16" 33#define VERSION "2.16"
33 34
@@ -532,6 +533,144 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
532} 533}
533EXPORT_SYMBOL(bt_sock_wait_state); 534EXPORT_SYMBOL(bt_sock_wait_state);
534 535
536#ifdef CONFIG_PROC_FS
537struct bt_seq_state {
538 struct bt_sock_list *l;
539};
540
541static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
542 __acquires(seq->private->l->lock)
543{
544 struct bt_seq_state *s = seq->private;
545 struct bt_sock_list *l = s->l;
546
547 read_lock(&l->lock);
548 return seq_hlist_start_head(&l->head, *pos);
549}
550
551static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
552{
553 struct bt_seq_state *s = seq->private;
554 struct bt_sock_list *l = s->l;
555
556 return seq_hlist_next(v, &l->head, pos);
557}
558
559static void bt_seq_stop(struct seq_file *seq, void *v)
560 __releases(seq->private->l->lock)
561{
562 struct bt_seq_state *s = seq->private;
563 struct bt_sock_list *l = s->l;
564
565 read_unlock(&l->lock);
566}
567
568static int bt_seq_show(struct seq_file *seq, void *v)
569{
570 struct bt_seq_state *s = seq->private;
571 struct bt_sock_list *l = s->l;
572 bdaddr_t src_baswapped, dst_baswapped;
573
574 if (v == SEQ_START_TOKEN) {
575 seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent");
576
577 if (l->custom_seq_show) {
578 seq_putc(seq, ' ');
579 l->custom_seq_show(seq, v);
580 }
581
582 seq_putc(seq, '\n');
583 } else {
584 struct sock *sk = sk_entry(v);
585 struct bt_sock *bt = bt_sk(sk);
586 baswap(&src_baswapped, &bt->src);
587 baswap(&dst_baswapped, &bt->dst);
588
589 seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu",
590 sk,
591 atomic_read(&sk->sk_refcnt),
592 sk_rmem_alloc_get(sk),
593 sk_wmem_alloc_get(sk),
594 sock_i_uid(sk),
595 sock_i_ino(sk),
596 &src_baswapped,
597 &dst_baswapped,
598 bt->parent? sock_i_ino(bt->parent): 0LU);
599
600 if (l->custom_seq_show) {
601 seq_putc(seq, ' ');
602 l->custom_seq_show(seq, v);
603 }
604
605 seq_putc(seq, '\n');
606 }
607 return 0;
608}
609
610static struct seq_operations bt_seq_ops = {
611 .start = bt_seq_start,
612 .next = bt_seq_next,
613 .stop = bt_seq_stop,
614 .show = bt_seq_show,
615};
616
617static int bt_seq_open(struct inode *inode, struct file *file)
618{
619 struct bt_sock_list *sk_list;
620 struct bt_seq_state *s;
621
622 sk_list = PDE(inode)->data;
623 s = __seq_open_private(file, &bt_seq_ops,
624 sizeof(struct bt_seq_state));
625 if (!s)
626 return -ENOMEM;
627
628 s->l = sk_list;
629 return 0;
630}
631
632int bt_procfs_init(struct module* module, struct net *net, const char *name,
633 struct bt_sock_list* sk_list,
634 int (* seq_show)(struct seq_file *, void *))
635{
636 struct proc_dir_entry * pde;
637
638 sk_list->custom_seq_show = seq_show;
639
640 sk_list->fops.owner = module;
641 sk_list->fops.open = bt_seq_open;
642 sk_list->fops.read = seq_read;
643 sk_list->fops.llseek = seq_lseek;
644 sk_list->fops.release = seq_release_private;
645
646 pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
647 if (!pde)
648 return -ENOMEM;
649
650 pde->data = sk_list;
651
652 return 0;
653}
654
655void bt_procfs_cleanup(struct net *net, const char *name)
656{
657 proc_net_remove(net, name);
658}
659#else
660int bt_procfs_init(struct module* module, struct net *net, const char *name,
661 struct bt_sock_list* sk_list,
662 int (* seq_show)(struct seq_file *, void *))
663{
664 return 0;
665}
666
667void bt_procfs_cleanup(struct net *net, const char *name)
668{
669}
670#endif
671EXPORT_SYMBOL(bt_procfs_init);
672EXPORT_SYMBOL(bt_procfs_cleanup);
673
535static struct net_proto_family bt_sock_family_ops = { 674static struct net_proto_family bt_sock_family_ops = {
536 .owner = THIS_MODULE, 675 .owner = THIS_MODULE,
537 .family = PF_BLUETOOTH, 676 .family = PF_BLUETOOTH,
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 1eaacf10d19d..e7154a58465f 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -29,6 +29,10 @@
29 29
30#include "bnep.h" 30#include "bnep.h"
31 31
32static struct bt_sock_list bnep_sk_list = {
33 .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
34};
35
32static int bnep_sock_release(struct socket *sock) 36static int bnep_sock_release(struct socket *sock)
33{ 37{
34 struct sock *sk = sock->sk; 38 struct sock *sk = sock->sk;
@@ -38,6 +42,8 @@ static int bnep_sock_release(struct socket *sock)
38 if (!sk) 42 if (!sk)
39 return 0; 43 return 0;
40 44
45 bt_sock_unlink(&bnep_sk_list, sk);
46
41 sock_orphan(sk); 47 sock_orphan(sk);
42 sock_put(sk); 48 sock_put(sk);
43 return 0; 49 return 0;
@@ -204,6 +210,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
204 sk->sk_protocol = protocol; 210 sk->sk_protocol = protocol;
205 sk->sk_state = BT_OPEN; 211 sk->sk_state = BT_OPEN;
206 212
213 bt_sock_link(&bnep_sk_list, sk);
207 return 0; 214 return 0;
208} 215}
209 216
@@ -222,19 +229,30 @@ int __init bnep_sock_init(void)
222 return err; 229 return err;
223 230
224 err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); 231 err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
225 if (err < 0) 232 if (err < 0) {
233 BT_ERR("Can't register BNEP socket");
226 goto error; 234 goto error;
235 }
236
237 err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL);
238 if (err < 0) {
239 BT_ERR("Failed to create BNEP proc file");
240 bt_sock_unregister(BTPROTO_BNEP);
241 goto error;
242 }
243
244 BT_INFO("BNEP socket layer initialized");
227 245
228 return 0; 246 return 0;
229 247
230error: 248error:
231 BT_ERR("Can't register BNEP socket");
232 proto_unregister(&bnep_proto); 249 proto_unregister(&bnep_proto);
233 return err; 250 return err;
234} 251}
235 252
236void __exit bnep_sock_cleanup(void) 253void __exit bnep_sock_cleanup(void)
237{ 254{
255 bt_procfs_cleanup(&init_net, "bnep");
238 if (bt_sock_unregister(BTPROTO_BNEP) < 0) 256 if (bt_sock_unregister(BTPROTO_BNEP) < 0)
239 BT_ERR("Can't unregister BNEP socket"); 257 BT_ERR("Can't unregister BNEP socket");
240 258
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 32dc83dcb6b2..aacb802d1ee4 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -42,6 +42,10 @@
42 42
43#include "cmtp.h" 43#include "cmtp.h"
44 44
45static struct bt_sock_list cmtp_sk_list = {
46 .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
47};
48
45static int cmtp_sock_release(struct socket *sock) 49static int cmtp_sock_release(struct socket *sock)
46{ 50{
47 struct sock *sk = sock->sk; 51 struct sock *sk = sock->sk;
@@ -51,6 +55,8 @@ static int cmtp_sock_release(struct socket *sock)
51 if (!sk) 55 if (!sk)
52 return 0; 56 return 0;
53 57
58 bt_sock_unlink(&cmtp_sk_list, sk);
59
54 sock_orphan(sk); 60 sock_orphan(sk);
55 sock_put(sk); 61 sock_put(sk);
56 62
@@ -214,6 +220,8 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
214 sk->sk_protocol = protocol; 220 sk->sk_protocol = protocol;
215 sk->sk_state = BT_OPEN; 221 sk->sk_state = BT_OPEN;
216 222
223 bt_sock_link(&cmtp_sk_list, sk);
224
217 return 0; 225 return 0;
218} 226}
219 227
@@ -232,19 +240,30 @@ int cmtp_init_sockets(void)
232 return err; 240 return err;
233 241
234 err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); 242 err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
235 if (err < 0) 243 if (err < 0) {
244 BT_ERR("Can't register CMTP socket");
236 goto error; 245 goto error;
246 }
247
248 err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL);
249 if (err < 0) {
250 BT_ERR("Failed to create CMTP proc file");
251 bt_sock_unregister(BTPROTO_HIDP);
252 goto error;
253 }
254
255 BT_INFO("CMTP socket layer initialized");
237 256
238 return 0; 257 return 0;
239 258
240error: 259error:
241 BT_ERR("Can't register CMTP socket");
242 proto_unregister(&cmtp_proto); 260 proto_unregister(&cmtp_proto);
243 return err; 261 return err;
244} 262}
245 263
246void cmtp_cleanup_sockets(void) 264void cmtp_cleanup_sockets(void)
247{ 265{
266 bt_procfs_cleanup(&init_net, "cmtp");
248 if (bt_sock_unregister(BTPROTO_CMTP) < 0) 267 if (bt_sock_unregister(BTPROTO_CMTP) < 0)
249 BT_ERR("Can't unregister CMTP socket"); 268 BT_ERR("Can't unregister CMTP socket");
250 269
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3c094e78dde9..b9196a44f759 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -31,7 +31,7 @@
31#include <net/bluetooth/a2mp.h> 31#include <net/bluetooth/a2mp.h>
32#include <net/bluetooth/smp.h> 32#include <net/bluetooth/smp.h>
33 33
34static void hci_le_connect(struct hci_conn *conn) 34static void hci_le_create_connection(struct hci_conn *conn)
35{ 35{
36 struct hci_dev *hdev = conn->hdev; 36 struct hci_dev *hdev = conn->hdev;
37 struct hci_cp_le_create_conn cp; 37 struct hci_cp_le_create_conn cp;
@@ -55,12 +55,12 @@ static void hci_le_connect(struct hci_conn *conn)
55 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 55 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
56} 56}
57 57
58static void hci_le_connect_cancel(struct hci_conn *conn) 58static void hci_le_create_connection_cancel(struct hci_conn *conn)
59{ 59{
60 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); 60 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
61} 61}
62 62
63void hci_acl_connect(struct hci_conn *conn) 63static void hci_acl_create_connection(struct hci_conn *conn)
64{ 64{
65 struct hci_dev *hdev = conn->hdev; 65 struct hci_dev *hdev = conn->hdev;
66 struct inquiry_entry *ie; 66 struct inquiry_entry *ie;
@@ -104,7 +104,7 @@ void hci_acl_connect(struct hci_conn *conn)
104 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); 104 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
105} 105}
106 106
107static void hci_acl_connect_cancel(struct hci_conn *conn) 107static void hci_acl_create_connection_cancel(struct hci_conn *conn)
108{ 108{
109 struct hci_cp_create_conn_cancel cp; 109 struct hci_cp_create_conn_cancel cp;
110 110
@@ -130,7 +130,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
130 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); 130 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
131} 131}
132 132
133void hci_add_sco(struct hci_conn *conn, __u16 handle) 133static void hci_add_sco(struct hci_conn *conn, __u16 handle)
134{ 134{
135 struct hci_dev *hdev = conn->hdev; 135 struct hci_dev *hdev = conn->hdev;
136 struct hci_cp_add_sco cp; 136 struct hci_cp_add_sco cp;
@@ -246,9 +246,9 @@ static void hci_conn_timeout(struct work_struct *work)
246 case BT_CONNECT2: 246 case BT_CONNECT2:
247 if (conn->out) { 247 if (conn->out) {
248 if (conn->type == ACL_LINK) 248 if (conn->type == ACL_LINK)
249 hci_acl_connect_cancel(conn); 249 hci_acl_create_connection_cancel(conn);
250 else if (conn->type == LE_LINK) 250 else if (conn->type == LE_LINK)
251 hci_le_connect_cancel(conn); 251 hci_le_create_connection_cancel(conn);
252 } 252 }
253 break; 253 break;
254 case BT_CONFIG: 254 case BT_CONFIG:
@@ -471,40 +471,37 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
471} 471}
472EXPORT_SYMBOL(hci_get_route); 472EXPORT_SYMBOL(hci_get_route);
473 473
474/* Create SCO, ACL or LE connection. 474static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
475 * Device _must_ be locked */ 475 u8 dst_type, u8 sec_level, u8 auth_type)
476struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
477 __u8 dst_type, __u8 sec_level, __u8 auth_type)
478{ 476{
479 struct hci_conn *acl;
480 struct hci_conn *sco;
481 struct hci_conn *le; 477 struct hci_conn *le;
482 478
483 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 479 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
480 if (!le) {
481 le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
482 if (le)
483 return ERR_PTR(-EBUSY);
484 484
485 if (type == LE_LINK) { 485 le = hci_conn_add(hdev, LE_LINK, dst);
486 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 486 if (!le)
487 if (!le) { 487 return ERR_PTR(-ENOMEM);
488 le = hci_conn_hash_lookup_state(hdev, LE_LINK,
489 BT_CONNECT);
490 if (le)
491 return ERR_PTR(-EBUSY);
492 488
493 le = hci_conn_add(hdev, LE_LINK, dst); 489 le->dst_type = bdaddr_to_le(dst_type);
494 if (!le) 490 hci_le_create_connection(le);
495 return ERR_PTR(-ENOMEM); 491 }
496 492
497 le->dst_type = bdaddr_to_le(dst_type); 493 le->pending_sec_level = sec_level;
498 hci_le_connect(le); 494 le->auth_type = auth_type;
499 }
500 495
501 le->pending_sec_level = sec_level; 496 hci_conn_hold(le);
502 le->auth_type = auth_type;
503 497
504 hci_conn_hold(le); 498 return le;
499}
505 500
506 return le; 501static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
507 } 502 u8 sec_level, u8 auth_type)
503{
504 struct hci_conn *acl;
508 505
509 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 506 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
510 if (!acl) { 507 if (!acl) {
@@ -519,10 +516,20 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
519 acl->sec_level = BT_SECURITY_LOW; 516 acl->sec_level = BT_SECURITY_LOW;
520 acl->pending_sec_level = sec_level; 517 acl->pending_sec_level = sec_level;
521 acl->auth_type = auth_type; 518 acl->auth_type = auth_type;
522 hci_acl_connect(acl); 519 hci_acl_create_connection(acl);
523 } 520 }
524 521
525 if (type == ACL_LINK) 522 return acl;
523}
524
525static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
526 bdaddr_t *dst, u8 sec_level, u8 auth_type)
527{
528 struct hci_conn *acl;
529 struct hci_conn *sco;
530
531 acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
532 if (IS_ERR(acl))
526 return acl; 533 return acl;
527 534
528 sco = hci_conn_hash_lookup_ba(hdev, type, dst); 535 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
@@ -556,6 +563,25 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
556 return sco; 563 return sco;
557} 564}
558 565
566/* Create SCO, ACL or LE connection. */
567struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
568 __u8 dst_type, __u8 sec_level, __u8 auth_type)
569{
570 BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type);
571
572 switch (type) {
573 case LE_LINK:
574 return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
575 case ACL_LINK:
576 return hci_connect_acl(hdev, dst, sec_level, auth_type);
577 case SCO_LINK:
578 case ESCO_LINK:
579 return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
580 }
581
582 return ERR_PTR(-EINVAL);
583}
584
559/* Check link security requirement */ 585/* Check link security requirement */
560int hci_conn_check_link_mode(struct hci_conn *conn) 586int hci_conn_check_link_mode(struct hci_conn *conn)
561{ 587{
@@ -775,7 +801,7 @@ void hci_conn_check_pending(struct hci_dev *hdev)
775 801
776 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); 802 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
777 if (conn) 803 if (conn)
778 hci_acl_connect(conn); 804 hci_acl_create_connection(conn);
779 805
780 hci_dev_unlock(hdev); 806 hci_dev_unlock(hdev);
781} 807}
@@ -913,7 +939,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
913 return chan; 939 return chan;
914} 940}
915 941
916int hci_chan_del(struct hci_chan *chan) 942void hci_chan_del(struct hci_chan *chan)
917{ 943{
918 struct hci_conn *conn = chan->conn; 944 struct hci_conn *conn = chan->conn;
919 struct hci_dev *hdev = conn->hdev; 945 struct hci_dev *hdev = conn->hdev;
@@ -926,8 +952,6 @@ int hci_chan_del(struct hci_chan *chan)
926 952
927 skb_queue_purge(&chan->data_q); 953 skb_queue_purge(&chan->data_q);
928 kfree(chan); 954 kfree(chan);
929
930 return 0;
931} 955}
932 956
933void hci_chan_list_flush(struct hci_conn *conn) 957void hci_chan_list_flush(struct hci_conn *conn)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0b997c8f9655..8a0ce706aebd 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -231,6 +231,9 @@ static void amp_init(struct hci_dev *hdev)
231 231
232 /* Read Local AMP Info */ 232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); 233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
234
235 /* Read Data Blk size */
236 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
234} 237}
235 238
236static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 239static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -268,7 +271,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
268 BT_ERR("Unknown device type %d", hdev->dev_type); 271 BT_ERR("Unknown device type %d", hdev->dev_type);
269 break; 272 break;
270 } 273 }
271
272} 274}
273 275
274static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) 276static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -696,7 +698,8 @@ int hci_dev_open(__u16 dev)
696 hci_dev_hold(hdev); 698 hci_dev_hold(hdev);
697 set_bit(HCI_UP, &hdev->flags); 699 set_bit(HCI_UP, &hdev->flags);
698 hci_notify(hdev, HCI_DEV_UP); 700 hci_notify(hdev, HCI_DEV_UP);
699 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { 701 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702 mgmt_valid_hdev(hdev)) {
700 hci_dev_lock(hdev); 703 hci_dev_lock(hdev);
701 mgmt_powered(hdev, 1); 704 mgmt_powered(hdev, 1);
702 hci_dev_unlock(hdev); 705 hci_dev_unlock(hdev);
@@ -799,7 +802,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
799 * and no tasks are scheduled. */ 802 * and no tasks are scheduled. */
800 hdev->close(hdev); 803 hdev->close(hdev);
801 804
802 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 805 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
806 mgmt_valid_hdev(hdev)) {
803 hci_dev_lock(hdev); 807 hci_dev_lock(hdev);
804 mgmt_powered(hdev, 0); 808 mgmt_powered(hdev, 0);
805 hci_dev_unlock(hdev); 809 hci_dev_unlock(hdev);
@@ -1652,6 +1656,7 @@ struct hci_dev *hci_alloc_dev(void)
1652 INIT_LIST_HEAD(&hdev->link_keys); 1656 INIT_LIST_HEAD(&hdev->link_keys);
1653 INIT_LIST_HEAD(&hdev->long_term_keys); 1657 INIT_LIST_HEAD(&hdev->long_term_keys);
1654 INIT_LIST_HEAD(&hdev->remote_oob_data); 1658 INIT_LIST_HEAD(&hdev->remote_oob_data);
1659 INIT_LIST_HEAD(&hdev->conn_hash.list);
1655 1660
1656 INIT_WORK(&hdev->rx_work, hci_rx_work); 1661 INIT_WORK(&hdev->rx_work, hci_rx_work);
1657 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 1662 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
@@ -1674,7 +1679,6 @@ struct hci_dev *hci_alloc_dev(void)
1674 1679
1675 hci_init_sysfs(hdev); 1680 hci_init_sysfs(hdev);
1676 discovery_init(hdev); 1681 discovery_init(hdev);
1677 hci_conn_hash_init(hdev);
1678 1682
1679 return hdev; 1683 return hdev;
1680} 1684}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 715d7e33fba0..2022b43c7353 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -29,6 +29,7 @@
29 29
30#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h> 31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/mgmt.h>
32 33
33/* Handle HCI Event packets */ 34/* Handle HCI Event packets */
34 35
@@ -303,7 +304,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
303 304
304 hci_dev_lock(hdev); 305 hci_dev_lock(hdev);
305 306
306 if (status != 0) { 307 if (status) {
307 mgmt_write_scan_failed(hdev, param, status); 308 mgmt_write_scan_failed(hdev, param, status);
308 hdev->discov_timeout = 0; 309 hdev->discov_timeout = 0;
309 goto done; 310 goto done;
@@ -513,7 +514,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
513 if (hdev->features[3] & LMP_RSSI_INQ) 514 if (hdev->features[3] & LMP_RSSI_INQ)
514 events[4] |= 0x02; /* Inquiry Result with RSSI */ 515 events[4] |= 0x02; /* Inquiry Result with RSSI */
515 516
516 if (hdev->features[5] & LMP_SNIFF_SUBR) 517 if (lmp_sniffsubr_capable(hdev))
517 events[5] |= 0x20; /* Sniff Subrating */ 518 events[5] |= 0x20; /* Sniff Subrating */
518 519
519 if (hdev->features[5] & LMP_PAUSE_ENC) 520 if (hdev->features[5] & LMP_PAUSE_ENC)
@@ -522,13 +523,13 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
522 if (hdev->features[6] & LMP_EXT_INQ) 523 if (hdev->features[6] & LMP_EXT_INQ)
523 events[5] |= 0x40; /* Extended Inquiry Result */ 524 events[5] |= 0x40; /* Extended Inquiry Result */
524 525
525 if (hdev->features[6] & LMP_NO_FLUSH) 526 if (lmp_no_flush_capable(hdev))
526 events[7] |= 0x01; /* Enhanced Flush Complete */ 527 events[7] |= 0x01; /* Enhanced Flush Complete */
527 528
528 if (hdev->features[7] & LMP_LSTO) 529 if (hdev->features[7] & LMP_LSTO)
529 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 530 events[6] |= 0x80; /* Link Supervision Timeout Changed */
530 531
531 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 532 if (lmp_ssp_capable(hdev)) {
532 events[6] |= 0x01; /* IO Capability Request */ 533 events[6] |= 0x01; /* IO Capability Request */
533 events[6] |= 0x02; /* IO Capability Response */ 534 events[6] |= 0x02; /* IO Capability Response */
534 events[6] |= 0x04; /* User Confirmation Request */ 535 events[6] |= 0x04; /* User Confirmation Request */
@@ -541,7 +542,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
541 * Features Notification */ 542 * Features Notification */
542 } 543 }
543 544
544 if (hdev->features[4] & LMP_LE) 545 if (lmp_le_capable(hdev))
545 events[7] |= 0x20; /* LE Meta-Event */ 546 events[7] |= 0x20; /* LE Meta-Event */
546 547
547 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 548 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
@@ -623,11 +624,11 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
623 struct hci_cp_write_def_link_policy cp; 624 struct hci_cp_write_def_link_policy cp;
624 u16 link_policy = 0; 625 u16 link_policy = 0;
625 626
626 if (hdev->features[0] & LMP_RSWITCH) 627 if (lmp_rswitch_capable(hdev))
627 link_policy |= HCI_LP_RSWITCH; 628 link_policy |= HCI_LP_RSWITCH;
628 if (hdev->features[0] & LMP_HOLD) 629 if (hdev->features[0] & LMP_HOLD)
629 link_policy |= HCI_LP_HOLD; 630 link_policy |= HCI_LP_HOLD;
630 if (hdev->features[0] & LMP_SNIFF) 631 if (lmp_sniff_capable(hdev))
631 link_policy |= HCI_LP_SNIFF; 632 link_policy |= HCI_LP_SNIFF;
632 if (hdev->features[1] & LMP_PARK) 633 if (hdev->features[1] & LMP_PARK)
633 link_policy |= HCI_LP_PARK; 634 link_policy |= HCI_LP_PARK;
@@ -686,7 +687,7 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
686 hdev->esco_type |= (ESCO_HV3); 687 hdev->esco_type |= (ESCO_HV3);
687 } 688 }
688 689
689 if (hdev->features[3] & LMP_ESCO) 690 if (lmp_esco_capable(hdev))
690 hdev->esco_type |= (ESCO_EV3); 691 hdev->esco_type |= (ESCO_EV3);
691 692
692 if (hdev->features[4] & LMP_EV4) 693 if (hdev->features[4] & LMP_EV4)
@@ -746,7 +747,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
746 break; 747 break;
747 } 748 }
748 749
749 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE) 750 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
750 hci_set_le_support(hdev); 751 hci_set_le_support(hdev);
751 752
752done: 753done:
@@ -925,7 +926,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
925 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 926 if (test_bit(HCI_MGMT, &hdev->dev_flags))
926 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 927 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
927 928
928 if (rp->status != 0) 929 if (rp->status)
929 goto unlock; 930 goto unlock;
930 931
931 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 932 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
@@ -1625,43 +1626,30 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1625 1626
1626static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) 1627static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1627{ 1628{
1628 struct hci_cp_le_create_conn *cp;
1629 struct hci_conn *conn; 1629 struct hci_conn *conn;
1630 1630
1631 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1631 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1632 1632
1633 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 1633 if (status) {
1634 if (!cp) 1634 hci_dev_lock(hdev);
1635 return;
1636 1635
1637 hci_dev_lock(hdev); 1636 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1637 if (!conn) {
1638 hci_dev_unlock(hdev);
1639 return;
1640 }
1638 1641
1639 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 1642 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
1643 conn);
1640 1644
1641 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), 1645 conn->state = BT_CLOSED;
1642 conn); 1646 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1647 conn->dst_type, status);
1648 hci_proto_connect_cfm(conn, status);
1649 hci_conn_del(conn);
1643 1650
1644 if (status) { 1651 hci_dev_unlock(hdev);
1645 if (conn && conn->state == BT_CONNECT) {
1646 conn->state = BT_CLOSED;
1647 mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
1648 conn->dst_type, status);
1649 hci_proto_connect_cfm(conn, status);
1650 hci_conn_del(conn);
1651 }
1652 } else {
1653 if (!conn) {
1654 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1655 if (conn) {
1656 conn->dst_type = cp->peer_addr_type;
1657 conn->out = true;
1658 } else {
1659 BT_ERR("No memory for new connection");
1660 }
1661 }
1662 } 1652 }
1663
1664 hci_dev_unlock(hdev);
1665} 1653}
1666 1654
1667static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 1655static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
@@ -1904,6 +1892,22 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1904 } 1892 }
1905} 1893}
1906 1894
1895static u8 hci_to_mgmt_reason(u8 err)
1896{
1897 switch (err) {
1898 case HCI_ERROR_CONNECTION_TIMEOUT:
1899 return MGMT_DEV_DISCONN_TIMEOUT;
1900 case HCI_ERROR_REMOTE_USER_TERM:
1901 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1902 case HCI_ERROR_REMOTE_POWER_OFF:
1903 return MGMT_DEV_DISCONN_REMOTE;
1904 case HCI_ERROR_LOCAL_HOST_TERM:
1905 return MGMT_DEV_DISCONN_LOCAL_HOST;
1906 default:
1907 return MGMT_DEV_DISCONN_UNKNOWN;
1908 }
1909}
1910
1907static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1911static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1908{ 1912{
1909 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1913 struct hci_ev_disconn_complete *ev = (void *) skb->data;
@@ -1922,12 +1926,15 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1922 1926
1923 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && 1927 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1924 (conn->type == ACL_LINK || conn->type == LE_LINK)) { 1928 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1925 if (ev->status != 0) 1929 if (ev->status) {
1926 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1930 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1927 conn->dst_type, ev->status); 1931 conn->dst_type, ev->status);
1928 else 1932 } else {
1933 u8 reason = hci_to_mgmt_reason(ev->reason);
1934
1929 mgmt_device_disconnected(hdev, &conn->dst, conn->type, 1935 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1930 conn->dst_type); 1936 conn->dst_type, reason);
1937 }
1931 } 1938 }
1932 1939
1933 if (ev->status == 0) { 1940 if (ev->status == 0) {
@@ -3268,12 +3275,67 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3268 3275
3269 BT_DBG("%s", hdev->name); 3276 BT_DBG("%s", hdev->name);
3270 3277
3271 hci_dev_lock(hdev);
3272
3273 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3278 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3274 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 3279 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3280}
3275 3281
3276 hci_dev_unlock(hdev); 3282static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3283 struct sk_buff *skb)
3284{
3285 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3286 struct hci_conn *conn;
3287
3288 BT_DBG("%s", hdev->name);
3289
3290 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3291 if (!conn)
3292 return;
3293
3294 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3295 conn->passkey_entered = 0;
3296
3297 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3298 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3299 conn->dst_type, conn->passkey_notify,
3300 conn->passkey_entered);
3301}
3302
3303static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3304{
3305 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3306 struct hci_conn *conn;
3307
3308 BT_DBG("%s", hdev->name);
3309
3310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3311 if (!conn)
3312 return;
3313
3314 switch (ev->type) {
3315 case HCI_KEYPRESS_STARTED:
3316 conn->passkey_entered = 0;
3317 return;
3318
3319 case HCI_KEYPRESS_ENTERED:
3320 conn->passkey_entered++;
3321 break;
3322
3323 case HCI_KEYPRESS_ERASED:
3324 conn->passkey_entered--;
3325 break;
3326
3327 case HCI_KEYPRESS_CLEARED:
3328 conn->passkey_entered = 0;
3329 break;
3330
3331 case HCI_KEYPRESS_COMPLETED:
3332 return;
3333 }
3334
3335 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3336 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3337 conn->dst_type, conn->passkey_notify,
3338 conn->passkey_entered);
3277} 3339}
3278 3340
3279static void hci_simple_pair_complete_evt(struct hci_dev *hdev, 3341static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
@@ -3295,7 +3357,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3295 * initiated the authentication. A traditional auth_complete 3357 * initiated the authentication. A traditional auth_complete
3296 * event gets always produced as initiator and is also mapped to 3358 * event gets always produced as initiator and is also mapped to
3297 * the mgmt_auth_failed event */ 3359 * the mgmt_auth_failed event */
3298 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0) 3360 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3299 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, 3361 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3300 ev->status); 3362 ev->status);
3301 3363
@@ -3366,11 +3428,23 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3366 3428
3367 hci_dev_lock(hdev); 3429 hci_dev_lock(hdev);
3368 3430
3369 if (ev->status) { 3431 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3370 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 3432 if (!conn) {
3371 if (!conn) 3433 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3434 if (!conn) {
3435 BT_ERR("No memory for new connection");
3372 goto unlock; 3436 goto unlock;
3437 }
3438
3439 conn->dst_type = ev->bdaddr_type;
3373 3440
3441 if (ev->role == LE_CONN_ROLE_MASTER) {
3442 conn->out = true;
3443 conn->link_mode |= HCI_LM_MASTER;
3444 }
3445 }
3446
3447 if (ev->status) {
3374 mgmt_connect_failed(hdev, &conn->dst, conn->type, 3448 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3375 conn->dst_type, ev->status); 3449 conn->dst_type, ev->status);
3376 hci_proto_connect_cfm(conn, ev->status); 3450 hci_proto_connect_cfm(conn, ev->status);
@@ -3379,18 +3453,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3379 goto unlock; 3453 goto unlock;
3380 } 3454 }
3381 3455
3382 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3383 if (!conn) {
3384 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3385 if (!conn) {
3386 BT_ERR("No memory for new connection");
3387 hci_dev_unlock(hdev);
3388 return;
3389 }
3390
3391 conn->dst_type = ev->bdaddr_type;
3392 }
3393
3394 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3456 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3395 mgmt_device_connected(hdev, &ev->bdaddr, conn->type, 3457 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3396 conn->dst_type, 0, NULL, 0, NULL); 3458 conn->dst_type, 0, NULL, 0, NULL);
@@ -3640,6 +3702,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3640 hci_user_passkey_request_evt(hdev, skb); 3702 hci_user_passkey_request_evt(hdev, skb);
3641 break; 3703 break;
3642 3704
3705 case HCI_EV_USER_PASSKEY_NOTIFY:
3706 hci_user_passkey_notify_evt(hdev, skb);
3707 break;
3708
3709 case HCI_EV_KEYPRESS_NOTIFY:
3710 hci_keypress_notify_evt(hdev, skb);
3711 break;
3712
3643 case HCI_EV_SIMPLE_PAIR_COMPLETE: 3713 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3644 hci_simple_pair_complete_evt(hdev, skb); 3714 hci_simple_pair_complete_evt(hdev, skb);
3645 break; 3715 break;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index d5ace1eda3ed..07f073935811 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1102,21 +1102,30 @@ int __init hci_sock_init(void)
1102 return err; 1102 return err;
1103 1103
1104 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); 1104 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1105 if (err < 0) 1105 if (err < 0) {
1106 BT_ERR("HCI socket registration failed");
1106 goto error; 1107 goto error;
1108 }
1109
1110 err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
1111 if (err < 0) {
1112 BT_ERR("Failed to create HCI proc file");
1113 bt_sock_unregister(BTPROTO_HCI);
1114 goto error;
1115 }
1107 1116
1108 BT_INFO("HCI socket layer initialized"); 1117 BT_INFO("HCI socket layer initialized");
1109 1118
1110 return 0; 1119 return 0;
1111 1120
1112error: 1121error:
1113 BT_ERR("HCI socket registration failed");
1114 proto_unregister(&hci_sk_proto); 1122 proto_unregister(&hci_sk_proto);
1115 return err; 1123 return err;
1116} 1124}
1117 1125
1118void hci_sock_cleanup(void) 1126void hci_sock_cleanup(void)
1119{ 1127{
1128 bt_procfs_cleanup(&init_net, "hci");
1120 if (bt_sock_unregister(BTPROTO_HCI) < 0) 1129 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1121 BT_ERR("HCI socket unregistration failed"); 1130 BT_ERR("HCI socket unregistration failed");
1122 1131
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index b24fb3bd8625..82a829d90b0f 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -25,6 +25,10 @@
25 25
26#include "hidp.h" 26#include "hidp.h"
27 27
28static struct bt_sock_list hidp_sk_list = {
29 .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
30};
31
28static int hidp_sock_release(struct socket *sock) 32static int hidp_sock_release(struct socket *sock)
29{ 33{
30 struct sock *sk = sock->sk; 34 struct sock *sk = sock->sk;
@@ -34,6 +38,8 @@ static int hidp_sock_release(struct socket *sock)
34 if (!sk) 38 if (!sk)
35 return 0; 39 return 0;
36 40
41 bt_sock_unlink(&hidp_sk_list, sk);
42
37 sock_orphan(sk); 43 sock_orphan(sk);
38 sock_put(sk); 44 sock_put(sk);
39 45
@@ -253,6 +259,8 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
253 sk->sk_protocol = protocol; 259 sk->sk_protocol = protocol;
254 sk->sk_state = BT_OPEN; 260 sk->sk_state = BT_OPEN;
255 261
262 bt_sock_link(&hidp_sk_list, sk);
263
256 return 0; 264 return 0;
257} 265}
258 266
@@ -271,8 +279,19 @@ int __init hidp_init_sockets(void)
271 return err; 279 return err;
272 280
273 err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops); 281 err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
274 if (err < 0) 282 if (err < 0) {
283 BT_ERR("Can't register HIDP socket");
275 goto error; 284 goto error;
285 }
286
287 err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL);
288 if (err < 0) {
289 BT_ERR("Failed to create HIDP proc file");
290 bt_sock_unregister(BTPROTO_HIDP);
291 goto error;
292 }
293
294 BT_INFO("HIDP socket layer initialized");
276 295
277 return 0; 296 return 0;
278 297
@@ -284,6 +303,7 @@ error:
284 303
285void __exit hidp_cleanup_sockets(void) 304void __exit hidp_cleanup_sockets(void)
286{ 305{
306 bt_procfs_cleanup(&init_net, "hidp");
287 if (bt_sock_unregister(BTPROTO_HIDP) < 0) 307 if (bt_sock_unregister(BTPROTO_HIDP) < 0)
288 BT_ERR("Can't unregister HIDP socket"); 308 BT_ERR("Can't unregister HIDP socket");
289 309
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 38c00f142203..a91239dcda41 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -406,7 +406,7 @@ struct l2cap_chan *l2cap_chan_create(void)
406 406
407 chan->state = BT_OPEN; 407 chan->state = BT_OPEN;
408 408
409 atomic_set(&chan->refcnt, 1); 409 kref_init(&chan->kref);
410 410
411 /* This flag is cleared in l2cap_chan_ready() */ 411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state); 412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
@@ -416,13 +416,31 @@ struct l2cap_chan *l2cap_chan_create(void)
416 return chan; 416 return chan;
417} 417}
418 418
419void l2cap_chan_destroy(struct l2cap_chan *chan) 419static void l2cap_chan_destroy(struct kref *kref)
420{ 420{
421 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
422
423 BT_DBG("chan %p", chan);
424
421 write_lock(&chan_list_lock); 425 write_lock(&chan_list_lock);
422 list_del(&chan->global_l); 426 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock); 427 write_unlock(&chan_list_lock);
424 428
425 l2cap_chan_put(chan); 429 kfree(chan);
430}
431
432void l2cap_chan_hold(struct l2cap_chan *c)
433{
434 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
435
436 kref_get(&c->kref);
437}
438
439void l2cap_chan_put(struct l2cap_chan *c)
440{
441 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
442
443 kref_put(&c->kref, l2cap_chan_destroy);
426} 444}
427 445
428void l2cap_chan_set_defaults(struct l2cap_chan *chan) 446void l2cap_chan_set_defaults(struct l2cap_chan *chan)
@@ -1431,7 +1449,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1431 int err; 1449 int err;
1432 1450
1433 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst), 1451 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1434 dst_type, __le16_to_cpu(chan->psm)); 1452 dst_type, __le16_to_cpu(psm));
1435 1453
1436 hdev = hci_get_route(dst, src); 1454 hdev = hci_get_route(dst, src);
1437 if (!hdev) 1455 if (!hdev)
@@ -5331,7 +5349,7 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5331 return exact ? lm1 : lm2; 5349 return exact ? lm1 : lm2;
5332} 5350}
5333 5351
5334int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) 5352void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5335{ 5353{
5336 struct l2cap_conn *conn; 5354 struct l2cap_conn *conn;
5337 5355
@@ -5344,7 +5362,6 @@ int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5344 } else 5362 } else
5345 l2cap_conn_del(hcon, bt_to_errno(status)); 5363 l2cap_conn_del(hcon, bt_to_errno(status));
5346 5364
5347 return 0;
5348} 5365}
5349 5366
5350int l2cap_disconn_ind(struct hci_conn *hcon) 5367int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5358,12 +5375,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
5358 return conn->disc_reason; 5375 return conn->disc_reason;
5359} 5376}
5360 5377
5361int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) 5378void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5362{ 5379{
5363 BT_DBG("hcon %p reason %d", hcon, reason); 5380 BT_DBG("hcon %p reason %d", hcon, reason);
5364 5381
5365 l2cap_conn_del(hcon, bt_to_errno(reason)); 5382 l2cap_conn_del(hcon, bt_to_errno(reason));
5366 return 0;
5367} 5383}
5368 5384
5369static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) 5385static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
@@ -5406,6 +5422,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5406 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, 5422 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5407 state_to_string(chan->state)); 5423 state_to_string(chan->state));
5408 5424
5425 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5426 l2cap_chan_unlock(chan);
5427 continue;
5428 }
5429
5409 if (chan->scid == L2CAP_CID_LE_DATA) { 5430 if (chan->scid == L2CAP_CID_LE_DATA) {
5410 if (!status && encrypt) { 5431 if (!status && encrypt) {
5411 chan->sec_level = hcon->sec_level; 5432 chan->sec_level = hcon->sec_level;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 34bbe1c5e389..083f2bf065d4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -34,6 +34,10 @@
34#include <net/bluetooth/l2cap.h> 34#include <net/bluetooth/l2cap.h>
35#include <net/bluetooth/smp.h> 35#include <net/bluetooth/smp.h>
36 36
37static struct bt_sock_list l2cap_sk_list = {
38 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
39};
40
37static const struct proto_ops l2cap_sock_ops; 41static const struct proto_ops l2cap_sock_ops;
38static void l2cap_sock_init(struct sock *sk, struct sock *parent); 42static void l2cap_sock_init(struct sock *sk, struct sock *parent);
39static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio); 43static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
@@ -824,7 +828,7 @@ static void l2cap_sock_kill(struct sock *sk)
824 828
825 /* Kill poor orphan */ 829 /* Kill poor orphan */
826 830
827 l2cap_chan_destroy(l2cap_pi(sk)->chan); 831 l2cap_chan_put(l2cap_pi(sk)->chan);
828 sock_set_flag(sk, SOCK_DEAD); 832 sock_set_flag(sk, SOCK_DEAD);
829 sock_put(sk); 833 sock_put(sk);
830} 834}
@@ -887,6 +891,8 @@ static int l2cap_sock_release(struct socket *sock)
887 if (!sk) 891 if (!sk)
888 return 0; 892 return 0;
889 893
894 bt_sock_unlink(&l2cap_sk_list, sk);
895
890 err = l2cap_sock_shutdown(sock, 2); 896 err = l2cap_sock_shutdown(sock, 2);
891 897
892 sock_orphan(sk); 898 sock_orphan(sk);
@@ -1211,6 +1217,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1211 return -ENOMEM; 1217 return -ENOMEM;
1212 1218
1213 l2cap_sock_init(sk, NULL); 1219 l2cap_sock_init(sk, NULL);
1220 bt_sock_link(&l2cap_sk_list, sk);
1214 return 0; 1221 return 0;
1215} 1222}
1216 1223
@@ -1249,21 +1256,30 @@ int __init l2cap_init_sockets(void)
1249 return err; 1256 return err;
1250 1257
1251 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); 1258 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
1252 if (err < 0) 1259 if (err < 0) {
1260 BT_ERR("L2CAP socket registration failed");
1253 goto error; 1261 goto error;
1262 }
1263
1264 err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL);
1265 if (err < 0) {
1266 BT_ERR("Failed to create L2CAP proc file");
1267 bt_sock_unregister(BTPROTO_L2CAP);
1268 goto error;
1269 }
1254 1270
1255 BT_INFO("L2CAP socket layer initialized"); 1271 BT_INFO("L2CAP socket layer initialized");
1256 1272
1257 return 0; 1273 return 0;
1258 1274
1259error: 1275error:
1260 BT_ERR("L2CAP socket registration failed");
1261 proto_unregister(&l2cap_proto); 1276 proto_unregister(&l2cap_proto);
1262 return err; 1277 return err;
1263} 1278}
1264 1279
1265void l2cap_cleanup_sockets(void) 1280void l2cap_cleanup_sockets(void)
1266{ 1281{
1282 bt_procfs_cleanup(&init_net, "l2cap");
1267 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 1283 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
1268 BT_ERR("L2CAP socket unregistration failed"); 1284 BT_ERR("L2CAP socket unregistration failed");
1269 1285
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index eba022de3c20..aa2ea0a8142c 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
35bool enable_hs; 35bool enable_hs;
36 36
37#define MGMT_VERSION 1 37#define MGMT_VERSION 1
38#define MGMT_REVISION 1 38#define MGMT_REVISION 2
39 39
40static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -99,6 +99,7 @@ static const u16 mgmt_events[] = {
99 MGMT_EV_DEVICE_BLOCKED, 99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED, 100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED, 101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
102}; 103};
103 104
104/* 105/*
@@ -193,6 +194,11 @@ static u8 mgmt_status_table[] = {
193 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ 194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
194}; 195};
195 196
197bool mgmt_valid_hdev(struct hci_dev *hdev)
198{
199 return hdev->dev_type == HCI_BREDR;
200}
201
196static u8 mgmt_status(u8 hci_status) 202static u8 mgmt_status(u8 hci_status)
197{ 203{
198 if (hci_status < ARRAY_SIZE(mgmt_status_table)) 204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -317,7 +323,6 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
317 u16 data_len) 323 u16 data_len)
318{ 324{
319 struct mgmt_rp_read_index_list *rp; 325 struct mgmt_rp_read_index_list *rp;
320 struct list_head *p;
321 struct hci_dev *d; 326 struct hci_dev *d;
322 size_t rp_len; 327 size_t rp_len;
323 u16 count; 328 u16 count;
@@ -328,7 +333,10 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
328 read_lock(&hci_dev_list_lock); 333 read_lock(&hci_dev_list_lock);
329 334
330 count = 0; 335 count = 0;
331 list_for_each(p, &hci_dev_list) { 336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
338 continue;
339
332 count++; 340 count++;
333 } 341 }
334 342
@@ -346,6 +354,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
346 if (test_bit(HCI_SETUP, &d->dev_flags)) 354 if (test_bit(HCI_SETUP, &d->dev_flags))
347 continue; 355 continue;
348 356
357 if (!mgmt_valid_hdev(d))
358 continue;
359
349 rp->index[i++] = cpu_to_le16(d->id); 360 rp->index[i++] = cpu_to_le16(d->id);
350 BT_DBG("Added hci%u", d->id); 361 BT_DBG("Added hci%u", d->id);
351 } 362 }
@@ -370,10 +381,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
370 settings |= MGMT_SETTING_DISCOVERABLE; 381 settings |= MGMT_SETTING_DISCOVERABLE;
371 settings |= MGMT_SETTING_PAIRABLE; 382 settings |= MGMT_SETTING_PAIRABLE;
372 383
373 if (hdev->features[6] & LMP_SIMPLE_PAIR) 384 if (lmp_ssp_capable(hdev))
374 settings |= MGMT_SETTING_SSP; 385 settings |= MGMT_SETTING_SSP;
375 386
376 if (!(hdev->features[4] & LMP_NO_BREDR)) { 387 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_BREDR; 388 settings |= MGMT_SETTING_BREDR;
378 settings |= MGMT_SETTING_LINK_SECURITY; 389 settings |= MGMT_SETTING_LINK_SECURITY;
379 } 390 }
@@ -381,7 +392,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
381 if (enable_hs) 392 if (enable_hs)
382 settings |= MGMT_SETTING_HS; 393 settings |= MGMT_SETTING_HS;
383 394
384 if (hdev->features[4] & LMP_LE) 395 if (lmp_le_capable(hdev))
385 settings |= MGMT_SETTING_LE; 396 settings |= MGMT_SETTING_LE;
386 397
387 return settings; 398 return settings;
@@ -403,7 +414,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
403 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 414 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_PAIRABLE; 415 settings |= MGMT_SETTING_PAIRABLE;
405 416
406 if (!(hdev->features[4] & LMP_NO_BREDR)) 417 if (lmp_bredr_capable(hdev))
407 settings |= MGMT_SETTING_BREDR; 418 settings |= MGMT_SETTING_BREDR;
408 419
409 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 420 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -1111,7 +1122,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1111 1122
1112 hci_dev_lock(hdev); 1123 hci_dev_lock(hdev);
1113 1124
1114 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { 1125 if (!lmp_ssp_capable(hdev)) {
1115 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 1126 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1116 MGMT_STATUS_NOT_SUPPORTED); 1127 MGMT_STATUS_NOT_SUPPORTED);
1117 goto failed; 1128 goto failed;
@@ -1195,7 +1206,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1195 1206
1196 hci_dev_lock(hdev); 1207 hci_dev_lock(hdev);
1197 1208
1198 if (!(hdev->features[4] & LMP_LE)) { 1209 if (!lmp_le_capable(hdev)) {
1199 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 1210 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1200 MGMT_STATUS_NOT_SUPPORTED); 1211 MGMT_STATUS_NOT_SUPPORTED);
1201 goto unlock; 1212 goto unlock;
@@ -2191,7 +2202,7 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2191 goto unlock; 2202 goto unlock;
2192 } 2203 }
2193 2204
2194 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { 2205 if (!lmp_ssp_capable(hdev)) {
2195 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 2206 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2196 MGMT_STATUS_NOT_SUPPORTED); 2207 MGMT_STATUS_NOT_SUPPORTED);
2197 goto unlock; 2208 goto unlock;
@@ -2820,6 +2831,9 @@ static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
2820 2831
2821int mgmt_index_added(struct hci_dev *hdev) 2832int mgmt_index_added(struct hci_dev *hdev)
2822{ 2833{
2834 if (!mgmt_valid_hdev(hdev))
2835 return -ENOTSUPP;
2836
2823 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); 2837 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
2824} 2838}
2825 2839
@@ -2827,6 +2841,9 @@ int mgmt_index_removed(struct hci_dev *hdev)
2827{ 2841{
2828 u8 status = MGMT_STATUS_INVALID_INDEX; 2842 u8 status = MGMT_STATUS_INVALID_INDEX;
2829 2843
2844 if (!mgmt_valid_hdev(hdev))
2845 return -ENOTSUPP;
2846
2830 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2847 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2831 2848
2832 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); 2849 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
@@ -3077,16 +3094,17 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3077} 3094}
3078 3095
3079int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, 3096int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3080 u8 link_type, u8 addr_type) 3097 u8 link_type, u8 addr_type, u8 reason)
3081{ 3098{
3082 struct mgmt_addr_info ev; 3099 struct mgmt_ev_device_disconnected ev;
3083 struct sock *sk = NULL; 3100 struct sock *sk = NULL;
3084 int err; 3101 int err;
3085 3102
3086 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); 3103 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3087 3104
3088 bacpy(&ev.bdaddr, bdaddr); 3105 bacpy(&ev.addr.bdaddr, bdaddr);
3089 ev.type = link_to_bdaddr(link_type, addr_type); 3106 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3107 ev.reason = reason;
3090 3108
3091 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), 3109 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3092 sk); 3110 sk);
@@ -3275,6 +3293,22 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3275 MGMT_OP_USER_PASSKEY_NEG_REPLY); 3293 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3276} 3294}
3277 3295
3296int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3297 u8 link_type, u8 addr_type, u32 passkey,
3298 u8 entered)
3299{
3300 struct mgmt_ev_passkey_notify ev;
3301
3302 BT_DBG("%s", hdev->name);
3303
3304 bacpy(&ev.addr.bdaddr, bdaddr);
3305 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3306 ev.passkey = __cpu_to_le32(passkey);
3307 ev.entered = entered;
3308
3309 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3310}
3311
3278int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3312int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3279 u8 addr_type, u8 status) 3313 u8 addr_type, u8 status)
3280{ 3314{
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1a17850d093c..b3226f3658cf 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1035,8 +1035,17 @@ int __init rfcomm_init_sockets(void)
1035 return err; 1035 return err;
1036 1036
1037 err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); 1037 err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
1038 if (err < 0) 1038 if (err < 0) {
1039 BT_ERR("RFCOMM socket layer registration failed");
1040 goto error;
1041 }
1042
1043 err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
1044 if (err < 0) {
1045 BT_ERR("Failed to create RFCOMM proc file");
1046 bt_sock_unregister(BTPROTO_RFCOMM);
1039 goto error; 1047 goto error;
1048 }
1040 1049
1041 if (bt_debugfs) { 1050 if (bt_debugfs) {
1042 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, 1051 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
@@ -1050,13 +1059,14 @@ int __init rfcomm_init_sockets(void)
1050 return 0; 1059 return 0;
1051 1060
1052error: 1061error:
1053 BT_ERR("RFCOMM socket layer registration failed");
1054 proto_unregister(&rfcomm_proto); 1062 proto_unregister(&rfcomm_proto);
1055 return err; 1063 return err;
1056} 1064}
1057 1065
1058void __exit rfcomm_cleanup_sockets(void) 1066void __exit rfcomm_cleanup_sockets(void)
1059{ 1067{
1068 bt_procfs_cleanup(&init_net, "rfcomm");
1069
1060 debugfs_remove(rfcomm_sock_debugfs); 1070 debugfs_remove(rfcomm_sock_debugfs);
1061 1071
1062 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1072 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 3589e21edb09..dc42b917aaaf 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -912,7 +912,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
912 return lm; 912 return lm;
913} 913}
914 914
915int sco_connect_cfm(struct hci_conn *hcon, __u8 status) 915void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
916{ 916{
917 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 917 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
918 if (!status) { 918 if (!status) {
@@ -923,16 +923,13 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
923 sco_conn_ready(conn); 923 sco_conn_ready(conn);
924 } else 924 } else
925 sco_conn_del(hcon, bt_to_errno(status)); 925 sco_conn_del(hcon, bt_to_errno(status));
926
927 return 0;
928} 926}
929 927
930int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) 928void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
931{ 929{
932 BT_DBG("hcon %p reason %d", hcon, reason); 930 BT_DBG("hcon %p reason %d", hcon, reason);
933 931
934 sco_conn_del(hcon, bt_to_errno(reason)); 932 sco_conn_del(hcon, bt_to_errno(reason));
935 return 0;
936} 933}
937 934
938int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) 935int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
@@ -1025,6 +1022,13 @@ int __init sco_init(void)
1025 goto error; 1022 goto error;
1026 } 1023 }
1027 1024
1025 err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL);
1026 if (err < 0) {
1027 BT_ERR("Failed to create SCO proc file");
1028 bt_sock_unregister(BTPROTO_SCO);
1029 goto error;
1030 }
1031
1028 if (bt_debugfs) { 1032 if (bt_debugfs) {
1029 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, 1033 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1030 NULL, &sco_debugfs_fops); 1034 NULL, &sco_debugfs_fops);
@@ -1043,6 +1047,8 @@ error:
1043 1047
1044void __exit sco_exit(void) 1048void __exit sco_exit(void)
1045{ 1049{
1050 bt_procfs_cleanup(&init_net, "sco");
1051
1046 debugfs_remove(sco_debugfs); 1052 debugfs_remove(sco_debugfs);
1047 1053
1048 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1054 if (bt_sock_unregister(BTPROTO_SCO) < 0)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d21f32383517..d9576e6de2b8 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -312,7 +312,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
312 312
313 fe->is_local = f->is_local; 313 fe->is_local = f->is_local;
314 if (!f->is_static) 314 if (!f->is_static)
315 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated); 315 fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
316 ++fe; 316 ++fe;
317 ++num; 317 ++num;
318 } 318 }
@@ -467,14 +467,14 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
467 467
468static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, 468static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
469 const struct net_bridge_fdb_entry *fdb, 469 const struct net_bridge_fdb_entry *fdb,
470 u32 pid, u32 seq, int type, unsigned int flags) 470 u32 portid, u32 seq, int type, unsigned int flags)
471{ 471{
472 unsigned long now = jiffies; 472 unsigned long now = jiffies;
473 struct nda_cacheinfo ci; 473 struct nda_cacheinfo ci;
474 struct nlmsghdr *nlh; 474 struct nlmsghdr *nlh;
475 struct ndmsg *ndm; 475 struct ndmsg *ndm;
476 476
477 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 477 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
478 if (nlh == NULL) 478 if (nlh == NULL)
479 return -EMSGSIZE; 479 return -EMSGSIZE;
480 480
@@ -555,7 +555,7 @@ int br_fdb_dump(struct sk_buff *skb,
555 goto skip; 555 goto skip;
556 556
557 if (fdb_fill_info(skb, br, f, 557 if (fdb_fill_info(skb, br, f,
558 NETLINK_CB(cb->skb).pid, 558 NETLINK_CB(cb->skb).portid,
559 cb->nlh->nlmsg_seq, 559 cb->nlh->nlmsg_seq,
560 RTM_NEWNEIGH, 560 RTM_NEWNEIGH,
561 NLM_F_MULTI) < 0) 561 NLM_F_MULTI) < 0)
@@ -608,8 +608,9 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
608} 608}
609 609
610/* Add new permanent fdb entry with RTM_NEWNEIGH */ 610/* Add new permanent fdb entry with RTM_NEWNEIGH */
611int br_fdb_add(struct ndmsg *ndm, struct net_device *dev, 611int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
612 unsigned char *addr, u16 nlh_flags) 612 struct net_device *dev,
613 const unsigned char *addr, u16 nlh_flags)
613{ 614{
614 struct net_bridge_port *p; 615 struct net_bridge_port *p;
615 int err = 0; 616 int err = 0;
@@ -639,7 +640,7 @@ int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
639 return err; 640 return err;
640} 641}
641 642
642static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr) 643static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
643{ 644{
644 struct net_bridge *br = p->br; 645 struct net_bridge *br = p->br;
645 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 646 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -655,7 +656,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
655 656
656/* Remove neighbor entry with RTM_DELNEIGH */ 657/* Remove neighbor entry with RTM_DELNEIGH */
657int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev, 658int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
658 unsigned char *addr) 659 const unsigned char *addr)
659{ 660{
660 struct net_bridge_port *p; 661 struct net_bridge_port *p;
661 int err; 662 int err;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fe41260fbf38..093f527276a3 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -127,7 +127,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
127 goto skip; 127 goto skip;
128 128
129 if (br_fill_ifinfo(skb, port, 129 if (br_fill_ifinfo(skb, port,
130 NETLINK_CB(cb->skb).pid, 130 NETLINK_CB(cb->skb).portid,
131 cb->nlh->nlmsg_seq, RTM_NEWLINK, 131 cb->nlh->nlmsg_seq, RTM_NEWLINK,
132 NLM_F_MULTI) < 0) 132 NLM_F_MULTI) < 0)
133 break; 133 break;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index f507d2af9646..9b278c4ebee1 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -363,10 +363,10 @@ extern void br_fdb_update(struct net_bridge *br,
363 363
364extern int br_fdb_delete(struct ndmsg *ndm, 364extern int br_fdb_delete(struct ndmsg *ndm,
365 struct net_device *dev, 365 struct net_device *dev,
366 unsigned char *addr); 366 const unsigned char *addr);
367extern int br_fdb_add(struct ndmsg *nlh, 367extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
368 struct net_device *dev, 368 struct net_device *dev,
369 unsigned char *addr, 369 const unsigned char *addr,
370 u16 nlh_flags); 370 u16 nlh_flags);
371extern int br_fdb_dump(struct sk_buff *skb, 371extern int br_fdb_dump(struct sk_buff *skb,
372 struct netlink_callback *cb, 372 struct netlink_callback *cb,
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index a6747e673426..c3530a81a33b 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -170,5 +170,5 @@ void br_stp_port_timer_init(struct net_bridge_port *p)
170unsigned long br_timer_value(const struct timer_list *timer) 170unsigned long br_timer_value(const struct timer_list *timer)
171{ 171{
172 return timer_pending(timer) 172 return timer_pending(timer)
173 ? jiffies_to_clock_t(timer->expires - jiffies) : 0; 173 ? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
174} 174}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 19063473c71f..3476ec469740 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -298,8 +298,7 @@ static int __init ebt_ulog_init(void)
298 spin_lock_init(&ulog_buffers[i].lock); 298 spin_lock_init(&ulog_buffers[i].lock);
299 } 299 }
300 300
301 ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, 301 ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
302 THIS_MODULE, &cfg);
303 if (!ebtulognl) 302 if (!ebtulognl)
304 ret = -ENOMEM; 303 ret = -ENOMEM;
305 else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) 304 else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 42e6bd094574..3c2e9dced9e0 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
100static int __net_init frame_filter_net_init(struct net *net) 100static int __net_init frame_filter_net_init(struct net *net)
101{ 101{
102 net->xt.frame_filter = ebt_register_table(net, &frame_filter); 102 net->xt.frame_filter = ebt_register_table(net, &frame_filter);
103 if (IS_ERR(net->xt.frame_filter)) 103 return PTR_RET(net->xt.frame_filter);
104 return PTR_ERR(net->xt.frame_filter);
105 return 0;
106} 104}
107 105
108static void __net_exit frame_filter_net_exit(struct net *net) 106static void __net_exit frame_filter_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 6dc2f878ae05..10871bc77908 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
100static int __net_init frame_nat_net_init(struct net *net) 100static int __net_init frame_nat_net_init(struct net *net)
101{ 101{
102 net->xt.frame_nat = ebt_register_table(net, &frame_nat); 102 net->xt.frame_nat = ebt_register_table(net, &frame_nat);
103 if (IS_ERR(net->xt.frame_nat)) 103 return PTR_RET(net->xt.frame_nat);
104 return PTR_ERR(net->xt.frame_nat);
105 return 0;
106} 104}
107 105
108static void __net_exit frame_nat_net_exit(struct net *net) 106static void __net_exit frame_nat_net_exit(struct net *net)
diff --git a/net/can/gw.c b/net/can/gw.c
index b54d5e695b03..127879c55fb6 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -549,7 +549,7 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
549 if (idx < s_idx) 549 if (idx < s_idx)
550 goto cont; 550 goto cont;
551 551
552 if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid, 552 if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid,
553 cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) 553 cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
554 break; 554 break;
555cont: 555cont:
diff --git a/net/core/dev.c b/net/core/dev.c
index 17e912f9b711..1e0a1847c3bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -959,18 +959,30 @@ int dev_alloc_name(struct net_device *dev, const char *name)
959} 959}
960EXPORT_SYMBOL(dev_alloc_name); 960EXPORT_SYMBOL(dev_alloc_name);
961 961
962static int dev_get_valid_name(struct net_device *dev, const char *name) 962static int dev_alloc_name_ns(struct net *net,
963 struct net_device *dev,
964 const char *name)
963{ 965{
964 struct net *net; 966 char buf[IFNAMSIZ];
967 int ret;
965 968
966 BUG_ON(!dev_net(dev)); 969 ret = __dev_alloc_name(net, name, buf);
967 net = dev_net(dev); 970 if (ret >= 0)
971 strlcpy(dev->name, buf, IFNAMSIZ);
972 return ret;
973}
974
975static int dev_get_valid_name(struct net *net,
976 struct net_device *dev,
977 const char *name)
978{
979 BUG_ON(!net);
968 980
969 if (!dev_valid_name(name)) 981 if (!dev_valid_name(name))
970 return -EINVAL; 982 return -EINVAL;
971 983
972 if (strchr(name, '%')) 984 if (strchr(name, '%'))
973 return dev_alloc_name(dev, name); 985 return dev_alloc_name_ns(net, dev, name);
974 else if (__dev_get_by_name(net, name)) 986 else if (__dev_get_by_name(net, name))
975 return -EEXIST; 987 return -EEXIST;
976 else if (dev->name != name) 988 else if (dev->name != name)
@@ -1006,7 +1018,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
1006 1018
1007 memcpy(oldname, dev->name, IFNAMSIZ); 1019 memcpy(oldname, dev->name, IFNAMSIZ);
1008 1020
1009 err = dev_get_valid_name(dev, newname); 1021 err = dev_get_valid_name(net, dev, newname);
1010 if (err < 0) 1022 if (err < 0)
1011 return err; 1023 return err;
1012 1024
@@ -1109,11 +1121,23 @@ void netdev_state_change(struct net_device *dev)
1109} 1121}
1110EXPORT_SYMBOL(netdev_state_change); 1122EXPORT_SYMBOL(netdev_state_change);
1111 1123
1112int netdev_bonding_change(struct net_device *dev, unsigned long event) 1124/**
1125 * netdev_notify_peers - notify network peers about existence of @dev
1126 * @dev: network device
1127 *
1128 * Generate traffic such that interested network peers are aware of
1129 * @dev, such as by generating a gratuitous ARP. This may be used when
1130 * a device wants to inform the rest of the network about some sort of
1131 * reconfiguration such as a failover event or virtual machine
1132 * migration.
1133 */
1134void netdev_notify_peers(struct net_device *dev)
1113{ 1135{
1114 return call_netdevice_notifiers(event, dev); 1136 rtnl_lock();
1137 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1138 rtnl_unlock();
1115} 1139}
1116EXPORT_SYMBOL(netdev_bonding_change); 1140EXPORT_SYMBOL(netdev_notify_peers);
1117 1141
1118/** 1142/**
1119 * dev_load - load a network module 1143 * dev_load - load a network module
@@ -1394,7 +1418,6 @@ rollback:
1394 nb->notifier_call(nb, NETDEV_DOWN, dev); 1418 nb->notifier_call(nb, NETDEV_DOWN, dev);
1395 } 1419 }
1396 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1420 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1397 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1398 } 1421 }
1399 } 1422 }
1400 1423
@@ -1436,7 +1459,6 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
1436 nb->notifier_call(nb, NETDEV_DOWN, dev); 1459 nb->notifier_call(nb, NETDEV_DOWN, dev);
1437 } 1460 }
1438 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1461 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1439 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1440 } 1462 }
1441 } 1463 }
1442unlock: 1464unlock:
@@ -2175,9 +2197,7 @@ EXPORT_SYMBOL(netif_skb_features);
2175/* 2197/*
2176 * Returns true if either: 2198 * Returns true if either:
2177 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2199 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2178 * 2. skb is fragmented and the device does not support SG, or if 2200 * 2. skb is fragmented and the device does not support SG.
2179 * at least one of fragments is in highmem and device does not
2180 * support DMA from it.
2181 */ 2201 */
2182static inline int skb_needs_linearize(struct sk_buff *skb, 2202static inline int skb_needs_linearize(struct sk_buff *skb,
2183 int features) 2203 int features)
@@ -2206,9 +2226,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2206 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2226 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2207 skb_dst_drop(skb); 2227 skb_dst_drop(skb);
2208 2228
2209 if (!list_empty(&ptype_all))
2210 dev_queue_xmit_nit(skb, dev);
2211
2212 features = netif_skb_features(skb); 2229 features = netif_skb_features(skb);
2213 2230
2214 if (vlan_tx_tag_present(skb) && 2231 if (vlan_tx_tag_present(skb) &&
@@ -2243,6 +2260,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2243 } 2260 }
2244 } 2261 }
2245 2262
2263 if (!list_empty(&ptype_all))
2264 dev_queue_xmit_nit(skb, dev);
2265
2246 skb_len = skb->len; 2266 skb_len = skb->len;
2247 rc = ops->ndo_start_xmit(skb, dev); 2267 rc = ops->ndo_start_xmit(skb, dev);
2248 trace_net_dev_xmit(skb, rc, dev, skb_len); 2268 trace_net_dev_xmit(skb, rc, dev, skb_len);
@@ -2265,6 +2285,9 @@ gso:
2265 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2285 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2266 skb_dst_drop(nskb); 2286 skb_dst_drop(nskb);
2267 2287
2288 if (!list_empty(&ptype_all))
2289 dev_queue_xmit_nit(nskb, dev);
2290
2268 skb_len = nskb->len; 2291 skb_len = nskb->len;
2269 rc = ops->ndo_start_xmit(nskb, dev); 2292 rc = ops->ndo_start_xmit(nskb, dev);
2270 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2293 trace_net_dev_xmit(nskb, rc, dev, skb_len);
@@ -2374,8 +2397,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2374#endif 2397#endif
2375} 2398}
2376 2399
2377static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2400struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2378 struct sk_buff *skb) 2401 struct sk_buff *skb)
2379{ 2402{
2380 int queue_index; 2403 int queue_index;
2381 const struct net_device_ops *ops = dev->netdev_ops; 2404 const struct net_device_ops *ops = dev->netdev_ops;
@@ -2549,7 +2572,7 @@ int dev_queue_xmit(struct sk_buff *skb)
2549 2572
2550 skb_update_prio(skb); 2573 skb_update_prio(skb);
2551 2574
2552 txq = dev_pick_tx(dev, skb); 2575 txq = netdev_pick_tx(dev, skb);
2553 q = rcu_dereference_bh(txq->qdisc); 2576 q = rcu_dereference_bh(txq->qdisc);
2554 2577
2555#ifdef CONFIG_NET_CLS_ACT 2578#ifdef CONFIG_NET_CLS_ACT
@@ -2622,6 +2645,8 @@ EXPORT_SYMBOL(dev_queue_xmit);
2622 =======================================================================*/ 2645 =======================================================================*/
2623 2646
2624int netdev_max_backlog __read_mostly = 1000; 2647int netdev_max_backlog __read_mostly = 1000;
2648EXPORT_SYMBOL(netdev_max_backlog);
2649
2625int netdev_tstamp_prequeue __read_mostly = 1; 2650int netdev_tstamp_prequeue __read_mostly = 1;
2626int netdev_budget __read_mostly = 300; 2651int netdev_budget __read_mostly = 300;
2627int weight_p __read_mostly = 64; /* old backlog weight */ 2652int weight_p __read_mostly = 64; /* old backlog weight */
@@ -5239,12 +5264,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5239 */ 5264 */
5240static int dev_new_index(struct net *net) 5265static int dev_new_index(struct net *net)
5241{ 5266{
5242 static int ifindex; 5267 int ifindex = net->ifindex;
5243 for (;;) { 5268 for (;;) {
5244 if (++ifindex <= 0) 5269 if (++ifindex <= 0)
5245 ifindex = 1; 5270 ifindex = 1;
5246 if (!__dev_get_by_index(net, ifindex)) 5271 if (!__dev_get_by_index(net, ifindex))
5247 return ifindex; 5272 return net->ifindex = ifindex;
5248 } 5273 }
5249} 5274}
5250 5275
@@ -5322,10 +5347,6 @@ static void rollback_registered_many(struct list_head *head)
5322 netdev_unregister_kobject(dev); 5347 netdev_unregister_kobject(dev);
5323 } 5348 }
5324 5349
5325 /* Process any work delayed until the end of the batch */
5326 dev = list_first_entry(head, struct net_device, unreg_list);
5327 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5328
5329 synchronize_net(); 5350 synchronize_net();
5330 5351
5331 list_for_each_entry(dev, head, unreg_list) 5352 list_for_each_entry(dev, head, unreg_list)
@@ -5583,7 +5604,7 @@ int register_netdevice(struct net_device *dev)
5583 5604
5584 dev->iflink = -1; 5605 dev->iflink = -1;
5585 5606
5586 ret = dev_get_valid_name(dev, dev->name); 5607 ret = dev_get_valid_name(net, dev, dev->name);
5587 if (ret < 0) 5608 if (ret < 0)
5588 goto out; 5609 goto out;
5589 5610
@@ -5597,7 +5618,12 @@ int register_netdevice(struct net_device *dev)
5597 } 5618 }
5598 } 5619 }
5599 5620
5600 dev->ifindex = dev_new_index(net); 5621 ret = -EBUSY;
5622 if (!dev->ifindex)
5623 dev->ifindex = dev_new_index(net);
5624 else if (__dev_get_by_index(net, dev->ifindex))
5625 goto err_uninit;
5626
5601 if (dev->iflink == -1) 5627 if (dev->iflink == -1)
5602 dev->iflink = dev->ifindex; 5628 dev->iflink = dev->ifindex;
5603 5629
@@ -5640,6 +5666,8 @@ int register_netdevice(struct net_device *dev)
5640 5666
5641 set_bit(__LINK_STATE_PRESENT, &dev->state); 5667 set_bit(__LINK_STATE_PRESENT, &dev->state);
5642 5668
5669 linkwatch_init_dev(dev);
5670
5643 dev_init_scheduler(dev); 5671 dev_init_scheduler(dev);
5644 dev_hold(dev); 5672 dev_hold(dev);
5645 list_netdevice(dev); 5673 list_netdevice(dev);
@@ -5773,9 +5801,12 @@ static void netdev_wait_allrefs(struct net_device *dev)
5773 5801
5774 /* Rebroadcast unregister notification */ 5802 /* Rebroadcast unregister notification */
5775 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5803 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5776 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5777 * should have already handle it the first time */
5778 5804
5805 __rtnl_unlock();
5806 rcu_barrier();
5807 rtnl_lock();
5808
5809 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5779 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5810 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5780 &dev->state)) { 5811 &dev->state)) {
5781 /* We must not have linkwatch events 5812 /* We must not have linkwatch events
@@ -5837,9 +5868,8 @@ void netdev_run_todo(void)
5837 5868
5838 __rtnl_unlock(); 5869 __rtnl_unlock();
5839 5870
5840 /* Wait for rcu callbacks to finish before attempting to drain 5871
5841 * the device list. This usually avoids a 250ms wait. 5872 /* Wait for rcu callbacks to finish before next phase */
5842 */
5843 if (!list_empty(&list)) 5873 if (!list_empty(&list))
5844 rcu_barrier(); 5874 rcu_barrier();
5845 5875
@@ -5848,6 +5878,10 @@ void netdev_run_todo(void)
5848 = list_first_entry(&list, struct net_device, todo_list); 5878 = list_first_entry(&list, struct net_device, todo_list);
5849 list_del(&dev->todo_list); 5879 list_del(&dev->todo_list);
5850 5880
5881 rtnl_lock();
5882 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5883 __rtnl_unlock();
5884
5851 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5885 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5852 pr_err("network todo '%s' but state %d\n", 5886 pr_err("network todo '%s' but state %d\n",
5853 dev->name, dev->reg_state); 5887 dev->name, dev->reg_state);
@@ -5943,6 +5977,8 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5943 return queue; 5977 return queue;
5944} 5978}
5945 5979
5980static const struct ethtool_ops default_ethtool_ops;
5981
5946/** 5982/**
5947 * alloc_netdev_mqs - allocate network device 5983 * alloc_netdev_mqs - allocate network device
5948 * @sizeof_priv: size of private data to allocate space for 5984 * @sizeof_priv: size of private data to allocate space for
@@ -6030,6 +6066,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6030 6066
6031 strcpy(dev->name, name); 6067 strcpy(dev->name, name);
6032 dev->group = INIT_NETDEV_GROUP; 6068 dev->group = INIT_NETDEV_GROUP;
6069 if (!dev->ethtool_ops)
6070 dev->ethtool_ops = &default_ethtool_ops;
6033 return dev; 6071 return dev;
6034 6072
6035free_all: 6073free_all:
@@ -6214,7 +6252,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6214 /* We get here if we can't use the current device name */ 6252 /* We get here if we can't use the current device name */
6215 if (!pat) 6253 if (!pat)
6216 goto out; 6254 goto out;
6217 if (dev_get_valid_name(dev, pat) < 0) 6255 if (dev_get_valid_name(net, dev, pat) < 0)
6218 goto out; 6256 goto out;
6219 } 6257 }
6220 6258
@@ -6242,7 +6280,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6242 the device is just moving and can keep their slaves up. 6280 the device is just moving and can keep their slaves up.
6243 */ 6281 */
6244 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6282 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6245 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 6283 rcu_barrier();
6284 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6246 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 6285 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6247 6286
6248 /* 6287 /*
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c4cc2bc49f06..87cc17db2d56 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, 24static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
25 unsigned char *addr, int addr_len, 25 const unsigned char *addr, int addr_len,
26 unsigned char addr_type, bool global) 26 unsigned char addr_type, bool global)
27{ 27{
28 struct netdev_hw_addr *ha; 28 struct netdev_hw_addr *ha;
@@ -46,7 +46,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
46} 46}
47 47
48static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, 48static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
49 unsigned char *addr, int addr_len, 49 const unsigned char *addr, int addr_len,
50 unsigned char addr_type, bool global) 50 unsigned char addr_type, bool global)
51{ 51{
52 struct netdev_hw_addr *ha; 52 struct netdev_hw_addr *ha;
@@ -72,14 +72,15 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
72 return __hw_addr_create_ex(list, addr, addr_len, addr_type, global); 72 return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
73} 73}
74 74
75static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, 75static int __hw_addr_add(struct netdev_hw_addr_list *list,
76 int addr_len, unsigned char addr_type) 76 const unsigned char *addr, int addr_len,
77 unsigned char addr_type)
77{ 78{
78 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); 79 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
79} 80}
80 81
81static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, 82static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
82 unsigned char *addr, int addr_len, 83 const unsigned char *addr, int addr_len,
83 unsigned char addr_type, bool global) 84 unsigned char addr_type, bool global)
84{ 85{
85 struct netdev_hw_addr *ha; 86 struct netdev_hw_addr *ha;
@@ -104,8 +105,9 @@ static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
104 return -ENOENT; 105 return -ENOENT;
105} 106}
106 107
107static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, 108static int __hw_addr_del(struct netdev_hw_addr_list *list,
108 int addr_len, unsigned char addr_type) 109 const unsigned char *addr, int addr_len,
110 unsigned char addr_type)
109{ 111{
110 return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); 112 return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
111} 113}
@@ -278,7 +280,7 @@ EXPORT_SYMBOL(dev_addr_init);
278 * 280 *
279 * The caller must hold the rtnl_mutex. 281 * The caller must hold the rtnl_mutex.
280 */ 282 */
281int dev_addr_add(struct net_device *dev, unsigned char *addr, 283int dev_addr_add(struct net_device *dev, const unsigned char *addr,
282 unsigned char addr_type) 284 unsigned char addr_type)
283{ 285{
284 int err; 286 int err;
@@ -303,7 +305,7 @@ EXPORT_SYMBOL(dev_addr_add);
303 * 305 *
304 * The caller must hold the rtnl_mutex. 306 * The caller must hold the rtnl_mutex.
305 */ 307 */
306int dev_addr_del(struct net_device *dev, unsigned char *addr, 308int dev_addr_del(struct net_device *dev, const unsigned char *addr,
307 unsigned char addr_type) 309 unsigned char addr_type)
308{ 310{
309 int err; 311 int err;
@@ -390,7 +392,7 @@ EXPORT_SYMBOL(dev_addr_del_multiple);
390 * @dev: device 392 * @dev: device
391 * @addr: address to add 393 * @addr: address to add
392 */ 394 */
393int dev_uc_add_excl(struct net_device *dev, unsigned char *addr) 395int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
394{ 396{
395 struct netdev_hw_addr *ha; 397 struct netdev_hw_addr *ha;
396 int err; 398 int err;
@@ -421,7 +423,7 @@ EXPORT_SYMBOL(dev_uc_add_excl);
421 * Add a secondary unicast address to the device or increase 423 * Add a secondary unicast address to the device or increase
422 * the reference count if it already exists. 424 * the reference count if it already exists.
423 */ 425 */
424int dev_uc_add(struct net_device *dev, unsigned char *addr) 426int dev_uc_add(struct net_device *dev, const unsigned char *addr)
425{ 427{
426 int err; 428 int err;
427 429
@@ -443,7 +445,7 @@ EXPORT_SYMBOL(dev_uc_add);
443 * Release reference to a secondary unicast address and remove it 445 * Release reference to a secondary unicast address and remove it
444 * from the device if the reference count drops to zero. 446 * from the device if the reference count drops to zero.
445 */ 447 */
446int dev_uc_del(struct net_device *dev, unsigned char *addr) 448int dev_uc_del(struct net_device *dev, const unsigned char *addr)
447{ 449{
448 int err; 450 int err;
449 451
@@ -543,7 +545,7 @@ EXPORT_SYMBOL(dev_uc_init);
543 * @dev: device 545 * @dev: device
544 * @addr: address to add 546 * @addr: address to add
545 */ 547 */
546int dev_mc_add_excl(struct net_device *dev, unsigned char *addr) 548int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
547{ 549{
548 struct netdev_hw_addr *ha; 550 struct netdev_hw_addr *ha;
549 int err; 551 int err;
@@ -566,7 +568,7 @@ out:
566} 568}
567EXPORT_SYMBOL(dev_mc_add_excl); 569EXPORT_SYMBOL(dev_mc_add_excl);
568 570
569static int __dev_mc_add(struct net_device *dev, unsigned char *addr, 571static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
570 bool global) 572 bool global)
571{ 573{
572 int err; 574 int err;
@@ -587,7 +589,7 @@ static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
587 * Add a multicast address to the device or increase 589 * Add a multicast address to the device or increase
588 * the reference count if it already exists. 590 * the reference count if it already exists.
589 */ 591 */
590int dev_mc_add(struct net_device *dev, unsigned char *addr) 592int dev_mc_add(struct net_device *dev, const unsigned char *addr)
591{ 593{
592 return __dev_mc_add(dev, addr, false); 594 return __dev_mc_add(dev, addr, false);
593} 595}
@@ -600,13 +602,13 @@ EXPORT_SYMBOL(dev_mc_add);
600 * 602 *
601 * Add a global multicast address to the device. 603 * Add a global multicast address to the device.
602 */ 604 */
603int dev_mc_add_global(struct net_device *dev, unsigned char *addr) 605int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
604{ 606{
605 return __dev_mc_add(dev, addr, true); 607 return __dev_mc_add(dev, addr, true);
606} 608}
607EXPORT_SYMBOL(dev_mc_add_global); 609EXPORT_SYMBOL(dev_mc_add_global);
608 610
609static int __dev_mc_del(struct net_device *dev, unsigned char *addr, 611static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
610 bool global) 612 bool global)
611{ 613{
612 int err; 614 int err;
@@ -628,7 +630,7 @@ static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
628 * Release reference to a multicast address and remove it 630 * Release reference to a multicast address and remove it
629 * from the device if the reference count drops to zero. 631 * from the device if the reference count drops to zero.
630 */ 632 */
631int dev_mc_del(struct net_device *dev, unsigned char *addr) 633int dev_mc_del(struct net_device *dev, const unsigned char *addr)
632{ 634{
633 return __dev_mc_del(dev, addr, false); 635 return __dev_mc_del(dev, addr, false);
634} 636}
@@ -642,7 +644,7 @@ EXPORT_SYMBOL(dev_mc_del);
642 * Release reference to a multicast address and remove it 644 * Release reference to a multicast address and remove it
643 * from the device if the reference count drops to zero. 645 * from the device if the reference count drops to zero.
644 */ 646 */
645int dev_mc_del_global(struct net_device *dev, unsigned char *addr) 647int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
646{ 648{
647 return __dev_mc_del(dev, addr, true); 649 return __dev_mc_del(dev, addr, true);
648} 650}
diff --git a/net/core/dst.c b/net/core/dst.c
index b8d7c700541d..ee6153e2cf43 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -374,7 +374,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
374 struct dst_entry *dst, *last = NULL; 374 struct dst_entry *dst, *last = NULL;
375 375
376 switch (event) { 376 switch (event) {
377 case NETDEV_UNREGISTER: 377 case NETDEV_UNREGISTER_FINAL:
378 case NETDEV_DOWN: 378 case NETDEV_DOWN:
379 mutex_lock(&dst_gc_mutex); 379 mutex_lock(&dst_gc_mutex);
380 for (dst = dst_busy_list; dst; dst = dst->next) { 380 for (dst = dst_busy_list; dst; dst = dst->next) {
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index cbf033dcaf1f..4d64cc2e3fa9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1426,18 +1426,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1426 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 1426 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1427 return -EFAULT; 1427 return -EFAULT;
1428 1428
1429 if (!dev->ethtool_ops) {
1430 /* A few commands do not require any driver support,
1431 * are unprivileged, and do not change anything, so we
1432 * can take a shortcut to them. */
1433 if (ethcmd == ETHTOOL_GDRVINFO)
1434 return ethtool_get_drvinfo(dev, useraddr);
1435 else if (ethcmd == ETHTOOL_GET_TS_INFO)
1436 return ethtool_get_ts_info(dev, useraddr);
1437 else
1438 return -EOPNOTSUPP;
1439 }
1440
1441 /* Allow some commands to be done by anyone */ 1429 /* Allow some commands to be done by anyone */
1442 switch (ethcmd) { 1430 switch (ethcmd) {
1443 case ETHTOOL_GSET: 1431 case ETHTOOL_GSET:
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index ab7db83236c9..58a4ba27dfe3 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -402,7 +402,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
402 if (unresolved) 402 if (unresolved)
403 ops->unresolved_rules++; 403 ops->unresolved_rules++;
404 404
405 notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid); 405 notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
406 flush_route_cache(ops); 406 flush_route_cache(ops);
407 rules_ops_put(ops); 407 rules_ops_put(ops);
408 return 0; 408 return 0;
@@ -500,7 +500,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
500 } 500 }
501 501
502 notify_rule_change(RTM_DELRULE, rule, ops, nlh, 502 notify_rule_change(RTM_DELRULE, rule, ops, nlh,
503 NETLINK_CB(skb).pid); 503 NETLINK_CB(skb).portid);
504 if (ops->delete) 504 if (ops->delete)
505 ops->delete(rule); 505 ops->delete(rule);
506 fib_rule_put(rule); 506 fib_rule_put(rule);
@@ -601,7 +601,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
601 if (idx < cb->args[1]) 601 if (idx < cb->args[1])
602 goto skip; 602 goto skip;
603 603
604 if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid, 604 if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
605 cb->nlh->nlmsg_seq, RTM_NEWRULE, 605 cb->nlh->nlmsg_seq, RTM_NEWRULE,
606 NLM_F_MULTI, ops) < 0) 606 NLM_F_MULTI, ops) < 0)
607 break; 607 break;
diff --git a/net/core/filter.c b/net/core/filter.c
index 907efd27ec77..3d92ebb7fbcf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -167,6 +167,14 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
167 case BPF_S_ALU_DIV_K: 167 case BPF_S_ALU_DIV_K:
168 A = reciprocal_divide(A, K); 168 A = reciprocal_divide(A, K);
169 continue; 169 continue;
170 case BPF_S_ALU_MOD_X:
171 if (X == 0)
172 return 0;
173 A %= X;
174 continue;
175 case BPF_S_ALU_MOD_K:
176 A %= K;
177 continue;
170 case BPF_S_ALU_AND_X: 178 case BPF_S_ALU_AND_X:
171 A &= X; 179 A &= X;
172 continue; 180 continue;
@@ -179,6 +187,13 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
179 case BPF_S_ALU_OR_K: 187 case BPF_S_ALU_OR_K:
180 A |= K; 188 A |= K;
181 continue; 189 continue;
190 case BPF_S_ANC_ALU_XOR_X:
191 case BPF_S_ALU_XOR_X:
192 A ^= X;
193 continue;
194 case BPF_S_ALU_XOR_K:
195 A ^= K;
196 continue;
182 case BPF_S_ALU_LSH_X: 197 case BPF_S_ALU_LSH_X:
183 A <<= X; 198 A <<= X;
184 continue; 199 continue;
@@ -326,9 +341,6 @@ load_b:
326 case BPF_S_ANC_CPU: 341 case BPF_S_ANC_CPU:
327 A = raw_smp_processor_id(); 342 A = raw_smp_processor_id();
328 continue; 343 continue;
329 case BPF_S_ANC_ALU_XOR_X:
330 A ^= X;
331 continue;
332 case BPF_S_ANC_NLATTR: { 344 case BPF_S_ANC_NLATTR: {
333 struct nlattr *nla; 345 struct nlattr *nla;
334 346
@@ -469,10 +481,14 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
469 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K, 481 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
470 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X, 482 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
471 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X, 483 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
484 [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
485 [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
472 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K, 486 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
473 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X, 487 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
474 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K, 488 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
475 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X, 489 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
490 [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
491 [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
476 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K, 492 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
477 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X, 493 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
478 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K, 494 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
@@ -531,6 +547,11 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
531 return -EINVAL; 547 return -EINVAL;
532 ftest->k = reciprocal_value(ftest->k); 548 ftest->k = reciprocal_value(ftest->k);
533 break; 549 break;
550 case BPF_S_ALU_MOD_K:
551 /* check for division by zero */
552 if (ftest->k == 0)
553 return -EINVAL;
554 break;
534 case BPF_S_LD_MEM: 555 case BPF_S_LD_MEM:
535 case BPF_S_LDX_MEM: 556 case BPF_S_LDX_MEM:
536 case BPF_S_ST: 557 case BPF_S_ST:
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 8e397a69005a..8f82a5cc3851 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -76,6 +76,14 @@ static void rfc2863_policy(struct net_device *dev)
76} 76}
77 77
78 78
79void linkwatch_init_dev(struct net_device *dev)
80{
81 /* Handle pre-registration link state changes */
82 if (!netif_carrier_ok(dev) || netif_dormant(dev))
83 rfc2863_policy(dev);
84}
85
86
79static bool linkwatch_urgent_event(struct net_device *dev) 87static bool linkwatch_urgent_event(struct net_device *dev)
80{ 88{
81 if (!netif_running(dev)) 89 if (!netif_running(dev))
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 112c6e2266e9..baca771caae2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2102,7 +2102,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2102 if (tidx < tbl_skip || (family && tbl->family != family)) 2102 if (tidx < tbl_skip || (family && tbl->family != family))
2103 continue; 2103 continue;
2104 2104
2105 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid, 2105 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2106 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL, 2106 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2107 NLM_F_MULTI) <= 0) 2107 NLM_F_MULTI) <= 0)
2108 break; 2108 break;
@@ -2115,7 +2115,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2115 goto next; 2115 goto next;
2116 2116
2117 if (neightbl_fill_param_info(skb, tbl, p, 2117 if (neightbl_fill_param_info(skb, tbl, p,
2118 NETLINK_CB(cb->skb).pid, 2118 NETLINK_CB(cb->skb).portid,
2119 cb->nlh->nlmsg_seq, 2119 cb->nlh->nlmsg_seq,
2120 RTM_NEWNEIGHTBL, 2120 RTM_NEWNEIGHTBL,
2121 NLM_F_MULTI) <= 0) 2121 NLM_F_MULTI) <= 0)
@@ -2244,7 +2244,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2244 continue; 2244 continue;
2245 if (idx < s_idx) 2245 if (idx < s_idx)
2246 goto next; 2246 goto next;
2247 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid, 2247 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2248 cb->nlh->nlmsg_seq, 2248 cb->nlh->nlmsg_seq,
2249 RTM_NEWNEIGH, 2249 RTM_NEWNEIGH,
2250 NLM_F_MULTI) <= 0) { 2250 NLM_F_MULTI) <= 0) {
@@ -2281,7 +2281,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2281 continue; 2281 continue;
2282 if (idx < s_idx) 2282 if (idx < s_idx)
2283 goto next; 2283 goto next;
2284 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid, 2284 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2285 cb->nlh->nlmsg_seq, 2285 cb->nlh->nlmsg_seq,
2286 RTM_NEWNEIGH, 2286 RTM_NEWNEIGH,
2287 NLM_F_MULTI, tbl) <= 0) { 2287 NLM_F_MULTI, tbl) <= 0) {
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 72607174ea5a..bcf02f608cbf 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -166,9 +166,21 @@ static ssize_t show_duplex(struct device *dev,
166 166
167 if (netif_running(netdev)) { 167 if (netif_running(netdev)) {
168 struct ethtool_cmd cmd; 168 struct ethtool_cmd cmd;
169 if (!__ethtool_get_settings(netdev, &cmd)) 169 if (!__ethtool_get_settings(netdev, &cmd)) {
170 ret = sprintf(buf, "%s\n", 170 const char *duplex;
171 cmd.duplex ? "full" : "half"); 171 switch (cmd.duplex) {
172 case DUPLEX_HALF:
173 duplex = "half";
174 break;
175 case DUPLEX_FULL:
176 duplex = "full";
177 break;
178 default:
179 duplex = "unknown";
180 break;
181 }
182 ret = sprintf(buf, "%s\n", duplex);
183 }
172 } 184 }
173 rtnl_unlock(); 185 rtnl_unlock();
174 return ret; 186 return ret;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e4ba3e70c174..77a0388fc3be 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -328,7 +328,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
328 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { 328 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
329 struct netdev_queue *txq; 329 struct netdev_queue *txq;
330 330
331 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 331 txq = netdev_pick_tx(dev, skb);
332 332
333 /* try until next clock tick */ 333 /* try until next clock tick */
334 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 334 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -380,6 +380,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
380 struct udphdr *udph; 380 struct udphdr *udph;
381 struct iphdr *iph; 381 struct iphdr *iph;
382 struct ethhdr *eth; 382 struct ethhdr *eth;
383 static atomic_t ip_ident;
383 384
384 udp_len = len + sizeof(*udph); 385 udp_len = len + sizeof(*udph);
385 ip_len = udp_len + sizeof(*iph); 386 ip_len = udp_len + sizeof(*iph);
@@ -415,7 +416,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
415 put_unaligned(0x45, (unsigned char *)iph); 416 put_unaligned(0x45, (unsigned char *)iph);
416 iph->tos = 0; 417 iph->tos = 0;
417 put_unaligned(htons(ip_len), &(iph->tot_len)); 418 put_unaligned(htons(ip_len), &(iph->tot_len));
418 iph->id = 0; 419 iph->id = htons(atomic_inc_return(&ip_ident));
419 iph->frag_off = 0; 420 iph->frag_off = 0;
420 iph->ttl = 64; 421 iph->ttl = 64;
421 iph->protocol = IPPROTO_UDP; 422 iph->protocol = IPPROTO_UDP;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 39e7e4d3cdb4..4a83fb3c8e87 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -73,7 +73,6 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len)
73 ((sizeof(u32) * new_len)); 73 ((sizeof(u32) * new_len));
74 struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL); 74 struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
75 struct netprio_map *old_priomap; 75 struct netprio_map *old_priomap;
76 int i;
77 76
78 old_priomap = rtnl_dereference(dev->priomap); 77 old_priomap = rtnl_dereference(dev->priomap);
79 78
@@ -82,10 +81,10 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len)
82 return -ENOMEM; 81 return -ENOMEM;
83 } 82 }
84 83
85 for (i = 0; 84 if (old_priomap)
86 old_priomap && (i < old_priomap->priomap_len); 85 memcpy(new_priomap->priomap, old_priomap->priomap,
87 i++) 86 old_priomap->priomap_len *
88 new_priomap->priomap[i] = old_priomap->priomap[i]; 87 sizeof(old_priomap->priomap[0]));
89 88
90 new_priomap->priomap_len = new_len; 89 new_priomap->priomap_len = new_len;
91 90
@@ -109,32 +108,6 @@ static int write_update_netdev_table(struct net_device *dev)
109 return ret; 108 return ret;
110} 109}
111 110
112static int update_netdev_tables(void)
113{
114 int ret = 0;
115 struct net_device *dev;
116 u32 max_len;
117 struct netprio_map *map;
118
119 rtnl_lock();
120 max_len = atomic_read(&max_prioidx) + 1;
121 for_each_netdev(&init_net, dev) {
122 map = rtnl_dereference(dev->priomap);
123 /*
124 * don't allocate priomap if we didn't
125 * change net_prio.ifpriomap (map == NULL),
126 * this will speed up skb_update_prio.
127 */
128 if (map && map->priomap_len < max_len) {
129 ret = extend_netdev_table(dev, max_len);
130 if (ret < 0)
131 break;
132 }
133 }
134 rtnl_unlock();
135 return ret;
136}
137
138static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) 111static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
139{ 112{
140 struct cgroup_netprio_state *cs; 113 struct cgroup_netprio_state *cs;
@@ -153,12 +126,6 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
153 goto out; 126 goto out;
154 } 127 }
155 128
156 ret = update_netdev_tables();
157 if (ret < 0) {
158 put_prioidx(cs->prioidx);
159 goto out;
160 }
161
162 return &cs->css; 129 return &cs->css;
163out: 130out:
164 kfree(cs); 131 kfree(cs);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 9b570a6a33c5..c31d9e8668c3 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -15,6 +15,7 @@
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/tcp.h>
18#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
19 20
20#include <net/request_sock.h> 21#include <net/request_sock.h>
@@ -130,3 +131,97 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
130 kfree(lopt); 131 kfree(lopt);
131} 132}
132 133
134/*
135 * This function is called to set a Fast Open socket's "fastopen_rsk" field
136 * to NULL when a TFO socket no longer needs to access the request_sock.
137 * This happens only after 3WHS has been either completed or aborted (e.g.,
138 * RST is received).
139 *
140 * Before TFO, a child socket is created only after 3WHS is completed,
141 * hence it never needs to access the request_sock. things get a lot more
142 * complex with TFO. A child socket, accepted or not, has to access its
143 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
144 * until 3WHS is either completed or aborted. Afterwards the req will stay
145 * until either the child socket is accepted, or in the rare case when the
146 * listener is closed before the child is accepted.
147 *
148 * In short, a request socket is only freed after BOTH 3WHS has completed
149 * (or aborted) and the child socket has been accepted (or listener closed).
150 * When a child socket is accepted, its corresponding req->sk is set to
151 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
152 * will be used by the code below to determine if a child socket has been
153 * accepted or not, and the check is protected by the fastopenq->lock
154 * described below.
155 *
156 * Note that fastopen_rsk is only accessed from the child socket's context
157 * with its socket lock held. But a request_sock (req) can be accessed by
158 * both its child socket through fastopen_rsk, and a listener socket through
159 * icsk_accept_queue.rskq_accept_head. To protect the access a simple spin
160 * lock per listener "icsk->icsk_accept_queue.fastopenq->lock" is created.
161 * only in the rare case when both the listener and the child locks are held,
162 * e.g., in inet_csk_listen_stop() do we not need to acquire the lock.
163 * The lock also protects other fields such as fastopenq->qlen, which is
164 * decremented by this function when fastopen_rsk is no longer needed.
165 *
166 * Note that another solution was to simply use the existing socket lock
167 * from the listener. But first socket lock is difficult to use. It is not
168 * a simple spin lock - one must consider sock_owned_by_user() and arrange
169 * to use sk_add_backlog() stuff. But what really makes it infeasible is the
170 * locking hierarchy violation. E.g., inet_csk_listen_stop() may try to
171 * acquire a child's lock while holding listener's socket lock. A corner
172 * case might also exist in tcp_v4_hnd_req() that will trigger this locking
173 * order.
174 *
175 * When a TFO req is created, it needs to sock_hold its listener to prevent
176 * the latter data structure from going away.
177 *
178 * This function also sets "treq->listener" to NULL and unreference listener
179 * socket. treq->listener is used by the listener so it is protected by the
180 * fastopenq->lock in this function.
181 */
182void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
183 bool reset)
184{
185 struct sock *lsk = tcp_rsk(req)->listener;
186 struct fastopen_queue *fastopenq =
187 inet_csk(lsk)->icsk_accept_queue.fastopenq;
188
189 BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
190
191 tcp_sk(sk)->fastopen_rsk = NULL;
192 spin_lock_bh(&fastopenq->lock);
193 fastopenq->qlen--;
194 tcp_rsk(req)->listener = NULL;
195 if (req->sk) /* the child socket hasn't been accepted yet */
196 goto out;
197
198 if (!reset || lsk->sk_state != TCP_LISTEN) {
199 /* If the listener has been closed don't bother with the
200 * special RST handling below.
201 */
202 spin_unlock_bh(&fastopenq->lock);
203 sock_put(lsk);
204 reqsk_free(req);
205 return;
206 }
207 /* Wait for 60secs before removing a req that has triggered RST.
208 * This is a simple defense against TFO spoofing attack - by
209 * counting the req against fastopen.max_qlen, and disabling
210 * TFO when the qlen exceeds max_qlen.
211 *
212 * For more details see CoNext'11 "TCP Fast Open" paper.
213 */
214 req->expires = jiffies + 60*HZ;
215 if (fastopenq->rskq_rst_head == NULL)
216 fastopenq->rskq_rst_head = req;
217 else
218 fastopenq->rskq_rst_tail->dl_next = req;
219
220 req->dl_next = NULL;
221 fastopenq->rskq_rst_tail = req;
222 fastopenq->qlen++;
223out:
224 spin_unlock_bh(&fastopenq->lock);
225 sock_put(lsk);
226 return;
227}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2c5a0a06c4ce..76d4c2c3c89b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -618,7 +618,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
618 long expires, u32 error) 618 long expires, u32 error)
619{ 619{
620 struct rta_cacheinfo ci = { 620 struct rta_cacheinfo ci = {
621 .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse), 621 .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
622 .rta_used = dst->__use, 622 .rta_used = dst->__use,
623 .rta_clntref = atomic_read(&(dst->__refcnt)), 623 .rta_clntref = atomic_read(&(dst->__refcnt)),
624 .rta_error = error, 624 .rta_error = error,
@@ -1081,7 +1081,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1081 if (idx < s_idx) 1081 if (idx < s_idx)
1082 goto cont; 1082 goto cont;
1083 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1083 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
1084 NETLINK_CB(cb->skb).pid, 1084 NETLINK_CB(cb->skb).portid,
1085 cb->nlh->nlmsg_seq, 0, 1085 cb->nlh->nlmsg_seq, 0,
1086 NLM_F_MULTI, 1086 NLM_F_MULTI,
1087 ext_filter_mask) <= 0) 1087 ext_filter_mask) <= 0)
@@ -1812,8 +1812,6 @@ replay:
1812 return -ENODEV; 1812 return -ENODEV;
1813 } 1813 }
1814 1814
1815 if (ifm->ifi_index)
1816 return -EOPNOTSUPP;
1817 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) 1815 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
1818 return -EOPNOTSUPP; 1816 return -EOPNOTSUPP;
1819 1817
@@ -1839,10 +1837,14 @@ replay:
1839 return PTR_ERR(dest_net); 1837 return PTR_ERR(dest_net);
1840 1838
1841 dev = rtnl_create_link(net, dest_net, ifname, ops, tb); 1839 dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
1842 1840 if (IS_ERR(dev)) {
1843 if (IS_ERR(dev))
1844 err = PTR_ERR(dev); 1841 err = PTR_ERR(dev);
1845 else if (ops->newlink) 1842 goto out;
1843 }
1844
1845 dev->ifindex = ifm->ifi_index;
1846
1847 if (ops->newlink)
1846 err = ops->newlink(net, dev, tb, data); 1848 err = ops->newlink(net, dev, tb, data);
1847 else 1849 else
1848 err = register_netdevice(dev); 1850 err = register_netdevice(dev);
@@ -1897,14 +1899,14 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
1897 if (nskb == NULL) 1899 if (nskb == NULL)
1898 return -ENOBUFS; 1900 return -ENOBUFS;
1899 1901
1900 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, 1902 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
1901 nlh->nlmsg_seq, 0, 0, ext_filter_mask); 1903 nlh->nlmsg_seq, 0, 0, ext_filter_mask);
1902 if (err < 0) { 1904 if (err < 0) {
1903 /* -EMSGSIZE implies BUG in if_nlmsg_size */ 1905 /* -EMSGSIZE implies BUG in if_nlmsg_size */
1904 WARN_ON(err == -EMSGSIZE); 1906 WARN_ON(err == -EMSGSIZE);
1905 kfree_skb(nskb); 1907 kfree_skb(nskb);
1906 } else 1908 } else
1907 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); 1909 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
1908 1910
1909 return err; 1911 return err;
1910} 1912}
@@ -2088,7 +2090,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2088 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && 2090 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2089 (dev->priv_flags & IFF_BRIDGE_PORT)) { 2091 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2090 master = dev->master; 2092 master = dev->master;
2091 err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr, 2093 err = master->netdev_ops->ndo_fdb_add(ndm, tb,
2094 dev, addr,
2092 nlh->nlmsg_flags); 2095 nlh->nlmsg_flags);
2093 if (err) 2096 if (err)
2094 goto out; 2097 goto out;
@@ -2098,7 +2101,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2098 2101
2099 /* Embedded bridge, macvlan, and any other device support */ 2102 /* Embedded bridge, macvlan, and any other device support */
2100 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { 2103 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
2101 err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr, 2104 err = dev->netdev_ops->ndo_fdb_add(ndm, tb,
2105 dev, addr,
2102 nlh->nlmsg_flags); 2106 nlh->nlmsg_flags);
2103 2107
2104 if (!err) { 2108 if (!err) {
@@ -2178,9 +2182,9 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
2178{ 2182{
2179 struct netdev_hw_addr *ha; 2183 struct netdev_hw_addr *ha;
2180 int err; 2184 int err;
2181 u32 pid, seq; 2185 u32 portid, seq;
2182 2186
2183 pid = NETLINK_CB(cb->skb).pid; 2187 portid = NETLINK_CB(cb->skb).portid;
2184 seq = cb->nlh->nlmsg_seq; 2188 seq = cb->nlh->nlmsg_seq;
2185 2189
2186 list_for_each_entry(ha, &list->list, list) { 2190 list_for_each_entry(ha, &list->list, list) {
@@ -2188,7 +2192,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
2188 goto skip; 2192 goto skip;
2189 2193
2190 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 2194 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
2191 pid, seq, 0, NTF_SELF); 2195 portid, seq, 0, NTF_SELF);
2192 if (err < 0) 2196 if (err < 0)
2193 return err; 2197 return err;
2194skip: 2198skip:
@@ -2356,7 +2360,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
2356 case NETDEV_PRE_TYPE_CHANGE: 2360 case NETDEV_PRE_TYPE_CHANGE:
2357 case NETDEV_GOING_DOWN: 2361 case NETDEV_GOING_DOWN:
2358 case NETDEV_UNREGISTER: 2362 case NETDEV_UNREGISTER:
2359 case NETDEV_UNREGISTER_BATCH: 2363 case NETDEV_UNREGISTER_FINAL:
2360 case NETDEV_RELEASE: 2364 case NETDEV_RELEASE:
2361 case NETDEV_JOIN: 2365 case NETDEV_JOIN:
2362 break; 2366 break;
@@ -2379,9 +2383,10 @@ static int __net_init rtnetlink_net_init(struct net *net)
2379 .groups = RTNLGRP_MAX, 2383 .groups = RTNLGRP_MAX,
2380 .input = rtnetlink_rcv, 2384 .input = rtnetlink_rcv,
2381 .cb_mutex = &rtnl_mutex, 2385 .cb_mutex = &rtnl_mutex,
2386 .flags = NL_CFG_F_NONROOT_RECV,
2382 }; 2387 };
2383 2388
2384 sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg); 2389 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
2385 if (!sk) 2390 if (!sk)
2386 return -ENOMEM; 2391 return -ENOMEM;
2387 net->rtnl = sk; 2392 net->rtnl = sk;
@@ -2414,7 +2419,6 @@ void __init rtnetlink_init(void)
2414 if (register_pernet_subsys(&rtnetlink_net_ops)) 2419 if (register_pernet_subsys(&rtnetlink_net_ops))
2415 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 2420 panic("rtnetlink_init: cannot initialize rtnetlink\n");
2416 2421
2417 netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
2418 register_netdevice_notifier(&rtnetlink_dev_notifier); 2422 register_netdevice_notifier(&rtnetlink_dev_notifier);
2419 2423
2420 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, 2424 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
diff --git a/net/core/scm.c b/net/core/scm.c
index 6ab491d6c26f..9c1c63da3ca8 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -155,19 +155,21 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
155 break; 155 break;
156 case SCM_CREDENTIALS: 156 case SCM_CREDENTIALS:
157 { 157 {
158 struct ucred creds;
158 kuid_t uid; 159 kuid_t uid;
159 kgid_t gid; 160 kgid_t gid;
160 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) 161 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
161 goto error; 162 goto error;
162 memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); 163 memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
163 err = scm_check_creds(&p->creds); 164 err = scm_check_creds(&creds);
164 if (err) 165 if (err)
165 goto error; 166 goto error;
166 167
167 if (!p->pid || pid_vnr(p->pid) != p->creds.pid) { 168 p->creds.pid = creds.pid;
169 if (!p->pid || pid_vnr(p->pid) != creds.pid) {
168 struct pid *pid; 170 struct pid *pid;
169 err = -ESRCH; 171 err = -ESRCH;
170 pid = find_get_pid(p->creds.pid); 172 pid = find_get_pid(creds.pid);
171 if (!pid) 173 if (!pid)
172 goto error; 174 goto error;
173 put_pid(p->pid); 175 put_pid(p->pid);
@@ -175,11 +177,14 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
175 } 177 }
176 178
177 err = -EINVAL; 179 err = -EINVAL;
178 uid = make_kuid(current_user_ns(), p->creds.uid); 180 uid = make_kuid(current_user_ns(), creds.uid);
179 gid = make_kgid(current_user_ns(), p->creds.gid); 181 gid = make_kgid(current_user_ns(), creds.gid);
180 if (!uid_valid(uid) || !gid_valid(gid)) 182 if (!uid_valid(uid) || !gid_valid(gid))
181 goto error; 183 goto error;
182 184
185 p->creds.uid = uid;
186 p->creds.gid = gid;
187
183 if (!p->cred || 188 if (!p->cred ||
184 !uid_eq(p->cred->euid, uid) || 189 !uid_eq(p->cred->euid, uid) ||
185 !gid_eq(p->cred->egid, gid)) { 190 !gid_eq(p->cred->egid, gid)) {
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 99b2596531bb..e61a8bb7fce7 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -76,6 +76,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
76 76
77 return hash[0]; 77 return hash[0];
78} 78}
79EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
79#endif 80#endif
80 81
81#ifdef CONFIG_INET 82#ifdef CONFIG_INET
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e33ebae519c8..cdc28598f4ef 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,43 +340,57 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
340EXPORT_SYMBOL(build_skb); 340EXPORT_SYMBOL(build_skb);
341 341
342struct netdev_alloc_cache { 342struct netdev_alloc_cache {
343 struct page *page; 343 struct page_frag frag;
344 unsigned int offset; 344 /* we maintain a pagecount bias, so that we dont dirty cache line
345 unsigned int pagecnt_bias; 345 * containing page->_count every time we allocate a fragment.
346 */
347 unsigned int pagecnt_bias;
346}; 348};
347static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); 349static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
348 350
349#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES) 351#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
352#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
353#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
350 354
351static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 355static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
352{ 356{
353 struct netdev_alloc_cache *nc; 357 struct netdev_alloc_cache *nc;
354 void *data = NULL; 358 void *data = NULL;
359 int order;
355 unsigned long flags; 360 unsigned long flags;
356 361
357 local_irq_save(flags); 362 local_irq_save(flags);
358 nc = &__get_cpu_var(netdev_alloc_cache); 363 nc = &__get_cpu_var(netdev_alloc_cache);
359 if (unlikely(!nc->page)) { 364 if (unlikely(!nc->frag.page)) {
360refill: 365refill:
361 nc->page = alloc_page(gfp_mask); 366 for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
362 if (unlikely(!nc->page)) 367 gfp_t gfp = gfp_mask;
363 goto end; 368
369 if (order)
370 gfp |= __GFP_COMP | __GFP_NOWARN;
371 nc->frag.page = alloc_pages(gfp, order);
372 if (likely(nc->frag.page))
373 break;
374 if (--order < 0)
375 goto end;
376 }
377 nc->frag.size = PAGE_SIZE << order;
364recycle: 378recycle:
365 atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS); 379 atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
366 nc->pagecnt_bias = NETDEV_PAGECNT_BIAS; 380 nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
367 nc->offset = 0; 381 nc->frag.offset = 0;
368 } 382 }
369 383
370 if (nc->offset + fragsz > PAGE_SIZE) { 384 if (nc->frag.offset + fragsz > nc->frag.size) {
371 /* avoid unnecessary locked operations if possible */ 385 /* avoid unnecessary locked operations if possible */
372 if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) || 386 if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
373 atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count)) 387 atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
374 goto recycle; 388 goto recycle;
375 goto refill; 389 goto refill;
376 } 390 }
377 391
378 data = page_address(nc->page) + nc->offset; 392 data = page_address(nc->frag.page) + nc->frag.offset;
379 nc->offset += fragsz; 393 nc->frag.offset += fragsz;
380 nc->pagecnt_bias--; 394 nc->pagecnt_bias--;
381end: 395end:
382 local_irq_restore(flags); 396 local_irq_restore(flags);
@@ -1655,38 +1669,19 @@ static struct page *linear_to_page(struct page *page, unsigned int *len,
1655 unsigned int *offset, 1669 unsigned int *offset,
1656 struct sk_buff *skb, struct sock *sk) 1670 struct sk_buff *skb, struct sock *sk)
1657{ 1671{
1658 struct page *p = sk->sk_sndmsg_page; 1672 struct page_frag *pfrag = sk_page_frag(sk);
1659 unsigned int off;
1660
1661 if (!p) {
1662new_page:
1663 p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1664 if (!p)
1665 return NULL;
1666 1673
1667 off = sk->sk_sndmsg_off = 0; 1674 if (!sk_page_frag_refill(sk, pfrag))
1668 /* hold one ref to this page until it's full */ 1675 return NULL;
1669 } else {
1670 unsigned int mlen;
1671
1672 /* If we are the only user of the page, we can reset offset */
1673 if (page_count(p) == 1)
1674 sk->sk_sndmsg_off = 0;
1675 off = sk->sk_sndmsg_off;
1676 mlen = PAGE_SIZE - off;
1677 if (mlen < 64 && mlen < *len) {
1678 put_page(p);
1679 goto new_page;
1680 }
1681 1676
1682 *len = min_t(unsigned int, *len, mlen); 1677 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1683 }
1684 1678
1685 memcpy(page_address(p) + off, page_address(page) + *offset, *len); 1679 memcpy(page_address(pfrag->page) + pfrag->offset,
1686 sk->sk_sndmsg_off += *len; 1680 page_address(page) + *offset, *len);
1687 *offset = off; 1681 *offset = pfrag->offset;
1682 pfrag->offset += *len;
1688 1683
1689 return p; 1684 return pfrag->page;
1690} 1685}
1691 1686
1692static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 1687static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
@@ -3488,8 +3483,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3488 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 3483 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
3489 return false; 3484 return false;
3490 3485
3491 delta = from->truesize - 3486 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
3492 SKB_TRUESIZE(skb_end_pointer(from) - from->head);
3493 } 3487 }
3494 3488
3495 WARN_ON_ONCE(delta < len); 3489 WARN_ON_ONCE(delta < len);
diff --git a/net/core/sock.c b/net/core/sock.c
index 12cddd037bce..8a146cfcc366 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1221,7 +1221,7 @@ void sock_update_classid(struct sock *sk)
1221 rcu_read_lock(); /* doing current task, which cannot vanish. */ 1221 rcu_read_lock(); /* doing current task, which cannot vanish. */
1222 classid = task_cls_classid(current); 1222 classid = task_cls_classid(current);
1223 rcu_read_unlock(); 1223 rcu_read_unlock();
1224 if (classid && classid != sk->sk_classid) 1224 if (classid != sk->sk_classid)
1225 sk->sk_classid = classid; 1225 sk->sk_classid = classid;
1226} 1226}
1227EXPORT_SYMBOL(sock_update_classid); 1227EXPORT_SYMBOL(sock_update_classid);
@@ -1458,19 +1458,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1458} 1458}
1459EXPORT_SYMBOL_GPL(sk_setup_caps); 1459EXPORT_SYMBOL_GPL(sk_setup_caps);
1460 1460
1461void __init sk_init(void)
1462{
1463 if (totalram_pages <= 4096) {
1464 sysctl_wmem_max = 32767;
1465 sysctl_rmem_max = 32767;
1466 sysctl_wmem_default = 32767;
1467 sysctl_rmem_default = 32767;
1468 } else if (totalram_pages >= 131072) {
1469 sysctl_wmem_max = 131071;
1470 sysctl_rmem_max = 131071;
1471 }
1472}
1473
1474/* 1461/*
1475 * Simple resource managers for sockets. 1462 * Simple resource managers for sockets.
1476 */ 1463 */
@@ -1738,6 +1725,45 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1738} 1725}
1739EXPORT_SYMBOL(sock_alloc_send_skb); 1726EXPORT_SYMBOL(sock_alloc_send_skb);
1740 1727
1728/* On 32bit arches, an skb frag is limited to 2^15 */
1729#define SKB_FRAG_PAGE_ORDER get_order(32768)
1730
1731bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1732{
1733 int order;
1734
1735 if (pfrag->page) {
1736 if (atomic_read(&pfrag->page->_count) == 1) {
1737 pfrag->offset = 0;
1738 return true;
1739 }
1740 if (pfrag->offset < pfrag->size)
1741 return true;
1742 put_page(pfrag->page);
1743 }
1744
1745 /* We restrict high order allocations to users that can afford to wait */
1746 order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
1747
1748 do {
1749 gfp_t gfp = sk->sk_allocation;
1750
1751 if (order)
1752 gfp |= __GFP_COMP | __GFP_NOWARN;
1753 pfrag->page = alloc_pages(gfp, order);
1754 if (likely(pfrag->page)) {
1755 pfrag->offset = 0;
1756 pfrag->size = PAGE_SIZE << order;
1757 return true;
1758 }
1759 } while (--order >= 0);
1760
1761 sk_enter_memory_pressure(sk);
1762 sk_stream_moderate_sndbuf(sk);
1763 return false;
1764}
1765EXPORT_SYMBOL(sk_page_frag_refill);
1766
1741static void __lock_sock(struct sock *sk) 1767static void __lock_sock(struct sock *sk)
1742 __releases(&sk->sk_lock.slock) 1768 __releases(&sk->sk_lock.slock)
1743 __acquires(&sk->sk_lock.slock) 1769 __acquires(&sk->sk_lock.slock)
@@ -2167,8 +2193,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2167 sk->sk_error_report = sock_def_error_report; 2193 sk->sk_error_report = sock_def_error_report;
2168 sk->sk_destruct = sock_def_destruct; 2194 sk->sk_destruct = sock_def_destruct;
2169 2195
2170 sk->sk_sndmsg_page = NULL; 2196 sk->sk_frag.page = NULL;
2171 sk->sk_sndmsg_off = 0; 2197 sk->sk_frag.offset = 0;
2172 sk->sk_peek_off = -1; 2198 sk->sk_peek_off = -1;
2173 2199
2174 sk->sk_peer_pid = NULL; 2200 sk->sk_peer_pid = NULL;
@@ -2411,6 +2437,12 @@ void sk_common_release(struct sock *sk)
2411 xfrm_sk_free_policy(sk); 2437 xfrm_sk_free_policy(sk);
2412 2438
2413 sk_refcnt_debug_release(sk); 2439 sk_refcnt_debug_release(sk);
2440
2441 if (sk->sk_frag.page) {
2442 put_page(sk->sk_frag.page);
2443 sk->sk_frag.page = NULL;
2444 }
2445
2414 sock_put(sk); 2446 sock_put(sk);
2415} 2447}
2416EXPORT_SYMBOL(sk_common_release); 2448EXPORT_SYMBOL(sk_common_release);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 9d8755e4a7a5..602cd637182e 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -172,8 +172,7 @@ static int __net_init diag_net_init(struct net *net)
172 .input = sock_diag_rcv, 172 .input = sock_diag_rcv,
173 }; 173 };
174 174
175 net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, 175 net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
176 THIS_MODULE, &cfg);
177 return net->diag_nlsk == NULL ? -ENOMEM : 0; 176 return net->diag_nlsk == NULL ? -ENOMEM : 0;
178} 177}
179 178
diff --git a/net/core/utils.c b/net/core/utils.c
index 39895a65e54a..f5613d569c23 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -294,6 +294,26 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
294} 294}
295EXPORT_SYMBOL(inet_proto_csum_replace4); 295EXPORT_SYMBOL(inet_proto_csum_replace4);
296 296
297void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
298 const __be32 *from, const __be32 *to,
299 int pseudohdr)
300{
301 __be32 diff[] = {
302 ~from[0], ~from[1], ~from[2], ~from[3],
303 to[0], to[1], to[2], to[3],
304 };
305 if (skb->ip_summed != CHECKSUM_PARTIAL) {
306 *sum = csum_fold(csum_partial(diff, sizeof(diff),
307 ~csum_unfold(*sum)));
308 if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
309 skb->csum = ~csum_partial(diff, sizeof(diff),
310 ~skb->csum);
311 } else if (pseudohdr)
312 *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
313 csum_unfold(*sum)));
314}
315EXPORT_SYMBOL(inet_proto_csum_replace16);
316
297int mac_pton(const char *s, u8 *mac) 317int mac_pton(const char *s, u8 *mac)
298{ 318{
299 int i; 319 int i;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 81f2bb62dea3..70989e672304 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1319,7 +1319,7 @@ nla_put_failure:
1319} 1319}
1320 1320
1321static int dcbnl_notify(struct net_device *dev, int event, int cmd, 1321static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1322 u32 seq, u32 pid, int dcbx_ver) 1322 u32 seq, u32 portid, int dcbx_ver)
1323{ 1323{
1324 struct net *net = dev_net(dev); 1324 struct net *net = dev_net(dev);
1325 struct sk_buff *skb; 1325 struct sk_buff *skb;
@@ -1330,7 +1330,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1330 if (!ops) 1330 if (!ops)
1331 return -EOPNOTSUPP; 1331 return -EOPNOTSUPP;
1332 1332
1333 skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh); 1333 skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1334 if (!skb) 1334 if (!skb)
1335 return -ENOBUFS; 1335 return -ENOBUFS;
1336 1336
@@ -1353,16 +1353,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1353} 1353}
1354 1354
1355int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, 1355int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1356 u32 seq, u32 pid) 1356 u32 seq, u32 portid)
1357{ 1357{
1358 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE); 1358 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
1359} 1359}
1360EXPORT_SYMBOL(dcbnl_ieee_notify); 1360EXPORT_SYMBOL(dcbnl_ieee_notify);
1361 1361
1362int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, 1362int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1363 u32 seq, u32 pid) 1363 u32 seq, u32 portid)
1364{ 1364{
1365 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE); 1365 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
1366} 1366}
1367EXPORT_SYMBOL(dcbnl_cee_notify); 1367EXPORT_SYMBOL(dcbnl_cee_notify);
1368 1368
@@ -1656,7 +1656,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1656 struct net_device *netdev; 1656 struct net_device *netdev;
1657 struct dcbmsg *dcb = nlmsg_data(nlh); 1657 struct dcbmsg *dcb = nlmsg_data(nlh);
1658 struct nlattr *tb[DCB_ATTR_MAX + 1]; 1658 struct nlattr *tb[DCB_ATTR_MAX + 1];
1659 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 1659 u32 portid = skb ? NETLINK_CB(skb).portid : 0;
1660 int ret = -EINVAL; 1660 int ret = -EINVAL;
1661 struct sk_buff *reply_skb; 1661 struct sk_buff *reply_skb;
1662 struct nlmsghdr *reply_nlh = NULL; 1662 struct nlmsghdr *reply_nlh = NULL;
@@ -1690,7 +1690,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1690 goto out; 1690 goto out;
1691 } 1691 }
1692 1692
1693 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq, 1693 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
1694 nlh->nlmsg_flags, &reply_nlh); 1694 nlh->nlmsg_flags, &reply_nlh);
1695 if (!reply_skb) { 1695 if (!reply_skb) {
1696 ret = -ENOBUFS; 1696 ret = -ENOBUFS;
@@ -1705,7 +1705,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1705 1705
1706 nlmsg_end(reply_skb, reply_nlh); 1706 nlmsg_end(reply_skb, reply_nlh);
1707 1707
1708 ret = rtnl_unicast(reply_skb, &init_net, pid); 1708 ret = rtnl_unicast(reply_skb, &init_net, portid);
1709out: 1709out:
1710 dev_put(netdev); 1710 dev_put(netdev);
1711 return ret; 1711 return ret;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2ba1a2814c24..307c322d53bb 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1313,10 +1313,10 @@ static int dn_shutdown(struct socket *sock, int how)
1313 if (scp->state == DN_O) 1313 if (scp->state == DN_O)
1314 goto out; 1314 goto out;
1315 1315
1316 if (how != SHUTDOWN_MASK) 1316 if (how != SHUT_RDWR)
1317 goto out; 1317 goto out;
1318 1318
1319 sk->sk_shutdown = how; 1319 sk->sk_shutdown = SHUTDOWN_MASK;
1320 dn_destroy_sock(sk); 1320 dn_destroy_sock(sk);
1321 err = 0; 1321 err = 0;
1322 1322
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index f3924ab1e019..7b7e561412d3 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -667,12 +667,12 @@ static inline size_t dn_ifaddr_nlmsg_size(void)
667} 667}
668 668
669static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, 669static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
670 u32 pid, u32 seq, int event, unsigned int flags) 670 u32 portid, u32 seq, int event, unsigned int flags)
671{ 671{
672 struct ifaddrmsg *ifm; 672 struct ifaddrmsg *ifm;
673 struct nlmsghdr *nlh; 673 struct nlmsghdr *nlh;
674 674
675 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); 675 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
676 if (nlh == NULL) 676 if (nlh == NULL)
677 return -EMSGSIZE; 677 return -EMSGSIZE;
678 678
@@ -753,7 +753,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
753 if (dn_idx < skip_naddr) 753 if (dn_idx < skip_naddr)
754 continue; 754 continue;
755 755
756 if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, 756 if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid,
757 cb->nlh->nlmsg_seq, RTM_NEWADDR, 757 cb->nlh->nlmsg_seq, RTM_NEWADDR,
758 NLM_F_MULTI) < 0) 758 NLM_F_MULTI) < 0)
759 goto done; 759 goto done;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 85a3604c87c8..b57419cc41a4 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -961,7 +961,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
961 .saddr = oldflp->saddr, 961 .saddr = oldflp->saddr,
962 .flowidn_scope = RT_SCOPE_UNIVERSE, 962 .flowidn_scope = RT_SCOPE_UNIVERSE,
963 .flowidn_mark = oldflp->flowidn_mark, 963 .flowidn_mark = oldflp->flowidn_mark,
964 .flowidn_iif = init_net.loopback_dev->ifindex, 964 .flowidn_iif = LOOPBACK_IFINDEX,
965 .flowidn_oif = oldflp->flowidn_oif, 965 .flowidn_oif = oldflp->flowidn_oif,
966 }; 966 };
967 struct dn_route *rt = NULL; 967 struct dn_route *rt = NULL;
@@ -979,7 +979,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
979 "dn_route_output_slow: dst=%04x src=%04x mark=%d" 979 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
980 " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr), 980 " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
981 le16_to_cpu(oldflp->saddr), 981 le16_to_cpu(oldflp->saddr),
982 oldflp->flowidn_mark, init_net.loopback_dev->ifindex, 982 oldflp->flowidn_mark, LOOPBACK_IFINDEX,
983 oldflp->flowidn_oif); 983 oldflp->flowidn_oif);
984 984
985 /* If we have an output interface, verify its a DECnet device */ 985 /* If we have an output interface, verify its a DECnet device */
@@ -1042,7 +1042,7 @@ source_ok:
1042 if (!fld.daddr) 1042 if (!fld.daddr)
1043 goto out; 1043 goto out;
1044 } 1044 }
1045 fld.flowidn_oif = init_net.loopback_dev->ifindex; 1045 fld.flowidn_oif = LOOPBACK_IFINDEX;
1046 res.type = RTN_LOCAL; 1046 res.type = RTN_LOCAL;
1047 goto make_route; 1047 goto make_route;
1048 } 1048 }
@@ -1543,7 +1543,7 @@ static int dn_route_input(struct sk_buff *skb)
1543 return dn_route_input_slow(skb); 1543 return dn_route_input_slow(skb);
1544} 1544}
1545 1545
1546static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 1546static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1547 int event, int nowait, unsigned int flags) 1547 int event, int nowait, unsigned int flags)
1548{ 1548{
1549 struct dn_route *rt = (struct dn_route *)skb_dst(skb); 1549 struct dn_route *rt = (struct dn_route *)skb_dst(skb);
@@ -1551,7 +1551,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1551 struct nlmsghdr *nlh; 1551 struct nlmsghdr *nlh;
1552 long expires; 1552 long expires;
1553 1553
1554 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 1554 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
1555 if (!nlh) 1555 if (!nlh)
1556 return -EMSGSIZE; 1556 return -EMSGSIZE;
1557 1557
@@ -1685,7 +1685,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1685 if (rtm->rtm_flags & RTM_F_NOTIFY) 1685 if (rtm->rtm_flags & RTM_F_NOTIFY)
1686 rt->rt_flags |= RTCF_NOTIFY; 1686 rt->rt_flags |= RTCF_NOTIFY;
1687 1687
1688 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); 1688 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
1689 1689
1690 if (err == 0) 1690 if (err == 0)
1691 goto out_free; 1691 goto out_free;
@@ -1694,7 +1694,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1694 goto out_free; 1694 goto out_free;
1695 } 1695 }
1696 1696
1697 return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 1697 return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);
1698 1698
1699out_free: 1699out_free:
1700 kfree_skb(skb); 1700 kfree_skb(skb);
@@ -1737,7 +1737,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1737 if (idx < s_idx) 1737 if (idx < s_idx)
1738 continue; 1738 continue;
1739 skb_dst_set(skb, dst_clone(&rt->dst)); 1739 skb_dst_set(skb, dst_clone(&rt->dst));
1740 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 1740 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
1741 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1741 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1742 1, NLM_F_MULTI) <= 0) { 1742 1, NLM_F_MULTI) <= 0) {
1743 skb_dst_drop(skb); 1743 skb_dst_drop(skb);
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 16c986ab1228..f968c1b58f47 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -291,14 +291,14 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
291 return payload; 291 return payload;
292} 292}
293 293
294static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 294static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
295 u32 tb_id, u8 type, u8 scope, void *dst, int dst_len, 295 u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
296 struct dn_fib_info *fi, unsigned int flags) 296 struct dn_fib_info *fi, unsigned int flags)
297{ 297{
298 struct rtmsg *rtm; 298 struct rtmsg *rtm;
299 struct nlmsghdr *nlh; 299 struct nlmsghdr *nlh;
300 300
301 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags); 301 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
302 if (!nlh) 302 if (!nlh)
303 return -EMSGSIZE; 303 return -EMSGSIZE;
304 304
@@ -374,14 +374,14 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
374 struct nlmsghdr *nlh, struct netlink_skb_parms *req) 374 struct nlmsghdr *nlh, struct netlink_skb_parms *req)
375{ 375{
376 struct sk_buff *skb; 376 struct sk_buff *skb;
377 u32 pid = req ? req->pid : 0; 377 u32 portid = req ? req->portid : 0;
378 int err = -ENOBUFS; 378 int err = -ENOBUFS;
379 379
380 skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL); 380 skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
381 if (skb == NULL) 381 if (skb == NULL)
382 goto errout; 382 goto errout;
383 383
384 err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id, 384 err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
385 f->fn_type, f->fn_scope, &f->fn_key, z, 385 f->fn_type, f->fn_scope, &f->fn_key, z,
386 DN_FIB_INFO(f), 0); 386 DN_FIB_INFO(f), 0);
387 if (err < 0) { 387 if (err < 0) {
@@ -390,7 +390,7 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
390 kfree_skb(skb); 390 kfree_skb(skb);
391 goto errout; 391 goto errout;
392 } 392 }
393 rtnl_notify(skb, &init_net, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL); 393 rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
394 return; 394 return;
395errout: 395errout:
396 if (err < 0) 396 if (err < 0)
@@ -411,7 +411,7 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
411 continue; 411 continue;
412 if (f->fn_state & DN_S_ZOMBIE) 412 if (f->fn_state & DN_S_ZOMBIE)
413 continue; 413 continue;
414 if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid, 414 if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
415 cb->nlh->nlmsg_seq, 415 cb->nlh->nlmsg_seq,
416 RTM_NEWROUTE, 416 RTM_NEWROUTE,
417 tb->n, 417 tb->n,
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 11db0ecf342f..dfe42012a044 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -130,8 +130,7 @@ static int __init dn_rtmsg_init(void)
130 .input = dnrmg_receive_user_skb, 130 .input = dnrmg_receive_user_skb,
131 }; 131 };
132 132
133 dnrmg = netlink_kernel_create(&init_net, 133 dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, &cfg);
134 NETLINK_DNRTMSG, THIS_MODULE, &cfg);
135 if (dnrmg == NULL) { 134 if (dnrmg == NULL) {
136 printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); 135 printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
137 return -ENOMEM; 136 return -ENOMEM;
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 6a095225148e..6d42c17af96b 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1063,12 +1063,6 @@ out:
1063 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK); 1063 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
1064} 1064}
1065 1065
1066static void lowpan_dev_free(struct net_device *dev)
1067{
1068 dev_put(lowpan_dev_info(dev)->real_dev);
1069 free_netdev(dev);
1070}
1071
1072static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) 1066static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
1073{ 1067{
1074 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; 1068 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
@@ -1118,7 +1112,7 @@ static void lowpan_setup(struct net_device *dev)
1118 dev->netdev_ops = &lowpan_netdev_ops; 1112 dev->netdev_ops = &lowpan_netdev_ops;
1119 dev->header_ops = &lowpan_header_ops; 1113 dev->header_ops = &lowpan_header_ops;
1120 dev->ml_priv = &lowpan_mlme; 1114 dev->ml_priv = &lowpan_mlme;
1121 dev->destructor = lowpan_dev_free; 1115 dev->destructor = free_netdev;
1122} 1116}
1123 1117
1124static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) 1118static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1133,6 +1127,8 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
1133static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, 1127static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1134 struct packet_type *pt, struct net_device *orig_dev) 1128 struct packet_type *pt, struct net_device *orig_dev)
1135{ 1129{
1130 struct sk_buff *local_skb;
1131
1136 if (!netif_running(dev)) 1132 if (!netif_running(dev))
1137 goto drop; 1133 goto drop;
1138 1134
@@ -1144,7 +1140,12 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1144 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ 1140 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
1145 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ 1141 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
1146 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ 1142 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
1147 lowpan_process_data(skb); 1143 local_skb = skb_clone(skb, GFP_ATOMIC);
1144 if (!local_skb)
1145 goto drop;
1146 lowpan_process_data(local_skb);
1147
1148 kfree_skb(skb);
1148 break; 1149 break;
1149 default: 1150 default:
1150 break; 1151 break;
@@ -1237,6 +1238,34 @@ static inline void __init lowpan_netlink_fini(void)
1237 rtnl_link_unregister(&lowpan_link_ops); 1238 rtnl_link_unregister(&lowpan_link_ops);
1238} 1239}
1239 1240
1241static int lowpan_device_event(struct notifier_block *unused,
1242 unsigned long event,
1243 void *ptr)
1244{
1245 struct net_device *dev = ptr;
1246 LIST_HEAD(del_list);
1247 struct lowpan_dev_record *entry, *tmp;
1248
1249 if (dev->type != ARPHRD_IEEE802154)
1250 goto out;
1251
1252 if (event == NETDEV_UNREGISTER) {
1253 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
1254 if (lowpan_dev_info(entry->ldev)->real_dev == dev)
1255 lowpan_dellink(entry->ldev, &del_list);
1256 }
1257
1258 unregister_netdevice_many(&del_list);
1259 }
1260
1261out:
1262 return NOTIFY_DONE;
1263}
1264
1265static struct notifier_block lowpan_dev_notifier = {
1266 .notifier_call = lowpan_device_event,
1267};
1268
1240static struct packet_type lowpan_packet_type = { 1269static struct packet_type lowpan_packet_type = {
1241 .type = __constant_htons(ETH_P_IEEE802154), 1270 .type = __constant_htons(ETH_P_IEEE802154),
1242 .func = lowpan_rcv, 1271 .func = lowpan_rcv,
@@ -1251,6 +1280,12 @@ static int __init lowpan_init_module(void)
1251 goto out; 1280 goto out;
1252 1281
1253 dev_add_pack(&lowpan_packet_type); 1282 dev_add_pack(&lowpan_packet_type);
1283
1284 err = register_netdevice_notifier(&lowpan_dev_notifier);
1285 if (err < 0) {
1286 dev_remove_pack(&lowpan_packet_type);
1287 lowpan_netlink_fini();
1288 }
1254out: 1289out:
1255 return err; 1290 return err;
1256} 1291}
@@ -1263,6 +1298,8 @@ static void __exit lowpan_cleanup_module(void)
1263 1298
1264 dev_remove_pack(&lowpan_packet_type); 1299 dev_remove_pack(&lowpan_packet_type);
1265 1300
1301 unregister_netdevice_notifier(&lowpan_dev_notifier);
1302
1266 /* Now 6lowpan packet_type is removed, so no new fragments are 1303 /* Now 6lowpan packet_type is removed, so no new fragments are
1267 * expected on RX, therefore that's the time to clean incomplete 1304 * expected on RX, therefore that's the time to clean incomplete
1268 * fragments. 1305 * fragments.
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 1e9917124e75..96bb08abece2 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -246,7 +246,7 @@ nla_put_failure:
246} 246}
247EXPORT_SYMBOL(ieee802154_nl_start_confirm); 247EXPORT_SYMBOL(ieee802154_nl_start_confirm);
248 248
249static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, 249static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
250 u32 seq, int flags, struct net_device *dev) 250 u32 seq, int flags, struct net_device *dev)
251{ 251{
252 void *hdr; 252 void *hdr;
@@ -534,7 +534,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
534 if (!msg) 534 if (!msg)
535 goto out_dev; 535 goto out_dev;
536 536
537 rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq, 537 rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
538 0, dev); 538 0, dev);
539 if (rc < 0) 539 if (rc < 0)
540 goto out_free; 540 goto out_free;
@@ -565,7 +565,7 @@ static int ieee802154_dump_iface(struct sk_buff *skb,
565 if (idx < s_idx || (dev->type != ARPHRD_IEEE802154)) 565 if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
566 goto cont; 566 goto cont;
567 567
568 if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid, 568 if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
569 cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) 569 cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
570 break; 570 break;
571cont: 571cont:
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index d54be34cca94..22b1a7058fd3 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -35,7 +35,7 @@
35 35
36#include "ieee802154.h" 36#include "ieee802154.h"
37 37
38static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid, 38static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
39 u32 seq, int flags, struct wpan_phy *phy) 39 u32 seq, int flags, struct wpan_phy *phy)
40{ 40{
41 void *hdr; 41 void *hdr;
@@ -105,7 +105,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
105 if (!msg) 105 if (!msg)
106 goto out_dev; 106 goto out_dev;
107 107
108 rc = ieee802154_nl_fill_phy(msg, info->snd_pid, info->snd_seq, 108 rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
109 0, phy); 109 0, phy);
110 if (rc < 0) 110 if (rc < 0)
111 goto out_free; 111 goto out_free;
@@ -138,7 +138,7 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
138 return 0; 138 return 0;
139 139
140 rc = ieee802154_nl_fill_phy(data->skb, 140 rc = ieee802154_nl_fill_phy(data->skb,
141 NETLINK_CB(data->cb->skb).pid, 141 NETLINK_CB(data->cb->skb).portid,
142 data->cb->nlh->nlmsg_seq, 142 data->cb->nlh->nlmsg_seq,
143 NLM_F_MULTI, 143 NLM_F_MULTI,
144 phy); 144 phy);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index fe4582ca969a..766c59658563 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -212,6 +212,26 @@ int inet_listen(struct socket *sock, int backlog)
212 * we can only allow the backlog to be adjusted. 212 * we can only allow the backlog to be adjusted.
213 */ 213 */
214 if (old_state != TCP_LISTEN) { 214 if (old_state != TCP_LISTEN) {
215 /* Check special setups for testing purpose to enable TFO w/o
216 * requiring TCP_FASTOPEN sockopt.
217 * Note that only TCP sockets (SOCK_STREAM) will reach here.
218 * Also fastopenq may already been allocated because this
219 * socket was in TCP_LISTEN state previously but was
220 * shutdown() (rather than close()).
221 */
222 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
223 inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
224 if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
225 err = fastopen_init_queue(sk, backlog);
226 else if ((sysctl_tcp_fastopen &
227 TFO_SERVER_WO_SOCKOPT2) != 0)
228 err = fastopen_init_queue(sk,
229 ((uint)sysctl_tcp_fastopen) >> 16);
230 else
231 err = 0;
232 if (err)
233 goto out;
234 }
215 err = inet_csk_listen_start(sk, backlog); 235 err = inet_csk_listen_start(sk, backlog);
216 if (err) 236 if (err)
217 goto out; 237 goto out;
@@ -701,7 +721,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
701 721
702 sock_rps_record_flow(sk2); 722 sock_rps_record_flow(sk2);
703 WARN_ON(!((1 << sk2->sk_state) & 723 WARN_ON(!((1 << sk2->sk_state) &
704 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE))); 724 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
725 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
705 726
706 sock_graft(sk2, newsock); 727 sock_graft(sk2, newsock);
707 728
@@ -1364,7 +1385,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1364 if (*(u8 *)iph != 0x45) 1385 if (*(u8 *)iph != 0x45)
1365 goto out_unlock; 1386 goto out_unlock;
1366 1387
1367 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1388 if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1368 goto out_unlock; 1389 goto out_unlock;
1369 1390
1370 id = ntohl(*(__be32 *)&iph->id); 1391 id = ntohl(*(__be32 *)&iph->id);
@@ -1380,7 +1401,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1380 iph2 = ip_hdr(p); 1401 iph2 = ip_hdr(p);
1381 1402
1382 if ((iph->protocol ^ iph2->protocol) | 1403 if ((iph->protocol ^ iph2->protocol) |
1383 (iph->tos ^ iph2->tos) |
1384 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | 1404 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1385 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { 1405 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1386 NAPI_GRO_CB(p)->same_flow = 0; 1406 NAPI_GRO_CB(p)->same_flow = 0;
@@ -1390,6 +1410,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1390 /* All fields must match except length and checksum. */ 1410 /* All fields must match except length and checksum. */
1391 NAPI_GRO_CB(p)->flush |= 1411 NAPI_GRO_CB(p)->flush |=
1392 (iph->ttl ^ iph2->ttl) | 1412 (iph->ttl ^ iph2->ttl) |
1413 (iph->tos ^ iph2->tos) |
1393 ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id); 1414 ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
1394 1415
1395 NAPI_GRO_CB(p)->flush |= flush; 1416 NAPI_GRO_CB(p)->flush |= flush;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e12fad773852..2a6abc163ed2 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -94,25 +94,22 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
94 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, 94 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
95}; 95};
96 96
97/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE 97#define IN4_ADDR_HSIZE_SHIFT 8
98 * value. So if you change this define, make appropriate changes to 98#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
99 * inet_addr_hash as well. 99
100 */
101#define IN4_ADDR_HSIZE 256
102static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE]; 100static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
103static DEFINE_SPINLOCK(inet_addr_hash_lock); 101static DEFINE_SPINLOCK(inet_addr_hash_lock);
104 102
105static inline unsigned int inet_addr_hash(struct net *net, __be32 addr) 103static u32 inet_addr_hash(struct net *net, __be32 addr)
106{ 104{
107 u32 val = (__force u32) addr ^ hash_ptr(net, 8); 105 u32 val = (__force u32) addr ^ net_hash_mix(net);
108 106
109 return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) & 107 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
110 (IN4_ADDR_HSIZE - 1));
111} 108}
112 109
113static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa) 110static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
114{ 111{
115 unsigned int hash = inet_addr_hash(net, ifa->ifa_local); 112 u32 hash = inet_addr_hash(net, ifa->ifa_local);
116 113
117 spin_lock(&inet_addr_hash_lock); 114 spin_lock(&inet_addr_hash_lock);
118 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]); 115 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -136,18 +133,18 @@ static void inet_hash_remove(struct in_ifaddr *ifa)
136 */ 133 */
137struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) 134struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
138{ 135{
139 unsigned int hash = inet_addr_hash(net, addr); 136 u32 hash = inet_addr_hash(net, addr);
140 struct net_device *result = NULL; 137 struct net_device *result = NULL;
141 struct in_ifaddr *ifa; 138 struct in_ifaddr *ifa;
142 struct hlist_node *node; 139 struct hlist_node *node;
143 140
144 rcu_read_lock(); 141 rcu_read_lock();
145 hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) { 142 hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
146 struct net_device *dev = ifa->ifa_dev->dev;
147
148 if (!net_eq(dev_net(dev), net))
149 continue;
150 if (ifa->ifa_local == addr) { 143 if (ifa->ifa_local == addr) {
144 struct net_device *dev = ifa->ifa_dev->dev;
145
146 if (!net_eq(dev_net(dev), net))
147 continue;
151 result = dev; 148 result = dev;
152 break; 149 break;
153 } 150 }
@@ -182,10 +179,10 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
182static void devinet_sysctl_register(struct in_device *idev); 179static void devinet_sysctl_register(struct in_device *idev);
183static void devinet_sysctl_unregister(struct in_device *idev); 180static void devinet_sysctl_unregister(struct in_device *idev);
184#else 181#else
185static inline void devinet_sysctl_register(struct in_device *idev) 182static void devinet_sysctl_register(struct in_device *idev)
186{ 183{
187} 184}
188static inline void devinet_sysctl_unregister(struct in_device *idev) 185static void devinet_sysctl_unregister(struct in_device *idev)
189{ 186{
190} 187}
191#endif 188#endif
@@ -205,7 +202,7 @@ static void inet_rcu_free_ifa(struct rcu_head *head)
205 kfree(ifa); 202 kfree(ifa);
206} 203}
207 204
208static inline void inet_free_ifa(struct in_ifaddr *ifa) 205static void inet_free_ifa(struct in_ifaddr *ifa)
209{ 206{
210 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa); 207 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
211} 208}
@@ -314,7 +311,7 @@ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
314} 311}
315 312
316static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, 313static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
317 int destroy, struct nlmsghdr *nlh, u32 pid) 314 int destroy, struct nlmsghdr *nlh, u32 portid)
318{ 315{
319 struct in_ifaddr *promote = NULL; 316 struct in_ifaddr *promote = NULL;
320 struct in_ifaddr *ifa, *ifa1 = *ifap; 317 struct in_ifaddr *ifa, *ifa1 = *ifap;
@@ -348,7 +345,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
348 inet_hash_remove(ifa); 345 inet_hash_remove(ifa);
349 *ifap1 = ifa->ifa_next; 346 *ifap1 = ifa->ifa_next;
350 347
351 rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid); 348 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
352 blocking_notifier_call_chain(&inetaddr_chain, 349 blocking_notifier_call_chain(&inetaddr_chain,
353 NETDEV_DOWN, ifa); 350 NETDEV_DOWN, ifa);
354 inet_free_ifa(ifa); 351 inet_free_ifa(ifa);
@@ -385,7 +382,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
385 is valid, it will try to restore deleted routes... Grr. 382 is valid, it will try to restore deleted routes... Grr.
386 So that, this order is correct. 383 So that, this order is correct.
387 */ 384 */
388 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, pid); 385 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
389 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); 386 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
390 387
391 if (promote) { 388 if (promote) {
@@ -398,7 +395,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
398 } 395 }
399 396
400 promote->ifa_flags &= ~IFA_F_SECONDARY; 397 promote->ifa_flags &= ~IFA_F_SECONDARY;
401 rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); 398 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
402 blocking_notifier_call_chain(&inetaddr_chain, 399 blocking_notifier_call_chain(&inetaddr_chain,
403 NETDEV_UP, promote); 400 NETDEV_UP, promote);
404 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) { 401 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
@@ -420,7 +417,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
420} 417}
421 418
422static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, 419static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
423 u32 pid) 420 u32 portid)
424{ 421{
425 struct in_device *in_dev = ifa->ifa_dev; 422 struct in_device *in_dev = ifa->ifa_dev;
426 struct in_ifaddr *ifa1, **ifap, **last_primary; 423 struct in_ifaddr *ifa1, **ifap, **last_primary;
@@ -467,7 +464,7 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
467 /* Send message first, then call notifier. 464 /* Send message first, then call notifier.
468 Notifier will trigger FIB update, so that 465 Notifier will trigger FIB update, so that
469 listeners of netlink will know about new ifaddr */ 466 listeners of netlink will know about new ifaddr */
470 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid); 467 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
471 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); 468 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
472 469
473 return 0; 470 return 0;
@@ -566,7 +563,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
566 !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa))) 563 !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
567 continue; 564 continue;
568 565
569 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid); 566 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
570 return 0; 567 return 0;
571 } 568 }
572 569
@@ -652,14 +649,14 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
652 if (IS_ERR(ifa)) 649 if (IS_ERR(ifa))
653 return PTR_ERR(ifa); 650 return PTR_ERR(ifa);
654 651
655 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).pid); 652 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
656} 653}
657 654
658/* 655/*
659 * Determine a default network mask, based on the IP address. 656 * Determine a default network mask, based on the IP address.
660 */ 657 */
661 658
662static inline int inet_abc_len(__be32 addr) 659static int inet_abc_len(__be32 addr)
663{ 660{
664 int rc = -1; /* Something else, probably a multicast. */ 661 int rc = -1; /* Something else, probably a multicast. */
665 662
@@ -1124,7 +1121,7 @@ skip:
1124 } 1121 }
1125} 1122}
1126 1123
1127static inline bool inetdev_valid_mtu(unsigned int mtu) 1124static bool inetdev_valid_mtu(unsigned int mtu)
1128{ 1125{
1129 return mtu >= 68; 1126 return mtu >= 68;
1130} 1127}
@@ -1239,7 +1236,7 @@ static struct notifier_block ip_netdev_notifier = {
1239 .notifier_call = inetdev_event, 1236 .notifier_call = inetdev_event,
1240}; 1237};
1241 1238
1242static inline size_t inet_nlmsg_size(void) 1239static size_t inet_nlmsg_size(void)
1243{ 1240{
1244 return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) 1241 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1245 + nla_total_size(4) /* IFA_ADDRESS */ 1242 + nla_total_size(4) /* IFA_ADDRESS */
@@ -1249,12 +1246,12 @@ static inline size_t inet_nlmsg_size(void)
1249} 1246}
1250 1247
1251static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, 1248static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1252 u32 pid, u32 seq, int event, unsigned int flags) 1249 u32 portid, u32 seq, int event, unsigned int flags)
1253{ 1250{
1254 struct ifaddrmsg *ifm; 1251 struct ifaddrmsg *ifm;
1255 struct nlmsghdr *nlh; 1252 struct nlmsghdr *nlh;
1256 1253
1257 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); 1254 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1258 if (nlh == NULL) 1255 if (nlh == NULL)
1259 return -EMSGSIZE; 1256 return -EMSGSIZE;
1260 1257
@@ -1316,7 +1313,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1316 if (ip_idx < s_ip_idx) 1313 if (ip_idx < s_ip_idx)
1317 continue; 1314 continue;
1318 if (inet_fill_ifaddr(skb, ifa, 1315 if (inet_fill_ifaddr(skb, ifa,
1319 NETLINK_CB(cb->skb).pid, 1316 NETLINK_CB(cb->skb).portid,
1320 cb->nlh->nlmsg_seq, 1317 cb->nlh->nlmsg_seq,
1321 RTM_NEWADDR, NLM_F_MULTI) <= 0) { 1318 RTM_NEWADDR, NLM_F_MULTI) <= 0) {
1322 rcu_read_unlock(); 1319 rcu_read_unlock();
@@ -1338,7 +1335,7 @@ done:
1338} 1335}
1339 1336
1340static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, 1337static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1341 u32 pid) 1338 u32 portid)
1342{ 1339{
1343 struct sk_buff *skb; 1340 struct sk_buff *skb;
1344 u32 seq = nlh ? nlh->nlmsg_seq : 0; 1341 u32 seq = nlh ? nlh->nlmsg_seq : 0;
@@ -1350,14 +1347,14 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1350 if (skb == NULL) 1347 if (skb == NULL)
1351 goto errout; 1348 goto errout;
1352 1349
1353 err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); 1350 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1354 if (err < 0) { 1351 if (err < 0) {
1355 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */ 1352 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1356 WARN_ON(err == -EMSGSIZE); 1353 WARN_ON(err == -EMSGSIZE);
1357 kfree_skb(skb); 1354 kfree_skb(skb);
1358 goto errout; 1355 goto errout;
1359 } 1356 }
1360 rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); 1357 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1361 return; 1358 return;
1362errout: 1359errout:
1363 if (err < 0) 1360 if (err < 0)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8e2b475da9fa..68c93d1bb03a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -218,7 +218,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
218 scope = RT_SCOPE_UNIVERSE; 218 scope = RT_SCOPE_UNIVERSE;
219 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 219 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
220 fl4.flowi4_oif = 0; 220 fl4.flowi4_oif = 0;
221 fl4.flowi4_iif = net->loopback_dev->ifindex; 221 fl4.flowi4_iif = LOOPBACK_IFINDEX;
222 fl4.daddr = ip_hdr(skb)->saddr; 222 fl4.daddr = ip_hdr(skb)->saddr;
223 fl4.saddr = 0; 223 fl4.saddr = 0;
224 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 224 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
@@ -557,7 +557,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
557 cfg->fc_flags = rtm->rtm_flags; 557 cfg->fc_flags = rtm->rtm_flags;
558 cfg->fc_nlflags = nlh->nlmsg_flags; 558 cfg->fc_nlflags = nlh->nlmsg_flags;
559 559
560 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; 560 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
561 cfg->fc_nlinfo.nlh = nlh; 561 cfg->fc_nlinfo.nlh = nlh;
562 cfg->fc_nlinfo.nl_net = net; 562 cfg->fc_nlinfo.nl_net = net;
563 563
@@ -955,7 +955,7 @@ static void nl_fib_input(struct sk_buff *skb)
955 struct fib_result_nl *frn; 955 struct fib_result_nl *frn;
956 struct nlmsghdr *nlh; 956 struct nlmsghdr *nlh;
957 struct fib_table *tb; 957 struct fib_table *tb;
958 u32 pid; 958 u32 portid;
959 959
960 net = sock_net(skb->sk); 960 net = sock_net(skb->sk);
961 nlh = nlmsg_hdr(skb); 961 nlh = nlmsg_hdr(skb);
@@ -973,10 +973,10 @@ static void nl_fib_input(struct sk_buff *skb)
973 973
974 nl_fib_lookup(frn, tb); 974 nl_fib_lookup(frn, tb);
975 975
976 pid = NETLINK_CB(skb).pid; /* pid of sending process */ 976 portid = NETLINK_CB(skb).portid; /* pid of sending process */
977 NETLINK_CB(skb).pid = 0; /* from kernel */ 977 NETLINK_CB(skb).portid = 0; /* from kernel */
978 NETLINK_CB(skb).dst_group = 0; /* unicast */ 978 NETLINK_CB(skb).dst_group = 0; /* unicast */
979 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT); 979 netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
980} 980}
981 981
982static int __net_init nl_fib_lookup_init(struct net *net) 982static int __net_init nl_fib_lookup_init(struct net *net)
@@ -986,7 +986,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
986 .input = nl_fib_input, 986 .input = nl_fib_input,
987 }; 987 };
988 988
989 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg); 989 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
990 if (sk == NULL) 990 if (sk == NULL)
991 return -EAFNOSUPPORT; 991 return -EAFNOSUPPORT;
992 net->ipv4.fibnl = sk; 992 net->ipv4.fibnl = sk;
@@ -1041,7 +1041,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
1041static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1041static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1042{ 1042{
1043 struct net_device *dev = ptr; 1043 struct net_device *dev = ptr;
1044 struct in_device *in_dev = __in_dev_get_rtnl(dev); 1044 struct in_device *in_dev;
1045 struct net *net = dev_net(dev); 1045 struct net *net = dev_net(dev);
1046 1046
1047 if (event == NETDEV_UNREGISTER) { 1047 if (event == NETDEV_UNREGISTER) {
@@ -1050,8 +1050,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1050 return NOTIFY_DONE; 1050 return NOTIFY_DONE;
1051 } 1051 }
1052 1052
1053 if (!in_dev) 1053 in_dev = __in_dev_get_rtnl(dev);
1054 return NOTIFY_DONE;
1055 1054
1056 switch (event) { 1055 switch (event) {
1057 case NETDEV_UP: 1056 case NETDEV_UP:
@@ -1062,16 +1061,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1062 fib_sync_up(dev); 1061 fib_sync_up(dev);
1063#endif 1062#endif
1064 atomic_inc(&net->ipv4.dev_addr_genid); 1063 atomic_inc(&net->ipv4.dev_addr_genid);
1065 rt_cache_flush(dev_net(dev)); 1064 rt_cache_flush(net);
1066 break; 1065 break;
1067 case NETDEV_DOWN: 1066 case NETDEV_DOWN:
1068 fib_disable_ip(dev, 0); 1067 fib_disable_ip(dev, 0);
1069 break; 1068 break;
1070 case NETDEV_CHANGEMTU: 1069 case NETDEV_CHANGEMTU:
1071 case NETDEV_CHANGE: 1070 case NETDEV_CHANGE:
1072 rt_cache_flush(dev_net(dev)); 1071 rt_cache_flush(net);
1073 break;
1074 case NETDEV_UNREGISTER_BATCH:
1075 break; 1072 break;
1076 } 1073 }
1077 return NOTIFY_DONE; 1074 return NOTIFY_DONE;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index da80dc14cc76..3509065e409a 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -391,7 +391,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
391 if (skb == NULL) 391 if (skb == NULL)
392 goto errout; 392 goto errout;
393 393
394 err = fib_dump_info(skb, info->pid, seq, event, tb_id, 394 err = fib_dump_info(skb, info->portid, seq, event, tb_id,
395 fa->fa_type, key, dst_len, 395 fa->fa_type, key, dst_len,
396 fa->fa_tos, fa->fa_info, nlm_flags); 396 fa->fa_tos, fa->fa_info, nlm_flags);
397 if (err < 0) { 397 if (err < 0) {
@@ -400,7 +400,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
400 kfree_skb(skb); 400 kfree_skb(skb);
401 goto errout; 401 goto errout;
402 } 402 }
403 rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE, 403 rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
404 info->nlh, GFP_KERNEL); 404 info->nlh, GFP_KERNEL);
405 return; 405 return;
406errout: 406errout:
@@ -989,14 +989,14 @@ failure:
989 return ERR_PTR(err); 989 return ERR_PTR(err);
990} 990}
991 991
992int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 992int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
993 u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, 993 u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
994 struct fib_info *fi, unsigned int flags) 994 struct fib_info *fi, unsigned int flags)
995{ 995{
996 struct nlmsghdr *nlh; 996 struct nlmsghdr *nlh;
997 struct rtmsg *rtm; 997 struct rtmsg *rtm;
998 998
999 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags); 999 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
1000 if (nlh == NULL) 1000 if (nlh == NULL)
1001 return -EMSGSIZE; 1001 return -EMSGSIZE;
1002 1002
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d1b93595b4a7..31d771ca9a70 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1550,7 +1550,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
1550 * state.directly. 1550 * state.directly.
1551 */ 1551 */
1552 if (pref_mismatch) { 1552 if (pref_mismatch) {
1553 int mp = KEYLENGTH - fls(pref_mismatch); 1553 /* fls(x) = __fls(x) + 1 */
1554 int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
1554 1555
1555 if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0) 1556 if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
1556 goto backtrace; 1557 goto backtrace;
@@ -1655,7 +1656,12 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1655 if (!l) 1656 if (!l)
1656 return -ESRCH; 1657 return -ESRCH;
1657 1658
1658 fa_head = get_fa_head(l, plen); 1659 li = find_leaf_info(l, plen);
1660
1661 if (!li)
1662 return -ESRCH;
1663
1664 fa_head = &li->falh;
1659 fa = fib_find_alias(fa_head, tos, 0); 1665 fa = fib_find_alias(fa_head, tos, 0);
1660 1666
1661 if (!fa) 1667 if (!fa)
@@ -1691,9 +1697,6 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1691 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, 1697 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1692 &cfg->fc_nlinfo, 0); 1698 &cfg->fc_nlinfo, 0);
1693 1699
1694 l = fib_find_node(t, key);
1695 li = find_leaf_info(l, plen);
1696
1697 list_del_rcu(&fa->fa_list); 1700 list_del_rcu(&fa->fa_list);
1698 1701
1699 if (!plen) 1702 if (!plen)
@@ -1870,7 +1873,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1870 continue; 1873 continue;
1871 } 1874 }
1872 1875
1873 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid, 1876 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
1874 cb->nlh->nlmsg_seq, 1877 cb->nlh->nlmsg_seq,
1875 RTM_NEWROUTE, 1878 RTM_NEWROUTE,
1876 tb->tb_id, 1879 tb->tb_id,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6699f23e6f55..736ab70fd179 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -815,14 +815,15 @@ static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
815 return 1; 815 return 1;
816} 816}
817 817
818static void igmp_heard_report(struct in_device *in_dev, __be32 group) 818/* return true if packet was dropped */
819static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
819{ 820{
820 struct ip_mc_list *im; 821 struct ip_mc_list *im;
821 822
822 /* Timers are only set for non-local groups */ 823 /* Timers are only set for non-local groups */
823 824
824 if (group == IGMP_ALL_HOSTS) 825 if (group == IGMP_ALL_HOSTS)
825 return; 826 return false;
826 827
827 rcu_read_lock(); 828 rcu_read_lock();
828 for_each_pmc_rcu(in_dev, im) { 829 for_each_pmc_rcu(in_dev, im) {
@@ -832,9 +833,11 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
832 } 833 }
833 } 834 }
834 rcu_read_unlock(); 835 rcu_read_unlock();
836 return false;
835} 837}
836 838
837static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, 839/* return true if packet was dropped */
840static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
838 int len) 841 int len)
839{ 842{
840 struct igmphdr *ih = igmp_hdr(skb); 843 struct igmphdr *ih = igmp_hdr(skb);
@@ -866,7 +869,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
866 /* clear deleted report items */ 869 /* clear deleted report items */
867 igmpv3_clear_delrec(in_dev); 870 igmpv3_clear_delrec(in_dev);
868 } else if (len < 12) { 871 } else if (len < 12) {
869 return; /* ignore bogus packet; freed by caller */ 872 return true; /* ignore bogus packet; freed by caller */
870 } else if (IGMP_V1_SEEN(in_dev)) { 873 } else if (IGMP_V1_SEEN(in_dev)) {
871 /* This is a v3 query with v1 queriers present */ 874 /* This is a v3 query with v1 queriers present */
872 max_delay = IGMP_Query_Response_Interval; 875 max_delay = IGMP_Query_Response_Interval;
@@ -883,13 +886,13 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
883 max_delay = 1; /* can't mod w/ 0 */ 886 max_delay = 1; /* can't mod w/ 0 */
884 } else { /* v3 */ 887 } else { /* v3 */
885 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) 888 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
886 return; 889 return true;
887 890
888 ih3 = igmpv3_query_hdr(skb); 891 ih3 = igmpv3_query_hdr(skb);
889 if (ih3->nsrcs) { 892 if (ih3->nsrcs) {
890 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) 893 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
891 + ntohs(ih3->nsrcs)*sizeof(__be32))) 894 + ntohs(ih3->nsrcs)*sizeof(__be32)))
892 return; 895 return true;
893 ih3 = igmpv3_query_hdr(skb); 896 ih3 = igmpv3_query_hdr(skb);
894 } 897 }
895 898
@@ -901,9 +904,9 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
901 in_dev->mr_qrv = ih3->qrv; 904 in_dev->mr_qrv = ih3->qrv;
902 if (!group) { /* general query */ 905 if (!group) { /* general query */
903 if (ih3->nsrcs) 906 if (ih3->nsrcs)
904 return; /* no sources allowed */ 907 return false; /* no sources allowed */
905 igmp_gq_start_timer(in_dev); 908 igmp_gq_start_timer(in_dev);
906 return; 909 return false;
907 } 910 }
908 /* mark sources to include, if group & source-specific */ 911 /* mark sources to include, if group & source-specific */
909 mark = ih3->nsrcs != 0; 912 mark = ih3->nsrcs != 0;
@@ -939,6 +942,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
939 igmp_mod_timer(im, max_delay); 942 igmp_mod_timer(im, max_delay);
940 } 943 }
941 rcu_read_unlock(); 944 rcu_read_unlock();
945 return false;
942} 946}
943 947
944/* called in rcu_read_lock() section */ 948/* called in rcu_read_lock() section */
@@ -948,6 +952,7 @@ int igmp_rcv(struct sk_buff *skb)
948 struct igmphdr *ih; 952 struct igmphdr *ih;
949 struct in_device *in_dev = __in_dev_get_rcu(skb->dev); 953 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
950 int len = skb->len; 954 int len = skb->len;
955 bool dropped = true;
951 956
952 if (in_dev == NULL) 957 if (in_dev == NULL)
953 goto drop; 958 goto drop;
@@ -969,7 +974,7 @@ int igmp_rcv(struct sk_buff *skb)
969 ih = igmp_hdr(skb); 974 ih = igmp_hdr(skb);
970 switch (ih->type) { 975 switch (ih->type) {
971 case IGMP_HOST_MEMBERSHIP_QUERY: 976 case IGMP_HOST_MEMBERSHIP_QUERY:
972 igmp_heard_query(in_dev, skb, len); 977 dropped = igmp_heard_query(in_dev, skb, len);
973 break; 978 break;
974 case IGMP_HOST_MEMBERSHIP_REPORT: 979 case IGMP_HOST_MEMBERSHIP_REPORT:
975 case IGMPV2_HOST_MEMBERSHIP_REPORT: 980 case IGMPV2_HOST_MEMBERSHIP_REPORT:
@@ -979,7 +984,7 @@ int igmp_rcv(struct sk_buff *skb)
979 /* don't rely on MC router hearing unicast reports */ 984 /* don't rely on MC router hearing unicast reports */
980 if (skb->pkt_type == PACKET_MULTICAST || 985 if (skb->pkt_type == PACKET_MULTICAST ||
981 skb->pkt_type == PACKET_BROADCAST) 986 skb->pkt_type == PACKET_BROADCAST)
982 igmp_heard_report(in_dev, ih->group); 987 dropped = igmp_heard_report(in_dev, ih->group);
983 break; 988 break;
984 case IGMP_PIM: 989 case IGMP_PIM:
985#ifdef CONFIG_IP_PIMSM_V1 990#ifdef CONFIG_IP_PIMSM_V1
@@ -997,7 +1002,10 @@ int igmp_rcv(struct sk_buff *skb)
997 } 1002 }
998 1003
999drop: 1004drop:
1000 kfree_skb(skb); 1005 if (dropped)
1006 kfree_skb(skb);
1007 else
1008 consume_skb(skb);
1001 return 0; 1009 return 0;
1002} 1010}
1003 1011
@@ -1896,6 +1904,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1896 rtnl_unlock(); 1904 rtnl_unlock();
1897 return ret; 1905 return ret;
1898} 1906}
1907EXPORT_SYMBOL(ip_mc_leave_group);
1899 1908
1900int ip_mc_source(int add, int omode, struct sock *sk, struct 1909int ip_mc_source(int add, int omode, struct sock *sk, struct
1901 ip_mreq_source *mreqs, int ifindex) 1910 ip_mreq_source *mreqs, int ifindex)
@@ -2435,6 +2444,8 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2435 struct ip_mc_list *im = (struct ip_mc_list *)v; 2444 struct ip_mc_list *im = (struct ip_mc_list *)v;
2436 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2445 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2437 char *querier; 2446 char *querier;
2447 long delta;
2448
2438#ifdef CONFIG_IP_MULTICAST 2449#ifdef CONFIG_IP_MULTICAST
2439 querier = IGMP_V1_SEEN(state->in_dev) ? "V1" : 2450 querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
2440 IGMP_V2_SEEN(state->in_dev) ? "V2" : 2451 IGMP_V2_SEEN(state->in_dev) ? "V2" :
@@ -2448,11 +2459,12 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2448 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); 2459 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
2449 } 2460 }
2450 2461
2462 delta = im->timer.expires - jiffies;
2451 seq_printf(seq, 2463 seq_printf(seq,
2452 "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n", 2464 "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
2453 im->multiaddr, im->users, 2465 im->multiaddr, im->users,
2454 im->tm_running, im->tm_running ? 2466 im->tm_running,
2455 jiffies_to_clock_t(im->timer.expires-jiffies) : 0, 2467 im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
2456 im->reporter); 2468 im->reporter);
2457 } 2469 }
2458 return 0; 2470 return 0;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7f75f21d7b83..f0c5b9c1a957 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -283,7 +283,9 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
283struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) 283struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
284{ 284{
285 struct inet_connection_sock *icsk = inet_csk(sk); 285 struct inet_connection_sock *icsk = inet_csk(sk);
286 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
286 struct sock *newsk; 287 struct sock *newsk;
288 struct request_sock *req;
287 int error; 289 int error;
288 290
289 lock_sock(sk); 291 lock_sock(sk);
@@ -296,7 +298,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
296 goto out_err; 298 goto out_err;
297 299
298 /* Find already established connection */ 300 /* Find already established connection */
299 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) { 301 if (reqsk_queue_empty(queue)) {
300 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 302 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
301 303
302 /* If this is a non blocking socket don't sleep */ 304 /* If this is a non blocking socket don't sleep */
@@ -308,14 +310,32 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
308 if (error) 310 if (error)
309 goto out_err; 311 goto out_err;
310 } 312 }
311 313 req = reqsk_queue_remove(queue);
312 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); 314 newsk = req->sk;
313 WARN_ON(newsk->sk_state == TCP_SYN_RECV); 315
316 sk_acceptq_removed(sk);
317 if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
318 spin_lock_bh(&queue->fastopenq->lock);
319 if (tcp_rsk(req)->listener) {
320 /* We are still waiting for the final ACK from 3WHS
321 * so can't free req now. Instead, we set req->sk to
322 * NULL to signify that the child socket is taken
323 * so reqsk_fastopen_remove() will free the req
324 * when 3WHS finishes (or is aborted).
325 */
326 req->sk = NULL;
327 req = NULL;
328 }
329 spin_unlock_bh(&queue->fastopenq->lock);
330 }
314out: 331out:
315 release_sock(sk); 332 release_sock(sk);
333 if (req)
334 __reqsk_free(req);
316 return newsk; 335 return newsk;
317out_err: 336out_err:
318 newsk = NULL; 337 newsk = NULL;
338 req = NULL;
319 *err = error; 339 *err = error;
320 goto out; 340 goto out;
321} 341}
@@ -720,13 +740,14 @@ EXPORT_SYMBOL_GPL(inet_csk_listen_start);
720void inet_csk_listen_stop(struct sock *sk) 740void inet_csk_listen_stop(struct sock *sk)
721{ 741{
722 struct inet_connection_sock *icsk = inet_csk(sk); 742 struct inet_connection_sock *icsk = inet_csk(sk);
743 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
723 struct request_sock *acc_req; 744 struct request_sock *acc_req;
724 struct request_sock *req; 745 struct request_sock *req;
725 746
726 inet_csk_delete_keepalive_timer(sk); 747 inet_csk_delete_keepalive_timer(sk);
727 748
728 /* make all the listen_opt local to us */ 749 /* make all the listen_opt local to us */
729 acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue); 750 acc_req = reqsk_queue_yank_acceptq(queue);
730 751
731 /* Following specs, it would be better either to send FIN 752 /* Following specs, it would be better either to send FIN
732 * (and enter FIN-WAIT-1, it is normal close) 753 * (and enter FIN-WAIT-1, it is normal close)
@@ -736,7 +757,7 @@ void inet_csk_listen_stop(struct sock *sk)
736 * To be honest, we are not able to make either 757 * To be honest, we are not able to make either
737 * of the variants now. --ANK 758 * of the variants now. --ANK
738 */ 759 */
739 reqsk_queue_destroy(&icsk->icsk_accept_queue); 760 reqsk_queue_destroy(queue);
740 761
741 while ((req = acc_req) != NULL) { 762 while ((req = acc_req) != NULL) {
742 struct sock *child = req->sk; 763 struct sock *child = req->sk;
@@ -754,6 +775,19 @@ void inet_csk_listen_stop(struct sock *sk)
754 775
755 percpu_counter_inc(sk->sk_prot->orphan_count); 776 percpu_counter_inc(sk->sk_prot->orphan_count);
756 777
778 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
779 BUG_ON(tcp_sk(child)->fastopen_rsk != req);
780 BUG_ON(sk != tcp_rsk(req)->listener);
781
782 /* Paranoid, to prevent race condition if
783 * an inbound pkt destined for child is
784 * blocked by sock lock in tcp_v4_rcv().
785 * Also to satisfy an assertion in
786 * tcp_v4_destroy_sock().
787 */
788 tcp_sk(child)->fastopen_rsk = NULL;
789 sock_put(sk);
790 }
757 inet_csk_destroy_sock(child); 791 inet_csk_destroy_sock(child);
758 792
759 bh_unlock_sock(child); 793 bh_unlock_sock(child);
@@ -763,6 +797,17 @@ void inet_csk_listen_stop(struct sock *sk)
763 sk_acceptq_removed(sk); 797 sk_acceptq_removed(sk);
764 __reqsk_free(req); 798 __reqsk_free(req);
765 } 799 }
800 if (queue->fastopenq != NULL) {
801 /* Free all the reqs queued in rskq_rst_head. */
802 spin_lock_bh(&queue->fastopenq->lock);
803 acc_req = queue->fastopenq->rskq_rst_head;
804 queue->fastopenq->rskq_rst_head = NULL;
805 spin_unlock_bh(&queue->fastopenq->lock);
806 while ((req = acc_req) != NULL) {
807 acc_req = req->dl_next;
808 __reqsk_free(req);
809 }
810 }
766 WARN_ON(sk->sk_ack_backlog); 811 WARN_ON(sk->sk_ack_backlog);
767} 812}
768EXPORT_SYMBOL_GPL(inet_csk_listen_stop); 813EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8bc005b1435f..535584c00f91 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -70,7 +70,7 @@ static inline void inet_diag_unlock_handler(
70int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 70int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
71 struct sk_buff *skb, struct inet_diag_req_v2 *req, 71 struct sk_buff *skb, struct inet_diag_req_v2 *req,
72 struct user_namespace *user_ns, 72 struct user_namespace *user_ns,
73 u32 pid, u32 seq, u16 nlmsg_flags, 73 u32 portid, u32 seq, u16 nlmsg_flags,
74 const struct nlmsghdr *unlh) 74 const struct nlmsghdr *unlh)
75{ 75{
76 const struct inet_sock *inet = inet_sk(sk); 76 const struct inet_sock *inet = inet_sk(sk);
@@ -84,7 +84,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
84 handler = inet_diag_table[req->sdiag_protocol]; 84 handler = inet_diag_table[req->sdiag_protocol];
85 BUG_ON(handler == NULL); 85 BUG_ON(handler == NULL);
86 86
87 nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r), 87 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
88 nlmsg_flags); 88 nlmsg_flags);
89 if (!nlh) 89 if (!nlh)
90 return -EMSGSIZE; 90 return -EMSGSIZE;
@@ -201,23 +201,23 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
201static int inet_csk_diag_fill(struct sock *sk, 201static int inet_csk_diag_fill(struct sock *sk,
202 struct sk_buff *skb, struct inet_diag_req_v2 *req, 202 struct sk_buff *skb, struct inet_diag_req_v2 *req,
203 struct user_namespace *user_ns, 203 struct user_namespace *user_ns,
204 u32 pid, u32 seq, u16 nlmsg_flags, 204 u32 portid, u32 seq, u16 nlmsg_flags,
205 const struct nlmsghdr *unlh) 205 const struct nlmsghdr *unlh)
206{ 206{
207 return inet_sk_diag_fill(sk, inet_csk(sk), 207 return inet_sk_diag_fill(sk, inet_csk(sk),
208 skb, req, user_ns, pid, seq, nlmsg_flags, unlh); 208 skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
209} 209}
210 210
211static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, 211static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
212 struct sk_buff *skb, struct inet_diag_req_v2 *req, 212 struct sk_buff *skb, struct inet_diag_req_v2 *req,
213 u32 pid, u32 seq, u16 nlmsg_flags, 213 u32 portid, u32 seq, u16 nlmsg_flags,
214 const struct nlmsghdr *unlh) 214 const struct nlmsghdr *unlh)
215{ 215{
216 long tmo; 216 long tmo;
217 struct inet_diag_msg *r; 217 struct inet_diag_msg *r;
218 struct nlmsghdr *nlh; 218 struct nlmsghdr *nlh;
219 219
220 nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r), 220 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
221 nlmsg_flags); 221 nlmsg_flags);
222 if (!nlh) 222 if (!nlh)
223 return -EMSGSIZE; 223 return -EMSGSIZE;
@@ -260,14 +260,14 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
260static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 260static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
261 struct inet_diag_req_v2 *r, 261 struct inet_diag_req_v2 *r,
262 struct user_namespace *user_ns, 262 struct user_namespace *user_ns,
263 u32 pid, u32 seq, u16 nlmsg_flags, 263 u32 portid, u32 seq, u16 nlmsg_flags,
264 const struct nlmsghdr *unlh) 264 const struct nlmsghdr *unlh)
265{ 265{
266 if (sk->sk_state == TCP_TIME_WAIT) 266 if (sk->sk_state == TCP_TIME_WAIT)
267 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, 267 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
268 skb, r, pid, seq, nlmsg_flags, 268 skb, r, portid, seq, nlmsg_flags,
269 unlh); 269 unlh);
270 return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh); 270 return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
271} 271}
272 272
273int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, 273int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -316,14 +316,14 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
316 316
317 err = sk_diag_fill(sk, rep, req, 317 err = sk_diag_fill(sk, rep, req,
318 sk_user_ns(NETLINK_CB(in_skb).ssk), 318 sk_user_ns(NETLINK_CB(in_skb).ssk),
319 NETLINK_CB(in_skb).pid, 319 NETLINK_CB(in_skb).portid,
320 nlh->nlmsg_seq, 0, nlh); 320 nlh->nlmsg_seq, 0, nlh);
321 if (err < 0) { 321 if (err < 0) {
322 WARN_ON(err == -EMSGSIZE); 322 WARN_ON(err == -EMSGSIZE);
323 nlmsg_free(rep); 323 nlmsg_free(rep);
324 goto out; 324 goto out;
325 } 325 }
326 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid, 326 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
327 MSG_DONTWAIT); 327 MSG_DONTWAIT);
328 if (err > 0) 328 if (err > 0)
329 err = 0; 329 err = 0;
@@ -557,7 +557,7 @@ static int inet_csk_diag_dump(struct sock *sk,
557 557
558 return inet_csk_diag_fill(sk, skb, r, 558 return inet_csk_diag_fill(sk, skb, r,
559 sk_user_ns(NETLINK_CB(cb->skb).ssk), 559 sk_user_ns(NETLINK_CB(cb->skb).ssk),
560 NETLINK_CB(cb->skb).pid, 560 NETLINK_CB(cb->skb).portid,
561 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 561 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
562} 562}
563 563
@@ -592,14 +592,14 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
592 } 592 }
593 593
594 return inet_twsk_diag_fill(tw, skb, r, 594 return inet_twsk_diag_fill(tw, skb, r,
595 NETLINK_CB(cb->skb).pid, 595 NETLINK_CB(cb->skb).portid,
596 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 596 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
597} 597}
598 598
599static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 599static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
600 struct request_sock *req, 600 struct request_sock *req,
601 struct user_namespace *user_ns, 601 struct user_namespace *user_ns,
602 u32 pid, u32 seq, 602 u32 portid, u32 seq,
603 const struct nlmsghdr *unlh) 603 const struct nlmsghdr *unlh)
604{ 604{
605 const struct inet_request_sock *ireq = inet_rsk(req); 605 const struct inet_request_sock *ireq = inet_rsk(req);
@@ -608,7 +608,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
608 struct nlmsghdr *nlh; 608 struct nlmsghdr *nlh;
609 long tmo; 609 long tmo;
610 610
611 nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r), 611 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
612 NLM_F_MULTI); 612 NLM_F_MULTI);
613 if (!nlh) 613 if (!nlh)
614 return -EMSGSIZE; 614 return -EMSGSIZE;
@@ -711,7 +711,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
711 711
712 err = inet_diag_fill_req(skb, sk, req, 712 err = inet_diag_fill_req(skb, sk, req,
713 sk_user_ns(NETLINK_CB(cb->skb).ssk), 713 sk_user_ns(NETLINK_CB(cb->skb).ssk),
714 NETLINK_CB(cb->skb).pid, 714 NETLINK_CB(cb->skb).portid,
715 cb->nlh->nlmsg_seq, cb->nlh); 715 cb->nlh->nlmsg_seq, cb->nlh);
716 if (err < 0) { 716 if (err < 0) {
717 cb->args[3] = j + 1; 717 cb->args[3] = j + 1;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 85190e69297b..4750d2b74d79 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -89,7 +89,7 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
89 nf->low_thresh = 0; 89 nf->low_thresh = 0;
90 90
91 local_bh_disable(); 91 local_bh_disable();
92 inet_frag_evictor(nf, f); 92 inet_frag_evictor(nf, f, true);
93 local_bh_enable(); 93 local_bh_enable();
94} 94}
95EXPORT_SYMBOL(inet_frags_exit_net); 95EXPORT_SYMBOL(inet_frags_exit_net);
@@ -158,11 +158,16 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
158} 158}
159EXPORT_SYMBOL(inet_frag_destroy); 159EXPORT_SYMBOL(inet_frag_destroy);
160 160
161int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f) 161int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
162{ 162{
163 struct inet_frag_queue *q; 163 struct inet_frag_queue *q;
164 int work, evicted = 0; 164 int work, evicted = 0;
165 165
166 if (!force) {
167 if (atomic_read(&nf->mem) <= nf->high_thresh)
168 return 0;
169 }
170
166 work = atomic_read(&nf->mem) - nf->low_thresh; 171 work = atomic_read(&nf->mem) - nf->low_thresh;
167 while (work > 0) { 172 while (work > 0) {
168 read_lock(&f->lock); 173 read_lock(&f->lock);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8d07c973409c..448e68546827 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -219,7 +219,7 @@ static void ip_evictor(struct net *net)
219{ 219{
220 int evicted; 220 int evicted;
221 221
222 evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags); 222 evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
223 if (evicted) 223 if (evicted)
224 IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted); 224 IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
225} 225}
@@ -523,6 +523,10 @@ found:
523 if (offset == 0) 523 if (offset == 0)
524 qp->q.last_in |= INET_FRAG_FIRST_IN; 524 qp->q.last_in |= INET_FRAG_FIRST_IN;
525 525
526 if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
527 skb->len + ihl > qp->q.max_size)
528 qp->q.max_size = skb->len + ihl;
529
526 if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 530 if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
527 qp->q.meat == qp->q.len) 531 qp->q.meat == qp->q.len)
528 return ip_frag_reasm(qp, prev, dev); 532 return ip_frag_reasm(qp, prev, dev);
@@ -646,9 +650,11 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
646 head->next = NULL; 650 head->next = NULL;
647 head->dev = dev; 651 head->dev = dev;
648 head->tstamp = qp->q.stamp; 652 head->tstamp = qp->q.stamp;
653 IPCB(head)->frag_max_size = qp->q.max_size;
649 654
650 iph = ip_hdr(head); 655 iph = ip_hdr(head);
651 iph->frag_off = 0; 656 /* max_size != 0 implies at least one fragment had IP_DF set */
657 iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
652 iph->tot_len = htons(len); 658 iph->tot_len = htons(len);
653 iph->tos |= ecn; 659 iph->tos |= ecn;
654 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 660 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
@@ -678,8 +684,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
678 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); 684 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
679 685
680 /* Start by cleaning up the memory. */ 686 /* Start by cleaning up the memory. */
681 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) 687 ip_evictor(net);
682 ip_evictor(net);
683 688
684 /* Lookup (or create) queue header */ 689 /* Lookup (or create) queue header */
685 if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) { 690 if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b062a98574f2..7240f8e2dd45 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -120,6 +120,10 @@
120 Alexey Kuznetsov. 120 Alexey Kuznetsov.
121 */ 121 */
122 122
123static bool log_ecn_error = true;
124module_param(log_ecn_error, bool, 0644);
125MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
126
123static struct rtnl_link_ops ipgre_link_ops __read_mostly; 127static struct rtnl_link_ops ipgre_link_ops __read_mostly;
124static int ipgre_tunnel_init(struct net_device *dev); 128static int ipgre_tunnel_init(struct net_device *dev);
125static void ipgre_tunnel_setup(struct net_device *dev); 129static void ipgre_tunnel_setup(struct net_device *dev);
@@ -204,7 +208,9 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
204 tot->rx_crc_errors = dev->stats.rx_crc_errors; 208 tot->rx_crc_errors = dev->stats.rx_crc_errors;
205 tot->rx_fifo_errors = dev->stats.rx_fifo_errors; 209 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
206 tot->rx_length_errors = dev->stats.rx_length_errors; 210 tot->rx_length_errors = dev->stats.rx_length_errors;
211 tot->rx_frame_errors = dev->stats.rx_frame_errors;
207 tot->rx_errors = dev->stats.rx_errors; 212 tot->rx_errors = dev->stats.rx_errors;
213
208 tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 214 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
209 tot->tx_carrier_errors = dev->stats.tx_carrier_errors; 215 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
210 tot->tx_dropped = dev->stats.tx_dropped; 216 tot->tx_dropped = dev->stats.tx_dropped;
@@ -214,11 +220,25 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
214 return tot; 220 return tot;
215} 221}
216 222
223/* Does key in tunnel parameters match packet */
224static bool ipgre_key_match(const struct ip_tunnel_parm *p,
225 __be16 flags, __be32 key)
226{
227 if (p->i_flags & GRE_KEY) {
228 if (flags & GRE_KEY)
229 return key == p->i_key;
230 else
231 return false; /* key expected, none present */
232 } else
233 return !(flags & GRE_KEY);
234}
235
217/* Given src, dst and key, find appropriate for input tunnel. */ 236/* Given src, dst and key, find appropriate for input tunnel. */
218 237
219static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, 238static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
220 __be32 remote, __be32 local, 239 __be32 remote, __be32 local,
221 __be32 key, __be16 gre_proto) 240 __be16 flags, __be32 key,
241 __be16 gre_proto)
222{ 242{
223 struct net *net = dev_net(dev); 243 struct net *net = dev_net(dev);
224 int link = dev->ifindex; 244 int link = dev->ifindex;
@@ -233,10 +253,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
233 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { 253 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
234 if (local != t->parms.iph.saddr || 254 if (local != t->parms.iph.saddr ||
235 remote != t->parms.iph.daddr || 255 remote != t->parms.iph.daddr ||
236 key != t->parms.i_key ||
237 !(t->dev->flags & IFF_UP)) 256 !(t->dev->flags & IFF_UP))
238 continue; 257 continue;
239 258
259 if (!ipgre_key_match(&t->parms, flags, key))
260 continue;
261
240 if (t->dev->type != ARPHRD_IPGRE && 262 if (t->dev->type != ARPHRD_IPGRE &&
241 t->dev->type != dev_type) 263 t->dev->type != dev_type)
242 continue; 264 continue;
@@ -257,10 +279,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
257 279
258 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { 280 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
259 if (remote != t->parms.iph.daddr || 281 if (remote != t->parms.iph.daddr ||
260 key != t->parms.i_key ||
261 !(t->dev->flags & IFF_UP)) 282 !(t->dev->flags & IFF_UP))
262 continue; 283 continue;
263 284
285 if (!ipgre_key_match(&t->parms, flags, key))
286 continue;
287
264 if (t->dev->type != ARPHRD_IPGRE && 288 if (t->dev->type != ARPHRD_IPGRE &&
265 t->dev->type != dev_type) 289 t->dev->type != dev_type)
266 continue; 290 continue;
@@ -283,10 +307,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
283 if ((local != t->parms.iph.saddr && 307 if ((local != t->parms.iph.saddr &&
284 (local != t->parms.iph.daddr || 308 (local != t->parms.iph.daddr ||
285 !ipv4_is_multicast(local))) || 309 !ipv4_is_multicast(local))) ||
286 key != t->parms.i_key ||
287 !(t->dev->flags & IFF_UP)) 310 !(t->dev->flags & IFF_UP))
288 continue; 311 continue;
289 312
313 if (!ipgre_key_match(&t->parms, flags, key))
314 continue;
315
290 if (t->dev->type != ARPHRD_IPGRE && 316 if (t->dev->type != ARPHRD_IPGRE &&
291 t->dev->type != dev_type) 317 t->dev->type != dev_type)
292 continue; 318 continue;
@@ -489,6 +515,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
489 const int code = icmp_hdr(skb)->code; 515 const int code = icmp_hdr(skb)->code;
490 struct ip_tunnel *t; 516 struct ip_tunnel *t;
491 __be16 flags; 517 __be16 flags;
518 __be32 key = 0;
492 519
493 flags = p[0]; 520 flags = p[0];
494 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { 521 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
@@ -505,6 +532,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
505 if (skb_headlen(skb) < grehlen) 532 if (skb_headlen(skb) < grehlen)
506 return; 533 return;
507 534
535 if (flags & GRE_KEY)
536 key = *(((__be32 *)p) + (grehlen / 4) - 1);
537
508 switch (type) { 538 switch (type) {
509 default: 539 default:
510 case ICMP_PARAMETERPROB: 540 case ICMP_PARAMETERPROB:
@@ -533,49 +563,34 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
533 break; 563 break;
534 } 564 }
535 565
536 rcu_read_lock();
537 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, 566 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
538 flags & GRE_KEY ? 567 flags, key, p[1]);
539 *(((__be32 *)p) + (grehlen / 4) - 1) : 0, 568
540 p[1]);
541 if (t == NULL) 569 if (t == NULL)
542 goto out; 570 return;
543 571
544 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 572 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
545 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 573 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
546 t->parms.link, 0, IPPROTO_GRE, 0); 574 t->parms.link, 0, IPPROTO_GRE, 0);
547 goto out; 575 return;
548 } 576 }
549 if (type == ICMP_REDIRECT) { 577 if (type == ICMP_REDIRECT) {
550 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 578 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
551 IPPROTO_GRE, 0); 579 IPPROTO_GRE, 0);
552 goto out; 580 return;
553 } 581 }
554 if (t->parms.iph.daddr == 0 || 582 if (t->parms.iph.daddr == 0 ||
555 ipv4_is_multicast(t->parms.iph.daddr)) 583 ipv4_is_multicast(t->parms.iph.daddr))
556 goto out; 584 return;
557 585
558 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 586 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
559 goto out; 587 return;
560 588
561 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) 589 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
562 t->err_count++; 590 t->err_count++;
563 else 591 else
564 t->err_count = 1; 592 t->err_count = 1;
565 t->err_time = jiffies; 593 t->err_time = jiffies;
566out:
567 rcu_read_unlock();
568}
569
570static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
571{
572 if (INET_ECN_is_ce(iph->tos)) {
573 if (skb->protocol == htons(ETH_P_IP)) {
574 IP_ECN_set_ce(ip_hdr(skb));
575 } else if (skb->protocol == htons(ETH_P_IPV6)) {
576 IP6_ECN_set_ce(ipv6_hdr(skb));
577 }
578 }
579} 594}
580 595
581static inline u8 596static inline u8
@@ -600,9 +615,10 @@ static int ipgre_rcv(struct sk_buff *skb)
600 struct ip_tunnel *tunnel; 615 struct ip_tunnel *tunnel;
601 int offset = 4; 616 int offset = 4;
602 __be16 gre_proto; 617 __be16 gre_proto;
618 int err;
603 619
604 if (!pskb_may_pull(skb, 16)) 620 if (!pskb_may_pull(skb, 16))
605 goto drop_nolock; 621 goto drop;
606 622
607 iph = ip_hdr(skb); 623 iph = ip_hdr(skb);
608 h = skb->data; 624 h = skb->data;
@@ -613,7 +629,7 @@ static int ipgre_rcv(struct sk_buff *skb)
613 - We do not support routing headers. 629 - We do not support routing headers.
614 */ 630 */
615 if (flags&(GRE_VERSION|GRE_ROUTING)) 631 if (flags&(GRE_VERSION|GRE_ROUTING))
616 goto drop_nolock; 632 goto drop;
617 633
618 if (flags&GRE_CSUM) { 634 if (flags&GRE_CSUM) {
619 switch (skb->ip_summed) { 635 switch (skb->ip_summed) {
@@ -641,10 +657,10 @@ static int ipgre_rcv(struct sk_buff *skb)
641 657
642 gre_proto = *(__be16 *)(h + 2); 658 gre_proto = *(__be16 *)(h + 2);
643 659
644 rcu_read_lock(); 660 tunnel = ipgre_tunnel_lookup(skb->dev,
645 if ((tunnel = ipgre_tunnel_lookup(skb->dev, 661 iph->saddr, iph->daddr, flags, key,
646 iph->saddr, iph->daddr, key, 662 gre_proto);
647 gre_proto))) { 663 if (tunnel) {
648 struct pcpu_tstats *tstats; 664 struct pcpu_tstats *tstats;
649 665
650 secpath_reset(skb); 666 secpath_reset(skb);
@@ -703,27 +719,33 @@ static int ipgre_rcv(struct sk_buff *skb)
703 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 719 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
704 } 720 }
705 721
722 __skb_tunnel_rx(skb, tunnel->dev);
723
724 skb_reset_network_header(skb);
725 err = IP_ECN_decapsulate(iph, skb);
726 if (unlikely(err)) {
727 if (log_ecn_error)
728 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
729 &iph->saddr, iph->tos);
730 if (err > 1) {
731 ++tunnel->dev->stats.rx_frame_errors;
732 ++tunnel->dev->stats.rx_errors;
733 goto drop;
734 }
735 }
736
706 tstats = this_cpu_ptr(tunnel->dev->tstats); 737 tstats = this_cpu_ptr(tunnel->dev->tstats);
707 u64_stats_update_begin(&tstats->syncp); 738 u64_stats_update_begin(&tstats->syncp);
708 tstats->rx_packets++; 739 tstats->rx_packets++;
709 tstats->rx_bytes += skb->len; 740 tstats->rx_bytes += skb->len;
710 u64_stats_update_end(&tstats->syncp); 741 u64_stats_update_end(&tstats->syncp);
711 742
712 __skb_tunnel_rx(skb, tunnel->dev); 743 gro_cells_receive(&tunnel->gro_cells, skb);
713
714 skb_reset_network_header(skb);
715 ipgre_ecn_decapsulate(iph, skb);
716
717 netif_rx(skb);
718
719 rcu_read_unlock();
720 return 0; 744 return 0;
721 } 745 }
722 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 746 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
723 747
724drop: 748drop:
725 rcu_read_unlock();
726drop_nolock:
727 kfree_skb(skb); 749 kfree_skb(skb);
728 return 0; 750 return 0;
729} 751}
@@ -745,6 +767,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
745 __be32 dst; 767 __be32 dst;
746 int mtu; 768 int mtu;
747 769
770 if (skb->ip_summed == CHECKSUM_PARTIAL &&
771 skb_checksum_help(skb))
772 goto tx_error;
773
748 if (dev->type == ARPHRD_ETHER) 774 if (dev->type == ARPHRD_ETHER)
749 IPCB(skb)->flags = 0; 775 IPCB(skb)->flags = 0;
750 776
@@ -1292,10 +1318,18 @@ static const struct net_device_ops ipgre_netdev_ops = {
1292 1318
1293static void ipgre_dev_free(struct net_device *dev) 1319static void ipgre_dev_free(struct net_device *dev)
1294{ 1320{
1321 struct ip_tunnel *tunnel = netdev_priv(dev);
1322
1323 gro_cells_destroy(&tunnel->gro_cells);
1295 free_percpu(dev->tstats); 1324 free_percpu(dev->tstats);
1296 free_netdev(dev); 1325 free_netdev(dev);
1297} 1326}
1298 1327
1328#define GRE_FEATURES (NETIF_F_SG | \
1329 NETIF_F_FRAGLIST | \
1330 NETIF_F_HIGHDMA | \
1331 NETIF_F_HW_CSUM)
1332
1299static void ipgre_tunnel_setup(struct net_device *dev) 1333static void ipgre_tunnel_setup(struct net_device *dev)
1300{ 1334{
1301 dev->netdev_ops = &ipgre_netdev_ops; 1335 dev->netdev_ops = &ipgre_netdev_ops;
@@ -1309,12 +1343,16 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1309 dev->addr_len = 4; 1343 dev->addr_len = 4;
1310 dev->features |= NETIF_F_NETNS_LOCAL; 1344 dev->features |= NETIF_F_NETNS_LOCAL;
1311 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1345 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1346
1347 dev->features |= GRE_FEATURES;
1348 dev->hw_features |= GRE_FEATURES;
1312} 1349}
1313 1350
1314static int ipgre_tunnel_init(struct net_device *dev) 1351static int ipgre_tunnel_init(struct net_device *dev)
1315{ 1352{
1316 struct ip_tunnel *tunnel; 1353 struct ip_tunnel *tunnel;
1317 struct iphdr *iph; 1354 struct iphdr *iph;
1355 int err;
1318 1356
1319 tunnel = netdev_priv(dev); 1357 tunnel = netdev_priv(dev);
1320 iph = &tunnel->parms.iph; 1358 iph = &tunnel->parms.iph;
@@ -1341,6 +1379,12 @@ static int ipgre_tunnel_init(struct net_device *dev)
1341 if (!dev->tstats) 1379 if (!dev->tstats)
1342 return -ENOMEM; 1380 return -ENOMEM;
1343 1381
1382 err = gro_cells_init(&tunnel->gro_cells, dev);
1383 if (err) {
1384 free_percpu(dev->tstats);
1385 return err;
1386 }
1387
1344 return 0; 1388 return 0;
1345} 1389}
1346 1390
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c196d749daf2..24a29a39e9a8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -467,7 +467,9 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
467 467
468 iph = ip_hdr(skb); 468 iph = ip_hdr(skb);
469 469
470 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { 470 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
471 (IPCB(skb)->frag_max_size &&
472 IPCB(skb)->frag_max_size > dst_mtu(&rt->dst)))) {
471 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 473 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
472 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 474 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
473 htonl(ip_skb_dst_mtu(skb))); 475 htonl(ip_skb_dst_mtu(skb)));
@@ -791,6 +793,7 @@ static int __ip_append_data(struct sock *sk,
791 struct flowi4 *fl4, 793 struct flowi4 *fl4,
792 struct sk_buff_head *queue, 794 struct sk_buff_head *queue,
793 struct inet_cork *cork, 795 struct inet_cork *cork,
796 struct page_frag *pfrag,
794 int getfrag(void *from, char *to, int offset, 797 int getfrag(void *from, char *to, int offset,
795 int len, int odd, struct sk_buff *skb), 798 int len, int odd, struct sk_buff *skb),
796 void *from, int length, int transhdrlen, 799 void *from, int length, int transhdrlen,
@@ -985,47 +988,30 @@ alloc_new_skb:
985 } 988 }
986 } else { 989 } else {
987 int i = skb_shinfo(skb)->nr_frags; 990 int i = skb_shinfo(skb)->nr_frags;
988 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
989 struct page *page = cork->page;
990 int off = cork->off;
991 unsigned int left;
992
993 if (page && (left = PAGE_SIZE - off) > 0) {
994 if (copy >= left)
995 copy = left;
996 if (page != skb_frag_page(frag)) {
997 if (i == MAX_SKB_FRAGS) {
998 err = -EMSGSIZE;
999 goto error;
1000 }
1001 skb_fill_page_desc(skb, i, page, off, 0);
1002 skb_frag_ref(skb, i);
1003 frag = &skb_shinfo(skb)->frags[i];
1004 }
1005 } else if (i < MAX_SKB_FRAGS) {
1006 if (copy > PAGE_SIZE)
1007 copy = PAGE_SIZE;
1008 page = alloc_pages(sk->sk_allocation, 0);
1009 if (page == NULL) {
1010 err = -ENOMEM;
1011 goto error;
1012 }
1013 cork->page = page;
1014 cork->off = 0;
1015 991
1016 skb_fill_page_desc(skb, i, page, 0, 0); 992 err = -ENOMEM;
1017 frag = &skb_shinfo(skb)->frags[i]; 993 if (!sk_page_frag_refill(sk, pfrag))
1018 } else {
1019 err = -EMSGSIZE;
1020 goto error;
1021 }
1022 if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
1023 offset, copy, skb->len, skb) < 0) {
1024 err = -EFAULT;
1025 goto error; 994 goto error;
995
996 if (!skb_can_coalesce(skb, i, pfrag->page,
997 pfrag->offset)) {
998 err = -EMSGSIZE;
999 if (i == MAX_SKB_FRAGS)
1000 goto error;
1001
1002 __skb_fill_page_desc(skb, i, pfrag->page,
1003 pfrag->offset, 0);
1004 skb_shinfo(skb)->nr_frags = ++i;
1005 get_page(pfrag->page);
1026 } 1006 }
1027 cork->off += copy; 1007 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1028 skb_frag_size_add(frag, copy); 1008 if (getfrag(from,
1009 page_address(pfrag->page) + pfrag->offset,
1010 offset, copy, skb->len, skb) < 0)
1011 goto error_efault;
1012
1013 pfrag->offset += copy;
1014 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1029 skb->len += copy; 1015 skb->len += copy;
1030 skb->data_len += copy; 1016 skb->data_len += copy;
1031 skb->truesize += copy; 1017 skb->truesize += copy;
@@ -1037,6 +1023,8 @@ alloc_new_skb:
1037 1023
1038 return 0; 1024 return 0;
1039 1025
1026error_efault:
1027 err = -EFAULT;
1040error: 1028error:
1041 cork->length -= length; 1029 cork->length -= length;
1042 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); 1030 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
@@ -1077,8 +1065,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1077 cork->dst = &rt->dst; 1065 cork->dst = &rt->dst;
1078 cork->length = 0; 1066 cork->length = 0;
1079 cork->tx_flags = ipc->tx_flags; 1067 cork->tx_flags = ipc->tx_flags;
1080 cork->page = NULL;
1081 cork->off = 0;
1082 1068
1083 return 0; 1069 return 0;
1084} 1070}
@@ -1115,7 +1101,8 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1115 transhdrlen = 0; 1101 transhdrlen = 0;
1116 } 1102 }
1117 1103
1118 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag, 1104 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1105 sk_page_frag(sk), getfrag,
1119 from, length, transhdrlen, flags); 1106 from, length, transhdrlen, flags);
1120} 1107}
1121 1108
@@ -1437,7 +1424,8 @@ struct sk_buff *ip_make_skb(struct sock *sk,
1437 if (err) 1424 if (err)
1438 return ERR_PTR(err); 1425 return ERR_PTR(err);
1439 1426
1440 err = __ip_append_data(sk, fl4, &queue, &cork, getfrag, 1427 err = __ip_append_data(sk, fl4, &queue, &cork,
1428 &current->task_frag, getfrag,
1441 from, length, transhdrlen, flags); 1429 from, length, transhdrlen, flags);
1442 if (err) { 1430 if (err) {
1443 __ip_flush_pending_frames(sk, &queue, &cork); 1431 __ip_flush_pending_frames(sk, &queue, &cork);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3511ffba7bd4..978bca4818ae 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -304,7 +304,6 @@ static int vti_err(struct sk_buff *skb, u32 info)
304 304
305 err = -ENOENT; 305 err = -ENOENT;
306 306
307 rcu_read_lock();
308 t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 307 t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
309 if (t == NULL) 308 if (t == NULL)
310 goto out; 309 goto out;
@@ -326,7 +325,6 @@ static int vti_err(struct sk_buff *skb, u32 info)
326 t->err_count = 1; 325 t->err_count = 1;
327 t->err_time = jiffies; 326 t->err_time = jiffies;
328out: 327out:
329 rcu_read_unlock();
330 return err; 328 return err;
331} 329}
332 330
@@ -336,7 +334,6 @@ static int vti_rcv(struct sk_buff *skb)
336 struct ip_tunnel *tunnel; 334 struct ip_tunnel *tunnel;
337 const struct iphdr *iph = ip_hdr(skb); 335 const struct iphdr *iph = ip_hdr(skb);
338 336
339 rcu_read_lock();
340 tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr); 337 tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
341 if (tunnel != NULL) { 338 if (tunnel != NULL) {
342 struct pcpu_tstats *tstats; 339 struct pcpu_tstats *tstats;
@@ -348,10 +345,8 @@ static int vti_rcv(struct sk_buff *skb)
348 u64_stats_update_end(&tstats->syncp); 345 u64_stats_update_end(&tstats->syncp);
349 346
350 skb->dev = tunnel->dev; 347 skb->dev = tunnel->dev;
351 rcu_read_unlock();
352 return 1; 348 return 1;
353 } 349 }
354 rcu_read_unlock();
355 350
356 return -1; 351 return -1;
357} 352}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 67e8a6b086ea..798358b10717 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -583,6 +583,17 @@ static void __init ic_rarp_send_if(struct ic_device *d)
583#endif 583#endif
584 584
585/* 585/*
586 * Predefine Nameservers
587 */
588static inline void __init ic_nameservers_predef(void)
589{
590 int i;
591
592 for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
593 ic_nameservers[i] = NONE;
594}
595
596/*
586 * DHCP/BOOTP support. 597 * DHCP/BOOTP support.
587 */ 598 */
588 599
@@ -747,10 +758,7 @@ static void __init ic_bootp_init_ext(u8 *e)
747 */ 758 */
748static inline void __init ic_bootp_init(void) 759static inline void __init ic_bootp_init(void)
749{ 760{
750 int i; 761 ic_nameservers_predef();
751
752 for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
753 ic_nameservers[i] = NONE;
754 762
755 dev_add_pack(&bootp_packet_type); 763 dev_add_pack(&bootp_packet_type);
756} 764}
@@ -1379,6 +1387,7 @@ static int __init ip_auto_config(void)
1379 int retries = CONF_OPEN_RETRIES; 1387 int retries = CONF_OPEN_RETRIES;
1380#endif 1388#endif
1381 int err; 1389 int err;
1390 unsigned int i;
1382 1391
1383#ifdef CONFIG_PROC_FS 1392#ifdef CONFIG_PROC_FS
1384 proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); 1393 proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1499,7 +1508,15 @@ static int __init ip_auto_config(void)
1499 &ic_servaddr, &root_server_addr, root_server_path); 1508 &ic_servaddr, &root_server_addr, root_server_path);
1500 if (ic_dev_mtu) 1509 if (ic_dev_mtu)
1501 pr_cont(", mtu=%d", ic_dev_mtu); 1510 pr_cont(", mtu=%d", ic_dev_mtu);
1502 pr_cont("\n"); 1511 for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
1512 if (ic_nameservers[i] != NONE) {
1513 pr_info(" nameserver%u=%pI4",
1514 i, &ic_nameservers[i]);
1515 break;
1516 }
1517 for (i++; i < CONF_NAMESERVERS_MAX; i++)
1518 if (ic_nameservers[i] != NONE)
1519 pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
1503#endif /* !SILENT */ 1520#endif /* !SILENT */
1504 1521
1505 return 0; 1522 return 0;
@@ -1570,6 +1587,8 @@ static int __init ip_auto_config_setup(char *addrs)
1570 return 1; 1587 return 1;
1571 } 1588 }
1572 1589
1590 ic_nameservers_predef();
1591
1573 /* Parse string for static IP assignment. */ 1592 /* Parse string for static IP assignment. */
1574 ip = addrs; 1593 ip = addrs;
1575 while (ip && *ip) { 1594 while (ip && *ip) {
@@ -1613,6 +1632,20 @@ static int __init ip_auto_config_setup(char *addrs)
1613 ic_enable = 0; 1632 ic_enable = 0;
1614 } 1633 }
1615 break; 1634 break;
1635 case 7:
1636 if (CONF_NAMESERVERS_MAX >= 1) {
1637 ic_nameservers[0] = in_aton(ip);
1638 if (ic_nameservers[0] == ANY)
1639 ic_nameservers[0] = NONE;
1640 }
1641 break;
1642 case 8:
1643 if (CONF_NAMESERVERS_MAX >= 2) {
1644 ic_nameservers[1] = in_aton(ip);
1645 if (ic_nameservers[1] == ANY)
1646 ic_nameservers[1] = NONE;
1647 }
1648 break;
1616 } 1649 }
1617 } 1650 }
1618 ip = cp; 1651 ip = cp;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 99af1f0cc658..e15b45297c09 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -120,6 +120,10 @@
120#define HASH_SIZE 16 120#define HASH_SIZE 16
121#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 121#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
122 122
123static bool log_ecn_error = true;
124module_param(log_ecn_error, bool, 0644);
125MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
126
123static int ipip_net_id __read_mostly; 127static int ipip_net_id __read_mostly;
124struct ipip_net { 128struct ipip_net {
125 struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE]; 129 struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
@@ -365,8 +369,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
365 } 369 }
366 370
367 err = -ENOENT; 371 err = -ENOENT;
368
369 rcu_read_lock();
370 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 372 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
371 if (t == NULL) 373 if (t == NULL)
372 goto out; 374 goto out;
@@ -398,34 +400,22 @@ static int ipip_err(struct sk_buff *skb, u32 info)
398 t->err_count = 1; 400 t->err_count = 1;
399 t->err_time = jiffies; 401 t->err_time = jiffies;
400out: 402out:
401 rcu_read_unlock();
402 return err;
403}
404
405static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
406 struct sk_buff *skb)
407{
408 struct iphdr *inner_iph = ip_hdr(skb);
409 403
410 if (INET_ECN_is_ce(outer_iph->tos)) 404 return err;
411 IP_ECN_set_ce(inner_iph);
412} 405}
413 406
414static int ipip_rcv(struct sk_buff *skb) 407static int ipip_rcv(struct sk_buff *skb)
415{ 408{
416 struct ip_tunnel *tunnel; 409 struct ip_tunnel *tunnel;
417 const struct iphdr *iph = ip_hdr(skb); 410 const struct iphdr *iph = ip_hdr(skb);
411 int err;
418 412
419 rcu_read_lock();
420 tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr); 413 tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
421 if (tunnel != NULL) { 414 if (tunnel != NULL) {
422 struct pcpu_tstats *tstats; 415 struct pcpu_tstats *tstats;
423 416
424 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 417 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
425 rcu_read_unlock(); 418 goto drop;
426 kfree_skb(skb);
427 return 0;
428 }
429 419
430 secpath_reset(skb); 420 secpath_reset(skb);
431 421
@@ -434,24 +424,35 @@ static int ipip_rcv(struct sk_buff *skb)
434 skb->protocol = htons(ETH_P_IP); 424 skb->protocol = htons(ETH_P_IP);
435 skb->pkt_type = PACKET_HOST; 425 skb->pkt_type = PACKET_HOST;
436 426
427 __skb_tunnel_rx(skb, tunnel->dev);
428
429 err = IP_ECN_decapsulate(iph, skb);
430 if (unlikely(err)) {
431 if (log_ecn_error)
432 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
433 &iph->saddr, iph->tos);
434 if (err > 1) {
435 ++tunnel->dev->stats.rx_frame_errors;
436 ++tunnel->dev->stats.rx_errors;
437 goto drop;
438 }
439 }
440
437 tstats = this_cpu_ptr(tunnel->dev->tstats); 441 tstats = this_cpu_ptr(tunnel->dev->tstats);
438 u64_stats_update_begin(&tstats->syncp); 442 u64_stats_update_begin(&tstats->syncp);
439 tstats->rx_packets++; 443 tstats->rx_packets++;
440 tstats->rx_bytes += skb->len; 444 tstats->rx_bytes += skb->len;
441 u64_stats_update_end(&tstats->syncp); 445 u64_stats_update_end(&tstats->syncp);
442 446
443 __skb_tunnel_rx(skb, tunnel->dev);
444
445 ipip_ecn_decapsulate(iph, skb);
446
447 netif_rx(skb); 447 netif_rx(skb);
448
449 rcu_read_unlock();
450 return 0; 448 return 0;
451 } 449 }
452 rcu_read_unlock();
453 450
454 return -1; 451 return -1;
452
453drop:
454 kfree_skb(skb);
455 return 0;
455} 456}
456 457
457/* 458/*
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ebdf06f938bf..1daa95c2a0ba 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -626,7 +626,7 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
626 e->error = -ETIMEDOUT; 626 e->error = -ETIMEDOUT;
627 memset(&e->msg, 0, sizeof(e->msg)); 627 memset(&e->msg, 0, sizeof(e->msg));
628 628
629 rtnl_unicast(skb, net, NETLINK_CB(skb).pid); 629 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
630 } else { 630 } else {
631 kfree_skb(skb); 631 kfree_skb(skb);
632 } 632 }
@@ -870,7 +870,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
870 memset(&e->msg, 0, sizeof(e->msg)); 870 memset(&e->msg, 0, sizeof(e->msg));
871 } 871 }
872 872
873 rtnl_unicast(skb, net, NETLINK_CB(skb).pid); 873 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
874 } else { 874 } else {
875 ip_mr_forward(net, mrt, skb, c, 0); 875 ip_mr_forward(net, mrt, skb, c, 0);
876 } 876 }
@@ -1808,7 +1808,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1808 .flowi4_oif = (rt_is_output_route(rt) ? 1808 .flowi4_oif = (rt_is_output_route(rt) ?
1809 skb->dev->ifindex : 0), 1809 skb->dev->ifindex : 0),
1810 .flowi4_iif = (rt_is_output_route(rt) ? 1810 .flowi4_iif = (rt_is_output_route(rt) ?
1811 net->loopback_dev->ifindex : 1811 LOOPBACK_IFINDEX :
1812 skb->dev->ifindex), 1812 skb->dev->ifindex),
1813 .flowi4_mark = skb->mark, 1813 .flowi4_mark = skb->mark,
1814 }; 1814 };
@@ -2117,12 +2117,12 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2117} 2117}
2118 2118
2119static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2119static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2120 u32 pid, u32 seq, struct mfc_cache *c) 2120 u32 portid, u32 seq, struct mfc_cache *c)
2121{ 2121{
2122 struct nlmsghdr *nlh; 2122 struct nlmsghdr *nlh;
2123 struct rtmsg *rtm; 2123 struct rtmsg *rtm;
2124 2124
2125 nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2125 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2126 if (nlh == NULL) 2126 if (nlh == NULL)
2127 return -EMSGSIZE; 2127 return -EMSGSIZE;
2128 2128
@@ -2176,7 +2176,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2176 if (e < s_e) 2176 if (e < s_e)
2177 goto next_entry; 2177 goto next_entry;
2178 if (ipmr_fill_mroute(mrt, skb, 2178 if (ipmr_fill_mroute(mrt, skb,
2179 NETLINK_CB(cb->skb).pid, 2179 NETLINK_CB(cb->skb).portid,
2180 cb->nlh->nlmsg_seq, 2180 cb->nlh->nlmsg_seq,
2181 mfc) < 0) 2181 mfc) < 0)
2182 goto done; 2182 goto done;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index ed1b36783192..4c0cf63dd92e 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -72,43 +72,6 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
72} 72}
73EXPORT_SYMBOL(ip_route_me_harder); 73EXPORT_SYMBOL(ip_route_me_harder);
74 74
75#ifdef CONFIG_XFRM
76int ip_xfrm_me_harder(struct sk_buff *skb)
77{
78 struct flowi fl;
79 unsigned int hh_len;
80 struct dst_entry *dst;
81
82 if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
83 return 0;
84 if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
85 return -1;
86
87 dst = skb_dst(skb);
88 if (dst->xfrm)
89 dst = ((struct xfrm_dst *)dst)->route;
90 dst_hold(dst);
91
92 dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
93 if (IS_ERR(dst))
94 return -1;
95
96 skb_dst_drop(skb);
97 skb_dst_set(skb, dst);
98
99 /* Change in oif may mean change in hh_len. */
100 hh_len = skb_dst(skb)->dev->hard_header_len;
101 if (skb_headroom(skb) < hh_len &&
102 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
103 return -1;
104 return 0;
105}
106EXPORT_SYMBOL(ip_xfrm_me_harder);
107#endif
108
109void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
110EXPORT_SYMBOL(ip_nat_decode_session);
111
112/* 75/*
113 * Extra routing may needed on local out, as the QUEUE target never 76 * Extra routing may needed on local out, as the QUEUE target never
114 * returns control to the table. 77 * returns control to the table.
@@ -225,12 +188,12 @@ static const struct nf_afinfo nf_ip_afinfo = {
225 .route_key_size = sizeof(struct ip_rt_info), 188 .route_key_size = sizeof(struct ip_rt_info),
226}; 189};
227 190
228static int ipv4_netfilter_init(void) 191static int __init ipv4_netfilter_init(void)
229{ 192{
230 return nf_register_afinfo(&nf_ip_afinfo); 193 return nf_register_afinfo(&nf_ip_afinfo);
231} 194}
232 195
233static void ipv4_netfilter_fini(void) 196static void __exit ipv4_netfilter_fini(void)
234{ 197{
235 nf_unregister_afinfo(&nf_ip_afinfo); 198 nf_unregister_afinfo(&nf_ip_afinfo);
236} 199}
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index fcc543cd987a..d8d6f2a5bf12 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -143,25 +143,22 @@ config IP_NF_TARGET_ULOG
143 To compile it as a module, choose M here. If unsure, say N. 143 To compile it as a module, choose M here. If unsure, say N.
144 144
145# NAT + specific targets: nf_conntrack 145# NAT + specific targets: nf_conntrack
146config NF_NAT 146config NF_NAT_IPV4
147 tristate "Full NAT" 147 tristate "IPv4 NAT"
148 depends on NF_CONNTRACK_IPV4 148 depends on NF_CONNTRACK_IPV4
149 default m if NETFILTER_ADVANCED=n 149 default m if NETFILTER_ADVANCED=n
150 select NF_NAT
150 help 151 help
151 The Full NAT option allows masquerading, port forwarding and other 152 The IPv4 NAT option allows masquerading, port forwarding and other
152 forms of full Network Address Port Translation. It is controlled by 153 forms of full Network Address Port Translation. It is controlled by
153 the `nat' table in iptables: see the man page for iptables(8). 154 the `nat' table in iptables: see the man page for iptables(8).
154 155
155 To compile it as a module, choose M here. If unsure, say N. 156 To compile it as a module, choose M here. If unsure, say N.
156 157
157config NF_NAT_NEEDED 158if NF_NAT_IPV4
158 bool
159 depends on NF_NAT
160 default y
161 159
162config IP_NF_TARGET_MASQUERADE 160config IP_NF_TARGET_MASQUERADE
163 tristate "MASQUERADE target support" 161 tristate "MASQUERADE target support"
164 depends on NF_NAT
165 default m if NETFILTER_ADVANCED=n 162 default m if NETFILTER_ADVANCED=n
166 help 163 help
167 Masquerading is a special case of NAT: all outgoing connections are 164 Masquerading is a special case of NAT: all outgoing connections are
@@ -174,30 +171,27 @@ config IP_NF_TARGET_MASQUERADE
174 171
175config IP_NF_TARGET_NETMAP 172config IP_NF_TARGET_NETMAP
176 tristate "NETMAP target support" 173 tristate "NETMAP target support"
177 depends on NF_NAT
178 depends on NETFILTER_ADVANCED 174 depends on NETFILTER_ADVANCED
179 help 175 select NETFILTER_XT_TARGET_NETMAP
180 NETMAP is an implementation of static 1:1 NAT mapping of network 176 ---help---
181 addresses. It maps the network address part, while keeping the host 177 This is a backwards-compat option for the user's convenience
182 address part intact. 178 (e.g. when running oldconfig). It selects
183 179 CONFIG_NETFILTER_XT_TARGET_NETMAP.
184 To compile it as a module, choose M here. If unsure, say N.
185 180
186config IP_NF_TARGET_REDIRECT 181config IP_NF_TARGET_REDIRECT
187 tristate "REDIRECT target support" 182 tristate "REDIRECT target support"
188 depends on NF_NAT
189 depends on NETFILTER_ADVANCED 183 depends on NETFILTER_ADVANCED
190 help 184 select NETFILTER_XT_TARGET_REDIRECT
191 REDIRECT is a special case of NAT: all incoming connections are 185 ---help---
192 mapped onto the incoming interface's address, causing the packets to 186 This is a backwards-compat option for the user's convenience
193 come to the local machine instead of passing through. This is 187 (e.g. when running oldconfig). It selects
194 useful for transparent proxies. 188 CONFIG_NETFILTER_XT_TARGET_REDIRECT.
195 189
196 To compile it as a module, choose M here. If unsure, say N. 190endif
197 191
198config NF_NAT_SNMP_BASIC 192config NF_NAT_SNMP_BASIC
199 tristate "Basic SNMP-ALG support" 193 tristate "Basic SNMP-ALG support"
200 depends on NF_CONNTRACK_SNMP && NF_NAT 194 depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
201 depends on NETFILTER_ADVANCED 195 depends on NETFILTER_ADVANCED
202 default NF_NAT && NF_CONNTRACK_SNMP 196 default NF_NAT && NF_CONNTRACK_SNMP
203 ---help--- 197 ---help---
@@ -219,61 +213,21 @@ config NF_NAT_SNMP_BASIC
219# <expr> '&&' <expr> (6) 213# <expr> '&&' <expr> (6)
220# 214#
221# (6) Returns the result of min(/expr/, /expr/). 215# (6) Returns the result of min(/expr/, /expr/).
222config NF_NAT_PROTO_DCCP
223 tristate
224 depends on NF_NAT && NF_CT_PROTO_DCCP
225 default NF_NAT && NF_CT_PROTO_DCCP
226 216
227config NF_NAT_PROTO_GRE 217config NF_NAT_PROTO_GRE
228 tristate 218 tristate
229 depends on NF_NAT && NF_CT_PROTO_GRE 219 depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
230
231config NF_NAT_PROTO_UDPLITE
232 tristate
233 depends on NF_NAT && NF_CT_PROTO_UDPLITE
234 default NF_NAT && NF_CT_PROTO_UDPLITE
235
236config NF_NAT_PROTO_SCTP
237 tristate
238 default NF_NAT && NF_CT_PROTO_SCTP
239 depends on NF_NAT && NF_CT_PROTO_SCTP
240 select LIBCRC32C
241
242config NF_NAT_FTP
243 tristate
244 depends on NF_CONNTRACK && NF_NAT
245 default NF_NAT && NF_CONNTRACK_FTP
246
247config NF_NAT_IRC
248 tristate
249 depends on NF_CONNTRACK && NF_NAT
250 default NF_NAT && NF_CONNTRACK_IRC
251
252config NF_NAT_TFTP
253 tristate
254 depends on NF_CONNTRACK && NF_NAT
255 default NF_NAT && NF_CONNTRACK_TFTP
256
257config NF_NAT_AMANDA
258 tristate
259 depends on NF_CONNTRACK && NF_NAT
260 default NF_NAT && NF_CONNTRACK_AMANDA
261 220
262config NF_NAT_PPTP 221config NF_NAT_PPTP
263 tristate 222 tristate
264 depends on NF_CONNTRACK && NF_NAT 223 depends on NF_CONNTRACK && NF_NAT_IPV4
265 default NF_NAT && NF_CONNTRACK_PPTP 224 default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
266 select NF_NAT_PROTO_GRE 225 select NF_NAT_PROTO_GRE
267 226
268config NF_NAT_H323 227config NF_NAT_H323
269 tristate 228 tristate
270 depends on NF_CONNTRACK && NF_NAT 229 depends on NF_CONNTRACK && NF_NAT_IPV4
271 default NF_NAT && NF_CONNTRACK_H323 230 default NF_NAT_IPV4 && NF_CONNTRACK_H323
272
273config NF_NAT_SIP
274 tristate
275 depends on NF_CONNTRACK && NF_NAT
276 default NF_NAT && NF_CONNTRACK_SIP
277 231
278# mangle + specific targets 232# mangle + specific targets
279config IP_NF_MANGLE 233config IP_NF_MANGLE
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c20674dc9452..007b128eecc9 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -10,32 +10,22 @@ nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
10endif 10endif
11endif 11endif
12 12
13nf_nat-y := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
14iptable_nat-y := nf_nat_rule.o nf_nat_standalone.o
15
16# connection tracking 13# connection tracking
17obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o 14obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
18 15
19obj-$(CONFIG_NF_NAT) += nf_nat.o 16nf_nat_ipv4-y := nf_nat_l3proto_ipv4.o nf_nat_proto_icmp.o
17obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
20 18
21# defrag 19# defrag
22obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o 20obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
23 21
24# NAT helpers (nf_conntrack) 22# NAT helpers (nf_conntrack)
25obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
26obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
27obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o 23obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
28obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
29obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o 24obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
30obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
31obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o 25obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
32obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
33 26
34# NAT protocols (nf_nat) 27# NAT protocols (nf_nat)
35obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
36obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o 28obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
37obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
38obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
39 29
40# generic IP tables 30# generic IP tables
41obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o 31obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
@@ -43,7 +33,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
43# the three instances of ip_tables 33# the three instances of ip_tables
44obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o 34obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
45obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o 35obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
46obj-$(CONFIG_NF_NAT) += iptable_nat.o 36obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
47obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o 37obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
48obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o 38obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
49 39
@@ -55,8 +45,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
55obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o 45obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
56obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o 46obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
57obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o 47obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
58obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
59obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
60obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o 48obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
61obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o 49obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
62 50
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index cbb6a1a6f6f7..5d5d4d1be9c2 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -19,9 +19,9 @@
19#include <net/ip.h> 19#include <net/ip.h>
20#include <net/checksum.h> 20#include <net/checksum.h>
21#include <net/route.h> 21#include <net/route.h>
22#include <net/netfilter/nf_nat_rule.h>
23#include <linux/netfilter_ipv4.h> 22#include <linux/netfilter_ipv4.h>
24#include <linux/netfilter/x_tables.h> 23#include <linux/netfilter/x_tables.h>
24#include <net/netfilter/nf_nat.h>
25 25
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 27MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -49,7 +49,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
49 struct nf_conn *ct; 49 struct nf_conn *ct;
50 struct nf_conn_nat *nat; 50 struct nf_conn_nat *nat;
51 enum ip_conntrack_info ctinfo; 51 enum ip_conntrack_info ctinfo;
52 struct nf_nat_ipv4_range newrange; 52 struct nf_nat_range newrange;
53 const struct nf_nat_ipv4_multi_range_compat *mr; 53 const struct nf_nat_ipv4_multi_range_compat *mr;
54 const struct rtable *rt; 54 const struct rtable *rt;
55 __be32 newsrc, nh; 55 __be32 newsrc, nh;
@@ -80,10 +80,13 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
80 nat->masq_index = par->out->ifindex; 80 nat->masq_index = par->out->ifindex;
81 81
82 /* Transfer from original range. */ 82 /* Transfer from original range. */
83 newrange = ((struct nf_nat_ipv4_range) 83 memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
84 { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS, 84 memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
85 newsrc, newsrc, 85 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
86 mr->range[0].min, mr->range[0].max }); 86 newrange.min_addr.ip = newsrc;
87 newrange.max_addr.ip = newsrc;
88 newrange.min_proto = mr->range[0].min;
89 newrange.max_proto = mr->range[0].max;
87 90
88 /* Hand modified range to generic setup. */ 91 /* Hand modified range to generic setup. */
89 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); 92 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
@@ -96,7 +99,8 @@ device_cmp(struct nf_conn *i, void *ifindex)
96 99
97 if (!nat) 100 if (!nat)
98 return 0; 101 return 0;
99 102 if (nf_ct_l3num(i) != NFPROTO_IPV4)
103 return 0;
100 return nat->masq_index == (int)(long)ifindex; 104 return nat->masq_index == (int)(long)ifindex;
101} 105}
102 106
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
deleted file mode 100644
index b5bfbbabf70d..000000000000
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ /dev/null
@@ -1,98 +0,0 @@
1/* NETMAP - static NAT mapping of IP network addresses (1:1).
2 * The mapping can be applied to source (POSTROUTING),
3 * destination (PREROUTING), or both (with separate rules).
4 */
5
6/* (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/ip.h>
14#include <linux/module.h>
15#include <linux/netdevice.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter_ipv4.h>
18#include <linux/netfilter/x_tables.h>
19#include <net/netfilter/nf_nat_rule.h>
20
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>");
23MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
24
25static int netmap_tg_check(const struct xt_tgchk_param *par)
26{
27 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
28
29 if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
30 pr_debug("bad MAP_IPS.\n");
31 return -EINVAL;
32 }
33 if (mr->rangesize != 1) {
34 pr_debug("bad rangesize %u.\n", mr->rangesize);
35 return -EINVAL;
36 }
37 return 0;
38}
39
40static unsigned int
41netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
42{
43 struct nf_conn *ct;
44 enum ip_conntrack_info ctinfo;
45 __be32 new_ip, netmask;
46 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
47 struct nf_nat_ipv4_range newrange;
48
49 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
50 par->hooknum == NF_INET_POST_ROUTING ||
51 par->hooknum == NF_INET_LOCAL_OUT ||
52 par->hooknum == NF_INET_LOCAL_IN);
53 ct = nf_ct_get(skb, &ctinfo);
54
55 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
56
57 if (par->hooknum == NF_INET_PRE_ROUTING ||
58 par->hooknum == NF_INET_LOCAL_OUT)
59 new_ip = ip_hdr(skb)->daddr & ~netmask;
60 else
61 new_ip = ip_hdr(skb)->saddr & ~netmask;
62 new_ip |= mr->range[0].min_ip & netmask;
63
64 newrange = ((struct nf_nat_ipv4_range)
65 { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
66 new_ip, new_ip,
67 mr->range[0].min, mr->range[0].max });
68
69 /* Hand modified range to generic setup. */
70 return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
71}
72
73static struct xt_target netmap_tg_reg __read_mostly = {
74 .name = "NETMAP",
75 .family = NFPROTO_IPV4,
76 .target = netmap_tg,
77 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
78 .table = "nat",
79 .hooks = (1 << NF_INET_PRE_ROUTING) |
80 (1 << NF_INET_POST_ROUTING) |
81 (1 << NF_INET_LOCAL_OUT) |
82 (1 << NF_INET_LOCAL_IN),
83 .checkentry = netmap_tg_check,
84 .me = THIS_MODULE
85};
86
87static int __init netmap_tg_init(void)
88{
89 return xt_register_target(&netmap_tg_reg);
90}
91
92static void __exit netmap_tg_exit(void)
93{
94 xt_unregister_target(&netmap_tg_reg);
95}
96
97module_init(netmap_tg_init);
98module_exit(netmap_tg_exit);
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
deleted file mode 100644
index 7c0103a5203e..000000000000
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ /dev/null
@@ -1,110 +0,0 @@
1/* Redirect. Simple mapping which alters dst to a local IP address. */
2/* (C) 1999-2001 Paul `Rusty' Russell
3 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10#include <linux/types.h>
11#include <linux/ip.h>
12#include <linux/timer.h>
13#include <linux/module.h>
14#include <linux/netfilter.h>
15#include <linux/netdevice.h>
16#include <linux/if.h>
17#include <linux/inetdevice.h>
18#include <net/protocol.h>
19#include <net/checksum.h>
20#include <linux/netfilter_ipv4.h>
21#include <linux/netfilter/x_tables.h>
22#include <net/netfilter/nf_nat_rule.h>
23
24MODULE_LICENSE("GPL");
25MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
26MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
27
28/* FIXME: Take multiple ranges --RR */
29static int redirect_tg_check(const struct xt_tgchk_param *par)
30{
31 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
32
33 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
34 pr_debug("bad MAP_IPS.\n");
35 return -EINVAL;
36 }
37 if (mr->rangesize != 1) {
38 pr_debug("bad rangesize %u.\n", mr->rangesize);
39 return -EINVAL;
40 }
41 return 0;
42}
43
44static unsigned int
45redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
46{
47 struct nf_conn *ct;
48 enum ip_conntrack_info ctinfo;
49 __be32 newdst;
50 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
51 struct nf_nat_ipv4_range newrange;
52
53 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
54 par->hooknum == NF_INET_LOCAL_OUT);
55
56 ct = nf_ct_get(skb, &ctinfo);
57 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
58
59 /* Local packets: make them go to loopback */
60 if (par->hooknum == NF_INET_LOCAL_OUT)
61 newdst = htonl(0x7F000001);
62 else {
63 struct in_device *indev;
64 struct in_ifaddr *ifa;
65
66 newdst = 0;
67
68 rcu_read_lock();
69 indev = __in_dev_get_rcu(skb->dev);
70 if (indev && (ifa = indev->ifa_list))
71 newdst = ifa->ifa_local;
72 rcu_read_unlock();
73
74 if (!newdst)
75 return NF_DROP;
76 }
77
78 /* Transfer from original range. */
79 newrange = ((struct nf_nat_ipv4_range)
80 { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
81 newdst, newdst,
82 mr->range[0].min, mr->range[0].max });
83
84 /* Hand modified range to generic setup. */
85 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
86}
87
88static struct xt_target redirect_tg_reg __read_mostly = {
89 .name = "REDIRECT",
90 .family = NFPROTO_IPV4,
91 .target = redirect_tg,
92 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
93 .table = "nat",
94 .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
95 .checkentry = redirect_tg_check,
96 .me = THIS_MODULE,
97};
98
99static int __init redirect_tg_init(void)
100{
101 return xt_register_target(&redirect_tg_reg);
102}
103
104static void __exit redirect_tg_exit(void)
105{
106 xt_unregister_target(&redirect_tg_reg);
107}
108
109module_init(redirect_tg_init);
110module_exit(redirect_tg_exit);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 1109f7f6c254..b5ef3cba2250 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -396,8 +396,7 @@ static int __init ulog_tg_init(void)
396 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 396 for (i = 0; i < ULOG_MAXNLGROUPS; i++)
397 setup_timer(&ulog_buffers[i].timer, ulog_timer, i); 397 setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
398 398
399 nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, 399 nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
400 THIS_MODULE, &cfg);
401 if (!nflognl) 400 if (!nflognl)
402 return -ENOMEM; 401 return -ENOMEM;
403 402
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 31371be8174b..c30130062cd6 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
85 return ipv4_is_local_multicast(iph->daddr) ^ invert; 85 return ipv4_is_local_multicast(iph->daddr) ^ invert;
86 flow.flowi4_iif = 0; 86 flow.flowi4_iif = 0;
87 } else { 87 } else {
88 flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex; 88 flow.flowi4_iif = LOOPBACK_IFINDEX;
89 } 89 }
90 90
91 flow.daddr = iph->saddr; 91 flow.daddr = iph->saddr;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 851acec852d2..6b3da5cf54e9 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -69,9 +69,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
69 net->ipv4.iptable_filter = 69 net->ipv4.iptable_filter =
70 ipt_register_table(net, &packet_filter, repl); 70 ipt_register_table(net, &packet_filter, repl);
71 kfree(repl); 71 kfree(repl);
72 if (IS_ERR(net->ipv4.iptable_filter)) 72 return PTR_RET(net->ipv4.iptable_filter);
73 return PTR_ERR(net->ipv4.iptable_filter);
74 return 0;
75} 73}
76 74
77static void __net_exit iptable_filter_net_exit(struct net *net) 75static void __net_exit iptable_filter_net_exit(struct net *net)
@@ -96,14 +94,10 @@ static int __init iptable_filter_init(void)
96 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook); 94 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
97 if (IS_ERR(filter_ops)) { 95 if (IS_ERR(filter_ops)) {
98 ret = PTR_ERR(filter_ops); 96 ret = PTR_ERR(filter_ops);
99 goto cleanup_table; 97 unregister_pernet_subsys(&iptable_filter_net_ops);
100 } 98 }
101 99
102 return ret; 100 return ret;
103
104 cleanup_table:
105 unregister_pernet_subsys(&iptable_filter_net_ops);
106 return ret;
107} 101}
108 102
109static void __exit iptable_filter_fini(void) 103static void __exit iptable_filter_fini(void)
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index aef5d1fbe77d..85d88f206447 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -104,9 +104,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
104 net->ipv4.iptable_mangle = 104 net->ipv4.iptable_mangle =
105 ipt_register_table(net, &packet_mangler, repl); 105 ipt_register_table(net, &packet_mangler, repl);
106 kfree(repl); 106 kfree(repl);
107 if (IS_ERR(net->ipv4.iptable_mangle)) 107 return PTR_RET(net->ipv4.iptable_mangle);
108 return PTR_ERR(net->ipv4.iptable_mangle);
109 return 0;
110} 108}
111 109
112static void __net_exit iptable_mangle_net_exit(struct net *net) 110static void __net_exit iptable_mangle_net_exit(struct net *net)
@@ -131,14 +129,10 @@ static int __init iptable_mangle_init(void)
131 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook); 129 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
132 if (IS_ERR(mangle_ops)) { 130 if (IS_ERR(mangle_ops)) {
133 ret = PTR_ERR(mangle_ops); 131 ret = PTR_ERR(mangle_ops);
134 goto cleanup_table; 132 unregister_pernet_subsys(&iptable_mangle_net_ops);
135 } 133 }
136 134
137 return ret; 135 return ret;
138
139 cleanup_table:
140 unregister_pernet_subsys(&iptable_mangle_net_ops);
141 return ret;
142} 136}
143 137
144static void __exit iptable_mangle_fini(void) 138static void __exit iptable_mangle_fini(void)
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/iptable_nat.c
index 3828a4229822..9e0ffaf1d942 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -1,84 +1,71 @@
1/* (C) 1999-2001 Paul `Rusty' Russell 1/* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> 2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2011 Patrick McHardy <kaber@trash.net>
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
7 */ 8 */
8#include <linux/types.h> 9
9#include <linux/icmp.h> 10#include <linux/module.h>
10#include <linux/gfp.h>
11#include <linux/ip.h>
12#include <linux/netfilter.h> 11#include <linux/netfilter.h>
13#include <linux/netfilter_ipv4.h> 12#include <linux/netfilter_ipv4.h>
14#include <linux/module.h> 13#include <linux/netfilter_ipv4/ip_tables.h>
15#include <linux/skbuff.h> 14#include <linux/ip.h>
16#include <linux/proc_fs.h>
17#include <net/ip.h> 15#include <net/ip.h>
18#include <net/checksum.h>
19#include <linux/spinlock.h>
20 16
21#include <net/netfilter/nf_conntrack.h>
22#include <net/netfilter/nf_conntrack_core.h>
23#include <net/netfilter/nf_conntrack_extend.h>
24#include <net/netfilter/nf_nat.h> 17#include <net/netfilter/nf_nat.h>
25#include <net/netfilter/nf_nat_rule.h>
26#include <net/netfilter/nf_nat_protocol.h>
27#include <net/netfilter/nf_nat_core.h> 18#include <net/netfilter/nf_nat_core.h>
28#include <net/netfilter/nf_nat_helper.h> 19#include <net/netfilter/nf_nat_l3proto.h>
29#include <linux/netfilter_ipv4/ip_tables.h> 20
21static const struct xt_table nf_nat_ipv4_table = {
22 .name = "nat",
23 .valid_hooks = (1 << NF_INET_PRE_ROUTING) |
24 (1 << NF_INET_POST_ROUTING) |
25 (1 << NF_INET_LOCAL_OUT) |
26 (1 << NF_INET_LOCAL_IN),
27 .me = THIS_MODULE,
28 .af = NFPROTO_IPV4,
29};
30 30
31#ifdef CONFIG_XFRM 31static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
32static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
33{ 32{
34 struct flowi4 *fl4 = &fl->u.ip4; 33 /* Force range to this IP; let proto decide mapping for
35 const struct nf_conn *ct; 34 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
36 const struct nf_conntrack_tuple *t; 35 */
37 enum ip_conntrack_info ctinfo; 36 struct nf_nat_range range;
38 enum ip_conntrack_dir dir; 37
39 unsigned long statusbit; 38 range.flags = 0;
40 39 pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
41 ct = nf_ct_get(skb, &ctinfo); 40 HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
42 if (ct == NULL) 41 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
43 return; 42 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
44 dir = CTINFO2DIR(ctinfo); 43
45 t = &ct->tuplehash[dir].tuple; 44 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
46 45}
47 if (dir == IP_CT_DIR_ORIGINAL)
48 statusbit = IPS_DST_NAT;
49 else
50 statusbit = IPS_SRC_NAT;
51
52 if (ct->status & statusbit) {
53 fl4->daddr = t->dst.u3.ip;
54 if (t->dst.protonum == IPPROTO_TCP ||
55 t->dst.protonum == IPPROTO_UDP ||
56 t->dst.protonum == IPPROTO_UDPLITE ||
57 t->dst.protonum == IPPROTO_DCCP ||
58 t->dst.protonum == IPPROTO_SCTP)
59 fl4->fl4_dport = t->dst.u.tcp.port;
60 }
61 46
62 statusbit ^= IPS_NAT_MASK; 47static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
48 const struct net_device *in,
49 const struct net_device *out,
50 struct nf_conn *ct)
51{
52 struct net *net = nf_ct_net(ct);
53 unsigned int ret;
63 54
64 if (ct->status & statusbit) { 55 ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
65 fl4->saddr = t->src.u3.ip; 56 if (ret == NF_ACCEPT) {
66 if (t->dst.protonum == IPPROTO_TCP || 57 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
67 t->dst.protonum == IPPROTO_UDP || 58 ret = alloc_null_binding(ct, hooknum);
68 t->dst.protonum == IPPROTO_UDPLITE ||
69 t->dst.protonum == IPPROTO_DCCP ||
70 t->dst.protonum == IPPROTO_SCTP)
71 fl4->fl4_sport = t->src.u.tcp.port;
72 } 59 }
60 return ret;
73} 61}
74#endif
75 62
76static unsigned int 63static unsigned int
77nf_nat_fn(unsigned int hooknum, 64nf_nat_ipv4_fn(unsigned int hooknum,
78 struct sk_buff *skb, 65 struct sk_buff *skb,
79 const struct net_device *in, 66 const struct net_device *in,
80 const struct net_device *out, 67 const struct net_device *out,
81 int (*okfn)(struct sk_buff *)) 68 int (*okfn)(struct sk_buff *))
82{ 69{
83 struct nf_conn *ct; 70 struct nf_conn *ct;
84 enum ip_conntrack_info ctinfo; 71 enum ip_conntrack_info ctinfo;
@@ -87,14 +74,16 @@ nf_nat_fn(unsigned int hooknum,
87 enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); 74 enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
88 75
89 /* We never see fragments: conntrack defrags on pre-routing 76 /* We never see fragments: conntrack defrags on pre-routing
90 and local-out, and nf_nat_out protects post-routing. */ 77 * and local-out, and nf_nat_out protects post-routing.
78 */
91 NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); 79 NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
92 80
93 ct = nf_ct_get(skb, &ctinfo); 81 ct = nf_ct_get(skb, &ctinfo);
94 /* Can't track? It's not due to stress, or conntrack would 82 /* Can't track? It's not due to stress, or conntrack would
95 have dropped it. Hence it's the user's responsibilty to 83 * have dropped it. Hence it's the user's responsibilty to
96 packet filter it out, or implement conntrack/NAT for that 84 * packet filter it out, or implement conntrack/NAT for that
97 protocol. 8) --RR */ 85 * protocol. 8) --RR
86 */
98 if (!ct) 87 if (!ct)
99 return NF_ACCEPT; 88 return NF_ACCEPT;
100 89
@@ -118,17 +107,17 @@ nf_nat_fn(unsigned int hooknum,
118 case IP_CT_RELATED: 107 case IP_CT_RELATED:
119 case IP_CT_RELATED_REPLY: 108 case IP_CT_RELATED_REPLY:
120 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { 109 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
121 if (!nf_nat_icmp_reply_translation(ct, ctinfo, 110 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
122 hooknum, skb)) 111 hooknum))
123 return NF_DROP; 112 return NF_DROP;
124 else 113 else
125 return NF_ACCEPT; 114 return NF_ACCEPT;
126 } 115 }
127 /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ 116 /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
128 case IP_CT_NEW: 117 case IP_CT_NEW:
129
130 /* Seen it before? This can happen for loopback, retrans, 118 /* Seen it before? This can happen for loopback, retrans,
131 or local packets.. */ 119 * or local packets.
120 */
132 if (!nf_nat_initialized(ct, maniptype)) { 121 if (!nf_nat_initialized(ct, maniptype)) {
133 unsigned int ret; 122 unsigned int ret;
134 123
@@ -151,16 +140,16 @@ nf_nat_fn(unsigned int hooknum,
151} 140}
152 141
153static unsigned int 142static unsigned int
154nf_nat_in(unsigned int hooknum, 143nf_nat_ipv4_in(unsigned int hooknum,
155 struct sk_buff *skb, 144 struct sk_buff *skb,
156 const struct net_device *in, 145 const struct net_device *in,
157 const struct net_device *out, 146 const struct net_device *out,
158 int (*okfn)(struct sk_buff *)) 147 int (*okfn)(struct sk_buff *))
159{ 148{
160 unsigned int ret; 149 unsigned int ret;
161 __be32 daddr = ip_hdr(skb)->daddr; 150 __be32 daddr = ip_hdr(skb)->daddr;
162 151
163 ret = nf_nat_fn(hooknum, skb, in, out, okfn); 152 ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
164 if (ret != NF_DROP && ret != NF_STOLEN && 153 if (ret != NF_DROP && ret != NF_STOLEN &&
165 daddr != ip_hdr(skb)->daddr) 154 daddr != ip_hdr(skb)->daddr)
166 skb_dst_drop(skb); 155 skb_dst_drop(skb);
@@ -169,11 +158,11 @@ nf_nat_in(unsigned int hooknum,
169} 158}
170 159
171static unsigned int 160static unsigned int
172nf_nat_out(unsigned int hooknum, 161nf_nat_ipv4_out(unsigned int hooknum,
173 struct sk_buff *skb, 162 struct sk_buff *skb,
174 const struct net_device *in, 163 const struct net_device *in,
175 const struct net_device *out, 164 const struct net_device *out,
176 int (*okfn)(struct sk_buff *)) 165 int (*okfn)(struct sk_buff *))
177{ 166{
178#ifdef CONFIG_XFRM 167#ifdef CONFIG_XFRM
179 const struct nf_conn *ct; 168 const struct nf_conn *ct;
@@ -186,29 +175,30 @@ nf_nat_out(unsigned int hooknum,
186 ip_hdrlen(skb) < sizeof(struct iphdr)) 175 ip_hdrlen(skb) < sizeof(struct iphdr))
187 return NF_ACCEPT; 176 return NF_ACCEPT;
188 177
189 ret = nf_nat_fn(hooknum, skb, in, out, okfn); 178 ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
190#ifdef CONFIG_XFRM 179#ifdef CONFIG_XFRM
191 if (ret != NF_DROP && ret != NF_STOLEN && 180 if (ret != NF_DROP && ret != NF_STOLEN &&
181 !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
192 (ct = nf_ct_get(skb, &ctinfo)) != NULL) { 182 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
193 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 183 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
194 184
195 if ((ct->tuplehash[dir].tuple.src.u3.ip != 185 if ((ct->tuplehash[dir].tuple.src.u3.ip !=
196 ct->tuplehash[!dir].tuple.dst.u3.ip) || 186 ct->tuplehash[!dir].tuple.dst.u3.ip) ||
197 (ct->tuplehash[dir].tuple.src.u.all != 187 (ct->tuplehash[dir].tuple.src.u.all !=
198 ct->tuplehash[!dir].tuple.dst.u.all) 188 ct->tuplehash[!dir].tuple.dst.u.all))
199 ) 189 if (nf_xfrm_me_harder(skb, AF_INET) < 0)
200 return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP; 190 ret = NF_DROP;
201 } 191 }
202#endif 192#endif
203 return ret; 193 return ret;
204} 194}
205 195
206static unsigned int 196static unsigned int
207nf_nat_local_fn(unsigned int hooknum, 197nf_nat_ipv4_local_fn(unsigned int hooknum,
208 struct sk_buff *skb, 198 struct sk_buff *skb,
209 const struct net_device *in, 199 const struct net_device *in,
210 const struct net_device *out, 200 const struct net_device *out,
211 int (*okfn)(struct sk_buff *)) 201 int (*okfn)(struct sk_buff *))
212{ 202{
213 const struct nf_conn *ct; 203 const struct nf_conn *ct;
214 enum ip_conntrack_info ctinfo; 204 enum ip_conntrack_info ctinfo;
@@ -219,7 +209,7 @@ nf_nat_local_fn(unsigned int hooknum,
219 ip_hdrlen(skb) < sizeof(struct iphdr)) 209 ip_hdrlen(skb) < sizeof(struct iphdr))
220 return NF_ACCEPT; 210 return NF_ACCEPT;
221 211
222 ret = nf_nat_fn(hooknum, skb, in, out, okfn); 212 ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
223 if (ret != NF_DROP && ret != NF_STOLEN && 213 if (ret != NF_DROP && ret != NF_STOLEN &&
224 (ct = nf_ct_get(skb, &ctinfo)) != NULL) { 214 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
225 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 215 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -230,21 +220,20 @@ nf_nat_local_fn(unsigned int hooknum,
230 ret = NF_DROP; 220 ret = NF_DROP;
231 } 221 }
232#ifdef CONFIG_XFRM 222#ifdef CONFIG_XFRM
233 else if (ct->tuplehash[dir].tuple.dst.u.all != 223 else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
224 ct->tuplehash[dir].tuple.dst.u.all !=
234 ct->tuplehash[!dir].tuple.src.u.all) 225 ct->tuplehash[!dir].tuple.src.u.all)
235 if (ip_xfrm_me_harder(skb)) 226 if (nf_xfrm_me_harder(skb, AF_INET) < 0)
236 ret = NF_DROP; 227 ret = NF_DROP;
237#endif 228#endif
238 } 229 }
239 return ret; 230 return ret;
240} 231}
241 232
242/* We must be after connection tracking and before packet filtering. */ 233static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
243
244static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
245 /* Before packet filtering, change destination */ 234 /* Before packet filtering, change destination */
246 { 235 {
247 .hook = nf_nat_in, 236 .hook = nf_nat_ipv4_in,
248 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
249 .pf = NFPROTO_IPV4, 238 .pf = NFPROTO_IPV4,
250 .hooknum = NF_INET_PRE_ROUTING, 239 .hooknum = NF_INET_PRE_ROUTING,
@@ -252,7 +241,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
252 }, 241 },
253 /* After packet filtering, change source */ 242 /* After packet filtering, change source */
254 { 243 {
255 .hook = nf_nat_out, 244 .hook = nf_nat_ipv4_out,
256 .owner = THIS_MODULE, 245 .owner = THIS_MODULE,
257 .pf = NFPROTO_IPV4, 246 .pf = NFPROTO_IPV4,
258 .hooknum = NF_INET_POST_ROUTING, 247 .hooknum = NF_INET_POST_ROUTING,
@@ -260,7 +249,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
260 }, 249 },
261 /* Before packet filtering, change destination */ 250 /* Before packet filtering, change destination */
262 { 251 {
263 .hook = nf_nat_local_fn, 252 .hook = nf_nat_ipv4_local_fn,
264 .owner = THIS_MODULE, 253 .owner = THIS_MODULE,
265 .pf = NFPROTO_IPV4, 254 .pf = NFPROTO_IPV4,
266 .hooknum = NF_INET_LOCAL_OUT, 255 .hooknum = NF_INET_LOCAL_OUT,
@@ -268,7 +257,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
268 }, 257 },
269 /* After packet filtering, change source */ 258 /* After packet filtering, change source */
270 { 259 {
271 .hook = nf_nat_fn, 260 .hook = nf_nat_ipv4_fn,
272 .owner = THIS_MODULE, 261 .owner = THIS_MODULE,
273 .pf = NFPROTO_IPV4, 262 .pf = NFPROTO_IPV4,
274 .hooknum = NF_INET_LOCAL_IN, 263 .hooknum = NF_INET_LOCAL_IN,
@@ -276,51 +265,56 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
276 }, 265 },
277}; 266};
278 267
279static int __init nf_nat_standalone_init(void) 268static int __net_init iptable_nat_net_init(struct net *net)
280{ 269{
281 int ret = 0; 270 struct ipt_replace *repl;
271
272 repl = ipt_alloc_initial_table(&nf_nat_ipv4_table);
273 if (repl == NULL)
274 return -ENOMEM;
275 net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
276 kfree(repl);
277 if (IS_ERR(net->ipv4.nat_table))
278 return PTR_ERR(net->ipv4.nat_table);
279 return 0;
280}
282 281
283 need_ipv4_conntrack(); 282static void __net_exit iptable_nat_net_exit(struct net *net)
283{
284 ipt_unregister_table(net, net->ipv4.nat_table);
285}
284 286
285#ifdef CONFIG_XFRM 287static struct pernet_operations iptable_nat_net_ops = {
286 BUG_ON(ip_nat_decode_session != NULL); 288 .init = iptable_nat_net_init,
287 RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session); 289 .exit = iptable_nat_net_exit,
288#endif 290};
289 ret = nf_nat_rule_init();
290 if (ret < 0) {
291 pr_err("nf_nat_init: can't setup rules.\n");
292 goto cleanup_decode_session;
293 }
294 ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
295 if (ret < 0) {
296 pr_err("nf_nat_init: can't register hooks.\n");
297 goto cleanup_rule_init;
298 }
299 return ret;
300 291
301 cleanup_rule_init: 292static int __init iptable_nat_init(void)
302 nf_nat_rule_cleanup(); 293{
303 cleanup_decode_session: 294 int err;
304#ifdef CONFIG_XFRM 295
305 RCU_INIT_POINTER(ip_nat_decode_session, NULL); 296 err = register_pernet_subsys(&iptable_nat_net_ops);
306 synchronize_net(); 297 if (err < 0)
307#endif 298 goto err1;
308 return ret; 299
300 err = nf_register_hooks(nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
301 if (err < 0)
302 goto err2;
303 return 0;
304
305err2:
306 unregister_pernet_subsys(&iptable_nat_net_ops);
307err1:
308 return err;
309} 309}
310 310
311static void __exit nf_nat_standalone_fini(void) 311static void __exit iptable_nat_exit(void)
312{ 312{
313 nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); 313 nf_unregister_hooks(nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
314 nf_nat_rule_cleanup(); 314 unregister_pernet_subsys(&iptable_nat_net_ops);
315#ifdef CONFIG_XFRM
316 RCU_INIT_POINTER(ip_nat_decode_session, NULL);
317 synchronize_net();
318#endif
319 /* Conntrack caches are unregistered in nf_conntrack_cleanup */
320} 315}
321 316
322module_init(nf_nat_standalone_init); 317module_init(iptable_nat_init);
323module_exit(nf_nat_standalone_fini); 318module_exit(iptable_nat_exit);
324 319
325MODULE_LICENSE("GPL"); 320MODULE_LICENSE("GPL");
326MODULE_ALIAS("ip_nat");
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 07fb710cd722..03d9696d3c6e 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -48,9 +48,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
48 net->ipv4.iptable_raw = 48 net->ipv4.iptable_raw =
49 ipt_register_table(net, &packet_raw, repl); 49 ipt_register_table(net, &packet_raw, repl);
50 kfree(repl); 50 kfree(repl);
51 if (IS_ERR(net->ipv4.iptable_raw)) 51 return PTR_RET(net->ipv4.iptable_raw);
52 return PTR_ERR(net->ipv4.iptable_raw);
53 return 0;
54} 52}
55 53
56static void __net_exit iptable_raw_net_exit(struct net *net) 54static void __net_exit iptable_raw_net_exit(struct net *net)
@@ -75,14 +73,10 @@ static int __init iptable_raw_init(void)
75 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook); 73 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
76 if (IS_ERR(rawtable_ops)) { 74 if (IS_ERR(rawtable_ops)) {
77 ret = PTR_ERR(rawtable_ops); 75 ret = PTR_ERR(rawtable_ops);
78 goto cleanup_table; 76 unregister_pernet_subsys(&iptable_raw_net_ops);
79 } 77 }
80 78
81 return ret; 79 return ret;
82
83 cleanup_table:
84 unregister_pernet_subsys(&iptable_raw_net_ops);
85 return ret;
86} 80}
87 81
88static void __exit iptable_raw_fini(void) 82static void __exit iptable_raw_fini(void)
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index be45bdc4c602..b283d8e2601a 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -66,10 +66,7 @@ static int __net_init iptable_security_net_init(struct net *net)
66 net->ipv4.iptable_security = 66 net->ipv4.iptable_security =
67 ipt_register_table(net, &security_table, repl); 67 ipt_register_table(net, &security_table, repl);
68 kfree(repl); 68 kfree(repl);
69 if (IS_ERR(net->ipv4.iptable_security)) 69 return PTR_RET(net->ipv4.iptable_security);
70 return PTR_ERR(net->ipv4.iptable_security);
71
72 return 0;
73} 70}
74 71
75static void __net_exit iptable_security_net_exit(struct net *net) 72static void __net_exit iptable_security_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index e7ff2dcab6ce..fcdd0c2406e6 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -29,11 +29,6 @@
29#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 29#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
30#include <net/netfilter/nf_log.h> 30#include <net/netfilter/nf_log.h>
31 31
32int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
33 struct nf_conn *ct,
34 enum ip_conntrack_info ctinfo);
35EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
36
37static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 32static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
38 struct nf_conntrack_tuple *tuple) 33 struct nf_conntrack_tuple *tuple)
39{ 34{
@@ -149,7 +144,8 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
149 typeof(nf_nat_seq_adjust_hook) seq_adjust; 144 typeof(nf_nat_seq_adjust_hook) seq_adjust;
150 145
151 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); 146 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
152 if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) { 147 if (!seq_adjust ||
148 !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
153 NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 149 NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
154 return NF_DROP; 150 return NF_DROP;
155 } 151 }
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index c6784a18c1c4..9c3db10b22d3 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -15,13 +15,12 @@
15 15
16#include <net/netfilter/nf_nat.h> 16#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_helper.h> 17#include <net/netfilter/nf_nat_helper.h>
18#include <net/netfilter/nf_nat_rule.h>
19#include <net/netfilter/nf_conntrack_helper.h> 18#include <net/netfilter/nf_conntrack_helper.h>
20#include <net/netfilter/nf_conntrack_expect.h> 19#include <net/netfilter/nf_conntrack_expect.h>
21#include <linux/netfilter/nf_conntrack_h323.h> 20#include <linux/netfilter/nf_conntrack_h323.h>
22 21
23/****************************************************************************/ 22/****************************************************************************/
24static int set_addr(struct sk_buff *skb, 23static int set_addr(struct sk_buff *skb, unsigned int protoff,
25 unsigned char **data, int dataoff, 24 unsigned char **data, int dataoff,
26 unsigned int addroff, __be32 ip, __be16 port) 25 unsigned int addroff, __be32 ip, __be16 port)
27{ 26{
@@ -40,7 +39,7 @@ static int set_addr(struct sk_buff *skb,
40 39
41 if (ip_hdr(skb)->protocol == IPPROTO_TCP) { 40 if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
42 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 41 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
43 addroff, sizeof(buf), 42 protoff, addroff, sizeof(buf),
44 (char *) &buf, sizeof(buf))) { 43 (char *) &buf, sizeof(buf))) {
45 net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n"); 44 net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
46 return -1; 45 return -1;
@@ -54,7 +53,7 @@ static int set_addr(struct sk_buff *skb,
54 *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff; 53 *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff;
55 } else { 54 } else {
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 55 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
57 addroff, sizeof(buf), 56 protoff, addroff, sizeof(buf),
58 (char *) &buf, sizeof(buf))) { 57 (char *) &buf, sizeof(buf))) {
59 net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n"); 58 net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
60 return -1; 59 return -1;
@@ -69,22 +68,22 @@ static int set_addr(struct sk_buff *skb,
69} 68}
70 69
71/****************************************************************************/ 70/****************************************************************************/
72static int set_h225_addr(struct sk_buff *skb, 71static int set_h225_addr(struct sk_buff *skb, unsigned int protoff,
73 unsigned char **data, int dataoff, 72 unsigned char **data, int dataoff,
74 TransportAddress *taddr, 73 TransportAddress *taddr,
75 union nf_inet_addr *addr, __be16 port) 74 union nf_inet_addr *addr, __be16 port)
76{ 75{
77 return set_addr(skb, data, dataoff, taddr->ipAddress.ip, 76 return set_addr(skb, protoff, data, dataoff, taddr->ipAddress.ip,
78 addr->ip, port); 77 addr->ip, port);
79} 78}
80 79
81/****************************************************************************/ 80/****************************************************************************/
82static int set_h245_addr(struct sk_buff *skb, 81static int set_h245_addr(struct sk_buff *skb, unsigned protoff,
83 unsigned char **data, int dataoff, 82 unsigned char **data, int dataoff,
84 H245_TransportAddress *taddr, 83 H245_TransportAddress *taddr,
85 union nf_inet_addr *addr, __be16 port) 84 union nf_inet_addr *addr, __be16 port)
86{ 85{
87 return set_addr(skb, data, dataoff, 86 return set_addr(skb, protoff, data, dataoff,
88 taddr->unicastAddress.iPAddress.network, 87 taddr->unicastAddress.iPAddress.network,
89 addr->ip, port); 88 addr->ip, port);
90} 89}
@@ -92,7 +91,7 @@ static int set_h245_addr(struct sk_buff *skb,
92/****************************************************************************/ 91/****************************************************************************/
93static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, 92static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
94 enum ip_conntrack_info ctinfo, 93 enum ip_conntrack_info ctinfo,
95 unsigned char **data, 94 unsigned int protoff, unsigned char **data,
96 TransportAddress *taddr, int count) 95 TransportAddress *taddr, int count)
97{ 96{
98 const struct nf_ct_h323_master *info = nfct_help_data(ct); 97 const struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -118,7 +117,8 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
118 &addr.ip, port, 117 &addr.ip, port,
119 &ct->tuplehash[!dir].tuple.dst.u3.ip, 118 &ct->tuplehash[!dir].tuple.dst.u3.ip,
120 info->sig_port[!dir]); 119 info->sig_port[!dir]);
121 return set_h225_addr(skb, data, 0, &taddr[i], 120 return set_h225_addr(skb, protoff, data, 0,
121 &taddr[i],
122 &ct->tuplehash[!dir]. 122 &ct->tuplehash[!dir].
123 tuple.dst.u3, 123 tuple.dst.u3,
124 info->sig_port[!dir]); 124 info->sig_port[!dir]);
@@ -129,7 +129,8 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
129 &addr.ip, port, 129 &addr.ip, port,
130 &ct->tuplehash[!dir].tuple.src.u3.ip, 130 &ct->tuplehash[!dir].tuple.src.u3.ip,
131 info->sig_port[!dir]); 131 info->sig_port[!dir]);
132 return set_h225_addr(skb, data, 0, &taddr[i], 132 return set_h225_addr(skb, protoff, data, 0,
133 &taddr[i],
133 &ct->tuplehash[!dir]. 134 &ct->tuplehash[!dir].
134 tuple.src.u3, 135 tuple.src.u3,
135 info->sig_port[!dir]); 136 info->sig_port[!dir]);
@@ -143,7 +144,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
143/****************************************************************************/ 144/****************************************************************************/
144static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct, 145static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
145 enum ip_conntrack_info ctinfo, 146 enum ip_conntrack_info ctinfo,
146 unsigned char **data, 147 unsigned int protoff, unsigned char **data,
147 TransportAddress *taddr, int count) 148 TransportAddress *taddr, int count)
148{ 149{
149 int dir = CTINFO2DIR(ctinfo); 150 int dir = CTINFO2DIR(ctinfo);
@@ -159,7 +160,7 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
159 &addr.ip, ntohs(port), 160 &addr.ip, ntohs(port),
160 &ct->tuplehash[!dir].tuple.dst.u3.ip, 161 &ct->tuplehash[!dir].tuple.dst.u3.ip,
161 ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); 162 ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port));
162 return set_h225_addr(skb, data, 0, &taddr[i], 163 return set_h225_addr(skb, protoff, data, 0, &taddr[i],
163 &ct->tuplehash[!dir].tuple.dst.u3, 164 &ct->tuplehash[!dir].tuple.dst.u3,
164 ct->tuplehash[!dir].tuple. 165 ct->tuplehash[!dir].tuple.
165 dst.u.udp.port); 166 dst.u.udp.port);
@@ -172,7 +173,7 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
172/****************************************************************************/ 173/****************************************************************************/
173static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, 174static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
174 enum ip_conntrack_info ctinfo, 175 enum ip_conntrack_info ctinfo,
175 unsigned char **data, int dataoff, 176 unsigned int protoff, unsigned char **data, int dataoff,
176 H245_TransportAddress *taddr, 177 H245_TransportAddress *taddr,
177 __be16 port, __be16 rtp_port, 178 __be16 port, __be16 rtp_port,
178 struct nf_conntrack_expect *rtp_exp, 179 struct nf_conntrack_expect *rtp_exp,
@@ -244,7 +245,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
244 } 245 }
245 246
246 /* Modify signal */ 247 /* Modify signal */
247 if (set_h245_addr(skb, data, dataoff, taddr, 248 if (set_h245_addr(skb, protoff, data, dataoff, taddr,
248 &ct->tuplehash[!dir].tuple.dst.u3, 249 &ct->tuplehash[!dir].tuple.dst.u3,
249 htons((port & htons(1)) ? nated_port + 1 : 250 htons((port & htons(1)) ? nated_port + 1 :
250 nated_port)) == 0) { 251 nated_port)) == 0) {
@@ -275,7 +276,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
275/****************************************************************************/ 276/****************************************************************************/
276static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, 277static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
277 enum ip_conntrack_info ctinfo, 278 enum ip_conntrack_info ctinfo,
278 unsigned char **data, int dataoff, 279 unsigned int protoff, unsigned char **data, int dataoff,
279 H245_TransportAddress *taddr, __be16 port, 280 H245_TransportAddress *taddr, __be16 port,
280 struct nf_conntrack_expect *exp) 281 struct nf_conntrack_expect *exp)
281{ 282{
@@ -307,7 +308,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
307 } 308 }
308 309
309 /* Modify signal */ 310 /* Modify signal */
310 if (set_h245_addr(skb, data, dataoff, taddr, 311 if (set_h245_addr(skb, protoff, data, dataoff, taddr,
311 &ct->tuplehash[!dir].tuple.dst.u3, 312 &ct->tuplehash[!dir].tuple.dst.u3,
312 htons(nated_port)) < 0) { 313 htons(nated_port)) < 0) {
313 nf_ct_unexpect_related(exp); 314 nf_ct_unexpect_related(exp);
@@ -326,7 +327,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
326/****************************************************************************/ 327/****************************************************************************/
327static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, 328static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
328 enum ip_conntrack_info ctinfo, 329 enum ip_conntrack_info ctinfo,
329 unsigned char **data, int dataoff, 330 unsigned int protoff, unsigned char **data, int dataoff,
330 TransportAddress *taddr, __be16 port, 331 TransportAddress *taddr, __be16 port,
331 struct nf_conntrack_expect *exp) 332 struct nf_conntrack_expect *exp)
332{ 333{
@@ -363,7 +364,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
363 } 364 }
364 365
365 /* Modify signal */ 366 /* Modify signal */
366 if (set_h225_addr(skb, data, dataoff, taddr, 367 if (set_h225_addr(skb, protoff, data, dataoff, taddr,
367 &ct->tuplehash[!dir].tuple.dst.u3, 368 &ct->tuplehash[!dir].tuple.dst.u3,
368 htons(nated_port)) == 0) { 369 htons(nated_port)) == 0) {
369 /* Save ports */ 370 /* Save ports */
@@ -390,7 +391,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
390static void ip_nat_q931_expect(struct nf_conn *new, 391static void ip_nat_q931_expect(struct nf_conn *new,
391 struct nf_conntrack_expect *this) 392 struct nf_conntrack_expect *this)
392{ 393{
393 struct nf_nat_ipv4_range range; 394 struct nf_nat_range range;
394 395
395 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ 396 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */
396 nf_nat_follow_master(new, this); 397 nf_nat_follow_master(new, this);
@@ -402,21 +403,23 @@ static void ip_nat_q931_expect(struct nf_conn *new,
402 403
403 /* Change src to where master sends to */ 404 /* Change src to where master sends to */
404 range.flags = NF_NAT_RANGE_MAP_IPS; 405 range.flags = NF_NAT_RANGE_MAP_IPS;
405 range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; 406 range.min_addr = range.max_addr =
407 new->tuplehash[!this->dir].tuple.src.u3;
406 nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC); 408 nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
407 409
408 /* For DST manip, map port here to where it's expected. */ 410 /* For DST manip, map port here to where it's expected. */
409 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 411 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
410 range.min = range.max = this->saved_proto; 412 range.min_proto = range.max_proto = this->saved_proto;
411 range.min_ip = range.max_ip = 413 range.min_addr = range.max_addr =
412 new->master->tuplehash[!this->dir].tuple.src.u3.ip; 414 new->master->tuplehash[!this->dir].tuple.src.u3;
413 nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST); 415 nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
414} 416}
415 417
416/****************************************************************************/ 418/****************************************************************************/
417static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, 419static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
418 enum ip_conntrack_info ctinfo, 420 enum ip_conntrack_info ctinfo,
419 unsigned char **data, TransportAddress *taddr, int idx, 421 unsigned int protoff, unsigned char **data,
422 TransportAddress *taddr, int idx,
420 __be16 port, struct nf_conntrack_expect *exp) 423 __be16 port, struct nf_conntrack_expect *exp)
421{ 424{
422 struct nf_ct_h323_master *info = nfct_help_data(ct); 425 struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -453,7 +456,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
453 } 456 }
454 457
455 /* Modify signal */ 458 /* Modify signal */
456 if (set_h225_addr(skb, data, 0, &taddr[idx], 459 if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
457 &ct->tuplehash[!dir].tuple.dst.u3, 460 &ct->tuplehash[!dir].tuple.dst.u3,
458 htons(nated_port)) == 0) { 461 htons(nated_port)) == 0) {
459 /* Save ports */ 462 /* Save ports */
@@ -464,7 +467,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
464 if (idx > 0 && 467 if (idx > 0 &&
465 get_h225_addr(ct, *data, &taddr[0], &addr, &port) && 468 get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
466 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { 469 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
467 set_h225_addr(skb, data, 0, &taddr[0], 470 set_h225_addr(skb, protoff, data, 0, &taddr[0],
468 &ct->tuplehash[!dir].tuple.dst.u3, 471 &ct->tuplehash[!dir].tuple.dst.u3,
469 info->sig_port[!dir]); 472 info->sig_port[!dir]);
470 } 473 }
@@ -487,26 +490,28 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
487static void ip_nat_callforwarding_expect(struct nf_conn *new, 490static void ip_nat_callforwarding_expect(struct nf_conn *new,
488 struct nf_conntrack_expect *this) 491 struct nf_conntrack_expect *this)
489{ 492{
490 struct nf_nat_ipv4_range range; 493 struct nf_nat_range range;
491 494
492 /* This must be a fresh one. */ 495 /* This must be a fresh one. */
493 BUG_ON(new->status & IPS_NAT_DONE_MASK); 496 BUG_ON(new->status & IPS_NAT_DONE_MASK);
494 497
495 /* Change src to where master sends to */ 498 /* Change src to where master sends to */
496 range.flags = NF_NAT_RANGE_MAP_IPS; 499 range.flags = NF_NAT_RANGE_MAP_IPS;
497 range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; 500 range.min_addr = range.max_addr =
501 new->tuplehash[!this->dir].tuple.src.u3;
498 nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC); 502 nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
499 503
500 /* For DST manip, map port here to where it's expected. */ 504 /* For DST manip, map port here to where it's expected. */
501 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 505 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
502 range.min = range.max = this->saved_proto; 506 range.min_proto = range.max_proto = this->saved_proto;
503 range.min_ip = range.max_ip = this->saved_ip; 507 range.min_addr = range.max_addr = this->saved_addr;
504 nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST); 508 nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
505} 509}
506 510
507/****************************************************************************/ 511/****************************************************************************/
508static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, 512static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
509 enum ip_conntrack_info ctinfo, 513 enum ip_conntrack_info ctinfo,
514 unsigned int protoff,
510 unsigned char **data, int dataoff, 515 unsigned char **data, int dataoff,
511 TransportAddress *taddr, __be16 port, 516 TransportAddress *taddr, __be16 port,
512 struct nf_conntrack_expect *exp) 517 struct nf_conntrack_expect *exp)
@@ -515,7 +520,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
515 u_int16_t nated_port; 520 u_int16_t nated_port;
516 521
517 /* Set expectations for NAT */ 522 /* Set expectations for NAT */
518 exp->saved_ip = exp->tuple.dst.u3.ip; 523 exp->saved_addr = exp->tuple.dst.u3;
519 exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip; 524 exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
520 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; 525 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
521 exp->expectfn = ip_nat_callforwarding_expect; 526 exp->expectfn = ip_nat_callforwarding_expect;
@@ -541,7 +546,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
541 } 546 }
542 547
543 /* Modify signal */ 548 /* Modify signal */
544 if (!set_h225_addr(skb, data, dataoff, taddr, 549 if (!set_h225_addr(skb, protoff, data, dataoff, taddr,
545 &ct->tuplehash[!dir].tuple.dst.u3, 550 &ct->tuplehash[!dir].tuple.dst.u3,
546 htons(nated_port)) == 0) { 551 htons(nated_port)) == 0) {
547 nf_ct_unexpect_related(exp); 552 nf_ct_unexpect_related(exp);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
new file mode 100644
index 000000000000..d8b2e14efddc
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -0,0 +1,281 @@
1/*
2 * (C) 1999-2001 Paul `Rusty' Russell
3 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
4 * (C) 2011 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/ip.h>
15#include <linux/icmp.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter_ipv4.h>
18#include <net/secure_seq.h>
19#include <net/checksum.h>
20#include <net/route.h>
21#include <net/ip.h>
22
23#include <net/netfilter/nf_conntrack_core.h>
24#include <net/netfilter/nf_conntrack.h>
25#include <net/netfilter/nf_nat_core.h>
26#include <net/netfilter/nf_nat_l3proto.h>
27#include <net/netfilter/nf_nat_l4proto.h>
28
29static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;
30
31#ifdef CONFIG_XFRM
32static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
33 const struct nf_conn *ct,
34 enum ip_conntrack_dir dir,
35 unsigned long statusbit,
36 struct flowi *fl)
37{
38 const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
39 struct flowi4 *fl4 = &fl->u.ip4;
40
41 if (ct->status & statusbit) {
42 fl4->daddr = t->dst.u3.ip;
43 if (t->dst.protonum == IPPROTO_TCP ||
44 t->dst.protonum == IPPROTO_UDP ||
45 t->dst.protonum == IPPROTO_UDPLITE ||
46 t->dst.protonum == IPPROTO_DCCP ||
47 t->dst.protonum == IPPROTO_SCTP)
48 fl4->fl4_dport = t->dst.u.all;
49 }
50
51 statusbit ^= IPS_NAT_MASK;
52
53 if (ct->status & statusbit) {
54 fl4->saddr = t->src.u3.ip;
55 if (t->dst.protonum == IPPROTO_TCP ||
56 t->dst.protonum == IPPROTO_UDP ||
57 t->dst.protonum == IPPROTO_UDPLITE ||
58 t->dst.protonum == IPPROTO_DCCP ||
59 t->dst.protonum == IPPROTO_SCTP)
60 fl4->fl4_sport = t->src.u.all;
61 }
62}
63#endif /* CONFIG_XFRM */
64
65static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
66 const struct nf_nat_range *range)
67{
68 return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
69 ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
70}
71
72static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
73 __be16 dport)
74{
75 return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
76}
77
78static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
79 unsigned int iphdroff,
80 const struct nf_nat_l4proto *l4proto,
81 const struct nf_conntrack_tuple *target,
82 enum nf_nat_manip_type maniptype)
83{
84 struct iphdr *iph;
85 unsigned int hdroff;
86
87 if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
88 return false;
89
90 iph = (void *)skb->data + iphdroff;
91 hdroff = iphdroff + iph->ihl * 4;
92
93 if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
94 target, maniptype))
95 return false;
96 iph = (void *)skb->data + iphdroff;
97
98 if (maniptype == NF_NAT_MANIP_SRC) {
99 csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
100 iph->saddr = target->src.u3.ip;
101 } else {
102 csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
103 iph->daddr = target->dst.u3.ip;
104 }
105 return true;
106}
107
108static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
109 unsigned int iphdroff, __sum16 *check,
110 const struct nf_conntrack_tuple *t,
111 enum nf_nat_manip_type maniptype)
112{
113 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
114 __be32 oldip, newip;
115
116 if (maniptype == NF_NAT_MANIP_SRC) {
117 oldip = iph->saddr;
118 newip = t->src.u3.ip;
119 } else {
120 oldip = iph->daddr;
121 newip = t->dst.u3.ip;
122 }
123 inet_proto_csum_replace4(check, skb, oldip, newip, 1);
124}
125
126static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
127 u8 proto, void *data, __sum16 *check,
128 int datalen, int oldlen)
129{
130 const struct iphdr *iph = ip_hdr(skb);
131 struct rtable *rt = skb_rtable(skb);
132
133 if (skb->ip_summed != CHECKSUM_PARTIAL) {
134 if (!(rt->rt_flags & RTCF_LOCAL) &&
135 (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
136 skb->ip_summed = CHECKSUM_PARTIAL;
137 skb->csum_start = skb_headroom(skb) +
138 skb_network_offset(skb) +
139 ip_hdrlen(skb);
140 skb->csum_offset = (void *)check - data;
141 *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
142 datalen, proto, 0);
143 } else {
144 *check = 0;
145 *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
146 datalen, proto,
147 csum_partial(data, datalen,
148 0));
149 if (proto == IPPROTO_UDP && !*check)
150 *check = CSUM_MANGLED_0;
151 }
152 } else
153 inet_proto_csum_replace2(check, skb,
154 htons(oldlen), htons(datalen), 1);
155}
156
157static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
158 struct nf_nat_range *range)
159{
160 if (tb[CTA_NAT_V4_MINIP]) {
161 range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
162 range->flags |= NF_NAT_RANGE_MAP_IPS;
163 }
164
165 if (tb[CTA_NAT_V4_MAXIP])
166 range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
167 else
168 range->max_addr.ip = range->min_addr.ip;
169
170 return 0;
171}
172
173static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
174 .l3proto = NFPROTO_IPV4,
175 .in_range = nf_nat_ipv4_in_range,
176 .secure_port = nf_nat_ipv4_secure_port,
177 .manip_pkt = nf_nat_ipv4_manip_pkt,
178 .csum_update = nf_nat_ipv4_csum_update,
179 .csum_recalc = nf_nat_ipv4_csum_recalc,
180 .nlattr_to_range = nf_nat_ipv4_nlattr_to_range,
181#ifdef CONFIG_XFRM
182 .decode_session = nf_nat_ipv4_decode_session,
183#endif
184};
185
186int nf_nat_icmp_reply_translation(struct sk_buff *skb,
187 struct nf_conn *ct,
188 enum ip_conntrack_info ctinfo,
189 unsigned int hooknum)
190{
191 struct {
192 struct icmphdr icmp;
193 struct iphdr ip;
194 } *inside;
195 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
196 enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
197 unsigned int hdrlen = ip_hdrlen(skb);
198 const struct nf_nat_l4proto *l4proto;
199 struct nf_conntrack_tuple target;
200 unsigned long statusbit;
201
202 NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
203
204 if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
205 return 0;
206 if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
207 return 0;
208
209 inside = (void *)skb->data + hdrlen;
210 if (inside->icmp.type == ICMP_REDIRECT) {
211 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
212 return 0;
213 if (ct->status & IPS_NAT_MASK)
214 return 0;
215 }
216
217 if (manip == NF_NAT_MANIP_SRC)
218 statusbit = IPS_SRC_NAT;
219 else
220 statusbit = IPS_DST_NAT;
221
222 /* Invert if this is reply direction */
223 if (dir == IP_CT_DIR_REPLY)
224 statusbit ^= IPS_NAT_MASK;
225
226 if (!(ct->status & statusbit))
227 return 1;
228
229 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
230 if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
231 l4proto, &ct->tuplehash[!dir].tuple, !manip))
232 return 0;
233
234 if (skb->ip_summed != CHECKSUM_PARTIAL) {
235 /* Reloading "inside" here since manip_pkt may reallocate */
236 inside = (void *)skb->data + hdrlen;
237 inside->icmp.checksum = 0;
238 inside->icmp.checksum =
239 csum_fold(skb_checksum(skb, hdrlen,
240 skb->len - hdrlen, 0));
241 }
242
243 /* Change outer to look like the reply to an incoming packet */
244 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
245 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
246 if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
247 return 0;
248
249 return 1;
250}
251EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
252
253static int __init nf_nat_l3proto_ipv4_init(void)
254{
255 int err;
256
257 err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
258 if (err < 0)
259 goto err1;
260 err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
261 if (err < 0)
262 goto err2;
263 return err;
264
265err2:
266 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
267err1:
268 return err;
269}
270
271static void __exit nf_nat_l3proto_ipv4_exit(void)
272{
273 nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
274 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
275}
276
277MODULE_LICENSE("GPL");
278MODULE_ALIAS("nf-nat-" __stringify(AF_INET));
279
280module_init(nf_nat_l3proto_ipv4_init);
281module_exit(nf_nat_l3proto_ipv4_exit);
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 388140881ebe..a06d7d74817d 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -22,7 +22,6 @@
22 22
23#include <net/netfilter/nf_nat.h> 23#include <net/netfilter/nf_nat.h>
24#include <net/netfilter/nf_nat_helper.h> 24#include <net/netfilter/nf_nat_helper.h>
25#include <net/netfilter/nf_nat_rule.h>
26#include <net/netfilter/nf_conntrack_helper.h> 25#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_expect.h> 26#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_zones.h> 27#include <net/netfilter/nf_conntrack_zones.h>
@@ -47,7 +46,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
47 struct nf_conntrack_tuple t; 46 struct nf_conntrack_tuple t;
48 const struct nf_ct_pptp_master *ct_pptp_info; 47 const struct nf_ct_pptp_master *ct_pptp_info;
49 const struct nf_nat_pptp *nat_pptp_info; 48 const struct nf_nat_pptp *nat_pptp_info;
50 struct nf_nat_ipv4_range range; 49 struct nf_nat_range range;
51 50
52 ct_pptp_info = nfct_help_data(master); 51 ct_pptp_info = nfct_help_data(master);
53 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; 52 nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
@@ -89,21 +88,21 @@ static void pptp_nat_expected(struct nf_conn *ct,
89 88
90 /* Change src to where master sends to */ 89 /* Change src to where master sends to */
91 range.flags = NF_NAT_RANGE_MAP_IPS; 90 range.flags = NF_NAT_RANGE_MAP_IPS;
92 range.min_ip = range.max_ip 91 range.min_addr = range.max_addr
93 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; 92 = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
94 if (exp->dir == IP_CT_DIR_ORIGINAL) { 93 if (exp->dir == IP_CT_DIR_ORIGINAL) {
95 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 94 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
96 range.min = range.max = exp->saved_proto; 95 range.min_proto = range.max_proto = exp->saved_proto;
97 } 96 }
98 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 97 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
99 98
100 /* For DST manip, map port here to where it's expected. */ 99 /* For DST manip, map port here to where it's expected. */
101 range.flags = NF_NAT_RANGE_MAP_IPS; 100 range.flags = NF_NAT_RANGE_MAP_IPS;
102 range.min_ip = range.max_ip 101 range.min_addr = range.max_addr
103 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; 102 = ct->master->tuplehash[!exp->dir].tuple.src.u3;
104 if (exp->dir == IP_CT_DIR_REPLY) { 103 if (exp->dir == IP_CT_DIR_REPLY) {
105 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 104 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
106 range.min = range.max = exp->saved_proto; 105 range.min_proto = range.max_proto = exp->saved_proto;
107 } 106 }
108 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 107 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
109} 108}
@@ -113,6 +112,7 @@ static int
113pptp_outbound_pkt(struct sk_buff *skb, 112pptp_outbound_pkt(struct sk_buff *skb,
114 struct nf_conn *ct, 113 struct nf_conn *ct,
115 enum ip_conntrack_info ctinfo, 114 enum ip_conntrack_info ctinfo,
115 unsigned int protoff,
116 struct PptpControlHeader *ctlh, 116 struct PptpControlHeader *ctlh,
117 union pptp_ctrl_union *pptpReq) 117 union pptp_ctrl_union *pptpReq)
118 118
@@ -175,7 +175,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
175 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); 175 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
176 176
177 /* mangle packet */ 177 /* mangle packet */
178 if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 178 if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
179 cid_off + sizeof(struct pptp_pkt_hdr) + 179 cid_off + sizeof(struct pptp_pkt_hdr) +
180 sizeof(struct PptpControlHeader), 180 sizeof(struct PptpControlHeader),
181 sizeof(new_callid), (char *)&new_callid, 181 sizeof(new_callid), (char *)&new_callid,
@@ -216,6 +216,7 @@ static int
216pptp_inbound_pkt(struct sk_buff *skb, 216pptp_inbound_pkt(struct sk_buff *skb,
217 struct nf_conn *ct, 217 struct nf_conn *ct,
218 enum ip_conntrack_info ctinfo, 218 enum ip_conntrack_info ctinfo,
219 unsigned int protoff,
219 struct PptpControlHeader *ctlh, 220 struct PptpControlHeader *ctlh,
220 union pptp_ctrl_union *pptpReq) 221 union pptp_ctrl_union *pptpReq)
221{ 222{
@@ -268,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
268 pr_debug("altering peer call id from 0x%04x to 0x%04x\n", 269 pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
269 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); 270 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
270 271
271 if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 272 if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
272 pcid_off + sizeof(struct pptp_pkt_hdr) + 273 pcid_off + sizeof(struct pptp_pkt_hdr) +
273 sizeof(struct PptpControlHeader), 274 sizeof(struct PptpControlHeader),
274 sizeof(new_pcid), (char *)&new_pcid, 275 sizeof(new_pcid), (char *)&new_pcid,
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index 46ba0b9ab985..ea44f02563b5 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -28,8 +28,7 @@
28#include <linux/ip.h> 28#include <linux/ip.h>
29 29
30#include <net/netfilter/nf_nat.h> 30#include <net/netfilter/nf_nat.h>
31#include <net/netfilter/nf_nat_rule.h> 31#include <net/netfilter/nf_nat_l4proto.h>
32#include <net/netfilter/nf_nat_protocol.h>
33#include <linux/netfilter/nf_conntrack_proto_gre.h> 32#include <linux/netfilter/nf_conntrack_proto_gre.h>
34 33
35MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
@@ -38,8 +37,9 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
38 37
39/* generate unique tuple ... */ 38/* generate unique tuple ... */
40static void 39static void
41gre_unique_tuple(struct nf_conntrack_tuple *tuple, 40gre_unique_tuple(const struct nf_nat_l3proto *l3proto,
42 const struct nf_nat_ipv4_range *range, 41 struct nf_conntrack_tuple *tuple,
42 const struct nf_nat_range *range,
43 enum nf_nat_manip_type maniptype, 43 enum nf_nat_manip_type maniptype,
44 const struct nf_conn *ct) 44 const struct nf_conn *ct)
45{ 45{
@@ -62,8 +62,8 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
62 min = 1; 62 min = 1;
63 range_size = 0xffff; 63 range_size = 0xffff;
64 } else { 64 } else {
65 min = ntohs(range->min.gre.key); 65 min = ntohs(range->min_proto.gre.key);
66 range_size = ntohs(range->max.gre.key) - min + 1; 66 range_size = ntohs(range->max_proto.gre.key) - min + 1;
67 } 67 }
68 68
69 pr_debug("min = %u, range_size = %u\n", min, range_size); 69 pr_debug("min = %u, range_size = %u\n", min, range_size);
@@ -80,14 +80,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
80 80
81/* manipulate a GRE packet according to maniptype */ 81/* manipulate a GRE packet according to maniptype */
82static bool 82static bool
83gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, 83gre_manip_pkt(struct sk_buff *skb,
84 const struct nf_nat_l3proto *l3proto,
85 unsigned int iphdroff, unsigned int hdroff,
84 const struct nf_conntrack_tuple *tuple, 86 const struct nf_conntrack_tuple *tuple,
85 enum nf_nat_manip_type maniptype) 87 enum nf_nat_manip_type maniptype)
86{ 88{
87 const struct gre_hdr *greh; 89 const struct gre_hdr *greh;
88 struct gre_hdr_pptp *pgreh; 90 struct gre_hdr_pptp *pgreh;
89 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
90 unsigned int hdroff = iphdroff + iph->ihl * 4;
91 91
92 /* pgreh includes two optional 32bit fields which are not required 92 /* pgreh includes two optional 32bit fields which are not required
93 * to be there. That's where the magic '8' comes from */ 93 * to be there. That's where the magic '8' comes from */
@@ -117,24 +117,24 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
117 return true; 117 return true;
118} 118}
119 119
120static const struct nf_nat_protocol gre = { 120static const struct nf_nat_l4proto gre = {
121 .protonum = IPPROTO_GRE, 121 .l4proto = IPPROTO_GRE,
122 .manip_pkt = gre_manip_pkt, 122 .manip_pkt = gre_manip_pkt,
123 .in_range = nf_nat_proto_in_range, 123 .in_range = nf_nat_l4proto_in_range,
124 .unique_tuple = gre_unique_tuple, 124 .unique_tuple = gre_unique_tuple,
125#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 125#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
126 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 126 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
127#endif 127#endif
128}; 128};
129 129
130static int __init nf_nat_proto_gre_init(void) 130static int __init nf_nat_proto_gre_init(void)
131{ 131{
132 return nf_nat_protocol_register(&gre); 132 return nf_nat_l4proto_register(NFPROTO_IPV4, &gre);
133} 133}
134 134
135static void __exit nf_nat_proto_gre_fini(void) 135static void __exit nf_nat_proto_gre_fini(void)
136{ 136{
137 nf_nat_protocol_unregister(&gre); 137 nf_nat_l4proto_unregister(NFPROTO_IPV4, &gre);
138} 138}
139 139
140module_init(nf_nat_proto_gre_init); 140module_init(nf_nat_proto_gre_init);
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index b35172851bae..eb303471bcf6 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -15,8 +15,7 @@
15#include <linux/netfilter.h> 15#include <linux/netfilter.h>
16#include <net/netfilter/nf_nat.h> 16#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_core.h> 17#include <net/netfilter/nf_nat_core.h>
18#include <net/netfilter/nf_nat_rule.h> 18#include <net/netfilter/nf_nat_l4proto.h>
19#include <net/netfilter/nf_nat_protocol.h>
20 19
21static bool 20static bool
22icmp_in_range(const struct nf_conntrack_tuple *tuple, 21icmp_in_range(const struct nf_conntrack_tuple *tuple,
@@ -29,8 +28,9 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
29} 28}
30 29
31static void 30static void
32icmp_unique_tuple(struct nf_conntrack_tuple *tuple, 31icmp_unique_tuple(const struct nf_nat_l3proto *l3proto,
33 const struct nf_nat_ipv4_range *range, 32 struct nf_conntrack_tuple *tuple,
33 const struct nf_nat_range *range,
34 enum nf_nat_manip_type maniptype, 34 enum nf_nat_manip_type maniptype,
35 const struct nf_conn *ct) 35 const struct nf_conn *ct)
36{ 36{
@@ -38,13 +38,14 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
38 unsigned int range_size; 38 unsigned int range_size;
39 unsigned int i; 39 unsigned int i;
40 40
41 range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1; 41 range_size = ntohs(range->max_proto.icmp.id) -
42 ntohs(range->min_proto.icmp.id) + 1;
42 /* If no range specified... */ 43 /* If no range specified... */
43 if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) 44 if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
44 range_size = 0xFFFF; 45 range_size = 0xFFFF;
45 46
46 for (i = 0; ; ++id) { 47 for (i = 0; ; ++id) {
47 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + 48 tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
48 (id % range_size)); 49 (id % range_size));
49 if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) 50 if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
50 return; 51 return;
@@ -54,13 +55,12 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
54 55
55static bool 56static bool
56icmp_manip_pkt(struct sk_buff *skb, 57icmp_manip_pkt(struct sk_buff *skb,
57 unsigned int iphdroff, 58 const struct nf_nat_l3proto *l3proto,
59 unsigned int iphdroff, unsigned int hdroff,
58 const struct nf_conntrack_tuple *tuple, 60 const struct nf_conntrack_tuple *tuple,
59 enum nf_nat_manip_type maniptype) 61 enum nf_nat_manip_type maniptype)
60{ 62{
61 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
62 struct icmphdr *hdr; 63 struct icmphdr *hdr;
63 unsigned int hdroff = iphdroff + iph->ihl*4;
64 64
65 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 65 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
66 return false; 66 return false;
@@ -72,12 +72,12 @@ icmp_manip_pkt(struct sk_buff *skb,
72 return true; 72 return true;
73} 73}
74 74
75const struct nf_nat_protocol nf_nat_protocol_icmp = { 75const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
76 .protonum = IPPROTO_ICMP, 76 .l4proto = IPPROTO_ICMP,
77 .manip_pkt = icmp_manip_pkt, 77 .manip_pkt = icmp_manip_pkt,
78 .in_range = icmp_in_range, 78 .in_range = icmp_in_range,
79 .unique_tuple = icmp_unique_tuple, 79 .unique_tuple = icmp_unique_tuple,
80#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 80#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
81 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 81 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
82#endif 82#endif
83}; 83};
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
deleted file mode 100644
index d2a9dc314e0e..000000000000
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ /dev/null
@@ -1,214 +0,0 @@
1/* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/* Everything about the rules for NAT. */
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11#include <linux/types.h>
12#include <linux/ip.h>
13#include <linux/netfilter.h>
14#include <linux/netfilter_ipv4.h>
15#include <linux/module.h>
16#include <linux/kmod.h>
17#include <linux/skbuff.h>
18#include <linux/proc_fs.h>
19#include <linux/slab.h>
20#include <net/checksum.h>
21#include <net/route.h>
22#include <linux/bitops.h>
23
24#include <linux/netfilter_ipv4/ip_tables.h>
25#include <net/netfilter/nf_nat.h>
26#include <net/netfilter/nf_nat_core.h>
27#include <net/netfilter/nf_nat_rule.h>
28
29#define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
30 (1 << NF_INET_POST_ROUTING) | \
31 (1 << NF_INET_LOCAL_OUT) | \
32 (1 << NF_INET_LOCAL_IN))
33
34static const struct xt_table nat_table = {
35 .name = "nat",
36 .valid_hooks = NAT_VALID_HOOKS,
37 .me = THIS_MODULE,
38 .af = NFPROTO_IPV4,
39};
40
41/* Source NAT */
42static unsigned int
43ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
44{
45 struct nf_conn *ct;
46 enum ip_conntrack_info ctinfo;
47 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
48
49 NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
50 par->hooknum == NF_INET_LOCAL_IN);
51
52 ct = nf_ct_get(skb, &ctinfo);
53
54 /* Connection must be valid and new. */
55 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
56 ctinfo == IP_CT_RELATED_REPLY));
57 NF_CT_ASSERT(par->out != NULL);
58
59 return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC);
60}
61
62static unsigned int
63ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
64{
65 struct nf_conn *ct;
66 enum ip_conntrack_info ctinfo;
67 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
68
69 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
70 par->hooknum == NF_INET_LOCAL_OUT);
71
72 ct = nf_ct_get(skb, &ctinfo);
73
74 /* Connection must be valid and new. */
75 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
76
77 return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST);
78}
79
80static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
81{
82 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
83
84 /* Must be a valid range */
85 if (mr->rangesize != 1) {
86 pr_info("SNAT: multiple ranges no longer supported\n");
87 return -EINVAL;
88 }
89 return 0;
90}
91
92static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
93{
94 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
95
96 /* Must be a valid range */
97 if (mr->rangesize != 1) {
98 pr_info("DNAT: multiple ranges no longer supported\n");
99 return -EINVAL;
100 }
101 return 0;
102}
103
104static unsigned int
105alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
106{
107 /* Force range to this IP; let proto decide mapping for
108 per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
109 */
110 struct nf_nat_ipv4_range range;
111
112 range.flags = 0;
113 pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
114 HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
115 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
116 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
117
118 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
119}
120
121int nf_nat_rule_find(struct sk_buff *skb,
122 unsigned int hooknum,
123 const struct net_device *in,
124 const struct net_device *out,
125 struct nf_conn *ct)
126{
127 struct net *net = nf_ct_net(ct);
128 int ret;
129
130 ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
131
132 if (ret == NF_ACCEPT) {
133 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
134 /* NUL mapping */
135 ret = alloc_null_binding(ct, hooknum);
136 }
137 return ret;
138}
139
140static struct xt_target ipt_snat_reg __read_mostly = {
141 .name = "SNAT",
142 .target = ipt_snat_target,
143 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
144 .table = "nat",
145 .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
146 .checkentry = ipt_snat_checkentry,
147 .family = AF_INET,
148};
149
150static struct xt_target ipt_dnat_reg __read_mostly = {
151 .name = "DNAT",
152 .target = ipt_dnat_target,
153 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
154 .table = "nat",
155 .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
156 .checkentry = ipt_dnat_checkentry,
157 .family = AF_INET,
158};
159
160static int __net_init nf_nat_rule_net_init(struct net *net)
161{
162 struct ipt_replace *repl;
163
164 repl = ipt_alloc_initial_table(&nat_table);
165 if (repl == NULL)
166 return -ENOMEM;
167 net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
168 kfree(repl);
169 if (IS_ERR(net->ipv4.nat_table))
170 return PTR_ERR(net->ipv4.nat_table);
171 return 0;
172}
173
174static void __net_exit nf_nat_rule_net_exit(struct net *net)
175{
176 ipt_unregister_table(net, net->ipv4.nat_table);
177}
178
179static struct pernet_operations nf_nat_rule_net_ops = {
180 .init = nf_nat_rule_net_init,
181 .exit = nf_nat_rule_net_exit,
182};
183
184int __init nf_nat_rule_init(void)
185{
186 int ret;
187
188 ret = register_pernet_subsys(&nf_nat_rule_net_ops);
189 if (ret != 0)
190 goto out;
191 ret = xt_register_target(&ipt_snat_reg);
192 if (ret != 0)
193 goto unregister_table;
194
195 ret = xt_register_target(&ipt_dnat_reg);
196 if (ret != 0)
197 goto unregister_snat;
198
199 return ret;
200
201 unregister_snat:
202 xt_unregister_target(&ipt_snat_reg);
203 unregister_table:
204 unregister_pernet_subsys(&nf_nat_rule_net_ops);
205 out:
206 return ret;
207}
208
209void nf_nat_rule_cleanup(void)
210{
211 xt_unregister_target(&ipt_dnat_reg);
212 xt_unregister_target(&ipt_snat_reg);
213 unregister_pernet_subsys(&nf_nat_rule_net_ops);
214}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 957acd12250b..8de53e1ddd54 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -263,6 +263,10 @@ static const struct snmp_mib snmp4_net_list[] = {
263 SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK), 263 SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
264 SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE), 264 SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
265 SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE), 265 SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
266 SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
267 SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
268 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
269 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
266 SNMP_MIB_SENTINEL 270 SNMP_MIB_SENTINEL
267}; 271};
268 272
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fd9af60397b5..ff622069fcef 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1111,10 +1111,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1111 const struct rtable *rt = (const struct rtable *) dst; 1111 const struct rtable *rt = (const struct rtable *) dst;
1112 unsigned int mtu = rt->rt_pmtu; 1112 unsigned int mtu = rt->rt_pmtu;
1113 1113
1114 if (mtu && time_after_eq(jiffies, rt->dst.expires)) 1114 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1115 mtu = 0;
1116
1117 if (!mtu)
1118 mtu = dst_metric_raw(dst, RTAX_MTU); 1115 mtu = dst_metric_raw(dst, RTAX_MTU);
1119 1116
1120 if (mtu && rt_is_output_route(rt)) 1117 if (mtu && rt_is_output_route(rt))
@@ -1566,11 +1563,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1566 if (ipv4_is_zeronet(daddr)) 1563 if (ipv4_is_zeronet(daddr))
1567 goto martian_destination; 1564 goto martian_destination;
1568 1565
1569 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) { 1566 /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1570 if (ipv4_is_loopback(daddr)) 1567 * and call it once if daddr or/and saddr are loopback addresses
1568 */
1569 if (ipv4_is_loopback(daddr)) {
1570 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1571 goto martian_destination; 1571 goto martian_destination;
1572 1572 } else if (ipv4_is_loopback(saddr)) {
1573 if (ipv4_is_loopback(saddr)) 1573 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1574 goto martian_source; 1574 goto martian_source;
1575 } 1575 }
1576 1576
@@ -1595,7 +1595,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1595 1595
1596 if (res.type == RTN_LOCAL) { 1596 if (res.type == RTN_LOCAL) {
1597 err = fib_validate_source(skb, saddr, daddr, tos, 1597 err = fib_validate_source(skb, saddr, daddr, tos,
1598 net->loopback_dev->ifindex, 1598 LOOPBACK_IFINDEX,
1599 dev, in_dev, &itag); 1599 dev, in_dev, &itag);
1600 if (err < 0) 1600 if (err < 0)
1601 goto martian_source_keep_err; 1601 goto martian_source_keep_err;
@@ -1871,7 +1871,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1871 1871
1872 orig_oif = fl4->flowi4_oif; 1872 orig_oif = fl4->flowi4_oif;
1873 1873
1874 fl4->flowi4_iif = net->loopback_dev->ifindex; 1874 fl4->flowi4_iif = LOOPBACK_IFINDEX;
1875 fl4->flowi4_tos = tos & IPTOS_RT_MASK; 1875 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
1876 fl4->flowi4_scope = ((tos & RTO_ONLINK) ? 1876 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
1877 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); 1877 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -1960,7 +1960,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1960 if (!fl4->daddr) 1960 if (!fl4->daddr)
1961 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK); 1961 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
1962 dev_out = net->loopback_dev; 1962 dev_out = net->loopback_dev;
1963 fl4->flowi4_oif = net->loopback_dev->ifindex; 1963 fl4->flowi4_oif = LOOPBACK_IFINDEX;
1964 res.type = RTN_LOCAL; 1964 res.type = RTN_LOCAL;
1965 flags |= RTCF_LOCAL; 1965 flags |= RTCF_LOCAL;
1966 goto make_route; 1966 goto make_route;
@@ -2131,7 +2131,7 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2131EXPORT_SYMBOL_GPL(ip_route_output_flow); 2131EXPORT_SYMBOL_GPL(ip_route_output_flow);
2132 2132
2133static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2133static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2134 struct flowi4 *fl4, struct sk_buff *skb, u32 pid, 2134 struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2135 u32 seq, int event, int nowait, unsigned int flags) 2135 u32 seq, int event, int nowait, unsigned int flags)
2136{ 2136{
2137 struct rtable *rt = skb_rtable(skb); 2137 struct rtable *rt = skb_rtable(skb);
@@ -2141,7 +2141,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2141 u32 error; 2141 u32 error;
2142 u32 metrics[RTAX_MAX]; 2142 u32 metrics[RTAX_MAX];
2143 2143
2144 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2144 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
2145 if (nlh == NULL) 2145 if (nlh == NULL)
2146 return -EMSGSIZE; 2146 return -EMSGSIZE;
2147 2147
@@ -2301,12 +2301,12 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
2301 rt->rt_flags |= RTCF_NOTIFY; 2301 rt->rt_flags |= RTCF_NOTIFY;
2302 2302
2303 err = rt_fill_info(net, dst, src, &fl4, skb, 2303 err = rt_fill_info(net, dst, src, &fl4, skb,
2304 NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 2304 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2305 RTM_NEWROUTE, 0, 0); 2305 RTM_NEWROUTE, 0, 0);
2306 if (err <= 0) 2306 if (err <= 0)
2307 goto errout_free; 2307 goto errout_free;
2308 2308
2309 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); 2309 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2310errout: 2310errout:
2311 return err; 2311 return err;
2312 2312
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650e1528e1e6..ba48e799b031 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -319,6 +319,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
319 ireq->tstamp_ok = tcp_opt.saw_tstamp; 319 ireq->tstamp_ok = tcp_opt.saw_tstamp;
320 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; 320 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
321 treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0; 321 treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
322 treq->listener = NULL;
322 323
323 /* We throwed the options of the initial SYN away, so we hope 324 /* We throwed the options of the initial SYN away, so we hope
324 * the ACK carries the same options again (see RFC1122 4.2.3.8) 325 * the ACK carries the same options again (see RFC1122 4.2.3.8)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 3e78c79b5586..9205e492dc9d 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -232,6 +232,45 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
232 return 0; 232 return 0;
233} 233}
234 234
235int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
236 size_t *lenp, loff_t *ppos)
237{
238 ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
239 struct tcp_fastopen_context *ctxt;
240 int ret;
241 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
242
243 tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
244 if (!tbl.data)
245 return -ENOMEM;
246
247 rcu_read_lock();
248 ctxt = rcu_dereference(tcp_fastopen_ctx);
249 if (ctxt)
250 memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
251 rcu_read_unlock();
252
253 snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
254 user_key[0], user_key[1], user_key[2], user_key[3]);
255 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
256
257 if (write && ret == 0) {
258 if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
259 user_key + 2, user_key + 3) != 4) {
260 ret = -EINVAL;
261 goto bad_key;
262 }
263 tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
264 }
265
266bad_key:
267 pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
268 user_key[0], user_key[1], user_key[2], user_key[3],
269 (char *)tbl.data, ret);
270 kfree(tbl.data);
271 return ret;
272}
273
235static struct ctl_table ipv4_table[] = { 274static struct ctl_table ipv4_table[] = {
236 { 275 {
237 .procname = "tcp_timestamps", 276 .procname = "tcp_timestamps",
@@ -386,6 +425,12 @@ static struct ctl_table ipv4_table[] = {
386 .proc_handler = proc_dointvec, 425 .proc_handler = proc_dointvec,
387 }, 426 },
388 { 427 {
428 .procname = "tcp_fastopen_key",
429 .mode = 0600,
430 .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
431 .proc_handler = proc_tcp_fastopen_key,
432 },
433 {
389 .procname = "tcp_tw_recycle", 434 .procname = "tcp_tw_recycle",
390 .data = &tcp_death_row.sysctl_tw_recycle, 435 .data = &tcp_death_row.sysctl_tw_recycle,
391 .maxlen = sizeof(int), 436 .maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5f6419341821..f32c02e2a543 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -486,8 +486,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
486 if (sk->sk_shutdown & RCV_SHUTDOWN) 486 if (sk->sk_shutdown & RCV_SHUTDOWN)
487 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 487 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
488 488
489 /* Connected? */ 489 /* Connected or passive Fast Open socket? */
490 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { 490 if (sk->sk_state != TCP_SYN_SENT &&
491 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
491 int target = sock_rcvlowat(sk, 0, INT_MAX); 492 int target = sock_rcvlowat(sk, 0, INT_MAX);
492 493
493 if (tp->urg_seq == tp->copied_seq && 494 if (tp->urg_seq == tp->copied_seq &&
@@ -840,10 +841,15 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
840 ssize_t copied; 841 ssize_t copied;
841 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 842 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
842 843
843 /* Wait for a connection to finish. */ 844 /* Wait for a connection to finish. One exception is TCP Fast Open
844 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 845 * (passive side) where data is allowed to be sent before a connection
846 * is fully established.
847 */
848 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
849 !tcp_passive_fastopen(sk)) {
845 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 850 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
846 goto out_err; 851 goto out_err;
852 }
847 853
848 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 854 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
849 855
@@ -1042,10 +1048,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1042 1048
1043 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1049 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1044 1050
1045 /* Wait for a connection to finish. */ 1051 /* Wait for a connection to finish. One exception is TCP Fast Open
1046 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 1052 * (passive side) where data is allowed to be sent before a connection
1053 * is fully established.
1054 */
1055 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1056 !tcp_passive_fastopen(sk)) {
1047 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 1057 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1048 goto do_error; 1058 goto do_error;
1059 }
1049 1060
1050 if (unlikely(tp->repair)) { 1061 if (unlikely(tp->repair)) {
1051 if (tp->repair_queue == TCP_RECV_QUEUE) { 1062 if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -1139,78 +1150,43 @@ new_segment:
1139 if (err) 1150 if (err)
1140 goto do_fault; 1151 goto do_fault;
1141 } else { 1152 } else {
1142 bool merge = false; 1153 bool merge = true;
1143 int i = skb_shinfo(skb)->nr_frags; 1154 int i = skb_shinfo(skb)->nr_frags;
1144 struct page *page = sk->sk_sndmsg_page; 1155 struct page_frag *pfrag = sk_page_frag(sk);
1145 int off; 1156
1146 1157 if (!sk_page_frag_refill(sk, pfrag))
1147 if (page && page_count(page) == 1) 1158 goto wait_for_memory;
1148 sk->sk_sndmsg_off = 0; 1159
1149 1160 if (!skb_can_coalesce(skb, i, pfrag->page,
1150 off = sk->sk_sndmsg_off; 1161 pfrag->offset)) {
1151 1162 if (i == MAX_SKB_FRAGS || !sg) {
1152 if (skb_can_coalesce(skb, i, page, off) && 1163 tcp_mark_push(tp, skb);
1153 off != PAGE_SIZE) { 1164 goto new_segment;
1154 /* We can extend the last page
1155 * fragment. */
1156 merge = true;
1157 } else if (i == MAX_SKB_FRAGS || !sg) {
1158 /* Need to add new fragment and cannot
1159 * do this because interface is non-SG,
1160 * or because all the page slots are
1161 * busy. */
1162 tcp_mark_push(tp, skb);
1163 goto new_segment;
1164 } else if (page) {
1165 if (off == PAGE_SIZE) {
1166 put_page(page);
1167 sk->sk_sndmsg_page = page = NULL;
1168 off = 0;
1169 } 1165 }
1170 } else 1166 merge = false;
1171 off = 0; 1167 }
1172 1168
1173 if (copy > PAGE_SIZE - off) 1169 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1174 copy = PAGE_SIZE - off;
1175 1170
1176 if (!sk_wmem_schedule(sk, copy)) 1171 if (!sk_wmem_schedule(sk, copy))
1177 goto wait_for_memory; 1172 goto wait_for_memory;
1178 1173
1179 if (!page) {
1180 /* Allocate new cache page. */
1181 if (!(page = sk_stream_alloc_page(sk)))
1182 goto wait_for_memory;
1183 }
1184
1185 /* Time to copy data. We are close to
1186 * the end! */
1187 err = skb_copy_to_page_nocache(sk, from, skb, 1174 err = skb_copy_to_page_nocache(sk, from, skb,
1188 page, off, copy); 1175 pfrag->page,
1189 if (err) { 1176 pfrag->offset,
1190 /* If this page was new, give it to the 1177 copy);
1191 * socket so it does not get leaked. 1178 if (err)
1192 */
1193 if (!sk->sk_sndmsg_page) {
1194 sk->sk_sndmsg_page = page;
1195 sk->sk_sndmsg_off = 0;
1196 }
1197 goto do_error; 1179 goto do_error;
1198 }
1199 1180
1200 /* Update the skb. */ 1181 /* Update the skb. */
1201 if (merge) { 1182 if (merge) {
1202 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1183 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1203 } else { 1184 } else {
1204 skb_fill_page_desc(skb, i, page, off, copy); 1185 skb_fill_page_desc(skb, i, pfrag->page,
1205 if (sk->sk_sndmsg_page) { 1186 pfrag->offset, copy);
1206 get_page(page); 1187 get_page(pfrag->page);
1207 } else if (off + copy < PAGE_SIZE) {
1208 get_page(page);
1209 sk->sk_sndmsg_page = page;
1210 }
1211 } 1188 }
1212 1189 pfrag->offset += copy;
1213 sk->sk_sndmsg_off = off + copy;
1214 } 1190 }
1215 1191
1216 if (!copied) 1192 if (!copied)
@@ -2150,6 +2126,10 @@ void tcp_close(struct sock *sk, long timeout)
2150 * they look as CLOSING or LAST_ACK for Linux) 2126 * they look as CLOSING or LAST_ACK for Linux)
2151 * Probably, I missed some more holelets. 2127 * Probably, I missed some more holelets.
2152 * --ANK 2128 * --ANK
2129 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2130 * in a single packet! (May consider it later but will
2131 * probably need API support or TCP_CORK SYN-ACK until
2132 * data is written and socket is closed.)
2153 */ 2133 */
2154 tcp_send_fin(sk); 2134 tcp_send_fin(sk);
2155 } 2135 }
@@ -2221,8 +2201,16 @@ adjudge_to_death:
2221 } 2201 }
2222 } 2202 }
2223 2203
2224 if (sk->sk_state == TCP_CLOSE) 2204 if (sk->sk_state == TCP_CLOSE) {
2205 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2206 /* We could get here with a non-NULL req if the socket is
2207 * aborted (e.g., closed with unread data) before 3WHS
2208 * finishes.
2209 */
2210 if (req != NULL)
2211 reqsk_fastopen_remove(sk, req, false);
2225 inet_csk_destroy_sock(sk); 2212 inet_csk_destroy_sock(sk);
2213 }
2226 /* Otherwise, socket is reprieved until protocol close. */ 2214 /* Otherwise, socket is reprieved until protocol close. */
2227 2215
2228out: 2216out:
@@ -2308,6 +2296,13 @@ int tcp_disconnect(struct sock *sk, int flags)
2308} 2296}
2309EXPORT_SYMBOL(tcp_disconnect); 2297EXPORT_SYMBOL(tcp_disconnect);
2310 2298
2299void tcp_sock_destruct(struct sock *sk)
2300{
2301 inet_sock_destruct(sk);
2302
2303 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
2304}
2305
2311static inline bool tcp_can_repair_sock(const struct sock *sk) 2306static inline bool tcp_can_repair_sock(const struct sock *sk)
2312{ 2307{
2313 return capable(CAP_NET_ADMIN) && 2308 return capable(CAP_NET_ADMIN) &&
@@ -2701,6 +2696,14 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2701 else 2696 else
2702 icsk->icsk_user_timeout = msecs_to_jiffies(val); 2697 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2703 break; 2698 break;
2699
2700 case TCP_FASTOPEN:
2701 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2702 TCPF_LISTEN)))
2703 err = fastopen_init_queue(sk, val);
2704 else
2705 err = -EINVAL;
2706 break;
2704 default: 2707 default:
2705 err = -ENOPROTOOPT; 2708 err = -ENOPROTOOPT;
2706 break; 2709 break;
@@ -3514,11 +3517,15 @@ EXPORT_SYMBOL(tcp_cookie_generator);
3514 3517
3515void tcp_done(struct sock *sk) 3518void tcp_done(struct sock *sk)
3516{ 3519{
3520 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3521
3517 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 3522 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3518 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 3523 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3519 3524
3520 tcp_set_state(sk, TCP_CLOSE); 3525 tcp_set_state(sk, TCP_CLOSE);
3521 tcp_clear_xmit_timers(sk); 3526 tcp_clear_xmit_timers(sk);
3527 if (req != NULL)
3528 reqsk_fastopen_remove(sk, req, false);
3522 3529
3523 sk->sk_shutdown = SHUTDOWN_MASK; 3530 sk->sk_shutdown = SHUTDOWN_MASK;
3524 3531
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index a7f729c409d7..8f7ef0ad80e5 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -1,10 +1,91 @@
1#include <linux/err.h>
1#include <linux/init.h> 2#include <linux/init.h>
2#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/list.h>
5#include <linux/tcp.h>
6#include <linux/rcupdate.h>
7#include <linux/rculist.h>
8#include <net/inetpeer.h>
9#include <net/tcp.h>
3 10
4int sysctl_tcp_fastopen; 11int sysctl_tcp_fastopen __read_mostly;
12
13struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
14
15static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
16
17static void tcp_fastopen_ctx_free(struct rcu_head *head)
18{
19 struct tcp_fastopen_context *ctx =
20 container_of(head, struct tcp_fastopen_context, rcu);
21 crypto_free_cipher(ctx->tfm);
22 kfree(ctx);
23}
24
25int tcp_fastopen_reset_cipher(void *key, unsigned int len)
26{
27 int err;
28 struct tcp_fastopen_context *ctx, *octx;
29
30 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
31 if (!ctx)
32 return -ENOMEM;
33 ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
34
35 if (IS_ERR(ctx->tfm)) {
36 err = PTR_ERR(ctx->tfm);
37error: kfree(ctx);
38 pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
39 return err;
40 }
41 err = crypto_cipher_setkey(ctx->tfm, key, len);
42 if (err) {
43 pr_err("TCP: TFO cipher key error: %d\n", err);
44 crypto_free_cipher(ctx->tfm);
45 goto error;
46 }
47 memcpy(ctx->key, key, len);
48
49 spin_lock(&tcp_fastopen_ctx_lock);
50
51 octx = rcu_dereference_protected(tcp_fastopen_ctx,
52 lockdep_is_held(&tcp_fastopen_ctx_lock));
53 rcu_assign_pointer(tcp_fastopen_ctx, ctx);
54 spin_unlock(&tcp_fastopen_ctx_lock);
55
56 if (octx)
57 call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
58 return err;
59}
60
61/* Computes the fastopen cookie for the peer.
62 * The peer address is a 128 bits long (pad with zeros for IPv4).
63 *
64 * The caller must check foc->len to determine if a valid cookie
65 * has been generated successfully.
66*/
67void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
68{
69 __be32 peer_addr[4] = { addr, 0, 0, 0 };
70 struct tcp_fastopen_context *ctx;
71
72 rcu_read_lock();
73 ctx = rcu_dereference(tcp_fastopen_ctx);
74 if (ctx) {
75 crypto_cipher_encrypt_one(ctx->tfm,
76 foc->val,
77 (__u8 *)peer_addr);
78 foc->len = TCP_FASTOPEN_COOKIE_SIZE;
79 }
80 rcu_read_unlock();
81}
5 82
6static int __init tcp_fastopen_init(void) 83static int __init tcp_fastopen_init(void)
7{ 84{
85 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
86
87 get_random_bytes(key, sizeof(key));
88 tcp_fastopen_reset_cipher(key, sizeof(key));
8 return 0; 89 return 0;
9} 90}
10 91
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d377f4854cb8..432c36649db3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -237,7 +237,11 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
237 tcp_enter_quickack_mode((struct sock *)tp); 237 tcp_enter_quickack_mode((struct sock *)tp);
238 break; 238 break;
239 case INET_ECN_CE: 239 case INET_ECN_CE:
240 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; 240 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
241 /* Better not delay acks, sender can have a very low cwnd */
242 tcp_enter_quickack_mode((struct sock *)tp);
243 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
244 }
241 /* fallinto */ 245 /* fallinto */
242 default: 246 default:
243 tp->ecn_flags |= TCP_ECN_SEEN; 247 tp->ecn_flags |= TCP_ECN_SEEN;
@@ -374,7 +378,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
374/* 4. Try to fixup all. It is made immediately after connection enters 378/* 4. Try to fixup all. It is made immediately after connection enters
375 * established state. 379 * established state.
376 */ 380 */
377static void tcp_init_buffer_space(struct sock *sk) 381void tcp_init_buffer_space(struct sock *sk)
378{ 382{
379 struct tcp_sock *tp = tcp_sk(sk); 383 struct tcp_sock *tp = tcp_sk(sk);
380 int maxwin; 384 int maxwin;
@@ -739,29 +743,6 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
739 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); 743 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
740} 744}
741 745
742/* Set slow start threshold and cwnd not falling to slow start */
743void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
744{
745 struct tcp_sock *tp = tcp_sk(sk);
746 const struct inet_connection_sock *icsk = inet_csk(sk);
747
748 tp->prior_ssthresh = 0;
749 tp->bytes_acked = 0;
750 if (icsk->icsk_ca_state < TCP_CA_CWR) {
751 tp->undo_marker = 0;
752 if (set_ssthresh)
753 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
754 tp->snd_cwnd = min(tp->snd_cwnd,
755 tcp_packets_in_flight(tp) + 1U);
756 tp->snd_cwnd_cnt = 0;
757 tp->high_seq = tp->snd_nxt;
758 tp->snd_cwnd_stamp = tcp_time_stamp;
759 TCP_ECN_queue_cwr(tp);
760
761 tcp_set_ca_state(sk, TCP_CA_CWR);
762 }
763}
764
765/* 746/*
766 * Packet counting of FACK is based on in-order assumptions, therefore TCP 747 * Packet counting of FACK is based on in-order assumptions, therefore TCP
767 * disables it when reordering is detected 748 * disables it when reordering is detected
@@ -2489,35 +2470,6 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
2489 tp->snd_cwnd_stamp = tcp_time_stamp; 2470 tp->snd_cwnd_stamp = tcp_time_stamp;
2490} 2471}
2491 2472
2492/* Lower bound on congestion window is slow start threshold
2493 * unless congestion avoidance choice decides to overide it.
2494 */
2495static inline u32 tcp_cwnd_min(const struct sock *sk)
2496{
2497 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2498
2499 return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
2500}
2501
2502/* Decrease cwnd each second ack. */
2503static void tcp_cwnd_down(struct sock *sk, int flag)
2504{
2505 struct tcp_sock *tp = tcp_sk(sk);
2506 int decr = tp->snd_cwnd_cnt + 1;
2507
2508 if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
2509 (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
2510 tp->snd_cwnd_cnt = decr & 1;
2511 decr >>= 1;
2512
2513 if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
2514 tp->snd_cwnd -= decr;
2515
2516 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
2517 tp->snd_cwnd_stamp = tcp_time_stamp;
2518 }
2519}
2520
2521/* Nothing was retransmitted or returned timestamp is less 2473/* Nothing was retransmitted or returned timestamp is less
2522 * than timestamp of the first retransmission. 2474 * than timestamp of the first retransmission.
2523 */ 2475 */
@@ -2719,24 +2671,80 @@ static bool tcp_try_undo_loss(struct sock *sk)
2719 return false; 2671 return false;
2720} 2672}
2721 2673
2722static inline void tcp_complete_cwr(struct sock *sk) 2674/* The cwnd reduction in CWR and Recovery use the PRR algorithm
2675 * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
2676 * It computes the number of packets to send (sndcnt) based on packets newly
2677 * delivered:
2678 * 1) If the packets in flight is larger than ssthresh, PRR spreads the
2679 * cwnd reductions across a full RTT.
2680 * 2) If packets in flight is lower than ssthresh (such as due to excess
2681 * losses and/or application stalls), do not perform any further cwnd
2682 * reductions, but instead slow start up to ssthresh.
2683 */
2684static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
2723{ 2685{
2724 struct tcp_sock *tp = tcp_sk(sk); 2686 struct tcp_sock *tp = tcp_sk(sk);
2725 2687
2726 /* Do not moderate cwnd if it's already undone in cwr or recovery. */ 2688 tp->high_seq = tp->snd_nxt;
2727 if (tp->undo_marker) { 2689 tp->bytes_acked = 0;
2728 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { 2690 tp->snd_cwnd_cnt = 0;
2729 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2691 tp->prior_cwnd = tp->snd_cwnd;
2730 tp->snd_cwnd_stamp = tcp_time_stamp; 2692 tp->prr_delivered = 0;
2731 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { 2693 tp->prr_out = 0;
2732 /* PRR algorithm. */ 2694 if (set_ssthresh)
2733 tp->snd_cwnd = tp->snd_ssthresh; 2695 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
2734 tp->snd_cwnd_stamp = tcp_time_stamp; 2696 TCP_ECN_queue_cwr(tp);
2735 } 2697}
2698
2699static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
2700 int fast_rexmit)
2701{
2702 struct tcp_sock *tp = tcp_sk(sk);
2703 int sndcnt = 0;
2704 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2705
2706 tp->prr_delivered += newly_acked_sacked;
2707 if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
2708 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2709 tp->prior_cwnd - 1;
2710 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2711 } else {
2712 sndcnt = min_t(int, delta,
2713 max_t(int, tp->prr_delivered - tp->prr_out,
2714 newly_acked_sacked) + 1);
2715 }
2716
2717 sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
2718 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
2719}
2720
2721static inline void tcp_end_cwnd_reduction(struct sock *sk)
2722{
2723 struct tcp_sock *tp = tcp_sk(sk);
2724
2725 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2726 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
2727 (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
2728 tp->snd_cwnd = tp->snd_ssthresh;
2729 tp->snd_cwnd_stamp = tcp_time_stamp;
2736 } 2730 }
2737 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2731 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2738} 2732}
2739 2733
2734/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
2735void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
2736{
2737 struct tcp_sock *tp = tcp_sk(sk);
2738
2739 tp->prior_ssthresh = 0;
2740 tp->bytes_acked = 0;
2741 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2742 tp->undo_marker = 0;
2743 tcp_init_cwnd_reduction(sk, set_ssthresh);
2744 tcp_set_ca_state(sk, TCP_CA_CWR);
2745 }
2746}
2747
2740static void tcp_try_keep_open(struct sock *sk) 2748static void tcp_try_keep_open(struct sock *sk)
2741{ 2749{
2742 struct tcp_sock *tp = tcp_sk(sk); 2750 struct tcp_sock *tp = tcp_sk(sk);
@@ -2751,7 +2759,7 @@ static void tcp_try_keep_open(struct sock *sk)
2751 } 2759 }
2752} 2760}
2753 2761
2754static void tcp_try_to_open(struct sock *sk, int flag) 2762static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
2755{ 2763{
2756 struct tcp_sock *tp = tcp_sk(sk); 2764 struct tcp_sock *tp = tcp_sk(sk);
2757 2765
@@ -2768,7 +2776,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
2768 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 2776 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
2769 tcp_moderate_cwnd(tp); 2777 tcp_moderate_cwnd(tp);
2770 } else { 2778 } else {
2771 tcp_cwnd_down(sk, flag); 2779 tcp_cwnd_reduction(sk, newly_acked_sacked, 0);
2772 } 2780 }
2773} 2781}
2774 2782
@@ -2850,38 +2858,6 @@ void tcp_simple_retransmit(struct sock *sk)
2850} 2858}
2851EXPORT_SYMBOL(tcp_simple_retransmit); 2859EXPORT_SYMBOL(tcp_simple_retransmit);
2852 2860
2853/* This function implements the PRR algorithm, specifcally the PRR-SSRB
2854 * (proportional rate reduction with slow start reduction bound) as described in
2855 * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
2856 * It computes the number of packets to send (sndcnt) based on packets newly
2857 * delivered:
2858 * 1) If the packets in flight is larger than ssthresh, PRR spreads the
2859 * cwnd reductions across a full RTT.
2860 * 2) If packets in flight is lower than ssthresh (such as due to excess
2861 * losses and/or application stalls), do not perform any further cwnd
2862 * reductions, but instead slow start up to ssthresh.
2863 */
2864static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
2865 int fast_rexmit, int flag)
2866{
2867 struct tcp_sock *tp = tcp_sk(sk);
2868 int sndcnt = 0;
2869 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2870
2871 if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
2872 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2873 tp->prior_cwnd - 1;
2874 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2875 } else {
2876 sndcnt = min_t(int, delta,
2877 max_t(int, tp->prr_delivered - tp->prr_out,
2878 newly_acked_sacked) + 1);
2879 }
2880
2881 sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
2882 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
2883}
2884
2885static void tcp_enter_recovery(struct sock *sk, bool ece_ack) 2861static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2886{ 2862{
2887 struct tcp_sock *tp = tcp_sk(sk); 2863 struct tcp_sock *tp = tcp_sk(sk);
@@ -2894,7 +2870,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2894 2870
2895 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2871 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2896 2872
2897 tp->high_seq = tp->snd_nxt;
2898 tp->prior_ssthresh = 0; 2873 tp->prior_ssthresh = 0;
2899 tp->undo_marker = tp->snd_una; 2874 tp->undo_marker = tp->snd_una;
2900 tp->undo_retrans = tp->retrans_out; 2875 tp->undo_retrans = tp->retrans_out;
@@ -2902,15 +2877,8 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2902 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2877 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2903 if (!ece_ack) 2878 if (!ece_ack)
2904 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2879 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2905 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); 2880 tcp_init_cwnd_reduction(sk, true);
2906 TCP_ECN_queue_cwr(tp);
2907 } 2881 }
2908
2909 tp->bytes_acked = 0;
2910 tp->snd_cwnd_cnt = 0;
2911 tp->prior_cwnd = tp->snd_cwnd;
2912 tp->prr_delivered = 0;
2913 tp->prr_out = 0;
2914 tcp_set_ca_state(sk, TCP_CA_Recovery); 2882 tcp_set_ca_state(sk, TCP_CA_Recovery);
2915} 2883}
2916 2884
@@ -2970,7 +2938,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2970 /* CWR is to be held something *above* high_seq 2938 /* CWR is to be held something *above* high_seq
2971 * is ACKed for CWR bit to reach receiver. */ 2939 * is ACKed for CWR bit to reach receiver. */
2972 if (tp->snd_una != tp->high_seq) { 2940 if (tp->snd_una != tp->high_seq) {
2973 tcp_complete_cwr(sk); 2941 tcp_end_cwnd_reduction(sk);
2974 tcp_set_ca_state(sk, TCP_CA_Open); 2942 tcp_set_ca_state(sk, TCP_CA_Open);
2975 } 2943 }
2976 break; 2944 break;
@@ -2980,7 +2948,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2980 tcp_reset_reno_sack(tp); 2948 tcp_reset_reno_sack(tp);
2981 if (tcp_try_undo_recovery(sk)) 2949 if (tcp_try_undo_recovery(sk))
2982 return; 2950 return;
2983 tcp_complete_cwr(sk); 2951 tcp_end_cwnd_reduction(sk);
2984 break; 2952 break;
2985 } 2953 }
2986 } 2954 }
@@ -3021,7 +2989,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3021 tcp_try_undo_dsack(sk); 2989 tcp_try_undo_dsack(sk);
3022 2990
3023 if (!tcp_time_to_recover(sk, flag)) { 2991 if (!tcp_time_to_recover(sk, flag)) {
3024 tcp_try_to_open(sk, flag); 2992 tcp_try_to_open(sk, flag, newly_acked_sacked);
3025 return; 2993 return;
3026 } 2994 }
3027 2995
@@ -3043,8 +3011,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3043 3011
3044 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 3012 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
3045 tcp_update_scoreboard(sk, fast_rexmit); 3013 tcp_update_scoreboard(sk, fast_rexmit);
3046 tp->prr_delivered += newly_acked_sacked; 3014 tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit);
3047 tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
3048 tcp_xmit_retransmit_queue(sk); 3015 tcp_xmit_retransmit_queue(sk);
3049} 3016}
3050 3017
@@ -3123,6 +3090,12 @@ void tcp_rearm_rto(struct sock *sk)
3123{ 3090{
3124 struct tcp_sock *tp = tcp_sk(sk); 3091 struct tcp_sock *tp = tcp_sk(sk);
3125 3092
3093 /* If the retrans timer is currently being used by Fast Open
3094 * for SYN-ACK retrans purpose, stay put.
3095 */
3096 if (tp->fastopen_rsk)
3097 return;
3098
3126 if (!tp->packets_out) { 3099 if (!tp->packets_out) {
3127 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3100 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3128 } else { 3101 } else {
@@ -3384,7 +3357,7 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3384{ 3357{
3385 const struct tcp_sock *tp = tcp_sk(sk); 3358 const struct tcp_sock *tp = tcp_sk(sk);
3386 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 3359 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
3387 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); 3360 !tcp_in_cwnd_reduction(sk);
3388} 3361}
3389 3362
3390/* Check that window update is acceptable. 3363/* Check that window update is acceptable.
@@ -3452,9 +3425,9 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
3452} 3425}
3453 3426
3454/* A conservative spurious RTO response algorithm: reduce cwnd using 3427/* A conservative spurious RTO response algorithm: reduce cwnd using
3455 * rate halving and continue in congestion avoidance. 3428 * PRR and continue in congestion avoidance.
3456 */ 3429 */
3457static void tcp_ratehalving_spur_to_response(struct sock *sk) 3430static void tcp_cwr_spur_to_response(struct sock *sk)
3458{ 3431{
3459 tcp_enter_cwr(sk, 0); 3432 tcp_enter_cwr(sk, 0);
3460} 3433}
@@ -3462,7 +3435,7 @@ static void tcp_ratehalving_spur_to_response(struct sock *sk)
3462static void tcp_undo_spur_to_response(struct sock *sk, int flag) 3435static void tcp_undo_spur_to_response(struct sock *sk, int flag)
3463{ 3436{
3464 if (flag & FLAG_ECE) 3437 if (flag & FLAG_ECE)
3465 tcp_ratehalving_spur_to_response(sk); 3438 tcp_cwr_spur_to_response(sk);
3466 else 3439 else
3467 tcp_undo_cwr(sk, true); 3440 tcp_undo_cwr(sk, true);
3468} 3441}
@@ -3569,7 +3542,7 @@ static bool tcp_process_frto(struct sock *sk, int flag)
3569 tcp_conservative_spur_to_response(tp); 3542 tcp_conservative_spur_to_response(tp);
3570 break; 3543 break;
3571 default: 3544 default:
3572 tcp_ratehalving_spur_to_response(sk); 3545 tcp_cwr_spur_to_response(sk);
3573 break; 3546 break;
3574 } 3547 }
3575 tp->frto_counter = 0; 3548 tp->frto_counter = 0;
@@ -4034,7 +4007,7 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
4034} 4007}
4035 4008
4036/* When we get a reset we do this. */ 4009/* When we get a reset we do this. */
4037static void tcp_reset(struct sock *sk) 4010void tcp_reset(struct sock *sk)
4038{ 4011{
4039 /* We want the right error as BSD sees it (and indeed as we do). */ 4012 /* We want the right error as BSD sees it (and indeed as we do). */
4040 switch (sk->sk_state) { 4013 switch (sk->sk_state) {
@@ -5740,7 +5713,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5740 5713
5741 TCP_ECN_rcv_synack(tp, th); 5714 TCP_ECN_rcv_synack(tp, th);
5742 5715
5743 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5716 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
5744 tcp_ack(sk, skb, FLAG_SLOWPATH); 5717 tcp_ack(sk, skb, FLAG_SLOWPATH);
5745 5718
5746 /* Ok.. it's good. Set up sequence numbers and 5719 /* Ok.. it's good. Set up sequence numbers and
@@ -5753,7 +5726,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5753 * never scaled. 5726 * never scaled.
5754 */ 5727 */
5755 tp->snd_wnd = ntohs(th->window); 5728 tp->snd_wnd = ntohs(th->window);
5756 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
5757 5729
5758 if (!tp->rx_opt.wscale_ok) { 5730 if (!tp->rx_opt.wscale_ok) {
5759 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 5731 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5891,7 +5863,9 @@ discard:
5891 tcp_send_synack(sk); 5863 tcp_send_synack(sk);
5892#if 0 5864#if 0
5893 /* Note, we could accept data and URG from this segment. 5865 /* Note, we could accept data and URG from this segment.
5894 * There are no obstacles to make this. 5866 * There are no obstacles to make this (except that we must
5867 * either change tcp_recvmsg() to prevent it from returning data
5868 * before 3WHS completes per RFC793, or employ TCP Fast Open).
5895 * 5869 *
5896 * However, if we ignore data in ACKless segments sometimes, 5870 * However, if we ignore data in ACKless segments sometimes,
5897 * we have no reasons to accept it sometimes. 5871 * we have no reasons to accept it sometimes.
@@ -5931,6 +5905,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5931{ 5905{
5932 struct tcp_sock *tp = tcp_sk(sk); 5906 struct tcp_sock *tp = tcp_sk(sk);
5933 struct inet_connection_sock *icsk = inet_csk(sk); 5907 struct inet_connection_sock *icsk = inet_csk(sk);
5908 struct request_sock *req;
5934 int queued = 0; 5909 int queued = 0;
5935 5910
5936 tp->rx_opt.saw_tstamp = 0; 5911 tp->rx_opt.saw_tstamp = 0;
@@ -5986,6 +5961,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5986 return 0; 5961 return 0;
5987 } 5962 }
5988 5963
5964 req = tp->fastopen_rsk;
5965 if (req != NULL) {
5966 BUG_ON(sk->sk_state != TCP_SYN_RECV &&
5967 sk->sk_state != TCP_FIN_WAIT1);
5968
5969 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
5970 goto discard;
5971 }
5989 if (!tcp_validate_incoming(sk, skb, th, 0)) 5972 if (!tcp_validate_incoming(sk, skb, th, 0))
5990 return 0; 5973 return 0;
5991 5974
@@ -5996,7 +5979,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5996 switch (sk->sk_state) { 5979 switch (sk->sk_state) {
5997 case TCP_SYN_RECV: 5980 case TCP_SYN_RECV:
5998 if (acceptable) { 5981 if (acceptable) {
5999 tp->copied_seq = tp->rcv_nxt; 5982 /* Once we leave TCP_SYN_RECV, we no longer
5983 * need req so release it.
5984 */
5985 if (req) {
5986 tcp_synack_rtt_meas(sk, req);
5987 tp->total_retrans = req->retrans;
5988
5989 reqsk_fastopen_remove(sk, req, false);
5990 } else {
5991 /* Make sure socket is routed, for
5992 * correct metrics.
5993 */
5994 icsk->icsk_af_ops->rebuild_header(sk);
5995 tcp_init_congestion_control(sk);
5996
5997 tcp_mtup_init(sk);
5998 tcp_init_buffer_space(sk);
5999 tp->copied_seq = tp->rcv_nxt;
6000 }
6000 smp_mb(); 6001 smp_mb();
6001 tcp_set_state(sk, TCP_ESTABLISHED); 6002 tcp_set_state(sk, TCP_ESTABLISHED);
6002 sk->sk_state_change(sk); 6003 sk->sk_state_change(sk);
@@ -6018,23 +6019,27 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6018 if (tp->rx_opt.tstamp_ok) 6019 if (tp->rx_opt.tstamp_ok)
6019 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 6020 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6020 6021
6021 /* Make sure socket is routed, for 6022 if (req) {
6022 * correct metrics. 6023 /* Re-arm the timer because data may
6023 */ 6024 * have been sent out. This is similar
6024 icsk->icsk_af_ops->rebuild_header(sk); 6025 * to the regular data transmission case
6025 6026 * when new data has just been ack'ed.
6026 tcp_init_metrics(sk); 6027 *
6027 6028 * (TFO) - we could try to be more
6028 tcp_init_congestion_control(sk); 6029 * aggressive and retranmitting any data
6030 * sooner based on when they were sent
6031 * out.
6032 */
6033 tcp_rearm_rto(sk);
6034 } else
6035 tcp_init_metrics(sk);
6029 6036
6030 /* Prevent spurious tcp_cwnd_restart() on 6037 /* Prevent spurious tcp_cwnd_restart() on
6031 * first data packet. 6038 * first data packet.
6032 */ 6039 */
6033 tp->lsndtime = tcp_time_stamp; 6040 tp->lsndtime = tcp_time_stamp;
6034 6041
6035 tcp_mtup_init(sk);
6036 tcp_initialize_rcv_mss(sk); 6042 tcp_initialize_rcv_mss(sk);
6037 tcp_init_buffer_space(sk);
6038 tcp_fast_path_on(tp); 6043 tcp_fast_path_on(tp);
6039 } else { 6044 } else {
6040 return 1; 6045 return 1;
@@ -6042,6 +6047,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6042 break; 6047 break;
6043 6048
6044 case TCP_FIN_WAIT1: 6049 case TCP_FIN_WAIT1:
6050 /* If we enter the TCP_FIN_WAIT1 state and we are a
6051 * Fast Open socket and this is the first acceptable
6052 * ACK we have received, this would have acknowledged
6053 * our SYNACK so stop the SYNACK timer.
6054 */
6055 if (acceptable && req != NULL) {
6056 /* We no longer need the request sock. */
6057 reqsk_fastopen_remove(sk, req, false);
6058 tcp_rearm_rto(sk);
6059 }
6045 if (tp->snd_una == tp->write_seq) { 6060 if (tp->snd_una == tp->write_seq) {
6046 struct dst_entry *dst; 6061 struct dst_entry *dst;
6047 6062
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index be23a0b7b89e..75735c9a6a9d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -352,6 +352,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
352 const int code = icmp_hdr(icmp_skb)->code; 352 const int code = icmp_hdr(icmp_skb)->code;
353 struct sock *sk; 353 struct sock *sk;
354 struct sk_buff *skb; 354 struct sk_buff *skb;
355 struct request_sock *req;
355 __u32 seq; 356 __u32 seq;
356 __u32 remaining; 357 __u32 remaining;
357 int err; 358 int err;
@@ -394,9 +395,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
394 395
395 icsk = inet_csk(sk); 396 icsk = inet_csk(sk);
396 tp = tcp_sk(sk); 397 tp = tcp_sk(sk);
398 req = tp->fastopen_rsk;
397 seq = ntohl(th->seq); 399 seq = ntohl(th->seq);
398 if (sk->sk_state != TCP_LISTEN && 400 if (sk->sk_state != TCP_LISTEN &&
399 !between(seq, tp->snd_una, tp->snd_nxt)) { 401 !between(seq, tp->snd_una, tp->snd_nxt) &&
402 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
403 /* For a Fast Open socket, allow seq to be snt_isn. */
400 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 404 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
401 goto out; 405 goto out;
402 } 406 }
@@ -435,6 +439,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
435 !icsk->icsk_backoff) 439 !icsk->icsk_backoff)
436 break; 440 break;
437 441
442 /* XXX (TFO) - revisit the following logic for TFO */
443
438 if (sock_owned_by_user(sk)) 444 if (sock_owned_by_user(sk))
439 break; 445 break;
440 446
@@ -466,6 +472,14 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
466 goto out; 472 goto out;
467 } 473 }
468 474
475 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
476 * than following the TCP_SYN_RECV case and closing the socket,
477 * we ignore the ICMP error and keep trying like a fully established
478 * socket. Is this the right thing to do?
479 */
480 if (req && req->sk == NULL)
481 goto out;
482
469 switch (sk->sk_state) { 483 switch (sk->sk_state) {
470 struct request_sock *req, **prev; 484 struct request_sock *req, **prev;
471 case TCP_LISTEN: 485 case TCP_LISTEN:
@@ -498,7 +512,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
498 512
499 case TCP_SYN_SENT: 513 case TCP_SYN_SENT:
500 case TCP_SYN_RECV: /* Cannot happen. 514 case TCP_SYN_RECV: /* Cannot happen.
501 It can f.e. if SYNs crossed. 515 It can f.e. if SYNs crossed,
516 or Fast Open.
502 */ 517 */
503 if (!sock_owned_by_user(sk)) { 518 if (!sock_owned_by_user(sk)) {
504 sk->sk_err = err; 519 sk->sk_err = err;
@@ -809,8 +824,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
809static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 824static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
810 struct request_sock *req) 825 struct request_sock *req)
811{ 826{
812 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, 827 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
813 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 828 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
829 */
830 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
831 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
832 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
814 req->ts_recent, 833 req->ts_recent,
815 0, 834 0,
816 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 835 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -839,7 +858,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
839 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 858 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
840 return -1; 859 return -1;
841 860
842 skb = tcp_make_synack(sk, dst, req, rvp); 861 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
843 862
844 if (skb) { 863 if (skb) {
845 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 864 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
@@ -849,6 +868,8 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
849 ireq->rmt_addr, 868 ireq->rmt_addr,
850 ireq->opt); 869 ireq->opt);
851 err = net_xmit_eval(err); 870 err = net_xmit_eval(err);
871 if (!tcp_rsk(req)->snt_synack && !err)
872 tcp_rsk(req)->snt_synack = tcp_time_stamp;
852 } 873 }
853 874
854 return err; 875 return err;
@@ -904,8 +925,7 @@ EXPORT_SYMBOL(tcp_syn_flood_action);
904/* 925/*
905 * Save and compile IPv4 options into the request_sock if needed. 926 * Save and compile IPv4 options into the request_sock if needed.
906 */ 927 */
907static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk, 928static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
908 struct sk_buff *skb)
909{ 929{
910 const struct ip_options *opt = &(IPCB(skb)->opt); 930 const struct ip_options *opt = &(IPCB(skb)->opt);
911 struct ip_options_rcu *dopt = NULL; 931 struct ip_options_rcu *dopt = NULL;
@@ -1272,6 +1292,182 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1272}; 1292};
1273#endif 1293#endif
1274 1294
1295static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1296 struct request_sock *req,
1297 struct tcp_fastopen_cookie *foc,
1298 struct tcp_fastopen_cookie *valid_foc)
1299{
1300 bool skip_cookie = false;
1301 struct fastopen_queue *fastopenq;
1302
1303 if (likely(!fastopen_cookie_present(foc))) {
1304 /* See include/net/tcp.h for the meaning of these knobs */
1305 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1306 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1307 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1308 skip_cookie = true; /* no cookie to validate */
1309 else
1310 return false;
1311 }
1312 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1313 /* A FO option is present; bump the counter. */
1314 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1315
1316 /* Make sure the listener has enabled fastopen, and we don't
1317 * exceed the max # of pending TFO requests allowed before trying
1318 * to validating the cookie in order to avoid burning CPU cycles
1319 * unnecessarily.
1320 *
1321 * XXX (TFO) - The implication of checking the max_qlen before
1322 * processing a cookie request is that clients can't differentiate
1323 * between qlen overflow causing Fast Open to be disabled
1324 * temporarily vs a server not supporting Fast Open at all.
1325 */
1326 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1327 fastopenq == NULL || fastopenq->max_qlen == 0)
1328 return false;
1329
1330 if (fastopenq->qlen >= fastopenq->max_qlen) {
1331 struct request_sock *req1;
1332 spin_lock(&fastopenq->lock);
1333 req1 = fastopenq->rskq_rst_head;
1334 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1335 spin_unlock(&fastopenq->lock);
1336 NET_INC_STATS_BH(sock_net(sk),
1337 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1338 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1339 foc->len = -1;
1340 return false;
1341 }
1342 fastopenq->rskq_rst_head = req1->dl_next;
1343 fastopenq->qlen--;
1344 spin_unlock(&fastopenq->lock);
1345 reqsk_free(req1);
1346 }
1347 if (skip_cookie) {
1348 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1349 return true;
1350 }
1351 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1352 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1353 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1354 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1355 memcmp(&foc->val[0], &valid_foc->val[0],
1356 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1357 return false;
1358 valid_foc->len = -1;
1359 }
1360 /* Acknowledge the data received from the peer. */
1361 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1362 return true;
1363 } else if (foc->len == 0) { /* Client requesting a cookie */
1364 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1365 NET_INC_STATS_BH(sock_net(sk),
1366 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1367 } else {
1368 /* Client sent a cookie with wrong size. Treat it
1369 * the same as invalid and return a valid one.
1370 */
1371 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1372 }
1373 return false;
1374}
1375
1376static int tcp_v4_conn_req_fastopen(struct sock *sk,
1377 struct sk_buff *skb,
1378 struct sk_buff *skb_synack,
1379 struct request_sock *req,
1380 struct request_values *rvp)
1381{
1382 struct tcp_sock *tp = tcp_sk(sk);
1383 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1384 const struct inet_request_sock *ireq = inet_rsk(req);
1385 struct sock *child;
1386 int err;
1387
1388 req->retrans = 0;
1389 req->sk = NULL;
1390
1391 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1392 if (child == NULL) {
1393 NET_INC_STATS_BH(sock_net(sk),
1394 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1395 kfree_skb(skb_synack);
1396 return -1;
1397 }
1398 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1399 ireq->rmt_addr, ireq->opt);
1400 err = net_xmit_eval(err);
1401 if (!err)
1402 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1403 /* XXX (TFO) - is it ok to ignore error and continue? */
1404
1405 spin_lock(&queue->fastopenq->lock);
1406 queue->fastopenq->qlen++;
1407 spin_unlock(&queue->fastopenq->lock);
1408
1409 /* Initialize the child socket. Have to fix some values to take
1410 * into account the child is a Fast Open socket and is created
1411 * only out of the bits carried in the SYN packet.
1412 */
1413 tp = tcp_sk(child);
1414
1415 tp->fastopen_rsk = req;
1416 /* Do a hold on the listner sk so that if the listener is being
1417 * closed, the child that has been accepted can live on and still
1418 * access listen_lock.
1419 */
1420 sock_hold(sk);
1421 tcp_rsk(req)->listener = sk;
1422
1423 /* RFC1323: The window in SYN & SYN/ACK segments is never
1424 * scaled. So correct it appropriately.
1425 */
1426 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1427
1428 /* Activate the retrans timer so that SYNACK can be retransmitted.
1429 * The request socket is not added to the SYN table of the parent
1430 * because it's been added to the accept queue directly.
1431 */
1432 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1433 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1434
1435 /* Add the child socket directly into the accept queue */
1436 inet_csk_reqsk_queue_add(sk, req, child);
1437
1438 /* Now finish processing the fastopen child socket. */
1439 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1440 tcp_init_congestion_control(child);
1441 tcp_mtup_init(child);
1442 tcp_init_buffer_space(child);
1443 tcp_init_metrics(child);
1444
1445 /* Queue the data carried in the SYN packet. We need to first
1446 * bump skb's refcnt because the caller will attempt to free it.
1447 *
1448 * XXX (TFO) - we honor a zero-payload TFO request for now.
1449 * (Any reason not to?)
1450 */
1451 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1452 /* Don't queue the skb if there is no payload in SYN.
1453 * XXX (TFO) - How about SYN+FIN?
1454 */
1455 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1456 } else {
1457 skb = skb_get(skb);
1458 skb_dst_drop(skb);
1459 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1460 skb_set_owner_r(skb, child);
1461 __skb_queue_tail(&child->sk_receive_queue, skb);
1462 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1463 }
1464 sk->sk_data_ready(sk, 0);
1465 bh_unlock_sock(child);
1466 sock_put(child);
1467 WARN_ON(req->sk == NULL);
1468 return 0;
1469}
1470
1275int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1471int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1276{ 1472{
1277 struct tcp_extend_values tmp_ext; 1473 struct tcp_extend_values tmp_ext;
@@ -1285,6 +1481,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1285 __be32 daddr = ip_hdr(skb)->daddr; 1481 __be32 daddr = ip_hdr(skb)->daddr;
1286 __u32 isn = TCP_SKB_CB(skb)->when; 1482 __u32 isn = TCP_SKB_CB(skb)->when;
1287 bool want_cookie = false; 1483 bool want_cookie = false;
1484 struct flowi4 fl4;
1485 struct tcp_fastopen_cookie foc = { .len = -1 };
1486 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1487 struct sk_buff *skb_synack;
1488 int do_fastopen;
1288 1489
1289 /* Never answer to SYNs send to broadcast or multicast */ 1490 /* Never answer to SYNs send to broadcast or multicast */
1290 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1491 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1319,7 +1520,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1319 tcp_clear_options(&tmp_opt); 1520 tcp_clear_options(&tmp_opt);
1320 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1521 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1321 tmp_opt.user_mss = tp->rx_opt.user_mss; 1522 tmp_opt.user_mss = tp->rx_opt.user_mss;
1322 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); 1523 tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1524 want_cookie ? NULL : &foc);
1323 1525
1324 if (tmp_opt.cookie_plus > 0 && 1526 if (tmp_opt.cookie_plus > 0 &&
1325 tmp_opt.saw_tstamp && 1527 tmp_opt.saw_tstamp &&
@@ -1365,7 +1567,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1365 ireq->loc_addr = daddr; 1567 ireq->loc_addr = daddr;
1366 ireq->rmt_addr = saddr; 1568 ireq->rmt_addr = saddr;
1367 ireq->no_srccheck = inet_sk(sk)->transparent; 1569 ireq->no_srccheck = inet_sk(sk)->transparent;
1368 ireq->opt = tcp_v4_save_options(sk, skb); 1570 ireq->opt = tcp_v4_save_options(skb);
1369 1571
1370 if (security_inet_conn_request(sk, skb, req)) 1572 if (security_inet_conn_request(sk, skb, req))
1371 goto drop_and_free; 1573 goto drop_and_free;
@@ -1377,8 +1579,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1377 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1579 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1378 req->cookie_ts = tmp_opt.tstamp_ok; 1580 req->cookie_ts = tmp_opt.tstamp_ok;
1379 } else if (!isn) { 1581 } else if (!isn) {
1380 struct flowi4 fl4;
1381
1382 /* VJ's idea. We save last timestamp seen 1582 /* VJ's idea. We save last timestamp seen
1383 * from the destination in peer table, when entering 1583 * from the destination in peer table, when entering
1384 * state TIME-WAIT, and check against it before 1584 * state TIME-WAIT, and check against it before
@@ -1417,16 +1617,54 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1417 isn = tcp_v4_init_sequence(skb); 1617 isn = tcp_v4_init_sequence(skb);
1418 } 1618 }
1419 tcp_rsk(req)->snt_isn = isn; 1619 tcp_rsk(req)->snt_isn = isn;
1420 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1421 1620
1422 if (tcp_v4_send_synack(sk, dst, req, 1621 if (dst == NULL) {
1423 (struct request_values *)&tmp_ext, 1622 dst = inet_csk_route_req(sk, &fl4, req);
1424 skb_get_queue_mapping(skb), 1623 if (dst == NULL)
1425 want_cookie) || 1624 goto drop_and_free;
1426 want_cookie) 1625 }
1626 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1627
1628 /* We don't call tcp_v4_send_synack() directly because we need
1629 * to make sure a child socket can be created successfully before
1630 * sending back synack!
1631 *
1632 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1633 * (or better yet, call tcp_send_synack() in the child context
1634 * directly, but will have to fix bunch of other code first)
1635 * after syn_recv_sock() except one will need to first fix the
1636 * latter to remove its dependency on the current implementation
1637 * of tcp_v4_send_synack()->tcp_select_initial_window().
1638 */
1639 skb_synack = tcp_make_synack(sk, dst, req,
1640 (struct request_values *)&tmp_ext,
1641 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1642
1643 if (skb_synack) {
1644 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1645 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1646 } else
1647 goto drop_and_free;
1648
1649 if (likely(!do_fastopen)) {
1650 int err;
1651 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1652 ireq->rmt_addr, ireq->opt);
1653 err = net_xmit_eval(err);
1654 if (err || want_cookie)
1655 goto drop_and_free;
1656
1657 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1658 tcp_rsk(req)->listener = NULL;
1659 /* Add the request_sock to the SYN table */
1660 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1661 if (fastopen_cookie_present(&foc) && foc.len != 0)
1662 NET_INC_STATS_BH(sock_net(sk),
1663 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1664 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1665 (struct request_values *)&tmp_ext))
1427 goto drop_and_free; 1666 goto drop_and_free;
1428 1667
1429 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1430 return 0; 1668 return 0;
1431 1669
1432drop_and_release: 1670drop_and_release:
@@ -1500,9 +1738,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1500 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; 1738 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1501 1739
1502 tcp_initialize_rcv_mss(newsk); 1740 tcp_initialize_rcv_mss(newsk);
1503 if (tcp_rsk(req)->snt_synack) 1741 tcp_synack_rtt_meas(newsk, req);
1504 tcp_valid_rtt_meas(newsk,
1505 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1506 newtp->total_retrans = req->retrans; 1742 newtp->total_retrans = req->retrans;
1507 1743
1508#ifdef CONFIG_TCP_MD5SIG 1744#ifdef CONFIG_TCP_MD5SIG
@@ -1554,7 +1790,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1554 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, 1790 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1555 iph->saddr, iph->daddr); 1791 iph->saddr, iph->daddr);
1556 if (req) 1792 if (req)
1557 return tcp_check_req(sk, skb, req, prev); 1793 return tcp_check_req(sk, skb, req, prev, false);
1558 1794
1559 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, 1795 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1560 th->source, iph->daddr, th->dest, inet_iif(skb)); 1796 th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1963,20 +2199,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
1963 if (inet_csk(sk)->icsk_bind_hash) 2199 if (inet_csk(sk)->icsk_bind_hash)
1964 inet_put_port(sk); 2200 inet_put_port(sk);
1965 2201
1966 /*
1967 * If sendmsg cached page exists, toss it.
1968 */
1969 if (sk->sk_sndmsg_page) {
1970 __free_page(sk->sk_sndmsg_page);
1971 sk->sk_sndmsg_page = NULL;
1972 }
1973
1974 /* TCP Cookie Transactions */ 2202 /* TCP Cookie Transactions */
1975 if (tp->cookie_values != NULL) { 2203 if (tp->cookie_values != NULL) {
1976 kref_put(&tp->cookie_values->kref, 2204 kref_put(&tp->cookie_values->kref,
1977 tcp_cookie_values_release); 2205 tcp_cookie_values_release);
1978 tp->cookie_values = NULL; 2206 tp->cookie_values = NULL;
1979 } 2207 }
2208 BUG_ON(tp->fastopen_rsk != NULL);
1980 2209
1981 /* If socket is aborted during connect operation */ 2210 /* If socket is aborted during connect operation */
1982 tcp_free_fastopen_req(tp); 2211 tcp_free_fastopen_req(tp);
@@ -2396,7 +2625,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2396 struct seq_file *f, int i, kuid_t uid, int *len) 2625 struct seq_file *f, int i, kuid_t uid, int *len)
2397{ 2626{
2398 const struct inet_request_sock *ireq = inet_rsk(req); 2627 const struct inet_request_sock *ireq = inet_rsk(req);
2399 int ttd = req->expires - jiffies; 2628 long delta = req->expires - jiffies;
2400 2629
2401 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2630 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2402 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n", 2631 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
@@ -2408,7 +2637,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2408 TCP_SYN_RECV, 2637 TCP_SYN_RECV,
2409 0, 0, /* could print option size, but that is af dependent. */ 2638 0, 0, /* could print option size, but that is af dependent. */
2410 1, /* timers active (only the expire timer) */ 2639 1, /* timers active (only the expire timer) */
2411 jiffies_to_clock_t(ttd), 2640 jiffies_delta_to_clock_t(delta),
2412 req->retrans, 2641 req->retrans,
2413 from_kuid_munged(seq_user_ns(f), uid), 2642 from_kuid_munged(seq_user_ns(f), uid),
2414 0, /* non standard timer */ 2643 0, /* non standard timer */
@@ -2425,6 +2654,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2425 const struct tcp_sock *tp = tcp_sk(sk); 2654 const struct tcp_sock *tp = tcp_sk(sk);
2426 const struct inet_connection_sock *icsk = inet_csk(sk); 2655 const struct inet_connection_sock *icsk = inet_csk(sk);
2427 const struct inet_sock *inet = inet_sk(sk); 2656 const struct inet_sock *inet = inet_sk(sk);
2657 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2428 __be32 dest = inet->inet_daddr; 2658 __be32 dest = inet->inet_daddr;
2429 __be32 src = inet->inet_rcv_saddr; 2659 __be32 src = inet->inet_rcv_saddr;
2430 __u16 destp = ntohs(inet->inet_dport); 2660 __u16 destp = ntohs(inet->inet_dport);
@@ -2459,7 +2689,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2459 tp->write_seq - tp->snd_una, 2689 tp->write_seq - tp->snd_una,
2460 rx_queue, 2690 rx_queue,
2461 timer_active, 2691 timer_active,
2462 jiffies_to_clock_t(timer_expires - jiffies), 2692 jiffies_delta_to_clock_t(timer_expires - jiffies),
2463 icsk->icsk_retransmits, 2693 icsk->icsk_retransmits,
2464 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), 2694 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2465 icsk->icsk_probes_out, 2695 icsk->icsk_probes_out,
@@ -2469,7 +2699,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2469 jiffies_to_clock_t(icsk->icsk_ack.ato), 2699 jiffies_to_clock_t(icsk->icsk_ack.ato),
2470 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2700 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2471 tp->snd_cwnd, 2701 tp->snd_cwnd,
2472 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh, 2702 sk->sk_state == TCP_LISTEN ?
2703 (fastopenq ? fastopenq->max_qlen : 0) :
2704 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2473 len); 2705 len);
2474} 2706}
2475 2707
@@ -2478,10 +2710,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2478{ 2710{
2479 __be32 dest, src; 2711 __be32 dest, src;
2480 __u16 destp, srcp; 2712 __u16 destp, srcp;
2481 int ttd = tw->tw_ttd - jiffies; 2713 long delta = tw->tw_ttd - jiffies;
2482
2483 if (ttd < 0)
2484 ttd = 0;
2485 2714
2486 dest = tw->tw_daddr; 2715 dest = tw->tw_daddr;
2487 src = tw->tw_rcv_saddr; 2716 src = tw->tw_rcv_saddr;
@@ -2491,7 +2720,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2491 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2720 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2492 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n", 2721 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2493 i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 2722 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2494 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, 2723 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2495 atomic_read(&tw->tw_refcnt), tw, len); 2724 atomic_read(&tw->tw_refcnt), tw, len);
2496} 2725}
2497 2726
@@ -2574,6 +2803,8 @@ void tcp4_proc_exit(void)
2574struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2803struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2575{ 2804{
2576 const struct iphdr *iph = skb_gro_network_header(skb); 2805 const struct iphdr *iph = skb_gro_network_header(skb);
2806 __wsum wsum;
2807 __sum16 sum;
2577 2808
2578 switch (skb->ip_summed) { 2809 switch (skb->ip_summed) {
2579 case CHECKSUM_COMPLETE: 2810 case CHECKSUM_COMPLETE:
@@ -2582,11 +2813,22 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2582 skb->ip_summed = CHECKSUM_UNNECESSARY; 2813 skb->ip_summed = CHECKSUM_UNNECESSARY;
2583 break; 2814 break;
2584 } 2815 }
2585 2816flush:
2586 /* fall through */
2587 case CHECKSUM_NONE:
2588 NAPI_GRO_CB(skb)->flush = 1; 2817 NAPI_GRO_CB(skb)->flush = 1;
2589 return NULL; 2818 return NULL;
2819
2820 case CHECKSUM_NONE:
2821 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2822 skb_gro_len(skb), IPPROTO_TCP, 0);
2823 sum = csum_fold(skb_checksum(skb,
2824 skb_gro_offset(skb),
2825 skb_gro_len(skb),
2826 wsum));
2827 if (sum)
2828 goto flush;
2829
2830 skb->ip_summed = CHECKSUM_UNNECESSARY;
2831 break;
2590 } 2832 }
2591 2833
2592 return tcp_gro_receive(head, skb); 2834 return tcp_gro_receive(head, skb);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 0abe67bb4d3a..4c752a6e0bcd 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -8,6 +8,7 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/tcp.h> 9#include <linux/tcp.h>
10#include <linux/hash.h> 10#include <linux/hash.h>
11#include <linux/tcp_metrics.h>
11 12
12#include <net/inet_connection_sock.h> 13#include <net/inet_connection_sock.h>
13#include <net/net_namespace.h> 14#include <net/net_namespace.h>
@@ -17,20 +18,10 @@
17#include <net/ipv6.h> 18#include <net/ipv6.h>
18#include <net/dst.h> 19#include <net/dst.h>
19#include <net/tcp.h> 20#include <net/tcp.h>
21#include <net/genetlink.h>
20 22
21int sysctl_tcp_nometrics_save __read_mostly; 23int sysctl_tcp_nometrics_save __read_mostly;
22 24
23enum tcp_metric_index {
24 TCP_METRIC_RTT,
25 TCP_METRIC_RTTVAR,
26 TCP_METRIC_SSTHRESH,
27 TCP_METRIC_CWND,
28 TCP_METRIC_REORDERING,
29
30 /* Always last. */
31 TCP_METRIC_MAX,
32};
33
34struct tcp_fastopen_metrics { 25struct tcp_fastopen_metrics {
35 u16 mss; 26 u16 mss;
36 u16 syn_loss:10; /* Recurring Fast Open SYN losses */ 27 u16 syn_loss:10; /* Recurring Fast Open SYN losses */
@@ -45,8 +36,10 @@ struct tcp_metrics_block {
45 u32 tcpm_ts; 36 u32 tcpm_ts;
46 u32 tcpm_ts_stamp; 37 u32 tcpm_ts_stamp;
47 u32 tcpm_lock; 38 u32 tcpm_lock;
48 u32 tcpm_vals[TCP_METRIC_MAX]; 39 u32 tcpm_vals[TCP_METRIC_MAX + 1];
49 struct tcp_fastopen_metrics tcpm_fastopen; 40 struct tcp_fastopen_metrics tcpm_fastopen;
41
42 struct rcu_head rcu_head;
50}; 43};
51 44
52static bool tcp_metric_locked(struct tcp_metrics_block *tm, 45static bool tcp_metric_locked(struct tcp_metrics_block *tm,
@@ -690,6 +683,325 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
690 rcu_read_unlock(); 683 rcu_read_unlock();
691} 684}
692 685
686static struct genl_family tcp_metrics_nl_family = {
687 .id = GENL_ID_GENERATE,
688 .hdrsize = 0,
689 .name = TCP_METRICS_GENL_NAME,
690 .version = TCP_METRICS_GENL_VERSION,
691 .maxattr = TCP_METRICS_ATTR_MAX,
692 .netnsok = true,
693};
694
695static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
696 [TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
697 [TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
698 .len = sizeof(struct in6_addr), },
699 /* Following attributes are not received for GET/DEL,
700 * we keep them for reference
701 */
702#if 0
703 [TCP_METRICS_ATTR_AGE] = { .type = NLA_MSECS, },
704 [TCP_METRICS_ATTR_TW_TSVAL] = { .type = NLA_U32, },
705 [TCP_METRICS_ATTR_TW_TS_STAMP] = { .type = NLA_S32, },
706 [TCP_METRICS_ATTR_VALS] = { .type = NLA_NESTED, },
707 [TCP_METRICS_ATTR_FOPEN_MSS] = { .type = NLA_U16, },
708 [TCP_METRICS_ATTR_FOPEN_SYN_DROPS] = { .type = NLA_U16, },
709 [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS] = { .type = NLA_MSECS, },
710 [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
711 .len = TCP_FASTOPEN_COOKIE_MAX, },
712#endif
713};
714
715/* Add attributes, caller cancels its header on failure */
716static int tcp_metrics_fill_info(struct sk_buff *msg,
717 struct tcp_metrics_block *tm)
718{
719 struct nlattr *nest;
720 int i;
721
722 switch (tm->tcpm_addr.family) {
723 case AF_INET:
724 if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
725 tm->tcpm_addr.addr.a4) < 0)
726 goto nla_put_failure;
727 break;
728 case AF_INET6:
729 if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
730 tm->tcpm_addr.addr.a6) < 0)
731 goto nla_put_failure;
732 break;
733 default:
734 return -EAFNOSUPPORT;
735 }
736
737 if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
738 jiffies - tm->tcpm_stamp) < 0)
739 goto nla_put_failure;
740 if (tm->tcpm_ts_stamp) {
741 if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
742 (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
743 goto nla_put_failure;
744 if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
745 tm->tcpm_ts) < 0)
746 goto nla_put_failure;
747 }
748
749 {
750 int n = 0;
751
752 nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
753 if (!nest)
754 goto nla_put_failure;
755 for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
756 if (!tm->tcpm_vals[i])
757 continue;
758 if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
759 goto nla_put_failure;
760 n++;
761 }
762 if (n)
763 nla_nest_end(msg, nest);
764 else
765 nla_nest_cancel(msg, nest);
766 }
767
768 {
769 struct tcp_fastopen_metrics tfom_copy[1], *tfom;
770 unsigned int seq;
771
772 do {
773 seq = read_seqbegin(&fastopen_seqlock);
774 tfom_copy[0] = tm->tcpm_fastopen;
775 } while (read_seqretry(&fastopen_seqlock, seq));
776
777 tfom = tfom_copy;
778 if (tfom->mss &&
779 nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
780 tfom->mss) < 0)
781 goto nla_put_failure;
782 if (tfom->syn_loss &&
783 (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
784 tfom->syn_loss) < 0 ||
785 nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
786 jiffies - tfom->last_syn_loss) < 0))
787 goto nla_put_failure;
788 if (tfom->cookie.len > 0 &&
789 nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
790 tfom->cookie.len, tfom->cookie.val) < 0)
791 goto nla_put_failure;
792 }
793
794 return 0;
795
796nla_put_failure:
797 return -EMSGSIZE;
798}
799
800static int tcp_metrics_dump_info(struct sk_buff *skb,
801 struct netlink_callback *cb,
802 struct tcp_metrics_block *tm)
803{
804 void *hdr;
805
806 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
807 &tcp_metrics_nl_family, NLM_F_MULTI,
808 TCP_METRICS_CMD_GET);
809 if (!hdr)
810 return -EMSGSIZE;
811
812 if (tcp_metrics_fill_info(skb, tm) < 0)
813 goto nla_put_failure;
814
815 return genlmsg_end(skb, hdr);
816
817nla_put_failure:
818 genlmsg_cancel(skb, hdr);
819 return -EMSGSIZE;
820}
821
822static int tcp_metrics_nl_dump(struct sk_buff *skb,
823 struct netlink_callback *cb)
824{
825 struct net *net = sock_net(skb->sk);
826 unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
827 unsigned int row, s_row = cb->args[0];
828 int s_col = cb->args[1], col = s_col;
829
830 for (row = s_row; row < max_rows; row++, s_col = 0) {
831 struct tcp_metrics_block *tm;
832 struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;
833
834 rcu_read_lock();
835 for (col = 0, tm = rcu_dereference(hb->chain); tm;
836 tm = rcu_dereference(tm->tcpm_next), col++) {
837 if (col < s_col)
838 continue;
839 if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
840 rcu_read_unlock();
841 goto done;
842 }
843 }
844 rcu_read_unlock();
845 }
846
847done:
848 cb->args[0] = row;
849 cb->args[1] = col;
850 return skb->len;
851}
852
853static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
854 unsigned int *hash, int optional)
855{
856 struct nlattr *a;
857
858 a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
859 if (a) {
860 addr->family = AF_INET;
861 addr->addr.a4 = nla_get_be32(a);
862 *hash = (__force unsigned int) addr->addr.a4;
863 return 0;
864 }
865 a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
866 if (a) {
867 if (nla_len(a) != sizeof(sizeof(struct in6_addr)))
868 return -EINVAL;
869 addr->family = AF_INET6;
870 memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
871 *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
872 return 0;
873 }
874 return optional ? 1 : -EAFNOSUPPORT;
875}
876
877static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
878{
879 struct tcp_metrics_block *tm;
880 struct inetpeer_addr addr;
881 unsigned int hash;
882 struct sk_buff *msg;
883 struct net *net = genl_info_net(info);
884 void *reply;
885 int ret;
886
887 ret = parse_nl_addr(info, &addr, &hash, 0);
888 if (ret < 0)
889 return ret;
890
891 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
892 if (!msg)
893 return -ENOMEM;
894
895 reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
896 info->genlhdr->cmd);
897 if (!reply)
898 goto nla_put_failure;
899
900 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
901 ret = -ESRCH;
902 rcu_read_lock();
903 for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
904 tm = rcu_dereference(tm->tcpm_next)) {
905 if (addr_same(&tm->tcpm_addr, &addr)) {
906 ret = tcp_metrics_fill_info(msg, tm);
907 break;
908 }
909 }
910 rcu_read_unlock();
911 if (ret < 0)
912 goto out_free;
913
914 genlmsg_end(msg, reply);
915 return genlmsg_reply(msg, info);
916
917nla_put_failure:
918 ret = -EMSGSIZE;
919
920out_free:
921 nlmsg_free(msg);
922 return ret;
923}
924
925#define deref_locked_genl(p) \
926 rcu_dereference_protected(p, lockdep_genl_is_held() && \
927 lockdep_is_held(&tcp_metrics_lock))
928
929#define deref_genl(p) rcu_dereference_protected(p, lockdep_genl_is_held())
930
931static int tcp_metrics_flush_all(struct net *net)
932{
933 unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
934 struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
935 struct tcp_metrics_block *tm;
936 unsigned int row;
937
938 for (row = 0; row < max_rows; row++, hb++) {
939 spin_lock_bh(&tcp_metrics_lock);
940 tm = deref_locked_genl(hb->chain);
941 if (tm)
942 hb->chain = NULL;
943 spin_unlock_bh(&tcp_metrics_lock);
944 while (tm) {
945 struct tcp_metrics_block *next;
946
947 next = deref_genl(tm->tcpm_next);
948 kfree_rcu(tm, rcu_head);
949 tm = next;
950 }
951 }
952 return 0;
953}
954
955static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
956{
957 struct tcpm_hash_bucket *hb;
958 struct tcp_metrics_block *tm;
959 struct tcp_metrics_block __rcu **pp;
960 struct inetpeer_addr addr;
961 unsigned int hash;
962 struct net *net = genl_info_net(info);
963 int ret;
964
965 ret = parse_nl_addr(info, &addr, &hash, 1);
966 if (ret < 0)
967 return ret;
968 if (ret > 0)
969 return tcp_metrics_flush_all(net);
970
971 hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
972 hb = net->ipv4.tcp_metrics_hash + hash;
973 pp = &hb->chain;
974 spin_lock_bh(&tcp_metrics_lock);
975 for (tm = deref_locked_genl(*pp); tm;
976 pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
977 if (addr_same(&tm->tcpm_addr, &addr)) {
978 *pp = tm->tcpm_next;
979 break;
980 }
981 }
982 spin_unlock_bh(&tcp_metrics_lock);
983 if (!tm)
984 return -ESRCH;
985 kfree_rcu(tm, rcu_head);
986 return 0;
987}
988
989static struct genl_ops tcp_metrics_nl_ops[] = {
990 {
991 .cmd = TCP_METRICS_CMD_GET,
992 .doit = tcp_metrics_nl_cmd_get,
993 .dumpit = tcp_metrics_nl_dump,
994 .policy = tcp_metrics_nl_policy,
995 .flags = GENL_ADMIN_PERM,
996 },
997 {
998 .cmd = TCP_METRICS_CMD_DEL,
999 .doit = tcp_metrics_nl_cmd_del,
1000 .policy = tcp_metrics_nl_policy,
1001 .flags = GENL_ADMIN_PERM,
1002 },
1003};
1004
693static unsigned int tcpmhash_entries; 1005static unsigned int tcpmhash_entries;
694static int __init set_tcpmhash_entries(char *str) 1006static int __init set_tcpmhash_entries(char *str)
695{ 1007{
@@ -753,5 +1065,21 @@ static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
753 1065
754void __init tcp_metrics_init(void) 1066void __init tcp_metrics_init(void)
755{ 1067{
756 register_pernet_subsys(&tcp_net_metrics_ops); 1068 int ret;
1069
1070 ret = register_pernet_subsys(&tcp_net_metrics_ops);
1071 if (ret < 0)
1072 goto cleanup;
1073 ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
1074 tcp_metrics_nl_ops,
1075 ARRAY_SIZE(tcp_metrics_nl_ops));
1076 if (ret < 0)
1077 goto cleanup_subsys;
1078 return;
1079
1080cleanup_subsys:
1081 unregister_pernet_subsys(&tcp_net_metrics_ops);
1082
1083cleanup:
1084 return;
757} 1085}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6ff7f10dce9d..27536ba16c9d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -85,6 +85,8 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
85 * spinlock it. I do not want! Well, probability of misbehaviour 85 * spinlock it. I do not want! Well, probability of misbehaviour
86 * is ridiculously low and, seems, we could use some mb() tricks 86 * is ridiculously low and, seems, we could use some mb() tricks
87 * to avoid misread sequence numbers, states etc. --ANK 87 * to avoid misread sequence numbers, states etc. --ANK
88 *
89 * We don't need to initialize tmp_out.sack_ok as we don't use the results
88 */ 90 */
89enum tcp_tw_status 91enum tcp_tw_status
90tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, 92tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
@@ -507,6 +509,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
507 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 509 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
508 newtp->rx_opt.mss_clamp = req->mss; 510 newtp->rx_opt.mss_clamp = req->mss;
509 TCP_ECN_openreq_child(newtp, req); 511 TCP_ECN_openreq_child(newtp, req);
512 newtp->fastopen_rsk = NULL;
510 513
511 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); 514 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
512 } 515 }
@@ -515,13 +518,20 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
515EXPORT_SYMBOL(tcp_create_openreq_child); 518EXPORT_SYMBOL(tcp_create_openreq_child);
516 519
517/* 520/*
518 * Process an incoming packet for SYN_RECV sockets represented 521 * Process an incoming packet for SYN_RECV sockets represented as a
519 * as a request_sock. 522 * request_sock. Normally sk is the listener socket but for TFO it
523 * points to the child socket.
524 *
525 * XXX (TFO) - The current impl contains a special check for ack
526 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
527 *
528 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
520 */ 529 */
521 530
522struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, 531struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
523 struct request_sock *req, 532 struct request_sock *req,
524 struct request_sock **prev) 533 struct request_sock **prev,
534 bool fastopen)
525{ 535{
526 struct tcp_options_received tmp_opt; 536 struct tcp_options_received tmp_opt;
527 const u8 *hash_location; 537 const u8 *hash_location;
@@ -530,6 +540,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
530 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 540 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
531 bool paws_reject = false; 541 bool paws_reject = false;
532 542
543 BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
544
533 tmp_opt.saw_tstamp = 0; 545 tmp_opt.saw_tstamp = 0;
534 if (th->doff > (sizeof(struct tcphdr)>>2)) { 546 if (th->doff > (sizeof(struct tcphdr)>>2)) {
535 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); 547 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
@@ -565,6 +577,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
565 * 577 *
566 * Enforce "SYN-ACK" according to figure 8, figure 6 578 * Enforce "SYN-ACK" according to figure 8, figure 6
567 * of RFC793, fixed by RFC1122. 579 * of RFC793, fixed by RFC1122.
580 *
581 * Note that even if there is new data in the SYN packet
582 * they will be thrown away too.
568 */ 583 */
569 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 584 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
570 return NULL; 585 return NULL;
@@ -622,9 +637,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
622 * sent (the segment carries an unacceptable ACK) ... 637 * sent (the segment carries an unacceptable ACK) ...
623 * a reset is sent." 638 * a reset is sent."
624 * 639 *
625 * Invalid ACK: reset will be sent by listening socket 640 * Invalid ACK: reset will be sent by listening socket.
641 * Note that the ACK validity check for a Fast Open socket is done
642 * elsewhere and is checked directly against the child socket rather
643 * than req because user data may have been sent out.
626 */ 644 */
627 if ((flg & TCP_FLAG_ACK) && 645 if ((flg & TCP_FLAG_ACK) && !fastopen &&
628 (TCP_SKB_CB(skb)->ack_seq != 646 (TCP_SKB_CB(skb)->ack_seq !=
629 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk)))) 647 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
630 return sk; 648 return sk;
@@ -637,7 +655,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
637 /* RFC793: "first check sequence number". */ 655 /* RFC793: "first check sequence number". */
638 656
639 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, 657 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
640 tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) { 658 tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
641 /* Out of window: send ACK and drop. */ 659 /* Out of window: send ACK and drop. */
642 if (!(flg & TCP_FLAG_RST)) 660 if (!(flg & TCP_FLAG_RST))
643 req->rsk_ops->send_ack(sk, skb, req); 661 req->rsk_ops->send_ack(sk, skb, req);
@@ -648,7 +666,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
648 666
649 /* In sequence, PAWS is OK. */ 667 /* In sequence, PAWS is OK. */
650 668
651 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1)) 669 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
652 req->ts_recent = tmp_opt.rcv_tsval; 670 req->ts_recent = tmp_opt.rcv_tsval;
653 671
654 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { 672 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
@@ -667,10 +685,25 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
667 685
668 /* ACK sequence verified above, just make sure ACK is 686 /* ACK sequence verified above, just make sure ACK is
669 * set. If ACK not set, just silently drop the packet. 687 * set. If ACK not set, just silently drop the packet.
688 *
689 * XXX (TFO) - if we ever allow "data after SYN", the
690 * following check needs to be removed.
670 */ 691 */
671 if (!(flg & TCP_FLAG_ACK)) 692 if (!(flg & TCP_FLAG_ACK))
672 return NULL; 693 return NULL;
673 694
695 /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
696 if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
697 tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
698 else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
699 tcp_rsk(req)->snt_synack = 0;
700
701 /* For Fast Open no more processing is needed (sk is the
702 * child socket).
703 */
704 if (fastopen)
705 return sk;
706
674 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ 707 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
675 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 708 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
676 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 709 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
@@ -678,10 +711,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
678 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); 711 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
679 return NULL; 712 return NULL;
680 } 713 }
681 if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
682 tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
683 else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
684 tcp_rsk(req)->snt_synack = 0;
685 714
686 /* OK, ACK is valid, create big socket and 715 /* OK, ACK is valid, create big socket and
687 * feed this segment to it. It will repeat all 716 * feed this segment to it. It will repeat all
@@ -706,11 +735,21 @@ listen_overflow:
706 } 735 }
707 736
708embryonic_reset: 737embryonic_reset:
709 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); 738 if (!(flg & TCP_FLAG_RST)) {
710 if (!(flg & TCP_FLAG_RST)) 739 /* Received a bad SYN pkt - for TFO We try not to reset
740 * the local connection unless it's really necessary to
741 * avoid becoming vulnerable to outside attack aiming at
742 * resetting legit local connections.
743 */
711 req->rsk_ops->send_reset(sk, skb); 744 req->rsk_ops->send_reset(sk, skb);
712 745 } else if (fastopen) { /* received a valid RST pkt */
713 inet_csk_reqsk_queue_drop(sk, req, prev); 746 reqsk_fastopen_remove(sk, req, true);
747 tcp_reset(sk);
748 }
749 if (!fastopen) {
750 inet_csk_reqsk_queue_drop(sk, req, prev);
751 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
752 }
714 return NULL; 753 return NULL;
715} 754}
716EXPORT_SYMBOL(tcp_check_req); 755EXPORT_SYMBOL(tcp_check_req);
@@ -719,6 +758,12 @@ EXPORT_SYMBOL(tcp_check_req);
719 * Queue segment on the new socket if the new socket is active, 758 * Queue segment on the new socket if the new socket is active,
720 * otherwise we just shortcircuit this and continue with 759 * otherwise we just shortcircuit this and continue with
721 * the new socket. 760 * the new socket.
761 *
762 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
763 * when entering. But other states are possible due to a race condition
764 * where after __inet_lookup_established() fails but before the listener
765 * locked is obtained, other packets cause the same connection to
766 * be created.
722 */ 767 */
723 768
724int tcp_child_process(struct sock *parent, struct sock *child, 769int tcp_child_process(struct sock *parent, struct sock *child,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d04632673a9e..cfe6ffe1c177 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -702,7 +702,8 @@ static unsigned int tcp_synack_options(struct sock *sk,
702 unsigned int mss, struct sk_buff *skb, 702 unsigned int mss, struct sk_buff *skb,
703 struct tcp_out_options *opts, 703 struct tcp_out_options *opts,
704 struct tcp_md5sig_key **md5, 704 struct tcp_md5sig_key **md5,
705 struct tcp_extend_values *xvp) 705 struct tcp_extend_values *xvp,
706 struct tcp_fastopen_cookie *foc)
706{ 707{
707 struct inet_request_sock *ireq = inet_rsk(req); 708 struct inet_request_sock *ireq = inet_rsk(req);
708 unsigned int remaining = MAX_TCP_OPTION_SPACE; 709 unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -747,7 +748,15 @@ static unsigned int tcp_synack_options(struct sock *sk,
747 if (unlikely(!ireq->tstamp_ok)) 748 if (unlikely(!ireq->tstamp_ok))
748 remaining -= TCPOLEN_SACKPERM_ALIGNED; 749 remaining -= TCPOLEN_SACKPERM_ALIGNED;
749 } 750 }
750 751 if (foc != NULL) {
752 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
753 need = (need + 3) & ~3U; /* Align to 32 bits */
754 if (remaining >= need) {
755 opts->options |= OPTION_FAST_OPEN_COOKIE;
756 opts->fastopen_cookie = foc;
757 remaining -= need;
758 }
759 }
751 /* Similar rationale to tcp_syn_options() applies here, too. 760 /* Similar rationale to tcp_syn_options() applies here, too.
752 * If the <SYN> options fit, the same options should fit now! 761 * If the <SYN> options fit, the same options should fit now!
753 */ 762 */
@@ -2028,10 +2037,10 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2028 if (push_one) 2037 if (push_one)
2029 break; 2038 break;
2030 } 2039 }
2031 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
2032 tp->prr_out += sent_pkts;
2033 2040
2034 if (likely(sent_pkts)) { 2041 if (likely(sent_pkts)) {
2042 if (tcp_in_cwnd_reduction(sk))
2043 tp->prr_out += sent_pkts;
2035 tcp_cwnd_validate(sk); 2044 tcp_cwnd_validate(sk);
2036 return false; 2045 return false;
2037 } 2046 }
@@ -2533,7 +2542,7 @@ begin_fwd:
2533 } 2542 }
2534 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2543 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2535 2544
2536 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) 2545 if (tcp_in_cwnd_reduction(sk))
2537 tp->prr_out += tcp_skb_pcount(skb); 2546 tp->prr_out += tcp_skb_pcount(skb);
2538 2547
2539 if (skb == tcp_write_queue_head(sk)) 2548 if (skb == tcp_write_queue_head(sk))
@@ -2658,7 +2667,8 @@ int tcp_send_synack(struct sock *sk)
2658 */ 2667 */
2659struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2668struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2660 struct request_sock *req, 2669 struct request_sock *req,
2661 struct request_values *rvp) 2670 struct request_values *rvp,
2671 struct tcp_fastopen_cookie *foc)
2662{ 2672{
2663 struct tcp_out_options opts; 2673 struct tcp_out_options opts;
2664 struct tcp_extend_values *xvp = tcp_xv(rvp); 2674 struct tcp_extend_values *xvp = tcp_xv(rvp);
@@ -2718,7 +2728,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2718#endif 2728#endif
2719 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2729 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2720 tcp_header_size = tcp_synack_options(sk, req, mss, 2730 tcp_header_size = tcp_synack_options(sk, req, mss,
2721 skb, &opts, &md5, xvp) 2731 skb, &opts, &md5, xvp, foc)
2722 + sizeof(*th); 2732 + sizeof(*th);
2723 2733
2724 skb_push(skb, tcp_header_size); 2734 skb_push(skb, tcp_header_size);
@@ -2772,7 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2772 } 2782 }
2773 2783
2774 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2784 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2775 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2785 /* XXX data is queued and acked as is. No buffer/window check */
2786 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
2776 2787
2777 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2788 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2778 th->window = htons(min(req->rcv_wnd, 65535U)); 2789 th->window = htons(min(req->rcv_wnd, 65535U));
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b774a03bd1dc..fc04711e80c8 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -305,6 +305,35 @@ static void tcp_probe_timer(struct sock *sk)
305} 305}
306 306
307/* 307/*
308 * Timer for Fast Open socket to retransmit SYNACK. Note that the
309 * sk here is the child socket, not the parent (listener) socket.
310 */
311static void tcp_fastopen_synack_timer(struct sock *sk)
312{
313 struct inet_connection_sock *icsk = inet_csk(sk);
314 int max_retries = icsk->icsk_syn_retries ? :
315 sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
316 struct request_sock *req;
317
318 req = tcp_sk(sk)->fastopen_rsk;
319 req->rsk_ops->syn_ack_timeout(sk, req);
320
321 if (req->retrans >= max_retries) {
322 tcp_write_err(sk);
323 return;
324 }
325 /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
326 * returned from rtx_syn_ack() to make it more persistent like
327 * regular retransmit because if the child socket has been accepted
328 * it's not good to give up too easily.
329 */
330 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
331 req->retrans++;
332 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
333 TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX);
334}
335
336/*
308 * The TCP retransmit timer. 337 * The TCP retransmit timer.
309 */ 338 */
310 339
@@ -317,7 +346,15 @@ void tcp_retransmit_timer(struct sock *sk)
317 tcp_resume_early_retransmit(sk); 346 tcp_resume_early_retransmit(sk);
318 return; 347 return;
319 } 348 }
320 349 if (tp->fastopen_rsk) {
350 BUG_ON(sk->sk_state != TCP_SYN_RECV &&
351 sk->sk_state != TCP_FIN_WAIT1);
352 tcp_fastopen_synack_timer(sk);
353 /* Before we receive ACK to our SYN-ACK don't retransmit
354 * anything else (e.g., data or FIN segments).
355 */
356 return;
357 }
321 if (!tp->packets_out) 358 if (!tp->packets_out)
322 goto out; 359 goto out;
323 360
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index d2f336ea82ca..505b30ad9182 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -26,7 +26,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
26 26
27 return inet_sk_diag_fill(sk, NULL, skb, req, 27 return inet_sk_diag_fill(sk, NULL, skb, req,
28 sk_user_ns(NETLINK_CB(cb->skb).ssk), 28 sk_user_ns(NETLINK_CB(cb->skb).ssk),
29 NETLINK_CB(cb->skb).pid, 29 NETLINK_CB(cb->skb).portid,
30 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 30 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
31} 31}
32 32
@@ -72,14 +72,14 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
72 72
73 err = inet_sk_diag_fill(sk, NULL, rep, req, 73 err = inet_sk_diag_fill(sk, NULL, rep, req,
74 sk_user_ns(NETLINK_CB(in_skb).ssk), 74 sk_user_ns(NETLINK_CB(in_skb).ssk),
75 NETLINK_CB(in_skb).pid, 75 NETLINK_CB(in_skb).portid,
76 nlh->nlmsg_seq, 0, nlh); 76 nlh->nlmsg_seq, 0, nlh);
77 if (err < 0) { 77 if (err < 0) {
78 WARN_ON(err == -EMSGSIZE); 78 WARN_ON(err == -EMSGSIZE);
79 kfree_skb(rep); 79 kfree_skb(rep);
80 goto out; 80 goto out;
81 } 81 }
82 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid, 82 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
83 MSG_DONTWAIT); 83 MSG_DONTWAIT);
84 if (err > 0) 84 if (err > 0)
85 err = 0; 85 err = 0;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 5728695b5449..4f7fe7270e37 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -201,6 +201,22 @@ config IPV6_TUNNEL
201 201
202 If unsure, say N. 202 If unsure, say N.
203 203
204config IPV6_GRE
205 tristate "IPv6: GRE tunnel"
206 select IPV6_TUNNEL
207 ---help---
208 Tunneling means encapsulating data of one protocol type within
209 another protocol and sending it over a channel that understands the
210 encapsulating protocol. This particular tunneling driver implements
211 GRE (Generic Routing Encapsulation) and at this time allows
212 encapsulating of IPv4 or IPv6 over existing IPv6 infrastructure.
213 This driver is useful if the other endpoint is a Cisco router: Cisco
214 likes GRE much better than the other Linux tunneling driver ("IP
215 tunneling" above). In addition, GRE allows multicast redistribution
216 through the tunnel.
217
218 Saying M here will produce a module called ip6_gre. If unsure, say N.
219
204config IPV6_MULTIPLE_TABLES 220config IPV6_MULTIPLE_TABLES
205 bool "IPv6: Multiple Routing Tables" 221 bool "IPv6: Multiple Routing Tables"
206 depends on EXPERIMENTAL 222 depends on EXPERIMENTAL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 686934acfac1..b6d3f79151e2 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
36 36
37obj-$(CONFIG_IPV6_SIT) += sit.o 37obj-$(CONFIG_IPV6_SIT) += sit.o
38obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 38obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
39obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
39 40
40obj-y += addrconf_core.o exthdrs_core.o 41obj-y += addrconf_core.o exthdrs_core.o
41 42
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6bc85f7c31e3..480e68422efb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -127,8 +127,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
127#endif 127#endif
128 128
129#ifdef CONFIG_IPV6_PRIVACY 129#ifdef CONFIG_IPV6_PRIVACY
130static int __ipv6_regen_rndid(struct inet6_dev *idev); 130static void __ipv6_regen_rndid(struct inet6_dev *idev);
131static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); 131static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
132static void ipv6_regen_rndid(unsigned long data); 132static void ipv6_regen_rndid(unsigned long data);
133#endif 133#endif
134 134
@@ -788,10 +788,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
788 struct in6_addr prefix; 788 struct in6_addr prefix;
789 struct rt6_info *rt; 789 struct rt6_info *rt;
790 struct net *net = dev_net(ifp->idev->dev); 790 struct net *net = dev_net(ifp->idev->dev);
791 struct flowi6 fl6 = {};
792
791 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 793 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
792 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); 794 fl6.flowi6_oif = ifp->idev->dev->ifindex;
795 fl6.daddr = prefix;
796 rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
797 RT6_LOOKUP_F_IFACE);
793 798
794 if (rt && addrconf_is_prefix_route(rt)) { 799 if (rt != net->ipv6.ip6_null_entry &&
800 addrconf_is_prefix_route(rt)) {
795 if (onlink == 0) { 801 if (onlink == 0) {
796 ip6_del_rt(rt); 802 ip6_del_rt(rt);
797 rt = NULL; 803 rt = NULL;
@@ -852,16 +858,7 @@ retry:
852 } 858 }
853 in6_ifa_hold(ifp); 859 in6_ifa_hold(ifp);
854 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); 860 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
855 if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) { 861 __ipv6_try_regen_rndid(idev, tmpaddr);
856 spin_unlock_bh(&ifp->lock);
857 write_unlock(&idev->lock);
858 pr_warn("%s: regeneration of randomized interface id failed\n",
859 __func__);
860 in6_ifa_put(ifp);
861 in6_dev_put(idev);
862 ret = -1;
863 goto out;
864 }
865 memcpy(&addr.s6_addr[8], idev->rndid, 8); 862 memcpy(&addr.s6_addr[8], idev->rndid, 8);
866 age = (now - ifp->tstamp) / HZ; 863 age = (now - ifp->tstamp) / HZ;
867 tmp_valid_lft = min_t(__u32, 864 tmp_valid_lft = min_t(__u32,
@@ -1079,8 +1076,10 @@ static int ipv6_get_saddr_eval(struct net *net,
1079 break; 1076 break;
1080 case IPV6_SADDR_RULE_PREFIX: 1077 case IPV6_SADDR_RULE_PREFIX:
1081 /* Rule 8: Use longest matching prefix */ 1078 /* Rule 8: Use longest matching prefix */
1082 score->matchlen = ret = ipv6_addr_diff(&score->ifa->addr, 1079 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1083 dst->addr); 1080 if (ret > score->ifa->prefix_len)
1081 ret = score->ifa->prefix_len;
1082 score->matchlen = ret;
1084 break; 1083 break;
1085 default: 1084 default:
1086 ret = 0; 1085 ret = 0;
@@ -1093,7 +1092,7 @@ out:
1093 return ret; 1092 return ret;
1094} 1093}
1095 1094
1096int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, 1095int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1097 const struct in6_addr *daddr, unsigned int prefs, 1096 const struct in6_addr *daddr, unsigned int prefs,
1098 struct in6_addr *saddr) 1097 struct in6_addr *saddr)
1099{ 1098{
@@ -1600,7 +1599,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
1600 1599
1601#ifdef CONFIG_IPV6_PRIVACY 1600#ifdef CONFIG_IPV6_PRIVACY
1602/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */ 1601/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
1603static int __ipv6_regen_rndid(struct inet6_dev *idev) 1602static void __ipv6_regen_rndid(struct inet6_dev *idev)
1604{ 1603{
1605regen: 1604regen:
1606 get_random_bytes(idev->rndid, sizeof(idev->rndid)); 1605 get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -1627,8 +1626,6 @@ regen:
1627 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00) 1626 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
1628 goto regen; 1627 goto regen;
1629 } 1628 }
1630
1631 return 0;
1632} 1629}
1633 1630
1634static void ipv6_regen_rndid(unsigned long data) 1631static void ipv6_regen_rndid(unsigned long data)
@@ -1642,8 +1639,7 @@ static void ipv6_regen_rndid(unsigned long data)
1642 if (idev->dead) 1639 if (idev->dead)
1643 goto out; 1640 goto out;
1644 1641
1645 if (__ipv6_regen_rndid(idev) < 0) 1642 __ipv6_regen_rndid(idev);
1646 goto out;
1647 1643
1648 expires = jiffies + 1644 expires = jiffies +
1649 idev->cnf.temp_prefered_lft * HZ - 1645 idev->cnf.temp_prefered_lft * HZ -
@@ -1664,13 +1660,10 @@ out:
1664 in6_dev_put(idev); 1660 in6_dev_put(idev);
1665} 1661}
1666 1662
1667static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) 1663static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
1668{ 1664{
1669 int ret = 0;
1670
1671 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) 1665 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
1672 ret = __ipv6_regen_rndid(idev); 1666 __ipv6_regen_rndid(idev);
1673 return ret;
1674} 1667}
1675#endif 1668#endif
1676 1669
@@ -1721,7 +1714,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1721 if (table == NULL) 1714 if (table == NULL)
1722 return NULL; 1715 return NULL;
1723 1716
1724 write_lock_bh(&table->tb6_lock); 1717 read_lock_bh(&table->tb6_lock);
1725 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0); 1718 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
1726 if (!fn) 1719 if (!fn)
1727 goto out; 1720 goto out;
@@ -1736,7 +1729,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1736 break; 1729 break;
1737 } 1730 }
1738out: 1731out:
1739 write_unlock_bh(&table->tb6_lock); 1732 read_unlock_bh(&table->tb6_lock);
1740 return rt; 1733 return rt;
1741} 1734}
1742 1735
@@ -3549,12 +3542,12 @@ static inline int inet6_ifaddr_msgsize(void)
3549} 3542}
3550 3543
3551static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, 3544static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3552 u32 pid, u32 seq, int event, unsigned int flags) 3545 u32 portid, u32 seq, int event, unsigned int flags)
3553{ 3546{
3554 struct nlmsghdr *nlh; 3547 struct nlmsghdr *nlh;
3555 u32 preferred, valid; 3548 u32 preferred, valid;
3556 3549
3557 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3550 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
3558 if (nlh == NULL) 3551 if (nlh == NULL)
3559 return -EMSGSIZE; 3552 return -EMSGSIZE;
3560 3553
@@ -3592,7 +3585,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3592} 3585}
3593 3586
3594static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, 3587static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
3595 u32 pid, u32 seq, int event, u16 flags) 3588 u32 portid, u32 seq, int event, u16 flags)
3596{ 3589{
3597 struct nlmsghdr *nlh; 3590 struct nlmsghdr *nlh;
3598 u8 scope = RT_SCOPE_UNIVERSE; 3591 u8 scope = RT_SCOPE_UNIVERSE;
@@ -3601,7 +3594,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
3601 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) 3594 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
3602 scope = RT_SCOPE_SITE; 3595 scope = RT_SCOPE_SITE;
3603 3596
3604 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3597 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
3605 if (nlh == NULL) 3598 if (nlh == NULL)
3606 return -EMSGSIZE; 3599 return -EMSGSIZE;
3607 3600
@@ -3617,7 +3610,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
3617} 3610}
3618 3611
3619static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, 3612static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3620 u32 pid, u32 seq, int event, unsigned int flags) 3613 u32 portid, u32 seq, int event, unsigned int flags)
3621{ 3614{
3622 struct nlmsghdr *nlh; 3615 struct nlmsghdr *nlh;
3623 u8 scope = RT_SCOPE_UNIVERSE; 3616 u8 scope = RT_SCOPE_UNIVERSE;
@@ -3626,7 +3619,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3626 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE) 3619 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
3627 scope = RT_SCOPE_SITE; 3620 scope = RT_SCOPE_SITE;
3628 3621
3629 nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags); 3622 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
3630 if (nlh == NULL) 3623 if (nlh == NULL)
3631 return -EMSGSIZE; 3624 return -EMSGSIZE;
3632 3625
@@ -3667,7 +3660,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3667 if (++ip_idx < s_ip_idx) 3660 if (++ip_idx < s_ip_idx)
3668 continue; 3661 continue;
3669 err = inet6_fill_ifaddr(skb, ifa, 3662 err = inet6_fill_ifaddr(skb, ifa,
3670 NETLINK_CB(cb->skb).pid, 3663 NETLINK_CB(cb->skb).portid,
3671 cb->nlh->nlmsg_seq, 3664 cb->nlh->nlmsg_seq,
3672 RTM_NEWADDR, 3665 RTM_NEWADDR,
3673 NLM_F_MULTI); 3666 NLM_F_MULTI);
@@ -3683,7 +3676,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3683 if (ip_idx < s_ip_idx) 3676 if (ip_idx < s_ip_idx)
3684 continue; 3677 continue;
3685 err = inet6_fill_ifmcaddr(skb, ifmca, 3678 err = inet6_fill_ifmcaddr(skb, ifmca,
3686 NETLINK_CB(cb->skb).pid, 3679 NETLINK_CB(cb->skb).portid,
3687 cb->nlh->nlmsg_seq, 3680 cb->nlh->nlmsg_seq,
3688 RTM_GETMULTICAST, 3681 RTM_GETMULTICAST,
3689 NLM_F_MULTI); 3682 NLM_F_MULTI);
@@ -3698,7 +3691,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3698 if (ip_idx < s_ip_idx) 3691 if (ip_idx < s_ip_idx)
3699 continue; 3692 continue;
3700 err = inet6_fill_ifacaddr(skb, ifaca, 3693 err = inet6_fill_ifacaddr(skb, ifaca,
3701 NETLINK_CB(cb->skb).pid, 3694 NETLINK_CB(cb->skb).portid,
3702 cb->nlh->nlmsg_seq, 3695 cb->nlh->nlmsg_seq,
3703 RTM_GETANYCAST, 3696 RTM_GETANYCAST,
3704 NLM_F_MULTI); 3697 NLM_F_MULTI);
@@ -3820,7 +3813,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3820 goto errout_ifa; 3813 goto errout_ifa;
3821 } 3814 }
3822 3815
3823 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, 3816 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
3824 nlh->nlmsg_seq, RTM_NEWADDR, 0); 3817 nlh->nlmsg_seq, RTM_NEWADDR, 0);
3825 if (err < 0) { 3818 if (err < 0) {
3826 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ 3819 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
@@ -3828,7 +3821,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3828 kfree_skb(skb); 3821 kfree_skb(skb);
3829 goto errout_ifa; 3822 goto errout_ifa;
3830 } 3823 }
3831 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); 3824 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3832errout_ifa: 3825errout_ifa:
3833 in6_ifa_put(ifa); 3826 in6_ifa_put(ifa);
3834errout: 3827errout:
@@ -4030,14 +4023,14 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
4030} 4023}
4031 4024
4032static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, 4025static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
4033 u32 pid, u32 seq, int event, unsigned int flags) 4026 u32 portid, u32 seq, int event, unsigned int flags)
4034{ 4027{
4035 struct net_device *dev = idev->dev; 4028 struct net_device *dev = idev->dev;
4036 struct ifinfomsg *hdr; 4029 struct ifinfomsg *hdr;
4037 struct nlmsghdr *nlh; 4030 struct nlmsghdr *nlh;
4038 void *protoinfo; 4031 void *protoinfo;
4039 4032
4040 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); 4033 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
4041 if (nlh == NULL) 4034 if (nlh == NULL)
4042 return -EMSGSIZE; 4035 return -EMSGSIZE;
4043 4036
@@ -4095,7 +4088,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
4095 if (!idev) 4088 if (!idev)
4096 goto cont; 4089 goto cont;
4097 if (inet6_fill_ifinfo(skb, idev, 4090 if (inet6_fill_ifinfo(skb, idev,
4098 NETLINK_CB(cb->skb).pid, 4091 NETLINK_CB(cb->skb).portid,
4099 cb->nlh->nlmsg_seq, 4092 cb->nlh->nlmsg_seq,
4100 RTM_NEWLINK, NLM_F_MULTI) <= 0) 4093 RTM_NEWLINK, NLM_F_MULTI) <= 0)
4101 goto out; 4094 goto out;
@@ -4143,14 +4136,14 @@ static inline size_t inet6_prefix_nlmsg_size(void)
4143} 4136}
4144 4137
4145static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, 4138static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
4146 struct prefix_info *pinfo, u32 pid, u32 seq, 4139 struct prefix_info *pinfo, u32 portid, u32 seq,
4147 int event, unsigned int flags) 4140 int event, unsigned int flags)
4148{ 4141{
4149 struct prefixmsg *pmsg; 4142 struct prefixmsg *pmsg;
4150 struct nlmsghdr *nlh; 4143 struct nlmsghdr *nlh;
4151 struct prefix_cacheinfo ci; 4144 struct prefix_cacheinfo ci;
4152 4145
4153 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags); 4146 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
4154 if (nlh == NULL) 4147 if (nlh == NULL)
4155 return -EMSGSIZE; 4148 return -EMSGSIZE;
4156 4149
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index eb6a63632d3c..4be23da32b89 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -57,7 +57,7 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
57} 57}
58 58
59/* 59/*
60 * Default policy table (RFC3484 + extensions) 60 * Default policy table (RFC6724 + extensions)
61 * 61 *
62 * prefix addr_type label 62 * prefix addr_type label
63 * ------------------------------------------------------------------------- 63 * -------------------------------------------------------------------------
@@ -69,8 +69,12 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
69 * fc00::/7 N/A 5 ULA (RFC 4193) 69 * fc00::/7 N/A 5 ULA (RFC 4193)
70 * 2001::/32 N/A 6 Teredo (RFC 4380) 70 * 2001::/32 N/A 6 Teredo (RFC 4380)
71 * 2001:10::/28 N/A 7 ORCHID (RFC 4843) 71 * 2001:10::/28 N/A 7 ORCHID (RFC 4843)
72 * fec0::/10 N/A 11 Site-local
73 * (deprecated by RFC3879)
74 * 3ffe::/16 N/A 12 6bone
72 * 75 *
73 * Note: 0xffffffff is used if we do not have any policies. 76 * Note: 0xffffffff is used if we do not have any policies.
77 * Note: Labels for ULA and 6to4 are different from labels listed in RFC6724.
74 */ 78 */
75 79
76#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL 80#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL
@@ -88,10 +92,18 @@ static const __net_initdata struct ip6addrlbl_init_table
88 .prefix = &(struct in6_addr){{{ 0xfc }}}, 92 .prefix = &(struct in6_addr){{{ 0xfc }}},
89 .prefixlen = 7, 93 .prefixlen = 7,
90 .label = 5, 94 .label = 5,
95 },{ /* fec0::/10 */
96 .prefix = &(struct in6_addr){{{ 0xfe, 0xc0 }}},
97 .prefixlen = 10,
98 .label = 11,
91 },{ /* 2002::/16 */ 99 },{ /* 2002::/16 */
92 .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}}, 100 .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}},
93 .prefixlen = 16, 101 .prefixlen = 16,
94 .label = 2, 102 .label = 2,
103 },{ /* 3ffe::/16 */
104 .prefix = &(struct in6_addr){{{ 0x3f, 0xfe }}},
105 .prefixlen = 16,
106 .label = 12,
95 },{ /* 2001::/32 */ 107 },{ /* 2001::/32 */
96 .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}}, 108 .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
97 .prefixlen = 32, 109 .prefixlen = 32,
@@ -470,10 +482,10 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
470static int ip6addrlbl_fill(struct sk_buff *skb, 482static int ip6addrlbl_fill(struct sk_buff *skb,
471 struct ip6addrlbl_entry *p, 483 struct ip6addrlbl_entry *p,
472 u32 lseq, 484 u32 lseq,
473 u32 pid, u32 seq, int event, 485 u32 portid, u32 seq, int event,
474 unsigned int flags) 486 unsigned int flags)
475{ 487{
476 struct nlmsghdr *nlh = nlmsg_put(skb, pid, seq, event, 488 struct nlmsghdr *nlh = nlmsg_put(skb, portid, seq, event,
477 sizeof(struct ifaddrlblmsg), flags); 489 sizeof(struct ifaddrlblmsg), flags);
478 if (!nlh) 490 if (!nlh)
479 return -EMSGSIZE; 491 return -EMSGSIZE;
@@ -503,7 +515,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
503 net_eq(ip6addrlbl_net(p), net)) { 515 net_eq(ip6addrlbl_net(p), net)) {
504 if ((err = ip6addrlbl_fill(skb, p, 516 if ((err = ip6addrlbl_fill(skb, p,
505 ip6addrlbl_table.seq, 517 ip6addrlbl_table.seq,
506 NETLINK_CB(cb->skb).pid, 518 NETLINK_CB(cb->skb).portid,
507 cb->nlh->nlmsg_seq, 519 cb->nlh->nlmsg_seq,
508 RTM_NEWADDRLABEL, 520 RTM_NEWADDRLABEL,
509 NLM_F_MULTI)) <= 0) 521 NLM_F_MULTI)) <= 0)
@@ -574,7 +586,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
574 } 586 }
575 587
576 err = ip6addrlbl_fill(skb, p, lseq, 588 err = ip6addrlbl_fill(skb, p, lseq,
577 NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 589 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
578 RTM_NEWADDRLABEL, 0); 590 RTM_NEWADDRLABEL, 0);
579 591
580 ip6addrlbl_put(p); 592 ip6addrlbl_put(p);
@@ -585,7 +597,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
585 goto out; 597 goto out;
586 } 598 }
587 599
588 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); 600 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
589out: 601out:
590 return err; 602 return err;
591} 603}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 286acfc21250..24995a93ef8c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -514,7 +514,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
514 ln = node_alloc(); 514 ln = node_alloc();
515 515
516 if (!ln) 516 if (!ln)
517 return NULL; 517 return ERR_PTR(-ENOMEM);
518 ln->fn_bit = plen; 518 ln->fn_bit = plen;
519 519
520 ln->parent = pn; 520 ln->parent = pn;
@@ -561,7 +561,7 @@ insert_above:
561 node_free(in); 561 node_free(in);
562 if (ln) 562 if (ln)
563 node_free(ln); 563 node_free(ln);
564 return NULL; 564 return ERR_PTR(-ENOMEM);
565 } 565 }
566 566
567 /* 567 /*
@@ -611,7 +611,7 @@ insert_above:
611 ln = node_alloc(); 611 ln = node_alloc();
612 612
613 if (!ln) 613 if (!ln)
614 return NULL; 614 return ERR_PTR(-ENOMEM);
615 615
616 ln->fn_bit = plen; 616 ln->fn_bit = plen;
617 617
@@ -777,11 +777,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
777 777
778 if (IS_ERR(fn)) { 778 if (IS_ERR(fn)) {
779 err = PTR_ERR(fn); 779 err = PTR_ERR(fn);
780 fn = NULL;
781 }
782
783 if (!fn)
784 goto out; 780 goto out;
781 }
785 782
786 pn = fn; 783 pn = fn;
787 784
@@ -820,15 +817,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
820 allow_create, replace_required); 817 allow_create, replace_required);
821 818
822 if (IS_ERR(sn)) { 819 if (IS_ERR(sn)) {
823 err = PTR_ERR(sn);
824 sn = NULL;
825 }
826 if (!sn) {
827 /* If it is failed, discard just allocated 820 /* If it is failed, discard just allocated
828 root, and then (in st_failure) stale node 821 root, and then (in st_failure) stale node
829 in main tree. 822 in main tree.
830 */ 823 */
831 node_free(sfn); 824 node_free(sfn);
825 err = PTR_ERR(sn);
832 goto st_failure; 826 goto st_failure;
833 } 827 }
834 828
@@ -843,10 +837,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
843 837
844 if (IS_ERR(sn)) { 838 if (IS_ERR(sn)) {
845 err = PTR_ERR(sn); 839 err = PTR_ERR(sn);
846 sn = NULL;
847 }
848 if (!sn)
849 goto st_failure; 840 goto st_failure;
841 }
850 } 842 }
851 843
852 if (!fn->leaf) { 844 if (!fn->leaf) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
new file mode 100644
index 000000000000..0185679c5f53
--- /dev/null
+++ b/net/ipv6/ip6_gre.c
@@ -0,0 +1,1770 @@
1/*
2 * GRE over IPv6 protocol decoder.
3 *
4 * Authors: Dmitry Kozlov (xeb@mail.ru)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/capability.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/uaccess.h>
21#include <linux/skbuff.h>
22#include <linux/netdevice.h>
23#include <linux/in.h>
24#include <linux/tcp.h>
25#include <linux/udp.h>
26#include <linux/if_arp.h>
27#include <linux/mroute.h>
28#include <linux/init.h>
29#include <linux/in6.h>
30#include <linux/inetdevice.h>
31#include <linux/igmp.h>
32#include <linux/netfilter_ipv4.h>
33#include <linux/etherdevice.h>
34#include <linux/if_ether.h>
35#include <linux/hash.h>
36#include <linux/if_tunnel.h>
37#include <linux/ip6_tunnel.h>
38
39#include <net/sock.h>
40#include <net/ip.h>
41#include <net/icmp.h>
42#include <net/protocol.h>
43#include <net/addrconf.h>
44#include <net/arp.h>
45#include <net/checksum.h>
46#include <net/dsfield.h>
47#include <net/inet_ecn.h>
48#include <net/xfrm.h>
49#include <net/net_namespace.h>
50#include <net/netns/generic.h>
51#include <net/rtnetlink.h>
52
53#include <net/ipv6.h>
54#include <net/ip6_fib.h>
55#include <net/ip6_route.h>
56#include <net/ip6_tunnel.h>
57
58
59static bool log_ecn_error = true;
60module_param(log_ecn_error, bool, 0644);
61MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
62
63#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
64#define IPV6_TCLASS_SHIFT 20
65
66#define HASH_SIZE_SHIFT 5
67#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
68
/* pernet id, used with net_generic() to reach this namespace's ip6gre state */
static int ip6gre_net_id __read_mostly;

/* Per-network-namespace ip6gre state. */
struct ip6gre_net {
	/* 4 hash tables indexed by wildcard level (see comment block below):
	 * [3] (remote,local), [2] (remote,*), [1] (*,local), [0] (*,*) */
	struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];

	/* fallback device that receives keyless packets matching no tunnel */
	struct net_device *fb_tunnel_dev;
};
75
76static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
77static int ip6gre_tunnel_init(struct net_device *dev);
78static void ip6gre_tunnel_setup(struct net_device *dev);
79static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
80static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
81
82/* Tunnel hash table */
83
84/*
85 4 hash tables:
86
87 3: (remote,local)
88 2: (remote,*)
89 1: (*,local)
90 0: (*,*)
91
92 We require exact key match i.e. if a key is present in packet
93 it will match only tunnel with the same key; if it is not present,
94 it will match only keyless tunnel.
95
96 All keysless packets, if not matched configured keyless tunnels
97 will match fallback tunnel.
98 */
99
100#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
101static u32 HASH_ADDR(const struct in6_addr *addr)
102{
103 u32 hash = ipv6_addr_hash(addr);
104
105 return hash_32(hash, HASH_SIZE_SHIFT);
106}
107
108#define tunnels_r_l tunnels[3]
109#define tunnels_r tunnels[2]
110#define tunnels_l tunnels[1]
111#define tunnels_wc tunnels[0]
112/*
113 * Locking : hash tables are protected by RCU and RTNL
114 */
115
116#define for_each_ip_tunnel_rcu(start) \
117 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
118
/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	u64 rx_packets;			/* tunnel packets received */
	u64 rx_bytes;			/* tunnel bytes received */
	u64 tx_packets;			/* tunnel packets transmitted */
	u64 tx_bytes;			/* tunnel bytes transmitted */
	struct u64_stats_sync syncp;	/* consistent 64-bit reads on 32-bit SMP */
};
127
/*
 * ndo_get_stats64: sum the per-cpu RX/TX counters into @tot.
 * The u64_stats_fetch_begin/retry loop re-reads a CPU's counters if a
 * writer updated them concurrently (needed for 64-bit stats on 32-bit).
 * Returns @tot for caller convenience.
 */
static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
		struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	/* error counters are kept in the shared netdev->stats, not per-cpu */
	tot->multicast = dev->stats.multicast;
	tot->rx_crc_errors = dev->stats.rx_crc_errors;
	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
	tot->rx_errors = dev->stats.rx_errors;

	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
	tot->tx_errors = dev->stats.tx_errors;

	return tot;
}
167
168/* Given src, dst and key, find appropriate for input tunnel. */
169
170static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
171 const struct in6_addr *remote, const struct in6_addr *local,
172 __be32 key, __be16 gre_proto)
173{
174 struct net *net = dev_net(dev);
175 int link = dev->ifindex;
176 unsigned int h0 = HASH_ADDR(remote);
177 unsigned int h1 = HASH_KEY(key);
178 struct ip6_tnl *t, *cand = NULL;
179 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
180 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
181 ARPHRD_ETHER : ARPHRD_IP6GRE;
182 int score, cand_score = 4;
183
184 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
185 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
186 !ipv6_addr_equal(remote, &t->parms.raddr) ||
187 key != t->parms.i_key ||
188 !(t->dev->flags & IFF_UP))
189 continue;
190
191 if (t->dev->type != ARPHRD_IP6GRE &&
192 t->dev->type != dev_type)
193 continue;
194
195 score = 0;
196 if (t->parms.link != link)
197 score |= 1;
198 if (t->dev->type != dev_type)
199 score |= 2;
200 if (score == 0)
201 return t;
202
203 if (score < cand_score) {
204 cand = t;
205 cand_score = score;
206 }
207 }
208
209 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
210 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
211 key != t->parms.i_key ||
212 !(t->dev->flags & IFF_UP))
213 continue;
214
215 if (t->dev->type != ARPHRD_IP6GRE &&
216 t->dev->type != dev_type)
217 continue;
218
219 score = 0;
220 if (t->parms.link != link)
221 score |= 1;
222 if (t->dev->type != dev_type)
223 score |= 2;
224 if (score == 0)
225 return t;
226
227 if (score < cand_score) {
228 cand = t;
229 cand_score = score;
230 }
231 }
232
233 for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
234 if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
235 (!ipv6_addr_equal(local, &t->parms.raddr) ||
236 !ipv6_addr_is_multicast(local))) ||
237 key != t->parms.i_key ||
238 !(t->dev->flags & IFF_UP))
239 continue;
240
241 if (t->dev->type != ARPHRD_IP6GRE &&
242 t->dev->type != dev_type)
243 continue;
244
245 score = 0;
246 if (t->parms.link != link)
247 score |= 1;
248 if (t->dev->type != dev_type)
249 score |= 2;
250 if (score == 0)
251 return t;
252
253 if (score < cand_score) {
254 cand = t;
255 cand_score = score;
256 }
257 }
258
259 for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
260 if (t->parms.i_key != key ||
261 !(t->dev->flags & IFF_UP))
262 continue;
263
264 if (t->dev->type != ARPHRD_IP6GRE &&
265 t->dev->type != dev_type)
266 continue;
267
268 score = 0;
269 if (t->parms.link != link)
270 score |= 1;
271 if (t->dev->type != dev_type)
272 score |= 2;
273 if (score == 0)
274 return t;
275
276 if (score < cand_score) {
277 cand = t;
278 cand_score = score;
279 }
280 }
281
282 if (cand != NULL)
283 return cand;
284
285 dev = ign->fb_tunnel_dev;
286 if (dev->flags & IFF_UP)
287 return netdev_priv(dev);
288
289 return NULL;
290}
291
292static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
293 const struct __ip6_tnl_parm *p)
294{
295 const struct in6_addr *remote = &p->raddr;
296 const struct in6_addr *local = &p->laddr;
297 unsigned int h = HASH_KEY(p->i_key);
298 int prio = 0;
299
300 if (!ipv6_addr_any(local))
301 prio |= 1;
302 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
303 prio |= 2;
304 h ^= HASH_ADDR(remote);
305 }
306
307 return &ign->tunnels[prio][h];
308}
309
/* Convenience wrapper: bucket slot for an existing tunnel's parameters. */
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}
315
/*
 * Insert tunnel @t at the head of its hash bucket.  rcu_assign_pointer
 * publishes t->next before *tp so concurrent RCU readers never see a
 * half-linked entry; writers are serialized by RTNL (rtnl_dereference).
 */
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
323
/*
 * Remove tunnel @t from its hash bucket, if linked.  Walks the bucket's
 * singly-linked list via a pointer-to-slot so the predecessor's next
 * pointer can be updated in place.  Caller holds RTNL.
 */
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			/* splice t out; RCU readers may still traverse t->next */
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
338
339static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
340 const struct __ip6_tnl_parm *parms,
341 int type)
342{
343 const struct in6_addr *remote = &parms->raddr;
344 const struct in6_addr *local = &parms->laddr;
345 __be32 key = parms->i_key;
346 int link = parms->link;
347 struct ip6_tnl *t;
348 struct ip6_tnl __rcu **tp;
349 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
350
351 for (tp = __ip6gre_bucket(ign, parms);
352 (t = rtnl_dereference(*tp)) != NULL;
353 tp = &t->next)
354 if (ipv6_addr_equal(local, &t->parms.laddr) &&
355 ipv6_addr_equal(remote, &t->parms.raddr) &&
356 key == t->parms.i_key &&
357 link == t->parms.link &&
358 type == t->dev->type)
359 break;
360
361 return t;
362}
363
/*
 * Find the tunnel matching @parms; when none exists and @create is set,
 * allocate, configure and register a new ip6gre netdevice for it.
 * Returns the tunnel, or NULL (not found with !create, or allocation /
 * registration failure).  Caller holds RTNL.
 */
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "ip6gre%d");	/* core assigns the unit number */

	dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	ip6gre_tnl_link_config(nt, 1);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	/* reference dropped again in ip6gre_tunnel_uninit() */
	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
409
/*
 * ndo_uninit: remove the tunnel from the lookup tables and drop the
 * device reference taken when the tunnel was linked.
 */
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
418
419
/*
 * ICMPv6 error handler for ip6gre.  @skb carries the returned packet
 * starting at its outer IPv6 header; @offset is the offset of the GRE
 * header inside it.  Parse the GRE flags to find the optional key,
 * look up the tunnel the error refers to, log/react per ICMP type and
 * record the error for the tunnel's rate limiting.
 */
static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	__be16 *p = (__be16 *)(skb->data + offset);
	int grehlen = offset + 4;	/* flags(2) + protocol(2) minimum */
	struct ip6_tnl *t;
	__be16 flags;

	/* NOTE(review): flags/p are read before the pskb_may_pull() below;
	 * presumably the ICMP layer guarantees at least offset+4 bytes are
	 * linear here — verify against the caller. */
	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;	/* key field */
			if (flags&GRE_CSUM)
				grehlen += 4;	/* csum precedes the key */
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (!pskb_may_pull(skb, grehlen))
		return;
	/* pskb_may_pull() may relocate skb->data: refresh both pointers */
	ipv6h = (const struct ipv6hdr *)skb->data;
	p = (__be16 *)(skb->data + offset);

	/* key (if any) is the last 32-bit word of the parsed GRE header;
	 * daddr/saddr are swapped: the returned packet was ours outbound */
	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				p[1]);
	if (t == NULL)
		return;

	switch (type) {
		__u32 teli;
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 mtu;
	case ICMPV6_DEST_UNREACH:
		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
				     t->parms.name);
		break;
	case ICMPV6_TIME_EXCEED:
		if (code == ICMPV6_EXC_HOPLIMIT) {
			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PARAMPROB:
		teli = 0;
		if (code == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		/* info points just past the offending encap-limit option */
		if (teli && teli == info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						     t->parms.name);
			}
		} else {
			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					     t->parms.name);
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		/* shrink the tunnel MTU to the reported limit, clamped */
		mtu = info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		t->dev->mtu = mtu;
		break;
	}

	/* count bursts of errors inside the timeout window */
	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
497
/*
 * GRE-over-IPv6 receive path.  @skb points at the GRE header (outer
 * IPv6 header already parsed).  Validates the GRE flags, optional
 * checksum/key/sequence fields, finds the owning tunnel and hands the
 * decapsulated packet to the stack via netif_rx().  Always consumes
 * @skb and returns 0.
 */
static int ip6gre_rcv(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	u8 *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32 seqno = 0;
	struct ip6_tnl *tunnel;
	int offset = 4;		/* base GRE header: flags(2) + protocol(2) */
	__be16 gre_proto;
	int err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto drop;

	ipv6h = ipv6_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				/* recompute; csum != 0 below means corrupt */
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ip6gre_tunnel_lookup(skb->dev,
					  &ipv6h->saddr, &ipv6h->daddr, key,
					  gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		/* per-tunnel receive policy (address/ifindex checks) */
		if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
			tunnel->dev->stats.rx_dropped++;
			goto drop;
		}

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* strip the GRE header so skb->data is the inner packet */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;

		/* checksum present-but-bad, or required-but-absent: drop */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			/* enforce monotonically increasing sequence numbers */
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno &&
					(s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			ipv6h = ipv6_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);

		/* propagate ECN from the outer header; err > 1 means the
		 * combination is invalid and the packet must be dropped */
		err = IP6_ECN_decapsulate(ipv6h, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
						     &ipv6h->saddr,
						     ipv6_get_dsfield(ipv6h));
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		netif_rx(skb);

		return 0;
	}
	/* no tunnel: tell the sender the GRE protocol is unreachable */
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
646
/* Scratch destination-options header used to attach the IPv6 Tunnel
 * Encapsulation Limit option (RFC 2473) to encapsulated packets.
 * ops.dst0opt is pointed into dst_opt by init_tel_txopt().
 */
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];	/* raw 8-byte destination-options header */
};
651
/* Fill @opt with a destination-options header carrying one Tunnel
 * Encapsulation Limit TLV of value @encap_limit, PadN-padded to 8 bytes.
 * NOTE(review): bytes 0-1 (next header / hdr length) are left zero by the
 * memset; presumably filled in when ipv6_push_nfrag_opts() emits the
 * header — confirm.
 */
static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;	/* option type */
	opt->dst_opt[3] = 1;				/* option data length */
	opt->dst_opt[4] = encap_limit;			/* the limit itself */
	opt->dst_opt[5] = IPV6_TLV_PADN;		/* PadN to 8-byte boundary */
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
665
666static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
667 struct net_device *dev,
668 __u8 dsfield,
669 struct flowi6 *fl6,
670 int encap_limit,
671 __u32 *pmtu)
672{
673 struct net *net = dev_net(dev);
674 struct ip6_tnl *tunnel = netdev_priv(dev);
675 struct net_device *tdev; /* Device to other host */
676 struct ipv6hdr *ipv6h; /* Our new IP header */
677 unsigned int max_headroom; /* The extra header space needed */
678 int gre_hlen;
679 struct ipv6_tel_txoption opt;
680 int mtu;
681 struct dst_entry *dst = NULL, *ndst = NULL;
682 struct net_device_stats *stats = &tunnel->dev->stats;
683 int err = -1;
684 u8 proto;
685 int pkt_len;
686 struct sk_buff *new_skb;
687
688 if (dev->type == ARPHRD_ETHER)
689 IPCB(skb)->flags = 0;
690
691 if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
692 gre_hlen = 0;
693 ipv6h = (struct ipv6hdr *)skb->data;
694 fl6->daddr = ipv6h->daddr;
695 } else {
696 gre_hlen = tunnel->hlen;
697 fl6->daddr = tunnel->parms.raddr;
698 }
699
700 if (!fl6->flowi6_mark)
701 dst = ip6_tnl_dst_check(tunnel);
702
703 if (!dst) {
704 ndst = ip6_route_output(net, NULL, fl6);
705
706 if (ndst->error)
707 goto tx_err_link_failure;
708 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
709 if (IS_ERR(ndst)) {
710 err = PTR_ERR(ndst);
711 ndst = NULL;
712 goto tx_err_link_failure;
713 }
714 dst = ndst;
715 }
716
717 tdev = dst->dev;
718
719 if (tdev == dev) {
720 stats->collisions++;
721 net_warn_ratelimited("%s: Local routing loop detected!\n",
722 tunnel->parms.name);
723 goto tx_err_dst_release;
724 }
725
726 mtu = dst_mtu(dst) - sizeof(*ipv6h);
727 if (encap_limit >= 0) {
728 max_headroom += 8;
729 mtu -= 8;
730 }
731 if (mtu < IPV6_MIN_MTU)
732 mtu = IPV6_MIN_MTU;
733 if (skb_dst(skb))
734 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
735 if (skb->len > mtu) {
736 *pmtu = mtu;
737 err = -EMSGSIZE;
738 goto tx_err_dst_release;
739 }
740
741 if (tunnel->err_count > 0) {
742 if (time_before(jiffies,
743 tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
744 tunnel->err_count--;
745
746 dst_link_failure(skb);
747 } else
748 tunnel->err_count = 0;
749 }
750
751 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
752
753 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
754 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
755 new_skb = skb_realloc_headroom(skb, max_headroom);
756 if (max_headroom > dev->needed_headroom)
757 dev->needed_headroom = max_headroom;
758 if (!new_skb)
759 goto tx_err_dst_release;
760
761 if (skb->sk)
762 skb_set_owner_w(new_skb, skb->sk);
763 consume_skb(skb);
764 skb = new_skb;
765 }
766
767 skb_dst_drop(skb);
768
769 if (fl6->flowi6_mark) {
770 skb_dst_set(skb, dst);
771 ndst = NULL;
772 } else {
773 skb_dst_set_noref(skb, dst);
774 }
775
776 skb->transport_header = skb->network_header;
777
778 proto = NEXTHDR_GRE;
779 if (encap_limit >= 0) {
780 init_tel_txopt(&opt, encap_limit);
781 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
782 }
783
784 skb_push(skb, gre_hlen);
785 skb_reset_network_header(skb);
786
787 /*
788 * Push down and install the IP header.
789 */
790 ipv6h = ipv6_hdr(skb);
791 *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
792 dsfield = INET_ECN_encapsulate(0, dsfield);
793 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
794 ipv6h->hop_limit = tunnel->parms.hop_limit;
795 ipv6h->nexthdr = proto;
796 ipv6h->saddr = fl6->saddr;
797 ipv6h->daddr = fl6->daddr;
798
799 ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
800 ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
801 htons(ETH_P_TEB) : skb->protocol;
802
803 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
804 __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
805
806 if (tunnel->parms.o_flags&GRE_SEQ) {
807 ++tunnel->o_seqno;
808 *ptr = htonl(tunnel->o_seqno);
809 ptr--;
810 }
811 if (tunnel->parms.o_flags&GRE_KEY) {
812 *ptr = tunnel->parms.o_key;
813 ptr--;
814 }
815 if (tunnel->parms.o_flags&GRE_CSUM) {
816 *ptr = 0;
817 *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
818 skb->len - sizeof(struct ipv6hdr));
819 }
820 }
821
822 nf_reset(skb);
823 pkt_len = skb->len;
824 err = ip6_local_out(skb);
825
826 if (net_xmit_eval(err) == 0) {
827 struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
828
829 tstats->tx_bytes += pkt_len;
830 tstats->tx_packets++;
831 } else {
832 stats->tx_errors++;
833 stats->tx_aborted_errors++;
834 }
835
836 if (ndst)
837 ip6_tnl_dst_store(tunnel, ndst);
838
839 return 0;
840tx_err_link_failure:
841 stats->tx_carrier_errors++;
842 dst_link_failure(skb);
843tx_err_dst_release:
844 dst_release(ndst);
845 return err;
846}
847
848static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
849{
850 struct ip6_tnl *t = netdev_priv(dev);
851 const struct iphdr *iph = ip_hdr(skb);
852 int encap_limit = -1;
853 struct flowi6 fl6;
854 __u8 dsfield;
855 __u32 mtu;
856 int err;
857
858 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
859 encap_limit = t->parms.encap_limit;
860
861 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
862 fl6.flowi6_proto = IPPROTO_IPIP;
863
864 dsfield = ipv4_get_dsfield(iph);
865
866 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
867 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
868 & IPV6_TCLASS_MASK;
869 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
870 fl6.flowi6_mark = skb->mark;
871
872 err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
873 if (err != 0) {
874 /* XXX: send ICMP error even if DF is not set. */
875 if (err == -EMSGSIZE)
876 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
877 htonl(mtu));
878 return -1;
879 }
880
881 return 0;
882}
883
884static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
885{
886 struct ip6_tnl *t = netdev_priv(dev);
887 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
888 int encap_limit = -1;
889 __u16 offset;
890 struct flowi6 fl6;
891 __u8 dsfield;
892 __u32 mtu;
893 int err;
894
895 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
896 return -1;
897
898 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
899 if (offset > 0) {
900 struct ipv6_tlv_tnl_enc_lim *tel;
901 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
902 if (tel->encap_limit == 0) {
903 icmpv6_send(skb, ICMPV6_PARAMPROB,
904 ICMPV6_HDR_FIELD, offset + 2);
905 return -1;
906 }
907 encap_limit = tel->encap_limit - 1;
908 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
909 encap_limit = t->parms.encap_limit;
910
911 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
912 fl6.flowi6_proto = IPPROTO_IPV6;
913
914 dsfield = ipv6_get_dsfield(ipv6h);
915 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
916 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
917 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
918 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
919 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
920 fl6.flowi6_mark = skb->mark;
921
922 err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
923 if (err != 0) {
924 if (err == -EMSGSIZE)
925 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
926 return -1;
927 }
928
929 return 0;
930}
931
932/**
933 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
934 * @t: the outgoing tunnel device
935 * @hdr: IPv6 header from the incoming packet
936 *
937 * Description:
938 * Avoid trivial tunneling loop by checking that tunnel exit-point
939 * doesn't match source of incoming packet.
940 *
941 * Return:
942 * 1 if conflict,
943 * 0 else
944 **/
945
946static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
947 const struct ipv6hdr *hdr)
948{
949 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
950}
951
952static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
953{
954 struct ip6_tnl *t = netdev_priv(dev);
955 int encap_limit = -1;
956 struct flowi6 fl6;
957 __u32 mtu;
958 int err;
959
960 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
961 encap_limit = t->parms.encap_limit;
962
963 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
964 fl6.flowi6_proto = skb->protocol;
965
966 err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
967
968 return err;
969}
970
971static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
972 struct net_device *dev)
973{
974 struct ip6_tnl *t = netdev_priv(dev);
975 struct net_device_stats *stats = &t->dev->stats;
976 int ret;
977
978 if (!ip6_tnl_xmit_ctl(t))
979 return -1;
980
981 switch (skb->protocol) {
982 case htons(ETH_P_IP):
983 ret = ip6gre_xmit_ipv4(skb, dev);
984 break;
985 case htons(ETH_P_IPV6):
986 ret = ip6gre_xmit_ipv6(skb, dev);
987 break;
988 default:
989 ret = ip6gre_xmit_other(skb, dev);
990 break;
991 }
992
993 if (ret < 0)
994 goto tx_err;
995
996 return NETDEV_TX_OK;
997
998tx_err:
999 stats->tx_errors++;
1000 stats->tx_dropped++;
1001 kfree_skb(skb);
1002 return NETDEV_TX_OK;
1003}
1004
/* (Re)derive device state from the tunnel parameters: L2 addresses for
 * NOARP devices, the flowi6 template used on transmit, point-to-point
 * flags, the precomputed encapsulation header length (t->hlen) and,
 * when @set_mtu, the device MTU from the current route to the remote.
 */
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int addend = sizeof(struct ipv6hdr) + 4;	/* outer IPv6 + base GRE */

	if (dev->type != ARPHRD_ETHER) {
		/* Expose the tunnel endpoints as the device L2 addresses. */
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	/* Fixed tclass/flowlabel come from flowinfo unless the tunnel
	 * copies them from each inner packet (USE_ORIG_* flags).
	 */
	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	/* Recompute capability bits from the current addresses. */
	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
			p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	/* Precalculate GRE options length */
	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (t->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (t->parms.o_flags&GRE_KEY)
			addend += 4;
		if (t->parms.o_flags&GRE_SEQ)
			addend += 4;
	}

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(dev_net(dev),
						 &p->raddr, &p->laddr,
						 p->link, strict);

		/* NOTE(review): returning here skips the final
		 * "t->hlen = addend" below, leaving hlen stale when the
		 * route lookup fails — confirm this is intended.
		 */
		if (rt == NULL)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len + addend;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - addend;
				/* Reserve room for the encap-limit option. */
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		dst_release(&rt->dst);
	}

	t->hlen = addend;
}
1077
1078static int ip6gre_tnl_change(struct ip6_tnl *t,
1079 const struct __ip6_tnl_parm *p, int set_mtu)
1080{
1081 t->parms.laddr = p->laddr;
1082 t->parms.raddr = p->raddr;
1083 t->parms.flags = p->flags;
1084 t->parms.hop_limit = p->hop_limit;
1085 t->parms.encap_limit = p->encap_limit;
1086 t->parms.flowinfo = p->flowinfo;
1087 t->parms.link = p->link;
1088 t->parms.proto = p->proto;
1089 t->parms.i_key = p->i_key;
1090 t->parms.o_key = p->o_key;
1091 t->parms.i_flags = p->i_flags;
1092 t->parms.o_flags = p->o_flags;
1093 ip6_tnl_dst_reset(t);
1094 ip6gre_tnl_link_config(t, set_mtu);
1095 return 0;
1096}
1097
/* Convert the userspace ioctl parameter block into the kernel's
 * internal tunnel parameter representation (straight memberwise copy;
 * note: u->proto is intentionally not copied here).
 */
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
	const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = u->i_flags;
	p->o_flags = u->o_flags;
	memcpy(p->name, u->name, sizeof(u->name));
}
1114
/* Convert internal tunnel parameters back to the userspace ioctl
 * layout; the protocol is always reported as IPPROTO_GRE.
 */
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
	const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = p->i_flags;
	u->o_flags = p->o_flags;
	memcpy(u->name, p->name, sizeof(u->name));
}
1132
/* Legacy ioctl interface (SIOC{GET,ADD,CHG,DEL}TUNNEL), mirroring the
 * IPv4 ip_gre handler. The netlink path (ip6gre_newlink etc.) is the
 * modern equivalent. Returns 0 or a negative errno.
 */
static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		/* On the fallback device, look up the tunnel the user
		 * describes; on a real tunnel device, report itself.
		 */
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* Routing headers and nonzero versions are unsupported. */
		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		/* Keys are meaningful only with the corresponding flag. */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* Parameters already belong to another device. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				/* Re-key this device: unhash, update, rehash. */
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			/* Echo the effective parameters back to userspace. */
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			/* Deleting "through" the fallback device: find the
			 * described tunnel; the fallback itself is protected.
			 */
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (t == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1240
1241static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1242{
1243 struct ip6_tnl *tunnel = netdev_priv(dev);
1244 if (new_mtu < 68 ||
1245 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1246 return -EINVAL;
1247 dev->mtu = new_mtu;
1248 return 0;
1249}
1250
/* header_ops .create hook: pre-build the outer IPv6 + base GRE header.
 * Installed only when the tunnel has no fixed remote (see
 * ip6gre_tunnel_init()); @daddr/@saddr, when given, override the
 * configured endpoints. Returns the pushed header length, or its
 * negation when the destination is still unresolved (hard-header
 * convention for an incomplete header).
 */
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(ipv6h+1);

	/* Version 6 plus the tunnel's flow label in the first 32 bits;
	 * payload length presumably filled in at transmit time — confirm.
	 */
	*(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	/* Base GRE header: flags word, then protocol type. */
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 * Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}
1281
/* Hard-header ops for tunnels without a fixed remote endpoint. */
static const struct header_ops ip6gre_header_ops = {
	.create = ip6gre_header,
};
1285
/* Device operations for plain (layer-3) ip6gre devices. */
static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init = ip6gre_tunnel_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_do_ioctl = ip6gre_tunnel_ioctl,
	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
	.ndo_get_stats64 = ip6gre_get_stats64,
};
1294
/* Device destructor: release the per-cpu stats, then the netdev. */
static void ip6gre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
1300
/* netdev setup callback for layer-3 ip6gre devices: provisional header
 * space and MTU for the fixed IPv6 + base GRE encapsulation;
 * ip6gre_tnl_link_config() refines both once parameters are known.
 */
static void ip6gre_tunnel_setup(struct net_device *dev)
{
	struct ip6_tnl *t;

	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
	t = netdev_priv(dev);
	/* Reserve room for the encap-limit destination option. */
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->flags |= IFF_NOARP;
	dev->iflink = 0;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_NETNS_LOCAL;
	/* NOTE(review): keeping the skb dst across xmit — presumably
	 * because the tunnel caches its route (ip6_tnl_dst_check); confirm.
	 */
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
1320
1321static int ip6gre_tunnel_init(struct net_device *dev)
1322{
1323 struct ip6_tnl *tunnel;
1324
1325 tunnel = netdev_priv(dev);
1326
1327 tunnel->dev = dev;
1328 strcpy(tunnel->parms.name, dev->name);
1329
1330 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1331 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1332
1333 if (ipv6_addr_any(&tunnel->parms.raddr))
1334 dev->header_ops = &ip6gre_header_ops;
1335
1336 dev->tstats = alloc_percpu(struct pcpu_tstats);
1337 if (!dev->tstats)
1338 return -ENOMEM;
1339
1340 return 0;
1341}
1342
1343static void ip6gre_fb_tunnel_init(struct net_device *dev)
1344{
1345 struct ip6_tnl *tunnel = netdev_priv(dev);
1346
1347 tunnel->dev = dev;
1348 strcpy(tunnel->parms.name, dev->name);
1349
1350 tunnel->hlen = sizeof(struct ipv6hdr) + 4;
1351
1352 dev_hold(dev);
1353}
1354
1355
/* IPv6 protocol handler for IPPROTO_GRE (xfrm policy checked in
 * ip6gre_rcv itself, hence NOPOLICY).
 */
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler = ip6gre_rcv,
	.err_handler = ip6gre_err,
	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1361
1362static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
1363 struct list_head *head)
1364{
1365 int prio;
1366
1367 for (prio = 0; prio < 4; prio++) {
1368 int h;
1369 for (h = 0; h < HASH_SIZE; h++) {
1370 struct ip6_tnl *t;
1371
1372 t = rtnl_dereference(ign->tunnels[prio][h]);
1373
1374 while (t != NULL) {
1375 unregister_netdevice_queue(t->dev, head);
1376 t = rtnl_dereference(t->next);
1377 }
1378 }
1379 }
1380}
1381
/* Per-namespace init: create and register the fallback "ip6gre0"
 * device and hash it into the wildcard bucket. Returns 0 or -errno,
 * releasing the device on registration failure.
 */
static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	/* Make the fallback tunnel reachable via the wildcard hash slot. */
	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	/* Not yet registered: free directly rather than unregistering. */
	ip6gre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
1411
1412static void __net_exit ip6gre_exit_net(struct net *net)
1413{
1414 struct ip6gre_net *ign;
1415 LIST_HEAD(list);
1416
1417 ign = net_generic(net, ip6gre_net_id);
1418 rtnl_lock();
1419 ip6gre_destroy_tunnels(ign, &list);
1420 unregister_netdevice_many(&list);
1421 rtnl_unlock();
1422}
1423
/* Per-network-namespace lifecycle hooks and state sizing. */
static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit = ip6gre_exit_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
1430
1431static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1432{
1433 __be16 flags;
1434
1435 if (!data)
1436 return 0;
1437
1438 flags = 0;
1439 if (data[IFLA_GRE_IFLAGS])
1440 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1441 if (data[IFLA_GRE_OFLAGS])
1442 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1443 if (flags & (GRE_VERSION|GRE_ROUTING))
1444 return -EINVAL;
1445
1446 return 0;
1447}
1448
1449static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1450{
1451 struct in6_addr daddr;
1452
1453 if (tb[IFLA_ADDRESS]) {
1454 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1455 return -EINVAL;
1456 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1457 return -EADDRNOTAVAIL;
1458 }
1459
1460 if (!data)
1461 goto out;
1462
1463 if (data[IFLA_GRE_REMOTE]) {
1464 nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
1465 if (ipv6_addr_any(&daddr))
1466 return -EINVAL;
1467 }
1468
1469out:
1470 return ip6gre_tunnel_validate(tb, data);
1471}
1472
1473
1474static void ip6gre_netlink_parms(struct nlattr *data[],
1475 struct __ip6_tnl_parm *parms)
1476{
1477 memset(parms, 0, sizeof(*parms));
1478
1479 if (!data)
1480 return;
1481
1482 if (data[IFLA_GRE_LINK])
1483 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1484
1485 if (data[IFLA_GRE_IFLAGS])
1486 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1487
1488 if (data[IFLA_GRE_OFLAGS])
1489 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1490
1491 if (data[IFLA_GRE_IKEY])
1492 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1493
1494 if (data[IFLA_GRE_OKEY])
1495 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1496
1497 if (data[IFLA_GRE_LOCAL])
1498 nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
1499
1500 if (data[IFLA_GRE_REMOTE])
1501 nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
1502
1503 if (data[IFLA_GRE_TTL])
1504 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
1505
1506 if (data[IFLA_GRE_ENCAP_LIMIT])
1507 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
1508
1509 if (data[IFLA_GRE_FLOWINFO])
1510 parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
1511
1512 if (data[IFLA_GRE_FLAGS])
1513 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
1514}
1515
1516static int ip6gre_tap_init(struct net_device *dev)
1517{
1518 struct ip6_tnl *tunnel;
1519
1520 tunnel = netdev_priv(dev);
1521
1522 tunnel->dev = dev;
1523 strcpy(tunnel->parms.name, dev->name);
1524
1525 ip6gre_tnl_link_config(tunnel, 1);
1526
1527 dev->tstats = alloc_percpu(struct pcpu_tstats);
1528 if (!dev->tstats)
1529 return -ENOMEM;
1530
1531 return 0;
1532}
1533
/* Device operations for ip6gretap (Ethernet) devices; no legacy ioctl. */
static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init = ip6gre_tap_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
	.ndo_get_stats64 = ip6gre_get_stats64,
};
1543
/* netdev setup callback for ip6gretap: start from a standard Ethernet
 * device, then install the tunnel ops and keep it namespace-local.
 */
static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->iflink = 0;
	dev->features |= NETIF_F_NETNS_LOCAL;
}
1555
/* rtnl_link_ops .newlink for both ip6gre and ip6gretap: parse the
 * attributes, reject duplicates, configure and register the device and
 * hash it into the per-net tunnel table. Returns 0 or -errno.
 */
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
	struct nlattr *tb[], struct nlattr *data[])
{
	struct ip6_tnl *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &nt->parms);

	/* A tunnel with identical parameters and type already exists. */
	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	/* Only derive the MTU when the user did not set one explicitly. */
	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);

out:
	return err;
}
1590
/* rtnl_link_ops .changelink: apply new attributes to an existing
 * device. The fallback device cannot be modified; new parameters that
 * collide with a different device are rejected with -EEXIST.
 */
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
	struct nlattr *data[])
{
	struct ip6_tnl *t, *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &p);

	t = ip6gre_tunnel_locate(net, &p, 0);

	if (t) {
		/* Lookup found this very device: parameters unchanged. */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		/* Re-key: unhash under the old parameters, apply the new
		 * ones, rehash.
		 */
		t = nt;

		ip6gre_tunnel_unlink(ign, t);
		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
		ip6gre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	return 0;
}
1621
1622static size_t ip6gre_get_size(const struct net_device *dev)
1623{
1624 return
1625 /* IFLA_GRE_LINK */
1626 nla_total_size(4) +
1627 /* IFLA_GRE_IFLAGS */
1628 nla_total_size(2) +
1629 /* IFLA_GRE_OFLAGS */
1630 nla_total_size(2) +
1631 /* IFLA_GRE_IKEY */
1632 nla_total_size(4) +
1633 /* IFLA_GRE_OKEY */
1634 nla_total_size(4) +
1635 /* IFLA_GRE_LOCAL */
1636 nla_total_size(4) +
1637 /* IFLA_GRE_REMOTE */
1638 nla_total_size(4) +
1639 /* IFLA_GRE_TTL */
1640 nla_total_size(1) +
1641 /* IFLA_GRE_TOS */
1642 nla_total_size(1) +
1643 /* IFLA_GRE_ENCAP_LIMIT */
1644 nla_total_size(1) +
1645 /* IFLA_GRE_FLOWINFO */
1646 nla_total_size(4) +
1647 /* IFLA_GRE_FLAGS */
1648 nla_total_size(4) +
1649 0;
1650}
1651
1652static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1653{
1654 struct ip6_tnl *t = netdev_priv(dev);
1655 struct __ip6_tnl_parm *p = &t->parms;
1656
1657 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1658 nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1659 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1660 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1661 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1662 nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
1663 nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
1664 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
1665 /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
1666 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
1667 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
1668 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
1669 goto nla_put_failure;
1670 return 0;
1671
1672nla_put_failure:
1673 return -EMSGSIZE;
1674}
1675
/* Netlink attribute policy shared by the ip6gre and ip6gretap links. */
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS] = { .type = NLA_U32 },
};
1689
/* rtnl link type "ip6gre": layer-3 GRE-over-IPv6 tunnels. */
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind = "ip6gre",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6gre_tunnel_setup,
	.validate = ip6gre_tunnel_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
};
1702
/* rtnl link type "ip6gretap": Ethernet-over-GRE-over-IPv6 tunnels. */
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind = "ip6gretap",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6gre_tap_setup,
	.validate = ip6gre_tap_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
};
1715
1716/*
1717 * And now the modules code and kernel interface.
1718 */
1719
/* Module init: register per-net state, the IPv6 GRE protocol handler
 * and both rtnl link types; unwinds in reverse order on any failure.
 */
static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

	/* Error unwinding: undo each successful step in reverse. */
tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}
1755
/* Module exit: tear everything down in reverse registration order. */
static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}
1763
1764module_init(ip6gre_init);
1765module_exit(ip6gre_fini);
1766MODULE_LICENSE("GPL");
1767MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
1768MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
1769MODULE_ALIAS_RTNL_LINK("ip6gre");
1770MODULE_ALIAS_NETDEV("ip6gre0");
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5b2d63ed793e..aece3e792f84 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -123,16 +123,11 @@ static int ip6_finish_output2(struct sk_buff *skb)
123 skb->len); 123 skb->len);
124 } 124 }
125 125
126 rcu_read_lock();
127 rt = (struct rt6_info *) dst; 126 rt = (struct rt6_info *) dst;
128 neigh = rt->n; 127 neigh = rt->n;
129 if (neigh) { 128 if (neigh)
130 int res = dst_neigh_output(dst, neigh, skb); 129 return dst_neigh_output(dst, neigh, skb);
131 130
132 rcu_read_unlock();
133 return res;
134 }
135 rcu_read_unlock();
136 IP6_INC_STATS_BH(dev_net(dst->dev), 131 IP6_INC_STATS_BH(dev_net(dst->dev),
137 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 132 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
138 kfree_skb(skb); 133 kfree_skb(skb);
@@ -493,7 +488,8 @@ int ip6_forward(struct sk_buff *skb)
493 if (mtu < IPV6_MIN_MTU) 488 if (mtu < IPV6_MIN_MTU)
494 mtu = IPV6_MIN_MTU; 489 mtu = IPV6_MIN_MTU;
495 490
496 if (skb->len > mtu && !skb_is_gso(skb)) { 491 if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
492 (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
497 /* Again, force OUTPUT device used as source address */ 493 /* Again, force OUTPUT device used as source address */
498 skb->dev = dst->dev; 494 skb->dev = dst->dev;
499 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 495 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -636,7 +632,9 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
636 /* We must not fragment if the socket is set to force MTU discovery 632 /* We must not fragment if the socket is set to force MTU discovery
637 * or if the skb it not generated by a local socket. 633 * or if the skb it not generated by a local socket.
638 */ 634 */
639 if (unlikely(!skb->local_df && skb->len > mtu)) { 635 if (unlikely(!skb->local_df && skb->len > mtu) ||
636 (IP6CB(skb)->frag_max_size &&
637 IP6CB(skb)->frag_max_size > mtu)) {
640 if (skb->sk && dst_allfrag(skb_dst(skb))) 638 if (skb->sk && dst_allfrag(skb_dst(skb)))
641 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); 639 sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
642 640
@@ -980,7 +978,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
980 * dst entry and replace it instead with the 978 * dst entry and replace it instead with the
981 * dst entry of the nexthop router 979 * dst entry of the nexthop router
982 */ 980 */
983 rcu_read_lock();
984 rt = (struct rt6_info *) *dst; 981 rt = (struct rt6_info *) *dst;
985 n = rt->n; 982 n = rt->n;
986 if (n && !(n->nud_state & NUD_VALID)) { 983 if (n && !(n->nud_state & NUD_VALID)) {
@@ -988,7 +985,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
988 struct flowi6 fl_gw6; 985 struct flowi6 fl_gw6;
989 int redirect; 986 int redirect;
990 987
991 rcu_read_unlock();
992 ifp = ipv6_get_ifaddr(net, &fl6->saddr, 988 ifp = ipv6_get_ifaddr(net, &fl6->saddr,
993 (*dst)->dev, 1); 989 (*dst)->dev, 1);
994 990
@@ -1008,8 +1004,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
1008 if ((err = (*dst)->error)) 1004 if ((err = (*dst)->error))
1009 goto out_err_release; 1005 goto out_err_release;
1010 } 1006 }
1011 } else {
1012 rcu_read_unlock();
1013 } 1007 }
1014#endif 1008#endif
1015 1009
@@ -1285,8 +1279,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1285 if (dst_allfrag(rt->dst.path)) 1279 if (dst_allfrag(rt->dst.path))
1286 cork->flags |= IPCORK_ALLFRAG; 1280 cork->flags |= IPCORK_ALLFRAG;
1287 cork->length = 0; 1281 cork->length = 0;
1288 sk->sk_sndmsg_page = NULL;
1289 sk->sk_sndmsg_off = 0;
1290 exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; 1282 exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
1291 length += exthdrlen; 1283 length += exthdrlen;
1292 transhdrlen += exthdrlen; 1284 transhdrlen += exthdrlen;
@@ -1510,48 +1502,31 @@ alloc_new_skb:
1510 } 1502 }
1511 } else { 1503 } else {
1512 int i = skb_shinfo(skb)->nr_frags; 1504 int i = skb_shinfo(skb)->nr_frags;
1513 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; 1505 struct page_frag *pfrag = sk_page_frag(sk);
1514 struct page *page = sk->sk_sndmsg_page;
1515 int off = sk->sk_sndmsg_off;
1516 unsigned int left;
1517
1518 if (page && (left = PAGE_SIZE - off) > 0) {
1519 if (copy >= left)
1520 copy = left;
1521 if (page != skb_frag_page(frag)) {
1522 if (i == MAX_SKB_FRAGS) {
1523 err = -EMSGSIZE;
1524 goto error;
1525 }
1526 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1527 skb_frag_ref(skb, i);
1528 frag = &skb_shinfo(skb)->frags[i];
1529 }
1530 } else if(i < MAX_SKB_FRAGS) {
1531 if (copy > PAGE_SIZE)
1532 copy = PAGE_SIZE;
1533 page = alloc_pages(sk->sk_allocation, 0);
1534 if (page == NULL) {
1535 err = -ENOMEM;
1536 goto error;
1537 }
1538 sk->sk_sndmsg_page = page;
1539 sk->sk_sndmsg_off = 0;
1540 1506
1541 skb_fill_page_desc(skb, i, page, 0, 0); 1507 err = -ENOMEM;
1542 frag = &skb_shinfo(skb)->frags[i]; 1508 if (!sk_page_frag_refill(sk, pfrag))
1543 } else {
1544 err = -EMSGSIZE;
1545 goto error; 1509 goto error;
1510
1511 if (!skb_can_coalesce(skb, i, pfrag->page,
1512 pfrag->offset)) {
1513 err = -EMSGSIZE;
1514 if (i == MAX_SKB_FRAGS)
1515 goto error;
1516
1517 __skb_fill_page_desc(skb, i, pfrag->page,
1518 pfrag->offset, 0);
1519 skb_shinfo(skb)->nr_frags = ++i;
1520 get_page(pfrag->page);
1546 } 1521 }
1522 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1547 if (getfrag(from, 1523 if (getfrag(from,
1548 skb_frag_address(frag) + skb_frag_size(frag), 1524 page_address(pfrag->page) + pfrag->offset,
1549 offset, copy, skb->len, skb) < 0) { 1525 offset, copy, skb->len, skb) < 0)
1550 err = -EFAULT; 1526 goto error_efault;
1551 goto error; 1527
1552 } 1528 pfrag->offset += copy;
1553 sk->sk_sndmsg_off += copy; 1529 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1554 skb_frag_size_add(frag, copy);
1555 skb->len += copy; 1530 skb->len += copy;
1556 skb->data_len += copy; 1531 skb->data_len += copy;
1557 skb->truesize += copy; 1532 skb->truesize += copy;
@@ -1560,7 +1535,11 @@ alloc_new_skb:
1560 offset += copy; 1535 offset += copy;
1561 length -= copy; 1536 length -= copy;
1562 } 1537 }
1538
1563 return 0; 1539 return 0;
1540
1541error_efault:
1542 err = -EFAULT;
1564error: 1543error:
1565 cork->length -= length; 1544 cork->length -= length;
1566 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1545 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9a1d5fe6aef8..cb7e2ded6f08 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,7 +126,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
126 * Locking : hash tables are protected by RCU and RTNL 126 * Locking : hash tables are protected by RCU and RTNL
127 */ 127 */
128 128
129static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) 129struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
130{ 130{
131 struct dst_entry *dst = t->dst_cache; 131 struct dst_entry *dst = t->dst_cache;
132 132
@@ -139,20 +139,23 @@ static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
139 139
140 return dst; 140 return dst;
141} 141}
142EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
142 143
143static inline void ip6_tnl_dst_reset(struct ip6_tnl *t) 144void ip6_tnl_dst_reset(struct ip6_tnl *t)
144{ 145{
145 dst_release(t->dst_cache); 146 dst_release(t->dst_cache);
146 t->dst_cache = NULL; 147 t->dst_cache = NULL;
147} 148}
149EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
148 150
149static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) 151void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
150{ 152{
151 struct rt6_info *rt = (struct rt6_info *) dst; 153 struct rt6_info *rt = (struct rt6_info *) dst;
152 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 154 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
153 dst_release(t->dst_cache); 155 dst_release(t->dst_cache);
154 t->dst_cache = dst; 156 t->dst_cache = dst;
155} 157}
158EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
156 159
157/** 160/**
158 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses 161 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -200,7 +203,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
200 **/ 203 **/
201 204
202static struct ip6_tnl __rcu ** 205static struct ip6_tnl __rcu **
203ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p) 206ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
204{ 207{
205 const struct in6_addr *remote = &p->raddr; 208 const struct in6_addr *remote = &p->raddr;
206 const struct in6_addr *local = &p->laddr; 209 const struct in6_addr *local = &p->laddr;
@@ -267,7 +270,7 @@ static void ip6_dev_free(struct net_device *dev)
267 * created tunnel or NULL 270 * created tunnel or NULL
268 **/ 271 **/
269 272
270static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p) 273static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
271{ 274{
272 struct net_device *dev; 275 struct net_device *dev;
273 struct ip6_tnl *t; 276 struct ip6_tnl *t;
@@ -322,7 +325,7 @@ failed:
322 **/ 325 **/
323 326
324static struct ip6_tnl *ip6_tnl_locate(struct net *net, 327static struct ip6_tnl *ip6_tnl_locate(struct net *net,
325 struct ip6_tnl_parm *p, int create) 328 struct __ip6_tnl_parm *p, int create)
326{ 329{
327 const struct in6_addr *remote = &p->raddr; 330 const struct in6_addr *remote = &p->raddr;
328 const struct in6_addr *local = &p->laddr; 331 const struct in6_addr *local = &p->laddr;
@@ -374,8 +377,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
374 * else index to encapsulation limit 377 * else index to encapsulation limit
375 **/ 378 **/
376 379
377static __u16 380__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
378parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
379{ 381{
380 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; 382 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
381 __u8 nexthdr = ipv6h->nexthdr; 383 __u8 nexthdr = ipv6h->nexthdr;
@@ -425,6 +427,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
425 } 427 }
426 return 0; 428 return 0;
427} 429}
430EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
428 431
429/** 432/**
430 * ip6_tnl_err - tunnel error handler 433 * ip6_tnl_err - tunnel error handler
@@ -480,7 +483,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
480 case ICMPV6_PARAMPROB: 483 case ICMPV6_PARAMPROB:
481 teli = 0; 484 teli = 0;
482 if ((*code) == ICMPV6_HDR_FIELD) 485 if ((*code) == ICMPV6_HDR_FIELD)
483 teli = parse_tlv_tnl_enc_lim(skb, skb->data); 486 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
484 487
485 if (teli && teli == *info - 2) { 488 if (teli && teli == *info - 2) {
486 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 489 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -693,11 +696,11 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
693 IP6_ECN_set_ce(ipv6_hdr(skb)); 696 IP6_ECN_set_ce(ipv6_hdr(skb));
694} 697}
695 698
696static __u32 ip6_tnl_get_cap(struct ip6_tnl *t, 699__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
697 const struct in6_addr *laddr, 700 const struct in6_addr *laddr,
698 const struct in6_addr *raddr) 701 const struct in6_addr *raddr)
699{ 702{
700 struct ip6_tnl_parm *p = &t->parms; 703 struct __ip6_tnl_parm *p = &t->parms;
701 int ltype = ipv6_addr_type(laddr); 704 int ltype = ipv6_addr_type(laddr);
702 int rtype = ipv6_addr_type(raddr); 705 int rtype = ipv6_addr_type(raddr);
703 __u32 flags = 0; 706 __u32 flags = 0;
@@ -715,13 +718,14 @@ static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
715 } 718 }
716 return flags; 719 return flags;
717} 720}
721EXPORT_SYMBOL(ip6_tnl_get_cap);
718 722
719/* called with rcu_read_lock() */ 723/* called with rcu_read_lock() */
720static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t, 724int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
721 const struct in6_addr *laddr, 725 const struct in6_addr *laddr,
722 const struct in6_addr *raddr) 726 const struct in6_addr *raddr)
723{ 727{
724 struct ip6_tnl_parm *p = &t->parms; 728 struct __ip6_tnl_parm *p = &t->parms;
725 int ret = 0; 729 int ret = 0;
726 struct net *net = dev_net(t->dev); 730 struct net *net = dev_net(t->dev);
727 731
@@ -740,6 +744,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
740 } 744 }
741 return ret; 745 return ret;
742} 746}
747EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
743 748
744/** 749/**
745 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally 750 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -859,9 +864,9 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
859 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 864 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
860} 865}
861 866
862static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) 867int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
863{ 868{
864 struct ip6_tnl_parm *p = &t->parms; 869 struct __ip6_tnl_parm *p = &t->parms;
865 int ret = 0; 870 int ret = 0;
866 struct net *net = dev_net(t->dev); 871 struct net *net = dev_net(t->dev);
867 872
@@ -885,6 +890,8 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
885 } 890 }
886 return ret; 891 return ret;
887} 892}
893EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
894
888/** 895/**
889 * ip6_tnl_xmit2 - encapsulate packet and send 896 * ip6_tnl_xmit2 - encapsulate packet and send
890 * @skb: the outgoing socket buffer 897 * @skb: the outgoing socket buffer
@@ -1085,7 +1092,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1085 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) 1092 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
1086 return -1; 1093 return -1;
1087 1094
1088 offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb)); 1095 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1089 if (offset > 0) { 1096 if (offset > 0) {
1090 struct ipv6_tlv_tnl_enc_lim *tel; 1097 struct ipv6_tlv_tnl_enc_lim *tel;
1091 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 1098 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -1152,7 +1159,7 @@ tx_err:
1152static void ip6_tnl_link_config(struct ip6_tnl *t) 1159static void ip6_tnl_link_config(struct ip6_tnl *t)
1153{ 1160{
1154 struct net_device *dev = t->dev; 1161 struct net_device *dev = t->dev;
1155 struct ip6_tnl_parm *p = &t->parms; 1162 struct __ip6_tnl_parm *p = &t->parms;
1156 struct flowi6 *fl6 = &t->fl.u.ip6; 1163 struct flowi6 *fl6 = &t->fl.u.ip6;
1157 1164
1158 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); 1165 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1215,7 +1222,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1215 **/ 1222 **/
1216 1223
1217static int 1224static int
1218ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) 1225ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1219{ 1226{
1220 t->parms.laddr = p->laddr; 1227 t->parms.laddr = p->laddr;
1221 t->parms.raddr = p->raddr; 1228 t->parms.raddr = p->raddr;
@@ -1230,6 +1237,34 @@ ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
1230 return 0; 1237 return 0;
1231} 1238}
1232 1239
1240static void
1241ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1242{
1243 p->laddr = u->laddr;
1244 p->raddr = u->raddr;
1245 p->flags = u->flags;
1246 p->hop_limit = u->hop_limit;
1247 p->encap_limit = u->encap_limit;
1248 p->flowinfo = u->flowinfo;
1249 p->link = u->link;
1250 p->proto = u->proto;
1251 memcpy(p->name, u->name, sizeof(u->name));
1252}
1253
1254static void
1255ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1256{
1257 u->laddr = p->laddr;
1258 u->raddr = p->raddr;
1259 u->flags = p->flags;
1260 u->hop_limit = p->hop_limit;
1261 u->encap_limit = p->encap_limit;
1262 u->flowinfo = p->flowinfo;
1263 u->link = p->link;
1264 u->proto = p->proto;
1265 memcpy(u->name, p->name, sizeof(u->name));
1266}
1267
1233/** 1268/**
1234 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace 1269 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1235 * @dev: virtual device associated with tunnel 1270 * @dev: virtual device associated with tunnel
@@ -1263,6 +1298,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1263{ 1298{
1264 int err = 0; 1299 int err = 0;
1265 struct ip6_tnl_parm p; 1300 struct ip6_tnl_parm p;
1301 struct __ip6_tnl_parm p1;
1266 struct ip6_tnl *t = NULL; 1302 struct ip6_tnl *t = NULL;
1267 struct net *net = dev_net(dev); 1303 struct net *net = dev_net(dev);
1268 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1304 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1274,11 +1310,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1274 err = -EFAULT; 1310 err = -EFAULT;
1275 break; 1311 break;
1276 } 1312 }
1277 t = ip6_tnl_locate(net, &p, 0); 1313 ip6_tnl_parm_from_user(&p1, &p);
1314 t = ip6_tnl_locate(net, &p1, 0);
1315 } else {
1316 memset(&p, 0, sizeof(p));
1278 } 1317 }
1279 if (t == NULL) 1318 if (t == NULL)
1280 t = netdev_priv(dev); 1319 t = netdev_priv(dev);
1281 memcpy(&p, &t->parms, sizeof (p)); 1320 ip6_tnl_parm_to_user(&p, &t->parms);
1282 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { 1321 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
1283 err = -EFAULT; 1322 err = -EFAULT;
1284 } 1323 }
@@ -1295,7 +1334,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1295 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && 1334 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1296 p.proto != 0) 1335 p.proto != 0)
1297 break; 1336 break;
1298 t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL); 1337 ip6_tnl_parm_from_user(&p1, &p);
1338 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1299 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { 1339 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
1300 if (t != NULL) { 1340 if (t != NULL) {
1301 if (t->dev != dev) { 1341 if (t->dev != dev) {
@@ -1307,13 +1347,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1307 1347
1308 ip6_tnl_unlink(ip6n, t); 1348 ip6_tnl_unlink(ip6n, t);
1309 synchronize_net(); 1349 synchronize_net();
1310 err = ip6_tnl_change(t, &p); 1350 err = ip6_tnl_change(t, &p1);
1311 ip6_tnl_link(ip6n, t); 1351 ip6_tnl_link(ip6n, t);
1312 netdev_state_change(dev); 1352 netdev_state_change(dev);
1313 } 1353 }
1314 if (t) { 1354 if (t) {
1315 err = 0; 1355 err = 0;
1316 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p))) 1356 ip6_tnl_parm_to_user(&p, &t->parms);
1357 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1317 err = -EFAULT; 1358 err = -EFAULT;
1318 1359
1319 } else 1360 } else
@@ -1329,7 +1370,9 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1329 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) 1370 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1330 break; 1371 break;
1331 err = -ENOENT; 1372 err = -ENOENT;
1332 if ((t = ip6_tnl_locate(net, &p, 0)) == NULL) 1373 ip6_tnl_parm_from_user(&p1, &p);
1374 t = ip6_tnl_locate(net, &p1, 0);
1375 if (t == NULL)
1333 break; 1376 break;
1334 err = -EPERM; 1377 err = -EPERM;
1335 if (t->dev == ip6n->fb_tnl_dev) 1378 if (t->dev == ip6n->fb_tnl_dev)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 4532973f0dd4..08ea3f0b6e55 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -838,7 +838,7 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
838 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 838 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
839 skb_trim(skb, nlh->nlmsg_len); 839 skb_trim(skb, nlh->nlmsg_len);
840 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT; 840 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
841 rtnl_unicast(skb, net, NETLINK_CB(skb).pid); 841 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
842 } else 842 } else
843 kfree_skb(skb); 843 kfree_skb(skb);
844 } 844 }
@@ -1052,7 +1052,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1052 skb_trim(skb, nlh->nlmsg_len); 1052 skb_trim(skb, nlh->nlmsg_len);
1053 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; 1053 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
1054 } 1054 }
1055 rtnl_unicast(skb, net, NETLINK_CB(skb).pid); 1055 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1056 } else 1056 } else
1057 ip6_mr_forward(net, mrt, skb, c); 1057 ip6_mr_forward(net, mrt, skb, c);
1058 } 1058 }
@@ -2202,12 +2202,12 @@ int ip6mr_get_route(struct net *net,
2202} 2202}
2203 2203
2204static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, 2204static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2205 u32 pid, u32 seq, struct mfc6_cache *c) 2205 u32 portid, u32 seq, struct mfc6_cache *c)
2206{ 2206{
2207 struct nlmsghdr *nlh; 2207 struct nlmsghdr *nlh;
2208 struct rtmsg *rtm; 2208 struct rtmsg *rtm;
2209 2209
2210 nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2210 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2211 if (nlh == NULL) 2211 if (nlh == NULL)
2212 return -EMSGSIZE; 2212 return -EMSGSIZE;
2213 2213
@@ -2260,7 +2260,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2260 if (e < s_e) 2260 if (e < s_e)
2261 goto next_entry; 2261 goto next_entry;
2262 if (ip6mr_fill_mroute(mrt, skb, 2262 if (ip6mr_fill_mroute(mrt, skb,
2263 NETLINK_CB(cb->skb).pid, 2263 NETLINK_CB(cb->skb).portid,
2264 cb->nlh->nlmsg_seq, 2264 cb->nlh->nlmsg_seq,
2265 mfc) < 0) 2265 mfc) < 0)
2266 goto done; 2266 goto done;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index db31561cc8df..429089cb073d 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -15,6 +15,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
15{ 15{
16 struct net *net = dev_net(skb_dst(skb)->dev); 16 struct net *net = dev_net(skb_dst(skb)->dev);
17 const struct ipv6hdr *iph = ipv6_hdr(skb); 17 const struct ipv6hdr *iph = ipv6_hdr(skb);
18 unsigned int hh_len;
18 struct dst_entry *dst; 19 struct dst_entry *dst;
19 struct flowi6 fl6 = { 20 struct flowi6 fl6 = {
20 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 21 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -47,6 +48,13 @@ int ip6_route_me_harder(struct sk_buff *skb)
47 } 48 }
48#endif 49#endif
49 50
51 /* Change in oif may mean change in hh_len. */
52 hh_len = skb_dst(skb)->dev->hard_header_len;
53 if (skb_headroom(skb) < hh_len &&
54 pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
55 0, GFP_ATOMIC))
56 return -1;
57
50 return 0; 58 return 0;
51} 59}
52EXPORT_SYMBOL(ip6_route_me_harder); 60EXPORT_SYMBOL(ip6_route_me_harder);
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 10135342799e..c72532a60d88 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -181,9 +181,44 @@ config IP6_NF_SECURITY
181 help 181 help
182 This option adds a `security' table to iptables, for use 182 This option adds a `security' table to iptables, for use
183 with Mandatory Access Control (MAC) policy. 183 with Mandatory Access Control (MAC) policy.
184 184
185 If unsure, say N. 185 If unsure, say N.
186 186
187config NF_NAT_IPV6
188 tristate "IPv6 NAT"
189 depends on NF_CONNTRACK_IPV6
190 depends on NETFILTER_ADVANCED
191 select NF_NAT
192 help
193 The IPv6 NAT option allows masquerading, port forwarding and other
194 forms of full Network Address Port Translation. It is controlled by
195 the `nat' table in ip6tables, see the man page for ip6tables(8).
196
197 To compile it as a module, choose M here. If unsure, say N.
198
199if NF_NAT_IPV6
200
201config IP6_NF_TARGET_MASQUERADE
202 tristate "MASQUERADE target support"
203 help
204 Masquerading is a special case of NAT: all outgoing connections are
205 changed to seem to come from a particular interface's address, and
206 if the interface goes down, those connections are lost. This is
207 only useful for dialup accounts with dynamic IP address (ie. your IP
208 address will be different on next dialup).
209
210 To compile it as a module, choose M here. If unsure, say N.
211
212config IP6_NF_TARGET_NPT
213 tristate "NPT (Network Prefix translation) target support"
214 help
215 This option adds the `SNPT' and `DNPT' target, which perform
216 stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
217
218 To compile it as a module, choose M here. If unsure, say N.
219
220endif # NF_NAT_IPV6
221
187endif # IP6_NF_IPTABLES 222endif # IP6_NF_IPTABLES
188 223
189endmenu 224endmenu
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 534d3f216f7b..2d11fcc2cf3c 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
9obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 9obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
10obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o 10obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
11obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
11 12
12# objects for l3 independent conntrack 13# objects for l3 independent conntrack
13nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o 14nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
@@ -15,6 +16,9 @@ nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
15# l3 independent conntrack 16# l3 independent conntrack
16obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o 17obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
17 18
19nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
20obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
21
18# defrag 22# defrag
19nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o 23nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
20obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o 24obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
@@ -30,4 +34,6 @@ obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
30obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o 34obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
31 35
32# targets 36# targets
37obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
38obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o
33obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o 39obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
new file mode 100644
index 000000000000..60e9053bab05
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6
9 * NAT funded by Astaro.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/ipv6.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter_ipv6.h>
18#include <linux/netfilter/x_tables.h>
19#include <net/netfilter/nf_nat.h>
20#include <net/addrconf.h>
21#include <net/ipv6.h>
22
23static unsigned int
24masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
25{
26 const struct nf_nat_range *range = par->targinfo;
27 enum ip_conntrack_info ctinfo;
28 struct in6_addr src;
29 struct nf_conn *ct;
30 struct nf_nat_range newrange;
31
32 ct = nf_ct_get(skb, &ctinfo);
33 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
34 ctinfo == IP_CT_RELATED_REPLY));
35
36 if (ipv6_dev_get_saddr(dev_net(par->out), par->out,
37 &ipv6_hdr(skb)->daddr, 0, &src) < 0)
38 return NF_DROP;
39
40 nfct_nat(ct)->masq_index = par->out->ifindex;
41
42 newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
43 newrange.min_addr.in6 = src;
44 newrange.max_addr.in6 = src;
45 newrange.min_proto = range->min_proto;
46 newrange.max_proto = range->max_proto;
47
48 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
49}
50
51static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
52{
53 const struct nf_nat_range *range = par->targinfo;
54
55 if (range->flags & NF_NAT_RANGE_MAP_IPS)
56 return -EINVAL;
57 return 0;
58}
59
60static int device_cmp(struct nf_conn *ct, void *ifindex)
61{
62 const struct nf_conn_nat *nat = nfct_nat(ct);
63
64 if (!nat)
65 return 0;
66 if (nf_ct_l3num(ct) != NFPROTO_IPV6)
67 return 0;
68 return nat->masq_index == (int)(long)ifindex;
69}
70
71static int masq_device_event(struct notifier_block *this,
72 unsigned long event, void *ptr)
73{
74 const struct net_device *dev = ptr;
75 struct net *net = dev_net(dev);
76
77 if (event == NETDEV_DOWN)
78 nf_ct_iterate_cleanup(net, device_cmp,
79 (void *)(long)dev->ifindex);
80
81 return NOTIFY_DONE;
82}
83
84static struct notifier_block masq_dev_notifier = {
85 .notifier_call = masq_device_event,
86};
87
88static int masq_inet_event(struct notifier_block *this,
89 unsigned long event, void *ptr)
90{
91 struct inet6_ifaddr *ifa = ptr;
92
93 return masq_device_event(this, event, ifa->idev->dev);
94}
95
96static struct notifier_block masq_inet_notifier = {
97 .notifier_call = masq_inet_event,
98};
99
100static struct xt_target masquerade_tg6_reg __read_mostly = {
101 .name = "MASQUERADE",
102 .family = NFPROTO_IPV6,
103 .checkentry = masquerade_tg6_checkentry,
104 .target = masquerade_tg6,
105 .targetsize = sizeof(struct nf_nat_range),
106 .table = "nat",
107 .hooks = 1 << NF_INET_POST_ROUTING,
108 .me = THIS_MODULE,
109};
110
111static int __init masquerade_tg6_init(void)
112{
113 int err;
114
115 err = xt_register_target(&masquerade_tg6_reg);
116 if (err == 0) {
117 register_netdevice_notifier(&masq_dev_notifier);
118 register_inet6addr_notifier(&masq_inet_notifier);
119 }
120
121 return err;
122}
123static void __exit masquerade_tg6_exit(void)
124{
125 unregister_inet6addr_notifier(&masq_inet_notifier);
126 unregister_netdevice_notifier(&masq_dev_notifier);
127 xt_unregister_target(&masquerade_tg6_reg);
128}
129
130module_init(masquerade_tg6_init);
131module_exit(masquerade_tg6_exit);
132
133MODULE_LICENSE("GPL");
134MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
135MODULE_DESCRIPTION("Xtables: automatic address SNAT");
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
new file mode 100644
index 000000000000..e9486915eff6
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2011, 2012 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/skbuff.h>
11#include <linux/ipv6.h>
12#include <linux/netfilter.h>
13#include <linux/netfilter_ipv6.h>
14#include <linux/netfilter_ipv6/ip6t_NPT.h>
15#include <linux/netfilter/x_tables.h>
16
17static __sum16 csum16_complement(__sum16 a)
18{
19 return (__force __sum16)(0xffff - (__force u16)a);
20}
21
22static __sum16 csum16_add(__sum16 a, __sum16 b)
23{
24 u16 sum;
25
26 sum = (__force u16)a + (__force u16)b;
27 sum += (__force u16)a < (__force u16)b;
28 return (__force __sum16)sum;
29}
30
/* One's-complement subtraction: a - b == a + ~b in ones'-complement
 * arithmetic. */
static __sum16 csum16_sub(__sum16 a, __sum16 b)
{
	return csum16_add(a, csum16_complement(b));
}
35
/* Validate an SNPT/DNPT rule and precompute the checksum delta.
 *
 * RFC 6296 NPTv6 is checksum-neutral: the per-packet adjustment is the
 * ones'-complement difference between the source and destination
 * prefixes, computed once here over all eight 16-bit words of each
 * prefix and stored in npt->adjustment for use by ip6t_npt_map_pfx().
 *
 * NOTE(review): summing all eight words assumes userspace supplies
 * prefixes whose bits beyond src/dst_pfx_len are zero — TODO confirm
 * this is validated (or harmless) on the userspace side.
 *
 * Returns 0 on success, -EINVAL if either prefix is longer than /64.
 */
static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
{
	struct ip6t_npt_tginfo *npt = par->targinfo;
	__sum16 src_sum = 0, dst_sum = 0;
	unsigned int i;

	if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
		src_sum = csum16_add(src_sum,
				(__force __sum16)npt->src_pfx.in6.s6_addr16[i]);
		dst_sum = csum16_add(dst_sum,
				(__force __sum16)npt->dst_pfx.in6.s6_addr16[i]);
	}

	npt->adjustment = csum16_sub(src_sum, dst_sum);
	return 0;
}
55
56static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
57 struct in6_addr *addr)
58{
59 unsigned int pfx_len;
60 unsigned int i, idx;
61 __be32 mask;
62 __sum16 sum;
63
64 pfx_len = max(npt->src_pfx_len, npt->dst_pfx_len);
65 for (i = 0; i < pfx_len; i += 32) {
66 if (pfx_len - i >= 32)
67 mask = 0;
68 else
69 mask = htonl(~((1 << (pfx_len - i)) - 1));
70
71 idx = i / 32;
72 addr->s6_addr32[idx] &= mask;
73 addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx];
74 }
75
76 if (pfx_len <= 48)
77 idx = 3;
78 else {
79 for (idx = 4; idx < ARRAY_SIZE(addr->s6_addr16); idx++) {
80 if ((__force __sum16)addr->s6_addr16[idx] !=
81 CSUM_MANGLED_0)
82 break;
83 }
84 if (idx == ARRAY_SIZE(addr->s6_addr16))
85 return false;
86 }
87
88 sum = csum16_add((__force __sum16)addr->s6_addr16[idx],
89 npt->adjustment);
90 if (sum == CSUM_MANGLED_0)
91 sum = 0;
92 *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
93
94 return true;
95}
96
97static unsigned int
98ip6t_snpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
99{
100 const struct ip6t_npt_tginfo *npt = par->targinfo;
101
102 if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->saddr)) {
103 icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
104 offsetof(struct ipv6hdr, saddr));
105 return NF_DROP;
106 }
107 return XT_CONTINUE;
108}
109
110static unsigned int
111ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
112{
113 const struct ip6t_npt_tginfo *npt = par->targinfo;
114
115 if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->daddr)) {
116 icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
117 offsetof(struct ipv6hdr, daddr));
118 return NF_DROP;
119 }
120 return XT_CONTINUE;
121}
122
/* SNPT rewrites source prefixes on the way out (LOCAL_IN covers
 * reply/loopback paths, POST_ROUTING the forward path); DNPT rewrites
 * destination prefixes on the way in (PRE_ROUTING / LOCAL_OUT).
 *
 * NOTE(review): no .table restriction is set, so these targets can be
 * used from any table whose hooks match — consider whether they should
 * be confined to "mangle"; verify against intended deployment.
 */
static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
	{
		.name		= "SNPT",
		.target		= ip6t_snpt_tg,
		.targetsize	= sizeof(struct ip6t_npt_tginfo),
		.checkentry	= ip6t_npt_checkentry,
		.family		= NFPROTO_IPV6,
		.hooks		= (1 << NF_INET_LOCAL_IN) |
				  (1 << NF_INET_POST_ROUTING),
		.me		= THIS_MODULE,
	},
	{
		.name		= "DNPT",
		.target		= ip6t_dnpt_tg,
		.targetsize	= sizeof(struct ip6t_npt_tginfo),
		.checkentry	= ip6t_npt_checkentry,
		.family		= NFPROTO_IPV6,
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
				  (1 << NF_INET_LOCAL_OUT),
		.me		= THIS_MODULE,
	},
};
145
/* Module init: register both SNPT and DNPT targets in one call. */
static int __init ip6t_npt_init(void)
{
	return xt_register_targets(ip6t_npt_target_reg,
				   ARRAY_SIZE(ip6t_npt_target_reg));
}
151
/* Module exit: unregister both targets. */
static void __exit ip6t_npt_exit(void)
{
	xt_unregister_targets(ip6t_npt_target_reg,
			      ARRAY_SIZE(ip6t_npt_target_reg));
}
157
/* Standard module boilerplate; aliases let iptables autoload us for
 * either target name. */
module_init(ip6t_npt_init);
module_exit(ip6t_npt_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IPv6-to-IPv6 Network Prefix Translation (RFC 6296)");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS("ip6t_SNPT");
MODULE_ALIAS("ip6t_DNPT");
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 325e59a0224f..beb5777d2043 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -61,9 +61,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
61 net->ipv6.ip6table_filter = 61 net->ipv6.ip6table_filter =
62 ip6t_register_table(net, &packet_filter, repl); 62 ip6t_register_table(net, &packet_filter, repl);
63 kfree(repl); 63 kfree(repl);
64 if (IS_ERR(net->ipv6.ip6table_filter)) 64 return PTR_RET(net->ipv6.ip6table_filter);
65 return PTR_ERR(net->ipv6.ip6table_filter);
66 return 0;
67} 65}
68 66
69static void __net_exit ip6table_filter_net_exit(struct net *net) 67static void __net_exit ip6table_filter_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 4d782405f125..7431121b87de 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -97,9 +97,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
97 net->ipv6.ip6table_mangle = 97 net->ipv6.ip6table_mangle =
98 ip6t_register_table(net, &packet_mangler, repl); 98 ip6t_register_table(net, &packet_mangler, repl);
99 kfree(repl); 99 kfree(repl);
100 if (IS_ERR(net->ipv6.ip6table_mangle)) 100 return PTR_RET(net->ipv6.ip6table_mangle);
101 return PTR_ERR(net->ipv6.ip6table_mangle);
102 return 0;
103} 101}
104 102
105static void __net_exit ip6table_mangle_net_exit(struct net *net) 103static void __net_exit ip6table_mangle_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
new file mode 100644
index 000000000000..e418bd6350a4
--- /dev/null
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -0,0 +1,321 @@
1/*
2 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Based on Rusty Russell's IPv4 NAT code. Development of IPv6 NAT
9 * funded by Astaro.
10 */
11
12#include <linux/module.h>
13#include <linux/netfilter.h>
14#include <linux/netfilter_ipv6.h>
15#include <linux/netfilter_ipv6/ip6_tables.h>
16#include <linux/ipv6.h>
17#include <net/ipv6.h>
18
19#include <net/netfilter/nf_nat.h>
20#include <net/netfilter/nf_nat_core.h>
21#include <net/netfilter/nf_nat_l3proto.h>
22
/* The IPv6 "nat" table skeleton: valid in all four NAT-relevant hooks.
 * The per-netns instance is created from this template in
 * ip6table_nat_net_init(). */
static const struct xt_table nf_nat_ipv6_table = {
	.name		= "nat",
	.valid_hooks	= (1 << NF_INET_PRE_ROUTING) |
			  (1 << NF_INET_POST_ROUTING) |
			  (1 << NF_INET_LOCAL_OUT) |
			  (1 << NF_INET_LOCAL_IN),
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV6,
};
32
33static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
34{
35 /* Force range to this IP; let proto decide mapping for
36 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
37 */
38 struct nf_nat_range range;
39
40 range.flags = 0;
41 pr_debug("Allocating NULL binding for %p (%pI6)\n", ct,
42 HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
43 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6 :
44 &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6);
45
46 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
47}
48
49static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
50 const struct net_device *in,
51 const struct net_device *out,
52 struct nf_conn *ct)
53{
54 struct net *net = nf_ct_net(ct);
55 unsigned int ret;
56
57 ret = ip6t_do_table(skb, hooknum, in, out, net->ipv6.ip6table_nat);
58 if (ret == NF_ACCEPT) {
59 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
60 ret = alloc_null_binding(ct, hooknum);
61 }
62 return ret;
63}
64
/* Core IPv6 NAT hook shared by all four hook points: attach a NAT
 * extension to the conntrack entry if needed, decide (per conntrack
 * state) whether a binding must be looked up, then apply the
 * translation via nf_nat_packet().
 */
static unsigned int
nf_nat_ipv6_fn(unsigned int hooknum,
	       struct sk_buff *skb,
	       const struct net_device *in,
	       const struct net_device *out,
	       int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
	__be16 frag_off;
	int hdrlen;
	u8 nexthdr;

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track? It's not due to stress, or conntrack would
	 * have dropped it. Hence it's the user's responsibilty to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (nf_ct_is_untracked(ct))
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat) {
		/* NAT module was loaded late. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* ICMPv6 errors related to a NATed flow carry the original
		 * tuple in their payload and need special translation. */
		nexthdr = ipv6_hdr(skb)->nexthdr;
		hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
					  &nexthdr, &frag_off);

		if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
							     hooknum, hdrlen))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			/* First packet in this direction: consult the nat
			 * table (or install a null binding). */
			ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
			if (ret != NF_ACCEPT)
				return ret;
		} else
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == IP_CT_ESTABLISHED_REPLY);
	}

	/* Apply whatever binding is now attached to ct. */
	return nf_nat_packet(ct, ctinfo, hooknum, skb);
}
144
145static unsigned int
146nf_nat_ipv6_in(unsigned int hooknum,
147 struct sk_buff *skb,
148 const struct net_device *in,
149 const struct net_device *out,
150 int (*okfn)(struct sk_buff *))
151{
152 unsigned int ret;
153 struct in6_addr daddr = ipv6_hdr(skb)->daddr;
154
155 ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
156 if (ret != NF_DROP && ret != NF_STOLEN &&
157 ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
158 skb_dst_drop(skb);
159
160 return ret;
161}
162
/* POST_ROUTING wrapper: run the common NAT path; under XFRM, if the
 * translation changed the source side of the tuple, re-run the xfrm
 * lookup so the (possibly different) IPsec policy applies to the
 * translated packet.
 */
static unsigned int
nf_nat_ipv6_out(unsigned int hooknum,
		struct sk_buff *skb,
		const struct net_device *in,
		const struct net_device *out,
		int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
#endif
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct ipv6hdr))
		return NF_ACCEPT;

	ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
#ifdef CONFIG_XFRM
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* SNAT took place iff the original src no longer matches the
		 * reply direction's dst (address or port). */
		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
				      &ct->tuplehash[!dir].tuple.dst.u3) ||
		    (ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all))
			if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
				ret = NF_DROP;
	}
#endif
	return ret;
}
197
/* LOCAL_OUT wrapper: run the common NAT path; if DNAT changed the
 * destination, reroute the locally generated packet, and under XFRM
 * redo the policy lookup if the destination port changed.
 */
static unsigned int
nf_nat_ipv6_local_fn(unsigned int hooknum,
		     struct sk_buff *skb,
		     const struct net_device *in,
		     const struct net_device *out,
		     int (*okfn)(struct sk_buff *))
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct ipv6hdr))
		return NF_ACCEPT;

	ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* DNAT took place iff our dst no longer matches the reply
		 * direction's src. */
		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
				      &ct->tuplehash[!dir].tuple.src.u3)) {
			if (ip6_route_me_harder(skb))
				ret = NF_DROP;
		}
#ifdef CONFIG_XFRM
		else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
			 ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all)
			if (nf_xfrm_me_harder(skb, AF_INET6))
				ret = NF_DROP;
#endif
	}
	return ret;
}
233
/* Hook registrations: DNAT before filtering (PRE_ROUTING / LOCAL_OUT),
 * SNAT after filtering (POST_ROUTING / LOCAL_IN). */
static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_ipv6_in,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP6_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_ipv6_out,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP6_PRI_NAT_SRC,
	},
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_ipv6_local_fn,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP6_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_ipv6_fn,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP6_PRI_NAT_SRC,
	},
};
268
269static int __net_init ip6table_nat_net_init(struct net *net)
270{
271 struct ip6t_replace *repl;
272
273 repl = ip6t_alloc_initial_table(&nf_nat_ipv6_table);
274 if (repl == NULL)
275 return -ENOMEM;
276 net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl);
277 kfree(repl);
278 if (IS_ERR(net->ipv6.ip6table_nat))
279 return PTR_ERR(net->ipv6.ip6table_nat);
280 return 0;
281}
282
/* Per-netns teardown: release this namespace's "nat" table. */
static void __net_exit ip6table_nat_net_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.ip6table_nat);
}
287
/* Register table creation/destruction with the netns lifecycle. */
static struct pernet_operations ip6table_nat_net_ops = {
	.init	= ip6table_nat_net_init,
	.exit	= ip6table_nat_net_exit,
};
292
293static int __init ip6table_nat_init(void)
294{
295 int err;
296
297 err = register_pernet_subsys(&ip6table_nat_net_ops);
298 if (err < 0)
299 goto err1;
300
301 err = nf_register_hooks(nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
302 if (err < 0)
303 goto err2;
304 return 0;
305
306err2:
307 unregister_pernet_subsys(&ip6table_nat_net_ops);
308err1:
309 return err;
310}
311
/* Module exit: reverse order of init — hooks first, then pernet ops. */
static void __exit ip6table_nat_exit(void)
{
	nf_unregister_hooks(nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
	unregister_pernet_subsys(&ip6table_nat_net_ops);
}
317
/* Standard module boilerplate. */
module_init(ip6table_nat_init);
module_exit(ip6table_nat_exit);

MODULE_LICENSE("GPL");
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 5b9926a011bd..60d1bddff7a0 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -40,9 +40,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
40 net->ipv6.ip6table_raw = 40 net->ipv6.ip6table_raw =
41 ip6t_register_table(net, &packet_raw, repl); 41 ip6t_register_table(net, &packet_raw, repl);
42 kfree(repl); 42 kfree(repl);
43 if (IS_ERR(net->ipv6.ip6table_raw)) 43 return PTR_RET(net->ipv6.ip6table_raw);
44 return PTR_ERR(net->ipv6.ip6table_raw);
45 return 0;
46} 44}
47 45
48static void __net_exit ip6table_raw_net_exit(struct net *net) 46static void __net_exit ip6table_raw_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 91aa2b4d83c9..db155351339c 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -58,10 +58,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
58 net->ipv6.ip6table_security = 58 net->ipv6.ip6table_security =
59 ip6t_register_table(net, &security_table, repl); 59 ip6t_register_table(net, &security_table, repl);
60 kfree(repl); 60 kfree(repl);
61 if (IS_ERR(net->ipv6.ip6table_security)) 61 return PTR_RET(net->ipv6.ip6table_security);
62 return PTR_ERR(net->ipv6.ip6table_security);
63
64 return 0;
65} 62}
66 63
67static void __net_exit ip6table_security_net_exit(struct net *net) 64static void __net_exit ip6table_security_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4794f96cf2e0..8860d23e61cf 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack_core.h> 28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_zones.h> 29#include <net/netfilter/nf_conntrack_zones.h>
30#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 30#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
31#include <net/netfilter/nf_nat_helper.h>
31#include <net/netfilter/ipv6/nf_defrag_ipv6.h> 32#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
32#include <net/netfilter/nf_log.h> 33#include <net/netfilter/nf_log.h>
33 34
@@ -64,82 +65,31 @@ static int ipv6_print_tuple(struct seq_file *s,
64 tuple->src.u3.ip6, tuple->dst.u3.ip6); 65 tuple->src.u3.ip6, tuple->dst.u3.ip6);
65} 66}
66 67
67/*
68 * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
69 *
70 * This function parses (probably truncated) exthdr set "hdr"
71 * of length "len". "nexthdrp" initially points to some place,
72 * where type of the first header can be found.
73 *
74 * It skips all well-known exthdrs, and returns pointer to the start
75 * of unparsable area i.e. the first header with unknown type.
76 * if success, *nexthdr is updated by type/protocol of this header.
77 *
78 * NOTES: - it may return pointer pointing beyond end of packet,
79 * if the last recognized header is truncated in the middle.
80 * - if packet is truncated, so that all parsed headers are skipped,
81 * it returns -1.
82 * - if packet is fragmented, return pointer of the fragment header.
83 * - ESP is unparsable for now and considered like
84 * normal payload protocol.
85 * - Note also special handling of AUTH header. Thanks to IPsec wizards.
86 */
87
88static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
89 u8 *nexthdrp, int len)
90{
91 u8 nexthdr = *nexthdrp;
92
93 while (ipv6_ext_hdr(nexthdr)) {
94 struct ipv6_opt_hdr hdr;
95 int hdrlen;
96
97 if (len < (int)sizeof(struct ipv6_opt_hdr))
98 return -1;
99 if (nexthdr == NEXTHDR_NONE)
100 break;
101 if (nexthdr == NEXTHDR_FRAGMENT)
102 break;
103 if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
104 BUG();
105 if (nexthdr == NEXTHDR_AUTH)
106 hdrlen = (hdr.hdrlen+2)<<2;
107 else
108 hdrlen = ipv6_optlen(&hdr);
109
110 nexthdr = hdr.nexthdr;
111 len -= hdrlen;
112 start += hdrlen;
113 }
114
115 *nexthdrp = nexthdr;
116 return start;
117}
118
119static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, 68static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
120 unsigned int *dataoff, u_int8_t *protonum) 69 unsigned int *dataoff, u_int8_t *protonum)
121{ 70{
122 unsigned int extoff = nhoff + sizeof(struct ipv6hdr); 71 unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
123 unsigned char pnum; 72 __be16 frag_off;
124 int protoff; 73 int protoff;
74 u8 nexthdr;
125 75
126 if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr), 76 if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
127 &pnum, sizeof(pnum)) != 0) { 77 &nexthdr, sizeof(nexthdr)) != 0) {
128 pr_debug("ip6_conntrack_core: can't get nexthdr\n"); 78 pr_debug("ip6_conntrack_core: can't get nexthdr\n");
129 return -NF_ACCEPT; 79 return -NF_ACCEPT;
130 } 80 }
131 protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, skb->len - extoff); 81 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
132 /* 82 /*
133 * (protoff == skb->len) mean that the packet doesn't have no data 83 * (protoff == skb->len) mean that the packet doesn't have no data
134 * except of IPv6 & ext headers. but it's tracked anyway. - YK 84 * except of IPv6 & ext headers. but it's tracked anyway. - YK
135 */ 85 */
136 if ((protoff < 0) || (protoff > skb->len)) { 86 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
137 pr_debug("ip6_conntrack_core: can't find proto in pkt\n"); 87 pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
138 return -NF_ACCEPT; 88 return -NF_ACCEPT;
139 } 89 }
140 90
141 *dataoff = protoff; 91 *dataoff = protoff;
142 *protonum = pnum; 92 *protonum = nexthdr;
143 return NF_ACCEPT; 93 return NF_ACCEPT;
144} 94}
145 95
@@ -153,10 +103,10 @@ static unsigned int ipv6_helper(unsigned int hooknum,
153 const struct nf_conn_help *help; 103 const struct nf_conn_help *help;
154 const struct nf_conntrack_helper *helper; 104 const struct nf_conntrack_helper *helper;
155 enum ip_conntrack_info ctinfo; 105 enum ip_conntrack_info ctinfo;
156 unsigned int ret, protoff; 106 unsigned int ret;
157 unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; 107 __be16 frag_off;
158 unsigned char pnum = ipv6_hdr(skb)->nexthdr; 108 int protoff;
159 109 u8 nexthdr;
160 110
161 /* This is where we call the helper: as the packet goes out. */ 111 /* This is where we call the helper: as the packet goes out. */
162 ct = nf_ct_get(skb, &ctinfo); 112 ct = nf_ct_get(skb, &ctinfo);
@@ -171,9 +121,10 @@ static unsigned int ipv6_helper(unsigned int hooknum,
171 if (!helper) 121 if (!helper)
172 return NF_ACCEPT; 122 return NF_ACCEPT;
173 123
174 protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, 124 nexthdr = ipv6_hdr(skb)->nexthdr;
175 skb->len - extoff); 125 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
176 if (protoff > skb->len || pnum == NEXTHDR_FRAGMENT) { 126 &frag_off);
127 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
177 pr_debug("proto header not found\n"); 128 pr_debug("proto header not found\n");
178 return NF_ACCEPT; 129 return NF_ACCEPT;
179 } 130 }
@@ -192,6 +143,36 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
192 const struct net_device *out, 143 const struct net_device *out,
193 int (*okfn)(struct sk_buff *)) 144 int (*okfn)(struct sk_buff *))
194{ 145{
146 struct nf_conn *ct;
147 enum ip_conntrack_info ctinfo;
148 unsigned char pnum = ipv6_hdr(skb)->nexthdr;
149 int protoff;
150 __be16 frag_off;
151
152 ct = nf_ct_get(skb, &ctinfo);
153 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
154 goto out;
155
156 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
157 &frag_off);
158 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
159 pr_debug("proto header not found\n");
160 goto out;
161 }
162
163 /* adjust seqs for loopback traffic only in outgoing direction */
164 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
165 !nf_is_loopback_packet(skb)) {
166 typeof(nf_nat_seq_adjust_hook) seq_adjust;
167
168 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
169 if (!seq_adjust ||
170 !seq_adjust(skb, ct, ctinfo, protoff)) {
171 NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
172 return NF_DROP;
173 }
174 }
175out:
195 /* We've seen it coming out the other side: confirm it */ 176 /* We've seen it coming out the other side: confirm it */
196 return nf_conntrack_confirm(skb); 177 return nf_conntrack_confirm(skb);
197} 178}
@@ -199,9 +180,14 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
199static unsigned int __ipv6_conntrack_in(struct net *net, 180static unsigned int __ipv6_conntrack_in(struct net *net,
200 unsigned int hooknum, 181 unsigned int hooknum,
201 struct sk_buff *skb, 182 struct sk_buff *skb,
183 const struct net_device *in,
184 const struct net_device *out,
202 int (*okfn)(struct sk_buff *)) 185 int (*okfn)(struct sk_buff *))
203{ 186{
204 struct sk_buff *reasm = skb->nfct_reasm; 187 struct sk_buff *reasm = skb->nfct_reasm;
188 const struct nf_conn_help *help;
189 struct nf_conn *ct;
190 enum ip_conntrack_info ctinfo;
205 191
206 /* This packet is fragmented and has reassembled packet. */ 192 /* This packet is fragmented and has reassembled packet. */
207 if (reasm) { 193 if (reasm) {
@@ -213,6 +199,25 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
213 if (ret != NF_ACCEPT) 199 if (ret != NF_ACCEPT)
214 return ret; 200 return ret;
215 } 201 }
202
203 /* Conntrack helpers need the entire reassembled packet in the
204 * POST_ROUTING hook. In case of unconfirmed connections NAT
205 * might reassign a helper, so the entire packet is also
206 * required.
207 */
208 ct = nf_ct_get(reasm, &ctinfo);
209 if (ct != NULL && !nf_ct_is_untracked(ct)) {
210 help = nfct_help(ct);
211 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
212 nf_conntrack_get_reasm(skb);
213 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
214 (struct net_device *)in,
215 (struct net_device *)out,
216 okfn, NF_IP6_PRI_CONNTRACK + 1);
217 return NF_DROP_ERR(-ECANCELED);
218 }
219 }
220
216 nf_conntrack_get(reasm->nfct); 221 nf_conntrack_get(reasm->nfct);
217 skb->nfct = reasm->nfct; 222 skb->nfct = reasm->nfct;
218 skb->nfctinfo = reasm->nfctinfo; 223 skb->nfctinfo = reasm->nfctinfo;
@@ -228,7 +233,7 @@ static unsigned int ipv6_conntrack_in(unsigned int hooknum,
228 const struct net_device *out, 233 const struct net_device *out,
229 int (*okfn)(struct sk_buff *)) 234 int (*okfn)(struct sk_buff *))
230{ 235{
231 return __ipv6_conntrack_in(dev_net(in), hooknum, skb, okfn); 236 return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
232} 237}
233 238
234static unsigned int ipv6_conntrack_local(unsigned int hooknum, 239static unsigned int ipv6_conntrack_local(unsigned int hooknum,
@@ -242,7 +247,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
242 net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); 247 net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
243 return NF_ACCEPT; 248 return NF_ACCEPT;
244 } 249 }
245 return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn); 250 return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
246} 251}
247 252
248static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { 253static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index c9c78c2e666b..18bd9bbbd1c6 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -57,41 +57,27 @@ struct nf_ct_frag6_skb_cb
57 57
58#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb*)((skb)->cb)) 58#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
59 59
60struct nf_ct_frag6_queue
61{
62 struct inet_frag_queue q;
63
64 __be32 id; /* fragment id */
65 u32 user;
66 struct in6_addr saddr;
67 struct in6_addr daddr;
68
69 unsigned int csum;
70 __u16 nhoffset;
71};
72
73static struct inet_frags nf_frags; 60static struct inet_frags nf_frags;
74static struct netns_frags nf_init_frags;
75 61
76#ifdef CONFIG_SYSCTL 62#ifdef CONFIG_SYSCTL
77static struct ctl_table nf_ct_frag6_sysctl_table[] = { 63static struct ctl_table nf_ct_frag6_sysctl_table[] = {
78 { 64 {
79 .procname = "nf_conntrack_frag6_timeout", 65 .procname = "nf_conntrack_frag6_timeout",
80 .data = &nf_init_frags.timeout, 66 .data = &init_net.nf_frag.frags.timeout,
81 .maxlen = sizeof(unsigned int), 67 .maxlen = sizeof(unsigned int),
82 .mode = 0644, 68 .mode = 0644,
83 .proc_handler = proc_dointvec_jiffies, 69 .proc_handler = proc_dointvec_jiffies,
84 }, 70 },
85 { 71 {
86 .procname = "nf_conntrack_frag6_low_thresh", 72 .procname = "nf_conntrack_frag6_low_thresh",
87 .data = &nf_init_frags.low_thresh, 73 .data = &init_net.nf_frag.frags.low_thresh,
88 .maxlen = sizeof(unsigned int), 74 .maxlen = sizeof(unsigned int),
89 .mode = 0644, 75 .mode = 0644,
90 .proc_handler = proc_dointvec, 76 .proc_handler = proc_dointvec,
91 }, 77 },
92 { 78 {
93 .procname = "nf_conntrack_frag6_high_thresh", 79 .procname = "nf_conntrack_frag6_high_thresh",
94 .data = &nf_init_frags.high_thresh, 80 .data = &init_net.nf_frag.frags.high_thresh,
95 .maxlen = sizeof(unsigned int), 81 .maxlen = sizeof(unsigned int),
96 .mode = 0644, 82 .mode = 0644,
97 .proc_handler = proc_dointvec, 83 .proc_handler = proc_dointvec,
@@ -99,68 +85,86 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
99 { } 85 { }
100}; 86};
101 87
102static struct ctl_table_header *nf_ct_frag6_sysctl_header; 88static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
103#endif
104
105static unsigned int nf_hashfn(struct inet_frag_queue *q)
106{ 89{
107 const struct nf_ct_frag6_queue *nq; 90 struct ctl_table *table;
91 struct ctl_table_header *hdr;
92
93 table = nf_ct_frag6_sysctl_table;
94 if (!net_eq(net, &init_net)) {
95 table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
96 GFP_KERNEL);
97 if (table == NULL)
98 goto err_alloc;
99
100 table[0].data = &net->ipv6.frags.high_thresh;
101 table[1].data = &net->ipv6.frags.low_thresh;
102 table[2].data = &net->ipv6.frags.timeout;
103 }
108 104
109 nq = container_of(q, struct nf_ct_frag6_queue, q); 105 hdr = register_net_sysctl(net, "net/netfilter", table);
110 return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd); 106 if (hdr == NULL)
107 goto err_reg;
108
109 net->nf_frag.sysctl.frags_hdr = hdr;
110 return 0;
111
112err_reg:
113 if (!net_eq(net, &init_net))
114 kfree(table);
115err_alloc:
116 return -ENOMEM;
111} 117}
112 118
113static void nf_skb_free(struct sk_buff *skb) 119static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
114{ 120{
115 if (NFCT_FRAG6_CB(skb)->orig) 121 struct ctl_table *table;
116 kfree_skb(NFCT_FRAG6_CB(skb)->orig);
117}
118 122
119/* Destruction primitives. */ 123 table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
124 unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
125 if (!net_eq(net, &init_net))
126 kfree(table);
127}
120 128
121static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) 129#else
130static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
122{ 131{
123 inet_frag_put(&fq->q, &nf_frags); 132 return 0;
124} 133}
134static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
135{
136}
137#endif
125 138
126/* Kill fq entry. It is not destroyed immediately, 139static unsigned int nf_hashfn(struct inet_frag_queue *q)
127 * because caller (and someone more) holds reference count.
128 */
129static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
130{ 140{
131 inet_frag_kill(&fq->q, &nf_frags); 141 const struct frag_queue *nq;
142
143 nq = container_of(q, struct frag_queue, q);
144 return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
132} 145}
133 146
134static void nf_ct_frag6_evictor(void) 147static void nf_skb_free(struct sk_buff *skb)
135{ 148{
136 local_bh_disable(); 149 if (NFCT_FRAG6_CB(skb)->orig)
137 inet_frag_evictor(&nf_init_frags, &nf_frags); 150 kfree_skb(NFCT_FRAG6_CB(skb)->orig);
138 local_bh_enable();
139} 151}
140 152
141static void nf_ct_frag6_expire(unsigned long data) 153static void nf_ct_frag6_expire(unsigned long data)
142{ 154{
143 struct nf_ct_frag6_queue *fq; 155 struct frag_queue *fq;
144 156 struct net *net;
145 fq = container_of((struct inet_frag_queue *)data,
146 struct nf_ct_frag6_queue, q);
147
148 spin_lock(&fq->q.lock);
149 157
150 if (fq->q.last_in & INET_FRAG_COMPLETE) 158 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
151 goto out; 159 net = container_of(fq->q.net, struct net, nf_frag.frags);
152 160
153 fq_kill(fq); 161 ip6_expire_frag_queue(net, fq, &nf_frags);
154
155out:
156 spin_unlock(&fq->q.lock);
157 fq_put(fq);
158} 162}
159 163
160/* Creation primitives. */ 164/* Creation primitives. */
161 165static inline struct frag_queue *fq_find(struct net *net, __be32 id,
162static __inline__ struct nf_ct_frag6_queue * 166 u32 user, struct in6_addr *src,
163fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) 167 struct in6_addr *dst)
164{ 168{
165 struct inet_frag_queue *q; 169 struct inet_frag_queue *q;
166 struct ip6_create_arg arg; 170 struct ip6_create_arg arg;
@@ -174,22 +178,23 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
174 read_lock_bh(&nf_frags.lock); 178 read_lock_bh(&nf_frags.lock);
175 hash = inet6_hash_frag(id, src, dst, nf_frags.rnd); 179 hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
176 180
177 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); 181 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
178 local_bh_enable(); 182 local_bh_enable();
179 if (q == NULL) 183 if (q == NULL)
180 goto oom; 184 goto oom;
181 185
182 return container_of(q, struct nf_ct_frag6_queue, q); 186 return container_of(q, struct frag_queue, q);
183 187
184oom: 188oom:
185 return NULL; 189 return NULL;
186} 190}
187 191
188 192
189static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, 193static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
190 const struct frag_hdr *fhdr, int nhoff) 194 const struct frag_hdr *fhdr, int nhoff)
191{ 195{
192 struct sk_buff *prev, *next; 196 struct sk_buff *prev, *next;
197 unsigned int payload_len;
193 int offset, end; 198 int offset, end;
194 199
195 if (fq->q.last_in & INET_FRAG_COMPLETE) { 200 if (fq->q.last_in & INET_FRAG_COMPLETE) {
@@ -197,8 +202,10 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
197 goto err; 202 goto err;
198 } 203 }
199 204
205 payload_len = ntohs(ipv6_hdr(skb)->payload_len);
206
200 offset = ntohs(fhdr->frag_off) & ~0x7; 207 offset = ntohs(fhdr->frag_off) & ~0x7;
201 end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - 208 end = offset + (payload_len -
202 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); 209 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
203 210
204 if ((unsigned int)end > IPV6_MAXPLEN) { 211 if ((unsigned int)end > IPV6_MAXPLEN) {
@@ -307,7 +314,9 @@ found:
307 skb->dev = NULL; 314 skb->dev = NULL;
308 fq->q.stamp = skb->tstamp; 315 fq->q.stamp = skb->tstamp;
309 fq->q.meat += skb->len; 316 fq->q.meat += skb->len;
310 atomic_add(skb->truesize, &nf_init_frags.mem); 317 if (payload_len > fq->q.max_size)
318 fq->q.max_size = payload_len;
319 atomic_add(skb->truesize, &fq->q.net->mem);
311 320
312 /* The first fragment. 321 /* The first fragment.
313 * nhoffset is obtained from the first fragment, of course. 322 * nhoffset is obtained from the first fragment, of course.
@@ -317,12 +326,12 @@ found:
317 fq->q.last_in |= INET_FRAG_FIRST_IN; 326 fq->q.last_in |= INET_FRAG_FIRST_IN;
318 } 327 }
319 write_lock(&nf_frags.lock); 328 write_lock(&nf_frags.lock);
320 list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list); 329 list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
321 write_unlock(&nf_frags.lock); 330 write_unlock(&nf_frags.lock);
322 return 0; 331 return 0;
323 332
324discard_fq: 333discard_fq:
325 fq_kill(fq); 334 inet_frag_kill(&fq->q, &nf_frags);
326err: 335err:
327 return -1; 336 return -1;
328} 337}
@@ -337,12 +346,12 @@ err:
337 * the last and the first frames arrived and all the bits are here. 346 * the last and the first frames arrived and all the bits are here.
338 */ 347 */
339static struct sk_buff * 348static struct sk_buff *
340nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) 349nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
341{ 350{
342 struct sk_buff *fp, *op, *head = fq->q.fragments; 351 struct sk_buff *fp, *op, *head = fq->q.fragments;
343 int payload_len; 352 int payload_len;
344 353
345 fq_kill(fq); 354 inet_frag_kill(&fq->q, &nf_frags);
346 355
347 WARN_ON(head == NULL); 356 WARN_ON(head == NULL);
348 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); 357 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -386,7 +395,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
386 clone->ip_summed = head->ip_summed; 395 clone->ip_summed = head->ip_summed;
387 396
388 NFCT_FRAG6_CB(clone)->orig = NULL; 397 NFCT_FRAG6_CB(clone)->orig = NULL;
389 atomic_add(clone->truesize, &nf_init_frags.mem); 398 atomic_add(clone->truesize, &fq->q.net->mem);
390 } 399 }
391 400
392 /* We have to remove fragment header from datagram and to relocate 401 /* We have to remove fragment header from datagram and to relocate
@@ -410,12 +419,14 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
410 head->csum = csum_add(head->csum, fp->csum); 419 head->csum = csum_add(head->csum, fp->csum);
411 head->truesize += fp->truesize; 420 head->truesize += fp->truesize;
412 } 421 }
413 atomic_sub(head->truesize, &nf_init_frags.mem); 422 atomic_sub(head->truesize, &fq->q.net->mem);
414 423
424 head->local_df = 1;
415 head->next = NULL; 425 head->next = NULL;
416 head->dev = dev; 426 head->dev = dev;
417 head->tstamp = fq->q.stamp; 427 head->tstamp = fq->q.stamp;
418 ipv6_hdr(head)->payload_len = htons(payload_len); 428 ipv6_hdr(head)->payload_len = htons(payload_len);
429 IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
419 430
420 /* Yes, and fold redundant checksum back. 8) */ 431 /* Yes, and fold redundant checksum back. 8) */
421 if (head->ip_summed == CHECKSUM_COMPLETE) 432 if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -520,8 +531,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
520{ 531{
521 struct sk_buff *clone; 532 struct sk_buff *clone;
522 struct net_device *dev = skb->dev; 533 struct net_device *dev = skb->dev;
534 struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
535 : dev_net(skb->dev);
523 struct frag_hdr *fhdr; 536 struct frag_hdr *fhdr;
524 struct nf_ct_frag6_queue *fq; 537 struct frag_queue *fq;
525 struct ipv6hdr *hdr; 538 struct ipv6hdr *hdr;
526 int fhoff, nhoff; 539 int fhoff, nhoff;
527 u8 prevhdr; 540 u8 prevhdr;
@@ -553,10 +566,11 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
553 hdr = ipv6_hdr(clone); 566 hdr = ipv6_hdr(clone);
554 fhdr = (struct frag_hdr *)skb_transport_header(clone); 567 fhdr = (struct frag_hdr *)skb_transport_header(clone);
555 568
556 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) 569 local_bh_disable();
557 nf_ct_frag6_evictor(); 570 inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false);
571 local_bh_enable();
558 572
559 fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); 573 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr);
560 if (fq == NULL) { 574 if (fq == NULL) {
561 pr_debug("Can't find and can't create new queue\n"); 575 pr_debug("Can't find and can't create new queue\n");
562 goto ret_orig; 576 goto ret_orig;
@@ -567,7 +581,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
567 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { 581 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
568 spin_unlock_bh(&fq->q.lock); 582 spin_unlock_bh(&fq->q.lock);
569 pr_debug("Can't insert skb to queue\n"); 583 pr_debug("Can't insert skb to queue\n");
570 fq_put(fq); 584 inet_frag_put(&fq->q, &nf_frags);
571 goto ret_orig; 585 goto ret_orig;
572 } 586 }
573 587
@@ -579,7 +593,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
579 } 593 }
580 spin_unlock_bh(&fq->q.lock); 594 spin_unlock_bh(&fq->q.lock);
581 595
582 fq_put(fq); 596 inet_frag_put(&fq->q, &nf_frags);
583 return ret_skb; 597 return ret_skb;
584 598
585ret_orig: 599ret_orig:
@@ -592,6 +606,7 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
592 int (*okfn)(struct sk_buff *)) 606 int (*okfn)(struct sk_buff *))
593{ 607{
594 struct sk_buff *s, *s2; 608 struct sk_buff *s, *s2;
609 unsigned int ret = 0;
595 610
596 for (s = NFCT_FRAG6_CB(skb)->orig; s;) { 611 for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
597 nf_conntrack_put_reasm(s->nfct_reasm); 612 nf_conntrack_put_reasm(s->nfct_reasm);
@@ -601,49 +616,62 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
601 s2 = s->next; 616 s2 = s->next;
602 s->next = NULL; 617 s->next = NULL;
603 618
604 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn, 619 if (ret != -ECANCELED)
605 NF_IP6_PRI_CONNTRACK_DEFRAG + 1); 620 ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
621 in, out, okfn,
622 NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
623 else
624 kfree_skb(s);
625
606 s = s2; 626 s = s2;
607 } 627 }
608 nf_conntrack_put_reasm(skb); 628 nf_conntrack_put_reasm(skb);
609} 629}
610 630
631static int nf_ct_net_init(struct net *net)
632{
633 net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
634 net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
635 net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
636 inet_frags_init_net(&net->nf_frag.frags);
637
638 return nf_ct_frag6_sysctl_register(net);
639}
640
641static void nf_ct_net_exit(struct net *net)
642{
643 nf_ct_frags6_sysctl_unregister(net);
644 inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
645}
646
647static struct pernet_operations nf_ct_net_ops = {
648 .init = nf_ct_net_init,
649 .exit = nf_ct_net_exit,
650};
651
611int nf_ct_frag6_init(void) 652int nf_ct_frag6_init(void)
612{ 653{
654 int ret = 0;
655
613 nf_frags.hashfn = nf_hashfn; 656 nf_frags.hashfn = nf_hashfn;
614 nf_frags.constructor = ip6_frag_init; 657 nf_frags.constructor = ip6_frag_init;
615 nf_frags.destructor = NULL; 658 nf_frags.destructor = NULL;
616 nf_frags.skb_free = nf_skb_free; 659 nf_frags.skb_free = nf_skb_free;
617 nf_frags.qsize = sizeof(struct nf_ct_frag6_queue); 660 nf_frags.qsize = sizeof(struct frag_queue);
618 nf_frags.match = ip6_frag_match; 661 nf_frags.match = ip6_frag_match;
619 nf_frags.frag_expire = nf_ct_frag6_expire; 662 nf_frags.frag_expire = nf_ct_frag6_expire;
620 nf_frags.secret_interval = 10 * 60 * HZ; 663 nf_frags.secret_interval = 10 * 60 * HZ;
621 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
622 nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
623 nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
624 inet_frags_init_net(&nf_init_frags);
625 inet_frags_init(&nf_frags); 664 inet_frags_init(&nf_frags);
626 665
627#ifdef CONFIG_SYSCTL 666 ret = register_pernet_subsys(&nf_ct_net_ops);
628 nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter", 667 if (ret)
629 nf_ct_frag6_sysctl_table);
630 if (!nf_ct_frag6_sysctl_header) {
631 inet_frags_fini(&nf_frags); 668 inet_frags_fini(&nf_frags);
632 return -ENOMEM;
633 }
634#endif
635 669
636 return 0; 670 return ret;
637} 671}
638 672
639void nf_ct_frag6_cleanup(void) 673void nf_ct_frag6_cleanup(void)
640{ 674{
641#ifdef CONFIG_SYSCTL 675 unregister_pernet_subsys(&nf_ct_net_ops);
642 unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
643 nf_ct_frag6_sysctl_header = NULL;
644#endif
645 inet_frags_fini(&nf_frags); 676 inet_frags_fini(&nf_frags);
646
647 nf_init_frags.low_thresh = 0;
648 nf_ct_frag6_evictor();
649} 677}
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
new file mode 100644
index 000000000000..abfe75a2e316
--- /dev/null
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -0,0 +1,288 @@
1/*
2 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Development of IPv6 NAT funded by Astaro.
9 */
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/ipv6.h>
14#include <linux/netfilter.h>
15#include <linux/netfilter_ipv6.h>
16#include <net/secure_seq.h>
17#include <net/checksum.h>
18#include <net/ip6_checksum.h>
19#include <net/ip6_route.h>
20#include <net/ipv6.h>
21
22#include <net/netfilter/nf_conntrack_core.h>
23#include <net/netfilter/nf_conntrack.h>
24#include <net/netfilter/nf_nat_core.h>
25#include <net/netfilter/nf_nat_l3proto.h>
26#include <net/netfilter/nf_nat_l4proto.h>
27
28static const struct nf_nat_l3proto nf_nat_l3proto_ipv6;
29
30#ifdef CONFIG_XFRM
31static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
32 const struct nf_conn *ct,
33 enum ip_conntrack_dir dir,
34 unsigned long statusbit,
35 struct flowi *fl)
36{
37 const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
38 struct flowi6 *fl6 = &fl->u.ip6;
39
40 if (ct->status & statusbit) {
41 fl6->daddr = t->dst.u3.in6;
42 if (t->dst.protonum == IPPROTO_TCP ||
43 t->dst.protonum == IPPROTO_UDP ||
44 t->dst.protonum == IPPROTO_UDPLITE ||
45 t->dst.protonum == IPPROTO_DCCP ||
46 t->dst.protonum == IPPROTO_SCTP)
47 fl6->fl6_dport = t->dst.u.all;
48 }
49
50 statusbit ^= IPS_NAT_MASK;
51
52 if (ct->status & statusbit) {
53 fl6->saddr = t->src.u3.in6;
54 if (t->dst.protonum == IPPROTO_TCP ||
55 t->dst.protonum == IPPROTO_UDP ||
56 t->dst.protonum == IPPROTO_UDPLITE ||
57 t->dst.protonum == IPPROTO_DCCP ||
58 t->dst.protonum == IPPROTO_SCTP)
59 fl6->fl6_sport = t->src.u.all;
60 }
61}
62#endif
63
64static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t,
65 const struct nf_nat_range *range)
66{
67 return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
68 ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
69}
70
71static u32 nf_nat_ipv6_secure_port(const struct nf_conntrack_tuple *t,
72 __be16 dport)
73{
74 return secure_ipv6_port_ephemeral(t->src.u3.ip6, t->dst.u3.ip6, dport);
75}
76
77static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
78 unsigned int iphdroff,
79 const struct nf_nat_l4proto *l4proto,
80 const struct nf_conntrack_tuple *target,
81 enum nf_nat_manip_type maniptype)
82{
83 struct ipv6hdr *ipv6h;
84 __be16 frag_off;
85 int hdroff;
86 u8 nexthdr;
87
88 if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
89 return false;
90
91 ipv6h = (void *)skb->data + iphdroff;
92 nexthdr = ipv6h->nexthdr;
93 hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
94 &nexthdr, &frag_off);
95 if (hdroff < 0)
96 goto manip_addr;
97
98 if ((frag_off & htons(~0x7)) == 0 &&
99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
100 target, maniptype))
101 return false;
102manip_addr:
103 if (maniptype == NF_NAT_MANIP_SRC)
104 ipv6h->saddr = target->src.u3.in6;
105 else
106 ipv6h->daddr = target->dst.u3.in6;
107
108 return true;
109}
110
111static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
112 unsigned int iphdroff, __sum16 *check,
113 const struct nf_conntrack_tuple *t,
114 enum nf_nat_manip_type maniptype)
115{
116 const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
117 const struct in6_addr *oldip, *newip;
118
119 if (maniptype == NF_NAT_MANIP_SRC) {
120 oldip = &ipv6h->saddr;
121 newip = &t->src.u3.in6;
122 } else {
123 oldip = &ipv6h->daddr;
124 newip = &t->dst.u3.in6;
125 }
126 inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
127 newip->s6_addr32, 1);
128}
129
130static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
131 u8 proto, void *data, __sum16 *check,
132 int datalen, int oldlen)
133{
134 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
135 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
136
137 if (skb->ip_summed != CHECKSUM_PARTIAL) {
138 if (!(rt->rt6i_flags & RTF_LOCAL) &&
139 (!skb->dev || skb->dev->features & NETIF_F_V6_CSUM)) {
140 skb->ip_summed = CHECKSUM_PARTIAL;
141 skb->csum_start = skb_headroom(skb) +
142 skb_network_offset(skb) +
143 (data - (void *)skb->data);
144 skb->csum_offset = (void *)check - data;
145 *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
146 datalen, proto, 0);
147 } else {
148 *check = 0;
149 *check = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
150 datalen, proto,
151 csum_partial(data, datalen,
152 0));
153 if (proto == IPPROTO_UDP && !*check)
154 *check = CSUM_MANGLED_0;
155 }
156 } else
157 inet_proto_csum_replace2(check, skb,
158 htons(oldlen), htons(datalen), 1);
159}
160
161static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
162 struct nf_nat_range *range)
163{
164 if (tb[CTA_NAT_V6_MINIP]) {
165 nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
166 sizeof(struct in6_addr));
167 range->flags |= NF_NAT_RANGE_MAP_IPS;
168 }
169
170 if (tb[CTA_NAT_V6_MAXIP])
171 nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
172 sizeof(struct in6_addr));
173 else
174 range->max_addr = range->min_addr;
175
176 return 0;
177}
178
179static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
180 .l3proto = NFPROTO_IPV6,
181 .secure_port = nf_nat_ipv6_secure_port,
182 .in_range = nf_nat_ipv6_in_range,
183 .manip_pkt = nf_nat_ipv6_manip_pkt,
184 .csum_update = nf_nat_ipv6_csum_update,
185 .csum_recalc = nf_nat_ipv6_csum_recalc,
186 .nlattr_to_range = nf_nat_ipv6_nlattr_to_range,
187#ifdef CONFIG_XFRM
188 .decode_session = nf_nat_ipv6_decode_session,
189#endif
190};
191
192int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
193 struct nf_conn *ct,
194 enum ip_conntrack_info ctinfo,
195 unsigned int hooknum,
196 unsigned int hdrlen)
197{
198 struct {
199 struct icmp6hdr icmp6;
200 struct ipv6hdr ip6;
201 } *inside;
202 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
203 enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
204 const struct nf_nat_l4proto *l4proto;
205 struct nf_conntrack_tuple target;
206 unsigned long statusbit;
207
208 NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
209
210 if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
211 return 0;
212 if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
213 return 0;
214
215 inside = (void *)skb->data + hdrlen;
216 if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
217 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
218 return 0;
219 if (ct->status & IPS_NAT_MASK)
220 return 0;
221 }
222
223 if (manip == NF_NAT_MANIP_SRC)
224 statusbit = IPS_SRC_NAT;
225 else
226 statusbit = IPS_DST_NAT;
227
228 /* Invert if this is reply direction */
229 if (dir == IP_CT_DIR_REPLY)
230 statusbit ^= IPS_NAT_MASK;
231
232 if (!(ct->status & statusbit))
233 return 1;
234
235 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, inside->ip6.nexthdr);
236 if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
237 l4proto, &ct->tuplehash[!dir].tuple, !manip))
238 return 0;
239
240 if (skb->ip_summed != CHECKSUM_PARTIAL) {
241 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
242 inside = (void *)skb->data + hdrlen;
243 inside->icmp6.icmp6_cksum = 0;
244 inside->icmp6.icmp6_cksum =
245 csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
246 skb->len - hdrlen, IPPROTO_ICMPV6,
247 csum_partial(&inside->icmp6,
248 skb->len - hdrlen, 0));
249 }
250
251 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
252 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, IPPROTO_ICMPV6);
253 if (!nf_nat_ipv6_manip_pkt(skb, 0, l4proto, &target, manip))
254 return 0;
255
256 return 1;
257}
258EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
259
260static int __init nf_nat_l3proto_ipv6_init(void)
261{
262 int err;
263
264 err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
265 if (err < 0)
266 goto err1;
267 err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
268 if (err < 0)
269 goto err2;
270 return err;
271
272err2:
273 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
274err1:
275 return err;
276}
277
278static void __exit nf_nat_l3proto_ipv6_exit(void)
279{
280 nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv6);
281 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
282}
283
284MODULE_LICENSE("GPL");
285MODULE_ALIAS("nf-nat-" __stringify(AF_INET6));
286
287module_init(nf_nat_l3proto_ipv6_init);
288module_exit(nf_nat_l3proto_ipv6_exit);
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
new file mode 100644
index 000000000000..5d6da784305b
--- /dev/null
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -0,0 +1,90 @@
1/*
2 * Copyright (c) 2011 Patrick Mchardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Based on Rusty Russell's IPv4 ICMP NAT code. Development of IPv6
9 * NAT funded by Astaro.
10 */
11
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/icmpv6.h>
15
16#include <linux/netfilter.h>
17#include <net/netfilter/nf_nat.h>
18#include <net/netfilter/nf_nat_core.h>
19#include <net/netfilter/nf_nat_l3proto.h>
20#include <net/netfilter/nf_nat_l4proto.h>
21
22static bool
23icmpv6_in_range(const struct nf_conntrack_tuple *tuple,
24 enum nf_nat_manip_type maniptype,
25 const union nf_conntrack_man_proto *min,
26 const union nf_conntrack_man_proto *max)
27{
28 return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
29 ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
30}
31
32static void
33icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto,
34 struct nf_conntrack_tuple *tuple,
35 const struct nf_nat_range *range,
36 enum nf_nat_manip_type maniptype,
37 const struct nf_conn *ct)
38{
39 static u16 id;
40 unsigned int range_size;
41 unsigned int i;
42
43 range_size = ntohs(range->max_proto.icmp.id) -
44 ntohs(range->min_proto.icmp.id) + 1;
45
46 if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
47 range_size = 0xffff;
48
49 for (i = 0; ; ++id) {
50 tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
51 (id % range_size));
52 if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
53 return;
54 }
55}
56
57static bool
58icmpv6_manip_pkt(struct sk_buff *skb,
59 const struct nf_nat_l3proto *l3proto,
60 unsigned int iphdroff, unsigned int hdroff,
61 const struct nf_conntrack_tuple *tuple,
62 enum nf_nat_manip_type maniptype)
63{
64 struct icmp6hdr *hdr;
65
66 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
67 return false;
68
69 hdr = (struct icmp6hdr *)(skb->data + hdroff);
70 l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
71 tuple, maniptype);
72 if (hdr->icmp6_code == ICMPV6_ECHO_REQUEST ||
73 hdr->icmp6_code == ICMPV6_ECHO_REPLY) {
74 inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
75 hdr->icmp6_identifier,
76 tuple->src.u.icmp.id, 0);
77 hdr->icmp6_identifier = tuple->src.u.icmp.id;
78 }
79 return true;
80}
81
82const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
83 .l4proto = IPPROTO_ICMPV6,
84 .manip_pkt = icmpv6_manip_pkt,
85 .in_range = icmpv6_in_range,
86 .unique_tuple = icmpv6_unique_tuple,
87#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
88 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
89#endif
90};
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4ff9af628e72..da8a4e301b1b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -65,36 +65,8 @@ struct ip6frag_skb_cb
65#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb)) 65#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
66 66
67 67
68/*
69 * Equivalent of ipv4 struct ipq
70 */
71
72struct frag_queue
73{
74 struct inet_frag_queue q;
75
76 __be32 id; /* fragment id */
77 u32 user;
78 struct in6_addr saddr;
79 struct in6_addr daddr;
80
81 int iif;
82 unsigned int csum;
83 __u16 nhoffset;
84};
85
86static struct inet_frags ip6_frags; 68static struct inet_frags ip6_frags;
87 69
88int ip6_frag_nqueues(struct net *net)
89{
90 return net->ipv6.frags.nqueues;
91}
92
93int ip6_frag_mem(struct net *net)
94{
95 return atomic_read(&net->ipv6.frags.mem);
96}
97
98static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, 70static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
99 struct net_device *dev); 71 struct net_device *dev);
100 72
@@ -159,46 +131,18 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
159} 131}
160EXPORT_SYMBOL(ip6_frag_init); 132EXPORT_SYMBOL(ip6_frag_init);
161 133
162/* Destruction primitives. */ 134void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
163 135 struct inet_frags *frags)
164static __inline__ void fq_put(struct frag_queue *fq)
165{
166 inet_frag_put(&fq->q, &ip6_frags);
167}
168
169/* Kill fq entry. It is not destroyed immediately,
170 * because caller (and someone more) holds reference count.
171 */
172static __inline__ void fq_kill(struct frag_queue *fq)
173{
174 inet_frag_kill(&fq->q, &ip6_frags);
175}
176
177static void ip6_evictor(struct net *net, struct inet6_dev *idev)
178{ 136{
179 int evicted;
180
181 evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
182 if (evicted)
183 IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
184}
185
186static void ip6_frag_expire(unsigned long data)
187{
188 struct frag_queue *fq;
189 struct net_device *dev = NULL; 137 struct net_device *dev = NULL;
190 struct net *net;
191
192 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
193 138
194 spin_lock(&fq->q.lock); 139 spin_lock(&fq->q.lock);
195 140
196 if (fq->q.last_in & INET_FRAG_COMPLETE) 141 if (fq->q.last_in & INET_FRAG_COMPLETE)
197 goto out; 142 goto out;
198 143
199 fq_kill(fq); 144 inet_frag_kill(&fq->q, frags);
200 145
201 net = container_of(fq->q.net, struct net, ipv6.frags);
202 rcu_read_lock(); 146 rcu_read_lock();
203 dev = dev_get_by_index_rcu(net, fq->iif); 147 dev = dev_get_by_index_rcu(net, fq->iif);
204 if (!dev) 148 if (!dev)
@@ -222,7 +166,19 @@ out_rcu_unlock:
222 rcu_read_unlock(); 166 rcu_read_unlock();
223out: 167out:
224 spin_unlock(&fq->q.lock); 168 spin_unlock(&fq->q.lock);
225 fq_put(fq); 169 inet_frag_put(&fq->q, frags);
170}
171EXPORT_SYMBOL(ip6_expire_frag_queue);
172
173static void ip6_frag_expire(unsigned long data)
174{
175 struct frag_queue *fq;
176 struct net *net;
177
178 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
179 net = container_of(fq->q.net, struct net, ipv6.frags);
180
181 ip6_expire_frag_queue(net, fq, &ip6_frags);
226} 182}
227 183
228static __inline__ struct frag_queue * 184static __inline__ struct frag_queue *
@@ -391,7 +347,7 @@ found:
391 return -1; 347 return -1;
392 348
393discard_fq: 349discard_fq:
394 fq_kill(fq); 350 inet_frag_kill(&fq->q, &ip6_frags);
395err: 351err:
396 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 352 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
397 IPSTATS_MIB_REASMFAILS); 353 IPSTATS_MIB_REASMFAILS);
@@ -417,7 +373,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
417 unsigned int nhoff; 373 unsigned int nhoff;
418 int sum_truesize; 374 int sum_truesize;
419 375
420 fq_kill(fq); 376 inet_frag_kill(&fq->q, &ip6_frags);
421 377
422 /* Make the one we just received the head. */ 378 /* Make the one we just received the head. */
423 if (prev) { 379 if (prev) {
@@ -550,6 +506,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
550 struct frag_queue *fq; 506 struct frag_queue *fq;
551 const struct ipv6hdr *hdr = ipv6_hdr(skb); 507 const struct ipv6hdr *hdr = ipv6_hdr(skb);
552 struct net *net = dev_net(skb_dst(skb)->dev); 508 struct net *net = dev_net(skb_dst(skb)->dev);
509 int evicted;
553 510
554 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 511 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
555 512
@@ -574,8 +531,10 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
574 return 1; 531 return 1;
575 } 532 }
576 533
577 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 534 evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
578 ip6_evictor(net, ip6_dst_idev(skb_dst(skb))); 535 if (evicted)
536 IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
537 IPSTATS_MIB_REASMFAILS, evicted);
579 538
580 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr); 539 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
581 if (fq != NULL) { 540 if (fq != NULL) {
@@ -586,7 +545,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
586 ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); 545 ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
587 546
588 spin_unlock(&fq->q.lock); 547 spin_unlock(&fq->q.lock);
589 fq_put(fq); 548 inet_frag_put(&fq->q, &ip6_frags);
590 return ret; 549 return ret;
591 } 550 }
592 551
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 854e4018d205..d1ddbc6ddac5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -222,7 +222,7 @@ static const u32 ip6_template_metrics[RTAX_MAX] = {
222 [RTAX_HOPLIMIT - 1] = 255, 222 [RTAX_HOPLIMIT - 1] = 255,
223}; 223};
224 224
225static struct rt6_info ip6_null_entry_template = { 225static const struct rt6_info ip6_null_entry_template = {
226 .dst = { 226 .dst = {
227 .__refcnt = ATOMIC_INIT(1), 227 .__refcnt = ATOMIC_INIT(1),
228 .__use = 1, 228 .__use = 1,
@@ -242,7 +242,7 @@ static struct rt6_info ip6_null_entry_template = {
242static int ip6_pkt_prohibit(struct sk_buff *skb); 242static int ip6_pkt_prohibit(struct sk_buff *skb);
243static int ip6_pkt_prohibit_out(struct sk_buff *skb); 243static int ip6_pkt_prohibit_out(struct sk_buff *skb);
244 244
245static struct rt6_info ip6_prohibit_entry_template = { 245static const struct rt6_info ip6_prohibit_entry_template = {
246 .dst = { 246 .dst = {
247 .__refcnt = ATOMIC_INIT(1), 247 .__refcnt = ATOMIC_INIT(1),
248 .__use = 1, 248 .__use = 1,
@@ -257,7 +257,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
257 .rt6i_ref = ATOMIC_INIT(1), 257 .rt6i_ref = ATOMIC_INIT(1),
258}; 258};
259 259
260static struct rt6_info ip6_blk_hole_entry_template = { 260static const struct rt6_info ip6_blk_hole_entry_template = {
261 .dst = { 261 .dst = {
262 .__refcnt = ATOMIC_INIT(1), 262 .__refcnt = ATOMIC_INIT(1),
263 .__use = 1, 263 .__use = 1,
@@ -370,15 +370,11 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
370 370
371static bool rt6_check_expired(const struct rt6_info *rt) 371static bool rt6_check_expired(const struct rt6_info *rt)
372{ 372{
373 struct rt6_info *ort = NULL;
374
375 if (rt->rt6i_flags & RTF_EXPIRES) { 373 if (rt->rt6i_flags & RTF_EXPIRES) {
376 if (time_after(jiffies, rt->dst.expires)) 374 if (time_after(jiffies, rt->dst.expires))
377 return true; 375 return true;
378 } else if (rt->dst.from) { 376 } else if (rt->dst.from) {
379 ort = (struct rt6_info *) rt->dst.from; 377 return rt6_check_expired((struct rt6_info *) rt->dst.from);
380 return (ort->rt6i_flags & RTF_EXPIRES) &&
381 time_after(jiffies, ort->dst.expires);
382 } 378 }
383 return false; 379 return false;
384} 380}
@@ -452,10 +448,9 @@ static void rt6_probe(struct rt6_info *rt)
452 * Router Reachability Probe MUST be rate-limited 448 * Router Reachability Probe MUST be rate-limited
453 * to no more than one per minute. 449 * to no more than one per minute.
454 */ 450 */
455 rcu_read_lock();
456 neigh = rt ? rt->n : NULL; 451 neigh = rt ? rt->n : NULL;
457 if (!neigh || (neigh->nud_state & NUD_VALID)) 452 if (!neigh || (neigh->nud_state & NUD_VALID))
458 goto out; 453 return;
459 read_lock_bh(&neigh->lock); 454 read_lock_bh(&neigh->lock);
460 if (!(neigh->nud_state & NUD_VALID) && 455 if (!(neigh->nud_state & NUD_VALID) &&
461 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 456 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -471,8 +466,6 @@ static void rt6_probe(struct rt6_info *rt)
471 } else { 466 } else {
472 read_unlock_bh(&neigh->lock); 467 read_unlock_bh(&neigh->lock);
473 } 468 }
474out:
475 rcu_read_unlock();
476} 469}
477#else 470#else
478static inline void rt6_probe(struct rt6_info *rt) 471static inline void rt6_probe(struct rt6_info *rt)
@@ -499,7 +492,6 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
499 struct neighbour *neigh; 492 struct neighbour *neigh;
500 int m; 493 int m;
501 494
502 rcu_read_lock();
503 neigh = rt->n; 495 neigh = rt->n;
504 if (rt->rt6i_flags & RTF_NONEXTHOP || 496 if (rt->rt6i_flags & RTF_NONEXTHOP ||
505 !(rt->rt6i_flags & RTF_GATEWAY)) 497 !(rt->rt6i_flags & RTF_GATEWAY))
@@ -517,7 +509,6 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
517 read_unlock_bh(&neigh->lock); 509 read_unlock_bh(&neigh->lock);
518 } else 510 } else
519 m = 0; 511 m = 0;
520 rcu_read_unlock();
521 return m; 512 return m;
522} 513}
523 514
@@ -966,7 +957,7 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
966{ 957{
967 int flags = 0; 958 int flags = 0;
968 959
969 fl6->flowi6_iif = net->loopback_dev->ifindex; 960 fl6->flowi6_iif = LOOPBACK_IFINDEX;
970 961
971 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) 962 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
972 flags |= RT6_LOOKUP_F_IFACE; 963 flags |= RT6_LOOKUP_F_IFACE;
@@ -1469,8 +1460,21 @@ int ip6_route_add(struct fib6_config *cfg)
1469 } 1460 }
1470 rt->dst.output = ip6_pkt_discard_out; 1461 rt->dst.output = ip6_pkt_discard_out;
1471 rt->dst.input = ip6_pkt_discard; 1462 rt->dst.input = ip6_pkt_discard;
1472 rt->dst.error = -ENETUNREACH;
1473 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; 1463 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1464 switch (cfg->fc_type) {
1465 case RTN_BLACKHOLE:
1466 rt->dst.error = -EINVAL;
1467 break;
1468 case RTN_PROHIBIT:
1469 rt->dst.error = -EACCES;
1470 break;
1471 case RTN_THROW:
1472 rt->dst.error = -EAGAIN;
1473 break;
1474 default:
1475 rt->dst.error = -ENETUNREACH;
1476 break;
1477 }
1474 goto install_route; 1478 goto install_route;
1475 } 1479 }
1476 1480
@@ -1835,7 +1839,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
1835 if (!table) 1839 if (!table)
1836 return NULL; 1840 return NULL;
1837 1841
1838 write_lock_bh(&table->tb6_lock); 1842 read_lock_bh(&table->tb6_lock);
1839 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0); 1843 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1840 if (!fn) 1844 if (!fn)
1841 goto out; 1845 goto out;
@@ -1851,7 +1855,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
1851 break; 1855 break;
1852 } 1856 }
1853out: 1857out:
1854 write_unlock_bh(&table->tb6_lock); 1858 read_unlock_bh(&table->tb6_lock);
1855 return rt; 1859 return rt;
1856} 1860}
1857 1861
@@ -1867,7 +1871,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
1867 .fc_dst_len = prefixlen, 1871 .fc_dst_len = prefixlen,
1868 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 1872 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1869 RTF_UP | RTF_PREF(pref), 1873 RTF_UP | RTF_PREF(pref),
1870 .fc_nlinfo.pid = 0, 1874 .fc_nlinfo.portid = 0,
1871 .fc_nlinfo.nlh = NULL, 1875 .fc_nlinfo.nlh = NULL,
1872 .fc_nlinfo.nl_net = net, 1876 .fc_nlinfo.nl_net = net,
1873 }; 1877 };
@@ -1894,7 +1898,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
1894 if (!table) 1898 if (!table)
1895 return NULL; 1899 return NULL;
1896 1900
1897 write_lock_bh(&table->tb6_lock); 1901 read_lock_bh(&table->tb6_lock);
1898 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { 1902 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1899 if (dev == rt->dst.dev && 1903 if (dev == rt->dst.dev &&
1900 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 1904 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
@@ -1903,7 +1907,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
1903 } 1907 }
1904 if (rt) 1908 if (rt)
1905 dst_hold(&rt->dst); 1909 dst_hold(&rt->dst);
1906 write_unlock_bh(&table->tb6_lock); 1910 read_unlock_bh(&table->tb6_lock);
1907 return rt; 1911 return rt;
1908} 1912}
1909 1913
@@ -1917,7 +1921,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1917 .fc_ifindex = dev->ifindex, 1921 .fc_ifindex = dev->ifindex,
1918 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 1922 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1919 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 1923 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1920 .fc_nlinfo.pid = 0, 1924 .fc_nlinfo.portid = 0,
1921 .fc_nlinfo.nlh = NULL, 1925 .fc_nlinfo.nlh = NULL,
1922 .fc_nlinfo.nl_net = dev_net(dev), 1926 .fc_nlinfo.nl_net = dev_net(dev),
1923 }; 1927 };
@@ -2266,14 +2270,18 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2266 cfg->fc_src_len = rtm->rtm_src_len; 2270 cfg->fc_src_len = rtm->rtm_src_len;
2267 cfg->fc_flags = RTF_UP; 2271 cfg->fc_flags = RTF_UP;
2268 cfg->fc_protocol = rtm->rtm_protocol; 2272 cfg->fc_protocol = rtm->rtm_protocol;
2273 cfg->fc_type = rtm->rtm_type;
2269 2274
2270 if (rtm->rtm_type == RTN_UNREACHABLE) 2275 if (rtm->rtm_type == RTN_UNREACHABLE ||
2276 rtm->rtm_type == RTN_BLACKHOLE ||
2277 rtm->rtm_type == RTN_PROHIBIT ||
2278 rtm->rtm_type == RTN_THROW)
2271 cfg->fc_flags |= RTF_REJECT; 2279 cfg->fc_flags |= RTF_REJECT;
2272 2280
2273 if (rtm->rtm_type == RTN_LOCAL) 2281 if (rtm->rtm_type == RTN_LOCAL)
2274 cfg->fc_flags |= RTF_LOCAL; 2282 cfg->fc_flags |= RTF_LOCAL;
2275 2283
2276 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; 2284 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2277 cfg->fc_nlinfo.nlh = nlh; 2285 cfg->fc_nlinfo.nlh = nlh;
2278 cfg->fc_nlinfo.nl_net = sock_net(skb->sk); 2286 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2279 2287
@@ -2364,7 +2372,7 @@ static inline size_t rt6_nlmsg_size(void)
2364static int rt6_fill_node(struct net *net, 2372static int rt6_fill_node(struct net *net,
2365 struct sk_buff *skb, struct rt6_info *rt, 2373 struct sk_buff *skb, struct rt6_info *rt,
2366 struct in6_addr *dst, struct in6_addr *src, 2374 struct in6_addr *dst, struct in6_addr *src,
2367 int iif, int type, u32 pid, u32 seq, 2375 int iif, int type, u32 portid, u32 seq,
2368 int prefix, int nowait, unsigned int flags) 2376 int prefix, int nowait, unsigned int flags)
2369{ 2377{
2370 struct rtmsg *rtm; 2378 struct rtmsg *rtm;
@@ -2380,7 +2388,7 @@ static int rt6_fill_node(struct net *net,
2380 } 2388 }
2381 } 2389 }
2382 2390
2383 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags); 2391 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2384 if (!nlh) 2392 if (!nlh)
2385 return -EMSGSIZE; 2393 return -EMSGSIZE;
2386 2394
@@ -2396,8 +2404,22 @@ static int rt6_fill_node(struct net *net,
2396 rtm->rtm_table = table; 2404 rtm->rtm_table = table;
2397 if (nla_put_u32(skb, RTA_TABLE, table)) 2405 if (nla_put_u32(skb, RTA_TABLE, table))
2398 goto nla_put_failure; 2406 goto nla_put_failure;
2399 if (rt->rt6i_flags & RTF_REJECT) 2407 if (rt->rt6i_flags & RTF_REJECT) {
2400 rtm->rtm_type = RTN_UNREACHABLE; 2408 switch (rt->dst.error) {
2409 case -EINVAL:
2410 rtm->rtm_type = RTN_BLACKHOLE;
2411 break;
2412 case -EACCES:
2413 rtm->rtm_type = RTN_PROHIBIT;
2414 break;
2415 case -EAGAIN:
2416 rtm->rtm_type = RTN_THROW;
2417 break;
2418 default:
2419 rtm->rtm_type = RTN_UNREACHABLE;
2420 break;
2421 }
2422 }
2401 else if (rt->rt6i_flags & RTF_LOCAL) 2423 else if (rt->rt6i_flags & RTF_LOCAL)
2402 rtm->rtm_type = RTN_LOCAL; 2424 rtm->rtm_type = RTN_LOCAL;
2403 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) 2425 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
@@ -2470,15 +2492,11 @@ static int rt6_fill_node(struct net *net,
2470 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2492 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2471 goto nla_put_failure; 2493 goto nla_put_failure;
2472 2494
2473 rcu_read_lock();
2474 n = rt->n; 2495 n = rt->n;
2475 if (n) { 2496 if (n) {
2476 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { 2497 if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0)
2477 rcu_read_unlock();
2478 goto nla_put_failure; 2498 goto nla_put_failure;
2479 }
2480 } 2499 }
2481 rcu_read_unlock();
2482 2500
2483 if (rt->dst.dev && 2501 if (rt->dst.dev &&
2484 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) 2502 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
@@ -2511,7 +2529,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2511 2529
2512 return rt6_fill_node(arg->net, 2530 return rt6_fill_node(arg->net,
2513 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, 2531 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2514 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, 2532 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2515 prefix, 0, NLM_F_MULTI); 2533 prefix, 0, NLM_F_MULTI);
2516} 2534}
2517 2535
@@ -2591,14 +2609,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2591 skb_dst_set(skb, &rt->dst); 2609 skb_dst_set(skb, &rt->dst);
2592 2610
2593 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, 2611 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2594 RTM_NEWROUTE, NETLINK_CB(in_skb).pid, 2612 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2595 nlh->nlmsg_seq, 0, 0, 0); 2613 nlh->nlmsg_seq, 0, 0, 0);
2596 if (err < 0) { 2614 if (err < 0) {
2597 kfree_skb(skb); 2615 kfree_skb(skb);
2598 goto errout; 2616 goto errout;
2599 } 2617 }
2600 2618
2601 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); 2619 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2602errout: 2620errout:
2603 return err; 2621 return err;
2604} 2622}
@@ -2618,14 +2636,14 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2618 goto errout; 2636 goto errout;
2619 2637
2620 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, 2638 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2621 event, info->pid, seq, 0, 0, 0); 2639 event, info->portid, seq, 0, 0, 0);
2622 if (err < 0) { 2640 if (err < 0) {
2623 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 2641 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2624 WARN_ON(err == -EMSGSIZE); 2642 WARN_ON(err == -EMSGSIZE);
2625 kfree_skb(skb); 2643 kfree_skb(skb);
2626 goto errout; 2644 goto errout;
2627 } 2645 }
2628 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE, 2646 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2629 info->nlh, gfp_any()); 2647 info->nlh, gfp_any());
2630 return; 2648 return;
2631errout: 2649errout:
@@ -2680,14 +2698,12 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2680#else 2698#else
2681 seq_puts(m, "00000000000000000000000000000000 00 "); 2699 seq_puts(m, "00000000000000000000000000000000 00 ");
2682#endif 2700#endif
2683 rcu_read_lock();
2684 n = rt->n; 2701 n = rt->n;
2685 if (n) { 2702 if (n) {
2686 seq_printf(m, "%pi6", n->primary_key); 2703 seq_printf(m, "%pi6", n->primary_key);
2687 } else { 2704 } else {
2688 seq_puts(m, "00000000000000000000000000000000"); 2705 seq_puts(m, "00000000000000000000000000000000");
2689 } 2706 }
2690 rcu_read_unlock();
2691 seq_printf(m, " %08x %08x %08x %08x %8s\n", 2707 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2692 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), 2708 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2693 rt->dst.__use, rt->rt6i_flags, 2709 rt->dst.__use, rt->rt6i_flags,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3bd1bfc01f85..3ed54ffd8d50 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -545,7 +545,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
545 545
546 err = -ENOENT; 546 err = -ENOENT;
547 547
548 rcu_read_lock();
549 t = ipip6_tunnel_lookup(dev_net(skb->dev), 548 t = ipip6_tunnel_lookup(dev_net(skb->dev),
550 skb->dev, 549 skb->dev,
551 iph->daddr, 550 iph->daddr,
@@ -579,7 +578,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
579 t->err_count = 1; 578 t->err_count = 1;
580 t->err_time = jiffies; 579 t->err_time = jiffies;
581out: 580out:
582 rcu_read_unlock();
583 return err; 581 return err;
584} 582}
585 583
@@ -599,7 +597,6 @@ static int ipip6_rcv(struct sk_buff *skb)
599 597
600 iph = ip_hdr(skb); 598 iph = ip_hdr(skb);
601 599
602 rcu_read_lock();
603 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 600 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
604 iph->saddr, iph->daddr); 601 iph->saddr, iph->daddr);
605 if (tunnel != NULL) { 602 if (tunnel != NULL) {
@@ -615,7 +612,6 @@ static int ipip6_rcv(struct sk_buff *skb)
615 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 612 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
616 !isatap_chksrc(skb, iph, tunnel)) { 613 !isatap_chksrc(skb, iph, tunnel)) {
617 tunnel->dev->stats.rx_errors++; 614 tunnel->dev->stats.rx_errors++;
618 rcu_read_unlock();
619 kfree_skb(skb); 615 kfree_skb(skb);
620 return 0; 616 return 0;
621 } 617 }
@@ -630,12 +626,10 @@ static int ipip6_rcv(struct sk_buff *skb)
630 626
631 netif_rx(skb); 627 netif_rx(skb);
632 628
633 rcu_read_unlock();
634 return 0; 629 return 0;
635 } 630 }
636 631
637 /* no tunnel matched, let upstream know, ipsec may handle it */ 632 /* no tunnel matched, let upstream know, ipsec may handle it */
638 rcu_read_unlock();
639 return 1; 633 return 1;
640out: 634out:
641 kfree_skb(skb); 635 kfree_skb(skb);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb46061c813a..182ab9a85d6c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -190,6 +190,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
190 ireq = inet_rsk(req); 190 ireq = inet_rsk(req);
191 ireq6 = inet6_rsk(req); 191 ireq6 = inet6_rsk(req);
192 treq = tcp_rsk(req); 192 treq = tcp_rsk(req);
193 treq->listener = NULL;
193 194
194 if (security_inet_conn_request(sk, skb, req)) 195 if (security_inet_conn_request(sk, skb, req))
195 goto out_free; 196 goto out_free;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 342ec62cdbde..49c890386ce9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
476 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 476 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
477 goto done; 477 goto done;
478 478
479 skb = tcp_make_synack(sk, dst, req, rvp); 479 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
480 480
481 if (skb) { 481 if (skb) {
482 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 482 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -763,6 +763,8 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
763 struct sk_buff *skb) 763 struct sk_buff *skb)
764{ 764{
765 const struct ipv6hdr *iph = skb_gro_network_header(skb); 765 const struct ipv6hdr *iph = skb_gro_network_header(skb);
766 __wsum wsum;
767 __sum16 sum;
766 768
767 switch (skb->ip_summed) { 769 switch (skb->ip_summed) {
768 case CHECKSUM_COMPLETE: 770 case CHECKSUM_COMPLETE:
@@ -771,11 +773,23 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
771 skb->ip_summed = CHECKSUM_UNNECESSARY; 773 skb->ip_summed = CHECKSUM_UNNECESSARY;
772 break; 774 break;
773 } 775 }
774 776flush:
775 /* fall through */
776 case CHECKSUM_NONE:
777 NAPI_GRO_CB(skb)->flush = 1; 777 NAPI_GRO_CB(skb)->flush = 1;
778 return NULL; 778 return NULL;
779
780 case CHECKSUM_NONE:
781 wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
782 skb_gro_len(skb),
783 IPPROTO_TCP, 0));
784 sum = csum_fold(skb_checksum(skb,
785 skb_gro_offset(skb),
786 skb_gro_len(skb),
787 wsum));
788 if (sum)
789 goto flush;
790
791 skb->ip_summed = CHECKSUM_UNNECESSARY;
792 break;
779 } 793 }
780 794
781 return tcp_gro_receive(head, skb); 795 return tcp_gro_receive(head, skb);
@@ -988,7 +1002,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
988 &ipv6_hdr(skb)->saddr, 1002 &ipv6_hdr(skb)->saddr,
989 &ipv6_hdr(skb)->daddr, inet6_iif(skb)); 1003 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
990 if (req) 1004 if (req)
991 return tcp_check_req(sk, skb, req, prev); 1005 return tcp_check_req(sk, skb, req, prev, false);
992 1006
993 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, 1007 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
994 &ipv6_hdr(skb)->saddr, th->source, 1008 &ipv6_hdr(skb)->saddr, th->source,
@@ -1169,7 +1183,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1169 } 1183 }
1170have_isn: 1184have_isn:
1171 tcp_rsk(req)->snt_isn = isn; 1185 tcp_rsk(req)->snt_isn = isn;
1172 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1173 1186
1174 if (security_inet_conn_request(sk, skb, req)) 1187 if (security_inet_conn_request(sk, skb, req))
1175 goto drop_and_release; 1188 goto drop_and_release;
@@ -1180,6 +1193,8 @@ have_isn:
1180 want_cookie) 1193 want_cookie)
1181 goto drop_and_free; 1194 goto drop_and_free;
1182 1195
1196 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1197 tcp_rsk(req)->listener = NULL;
1183 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1198 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1184 return 0; 1199 return 0;
1185 1200
@@ -1347,9 +1362,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1347 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; 1362 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1348 1363
1349 tcp_initialize_rcv_mss(newsk); 1364 tcp_initialize_rcv_mss(newsk);
1350 if (tcp_rsk(req)->snt_synack) 1365 tcp_synack_rtt_meas(newsk, req);
1351 tcp_valid_rtt_meas(newsk,
1352 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1353 newtp->total_retrans = req->retrans; 1366 newtp->total_retrans = req->retrans;
1354 1367
1355 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; 1368 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
@@ -1901,7 +1914,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1901 tp->write_seq-tp->snd_una, 1914 tp->write_seq-tp->snd_una,
1902 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 1915 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1903 timer_active, 1916 timer_active,
1904 jiffies_to_clock_t(timer_expires - jiffies), 1917 jiffies_delta_to_clock_t(timer_expires - jiffies),
1905 icsk->icsk_retransmits, 1918 icsk->icsk_retransmits,
1906 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1919 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1907 icsk->icsk_probes_out, 1920 icsk->icsk_probes_out,
@@ -1921,10 +1934,7 @@ static void get_timewait6_sock(struct seq_file *seq,
1921 const struct in6_addr *dest, *src; 1934 const struct in6_addr *dest, *src;
1922 __u16 destp, srcp; 1935 __u16 destp, srcp;
1923 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 1936 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1924 int ttd = tw->tw_ttd - jiffies; 1937 long delta = tw->tw_ttd - jiffies;
1925
1926 if (ttd < 0)
1927 ttd = 0;
1928 1938
1929 dest = &tw6->tw_v6_daddr; 1939 dest = &tw6->tw_v6_daddr;
1930 src = &tw6->tw_v6_rcv_saddr; 1940 src = &tw6->tw_v6_rcv_saddr;
@@ -1940,7 +1950,7 @@ static void get_timewait6_sock(struct seq_file *seq,
1940 dest->s6_addr32[0], dest->s6_addr32[1], 1950 dest->s6_addr32[0], dest->s6_addr32[1],
1941 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1951 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1942 tw->tw_substate, 0, 0, 1952 tw->tw_substate, 0, 0,
1943 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, 1953 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1944 atomic_read(&tw->tw_refcnt), tw); 1954 atomic_read(&tw->tw_refcnt), tw);
1945} 1955}
1946 1956
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 6c7c4b92e4f8..c32971269280 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -100,7 +100,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
100 goto err_out; 100 goto err_out;
101 } 101 }
102 102
103 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 103 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
104 &irda_nl_family, 0, IRDA_NL_CMD_GET_MODE); 104 &irda_nl_family, 0, IRDA_NL_CMD_GET_MODE);
105 if (hdr == NULL) { 105 if (hdr == NULL) {
106 ret = -EMSGSIZE; 106 ret = -EMSGSIZE;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 0481d4b51476..08897a3c7ec7 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -54,7 +54,7 @@ struct pfkey_sock {
54 54
55 struct { 55 struct {
56 uint8_t msg_version; 56 uint8_t msg_version;
57 uint32_t msg_pid; 57 uint32_t msg_portid;
58 int (*dump)(struct pfkey_sock *sk); 58 int (*dump)(struct pfkey_sock *sk);
59 void (*done)(struct pfkey_sock *sk); 59 void (*done)(struct pfkey_sock *sk);
60 union { 60 union {
@@ -1447,7 +1447,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
1447 hdr->sadb_msg_errno = 0; 1447 hdr->sadb_msg_errno = 0;
1448 hdr->sadb_msg_reserved = 0; 1448 hdr->sadb_msg_reserved = 0;
1449 hdr->sadb_msg_seq = c->seq; 1449 hdr->sadb_msg_seq = c->seq;
1450 hdr->sadb_msg_pid = c->pid; 1450 hdr->sadb_msg_pid = c->portid;
1451 1451
1452 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); 1452 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
1453 1453
@@ -1486,7 +1486,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
1486 else 1486 else
1487 c.event = XFRM_MSG_UPDSA; 1487 c.event = XFRM_MSG_UPDSA;
1488 c.seq = hdr->sadb_msg_seq; 1488 c.seq = hdr->sadb_msg_seq;
1489 c.pid = hdr->sadb_msg_pid; 1489 c.portid = hdr->sadb_msg_pid;
1490 km_state_notify(x, &c); 1490 km_state_notify(x, &c);
1491out: 1491out:
1492 xfrm_state_put(x); 1492 xfrm_state_put(x);
@@ -1523,7 +1523,7 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_
1523 goto out; 1523 goto out;
1524 1524
1525 c.seq = hdr->sadb_msg_seq; 1525 c.seq = hdr->sadb_msg_seq;
1526 c.pid = hdr->sadb_msg_pid; 1526 c.portid = hdr->sadb_msg_pid;
1527 c.event = XFRM_MSG_DELSA; 1527 c.event = XFRM_MSG_DELSA;
1528 km_state_notify(x, &c); 1528 km_state_notify(x, &c);
1529out: 1529out:
@@ -1701,7 +1701,7 @@ static int key_notify_sa_flush(const struct km_event *c)
1701 hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto); 1701 hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
1702 hdr->sadb_msg_type = SADB_FLUSH; 1702 hdr->sadb_msg_type = SADB_FLUSH;
1703 hdr->sadb_msg_seq = c->seq; 1703 hdr->sadb_msg_seq = c->seq;
1704 hdr->sadb_msg_pid = c->pid; 1704 hdr->sadb_msg_pid = c->portid;
1705 hdr->sadb_msg_version = PF_KEY_V2; 1705 hdr->sadb_msg_version = PF_KEY_V2;
1706 hdr->sadb_msg_errno = (uint8_t) 0; 1706 hdr->sadb_msg_errno = (uint8_t) 0;
1707 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 1707 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -1736,7 +1736,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
1736 1736
1737 c.data.proto = proto; 1737 c.data.proto = proto;
1738 c.seq = hdr->sadb_msg_seq; 1738 c.seq = hdr->sadb_msg_seq;
1739 c.pid = hdr->sadb_msg_pid; 1739 c.portid = hdr->sadb_msg_pid;
1740 c.event = XFRM_MSG_FLUSHSA; 1740 c.event = XFRM_MSG_FLUSHSA;
1741 c.net = net; 1741 c.net = net;
1742 km_state_notify(NULL, &c); 1742 km_state_notify(NULL, &c);
@@ -1764,7 +1764,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
1764 out_hdr->sadb_msg_errno = 0; 1764 out_hdr->sadb_msg_errno = 0;
1765 out_hdr->sadb_msg_reserved = 0; 1765 out_hdr->sadb_msg_reserved = 0;
1766 out_hdr->sadb_msg_seq = count + 1; 1766 out_hdr->sadb_msg_seq = count + 1;
1767 out_hdr->sadb_msg_pid = pfk->dump.msg_pid; 1767 out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
1768 1768
1769 if (pfk->dump.skb) 1769 if (pfk->dump.skb)
1770 pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, 1770 pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
@@ -1798,7 +1798,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
1798 return -EINVAL; 1798 return -EINVAL;
1799 1799
1800 pfk->dump.msg_version = hdr->sadb_msg_version; 1800 pfk->dump.msg_version = hdr->sadb_msg_version;
1801 pfk->dump.msg_pid = hdr->sadb_msg_pid; 1801 pfk->dump.msg_portid = hdr->sadb_msg_pid;
1802 pfk->dump.dump = pfkey_dump_sa; 1802 pfk->dump.dump = pfkey_dump_sa;
1803 pfk->dump.done = pfkey_dump_sa_done; 1803 pfk->dump.done = pfkey_dump_sa_done;
1804 xfrm_state_walk_init(&pfk->dump.u.state, proto); 1804 xfrm_state_walk_init(&pfk->dump.u.state, proto);
@@ -1923,6 +1923,9 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
1923 int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy); 1923 int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy);
1924 struct sadb_x_ipsecrequest *rq = (void*)(pol+1); 1924 struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
1925 1925
1926 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
1927 return -EINVAL;
1928
1926 while (len >= sizeof(struct sadb_x_ipsecrequest)) { 1929 while (len >= sizeof(struct sadb_x_ipsecrequest)) {
1927 if ((err = parse_ipsecrequest(xp, rq)) < 0) 1930 if ((err = parse_ipsecrequest(xp, rq)) < 0)
1928 return err; 1931 return err;
@@ -2157,7 +2160,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
2157 out_hdr->sadb_msg_type = event2poltype(c->event); 2160 out_hdr->sadb_msg_type = event2poltype(c->event);
2158 out_hdr->sadb_msg_errno = 0; 2161 out_hdr->sadb_msg_errno = 0;
2159 out_hdr->sadb_msg_seq = c->seq; 2162 out_hdr->sadb_msg_seq = c->seq;
2160 out_hdr->sadb_msg_pid = c->pid; 2163 out_hdr->sadb_msg_pid = c->portid;
2161 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); 2164 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2162 return 0; 2165 return 0;
2163 2166
@@ -2272,7 +2275,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2272 c.event = XFRM_MSG_NEWPOLICY; 2275 c.event = XFRM_MSG_NEWPOLICY;
2273 2276
2274 c.seq = hdr->sadb_msg_seq; 2277 c.seq = hdr->sadb_msg_seq;
2275 c.pid = hdr->sadb_msg_pid; 2278 c.portid = hdr->sadb_msg_pid;
2276 2279
2277 km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); 2280 km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
2278 xfrm_pol_put(xp); 2281 xfrm_pol_put(xp);
@@ -2351,7 +2354,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2351 goto out; 2354 goto out;
2352 2355
2353 c.seq = hdr->sadb_msg_seq; 2356 c.seq = hdr->sadb_msg_seq;
2354 c.pid = hdr->sadb_msg_pid; 2357 c.portid = hdr->sadb_msg_pid;
2355 c.data.byid = 0; 2358 c.data.byid = 0;
2356 c.event = XFRM_MSG_DELPOLICY; 2359 c.event = XFRM_MSG_DELPOLICY;
2357 km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); 2360 km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
@@ -2597,7 +2600,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
2597 if (err) 2600 if (err)
2598 goto out; 2601 goto out;
2599 c.seq = hdr->sadb_msg_seq; 2602 c.seq = hdr->sadb_msg_seq;
2600 c.pid = hdr->sadb_msg_pid; 2603 c.portid = hdr->sadb_msg_pid;
2601 c.data.byid = 1; 2604 c.data.byid = 1;
2602 c.event = XFRM_MSG_DELPOLICY; 2605 c.event = XFRM_MSG_DELPOLICY;
2603 km_policy_notify(xp, dir, &c); 2606 km_policy_notify(xp, dir, &c);
@@ -2634,7 +2637,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2634 out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2637 out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2635 out_hdr->sadb_msg_errno = 0; 2638 out_hdr->sadb_msg_errno = 0;
2636 out_hdr->sadb_msg_seq = count + 1; 2639 out_hdr->sadb_msg_seq = count + 1;
2637 out_hdr->sadb_msg_pid = pfk->dump.msg_pid; 2640 out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
2638 2641
2639 if (pfk->dump.skb) 2642 if (pfk->dump.skb)
2640 pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, 2643 pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
@@ -2663,7 +2666,7 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
2663 return -EBUSY; 2666 return -EBUSY;
2664 2667
2665 pfk->dump.msg_version = hdr->sadb_msg_version; 2668 pfk->dump.msg_version = hdr->sadb_msg_version;
2666 pfk->dump.msg_pid = hdr->sadb_msg_pid; 2669 pfk->dump.msg_portid = hdr->sadb_msg_pid;
2667 pfk->dump.dump = pfkey_dump_sp; 2670 pfk->dump.dump = pfkey_dump_sp;
2668 pfk->dump.done = pfkey_dump_sp_done; 2671 pfk->dump.done = pfkey_dump_sp_done;
2669 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); 2672 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
@@ -2682,7 +2685,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2682 hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); 2685 hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
2683 hdr->sadb_msg_type = SADB_X_SPDFLUSH; 2686 hdr->sadb_msg_type = SADB_X_SPDFLUSH;
2684 hdr->sadb_msg_seq = c->seq; 2687 hdr->sadb_msg_seq = c->seq;
2685 hdr->sadb_msg_pid = c->pid; 2688 hdr->sadb_msg_pid = c->portid;
2686 hdr->sadb_msg_version = PF_KEY_V2; 2689 hdr->sadb_msg_version = PF_KEY_V2;
2687 hdr->sadb_msg_errno = (uint8_t) 0; 2690 hdr->sadb_msg_errno = (uint8_t) 0;
2688 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2691 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -2711,7 +2714,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
2711 2714
2712 c.data.type = XFRM_POLICY_TYPE_MAIN; 2715 c.data.type = XFRM_POLICY_TYPE_MAIN;
2713 c.event = XFRM_MSG_FLUSHPOLICY; 2716 c.event = XFRM_MSG_FLUSHPOLICY;
2714 c.pid = hdr->sadb_msg_pid; 2717 c.portid = hdr->sadb_msg_pid;
2715 c.seq = hdr->sadb_msg_seq; 2718 c.seq = hdr->sadb_msg_seq;
2716 c.net = net; 2719 c.net = net;
2717 km_policy_notify(NULL, 0, &c); 2720 km_policy_notify(NULL, 0, &c);
@@ -3024,7 +3027,7 @@ static u32 get_acqseq(void)
3024 return res; 3027 return res;
3025} 3028}
3026 3029
3027static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp, int dir) 3030static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
3028{ 3031{
3029 struct sk_buff *skb; 3032 struct sk_buff *skb;
3030 struct sadb_msg *hdr; 3033 struct sadb_msg *hdr;
@@ -3105,7 +3108,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3105 pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t); 3108 pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
3106 pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY; 3109 pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
3107 pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC; 3110 pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
3108 pol->sadb_x_policy_dir = dir+1; 3111 pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
3109 pol->sadb_x_policy_id = xp->index; 3112 pol->sadb_x_policy_id = xp->index;
3110 3113
3111 /* Set sadb_comb's. */ 3114 /* Set sadb_comb's. */
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
index 4b1e71751e10..147a8fd47a17 100644
--- a/net/l2tp/Kconfig
+++ b/net/l2tp/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig L2TP 5menuconfig L2TP
6 tristate "Layer Two Tunneling Protocol (L2TP)" 6 tristate "Layer Two Tunneling Protocol (L2TP)"
7 depends on (IPV6 || IPV6=n)
7 depends on INET 8 depends on INET
8 ---help--- 9 ---help---
9 Layer Two Tunneling Protocol 10 Layer Two Tunneling Protocol
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 3bfb34aaee29..37b8b8ba31f7 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -67,6 +67,7 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
67 return net_generic(net, l2tp_eth_net_id); 67 return net_generic(net, l2tp_eth_net_id);
68} 68}
69 69
70static struct lock_class_key l2tp_eth_tx_busylock;
70static int l2tp_eth_dev_init(struct net_device *dev) 71static int l2tp_eth_dev_init(struct net_device *dev)
71{ 72{
72 struct l2tp_eth *priv = netdev_priv(dev); 73 struct l2tp_eth *priv = netdev_priv(dev);
@@ -74,7 +75,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
74 priv->dev = dev; 75 priv->dev = dev;
75 eth_hw_addr_random(dev); 76 eth_hw_addr_random(dev);
76 memset(&dev->broadcast[0], 0xff, 6); 77 memset(&dev->broadcast[0], 0xff, 6);
77 78 dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
78 return 0; 79 return 0;
79} 80}
80 81
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 6f936358d664..6c4cc12c7414 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -78,7 +78,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
78 goto out; 78 goto out;
79 } 79 }
80 80
81 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 81 hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
82 &l2tp_nl_family, 0, L2TP_CMD_NOOP); 82 &l2tp_nl_family, 0, L2TP_CMD_NOOP);
83 if (!hdr) { 83 if (!hdr) {
84 ret = -EMSGSIZE; 84 ret = -EMSGSIZE;
@@ -87,7 +87,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
87 87
88 genlmsg_end(msg, hdr); 88 genlmsg_end(msg, hdr);
89 89
90 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); 90 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
91 91
92err_out: 92err_out:
93 nlmsg_free(msg); 93 nlmsg_free(msg);
@@ -235,7 +235,7 @@ out:
235 return ret; 235 return ret;
236} 236}
237 237
238static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, 238static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
239 struct l2tp_tunnel *tunnel) 239 struct l2tp_tunnel *tunnel)
240{ 240{
241 void *hdr; 241 void *hdr;
@@ -248,7 +248,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
248 struct l2tp_stats stats; 248 struct l2tp_stats stats;
249 unsigned int start; 249 unsigned int start;
250 250
251 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, 251 hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
252 L2TP_CMD_TUNNEL_GET); 252 L2TP_CMD_TUNNEL_GET);
253 if (!hdr) 253 if (!hdr)
254 return -EMSGSIZE; 254 return -EMSGSIZE;
@@ -359,12 +359,12 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
359 goto out; 359 goto out;
360 } 360 }
361 361
362 ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq, 362 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
363 NLM_F_ACK, tunnel); 363 NLM_F_ACK, tunnel);
364 if (ret < 0) 364 if (ret < 0)
365 goto err_out; 365 goto err_out;
366 366
367 return genlmsg_unicast(net, msg, info->snd_pid); 367 return genlmsg_unicast(net, msg, info->snd_portid);
368 368
369err_out: 369err_out:
370 nlmsg_free(msg); 370 nlmsg_free(msg);
@@ -384,7 +384,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
384 if (tunnel == NULL) 384 if (tunnel == NULL)
385 goto out; 385 goto out;
386 386
387 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid, 387 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
388 cb->nlh->nlmsg_seq, NLM_F_MULTI, 388 cb->nlh->nlmsg_seq, NLM_F_MULTI,
389 tunnel) <= 0) 389 tunnel) <= 0)
390 goto out; 390 goto out;
@@ -604,7 +604,7 @@ out:
604 return ret; 604 return ret;
605} 605}
606 606
607static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, 607static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
608 struct l2tp_session *session) 608 struct l2tp_session *session)
609{ 609{
610 void *hdr; 610 void *hdr;
@@ -616,7 +616,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
616 616
617 sk = tunnel->sock; 617 sk = tunnel->sock;
618 618
619 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); 619 hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
620 if (!hdr) 620 if (!hdr)
621 return -EMSGSIZE; 621 return -EMSGSIZE;
622 622
@@ -705,12 +705,12 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
705 goto out; 705 goto out;
706 } 706 }
707 707
708 ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq, 708 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
709 0, session); 709 0, session);
710 if (ret < 0) 710 if (ret < 0)
711 goto err_out; 711 goto err_out;
712 712
713 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); 713 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
714 714
715err_out: 715err_out:
716 nlmsg_free(msg); 716 nlmsg_free(msg);
@@ -742,7 +742,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
742 continue; 742 continue;
743 } 743 }
744 744
745 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid, 745 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
746 cb->nlh->nlmsg_seq, NLM_F_MULTI, 746 cb->nlh->nlmsg_seq, NLM_F_MULTI,
747 session) <= 0) 747 session) <= 0)
748 break; 748 break;
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index b2f2bac2c2a2..204a8351efff 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -25,253 +25,26 @@
25#include <net/llc_s_st.h> 25#include <net/llc_s_st.h>
26#include <net/llc_pdu.h> 26#include <net/llc_pdu.h>
27 27
28/**
29 * struct llc_station - LLC station component
30 *
31 * SAP and connection resource manager, one per adapter.
32 *
33 * @state: state of station
34 * @xid_r_count: XID response PDU counter
35 * @mac_sa: MAC source address
36 * @sap_list: list of related SAPs
37 * @ev_q: events entering state mach.
38 * @mac_pdu_q: PDUs ready to send to MAC
39 */
40struct llc_station {
41 u8 state;
42 u8 xid_r_count;
43 struct timer_list ack_timer;
44 u8 retry_count;
45 u8 maximum_retry;
46 struct {
47 struct sk_buff_head list;
48 spinlock_t lock;
49 } ev_q;
50 struct sk_buff_head mac_pdu_q;
51};
52
53#define LLC_STATION_ACK_TIME (3 * HZ)
54
55int sysctl_llc_station_ack_timeout = LLC_STATION_ACK_TIME;
56
57/* Types of events (possible values in 'ev->type') */
58#define LLC_STATION_EV_TYPE_SIMPLE 1
59#define LLC_STATION_EV_TYPE_CONDITION 2
60#define LLC_STATION_EV_TYPE_PRIM 3
61#define LLC_STATION_EV_TYPE_PDU 4 /* command/response PDU */
62#define LLC_STATION_EV_TYPE_ACK_TMR 5
63#define LLC_STATION_EV_TYPE_RPT_STATUS 6
64
65/* Events */
66#define LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK 1
67#define LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK 2
68#define LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY 3
69#define LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY 4
70#define LLC_STATION_EV_RX_NULL_DSAP_XID_C 5
71#define LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ 6
72#define LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ 7
73#define LLC_STATION_EV_RX_NULL_DSAP_TEST_C 8
74#define LLC_STATION_EV_DISABLE_REQ 9
75
76struct llc_station_state_ev {
77 u8 type;
78 u8 prim;
79 u8 prim_type;
80 u8 reason;
81 struct list_head node; /* node in station->ev_q.list */
82};
83
84static __inline__ struct llc_station_state_ev *
85 llc_station_ev(struct sk_buff *skb)
86{
87 return (struct llc_station_state_ev *)skb->cb;
88}
89
90typedef int (*llc_station_ev_t)(struct sk_buff *skb);
91
92#define LLC_STATION_STATE_DOWN 1 /* initial state */
93#define LLC_STATION_STATE_DUP_ADDR_CHK 2
94#define LLC_STATION_STATE_UP 3
95
96#define LLC_NBR_STATION_STATES 3 /* size of state table */
97
98typedef int (*llc_station_action_t)(struct sk_buff *skb);
99
100/* Station component state table structure */
101struct llc_station_state_trans {
102 llc_station_ev_t ev;
103 u8 next_state;
104 llc_station_action_t *ev_actions;
105};
106
107struct llc_station_state {
108 u8 curr_state;
109 struct llc_station_state_trans **transitions;
110};
111
112static struct llc_station llc_main_station;
113
114static int llc_stat_ev_enable_with_dup_addr_check(struct sk_buff *skb)
115{
116 struct llc_station_state_ev *ev = llc_station_ev(skb);
117
118 return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
119 ev->prim_type ==
120 LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK ? 0 : 1;
121}
122
123static int llc_stat_ev_enable_without_dup_addr_check(struct sk_buff *skb)
124{
125 struct llc_station_state_ev *ev = llc_station_ev(skb);
126
127 return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
128 ev->prim_type ==
129 LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK ? 0 : 1;
130}
131
132static int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct sk_buff *skb)
133{
134 struct llc_station_state_ev *ev = llc_station_ev(skb);
135
136 return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
137 llc_main_station.retry_count <
138 llc_main_station.maximum_retry ? 0 : 1;
139}
140
141static int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct sk_buff *skb)
142{
143 struct llc_station_state_ev *ev = llc_station_ev(skb);
144
145 return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
146 llc_main_station.retry_count ==
147 llc_main_station.maximum_retry ? 0 : 1;
148}
149
150static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb) 28static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
151{ 29{
152 struct llc_station_state_ev *ev = llc_station_ev(skb);
153 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 30 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
154 31
155 return ev->type == LLC_STATION_EV_TYPE_PDU && 32 return LLC_PDU_IS_CMD(pdu) && /* command PDU */
156 LLC_PDU_IS_CMD(pdu) && /* command PDU */
157 LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ 33 LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
158 LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID && 34 LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
159 !pdu->dsap ? 0 : 1; /* NULL DSAP value */ 35 !pdu->dsap ? 0 : 1; /* NULL DSAP value */
160} 36}
161 37
162static int llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq(struct sk_buff *skb)
163{
164 struct llc_station_state_ev *ev = llc_station_ev(skb);
165 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
166
167 return ev->type == LLC_STATION_EV_TYPE_PDU &&
168 LLC_PDU_IS_RSP(pdu) && /* response PDU */
169 LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
170 LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID &&
171 !pdu->dsap && /* NULL DSAP value */
172 !llc_main_station.xid_r_count ? 0 : 1;
173}
174
175static int llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq(struct sk_buff *skb)
176{
177 struct llc_station_state_ev *ev = llc_station_ev(skb);
178 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
179
180 return ev->type == LLC_STATION_EV_TYPE_PDU &&
181 LLC_PDU_IS_RSP(pdu) && /* response PDU */
182 LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
183 LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID &&
184 !pdu->dsap && /* NULL DSAP value */
185 llc_main_station.xid_r_count == 1 ? 0 : 1;
186}
187
188static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) 38static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
189{ 39{
190 struct llc_station_state_ev *ev = llc_station_ev(skb);
191 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); 40 struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
192 41
193 return ev->type == LLC_STATION_EV_TYPE_PDU && 42 return LLC_PDU_IS_CMD(pdu) && /* command PDU */
194 LLC_PDU_IS_CMD(pdu) && /* command PDU */
195 LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ 43 LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
196 LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST && 44 LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
197 !pdu->dsap ? 0 : 1; /* NULL DSAP */ 45 !pdu->dsap ? 0 : 1; /* NULL DSAP */
198} 46}
199 47
200static int llc_stat_ev_disable_req(struct sk_buff *skb)
201{
202 struct llc_station_state_ev *ev = llc_station_ev(skb);
203
204 return ev->type == LLC_STATION_EV_TYPE_PRIM &&
205 ev->prim == LLC_DISABLE_PRIM &&
206 ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
207}
208
209/**
210 * llc_station_send_pdu - queues PDU to send
211 * @skb: Address of the PDU
212 *
213 * Queues a PDU to send to the MAC layer.
214 */
215static void llc_station_send_pdu(struct sk_buff *skb)
216{
217 skb_queue_tail(&llc_main_station.mac_pdu_q, skb);
218 while ((skb = skb_dequeue(&llc_main_station.mac_pdu_q)) != NULL)
219 if (dev_queue_xmit(skb))
220 break;
221}
222
223static int llc_station_ac_start_ack_timer(struct sk_buff *skb)
224{
225 mod_timer(&llc_main_station.ack_timer,
226 jiffies + sysctl_llc_station_ack_timeout);
227 return 0;
228}
229
230static int llc_station_ac_set_retry_cnt_0(struct sk_buff *skb)
231{
232 llc_main_station.retry_count = 0;
233 return 0;
234}
235
236static int llc_station_ac_inc_retry_cnt_by_1(struct sk_buff *skb)
237{
238 llc_main_station.retry_count++;
239 return 0;
240}
241
242static int llc_station_ac_set_xid_r_cnt_0(struct sk_buff *skb)
243{
244 llc_main_station.xid_r_count = 0;
245 return 0;
246}
247
248static int llc_station_ac_inc_xid_r_cnt_by_1(struct sk_buff *skb)
249{
250 llc_main_station.xid_r_count++;
251 return 0;
252}
253
254static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
255{
256 int rc = 1;
257 struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
258 sizeof(struct llc_xid_info));
259
260 if (!nskb)
261 goto out;
262 llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD);
263 llc_pdu_init_as_xid_cmd(nskb, LLC_XID_NULL_CLASS_2, 127);
264 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, skb->dev->dev_addr);
265 if (unlikely(rc))
266 goto free;
267 llc_station_send_pdu(nskb);
268out:
269 return rc;
270free:
271 kfree_skb(nskb);
272 goto out;
273}
274
275static int llc_station_ac_send_xid_r(struct sk_buff *skb) 48static int llc_station_ac_send_xid_r(struct sk_buff *skb)
276{ 49{
277 u8 mac_da[ETH_ALEN], dsap; 50 u8 mac_da[ETH_ALEN], dsap;
@@ -289,7 +62,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
289 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); 62 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
290 if (unlikely(rc)) 63 if (unlikely(rc))
291 goto free; 64 goto free;
292 llc_station_send_pdu(nskb); 65 dev_queue_xmit(nskb);
293out: 66out:
294 return rc; 67 return rc;
295free: 68free:
@@ -318,7 +91,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
318 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); 91 rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
319 if (unlikely(rc)) 92 if (unlikely(rc))
320 goto free; 93 goto free;
321 llc_station_send_pdu(nskb); 94 dev_queue_xmit(nskb);
322out: 95out:
323 return rc; 96 return rc;
324free: 97free:
@@ -326,352 +99,6 @@ free:
326 goto out; 99 goto out;
327} 100}
328 101
329static int llc_station_ac_report_status(struct sk_buff *skb)
330{
331 return 0;
332}
333
334/* COMMON STATION STATE transitions */
335
336/* dummy last-transition indicator; common to all state transition groups
337 * last entry for this state
338 * all members are zeros, .bss zeroes it
339 */
340static struct llc_station_state_trans llc_stat_state_trans_end;
341
342/* DOWN STATE transitions */
343
344/* state transition for LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK event */
345static llc_station_action_t llc_stat_down_state_actions_1[] = {
346 [0] = llc_station_ac_start_ack_timer,
347 [1] = llc_station_ac_set_retry_cnt_0,
348 [2] = llc_station_ac_set_xid_r_cnt_0,
349 [3] = llc_station_ac_send_null_dsap_xid_c,
350 [4] = NULL,
351};
352
353static struct llc_station_state_trans llc_stat_down_state_trans_1 = {
354 .ev = llc_stat_ev_enable_with_dup_addr_check,
355 .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
356 .ev_actions = llc_stat_down_state_actions_1,
357};
358
359/* state transition for LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK event */
360static llc_station_action_t llc_stat_down_state_actions_2[] = {
361 [0] = llc_station_ac_report_status, /* STATION UP */
362 [1] = NULL,
363};
364
365static struct llc_station_state_trans llc_stat_down_state_trans_2 = {
366 .ev = llc_stat_ev_enable_without_dup_addr_check,
367 .next_state = LLC_STATION_STATE_UP,
368 .ev_actions = llc_stat_down_state_actions_2,
369};
370
371/* array of pointers; one to each transition */
372static struct llc_station_state_trans *llc_stat_dwn_state_trans[] = {
373 [0] = &llc_stat_down_state_trans_1,
374 [1] = &llc_stat_down_state_trans_2,
375 [2] = &llc_stat_state_trans_end,
376};
377
378/* UP STATE transitions */
379/* state transition for LLC_STATION_EV_DISABLE_REQ event */
380static llc_station_action_t llc_stat_up_state_actions_1[] = {
381 [0] = llc_station_ac_report_status, /* STATION DOWN */
382 [1] = NULL,
383};
384
385static struct llc_station_state_trans llc_stat_up_state_trans_1 = {
386 .ev = llc_stat_ev_disable_req,
387 .next_state = LLC_STATION_STATE_DOWN,
388 .ev_actions = llc_stat_up_state_actions_1,
389};
390
391/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */
392static llc_station_action_t llc_stat_up_state_actions_2[] = {
393 [0] = llc_station_ac_send_xid_r,
394 [1] = NULL,
395};
396
397static struct llc_station_state_trans llc_stat_up_state_trans_2 = {
398 .ev = llc_stat_ev_rx_null_dsap_xid_c,
399 .next_state = LLC_STATION_STATE_UP,
400 .ev_actions = llc_stat_up_state_actions_2,
401};
402
403/* state transition for LLC_STATION_EV_RX_NULL_DSAP_TEST_C event */
404static llc_station_action_t llc_stat_up_state_actions_3[] = {
405 [0] = llc_station_ac_send_test_r,
406 [1] = NULL,
407};
408
409static struct llc_station_state_trans llc_stat_up_state_trans_3 = {
410 .ev = llc_stat_ev_rx_null_dsap_test_c,
411 .next_state = LLC_STATION_STATE_UP,
412 .ev_actions = llc_stat_up_state_actions_3,
413};
414
415/* array of pointers; one to each transition */
416static struct llc_station_state_trans *llc_stat_up_state_trans [] = {
417 [0] = &llc_stat_up_state_trans_1,
418 [1] = &llc_stat_up_state_trans_2,
419 [2] = &llc_stat_up_state_trans_3,
420 [3] = &llc_stat_state_trans_end,
421};
422
423/* DUP ADDR CHK STATE transitions */
424/* state transition for LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ
425 * event
426 */
427static llc_station_action_t llc_stat_dupaddr_state_actions_1[] = {
428 [0] = llc_station_ac_inc_xid_r_cnt_by_1,
429 [1] = NULL,
430};
431
432static struct llc_station_state_trans llc_stat_dupaddr_state_trans_1 = {
433 .ev = llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq,
434 .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
435 .ev_actions = llc_stat_dupaddr_state_actions_1,
436};
437
438/* state transition for LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ
439 * event
440 */
441static llc_station_action_t llc_stat_dupaddr_state_actions_2[] = {
442 [0] = llc_station_ac_report_status, /* DUPLICATE ADDRESS FOUND */
443 [1] = NULL,
444};
445
446static struct llc_station_state_trans llc_stat_dupaddr_state_trans_2 = {
447 .ev = llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq,
448 .next_state = LLC_STATION_STATE_DOWN,
449 .ev_actions = llc_stat_dupaddr_state_actions_2,
450};
451
452/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */
453static llc_station_action_t llc_stat_dupaddr_state_actions_3[] = {
454 [0] = llc_station_ac_send_xid_r,
455 [1] = NULL,
456};
457
458static struct llc_station_state_trans llc_stat_dupaddr_state_trans_3 = {
459 .ev = llc_stat_ev_rx_null_dsap_xid_c,
460 .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
461 .ev_actions = llc_stat_dupaddr_state_actions_3,
462};
463
464/* state transition for LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY
465 * event
466 */
467static llc_station_action_t llc_stat_dupaddr_state_actions_4[] = {
468 [0] = llc_station_ac_start_ack_timer,
469 [1] = llc_station_ac_inc_retry_cnt_by_1,
470 [2] = llc_station_ac_set_xid_r_cnt_0,
471 [3] = llc_station_ac_send_null_dsap_xid_c,
472 [4] = NULL,
473};
474
475static struct llc_station_state_trans llc_stat_dupaddr_state_trans_4 = {
476 .ev = llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry,
477 .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
478 .ev_actions = llc_stat_dupaddr_state_actions_4,
479};
480
481/* state transition for LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY
482 * event
483 */
484static llc_station_action_t llc_stat_dupaddr_state_actions_5[] = {
485 [0] = llc_station_ac_report_status, /* STATION UP */
486 [1] = NULL,
487};
488
489static struct llc_station_state_trans llc_stat_dupaddr_state_trans_5 = {
490 .ev = llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry,
491 .next_state = LLC_STATION_STATE_UP,
492 .ev_actions = llc_stat_dupaddr_state_actions_5,
493};
494
495/* state transition for LLC_STATION_EV_DISABLE_REQ event */
496static llc_station_action_t llc_stat_dupaddr_state_actions_6[] = {
497 [0] = llc_station_ac_report_status, /* STATION DOWN */
498 [1] = NULL,
499};
500
501static struct llc_station_state_trans llc_stat_dupaddr_state_trans_6 = {
502 .ev = llc_stat_ev_disable_req,
503 .next_state = LLC_STATION_STATE_DOWN,
504 .ev_actions = llc_stat_dupaddr_state_actions_6,
505};
506
507/* array of pointers; one to each transition */
508static struct llc_station_state_trans *llc_stat_dupaddr_state_trans[] = {
509 [0] = &llc_stat_dupaddr_state_trans_6, /* Request */
510 [1] = &llc_stat_dupaddr_state_trans_4, /* Timer */
511 [2] = &llc_stat_dupaddr_state_trans_5,
512 [3] = &llc_stat_dupaddr_state_trans_1, /* Receive frame */
513 [4] = &llc_stat_dupaddr_state_trans_2,
514 [5] = &llc_stat_dupaddr_state_trans_3,
515 [6] = &llc_stat_state_trans_end,
516};
517
518static struct llc_station_state
519 llc_station_state_table[LLC_NBR_STATION_STATES] = {
520 [LLC_STATION_STATE_DOWN - 1] = {
521 .curr_state = LLC_STATION_STATE_DOWN,
522 .transitions = llc_stat_dwn_state_trans,
523 },
524 [LLC_STATION_STATE_DUP_ADDR_CHK - 1] = {
525 .curr_state = LLC_STATION_STATE_DUP_ADDR_CHK,
526 .transitions = llc_stat_dupaddr_state_trans,
527 },
528 [LLC_STATION_STATE_UP - 1] = {
529 .curr_state = LLC_STATION_STATE_UP,
530 .transitions = llc_stat_up_state_trans,
531 },
532};
533
534/**
535 * llc_exec_station_trans_actions - executes actions for transition
536 * @trans: Address of the transition
537 * @skb: Address of the event that caused the transition
538 *
539 * Executes actions of a transition of the station state machine. Returns
540 * 0 if all actions complete successfully, nonzero otherwise.
541 */
542static u16 llc_exec_station_trans_actions(struct llc_station_state_trans *trans,
543 struct sk_buff *skb)
544{
545 u16 rc = 0;
546 llc_station_action_t *next_action = trans->ev_actions;
547
548 for (; next_action && *next_action; next_action++)
549 if ((*next_action)(skb))
550 rc = 1;
551 return rc;
552}
553
554/**
555 * llc_find_station_trans - finds transition for this event
556 * @skb: Address of the event
557 *
558 * Search thru events of the current state of the station until list
559 * exhausted or it's obvious that the event is not valid for the current
560 * state. Returns the address of the transition if cound, %NULL otherwise.
561 */
562static struct llc_station_state_trans *
563 llc_find_station_trans(struct sk_buff *skb)
564{
565 int i = 0;
566 struct llc_station_state_trans *rc = NULL;
567 struct llc_station_state_trans **next_trans;
568 struct llc_station_state *curr_state =
569 &llc_station_state_table[llc_main_station.state - 1];
570
571 for (next_trans = curr_state->transitions; next_trans[i]->ev; i++)
572 if (!next_trans[i]->ev(skb)) {
573 rc = next_trans[i];
574 break;
575 }
576 return rc;
577}
578
579/**
580 * llc_station_free_ev - frees an event
581 * @skb: Address of the event
582 *
583 * Frees an event.
584 */
585static void llc_station_free_ev(struct sk_buff *skb)
586{
587 struct llc_station_state_ev *ev = llc_station_ev(skb);
588
589 if (ev->type == LLC_STATION_EV_TYPE_PDU)
590 kfree_skb(skb);
591}
592
593/**
594 * llc_station_next_state - processes event and goes to the next state
595 * @skb: Address of the event
596 *
597 * Processes an event, executes any transitions related to that event and
598 * updates the state of the station.
599 */
600static u16 llc_station_next_state(struct sk_buff *skb)
601{
602 u16 rc = 1;
603 struct llc_station_state_trans *trans;
604
605 if (llc_main_station.state > LLC_NBR_STATION_STATES)
606 goto out;
607 trans = llc_find_station_trans(skb);
608 if (trans) {
609 /* got the state to which we next transition; perform the
610 * actions associated with this transition before actually
611 * transitioning to the next state
612 */
613 rc = llc_exec_station_trans_actions(trans, skb);
614 if (!rc)
615 /* transition station to next state if all actions
616 * execute successfully; done; wait for next event
617 */
618 llc_main_station.state = trans->next_state;
619 } else
620 /* event not recognized in current state; re-queue it for
621 * processing again at a later time; return failure
622 */
623 rc = 0;
624out:
625 llc_station_free_ev(skb);
626 return rc;
627}
628
629/**
630 * llc_station_service_events - service events in the queue
631 *
632 * Get an event from the station event queue (if any); attempt to service
633 * the event; if event serviced, get the next event (if any) on the event
634 * queue; if event not service, re-queue the event on the event queue and
635 * attempt to service the next event; when serviced all events in queue,
636 * finished; if don't transition to different state, just service all
637 * events once; if transition to new state, service all events again.
638 * Caller must hold llc_main_station.ev_q.lock.
639 */
640static void llc_station_service_events(void)
641{
642 struct sk_buff *skb;
643
644 while ((skb = skb_dequeue(&llc_main_station.ev_q.list)) != NULL)
645 llc_station_next_state(skb);
646}
647
648/**
649 * llc_station_state_process - queue event and try to process queue.
650 * @skb: Address of the event
651 *
652 * Queues an event (on the station event queue) for handling by the
653 * station state machine and attempts to process any queued-up events.
654 */
655static void llc_station_state_process(struct sk_buff *skb)
656{
657 spin_lock_bh(&llc_main_station.ev_q.lock);
658 skb_queue_tail(&llc_main_station.ev_q.list, skb);
659 llc_station_service_events();
660 spin_unlock_bh(&llc_main_station.ev_q.lock);
661}
662
663static void llc_station_ack_tmr_cb(unsigned long timeout_data)
664{
665 struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
666
667 if (skb) {
668 struct llc_station_state_ev *ev = llc_station_ev(skb);
669
670 ev->type = LLC_STATION_EV_TYPE_ACK_TMR;
671 llc_station_state_process(skb);
672 }
673}
674
675/** 102/**
676 * llc_station_rcv - send received pdu to the station state machine 103 * llc_station_rcv - send received pdu to the station state machine
677 * @skb: received frame. 104 * @skb: received frame.
@@ -680,24 +107,15 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
680 */ 107 */
681static void llc_station_rcv(struct sk_buff *skb) 108static void llc_station_rcv(struct sk_buff *skb)
682{ 109{
683 struct llc_station_state_ev *ev = llc_station_ev(skb); 110 if (llc_stat_ev_rx_null_dsap_xid_c(skb))
684 111 llc_station_ac_send_xid_r(skb);
685 ev->type = LLC_STATION_EV_TYPE_PDU; 112 else if (llc_stat_ev_rx_null_dsap_test_c(skb))
686 ev->reason = 0; 113 llc_station_ac_send_test_r(skb);
687 llc_station_state_process(skb); 114 kfree_skb(skb);
688} 115}
689 116
690void __init llc_station_init(void) 117void __init llc_station_init(void)
691{ 118{
692 skb_queue_head_init(&llc_main_station.mac_pdu_q);
693 skb_queue_head_init(&llc_main_station.ev_q.list);
694 spin_lock_init(&llc_main_station.ev_q.lock);
695 setup_timer(&llc_main_station.ack_timer, llc_station_ack_tmr_cb,
696 (unsigned long)&llc_main_station);
697 llc_main_station.ack_timer.expires = jiffies +
698 sysctl_llc_station_ack_timeout;
699 llc_main_station.maximum_retry = 1;
700 llc_main_station.state = LLC_STATION_STATE_UP;
701 llc_set_station_handler(llc_station_rcv); 119 llc_set_station_handler(llc_station_rcv);
702} 120}
703 121
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index d75306b9c2f3..612a5ddaf93b 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -47,13 +47,6 @@ static struct ctl_table llc2_timeout_table[] = {
47}; 47};
48 48
49static struct ctl_table llc_station_table[] = { 49static struct ctl_table llc_station_table[] = {
50 {
51 .procname = "ack_timeout",
52 .data = &sysctl_llc_station_ack_timeout,
53 .maxlen = sizeof(long),
54 .mode = 0644,
55 .proc_handler = proc_dointvec_jiffies,
56 },
57 { }, 50 { },
58}; 51};
59 52
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 8dfd70d8fcfb..a04752e91023 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -38,14 +38,10 @@ static void gf_mulx(u8 *pad)
38static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, 38static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
39 const u8 *addr[], const size_t *len, u8 *mac) 39 const u8 *addr[], const size_t *len, u8 *mac)
40{ 40{
41 u8 scratch[2 * AES_BLOCK_SIZE]; 41 u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
42 u8 *cbc, *pad;
43 const u8 *pos, *end; 42 const u8 *pos, *end;
44 size_t i, e, left, total_len; 43 size_t i, e, left, total_len;
45 44
46 cbc = scratch;
47 pad = scratch + AES_BLOCK_SIZE;
48
49 memset(cbc, 0, AES_BLOCK_SIZE); 45 memset(cbc, 0, AES_BLOCK_SIZE);
50 46
51 total_len = 0; 47 total_len = 0;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index d0deb3edae21..3195a6307f50 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -869,7 +869,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
869 869
870 } else { 870 } else {
871 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 871 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
872 true); 872 false);
873 } 873 }
874 874
875 out: 875 out:
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a58c0b649ba1..05f3a313db88 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -20,7 +20,8 @@
20#include "rate.h" 20#include "rate.h"
21#include "mesh.h" 21#include "mesh.h"
22 22
23static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, char *name, 23static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
24 const char *name,
24 enum nl80211_iftype type, 25 enum nl80211_iftype type,
25 u32 *flags, 26 u32 *flags,
26 struct vif_params *params) 27 struct vif_params *params)
@@ -102,6 +103,18 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
102 return 0; 103 return 0;
103} 104}
104 105
106static int ieee80211_start_p2p_device(struct wiphy *wiphy,
107 struct wireless_dev *wdev)
108{
109 return ieee80211_do_open(wdev, true);
110}
111
112static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
113 struct wireless_dev *wdev)
114{
115 ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
116}
117
105static int ieee80211_set_noack_map(struct wiphy *wiphy, 118static int ieee80211_set_noack_map(struct wiphy *wiphy,
106 struct net_device *dev, 119 struct net_device *dev,
107 u16 noack_map) 120 u16 noack_map)
@@ -158,6 +171,38 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
158 } 171 }
159 } 172 }
160 173
174 switch (sdata->vif.type) {
175 case NL80211_IFTYPE_STATION:
176 if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
177 key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
178 break;
179 case NL80211_IFTYPE_AP:
180 case NL80211_IFTYPE_AP_VLAN:
181 /* Keys without a station are used for TX only */
182 if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
183 key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
184 break;
185 case NL80211_IFTYPE_ADHOC:
186 /* no MFP (yet) */
187 break;
188 case NL80211_IFTYPE_MESH_POINT:
189#ifdef CONFIG_MAC80211_MESH
190 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
191 key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
192 break;
193#endif
194 case NL80211_IFTYPE_WDS:
195 case NL80211_IFTYPE_MONITOR:
196 case NL80211_IFTYPE_P2P_DEVICE:
197 case NL80211_IFTYPE_UNSPECIFIED:
198 case NUM_NL80211_IFTYPES:
199 case NL80211_IFTYPE_P2P_CLIENT:
200 case NL80211_IFTYPE_P2P_GO:
201 /* shouldn't happen */
202 WARN_ON_ONCE(1);
203 break;
204 }
205
161 err = ieee80211_key_link(key, sdata, sta); 206 err = ieee80211_key_link(key, sdata, sta);
162 if (err) 207 if (err)
163 ieee80211_key_free(sdata->local, key); 208 ieee80211_key_free(sdata->local, key);
@@ -330,7 +375,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
330 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) { 375 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
331 struct ieee80211_supported_band *sband; 376 struct ieee80211_supported_band *sband;
332 sband = sta->local->hw.wiphy->bands[ 377 sband = sta->local->hw.wiphy->bands[
333 sta->local->hw.conf.channel->band]; 378 sta->local->oper_channel->band];
334 rate->legacy = sband->bitrates[idx].bitrate; 379 rate->legacy = sband->bitrates[idx].bitrate;
335 } else 380 } else
336 rate->mcs = idx; 381 rate->mcs = idx;
@@ -725,25 +770,23 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
725static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, 770static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
726 const u8 *resp, size_t resp_len) 771 const u8 *resp, size_t resp_len)
727{ 772{
728 struct sk_buff *new, *old; 773 struct probe_resp *new, *old;
729 774
730 if (!resp || !resp_len) 775 if (!resp || !resp_len)
731 return 1; 776 return 1;
732 777
733 old = rtnl_dereference(sdata->u.ap.probe_resp); 778 old = rtnl_dereference(sdata->u.ap.probe_resp);
734 779
735 new = dev_alloc_skb(resp_len); 780 new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
736 if (!new) 781 if (!new)
737 return -ENOMEM; 782 return -ENOMEM;
738 783
739 memcpy(skb_put(new, resp_len), resp, resp_len); 784 new->len = resp_len;
785 memcpy(new->data, resp, resp_len);
740 786
741 rcu_assign_pointer(sdata->u.ap.probe_resp, new); 787 rcu_assign_pointer(sdata->u.ap.probe_resp, new);
742 if (old) { 788 if (old)
743 /* TODO: use call_rcu() */ 789 kfree_rcu(old, rcu_head);
744 synchronize_rcu();
745 dev_kfree_skb(old);
746 }
747 790
748 return 0; 791 return 0;
749} 792}
@@ -950,7 +993,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
950 /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) 993 /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
951 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ 994 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
952 995
953 memset(msg->da, 0xff, ETH_ALEN); 996 eth_broadcast_addr(msg->da);
954 memcpy(msg->sa, sta->sta.addr, ETH_ALEN); 997 memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
955 msg->len = htons(6); 998 msg->len = htons(6);
956 msg->dsap = 0; 999 msg->dsap = 0;
@@ -1285,9 +1328,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1285 mutex_unlock(&local->sta_mtx); 1328 mutex_unlock(&local->sta_mtx);
1286 1329
1287 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1330 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1288 params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) 1331 params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
1289 ieee80211_recalc_ps(local, -1); 1332 ieee80211_recalc_ps(local, -1);
1290 1333 ieee80211_recalc_ps_vif(sdata);
1334 }
1291 return 0; 1335 return 0;
1292} 1336}
1293 1337
@@ -1660,7 +1704,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1660 } 1704 }
1661 1705
1662 if (!sdata->vif.bss_conf.use_short_slot && 1706 if (!sdata->vif.bss_conf.use_short_slot &&
1663 sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) { 1707 sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
1664 sdata->vif.bss_conf.use_short_slot = true; 1708 sdata->vif.bss_conf.use_short_slot = true;
1665 changed |= BSS_CHANGED_ERP_SLOT; 1709 changed |= BSS_CHANGED_ERP_SLOT;
1666 } 1710 }
@@ -1774,6 +1818,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
1774 case NL80211_IFTYPE_ADHOC: 1818 case NL80211_IFTYPE_ADHOC:
1775 case NL80211_IFTYPE_MESH_POINT: 1819 case NL80211_IFTYPE_MESH_POINT:
1776 case NL80211_IFTYPE_P2P_CLIENT: 1820 case NL80211_IFTYPE_P2P_CLIENT:
1821 case NL80211_IFTYPE_P2P_DEVICE:
1777 break; 1822 break;
1778 case NL80211_IFTYPE_P2P_GO: 1823 case NL80211_IFTYPE_P2P_GO:
1779 if (sdata->local->ops->hw_scan) 1824 if (sdata->local->ops->hw_scan)
@@ -1926,7 +1971,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
1926 enum nl80211_tx_power_setting type, int mbm) 1971 enum nl80211_tx_power_setting type, int mbm)
1927{ 1972{
1928 struct ieee80211_local *local = wiphy_priv(wiphy); 1973 struct ieee80211_local *local = wiphy_priv(wiphy);
1929 struct ieee80211_channel *chan = local->hw.conf.channel; 1974 struct ieee80211_channel *chan = local->oper_channel;
1930 u32 changes = 0; 1975 u32 changes = 0;
1931 1976
1932 switch (type) { 1977 switch (type) {
@@ -2026,9 +2071,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
2026 */ 2071 */
2027 if (!sdata->u.mgd.associated || 2072 if (!sdata->u.mgd.associated ||
2028 sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) { 2073 sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) {
2029 mutex_lock(&sdata->local->iflist_mtx);
2030 ieee80211_recalc_smps(sdata->local); 2074 ieee80211_recalc_smps(sdata->local);
2031 mutex_unlock(&sdata->local->iflist_mtx);
2032 return 0; 2075 return 0;
2033 } 2076 }
2034 2077
@@ -2078,6 +2121,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2078 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 2121 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
2079 2122
2080 ieee80211_recalc_ps(local, -1); 2123 ieee80211_recalc_ps(local, -1);
2124 ieee80211_recalc_ps_vif(sdata);
2081 2125
2082 return 0; 2126 return 0;
2083} 2127}
@@ -2460,6 +2504,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2460 if (!sdata->u.mgd.associated) 2504 if (!sdata->u.mgd.associated)
2461 need_offchan = true; 2505 need_offchan = true;
2462 break; 2506 break;
2507 case NL80211_IFTYPE_P2P_DEVICE:
2508 need_offchan = true;
2509 break;
2463 default: 2510 default:
2464 return -EOPNOTSUPP; 2511 return -EOPNOTSUPP;
2465 } 2512 }
@@ -2652,6 +2699,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2652 u16 status_code, struct sk_buff *skb) 2699 u16 status_code, struct sk_buff *skb)
2653{ 2700{
2654 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2701 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2702 struct ieee80211_local *local = sdata->local;
2655 struct ieee80211_tdls_data *tf; 2703 struct ieee80211_tdls_data *tf;
2656 2704
2657 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); 2705 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2671,8 +2719,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2671 tf->u.setup_req.capability = 2719 tf->u.setup_req.capability =
2672 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2720 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2673 2721
2674 ieee80211_add_srates_ie(sdata, skb, false); 2722 ieee80211_add_srates_ie(sdata, skb, false,
2675 ieee80211_add_ext_srates_ie(sdata, skb, false); 2723 local->oper_channel->band);
2724 ieee80211_add_ext_srates_ie(sdata, skb, false,
2725 local->oper_channel->band);
2676 ieee80211_tdls_add_ext_capab(skb); 2726 ieee80211_tdls_add_ext_capab(skb);
2677 break; 2727 break;
2678 case WLAN_TDLS_SETUP_RESPONSE: 2728 case WLAN_TDLS_SETUP_RESPONSE:
@@ -2685,8 +2735,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2685 tf->u.setup_resp.capability = 2735 tf->u.setup_resp.capability =
2686 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2736 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2687 2737
2688 ieee80211_add_srates_ie(sdata, skb, false); 2738 ieee80211_add_srates_ie(sdata, skb, false,
2689 ieee80211_add_ext_srates_ie(sdata, skb, false); 2739 local->oper_channel->band);
2740 ieee80211_add_ext_srates_ie(sdata, skb, false,
2741 local->oper_channel->band);
2690 ieee80211_tdls_add_ext_capab(skb); 2742 ieee80211_tdls_add_ext_capab(skb);
2691 break; 2743 break;
2692 case WLAN_TDLS_SETUP_CONFIRM: 2744 case WLAN_TDLS_SETUP_CONFIRM:
@@ -2724,6 +2776,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2724 u16 status_code, struct sk_buff *skb) 2776 u16 status_code, struct sk_buff *skb)
2725{ 2777{
2726 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2778 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2779 struct ieee80211_local *local = sdata->local;
2727 struct ieee80211_mgmt *mgmt; 2780 struct ieee80211_mgmt *mgmt;
2728 2781
2729 mgmt = (void *)skb_put(skb, 24); 2782 mgmt = (void *)skb_put(skb, 24);
@@ -2746,8 +2799,10 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2746 mgmt->u.action.u.tdls_discover_resp.capability = 2799 mgmt->u.action.u.tdls_discover_resp.capability =
2747 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2800 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2748 2801
2749 ieee80211_add_srates_ie(sdata, skb, false); 2802 ieee80211_add_srates_ie(sdata, skb, false,
2750 ieee80211_add_ext_srates_ie(sdata, skb, false); 2803 local->oper_channel->band);
2804 ieee80211_add_ext_srates_ie(sdata, skb, false,
2805 local->oper_channel->band);
2751 ieee80211_tdls_add_ext_capab(skb); 2806 ieee80211_tdls_add_ext_capab(skb);
2752 break; 2807 break;
2753 default: 2808 default:
@@ -3004,6 +3059,8 @@ struct cfg80211_ops mac80211_config_ops = {
3004 .add_virtual_intf = ieee80211_add_iface, 3059 .add_virtual_intf = ieee80211_add_iface,
3005 .del_virtual_intf = ieee80211_del_iface, 3060 .del_virtual_intf = ieee80211_del_iface,
3006 .change_virtual_intf = ieee80211_change_iface, 3061 .change_virtual_intf = ieee80211_change_iface,
3062 .start_p2p_device = ieee80211_start_p2p_device,
3063 .stop_p2p_device = ieee80211_stop_p2p_device,
3007 .add_key = ieee80211_add_key, 3064 .add_key = ieee80211_add_key,
3008 .del_key = ieee80211_del_key, 3065 .del_key = ieee80211_del_key,
3009 .get_key = ieee80211_get_key, 3066 .get_key = ieee80211_get_key,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f0f87e5a1d35..0bfc914ddd15 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -68,16 +68,14 @@ ieee80211_get_channel_mode(struct ieee80211_local *local,
68 return mode; 68 return mode;
69} 69}
70 70
71bool ieee80211_set_channel_type(struct ieee80211_local *local, 71static enum nl80211_channel_type
72 struct ieee80211_sub_if_data *sdata, 72ieee80211_get_superchan(struct ieee80211_local *local,
73 enum nl80211_channel_type chantype) 73 struct ieee80211_sub_if_data *sdata)
74{ 74{
75 struct ieee80211_sub_if_data *tmp;
76 enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT; 75 enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
77 bool result; 76 struct ieee80211_sub_if_data *tmp;
78 77
79 mutex_lock(&local->iflist_mtx); 78 mutex_lock(&local->iflist_mtx);
80
81 list_for_each_entry(tmp, &local->interfaces, list) { 79 list_for_each_entry(tmp, &local->interfaces, list) {
82 if (tmp == sdata) 80 if (tmp == sdata)
83 continue; 81 continue;
@@ -103,39 +101,70 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
103 break; 101 break;
104 } 102 }
105 } 103 }
104 mutex_unlock(&local->iflist_mtx);
106 105
107 switch (superchan) { 106 return superchan;
107}
108
109static bool
110ieee80211_channel_types_are_compatible(enum nl80211_channel_type chantype1,
111 enum nl80211_channel_type chantype2,
112 enum nl80211_channel_type *compat)
113{
114 /*
115 * start out with chantype1 being the result,
116 * overwriting later if needed
117 */
118 if (compat)
119 *compat = chantype1;
120
121 switch (chantype1) {
108 case NL80211_CHAN_NO_HT: 122 case NL80211_CHAN_NO_HT:
123 if (compat)
124 *compat = chantype2;
125 break;
109 case NL80211_CHAN_HT20: 126 case NL80211_CHAN_HT20:
110 /* 127 /*
111 * allow any change that doesn't go to no-HT 128 * allow any change that doesn't go to no-HT
112 * (if it already is no-HT no change is needed) 129 * (if it already is no-HT no change is needed)
113 */ 130 */
114 if (chantype == NL80211_CHAN_NO_HT) 131 if (chantype2 == NL80211_CHAN_NO_HT)
115 break; 132 break;
116 superchan = chantype; 133 if (compat)
134 *compat = chantype2;
117 break; 135 break;
118 case NL80211_CHAN_HT40PLUS: 136 case NL80211_CHAN_HT40PLUS:
119 case NL80211_CHAN_HT40MINUS: 137 case NL80211_CHAN_HT40MINUS:
120 /* allow smaller bandwidth and same */ 138 /* allow smaller bandwidth and same */
121 if (chantype == NL80211_CHAN_NO_HT) 139 if (chantype2 == NL80211_CHAN_NO_HT)
122 break; 140 break;
123 if (chantype == NL80211_CHAN_HT20) 141 if (chantype2 == NL80211_CHAN_HT20)
124 break; 142 break;
125 if (superchan == chantype) 143 if (chantype2 == chantype1)
126 break; 144 break;
127 result = false; 145 return false;
128 goto out;
129 } 146 }
130 147
131 local->_oper_channel_type = superchan; 148 return true;
149}
150
151bool ieee80211_set_channel_type(struct ieee80211_local *local,
152 struct ieee80211_sub_if_data *sdata,
153 enum nl80211_channel_type chantype)
154{
155 enum nl80211_channel_type superchan;
156 enum nl80211_channel_type compatchan;
157
158 superchan = ieee80211_get_superchan(local, sdata);
159 if (!ieee80211_channel_types_are_compatible(superchan, chantype,
160 &compatchan))
161 return false;
162
163 local->_oper_channel_type = compatchan;
132 164
133 if (sdata) 165 if (sdata)
134 sdata->vif.bss_conf.channel_type = chantype; 166 sdata->vif.bss_conf.channel_type = chantype;
135 167
136 result = true; 168 return true;
137 out:
138 mutex_unlock(&local->iflist_mtx);
139 169
140 return result;
141} 170}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b8dfb440c8ef..466f4b45dd94 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -63,8 +63,6 @@ DEBUGFS_READONLY_FILE(user_power, "%d",
63 local->user_power_level); 63 local->user_power_level);
64DEBUGFS_READONLY_FILE(power, "%d", 64DEBUGFS_READONLY_FILE(power, "%d",
65 local->hw.conf.power_level); 65 local->hw.conf.power_level);
66DEBUGFS_READONLY_FILE(frequency, "%d",
67 local->hw.conf.channel->center_freq);
68DEBUGFS_READONLY_FILE(total_ps_buffered, "%d", 66DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
69 local->total_ps_buffered); 67 local->total_ps_buffered);
70DEBUGFS_READONLY_FILE(wep_iv, "%#08x", 68DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -72,6 +70,7 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
72DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s", 70DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
73 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver"); 71 local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
74 72
73#ifdef CONFIG_PM
75static ssize_t reset_write(struct file *file, const char __user *user_buf, 74static ssize_t reset_write(struct file *file, const char __user *user_buf,
76 size_t count, loff_t *ppos) 75 size_t count, loff_t *ppos)
77{ 76{
@@ -90,33 +89,7 @@ static const struct file_operations reset_ops = {
90 .open = simple_open, 89 .open = simple_open,
91 .llseek = noop_llseek, 90 .llseek = noop_llseek,
92}; 91};
93 92#endif
94static ssize_t channel_type_read(struct file *file, char __user *user_buf,
95 size_t count, loff_t *ppos)
96{
97 struct ieee80211_local *local = file->private_data;
98 const char *buf;
99
100 switch (local->hw.conf.channel_type) {
101 case NL80211_CHAN_NO_HT:
102 buf = "no ht\n";
103 break;
104 case NL80211_CHAN_HT20:
105 buf = "ht20\n";
106 break;
107 case NL80211_CHAN_HT40MINUS:
108 buf = "ht40-\n";
109 break;
110 case NL80211_CHAN_HT40PLUS:
111 buf = "ht40+\n";
112 break;
113 default:
114 buf = "???";
115 break;
116 }
117
118 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
119}
120 93
121static ssize_t hwflags_read(struct file *file, char __user *user_buf, 94static ssize_t hwflags_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos) 95 size_t count, loff_t *ppos)
@@ -205,7 +178,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
205} 178}
206 179
207DEBUGFS_READONLY_FILE_OPS(hwflags); 180DEBUGFS_READONLY_FILE_OPS(hwflags);
208DEBUGFS_READONLY_FILE_OPS(channel_type);
209DEBUGFS_READONLY_FILE_OPS(queues); 181DEBUGFS_READONLY_FILE_OPS(queues);
210 182
211/* statistics stuff */ 183/* statistics stuff */
@@ -272,12 +244,12 @@ void debugfs_hw_add(struct ieee80211_local *local)
272 244
273 local->debugfs.keys = debugfs_create_dir("keys", phyd); 245 local->debugfs.keys = debugfs_create_dir("keys", phyd);
274 246
275 DEBUGFS_ADD(frequency);
276 DEBUGFS_ADD(total_ps_buffered); 247 DEBUGFS_ADD(total_ps_buffered);
277 DEBUGFS_ADD(wep_iv); 248 DEBUGFS_ADD(wep_iv);
278 DEBUGFS_ADD(queues); 249 DEBUGFS_ADD(queues);
250#ifdef CONFIG_PM
279 DEBUGFS_ADD_MODE(reset, 0200); 251 DEBUGFS_ADD_MODE(reset, 0200);
280 DEBUGFS_ADD(channel_type); 252#endif
281 DEBUGFS_ADD(hwflags); 253 DEBUGFS_ADD(hwflags);
282 DEBUGFS_ADD(user_power); 254 DEBUGFS_ADD(user_power);
283 DEBUGFS_ADD(power); 255 DEBUGFS_ADD(power);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index df9203199102..da9003b20004 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -9,7 +9,7 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
9{ 9{
10 WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), 10 WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
11 "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", 11 "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
12 sdata->dev->name, sdata->flags); 12 sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
13} 13}
14 14
15static inline struct ieee80211_sub_if_data * 15static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
22 return sdata; 22 return sdata;
23} 23}
24 24
25static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb) 25static inline void drv_tx(struct ieee80211_local *local,
26 struct ieee80211_tx_control *control,
27 struct sk_buff *skb)
26{ 28{
27 local->ops->tx(&local->hw, skb); 29 local->ops->tx(&local->hw, control, skb);
28} 30}
29 31
30static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, 32static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
@@ -526,6 +528,9 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
526 sdata = get_bss_sdata(sdata); 528 sdata = get_bss_sdata(sdata);
527 check_sdata_in_driver(sdata); 529 check_sdata_in_driver(sdata);
528 530
531 WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
532 sdata->vif.type != NL80211_IFTYPE_ADHOC);
533
529 trace_drv_sta_rc_update(local, sdata, sta, changed); 534 trace_drv_sta_rc_update(local, sdata, sta, changed);
530 if (local->ops->sta_rc_update) 535 if (local->ops->sta_rc_update)
531 local->ops->sta_rc_update(&local->hw, &sdata->vif, 536 local->ops->sta_rc_update(&local->hw, &sdata->vif,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 5746d62faba1..5f3620f0bc0a 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -109,7 +109,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
109 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 109 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
110 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 110 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
111 IEEE80211_STYPE_PROBE_RESP); 111 IEEE80211_STYPE_PROBE_RESP);
112 memset(mgmt->da, 0xff, ETH_ALEN); 112 eth_broadcast_addr(mgmt->da);
113 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 113 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
114 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); 114 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
115 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); 115 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
205 mod_timer(&ifibss->timer, 205 mod_timer(&ifibss->timer,
206 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); 206 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
207 207
208 bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel, 208 bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
209 mgmt, skb->len, 0, GFP_KERNEL); 209 mgmt, skb->len, 0, GFP_KERNEL);
210 cfg80211_put_bss(bss); 210 cfg80211_put_bss(bss);
211 netif_carrier_on(sdata->dev); 211 netif_carrier_on(sdata->dev);
@@ -278,7 +278,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
278 if (auth && !sdata->u.ibss.auth_frame_registrations) { 278 if (auth && !sdata->u.ibss.auth_frame_registrations) {
279 ibss_dbg(sdata, 279 ibss_dbg(sdata,
280 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", 280 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
281 sdata->vif.addr, sdata->u.ibss.bssid, addr); 281 sdata->vif.addr, addr, sdata->u.ibss.bssid);
282 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0, 282 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
283 addr, sdata->u.ibss.bssid, NULL, 0, 0); 283 addr, sdata->u.ibss.bssid, NULL, 0, 0);
284 } 284 }
@@ -294,7 +294,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
294 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 294 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
295 struct ieee80211_local *local = sdata->local; 295 struct ieee80211_local *local = sdata->local;
296 struct sta_info *sta; 296 struct sta_info *sta;
297 int band = local->hw.conf.channel->band; 297 int band = local->oper_channel->band;
298 298
299 /* 299 /*
300 * XXX: Consider removing the least recently used entry and 300 * XXX: Consider removing the least recently used entry and
@@ -332,11 +332,27 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
332 return ieee80211_ibss_finish_sta(sta, auth); 332 return ieee80211_ibss_finish_sta(sta, auth);
333} 333}
334 334
335static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
336 struct ieee80211_mgmt *mgmt,
337 size_t len)
338{
339 u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code);
340
341 if (len < IEEE80211_DEAUTH_FRAME_LEN)
342 return;
343
344 ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
345 mgmt->sa, mgmt->da, mgmt->bssid, reason);
346 sta_info_destroy_addr(sdata, mgmt->sa);
347}
348
335static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, 349static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
336 struct ieee80211_mgmt *mgmt, 350 struct ieee80211_mgmt *mgmt,
337 size_t len) 351 size_t len)
338{ 352{
339 u16 auth_alg, auth_transaction; 353 u16 auth_alg, auth_transaction;
354 struct sta_info *sta;
355 u8 deauth_frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
340 356
341 lockdep_assert_held(&sdata->u.ibss.mtx); 357 lockdep_assert_held(&sdata->u.ibss.mtx);
342 358
@@ -352,10 +368,22 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
352 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", 368 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
353 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); 369 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
354 sta_info_destroy_addr(sdata, mgmt->sa); 370 sta_info_destroy_addr(sdata, mgmt->sa);
355 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false); 371 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
356 rcu_read_unlock(); 372 rcu_read_unlock();
357 373
358 /* 374 /*
375 * if we have any problem in allocating the new station, we reply with a
376 * DEAUTH frame to tell the other end that we had a problem
377 */
378 if (!sta) {
379 ieee80211_send_deauth_disassoc(sdata, sdata->u.ibss.bssid,
380 IEEE80211_STYPE_DEAUTH,
381 WLAN_REASON_UNSPECIFIED, true,
382 deauth_frame_buf);
383 return;
384 }
385
386 /*
359 * IEEE 802.11 standard does not require authentication in IBSS 387 * IEEE 802.11 standard does not require authentication in IBSS
360 * networks and most implementations do not seem to use it. 388 * networks and most implementations do not seem to use it.
361 * However, try to reply to authentication attempts if someone 389 * However, try to reply to authentication attempts if someone
@@ -459,8 +487,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
459 } 487 }
460 } 488 }
461 489
462 if (sta && rates_updated) 490 if (sta && rates_updated) {
491 drv_sta_rc_update(local, sdata, &sta->sta,
492 IEEE80211_RC_SUPP_RATES_CHANGED);
463 rate_control_rate_init(sta); 493 rate_control_rate_init(sta);
494 }
464 495
465 rcu_read_unlock(); 496 rcu_read_unlock();
466 } 497 }
@@ -561,7 +592,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
561 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 592 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
562 struct ieee80211_local *local = sdata->local; 593 struct ieee80211_local *local = sdata->local;
563 struct sta_info *sta; 594 struct sta_info *sta;
564 int band = local->hw.conf.channel->band; 595 int band = local->oper_channel->band;
565 596
566 /* 597 /*
567 * XXX: Consider removing the least recently used entry and 598 * XXX: Consider removing the least recently used entry and
@@ -759,7 +790,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
759 return; 790 return;
760 } 791 }
761 sdata_info(sdata, "IBSS not allowed on %d MHz\n", 792 sdata_info(sdata, "IBSS not allowed on %d MHz\n",
762 local->hw.conf.channel->center_freq); 793 local->oper_channel->center_freq);
763 794
764 /* No IBSS found - decrease scan interval and continue 795 /* No IBSS found - decrease scan interval and continue
765 * scanning. */ 796 * scanning. */
@@ -899,6 +930,9 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
899 case IEEE80211_STYPE_AUTH: 930 case IEEE80211_STYPE_AUTH:
900 ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); 931 ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
901 break; 932 break;
933 case IEEE80211_STYPE_DEAUTH:
934 ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
935 break;
902 } 936 }
903 937
904 mgmt_out: 938 mgmt_out:
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index bb61f7718c4c..8c804550465b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -68,6 +68,8 @@ struct ieee80211_local;
68#define IEEE80211_DEFAULT_MAX_SP_LEN \ 68#define IEEE80211_DEFAULT_MAX_SP_LEN \
69 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 69 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
70 70
71#define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */)
72
71struct ieee80211_fragment_entry { 73struct ieee80211_fragment_entry {
72 unsigned long first_frag_time; 74 unsigned long first_frag_time;
73 unsigned int seq; 75 unsigned int seq;
@@ -193,8 +195,6 @@ struct ieee80211_tx_data {
193 struct sta_info *sta; 195 struct sta_info *sta;
194 struct ieee80211_key *key; 196 struct ieee80211_key *key;
195 197
196 struct ieee80211_channel *channel;
197
198 unsigned int flags; 198 unsigned int flags;
199}; 199};
200 200
@@ -274,9 +274,15 @@ struct beacon_data {
274 struct rcu_head rcu_head; 274 struct rcu_head rcu_head;
275}; 275};
276 276
277struct probe_resp {
278 struct rcu_head rcu_head;
279 int len;
280 u8 data[0];
281};
282
277struct ieee80211_if_ap { 283struct ieee80211_if_ap {
278 struct beacon_data __rcu *beacon; 284 struct beacon_data __rcu *beacon;
279 struct sk_buff __rcu *probe_resp; 285 struct probe_resp __rcu *probe_resp;
280 286
281 struct list_head vlans; 287 struct list_head vlans;
282 288
@@ -359,6 +365,7 @@ enum ieee80211_sta_flags {
359 IEEE80211_STA_NULLFUNC_ACKED = BIT(8), 365 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
360 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), 366 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
361 IEEE80211_STA_DISABLE_40MHZ = BIT(10), 367 IEEE80211_STA_DISABLE_40MHZ = BIT(10),
368 IEEE80211_STA_DISABLE_VHT = BIT(11),
362}; 369};
363 370
364struct ieee80211_mgd_auth_data { 371struct ieee80211_mgd_auth_data {
@@ -406,6 +413,7 @@ struct ieee80211_if_managed {
406 struct work_struct monitor_work; 413 struct work_struct monitor_work;
407 struct work_struct chswitch_work; 414 struct work_struct chswitch_work;
408 struct work_struct beacon_connection_loss_work; 415 struct work_struct beacon_connection_loss_work;
416 struct work_struct csa_connection_drop_work;
409 417
410 unsigned long beacon_timeout; 418 unsigned long beacon_timeout;
411 unsigned long probe_timeout; 419 unsigned long probe_timeout;
@@ -965,7 +973,6 @@ struct ieee80211_local {
965 int scan_channel_idx; 973 int scan_channel_idx;
966 int scan_ies_len; 974 int scan_ies_len;
967 975
968 struct ieee80211_sched_scan_ies sched_scan_ies;
969 struct work_struct sched_scan_stopped_work; 976 struct work_struct sched_scan_stopped_work;
970 struct ieee80211_sub_if_data __rcu *sched_scan_sdata; 977 struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
971 978
@@ -1052,7 +1059,7 @@ struct ieee80211_local {
1052 bool disable_dynamic_ps; 1059 bool disable_dynamic_ps;
1053 1060
1054 int user_power_level; /* in dBm */ 1061 int user_power_level; /* in dBm */
1055 int power_constr_level; /* in dBm */ 1062 int ap_power_level; /* in dBm */
1056 1063
1057 enum ieee80211_smps_mode smps_mode; 1064 enum ieee80211_smps_mode smps_mode;
1058 1065
@@ -1075,6 +1082,8 @@ struct ieee80211_local {
1075 struct idr ack_status_frames; 1082 struct idr ack_status_frames;
1076 spinlock_t ack_status_lock; 1083 spinlock_t ack_status_lock;
1077 1084
1085 struct ieee80211_sub_if_data __rcu *p2p_sdata;
1086
1078 /* dummy netdev for use w/ NAPI */ 1087 /* dummy netdev for use w/ NAPI */
1079 struct net_device napi_dev; 1088 struct net_device napi_dev;
1080 1089
@@ -1131,7 +1140,7 @@ struct ieee802_11_elems {
1131 u8 *prep; 1140 u8 *prep;
1132 u8 *perr; 1141 u8 *perr;
1133 struct ieee80211_rann_ie *rann; 1142 struct ieee80211_rann_ie *rann;
1134 u8 *ch_switch_elem; 1143 struct ieee80211_channel_sw_ie *ch_switch_ie;
1135 u8 *country_elem; 1144 u8 *country_elem;
1136 u8 *pwr_constr_elem; 1145 u8 *pwr_constr_elem;
1137 u8 *quiet_elem; /* first quite element */ 1146 u8 *quiet_elem; /* first quite element */
@@ -1157,9 +1166,7 @@ struct ieee802_11_elems {
1157 u8 preq_len; 1166 u8 preq_len;
1158 u8 prep_len; 1167 u8 prep_len;
1159 u8 perr_len; 1168 u8 perr_len;
1160 u8 ch_switch_elem_len;
1161 u8 country_elem_len; 1169 u8 country_elem_len;
1162 u8 pwr_constr_elem_len;
1163 u8 quiet_elem_len; 1170 u8 quiet_elem_len;
1164 u8 num_of_quiet_elem; /* can be more the one */ 1171 u8 num_of_quiet_elem; /* can be more the one */
1165 u8 timeout_int_len; 1172 u8 timeout_int_len;
@@ -1202,6 +1209,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
1202void ieee80211_send_pspoll(struct ieee80211_local *local, 1209void ieee80211_send_pspoll(struct ieee80211_local *local,
1203 struct ieee80211_sub_if_data *sdata); 1210 struct ieee80211_sub_if_data *sdata);
1204void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); 1211void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
1212void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
1205int ieee80211_max_network_latency(struct notifier_block *nb, 1213int ieee80211_max_network_latency(struct notifier_block *nb,
1206 unsigned long data, void *dummy); 1214 unsigned long data, void *dummy);
1207int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); 1215int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1299,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
1291void ieee80211_recalc_idle(struct ieee80211_local *local); 1299void ieee80211_recalc_idle(struct ieee80211_local *local);
1292void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, 1300void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
1293 const int offset); 1301 const int offset);
1302int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
1303void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
1294 1304
1295static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) 1305static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1296{ 1306{
@@ -1358,7 +1368,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1358int ieee80211_reconfig(struct ieee80211_local *local); 1368int ieee80211_reconfig(struct ieee80211_local *local);
1359void ieee80211_stop_device(struct ieee80211_local *local); 1369void ieee80211_stop_device(struct ieee80211_local *local);
1360 1370
1361#ifdef CONFIG_PM
1362int __ieee80211_suspend(struct ieee80211_hw *hw, 1371int __ieee80211_suspend(struct ieee80211_hw *hw,
1363 struct cfg80211_wowlan *wowlan); 1372 struct cfg80211_wowlan *wowlan);
1364 1373
@@ -1372,18 +1381,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1372 1381
1373 return ieee80211_reconfig(hw_to_local(hw)); 1382 return ieee80211_reconfig(hw_to_local(hw));
1374} 1383}
1375#else
1376static inline int __ieee80211_suspend(struct ieee80211_hw *hw,
1377 struct cfg80211_wowlan *wowlan)
1378{
1379 return 0;
1380}
1381
1382static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1383{
1384 return 0;
1385}
1386#endif
1387 1384
1388/* utility functions/constants */ 1385/* utility functions/constants */
1389extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1386extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -1425,7 +1422,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1425 struct ieee80211_hdr *hdr); 1422 struct ieee80211_hdr *hdr);
1426void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, 1423void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
1427 struct ieee80211_hdr *hdr, bool ack); 1424 struct ieee80211_hdr *hdr, bool ack);
1428void ieee80211_beacon_connection_loss_work(struct work_struct *work);
1429 1425
1430void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1426void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
1431 enum queue_stop_reason reason); 1427 enum queue_stop_reason reason);
@@ -1451,19 +1447,24 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1451 u16 transaction, u16 auth_alg, 1447 u16 transaction, u16 auth_alg,
1452 u8 *extra, size_t extra_len, const u8 *bssid, 1448 u8 *extra, size_t extra_len, const u8 *bssid,
1453 const u8 *da, const u8 *key, u8 key_len, u8 key_idx); 1449 const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
1450void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
1451 const u8 *bssid, u16 stype, u16 reason,
1452 bool send_frame, u8 *frame_buf);
1454int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1453int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1455 const u8 *ie, size_t ie_len, 1454 const u8 *ie, size_t ie_len,
1456 enum ieee80211_band band, u32 rate_mask, 1455 enum ieee80211_band band, u32 rate_mask,
1457 u8 channel); 1456 u8 channel);
1458struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1457struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1459 u8 *dst, u32 ratemask, 1458 u8 *dst, u32 ratemask,
1459 struct ieee80211_channel *chan,
1460 const u8 *ssid, size_t ssid_len, 1460 const u8 *ssid, size_t ssid_len,
1461 const u8 *ie, size_t ie_len, 1461 const u8 *ie, size_t ie_len,
1462 bool directed); 1462 bool directed);
1463void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1463void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1464 const u8 *ssid, size_t ssid_len, 1464 const u8 *ssid, size_t ssid_len,
1465 const u8 *ie, size_t ie_len, 1465 const u8 *ie, size_t ie_len,
1466 u32 ratemask, bool directed, bool no_cck); 1466 u32 ratemask, bool directed, bool no_cck,
1467 struct ieee80211_channel *channel);
1467 1468
1468void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1469void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1469 const size_t supp_rates_len, 1470 const size_t supp_rates_len,
@@ -1487,9 +1488,11 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1487u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, 1488u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
1488 u32 cap); 1489 u32 cap);
1489int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, 1490int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1490 struct sk_buff *skb, bool need_basic); 1491 struct sk_buff *skb, bool need_basic,
1492 enum ieee80211_band band);
1491int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, 1493int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1492 struct sk_buff *skb, bool need_basic); 1494 struct sk_buff *skb, bool need_basic,
1495 enum ieee80211_band band);
1493 1496
1494/* channel management */ 1497/* channel management */
1495enum ieee80211_chan_mode { 1498enum ieee80211_chan_mode {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bfb57dcc1538..6f8a73c64fb3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -100,6 +100,10 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
100 sdata->vif.bss_conf.idle = true; 100 sdata->vif.bss_conf.idle = true;
101 continue; 101 continue;
102 } 102 }
103
104 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
105 continue;
106
103 /* count everything else */ 107 /* count everything else */
104 sdata->vif.bss_conf.idle = false; 108 sdata->vif.bss_conf.idle = false;
105 count++; 109 count++;
@@ -121,7 +125,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
121 125
122 list_for_each_entry(sdata, &local->interfaces, list) { 126 list_for_each_entry(sdata, &local->interfaces, list) {
123 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 127 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
124 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 128 sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
129 sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
125 continue; 130 continue;
126 if (sdata->old_idle == sdata->vif.bss_conf.idle) 131 if (sdata->old_idle == sdata->vif.bss_conf.idle)
127 continue; 132 continue;
@@ -204,6 +209,8 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
204{ 209{
205 return type1 == NL80211_IFTYPE_MONITOR || 210 return type1 == NL80211_IFTYPE_MONITOR ||
206 type2 == NL80211_IFTYPE_MONITOR || 211 type2 == NL80211_IFTYPE_MONITOR ||
212 type1 == NL80211_IFTYPE_P2P_DEVICE ||
213 type2 == NL80211_IFTYPE_P2P_DEVICE ||
207 (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || 214 (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
208 (type1 == NL80211_IFTYPE_WDS && 215 (type1 == NL80211_IFTYPE_WDS &&
209 (type2 == NL80211_IFTYPE_WDS || 216 (type2 == NL80211_IFTYPE_WDS ||
@@ -271,13 +278,15 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
271 int n_queues = sdata->local->hw.queues; 278 int n_queues = sdata->local->hw.queues;
272 int i; 279 int i;
273 280
274 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 281 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
275 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] == 282 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
276 IEEE80211_INVAL_HW_QUEUE)) 283 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
277 return -EINVAL; 284 IEEE80211_INVAL_HW_QUEUE))
278 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >= 285 return -EINVAL;
279 n_queues)) 286 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
280 return -EINVAL; 287 n_queues))
288 return -EINVAL;
289 }
281 } 290 }
282 291
283 if ((sdata->vif.type != NL80211_IFTYPE_AP) || 292 if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
@@ -406,9 +415,10 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
406 * an error on interface type changes that have been pre-checked, so most 415 * an error on interface type changes that have been pre-checked, so most
407 * checks should be in ieee80211_check_concurrent_iface. 416 * checks should be in ieee80211_check_concurrent_iface.
408 */ 417 */
409static int ieee80211_do_open(struct net_device *dev, bool coming_up) 418int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
410{ 419{
411 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 420 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
421 struct net_device *dev = wdev->netdev;
412 struct ieee80211_local *local = sdata->local; 422 struct ieee80211_local *local = sdata->local;
413 struct sta_info *sta; 423 struct sta_info *sta;
414 u32 changed = 0; 424 u32 changed = 0;
@@ -443,6 +453,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
443 case NL80211_IFTYPE_STATION: 453 case NL80211_IFTYPE_STATION:
444 case NL80211_IFTYPE_MONITOR: 454 case NL80211_IFTYPE_MONITOR:
445 case NL80211_IFTYPE_ADHOC: 455 case NL80211_IFTYPE_ADHOC:
456 case NL80211_IFTYPE_P2P_DEVICE:
446 /* no special treatment */ 457 /* no special treatment */
447 break; 458 break;
448 case NL80211_IFTYPE_UNSPECIFIED: 459 case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +482,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
471 * Copy the hopefully now-present MAC address to 482 * Copy the hopefully now-present MAC address to
472 * this interface, if it has the special null one. 483 * this interface, if it has the special null one.
473 */ 484 */
474 if (is_zero_ether_addr(dev->dev_addr)) { 485 if (dev && is_zero_ether_addr(dev->dev_addr)) {
475 memcpy(dev->dev_addr, 486 memcpy(dev->dev_addr,
476 local->hw.wiphy->perm_addr, 487 local->hw.wiphy->perm_addr,
477 ETH_ALEN); 488 ETH_ALEN);
@@ -536,15 +547,23 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
536 local->fif_probe_req++; 547 local->fif_probe_req++;
537 } 548 }
538 549
539 changed |= ieee80211_reset_erp_info(sdata); 550 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
551 changed |= ieee80211_reset_erp_info(sdata);
540 ieee80211_bss_info_change_notify(sdata, changed); 552 ieee80211_bss_info_change_notify(sdata, changed);
541 553
542 if (sdata->vif.type == NL80211_IFTYPE_STATION || 554 switch (sdata->vif.type) {
543 sdata->vif.type == NL80211_IFTYPE_ADHOC || 555 case NL80211_IFTYPE_STATION:
544 sdata->vif.type == NL80211_IFTYPE_AP) 556 case NL80211_IFTYPE_ADHOC:
557 case NL80211_IFTYPE_AP:
558 case NL80211_IFTYPE_MESH_POINT:
545 netif_carrier_off(dev); 559 netif_carrier_off(dev);
546 else 560 break;
561 case NL80211_IFTYPE_WDS:
562 case NL80211_IFTYPE_P2P_DEVICE:
563 break;
564 default:
547 netif_carrier_on(dev); 565 netif_carrier_on(dev);
566 }
548 567
549 /* 568 /*
550 * set default queue parameters so drivers don't 569 * set default queue parameters so drivers don't
@@ -576,6 +595,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
576 } 595 }
577 596
578 rate_control_rate_init(sta); 597 rate_control_rate_init(sta);
598 netif_carrier_on(dev);
599 } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
600 rcu_assign_pointer(local->p2p_sdata, sdata);
579 } 601 }
580 602
581 /* 603 /*
@@ -601,7 +623,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
601 623
602 ieee80211_recalc_ps(local, -1); 624 ieee80211_recalc_ps(local, -1);
603 625
604 netif_tx_start_all_queues(dev); 626 if (dev)
627 netif_tx_start_all_queues(dev);
605 628
606 return 0; 629 return 0;
607 err_del_interface: 630 err_del_interface:
@@ -631,7 +654,7 @@ static int ieee80211_open(struct net_device *dev)
631 if (err) 654 if (err)
632 return err; 655 return err;
633 656
634 return ieee80211_do_open(dev, true); 657 return ieee80211_do_open(&sdata->wdev, true);
635} 658}
636 659
637static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, 660static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +675,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
652 /* 675 /*
653 * Stop TX on this interface first. 676 * Stop TX on this interface first.
654 */ 677 */
655 netif_tx_stop_all_queues(sdata->dev); 678 if (sdata->dev)
679 netif_tx_stop_all_queues(sdata->dev);
656 680
657 ieee80211_roc_purge(sdata); 681 ieee80211_roc_purge(sdata);
658 682
@@ -691,14 +715,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
691 local->fif_probe_req--; 715 local->fif_probe_req--;
692 } 716 }
693 717
694 netif_addr_lock_bh(sdata->dev); 718 if (sdata->dev) {
695 spin_lock_bh(&local->filter_lock); 719 netif_addr_lock_bh(sdata->dev);
696 __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, 720 spin_lock_bh(&local->filter_lock);
697 sdata->dev->addr_len); 721 __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
698 spin_unlock_bh(&local->filter_lock); 722 sdata->dev->addr_len);
699 netif_addr_unlock_bh(sdata->dev); 723 spin_unlock_bh(&local->filter_lock);
724 netif_addr_unlock_bh(sdata->dev);
700 725
701 ieee80211_configure_filter(local); 726 ieee80211_configure_filter(local);
727 }
702 728
703 del_timer_sync(&local->dynamic_ps_timer); 729 del_timer_sync(&local->dynamic_ps_timer);
704 cancel_work_sync(&local->dynamic_ps_enable_work); 730 cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +734,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
708 struct ieee80211_sub_if_data *vlan, *tmpsdata; 734 struct ieee80211_sub_if_data *vlan, *tmpsdata;
709 struct beacon_data *old_beacon = 735 struct beacon_data *old_beacon =
710 rtnl_dereference(sdata->u.ap.beacon); 736 rtnl_dereference(sdata->u.ap.beacon);
711 struct sk_buff *old_probe_resp = 737 struct probe_resp *old_probe_resp =
712 rtnl_dereference(sdata->u.ap.probe_resp); 738 rtnl_dereference(sdata->u.ap.probe_resp);
713 739
714 /* sdata_running will return false, so this will disable */ 740 /* sdata_running will return false, so this will disable */
@@ -720,7 +746,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
720 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); 746 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
721 synchronize_rcu(); 747 synchronize_rcu();
722 kfree(old_beacon); 748 kfree(old_beacon);
723 kfree_skb(old_probe_resp); 749 kfree(old_probe_resp);
724 750
725 /* down all dependent devices, that is VLANs */ 751 /* down all dependent devices, that is VLANs */
726 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, 752 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,24 +785,29 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
759 ieee80211_adjust_monitor_flags(sdata, -1); 785 ieee80211_adjust_monitor_flags(sdata, -1);
760 ieee80211_configure_filter(local); 786 ieee80211_configure_filter(local);
761 break; 787 break;
788 case NL80211_IFTYPE_P2P_DEVICE:
789 /* relies on synchronize_rcu() below */
790 rcu_assign_pointer(local->p2p_sdata, NULL);
791 /* fall through */
762 default: 792 default:
763 flush_work(&sdata->work); 793 flush_work(&sdata->work);
764 /* 794 /*
765 * When we get here, the interface is marked down. 795 * When we get here, the interface is marked down.
766 * Call synchronize_rcu() to wait for the RX path 796 * Call rcu_barrier() to wait both for the RX path
767 * should it be using the interface and enqueuing 797 * should it be using the interface and enqueuing
768 * frames at this very time on another CPU. 798 * frames at this very time on another CPU, and
799 * for the sta free call_rcu callbacks.
769 */ 800 */
770 synchronize_rcu(); 801 rcu_barrier();
771 skb_queue_purge(&sdata->skb_queue);
772 802
773 /* 803 /*
774 * Disable beaconing here for mesh only, AP and IBSS 804 * free_sta_rcu() enqueues a work for the actual
775 * are already taken care of. 805 * sta cleanup, so we need to flush it while
806 * sdata is still valid.
776 */ 807 */
777 if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 808 flush_workqueue(local->workqueue);
778 ieee80211_bss_info_change_notify(sdata, 809
779 BSS_CHANGED_BEACON_ENABLED); 810 skb_queue_purge(&sdata->skb_queue);
780 811
781 /* 812 /*
782 * Free all remaining keys, there shouldn't be any, 813 * Free all remaining keys, there shouldn't be any,
@@ -877,9 +908,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
877 * Called when the netdev is removed or, by the code below, before 908 * Called when the netdev is removed or, by the code below, before
878 * the interface type changes. 909 * the interface type changes.
879 */ 910 */
880static void ieee80211_teardown_sdata(struct net_device *dev) 911static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
881{ 912{
882 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
883 struct ieee80211_local *local = sdata->local; 913 struct ieee80211_local *local = sdata->local;
884 int flushed; 914 int flushed;
885 int i; 915 int i;
@@ -900,6 +930,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
900 WARN_ON(flushed); 930 WARN_ON(flushed);
901} 931}
902 932
933static void ieee80211_uninit(struct net_device *dev)
934{
935 ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
936}
937
903static u16 ieee80211_netdev_select_queue(struct net_device *dev, 938static u16 ieee80211_netdev_select_queue(struct net_device *dev,
904 struct sk_buff *skb) 939 struct sk_buff *skb)
905{ 940{
@@ -909,7 +944,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
909static const struct net_device_ops ieee80211_dataif_ops = { 944static const struct net_device_ops ieee80211_dataif_ops = {
910 .ndo_open = ieee80211_open, 945 .ndo_open = ieee80211_open,
911 .ndo_stop = ieee80211_stop, 946 .ndo_stop = ieee80211_stop,
912 .ndo_uninit = ieee80211_teardown_sdata, 947 .ndo_uninit = ieee80211_uninit,
913 .ndo_start_xmit = ieee80211_subif_start_xmit, 948 .ndo_start_xmit = ieee80211_subif_start_xmit,
914 .ndo_set_rx_mode = ieee80211_set_multicast_list, 949 .ndo_set_rx_mode = ieee80211_set_multicast_list,
915 .ndo_change_mtu = ieee80211_change_mtu, 950 .ndo_change_mtu = ieee80211_change_mtu,
@@ -940,7 +975,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
940static const struct net_device_ops ieee80211_monitorif_ops = { 975static const struct net_device_ops ieee80211_monitorif_ops = {
941 .ndo_open = ieee80211_open, 976 .ndo_open = ieee80211_open,
942 .ndo_stop = ieee80211_stop, 977 .ndo_stop = ieee80211_stop,
943 .ndo_uninit = ieee80211_teardown_sdata, 978 .ndo_uninit = ieee80211_uninit,
944 .ndo_start_xmit = ieee80211_monitor_start_xmit, 979 .ndo_start_xmit = ieee80211_monitor_start_xmit,
945 .ndo_set_rx_mode = ieee80211_set_multicast_list, 980 .ndo_set_rx_mode = ieee80211_set_multicast_list,
946 .ndo_change_mtu = ieee80211_change_mtu, 981 .ndo_change_mtu = ieee80211_change_mtu,
@@ -1099,7 +1134,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1099 /* and set some type-dependent values */ 1134 /* and set some type-dependent values */
1100 sdata->vif.type = type; 1135 sdata->vif.type = type;
1101 sdata->vif.p2p = false; 1136 sdata->vif.p2p = false;
1102 sdata->dev->netdev_ops = &ieee80211_dataif_ops;
1103 sdata->wdev.iftype = type; 1137 sdata->wdev.iftype = type;
1104 1138
1105 sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); 1139 sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1141,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1107 1141
1108 sdata->noack_map = 0; 1142 sdata->noack_map = 0;
1109 1143
1110 /* only monitor differs */ 1144 /* only monitor/p2p-device differ */
1111 sdata->dev->type = ARPHRD_ETHER; 1145 if (sdata->dev) {
1146 sdata->dev->netdev_ops = &ieee80211_dataif_ops;
1147 sdata->dev->type = ARPHRD_ETHER;
1148 }
1112 1149
1113 skb_queue_head_init(&sdata->skb_queue); 1150 skb_queue_head_init(&sdata->skb_queue);
1114 INIT_WORK(&sdata->work, ieee80211_iface_work); 1151 INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1183,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1146 break; 1183 break;
1147 case NL80211_IFTYPE_WDS: 1184 case NL80211_IFTYPE_WDS:
1148 case NL80211_IFTYPE_AP_VLAN: 1185 case NL80211_IFTYPE_AP_VLAN:
1186 case NL80211_IFTYPE_P2P_DEVICE:
1149 break; 1187 break;
1150 case NL80211_IFTYPE_UNSPECIFIED: 1188 case NL80211_IFTYPE_UNSPECIFIED:
1151 case NUM_NL80211_IFTYPES: 1189 case NUM_NL80211_IFTYPES:
@@ -1156,18 +1194,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1156 ieee80211_debugfs_add_netdev(sdata); 1194 ieee80211_debugfs_add_netdev(sdata);
1157} 1195}
1158 1196
1159static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
1160{
1161 switch (sdata->vif.type) {
1162 case NL80211_IFTYPE_MESH_POINT:
1163 mesh_path_flush_by_iface(sdata);
1164 break;
1165
1166 default:
1167 break;
1168 }
1169}
1170
1171static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, 1197static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
1172 enum nl80211_iftype type) 1198 enum nl80211_iftype type)
1173{ 1199{
@@ -1225,7 +1251,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
1225 1251
1226 ieee80211_do_stop(sdata, false); 1252 ieee80211_do_stop(sdata, false);
1227 1253
1228 ieee80211_teardown_sdata(sdata->dev); 1254 ieee80211_teardown_sdata(sdata);
1229 1255
1230 ret = drv_change_interface(local, sdata, internal_type, p2p); 1256 ret = drv_change_interface(local, sdata, internal_type, p2p);
1231 if (ret) 1257 if (ret)
@@ -1240,7 +1266,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
1240 1266
1241 ieee80211_setup_sdata(sdata, type); 1267 ieee80211_setup_sdata(sdata, type);
1242 1268
1243 err = ieee80211_do_open(sdata->dev, false); 1269 err = ieee80211_do_open(&sdata->wdev, false);
1244 WARN(err, "type change: do_open returned %d", err); 1270 WARN(err, "type change: do_open returned %d", err);
1245 1271
1246 return ret; 1272 return ret;
@@ -1267,14 +1293,14 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
1267 return ret; 1293 return ret;
1268 } else { 1294 } else {
1269 /* Purge and reset type-dependent state. */ 1295 /* Purge and reset type-dependent state. */
1270 ieee80211_teardown_sdata(sdata->dev); 1296 ieee80211_teardown_sdata(sdata);
1271 ieee80211_setup_sdata(sdata, type); 1297 ieee80211_setup_sdata(sdata, type);
1272 } 1298 }
1273 1299
1274 /* reset some values that shouldn't be kept across type changes */ 1300 /* reset some values that shouldn't be kept across type changes */
1275 sdata->vif.bss_conf.basic_rates = 1301 sdata->vif.bss_conf.basic_rates =
1276 ieee80211_mandatory_rates(sdata->local, 1302 ieee80211_mandatory_rates(sdata->local,
1277 sdata->local->hw.conf.channel->band); 1303 sdata->local->oper_channel->band);
1278 sdata->drop_unencrypted = 0; 1304 sdata->drop_unencrypted = 0;
1279 if (type == NL80211_IFTYPE_STATION) 1305 if (type == NL80211_IFTYPE_STATION)
1280 sdata->u.mgd.use_4addr = false; 1306 sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1309,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
1283} 1309}
1284 1310
1285static void ieee80211_assign_perm_addr(struct ieee80211_local *local, 1311static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1286 struct net_device *dev, 1312 u8 *perm_addr, enum nl80211_iftype type)
1287 enum nl80211_iftype type)
1288{ 1313{
1289 struct ieee80211_sub_if_data *sdata; 1314 struct ieee80211_sub_if_data *sdata;
1290 u64 mask, start, addr, val, inc; 1315 u64 mask, start, addr, val, inc;
@@ -1293,13 +1318,12 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1293 int i; 1318 int i;
1294 1319
1295 /* default ... something at least */ 1320 /* default ... something at least */
1296 memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); 1321 memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
1297 1322
1298 if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && 1323 if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
1299 local->hw.wiphy->n_addresses <= 1) 1324 local->hw.wiphy->n_addresses <= 1)
1300 return; 1325 return;
1301 1326
1302
1303 mutex_lock(&local->iflist_mtx); 1327 mutex_lock(&local->iflist_mtx);
1304 1328
1305 switch (type) { 1329 switch (type) {
@@ -1312,11 +1336,24 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1312 list_for_each_entry(sdata, &local->interfaces, list) { 1336 list_for_each_entry(sdata, &local->interfaces, list) {
1313 if (sdata->vif.type != NL80211_IFTYPE_AP) 1337 if (sdata->vif.type != NL80211_IFTYPE_AP)
1314 continue; 1338 continue;
1315 memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN); 1339 memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
1316 break; 1340 break;
1317 } 1341 }
1318 /* keep default if no AP interface present */ 1342 /* keep default if no AP interface present */
1319 break; 1343 break;
1344 case NL80211_IFTYPE_P2P_CLIENT:
1345 case NL80211_IFTYPE_P2P_GO:
1346 if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
1347 list_for_each_entry(sdata, &local->interfaces, list) {
1348 if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
1349 continue;
1350 if (!ieee80211_sdata_running(sdata))
1351 continue;
1352 memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
1353 goto out_unlock;
1354 }
1355 }
1356 /* otherwise fall through */
1320 default: 1357 default:
1321 /* assign a new address if possible -- try n_addresses first */ 1358 /* assign a new address if possible -- try n_addresses first */
1322 for (i = 0; i < local->hw.wiphy->n_addresses; i++) { 1359 for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1368,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1331 } 1368 }
1332 1369
1333 if (!used) { 1370 if (!used) {
1334 memcpy(dev->perm_addr, 1371 memcpy(perm_addr,
1335 local->hw.wiphy->addresses[i].addr, 1372 local->hw.wiphy->addresses[i].addr,
1336 ETH_ALEN); 1373 ETH_ALEN);
1337 break; 1374 break;
@@ -1382,7 +1419,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1382 } 1419 }
1383 1420
1384 if (!used) { 1421 if (!used) {
1385 memcpy(dev->perm_addr, tmp_addr, ETH_ALEN); 1422 memcpy(perm_addr, tmp_addr, ETH_ALEN);
1386 break; 1423 break;
1387 } 1424 }
1388 addr = (start & ~mask) | (val & mask); 1425 addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1428,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1391 break; 1428 break;
1392 } 1429 }
1393 1430
1431 out_unlock:
1394 mutex_unlock(&local->iflist_mtx); 1432 mutex_unlock(&local->iflist_mtx);
1395} 1433}
1396 1434
@@ -1398,49 +1436,68 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1398 struct wireless_dev **new_wdev, enum nl80211_iftype type, 1436 struct wireless_dev **new_wdev, enum nl80211_iftype type,
1399 struct vif_params *params) 1437 struct vif_params *params)
1400{ 1438{
1401 struct net_device *ndev; 1439 struct net_device *ndev = NULL;
1402 struct ieee80211_sub_if_data *sdata = NULL; 1440 struct ieee80211_sub_if_data *sdata = NULL;
1403 int ret, i; 1441 int ret, i;
1404 int txqs = 1; 1442 int txqs = 1;
1405 1443
1406 ASSERT_RTNL(); 1444 ASSERT_RTNL();
1407 1445
1408 if (local->hw.queues >= IEEE80211_NUM_ACS) 1446 if (type == NL80211_IFTYPE_P2P_DEVICE) {
1409 txqs = IEEE80211_NUM_ACS; 1447 struct wireless_dev *wdev;
1410 1448
1411 ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, 1449 sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
1412 name, ieee80211_if_setup, txqs, 1); 1450 GFP_KERNEL);
1413 if (!ndev) 1451 if (!sdata)
1414 return -ENOMEM; 1452 return -ENOMEM;
1415 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 1453 wdev = &sdata->wdev;
1416 1454
1417 ndev->needed_headroom = local->tx_headroom + 1455 sdata->dev = NULL;
1418 4*6 /* four MAC addresses */ 1456 strlcpy(sdata->name, name, IFNAMSIZ);
1419 + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ 1457 ieee80211_assign_perm_addr(local, wdev->address, type);
1420 + 6 /* mesh */ 1458 memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
1421 + 8 /* rfc1042/bridge tunnel */ 1459 } else {
1422 - ETH_HLEN /* ethernet hard_header_len */ 1460 if (local->hw.queues >= IEEE80211_NUM_ACS)
1423 + IEEE80211_ENCRYPT_HEADROOM; 1461 txqs = IEEE80211_NUM_ACS;
1424 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 1462
1425 1463 ndev = alloc_netdev_mqs(sizeof(*sdata) +
1426 ret = dev_alloc_name(ndev, ndev->name); 1464 local->hw.vif_data_size,
1427 if (ret < 0) 1465 name, ieee80211_if_setup, txqs, 1);
1428 goto fail; 1466 if (!ndev)
1429 1467 return -ENOMEM;
1430 ieee80211_assign_perm_addr(local, ndev, type); 1468 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
1431 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); 1469
1432 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); 1470 ndev->needed_headroom = local->tx_headroom +
1433 1471 4*6 /* four MAC addresses */
1434 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 1472 + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
1435 sdata = netdev_priv(ndev); 1473 + 6 /* mesh */
1436 ndev->ieee80211_ptr = &sdata->wdev; 1474 + 8 /* rfc1042/bridge tunnel */
1437 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); 1475 - ETH_HLEN /* ethernet hard_header_len */
1438 memcpy(sdata->name, ndev->name, IFNAMSIZ); 1476 + IEEE80211_ENCRYPT_HEADROOM;
1477 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
1478
1479 ret = dev_alloc_name(ndev, ndev->name);
1480 if (ret < 0) {
1481 free_netdev(ndev);
1482 return ret;
1483 }
1484
1485 ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
1486 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
1487 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
1488
1489 /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
1490 sdata = netdev_priv(ndev);
1491 ndev->ieee80211_ptr = &sdata->wdev;
1492 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
1493 memcpy(sdata->name, ndev->name, IFNAMSIZ);
1494
1495 sdata->dev = ndev;
1496 }
1439 1497
1440 /* initialise type-independent data */ 1498 /* initialise type-independent data */
1441 sdata->wdev.wiphy = local->hw.wiphy; 1499 sdata->wdev.wiphy = local->hw.wiphy;
1442 sdata->local = local; 1500 sdata->local = local;
1443 sdata->dev = ndev;
1444#ifdef CONFIG_INET 1501#ifdef CONFIG_INET
1445 sdata->arp_filter_state = true; 1502 sdata->arp_filter_state = true;
1446#endif 1503#endif
@@ -1469,17 +1526,21 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1469 /* setup type-dependent data */ 1526 /* setup type-dependent data */
1470 ieee80211_setup_sdata(sdata, type); 1527 ieee80211_setup_sdata(sdata, type);
1471 1528
1472 if (params) { 1529 if (ndev) {
1473 ndev->ieee80211_ptr->use_4addr = params->use_4addr; 1530 if (params) {
1474 if (type == NL80211_IFTYPE_STATION) 1531 ndev->ieee80211_ptr->use_4addr = params->use_4addr;
1475 sdata->u.mgd.use_4addr = params->use_4addr; 1532 if (type == NL80211_IFTYPE_STATION)
1476 } 1533 sdata->u.mgd.use_4addr = params->use_4addr;
1534 }
1477 1535
1478 ndev->features |= local->hw.netdev_features; 1536 ndev->features |= local->hw.netdev_features;
1479 1537
1480 ret = register_netdevice(ndev); 1538 ret = register_netdevice(ndev);
1481 if (ret) 1539 if (ret) {
1482 goto fail; 1540 free_netdev(ndev);
1541 return ret;
1542 }
1543 }
1483 1544
1484 mutex_lock(&local->iflist_mtx); 1545 mutex_lock(&local->iflist_mtx);
1485 list_add_tail_rcu(&sdata->list, &local->interfaces); 1546 list_add_tail_rcu(&sdata->list, &local->interfaces);
@@ -1489,10 +1550,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1489 *new_wdev = &sdata->wdev; 1550 *new_wdev = &sdata->wdev;
1490 1551
1491 return 0; 1552 return 0;
1492
1493 fail:
1494 free_netdev(ndev);
1495 return ret;
1496} 1553}
1497 1554
1498void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) 1555void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1560,22 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
1503 list_del_rcu(&sdata->list); 1560 list_del_rcu(&sdata->list);
1504 mutex_unlock(&sdata->local->iflist_mtx); 1561 mutex_unlock(&sdata->local->iflist_mtx);
1505 1562
1506 /* clean up type-dependent data */
1507 ieee80211_clean_sdata(sdata);
1508
1509 synchronize_rcu(); 1563 synchronize_rcu();
1510 unregister_netdevice(sdata->dev); 1564
1565 if (sdata->dev) {
1566 unregister_netdevice(sdata->dev);
1567 } else {
1568 cfg80211_unregister_wdev(&sdata->wdev);
1569 kfree(sdata);
1570 }
1571}
1572
1573void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
1574{
1575 if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
1576 return;
1577 ieee80211_do_stop(sdata, true);
1578 ieee80211_teardown_sdata(sdata);
1511} 1579}
1512 1580
1513/* 1581/*
@@ -1518,6 +1586,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1518{ 1586{
1519 struct ieee80211_sub_if_data *sdata, *tmp; 1587 struct ieee80211_sub_if_data *sdata, *tmp;
1520 LIST_HEAD(unreg_list); 1588 LIST_HEAD(unreg_list);
1589 LIST_HEAD(wdev_list);
1521 1590
1522 ASSERT_RTNL(); 1591 ASSERT_RTNL();
1523 1592
@@ -1525,13 +1594,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1525 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1594 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
1526 list_del(&sdata->list); 1595 list_del(&sdata->list);
1527 1596
1528 ieee80211_clean_sdata(sdata); 1597 if (sdata->dev)
1529 1598 unregister_netdevice_queue(sdata->dev, &unreg_list);
1530 unregister_netdevice_queue(sdata->dev, &unreg_list); 1599 else
1600 list_add(&sdata->list, &wdev_list);
1531 } 1601 }
1532 mutex_unlock(&local->iflist_mtx); 1602 mutex_unlock(&local->iflist_mtx);
1533 unregister_netdevice_many(&unreg_list); 1603 unregister_netdevice_many(&unreg_list);
1534 list_del(&unreg_list); 1604 list_del(&unreg_list);
1605
1606 list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
1607 list_del(&sdata->list);
1608 cfg80211_unregister_wdev(&sdata->wdev);
1609 kfree(sdata);
1610 }
1535} 1611}
1536 1612
1537static int netdev_notify(struct notifier_block *nb, 1613static int netdev_notify(struct notifier_block *nb,
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 7ae678ba5d67..d27e61aaa71b 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -402,7 +402,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
402 * Synchronize so the TX path can no longer be using 402 * Synchronize so the TX path can no longer be using
403 * this key before we free/remove it. 403 * this key before we free/remove it.
404 */ 404 */
405 synchronize_rcu(); 405 synchronize_net();
406 406
407 if (key->local) 407 if (key->local)
408 ieee80211_key_disable_hw_accel(key); 408 ieee80211_key_disable_hw_accel(key);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c26e231c733a..c80c4490351c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -150,13 +150,11 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
150 150
151 if (test_bit(SCAN_SW_SCANNING, &local->scanning) || 151 if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
152 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || 152 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
153 test_bit(SCAN_HW_SCANNING, &local->scanning)) 153 test_bit(SCAN_HW_SCANNING, &local->scanning) ||
154 !local->ap_power_level)
154 power = chan->max_power; 155 power = chan->max_power;
155 else 156 else
156 power = local->power_constr_level ? 157 power = min(chan->max_power, local->ap_power_level);
157 min(chan->max_power,
158 (chan->max_reg_power - local->power_constr_level)) :
159 chan->max_power;
160 158
161 if (local->user_power_level >= 0) 159 if (local->user_power_level >= 0)
162 power = min(power, local->user_power_level); 160 power = min(power, local->user_power_level);
@@ -207,6 +205,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
207 sdata->vif.bss_conf.bssid = NULL; 205 sdata->vif.bss_conf.bssid = NULL;
208 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 206 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
209 sdata->vif.bss_conf.bssid = zero; 207 sdata->vif.bss_conf.bssid = zero;
208 } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
209 sdata->vif.bss_conf.bssid = sdata->vif.addr;
210 WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
211 "P2P Device BSS changed %#x", changed);
210 } else { 212 } else {
211 WARN_ON(1); 213 WARN_ON(1);
212 return; 214 return;
@@ -362,9 +364,7 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
362 struct ieee80211_local *local = 364 struct ieee80211_local *local =
363 container_of(work, struct ieee80211_local, recalc_smps); 365 container_of(work, struct ieee80211_local, recalc_smps);
364 366
365 mutex_lock(&local->iflist_mtx);
366 ieee80211_recalc_smps(local); 367 ieee80211_recalc_smps(local);
367 mutex_unlock(&local->iflist_mtx);
368} 368}
369 369
370#ifdef CONFIG_INET 370#ifdef CONFIG_INET
@@ -514,6 +514,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
514 BIT(IEEE80211_STYPE_AUTH >> 4) | 514 BIT(IEEE80211_STYPE_AUTH >> 4) |
515 BIT(IEEE80211_STYPE_DEAUTH >> 4), 515 BIT(IEEE80211_STYPE_DEAUTH >> 4),
516 }, 516 },
517 [NL80211_IFTYPE_P2P_DEVICE] = {
518 .tx = 0xffff,
519 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
520 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
521 },
517}; 522};
518 523
519static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { 524static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +541,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
536 int priv_size, i; 541 int priv_size, i;
537 struct wiphy *wiphy; 542 struct wiphy *wiphy;
538 543
544 if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
545 !ops->add_interface || !ops->remove_interface ||
546 !ops->configure_filter))
547 return NULL;
548
539 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) 549 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
540 return NULL; 550 return NULL;
541 551
@@ -588,13 +598,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
588 598
589 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); 599 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
590 600
591 BUG_ON(!ops->tx);
592 BUG_ON(!ops->start);
593 BUG_ON(!ops->stop);
594 BUG_ON(!ops->config);
595 BUG_ON(!ops->add_interface);
596 BUG_ON(!ops->remove_interface);
597 BUG_ON(!ops->configure_filter);
598 local->ops = ops; 601 local->ops = ops;
599 602
600 /* set up some defaults */ 603 /* set up some defaults */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 85572353a7e3..ff0296c7bab8 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,11 +109,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
109 109
110 /* Disallow HT40+/- mismatch */ 110 /* Disallow HT40+/- mismatch */
111 if (ie->ht_operation && 111 if (ie->ht_operation &&
112 (local->_oper_channel_type == NL80211_CHAN_HT40MINUS || 112 (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
113 local->_oper_channel_type == NL80211_CHAN_HT40PLUS) && 113 sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
114 (sta_channel_type == NL80211_CHAN_HT40MINUS || 114 (sta_channel_type == NL80211_CHAN_HT40MINUS ||
115 sta_channel_type == NL80211_CHAN_HT40PLUS) && 115 sta_channel_type == NL80211_CHAN_HT40PLUS) &&
116 local->_oper_channel_type != sta_channel_type) 116 sdata->vif.bss_conf.channel_type != sta_channel_type)
117 goto mismatch; 117 goto mismatch;
118 118
119 return true; 119 return true;
@@ -136,10 +136,13 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
136 * mesh_accept_plinks_update - update accepting_plink in local mesh beacons 136 * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
137 * 137 *
138 * @sdata: mesh interface in which mesh beacons are going to be updated 138 * @sdata: mesh interface in which mesh beacons are going to be updated
139 *
140 * Returns: beacon changed flag if the beacon content changed.
139 */ 141 */
140void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) 142u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
141{ 143{
142 bool free_plinks; 144 bool free_plinks;
145 u32 changed = 0;
143 146
144 /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, 147 /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
145 * the mesh interface might be able to establish plinks with peers that 148 * the mesh interface might be able to establish plinks with peers that
@@ -149,8 +152,12 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
149 */ 152 */
150 free_plinks = mesh_plink_availables(sdata); 153 free_plinks = mesh_plink_availables(sdata);
151 154
152 if (free_plinks != sdata->u.mesh.accepting_plinks) 155 if (free_plinks != sdata->u.mesh.accepting_plinks) {
153 ieee80211_mesh_housekeeping_timer((unsigned long) sdata); 156 sdata->u.mesh.accepting_plinks = free_plinks;
157 changed = BSS_CHANGED_BEACON;
158 }
159
160 return changed;
154} 161}
155 162
156int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) 163int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -262,7 +269,6 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
262 neighbors = (neighbors > 15) ? 15 : neighbors; 269 neighbors = (neighbors > 15) ? 15 : neighbors;
263 *pos++ = neighbors << 1; 270 *pos++ = neighbors << 1;
264 /* Mesh capability */ 271 /* Mesh capability */
265 ifmsh->accepting_plinks = mesh_plink_availables(sdata);
266 *pos = MESHCONF_CAPAB_FORWARDING; 272 *pos = MESHCONF_CAPAB_FORWARDING;
267 *pos |= ifmsh->accepting_plinks ? 273 *pos |= ifmsh->accepting_plinks ?
268 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 274 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
@@ -349,17 +355,18 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
349{ 355{
350 struct ieee80211_local *local = sdata->local; 356 struct ieee80211_local *local = sdata->local;
351 struct ieee80211_supported_band *sband; 357 struct ieee80211_supported_band *sband;
358 struct ieee80211_channel *chan = local->oper_channel;
352 u8 *pos; 359 u8 *pos;
353 360
354 if (skb_tailroom(skb) < 3) 361 if (skb_tailroom(skb) < 3)
355 return -ENOMEM; 362 return -ENOMEM;
356 363
357 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 364 sband = local->hw.wiphy->bands[chan->band];
358 if (sband->band == IEEE80211_BAND_2GHZ) { 365 if (sband->band == IEEE80211_BAND_2GHZ) {
359 pos = skb_put(skb, 2 + 1); 366 pos = skb_put(skb, 2 + 1);
360 *pos++ = WLAN_EID_DS_PARAMS; 367 *pos++ = WLAN_EID_DS_PARAMS;
361 *pos++ = 1; 368 *pos++ = 1;
362 *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq); 369 *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
363 } 370 }
364 371
365 return 0; 372 return 0;
@@ -374,7 +381,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
374 381
375 sband = local->hw.wiphy->bands[local->oper_channel->band]; 382 sband = local->hw.wiphy->bands[local->oper_channel->band];
376 if (!sband->ht_cap.ht_supported || 383 if (!sband->ht_cap.ht_supported ||
377 local->_oper_channel_type == NL80211_CHAN_NO_HT) 384 sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
378 return 0; 385 return 0;
379 386
380 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) 387 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -391,7 +398,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
391{ 398{
392 struct ieee80211_local *local = sdata->local; 399 struct ieee80211_local *local = sdata->local;
393 struct ieee80211_channel *channel = local->oper_channel; 400 struct ieee80211_channel *channel = local->oper_channel;
394 enum nl80211_channel_type channel_type = local->_oper_channel_type; 401 enum nl80211_channel_type channel_type =
402 sdata->vif.bss_conf.channel_type;
395 struct ieee80211_supported_band *sband = 403 struct ieee80211_supported_band *sband =
396 local->hw.wiphy->bands[channel->band]; 404 local->hw.wiphy->bands[channel->band];
397 struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; 405 struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -521,14 +529,13 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
521static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, 529static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
522 struct ieee80211_if_mesh *ifmsh) 530 struct ieee80211_if_mesh *ifmsh)
523{ 531{
524 bool free_plinks; 532 u32 changed;
525 533
526 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 534 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
527 mesh_path_expire(sdata); 535 mesh_path_expire(sdata);
528 536
529 free_plinks = mesh_plink_availables(sdata); 537 changed = mesh_accept_plinks_update(sdata);
530 if (free_plinks != sdata->u.mesh.accepting_plinks) 538 ieee80211_bss_info_change_notify(sdata, changed);
531 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
532 539
533 mod_timer(&ifmsh->housekeeping_timer, 540 mod_timer(&ifmsh->housekeeping_timer,
534 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 541 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -603,12 +610,14 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
603 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; 610 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
604 sdata->vif.bss_conf.basic_rates = 611 sdata->vif.bss_conf.basic_rates =
605 ieee80211_mandatory_rates(sdata->local, 612 ieee80211_mandatory_rates(sdata->local,
606 sdata->local->hw.conf.channel->band); 613 sdata->local->oper_channel->band);
607 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 614 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
608 BSS_CHANGED_BEACON_ENABLED | 615 BSS_CHANGED_BEACON_ENABLED |
609 BSS_CHANGED_HT | 616 BSS_CHANGED_HT |
610 BSS_CHANGED_BASIC_RATES | 617 BSS_CHANGED_BASIC_RATES |
611 BSS_CHANGED_BEACON_INT); 618 BSS_CHANGED_BEACON_INT);
619
620 netif_carrier_on(sdata->dev);
612} 621}
613 622
614void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 623void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -616,9 +625,15 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
616 struct ieee80211_local *local = sdata->local; 625 struct ieee80211_local *local = sdata->local;
617 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 626 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
618 627
628 netif_carrier_off(sdata->dev);
629
630 /* stop the beacon */
619 ifmsh->mesh_id_len = 0; 631 ifmsh->mesh_id_len = 0;
620 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 632 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
621 sta_info_flush(local, NULL); 633
634 /* flush STAs and mpaths on this iface */
635 sta_info_flush(sdata->local, sdata);
636 mesh_path_flush_by_iface(sdata);
622 637
623 del_timer_sync(&sdata->u.mesh.housekeeping_timer); 638 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
624 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); 639 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index faaa39bcfd10..25d0f17dec71 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -215,6 +215,9 @@ struct mesh_rmc {
215/* Maximum number of paths per interface */ 215/* Maximum number of paths per interface */
216#define MESH_MAX_MPATHS 1024 216#define MESH_MAX_MPATHS 1024
217 217
218/* Number of frames buffered per destination for unresolved destinations */
219#define MESH_FRAME_QUEUE_LEN 10
220
218/* Public interfaces */ 221/* Public interfaces */
219/* Various */ 222/* Various */
220int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, 223int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -282,7 +285,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
282 u8 *hw_addr, 285 u8 *hw_addr,
283 struct ieee802_11_elems *ie); 286 struct ieee802_11_elems *ie);
284bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 287bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
285void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 288u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
286void mesh_plink_broken(struct sta_info *sta); 289void mesh_plink_broken(struct sta_info *sta);
287void mesh_plink_deactivate(struct sta_info *sta); 290void mesh_plink_deactivate(struct sta_info *sta);
288int mesh_plink_open(struct sta_info *sta); 291int mesh_plink_open(struct sta_info *sta);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 494bc39f61a4..47aeee2d8db1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -17,8 +17,6 @@
17#define MAX_METRIC 0xffffffff 17#define MAX_METRIC 0xffffffff
18#define ARITH_SHIFT 8 18#define ARITH_SHIFT 8
19 19
20/* Number of frames buffered per destination for unresolved destinations */
21#define MESH_FRAME_QUEUE_LEN 10
22#define MAX_PREQ_QUEUE_LEN 64 20#define MAX_PREQ_QUEUE_LEN 64
23 21
24/* Destination only */ 22/* Destination only */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 075bc535c601..aa749818860e 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
203{ 203{
204 struct sk_buff *skb; 204 struct sk_buff *skb;
205 struct ieee80211_hdr *hdr; 205 struct ieee80211_hdr *hdr;
206 struct sk_buff_head tmpq;
207 unsigned long flags; 206 unsigned long flags;
208 207
209 rcu_assign_pointer(mpath->next_hop, sta); 208 rcu_assign_pointer(mpath->next_hop, sta);
210 209
211 __skb_queue_head_init(&tmpq);
212
213 spin_lock_irqsave(&mpath->frame_queue.lock, flags); 210 spin_lock_irqsave(&mpath->frame_queue.lock, flags);
214 211 skb_queue_walk(&mpath->frame_queue, skb) {
215 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
216 hdr = (struct ieee80211_hdr *) skb->data; 212 hdr = (struct ieee80211_hdr *) skb->data;
217 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 213 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
218 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); 214 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
219 __skb_queue_tail(&tmpq, skb);
220 } 215 }
221 216
222 skb_queue_splice(&tmpq, &mpath->frame_queue);
223 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 217 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
224} 218}
225 219
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
285 struct mesh_path *from_mpath, 279 struct mesh_path *from_mpath,
286 bool copy) 280 bool copy)
287{ 281{
288 struct sk_buff *skb, *cp_skb = NULL; 282 struct sk_buff *skb, *fskb, *tmp;
289 struct sk_buff_head gateq, failq; 283 struct sk_buff_head failq;
290 unsigned long flags; 284 unsigned long flags;
291 int num_skbs;
292 285
293 BUG_ON(gate_mpath == from_mpath); 286 BUG_ON(gate_mpath == from_mpath);
294 BUG_ON(!gate_mpath->next_hop); 287 BUG_ON(!gate_mpath->next_hop);
295 288
296 __skb_queue_head_init(&gateq);
297 __skb_queue_head_init(&failq); 289 __skb_queue_head_init(&failq);
298 290
299 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); 291 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
300 skb_queue_splice_init(&from_mpath->frame_queue, &failq); 292 skb_queue_splice_init(&from_mpath->frame_queue, &failq);
301 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); 293 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
302 294
303 num_skbs = skb_queue_len(&failq); 295 skb_queue_walk_safe(&failq, fskb, tmp) {
304 296 if (skb_queue_len(&gate_mpath->frame_queue) >=
305 while (num_skbs--) { 297 MESH_FRAME_QUEUE_LEN) {
306 skb = __skb_dequeue(&failq); 298 mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
307 if (copy) { 299 break;
308 cp_skb = skb_copy(skb, GFP_ATOMIC);
309 if (cp_skb)
310 __skb_queue_tail(&failq, cp_skb);
311 } 300 }
312 301
302 skb = skb_copy(fskb, GFP_ATOMIC);
303 if (WARN_ON(!skb))
304 break;
305
313 prepare_for_gate(skb, gate_mpath->dst, gate_mpath); 306 prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
314 __skb_queue_tail(&gateq, skb); 307 skb_queue_tail(&gate_mpath->frame_queue, skb);
308
309 if (copy)
310 continue;
311
312 __skb_unlink(fskb, &failq);
313 kfree_skb(fskb);
315 } 314 }
316 315
317 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
318 skb_queue_splice(&gateq, &gate_mpath->frame_queue);
319 mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n", 316 mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
320 gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue)); 317 gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
321 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
322 318
323 if (!copy) 319 if (!copy)
324 return; 320 return;
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
531 527
532 read_lock_bh(&pathtbl_resize_lock); 528 read_lock_bh(&pathtbl_resize_lock);
533 memcpy(new_mpath->dst, dst, ETH_ALEN); 529 memcpy(new_mpath->dst, dst, ETH_ALEN);
534 memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN); 530 eth_broadcast_addr(new_mpath->rann_snd_addr);
535 new_mpath->is_root = false; 531 new_mpath->is_root = false;
536 new_mpath->sdata = sdata; 532 new_mpath->sdata = sdata;
537 new_mpath->flags = 0; 533 new_mpath->flags = 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index af671b984df3..3ab34d816897 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -48,17 +48,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
48 u8 *da, __le16 llid, __le16 plid, __le16 reason); 48 u8 *da, __le16 llid, __le16 plid, __le16 reason);
49 49
50static inline 50static inline
51void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 51u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
52{ 52{
53 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); 53 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
54 mesh_accept_plinks_update(sdata); 54 return mesh_accept_plinks_update(sdata);
55} 55}
56 56
57static inline 57static inline
58void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) 58u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
59{ 59{
60 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); 60 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
61 mesh_accept_plinks_update(sdata); 61 return mesh_accept_plinks_update(sdata);
62} 62}
63 63
64/** 64/**
@@ -117,7 +117,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
117 u16 ht_opmode; 117 u16 ht_opmode;
118 bool non_ht_sta = false, ht20_sta = false; 118 bool non_ht_sta = false, ht20_sta = false;
119 119
120 if (local->_oper_channel_type == NL80211_CHAN_NO_HT) 120 if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
121 return 0; 121 return 0;
122 122
123 rcu_read_lock(); 123 rcu_read_lock();
@@ -147,7 +147,8 @@ out:
147 147
148 if (non_ht_sta) 148 if (non_ht_sta)
149 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; 149 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
150 else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20) 150 else if (ht20_sta &&
151 sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
151 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; 152 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
152 else 153 else
153 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 154 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -170,22 +171,21 @@ out:
170 * @sta: mesh peer link to deactivate 171 * @sta: mesh peer link to deactivate
171 * 172 *
172 * All mesh paths with this peer as next hop will be flushed 173 * All mesh paths with this peer as next hop will be flushed
174 * Returns beacon changed flag if the beacon content changed.
173 * 175 *
174 * Locking: the caller must hold sta->lock 176 * Locking: the caller must hold sta->lock
175 */ 177 */
176static bool __mesh_plink_deactivate(struct sta_info *sta) 178static u32 __mesh_plink_deactivate(struct sta_info *sta)
177{ 179{
178 struct ieee80211_sub_if_data *sdata = sta->sdata; 180 struct ieee80211_sub_if_data *sdata = sta->sdata;
179 bool deactivated = false; 181 u32 changed = 0;
180 182
181 if (sta->plink_state == NL80211_PLINK_ESTAB) { 183 if (sta->plink_state == NL80211_PLINK_ESTAB)
182 mesh_plink_dec_estab_count(sdata); 184 changed = mesh_plink_dec_estab_count(sdata);
183 deactivated = true;
184 }
185 sta->plink_state = NL80211_PLINK_BLOCKED; 185 sta->plink_state = NL80211_PLINK_BLOCKED;
186 mesh_path_flush_by_nexthop(sta); 186 mesh_path_flush_by_nexthop(sta);
187 187
188 return deactivated; 188 return changed;
189} 189}
190 190
191/** 191/**
@@ -198,18 +198,17 @@ static bool __mesh_plink_deactivate(struct sta_info *sta)
198void mesh_plink_deactivate(struct sta_info *sta) 198void mesh_plink_deactivate(struct sta_info *sta)
199{ 199{
200 struct ieee80211_sub_if_data *sdata = sta->sdata; 200 struct ieee80211_sub_if_data *sdata = sta->sdata;
201 bool deactivated; 201 u32 changed;
202 202
203 spin_lock_bh(&sta->lock); 203 spin_lock_bh(&sta->lock);
204 deactivated = __mesh_plink_deactivate(sta); 204 changed = __mesh_plink_deactivate(sta);
205 sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED); 205 sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
206 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, 206 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
207 sta->sta.addr, sta->llid, sta->plid, 207 sta->sta.addr, sta->llid, sta->plid,
208 sta->reason); 208 sta->reason);
209 spin_unlock_bh(&sta->lock); 209 spin_unlock_bh(&sta->lock);
210 210
211 if (deactivated) 211 ieee80211_bss_info_change_notify(sdata, changed);
212 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
213} 212}
214 213
215static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 214static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -217,12 +216,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
217 u8 *da, __le16 llid, __le16 plid, __le16 reason) { 216 u8 *da, __le16 llid, __le16 plid, __le16 reason) {
218 struct ieee80211_local *local = sdata->local; 217 struct ieee80211_local *local = sdata->local;
219 struct sk_buff *skb; 218 struct sk_buff *skb;
219 struct ieee80211_tx_info *info;
220 struct ieee80211_mgmt *mgmt; 220 struct ieee80211_mgmt *mgmt;
221 bool include_plid = false; 221 bool include_plid = false;
222 u16 peering_proto = 0; 222 u16 peering_proto = 0;
223 u8 *pos, ie_len = 4; 223 u8 *pos, ie_len = 4;
224 int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) + 224 int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
225 sizeof(mgmt->u.action.u.self_prot); 225 sizeof(mgmt->u.action.u.self_prot);
226 int err = -ENOMEM;
226 227
227 skb = dev_alloc_skb(local->tx_headroom + 228 skb = dev_alloc_skb(local->tx_headroom +
228 hdr_len + 229 hdr_len +
@@ -238,6 +239,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
238 sdata->u.mesh.ie_len); 239 sdata->u.mesh.ie_len);
239 if (!skb) 240 if (!skb)
240 return -1; 241 return -1;
242 info = IEEE80211_SKB_CB(skb);
241 skb_reserve(skb, local->tx_headroom); 243 skb_reserve(skb, local->tx_headroom);
242 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 244 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
243 memset(mgmt, 0, hdr_len); 245 memset(mgmt, 0, hdr_len);
@@ -258,15 +260,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
258 pos = skb_put(skb, 2); 260 pos = skb_put(skb, 2);
259 memcpy(pos + 2, &plid, 2); 261 memcpy(pos + 2, &plid, 2);
260 } 262 }
261 if (ieee80211_add_srates_ie(sdata, skb, true) || 263 if (ieee80211_add_srates_ie(sdata, skb, true,
262 ieee80211_add_ext_srates_ie(sdata, skb, true) || 264 local->oper_channel->band) ||
265 ieee80211_add_ext_srates_ie(sdata, skb, true,
266 local->oper_channel->band) ||
263 mesh_add_rsn_ie(skb, sdata) || 267 mesh_add_rsn_ie(skb, sdata) ||
264 mesh_add_meshid_ie(skb, sdata) || 268 mesh_add_meshid_ie(skb, sdata) ||
265 mesh_add_meshconf_ie(skb, sdata)) 269 mesh_add_meshconf_ie(skb, sdata))
266 return -1; 270 goto free;
267 } else { /* WLAN_SP_MESH_PEERING_CLOSE */ 271 } else { /* WLAN_SP_MESH_PEERING_CLOSE */
272 info->flags |= IEEE80211_TX_CTL_NO_ACK;
268 if (mesh_add_meshid_ie(skb, sdata)) 273 if (mesh_add_meshid_ie(skb, sdata))
269 return -1; 274 goto free;
270 } 275 }
271 276
272 /* Add Mesh Peering Management element */ 277 /* Add Mesh Peering Management element */
@@ -285,11 +290,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
285 ie_len += 2; /* reason code */ 290 ie_len += 2; /* reason code */
286 break; 291 break;
287 default: 292 default:
288 return -EINVAL; 293 err = -EINVAL;
294 goto free;
289 } 295 }
290 296
291 if (WARN_ON(skb_tailroom(skb) < 2 + ie_len)) 297 if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
292 return -ENOMEM; 298 goto free;
293 299
294 pos = skb_put(skb, 2 + ie_len); 300 pos = skb_put(skb, 2 + ie_len);
295 *pos++ = WLAN_EID_PEER_MGMT; 301 *pos++ = WLAN_EID_PEER_MGMT;
@@ -310,14 +316,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
310 if (action != WLAN_SP_MESH_PEERING_CLOSE) { 316 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
311 if (mesh_add_ht_cap_ie(skb, sdata) || 317 if (mesh_add_ht_cap_ie(skb, sdata) ||
312 mesh_add_ht_oper_ie(skb, sdata)) 318 mesh_add_ht_oper_ie(skb, sdata))
313 return -1; 319 goto free;
314 } 320 }
315 321
316 if (mesh_add_vendor_ies(skb, sdata)) 322 if (mesh_add_vendor_ies(skb, sdata))
317 return -1; 323 goto free;
318 324
319 ieee80211_tx_skb(sdata, skb); 325 ieee80211_tx_skb(sdata, skb);
320 return 0; 326 return 0;
327free:
328 kfree_skb(skb);
329 return err;
321} 330}
322 331
323/** 332/**
@@ -362,9 +371,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
362 371
363 spin_lock_bh(&sta->lock); 372 spin_lock_bh(&sta->lock);
364 sta->last_rx = jiffies; 373 sta->last_rx = jiffies;
374 if (sta->plink_state == NL80211_PLINK_ESTAB) {
375 spin_unlock_bh(&sta->lock);
376 return sta;
377 }
378
365 sta->sta.supp_rates[band] = rates; 379 sta->sta.supp_rates[band] = rates;
366 if (elems->ht_cap_elem && 380 if (elems->ht_cap_elem &&
367 sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT) 381 sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
368 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 382 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
369 elems->ht_cap_elem, 383 elems->ht_cap_elem,
370 &sta->sta.ht_cap); 384 &sta->sta.ht_cap);
@@ -523,7 +537,8 @@ int mesh_plink_open(struct sta_info *sta)
523 spin_lock_bh(&sta->lock); 537 spin_lock_bh(&sta->lock);
524 get_random_bytes(&llid, 2); 538 get_random_bytes(&llid, 2);
525 sta->llid = llid; 539 sta->llid = llid;
526 if (sta->plink_state != NL80211_PLINK_LISTEN) { 540 if (sta->plink_state != NL80211_PLINK_LISTEN &&
541 sta->plink_state != NL80211_PLINK_BLOCKED) {
527 spin_unlock_bh(&sta->lock); 542 spin_unlock_bh(&sta->lock);
528 return -EBUSY; 543 return -EBUSY;
529 } 544 }
@@ -541,15 +556,14 @@ int mesh_plink_open(struct sta_info *sta)
541void mesh_plink_block(struct sta_info *sta) 556void mesh_plink_block(struct sta_info *sta)
542{ 557{
543 struct ieee80211_sub_if_data *sdata = sta->sdata; 558 struct ieee80211_sub_if_data *sdata = sta->sdata;
544 bool deactivated; 559 u32 changed;
545 560
546 spin_lock_bh(&sta->lock); 561 spin_lock_bh(&sta->lock);
547 deactivated = __mesh_plink_deactivate(sta); 562 changed = __mesh_plink_deactivate(sta);
548 sta->plink_state = NL80211_PLINK_BLOCKED; 563 sta->plink_state = NL80211_PLINK_BLOCKED;
549 spin_unlock_bh(&sta->lock); 564 spin_unlock_bh(&sta->lock);
550 565
551 if (deactivated) 566 ieee80211_bss_info_change_notify(sdata, changed);
552 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
553} 567}
554 568
555 569
@@ -852,9 +866,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
852 del_timer(&sta->plink_timer); 866 del_timer(&sta->plink_timer);
853 sta->plink_state = NL80211_PLINK_ESTAB; 867 sta->plink_state = NL80211_PLINK_ESTAB;
854 spin_unlock_bh(&sta->lock); 868 spin_unlock_bh(&sta->lock);
855 mesh_plink_inc_estab_count(sdata); 869 changed |= mesh_plink_inc_estab_count(sdata);
856 changed |= mesh_set_ht_prot_mode(sdata); 870 changed |= mesh_set_ht_prot_mode(sdata);
857 changed |= BSS_CHANGED_BEACON;
858 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", 871 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
859 sta->sta.addr); 872 sta->sta.addr);
860 break; 873 break;
@@ -888,9 +901,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
888 del_timer(&sta->plink_timer); 901 del_timer(&sta->plink_timer);
889 sta->plink_state = NL80211_PLINK_ESTAB; 902 sta->plink_state = NL80211_PLINK_ESTAB;
890 spin_unlock_bh(&sta->lock); 903 spin_unlock_bh(&sta->lock);
891 mesh_plink_inc_estab_count(sdata); 904 changed |= mesh_plink_inc_estab_count(sdata);
892 changed |= mesh_set_ht_prot_mode(sdata); 905 changed |= mesh_set_ht_prot_mode(sdata);
893 changed |= BSS_CHANGED_BEACON;
894 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", 906 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
895 sta->sta.addr); 907 sta->sta.addr);
896 mesh_plink_frame_tx(sdata, 908 mesh_plink_frame_tx(sdata,
@@ -908,13 +920,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
908 case CLS_ACPT: 920 case CLS_ACPT:
909 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE); 921 reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
910 sta->reason = reason; 922 sta->reason = reason;
911 __mesh_plink_deactivate(sta); 923 changed |= __mesh_plink_deactivate(sta);
912 sta->plink_state = NL80211_PLINK_HOLDING; 924 sta->plink_state = NL80211_PLINK_HOLDING;
913 llid = sta->llid; 925 llid = sta->llid;
914 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 926 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
915 spin_unlock_bh(&sta->lock); 927 spin_unlock_bh(&sta->lock);
916 changed |= mesh_set_ht_prot_mode(sdata); 928 changed |= mesh_set_ht_prot_mode(sdata);
917 changed |= BSS_CHANGED_BEACON;
918 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, 929 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
919 sta->sta.addr, llid, plid, reason); 930 sta->sta.addr, llid, plid, reason);
920 break; 931 break;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f76b83341cf9..e714ed8bb198 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -88,8 +88,6 @@ MODULE_PARM_DESC(probe_wait_ms,
88#define TMR_RUNNING_TIMER 0 88#define TMR_RUNNING_TIMER 0
89#define TMR_RUNNING_CHANSW 1 89#define TMR_RUNNING_CHANSW 1
90 90
91#define DEAUTH_DISASSOC_LEN (24 /* hdr */ + 2 /* reason */)
92
93/* 91/*
94 * All cfg80211 functions have to be called outside a locked 92 * All cfg80211 functions have to be called outside a locked
95 * section so that they can acquire a lock themselves... This 93 * section so that they can acquire a lock themselves... This
@@ -146,6 +144,9 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
146 if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) 144 if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
147 return; 145 return;
148 146
147 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
148 return;
149
149 mod_timer(&sdata->u.mgd.bcn_mon_timer, 150 mod_timer(&sdata->u.mgd.bcn_mon_timer,
150 round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout)); 151 round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
151} 152}
@@ -182,15 +183,15 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
182 u16 ht_opmode; 183 u16 ht_opmode;
183 bool disable_40 = false; 184 bool disable_40 = false;
184 185
185 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 186 sband = local->hw.wiphy->bands[local->oper_channel->band];
186 187
187 switch (sdata->vif.bss_conf.channel_type) { 188 switch (sdata->vif.bss_conf.channel_type) {
188 case NL80211_CHAN_HT40PLUS: 189 case NL80211_CHAN_HT40PLUS:
189 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS) 190 if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
190 disable_40 = true; 191 disable_40 = true;
191 break; 192 break;
192 case NL80211_CHAN_HT40MINUS: 193 case NL80211_CHAN_HT40MINUS:
193 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS) 194 if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
194 disable_40 = true; 195 disable_40 = true;
195 break; 196 break;
196 default: 197 default:
@@ -326,6 +327,26 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
326 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); 327 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
327} 328}
328 329
330static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
331 struct sk_buff *skb,
332 struct ieee80211_supported_band *sband)
333{
334 u8 *pos;
335 u32 cap;
336 struct ieee80211_sta_vht_cap vht_cap;
337
338 BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
339
340 memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
341
342 /* determine capability flags */
343 cap = vht_cap.cap;
344
345 /* reserve and fill IE */
346 pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
347 ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
348}
349
329static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 350static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
330{ 351{
331 struct ieee80211_local *local = sdata->local; 352 struct ieee80211_local *local = sdata->local;
@@ -371,6 +392,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
371 4 + /* power capability */ 392 4 + /* power capability */
372 2 + 2 * sband->n_channels + /* supported channels */ 393 2 + 2 * sband->n_channels + /* supported channels */
373 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ 394 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
395 2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
374 assoc_data->ie_len + /* extra IEs */ 396 assoc_data->ie_len + /* extra IEs */
375 9, /* WMM */ 397 9, /* WMM */
376 GFP_KERNEL); 398 GFP_KERNEL);
@@ -503,6 +525,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
503 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param, 525 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
504 sband, local->oper_channel, ifmgd->ap_smps); 526 sband, local->oper_channel, ifmgd->ap_smps);
505 527
528 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
529 ieee80211_add_vht_ie(sdata, skb, sband);
530
506 /* if present, add any custom non-vendor IEs that go after HT */ 531 /* if present, add any custom non-vendor IEs that go after HT */
507 if (assoc_data->ie_len && assoc_data->ie) { 532 if (assoc_data->ie_len && assoc_data->ie) {
508 noffset = ieee80211_ie_split_vendor(assoc_data->ie, 533 noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -547,48 +572,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
547 ieee80211_tx_skb(sdata, skb); 572 ieee80211_tx_skb(sdata, skb);
548} 573}
549 574
550static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
551 const u8 *bssid, u16 stype,
552 u16 reason, bool send_frame,
553 u8 *frame_buf)
554{
555 struct ieee80211_local *local = sdata->local;
556 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
557 struct sk_buff *skb;
558 struct ieee80211_mgmt *mgmt = (void *)frame_buf;
559
560 /* build frame */
561 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
562 mgmt->duration = 0; /* initialize only */
563 mgmt->seq_ctrl = 0; /* initialize only */
564 memcpy(mgmt->da, bssid, ETH_ALEN);
565 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
566 memcpy(mgmt->bssid, bssid, ETH_ALEN);
567 /* u.deauth.reason_code == u.disassoc.reason_code */
568 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
569
570 if (send_frame) {
571 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
572 DEAUTH_DISASSOC_LEN);
573 if (!skb)
574 return;
575
576 skb_reserve(skb, local->hw.extra_tx_headroom);
577
578 /* copy in frame */
579 memcpy(skb_put(skb, DEAUTH_DISASSOC_LEN),
580 mgmt, DEAUTH_DISASSOC_LEN);
581
582 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
583 IEEE80211_SKB_CB(skb)->flags |=
584 IEEE80211_TX_INTFL_DONT_ENCRYPT;
585
586 drv_mgd_prepare_tx(local, sdata);
587
588 ieee80211_tx_skb(sdata, skb);
589 }
590}
591
592void ieee80211_send_pspoll(struct ieee80211_local *local, 575void ieee80211_send_pspoll(struct ieee80211_local *local,
593 struct ieee80211_sub_if_data *sdata) 576 struct ieee80211_sub_if_data *sdata)
594{ 577{
@@ -687,6 +670,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
687 /* XXX: shouldn't really modify cfg80211-owned data! */ 670 /* XXX: shouldn't really modify cfg80211-owned data! */
688 ifmgd->associated->channel = sdata->local->oper_channel; 671 ifmgd->associated->channel = sdata->local->oper_channel;
689 672
673 /* XXX: wait for a beacon first? */
690 ieee80211_wake_queues_by_reason(&sdata->local->hw, 674 ieee80211_wake_queues_by_reason(&sdata->local->hw,
691 IEEE80211_QUEUE_STOP_REASON_CSA); 675 IEEE80211_QUEUE_STOP_REASON_CSA);
692 out: 676 out:
@@ -704,16 +688,13 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
704 688
705 trace_api_chswitch_done(sdata, success); 689 trace_api_chswitch_done(sdata, success);
706 if (!success) { 690 if (!success) {
707 /* 691 sdata_info(sdata,
708 * If the channel switch was not successful, stay 692 "driver channel switch failed, disconnecting\n");
709 * around on the old channel. We currently lack 693 ieee80211_queue_work(&sdata->local->hw,
710 * good handling of this situation, possibly we 694 &ifmgd->csa_connection_drop_work);
711 * should just drop the association. 695 } else {
712 */ 696 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
713 sdata->local->csa_channel = sdata->local->oper_channel;
714 } 697 }
715
716 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
717} 698}
718EXPORT_SYMBOL(ieee80211_chswitch_done); 699EXPORT_SYMBOL(ieee80211_chswitch_done);
719 700
@@ -758,61 +739,111 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
758 return; 739 return;
759 740
760 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); 741 new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
761 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) 742 if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) {
743 sdata_info(sdata,
744 "AP %pM switches to unsupported channel (%d MHz), disconnecting\n",
745 ifmgd->associated->bssid, new_freq);
746 ieee80211_queue_work(&sdata->local->hw,
747 &ifmgd->csa_connection_drop_work);
762 return; 748 return;
749 }
763 750
764 sdata->local->csa_channel = new_ch; 751 sdata->local->csa_channel = new_ch;
765 752
753 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
754
755 if (sw_elem->mode)
756 ieee80211_stop_queues_by_reason(&sdata->local->hw,
757 IEEE80211_QUEUE_STOP_REASON_CSA);
758
766 if (sdata->local->ops->channel_switch) { 759 if (sdata->local->ops->channel_switch) {
767 /* use driver's channel switch callback */ 760 /* use driver's channel switch callback */
768 struct ieee80211_channel_switch ch_switch; 761 struct ieee80211_channel_switch ch_switch = {
769 memset(&ch_switch, 0, sizeof(ch_switch)); 762 .timestamp = timestamp,
770 ch_switch.timestamp = timestamp; 763 .block_tx = sw_elem->mode,
771 if (sw_elem->mode) { 764 .channel = new_ch,
772 ch_switch.block_tx = true; 765 .count = sw_elem->count,
773 ieee80211_stop_queues_by_reason(&sdata->local->hw, 766 };
774 IEEE80211_QUEUE_STOP_REASON_CSA); 767
775 }
776 ch_switch.channel = new_ch;
777 ch_switch.count = sw_elem->count;
778 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
779 drv_channel_switch(sdata->local, &ch_switch); 768 drv_channel_switch(sdata->local, &ch_switch);
780 return; 769 return;
781 } 770 }
782 771
783 /* channel switch handled in software */ 772 /* channel switch handled in software */
784 if (sw_elem->count <= 1) { 773 if (sw_elem->count <= 1)
785 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); 774 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
786 } else { 775 else
787 if (sw_elem->mode)
788 ieee80211_stop_queues_by_reason(&sdata->local->hw,
789 IEEE80211_QUEUE_STOP_REASON_CSA);
790 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
791 mod_timer(&ifmgd->chswitch_timer, 776 mod_timer(&ifmgd->chswitch_timer,
792 jiffies + 777 TU_TO_EXP_TIME(sw_elem->count *
793 msecs_to_jiffies(sw_elem->count * 778 cbss->beacon_interval));
794 cbss->beacon_interval));
795 }
796} 779}
797 780
798static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, 781static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
799 u16 capab_info, u8 *pwr_constr_elem, 782 struct ieee80211_channel *channel,
800 u8 pwr_constr_elem_len) 783 const u8 *country_ie, u8 country_ie_len,
784 const u8 *pwr_constr_elem)
801{ 785{
802 struct ieee80211_conf *conf = &sdata->local->hw.conf; 786 struct ieee80211_country_ie_triplet *triplet;
787 int chan = ieee80211_frequency_to_channel(channel->center_freq);
788 int i, chan_pwr, chan_increment, new_ap_level;
789 bool have_chan_pwr = false;
803 790
804 if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT)) 791 /* Invalid IE */
792 if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
805 return; 793 return;
806 794
807 /* Power constraint IE length should be 1 octet */ 795 triplet = (void *)(country_ie + 3);
808 if (pwr_constr_elem_len != 1) 796 country_ie_len -= 3;
809 return; 797
798 switch (channel->band) {
799 default:
800 WARN_ON_ONCE(1);
801 /* fall through */
802 case IEEE80211_BAND_2GHZ:
803 case IEEE80211_BAND_60GHZ:
804 chan_increment = 1;
805 break;
806 case IEEE80211_BAND_5GHZ:
807 chan_increment = 4;
808 break;
809 }
810
811 /* find channel */
812 while (country_ie_len >= 3) {
813 u8 first_channel = triplet->chans.first_channel;
810 814
811 if ((*pwr_constr_elem <= conf->channel->max_reg_power) && 815 if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID)
812 (*pwr_constr_elem != sdata->local->power_constr_level)) { 816 goto next;
813 sdata->local->power_constr_level = *pwr_constr_elem; 817
814 ieee80211_hw_config(sdata->local, 0); 818 for (i = 0; i < triplet->chans.num_channels; i++) {
819 if (first_channel + i * chan_increment == chan) {
820 have_chan_pwr = true;
821 chan_pwr = triplet->chans.max_power;
822 break;
823 }
824 }
825 if (have_chan_pwr)
826 break;
827
828 next:
829 triplet++;
830 country_ie_len -= 3;
815 } 831 }
832
833 if (!have_chan_pwr)
834 return;
835
836 new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem);
837
838 if (sdata->local->ap_power_level == new_ap_level)
839 return;
840
841 sdata_info(sdata,
842 "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
843 new_ap_level, chan_pwr, *pwr_constr_elem,
844 sdata->u.mgd.bssid);
845 sdata->local->ap_power_level = new_ap_level;
846 ieee80211_hw_config(sdata->local, 0);
816} 847}
817 848
818void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif) 849void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
@@ -1007,6 +1038,16 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1007 ieee80211_change_ps(local); 1038 ieee80211_change_ps(local);
1008} 1039}
1009 1040
1041void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
1042{
1043 bool ps_allowed = ieee80211_powersave_allowed(sdata);
1044
1045 if (sdata->vif.bss_conf.ps != ps_allowed) {
1046 sdata->vif.bss_conf.ps = ps_allowed;
1047 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
1048 }
1049}
1050
1010void ieee80211_dynamic_ps_disable_work(struct work_struct *work) 1051void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
1011{ 1052{
1012 struct ieee80211_local *local = 1053 struct ieee80211_local *local =
@@ -1239,7 +1280,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
1239 } 1280 }
1240 1281
1241 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 1282 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
1242 if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) 1283 if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
1243 use_short_slot = true; 1284 use_short_slot = true;
1244 1285
1245 if (use_protection != bss_conf->use_cts_prot) { 1286 if (use_protection != bss_conf->use_cts_prot) {
@@ -1307,9 +1348,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1307 1348
1308 mutex_lock(&local->iflist_mtx); 1349 mutex_lock(&local->iflist_mtx);
1309 ieee80211_recalc_ps(local, -1); 1350 ieee80211_recalc_ps(local, -1);
1310 ieee80211_recalc_smps(local);
1311 mutex_unlock(&local->iflist_mtx); 1351 mutex_unlock(&local->iflist_mtx);
1312 1352
1353 ieee80211_recalc_smps(local);
1354 ieee80211_recalc_ps_vif(sdata);
1355
1313 netif_tx_start_all_queues(sdata->dev); 1356 netif_tx_start_all_queues(sdata->dev);
1314 netif_carrier_on(sdata->dev); 1357 netif_carrier_on(sdata->dev);
1315} 1358}
@@ -1356,7 +1399,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1356 sta = sta_info_get(sdata, ifmgd->bssid); 1399 sta = sta_info_get(sdata, ifmgd->bssid);
1357 if (sta) { 1400 if (sta) {
1358 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 1401 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1359 ieee80211_sta_tear_down_BA_sessions(sta, tx); 1402 ieee80211_sta_tear_down_BA_sessions(sta, false);
1360 } 1403 }
1361 mutex_unlock(&local->sta_mtx); 1404 mutex_unlock(&local->sta_mtx);
1362 1405
@@ -1371,6 +1414,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1371 } 1414 }
1372 local->ps_sdata = NULL; 1415 local->ps_sdata = NULL;
1373 1416
1417 /* disable per-vif ps */
1418 ieee80211_recalc_ps_vif(sdata);
1419
1374 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */ 1420 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
1375 if (tx) 1421 if (tx)
1376 drv_flush(local, false); 1422 drv_flush(local, false);
@@ -1401,7 +1447,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1401 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); 1447 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
1402 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); 1448 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
1403 1449
1404 local->power_constr_level = 0; 1450 local->ap_power_level = 0;
1405 1451
1406 del_timer_sync(&local->dynamic_ps_timer); 1452 del_timer_sync(&local->dynamic_ps_timer);
1407 cancel_work_sync(&local->dynamic_ps_enable_work); 1453 cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -1542,7 +1588,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1542 ssid_len = ssid[1]; 1588 ssid_len = ssid[1];
1543 1589
1544 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, 1590 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1545 0, (u32) -1, true, false); 1591 0, (u32) -1, true, false,
1592 ifmgd->associated->channel);
1546 } 1593 }
1547 1594
1548 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1595 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1645,19 +1692,21 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1645 ssid_len = ssid[1]; 1692 ssid_len = ssid[1];
1646 1693
1647 skb = ieee80211_build_probe_req(sdata, cbss->bssid, 1694 skb = ieee80211_build_probe_req(sdata, cbss->bssid,
1648 (u32) -1, ssid + 2, ssid_len, 1695 (u32) -1,
1696 sdata->local->oper_channel,
1697 ssid + 2, ssid_len,
1649 NULL, 0, true); 1698 NULL, 0, true);
1650 1699
1651 return skb; 1700 return skb;
1652} 1701}
1653EXPORT_SYMBOL(ieee80211_ap_probereq_get); 1702EXPORT_SYMBOL(ieee80211_ap_probereq_get);
1654 1703
1655static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) 1704static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
1705 bool transmit_frame)
1656{ 1706{
1657 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1707 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1658 struct ieee80211_local *local = sdata->local; 1708 struct ieee80211_local *local = sdata->local;
1659 u8 bssid[ETH_ALEN]; 1709 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
1660 u8 frame_buf[DEAUTH_DISASSOC_LEN];
1661 1710
1662 mutex_lock(&ifmgd->mtx); 1711 mutex_lock(&ifmgd->mtx);
1663 if (!ifmgd->associated) { 1712 if (!ifmgd->associated) {
@@ -1665,27 +1714,24 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1665 return; 1714 return;
1666 } 1715 }
1667 1716
1668 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1669
1670 sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
1671
1672 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 1717 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1673 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 1718 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1674 false, frame_buf); 1719 transmit_frame, frame_buf);
1720 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
1675 mutex_unlock(&ifmgd->mtx); 1721 mutex_unlock(&ifmgd->mtx);
1676 1722
1677 /* 1723 /*
1678 * must be outside lock due to cfg80211, 1724 * must be outside lock due to cfg80211,
1679 * but that's not a problem. 1725 * but that's not a problem.
1680 */ 1726 */
1681 cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN); 1727 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
1682 1728
1683 mutex_lock(&local->mtx); 1729 mutex_lock(&local->mtx);
1684 ieee80211_recalc_idle(local); 1730 ieee80211_recalc_idle(local);
1685 mutex_unlock(&local->mtx); 1731 mutex_unlock(&local->mtx);
1686} 1732}
1687 1733
1688void ieee80211_beacon_connection_loss_work(struct work_struct *work) 1734static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
1689{ 1735{
1690 struct ieee80211_sub_if_data *sdata = 1736 struct ieee80211_sub_if_data *sdata =
1691 container_of(work, struct ieee80211_sub_if_data, 1737 container_of(work, struct ieee80211_sub_if_data,
@@ -1701,10 +1747,24 @@ void ieee80211_beacon_connection_loss_work(struct work_struct *work)
1701 rcu_read_unlock(); 1747 rcu_read_unlock();
1702 } 1748 }
1703 1749
1704 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 1750 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) {
1705 __ieee80211_connection_loss(sdata); 1751 sdata_info(sdata, "Connection to AP %pM lost\n",
1706 else 1752 ifmgd->bssid);
1753 __ieee80211_disconnect(sdata, false);
1754 } else {
1707 ieee80211_mgd_probe_ap(sdata, true); 1755 ieee80211_mgd_probe_ap(sdata, true);
1756 }
1757}
1758
1759static void ieee80211_csa_connection_drop_work(struct work_struct *work)
1760{
1761 struct ieee80211_sub_if_data *sdata =
1762 container_of(work, struct ieee80211_sub_if_data,
1763 u.mgd.csa_connection_drop_work);
1764
1765 ieee80211_wake_queues_by_reason(&sdata->local->hw,
1766 IEEE80211_QUEUE_STOP_REASON_CSA);
1767 __ieee80211_disconnect(sdata, true);
1708} 1768}
1709 1769
1710void ieee80211_beacon_loss(struct ieee80211_vif *vif) 1770void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -2232,14 +2292,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2232 mutex_unlock(&local->iflist_mtx); 2292 mutex_unlock(&local->iflist_mtx);
2233 } 2293 }
2234 2294
2235 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && 2295 if (elems->ch_switch_ie &&
2236 (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, 2296 memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
2237 ETH_ALEN) == 0)) { 2297 ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
2238 struct ieee80211_channel_sw_ie *sw_elem =
2239 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
2240 ieee80211_sta_process_chanswitch(sdata, sw_elem,
2241 bss, rx_status->mactime); 2298 bss, rx_status->mactime);
2242 }
2243} 2299}
2244 2300
2245 2301
@@ -2326,7 +2382,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2326 if (baselen > len) 2382 if (baselen > len)
2327 return; 2383 return;
2328 2384
2329 if (rx_status->freq != local->hw.conf.channel->center_freq) 2385 if (rx_status->freq != local->oper_channel->center_freq)
2330 return; 2386 return;
2331 2387
2332 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && 2388 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2490,21 +2546,19 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2490 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { 2546 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
2491 struct ieee80211_supported_band *sband; 2547 struct ieee80211_supported_band *sband;
2492 2548
2493 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2549 sband = local->hw.wiphy->bands[local->oper_channel->band];
2494 2550
2495 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, 2551 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2496 bssid, true); 2552 bssid, true);
2497 } 2553 }
2498 2554
2499 /* Note: country IE parsing is done for us by cfg80211 */ 2555 if (elems.country_elem && elems.pwr_constr_elem &&
2500 if (elems.country_elem) { 2556 mgmt->u.probe_resp.capab_info &
2501 /* TODO: IBSS also needs this */ 2557 cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT))
2502 if (elems.pwr_constr_elem) 2558 ieee80211_handle_pwr_constr(sdata, local->oper_channel,
2503 ieee80211_handle_pwr_constr(sdata, 2559 elems.country_elem,
2504 le16_to_cpu(mgmt->u.probe_resp.capab_info), 2560 elems.country_elem_len,
2505 elems.pwr_constr_elem, 2561 elems.pwr_constr_elem);
2506 elems.pwr_constr_elem_len);
2507 }
2508 2562
2509 ieee80211_bss_info_change_notify(sdata, changed); 2563 ieee80211_bss_info_change_notify(sdata, changed);
2510} 2564}
@@ -2601,7 +2655,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2601{ 2655{
2602 struct ieee80211_local *local = sdata->local; 2656 struct ieee80211_local *local = sdata->local;
2603 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2657 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2604 u8 frame_buf[DEAUTH_DISASSOC_LEN]; 2658 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2605 2659
2606 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, 2660 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
2607 false, frame_buf); 2661 false, frame_buf);
@@ -2611,7 +2665,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2611 * must be outside lock due to cfg80211, 2665 * must be outside lock due to cfg80211,
2612 * but that's not a problem. 2666 * but that's not a problem.
2613 */ 2667 */
2614 cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN); 2668 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
2615 2669
2616 mutex_lock(&local->mtx); 2670 mutex_lock(&local->mtx);
2617 ieee80211_recalc_idle(local); 2671 ieee80211_recalc_idle(local);
@@ -2673,7 +2727,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2673 * will not answer to direct packet in unassociated state. 2727 * will not answer to direct packet in unassociated state.
2674 */ 2728 */
2675 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], 2729 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2676 NULL, 0, (u32) -1, true, false); 2730 NULL, 0, (u32) -1, true, false,
2731 auth_data->bss->channel);
2677 } 2732 }
2678 2733
2679 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 2734 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -2894,6 +2949,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
2894 2949
2895 cancel_work_sync(&ifmgd->monitor_work); 2950 cancel_work_sync(&ifmgd->monitor_work);
2896 cancel_work_sync(&ifmgd->beacon_connection_loss_work); 2951 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
2952 cancel_work_sync(&ifmgd->csa_connection_drop_work);
2897 if (del_timer_sync(&ifmgd->timer)) 2953 if (del_timer_sync(&ifmgd->timer))
2898 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 2954 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
2899 2955
@@ -2950,6 +3006,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2950 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 3006 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
2951 INIT_WORK(&ifmgd->beacon_connection_loss_work, 3007 INIT_WORK(&ifmgd->beacon_connection_loss_work,
2952 ieee80211_beacon_connection_loss_work); 3008 ieee80211_beacon_connection_loss_work);
3009 INIT_WORK(&ifmgd->csa_connection_drop_work,
3010 ieee80211_csa_connection_drop_work);
2953 INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work); 3011 INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
2954 setup_timer(&ifmgd->timer, ieee80211_sta_timer, 3012 setup_timer(&ifmgd->timer, ieee80211_sta_timer,
2955 (unsigned long) sdata); 3013 (unsigned long) sdata);
@@ -3000,41 +3058,17 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3000 return 0; 3058 return 0;
3001} 3059}
3002 3060
3003static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, 3061static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3004 struct cfg80211_bss *cbss, bool assoc) 3062 struct cfg80211_bss *cbss)
3005{ 3063{
3006 struct ieee80211_local *local = sdata->local; 3064 struct ieee80211_local *local = sdata->local;
3007 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3065 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3008 struct ieee80211_bss *bss = (void *)cbss->priv;
3009 struct sta_info *sta = NULL;
3010 bool have_sta = false;
3011 int err;
3012 int ht_cfreq; 3066 int ht_cfreq;
3013 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 3067 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
3014 const u8 *ht_oper_ie; 3068 const u8 *ht_oper_ie;
3015 const struct ieee80211_ht_operation *ht_oper = NULL; 3069 const struct ieee80211_ht_operation *ht_oper = NULL;
3016 struct ieee80211_supported_band *sband; 3070 struct ieee80211_supported_band *sband;
3017 3071
3018 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
3019 return -EINVAL;
3020
3021 if (assoc) {
3022 rcu_read_lock();
3023 have_sta = sta_info_get(sdata, cbss->bssid);
3024 rcu_read_unlock();
3025 }
3026
3027 if (!have_sta) {
3028 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
3029 if (!sta)
3030 return -ENOMEM;
3031 }
3032
3033 mutex_lock(&local->mtx);
3034 ieee80211_recalc_idle(sdata->local);
3035 mutex_unlock(&local->mtx);
3036
3037 /* switch to the right channel */
3038 sband = local->hw.wiphy->bands[cbss->channel->band]; 3072 sband = local->hw.wiphy->bands[cbss->channel->band];
3039 3073
3040 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ; 3074 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3097,10 +3131,51 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3097 local->oper_channel = cbss->channel; 3131 local->oper_channel = cbss->channel;
3098 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 3132 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
3099 3133
3100 if (sta) { 3134 return 0;
3135}
3136
3137static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3138 struct cfg80211_bss *cbss, bool assoc)
3139{
3140 struct ieee80211_local *local = sdata->local;
3141 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3142 struct ieee80211_bss *bss = (void *)cbss->priv;
3143 struct sta_info *new_sta = NULL;
3144 bool have_sta = false;
3145 int err;
3146
3147 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
3148 return -EINVAL;
3149
3150 if (assoc) {
3151 rcu_read_lock();
3152 have_sta = sta_info_get(sdata, cbss->bssid);
3153 rcu_read_unlock();
3154 }
3155
3156 if (!have_sta) {
3157 new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
3158 if (!new_sta)
3159 return -ENOMEM;
3160 }
3161
3162 mutex_lock(&local->mtx);
3163 ieee80211_recalc_idle(sdata->local);
3164 mutex_unlock(&local->mtx);
3165
3166 if (new_sta) {
3101 u32 rates = 0, basic_rates = 0; 3167 u32 rates = 0, basic_rates = 0;
3102 bool have_higher_than_11mbit; 3168 bool have_higher_than_11mbit;
3103 int min_rate = INT_MAX, min_rate_index = -1; 3169 int min_rate = INT_MAX, min_rate_index = -1;
3170 struct ieee80211_supported_band *sband;
3171
3172 sband = local->hw.wiphy->bands[cbss->channel->band];
3173
3174 err = ieee80211_prep_channel(sdata, cbss);
3175 if (err) {
3176 sta_info_free(local, new_sta);
3177 return err;
3178 }
3104 3179
3105 ieee80211_get_rates(sband, bss->supp_rates, 3180 ieee80211_get_rates(sband, bss->supp_rates,
3106 bss->supp_rates_len, 3181 bss->supp_rates_len,
@@ -3122,7 +3197,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3122 basic_rates = BIT(min_rate_index); 3197 basic_rates = BIT(min_rate_index);
3123 } 3198 }
3124 3199
3125 sta->sta.supp_rates[cbss->channel->band] = rates; 3200 new_sta->sta.supp_rates[cbss->channel->band] = rates;
3126 sdata->vif.bss_conf.basic_rates = basic_rates; 3201 sdata->vif.bss_conf.basic_rates = basic_rates;
3127 3202
3128 /* cf. IEEE 802.11 9.2.12 */ 3203 /* cf. IEEE 802.11 9.2.12 */
@@ -3145,10 +3220,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3145 BSS_CHANGED_BEACON_INT); 3220 BSS_CHANGED_BEACON_INT);
3146 3221
3147 if (assoc) 3222 if (assoc)
3148 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 3223 sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
3149 3224
3150 err = sta_info_insert(sta); 3225 err = sta_info_insert(new_sta);
3151 sta = NULL; 3226 new_sta = NULL;
3152 if (err) { 3227 if (err) {
3153 sdata_info(sdata, 3228 sdata_info(sdata,
3154 "failed to insert STA entry for the AP (error %d)\n", 3229 "failed to insert STA entry for the AP (error %d)\n",
@@ -3302,9 +3377,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3302 } 3377 }
3303 3378
3304 /* prepare assoc data */ 3379 /* prepare assoc data */
3305 3380
3306 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 3381 /*
3307 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 3382 * keep only the 40 MHz disable bit set as it might have
3383 * been set during authentication already, all other bits
3384 * should be reset for a new connection
3385 */
3386 ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
3308 3387
3309 ifmgd->beacon_crc_valid = false; 3388 ifmgd->beacon_crc_valid = false;
3310 3389
@@ -3320,21 +3399,34 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3320 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || 3399 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
3321 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { 3400 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
3322 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3401 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3402 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3323 netdev_info(sdata->dev, 3403 netdev_info(sdata->dev,
3324 "disabling HT due to WEP/TKIP use\n"); 3404 "disabling HT/VHT due to WEP/TKIP use\n");
3325 } 3405 }
3326 } 3406 }
3327 3407
3328 if (req->flags & ASSOC_REQ_DISABLE_HT) 3408 if (req->flags & ASSOC_REQ_DISABLE_HT) {
3329 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3409 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3410 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3411 }
3330 3412
3331 /* Also disable HT if we don't support it or the AP doesn't use WMM */ 3413 /* Also disable HT if we don't support it or the AP doesn't use WMM */
3332 sband = local->hw.wiphy->bands[req->bss->channel->band]; 3414 sband = local->hw.wiphy->bands[req->bss->channel->band];
3333 if (!sband->ht_cap.ht_supported || 3415 if (!sband->ht_cap.ht_supported ||
3334 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) { 3416 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
3335 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3417 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3336 netdev_info(sdata->dev, 3418 if (!bss->wmm_used)
3337 "disabling HT as WMM/QoS is not supported\n"); 3419 netdev_info(sdata->dev,
3420 "disabling HT as WMM/QoS is not supported by the AP\n");
3421 }
3422
3423 /* disable VHT if we don't support it or the AP doesn't use WMM */
3424 if (!sband->vht_cap.vht_supported ||
3425 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
3426 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3427 if (!bss->wmm_used)
3428 netdev_info(sdata->dev,
3429 "disabling VHT as WMM/QoS is not supported by the AP\n");
3338 } 3430 }
3339 3431
3340 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); 3432 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3456,7 +3548,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
3456 struct cfg80211_deauth_request *req) 3548 struct cfg80211_deauth_request *req)
3457{ 3549{
3458 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3550 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3459 u8 frame_buf[DEAUTH_DISASSOC_LEN]; 3551 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
3460 3552
3461 mutex_lock(&ifmgd->mtx); 3553 mutex_lock(&ifmgd->mtx);
3462 3554
@@ -3471,17 +3563,21 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
3471 req->bssid, req->reason_code); 3563 req->bssid, req->reason_code);
3472 3564
3473 if (ifmgd->associated && 3565 if (ifmgd->associated &&
3474 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) 3566 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
3475 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 3567 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3476 req->reason_code, true, frame_buf); 3568 req->reason_code, true, frame_buf);
3477 else 3569 } else {
3570 drv_mgd_prepare_tx(sdata->local, sdata);
3478 ieee80211_send_deauth_disassoc(sdata, req->bssid, 3571 ieee80211_send_deauth_disassoc(sdata, req->bssid,
3479 IEEE80211_STYPE_DEAUTH, 3572 IEEE80211_STYPE_DEAUTH,
3480 req->reason_code, true, 3573 req->reason_code, true,
3481 frame_buf); 3574 frame_buf);
3575 }
3576
3482 mutex_unlock(&ifmgd->mtx); 3577 mutex_unlock(&ifmgd->mtx);
3483 3578
3484 __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN); 3579 __cfg80211_send_deauth(sdata->dev, frame_buf,
3580 IEEE80211_DEAUTH_FRAME_LEN);
3485 3581
3486 mutex_lock(&sdata->local->mtx); 3582 mutex_lock(&sdata->local->mtx);
3487 ieee80211_recalc_idle(sdata->local); 3583 ieee80211_recalc_idle(sdata->local);
@@ -3495,7 +3591,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
3495{ 3591{
3496 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3592 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3497 u8 bssid[ETH_ALEN]; 3593 u8 bssid[ETH_ALEN];
3498 u8 frame_buf[DEAUTH_DISASSOC_LEN]; 3594 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
3499 3595
3500 mutex_lock(&ifmgd->mtx); 3596 mutex_lock(&ifmgd->mtx);
3501 3597
@@ -3520,7 +3616,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
3520 frame_buf); 3616 frame_buf);
3521 mutex_unlock(&ifmgd->mtx); 3617 mutex_unlock(&ifmgd->mtx);
3522 3618
3523 __cfg80211_send_disassoc(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN); 3619 __cfg80211_send_disassoc(sdata->dev, frame_buf,
3620 IEEE80211_DEAUTH_FRAME_LEN);
3524 3621
3525 mutex_lock(&sdata->local->mtx); 3622 mutex_lock(&sdata->local->mtx);
3526 ieee80211_recalc_idle(sdata->local); 3623 ieee80211_recalc_idle(sdata->local);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 635c3250c668..83608ac16780 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -116,6 +116,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
116 if (!ieee80211_sdata_running(sdata)) 116 if (!ieee80211_sdata_running(sdata))
117 continue; 117 continue;
118 118
119 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
120 continue;
121
119 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 122 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
120 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); 123 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
121 124
@@ -144,6 +147,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
144 147
145 mutex_lock(&local->iflist_mtx); 148 mutex_lock(&local->iflist_mtx);
146 list_for_each_entry(sdata, &local->interfaces, list) { 149 list_for_each_entry(sdata, &local->interfaces, list) {
150 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
151 continue;
152
147 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 153 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
148 clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); 154 clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
149 155
@@ -227,8 +233,7 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
227 u32 dur = dep->duration; 233 u32 dur = dep->duration;
228 dep->duration = dur - roc->duration; 234 dep->duration = dur - roc->duration;
229 roc->duration = dur; 235 roc->duration = dur;
230 list_del(&dep->list); 236 list_move(&dep->list, &roc->list);
231 list_add(&dep->list, &roc->list);
232 } 237 }
233 } 238 }
234 out_unlock: 239 out_unlock:
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 6e4fd32c6617..10de668eb9f6 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -56,7 +56,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
56 if (!ref) 56 if (!ref)
57 return; 57 return;
58 58
59 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 59 sband = local->hw.wiphy->bands[local->oper_channel->band];
60 60
61 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 61 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
62 set_sta_flag(sta, WLAN_STA_RATE_CONTROL); 62 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0cb4edee6af5..61c621e9273f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -60,7 +60,9 @@ static inline int should_drop_frame(struct sk_buff *skb,
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
62 62
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
64 RX_FLAG_FAILED_PLCP_CRC |
65 RX_FLAG_AMPDU_IS_ZEROLEN))
64 return 1; 66 return 1;
65 if (unlikely(skb->len < 16 + present_fcs_len)) 67 if (unlikely(skb->len < 16 + present_fcs_len))
66 return 1; 68 return 1;
@@ -91,10 +93,17 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
91 if (status->flag & RX_FLAG_HT) /* HT info */ 93 if (status->flag & RX_FLAG_HT) /* HT info */
92 len += 3; 94 len += 3;
93 95
96 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
97 /* padding */
98 while (len & 3)
99 len++;
100 len += 8;
101 }
102
94 return len; 103 return len;
95} 104}
96 105
97/** 106/*
98 * ieee80211_add_rx_radiotap_header - add radiotap header 107 * ieee80211_add_rx_radiotap_header - add radiotap header
99 * 108 *
100 * add a radiotap header containing all the fields which the hardware provided. 109 * add a radiotap header containing all the fields which the hardware provided.
@@ -215,6 +224,37 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
215 pos++; 224 pos++;
216 *pos++ = status->rate_idx; 225 *pos++ = status->rate_idx;
217 } 226 }
227
228 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
229 u16 flags = 0;
230
231 /* ensure 4 byte alignment */
232 while ((pos - (u8 *)rthdr) & 3)
233 pos++;
234 rthdr->it_present |=
235 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
236 put_unaligned_le32(status->ampdu_reference, pos);
237 pos += 4;
238 if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
239 flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
240 if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
241 flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
242 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
243 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
244 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
245 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
246 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
247 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
248 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
249 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
250 put_unaligned_le16(flags, pos);
251 pos += 2;
252 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
253 *pos++ = status->ampdu_delimiter_crc;
254 else
255 *pos++ = 0;
256 *pos++ = 0;
257 }
218} 258}
219 259
220/* 260/*
@@ -2268,7 +2308,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2268 2308
2269 goto queue; 2309 goto queue;
2270 case WLAN_CATEGORY_SPECTRUM_MGMT: 2310 case WLAN_CATEGORY_SPECTRUM_MGMT:
2271 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 2311 if (status->band != IEEE80211_BAND_5GHZ)
2272 break; 2312 break;
2273 2313
2274 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2314 if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2772 if (!bssid) { 2812 if (!bssid) {
2773 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1)) 2813 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2774 return 0; 2814 return 0;
2775 } else if (!ieee80211_bssid_match(bssid, 2815 } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
2776 sdata->vif.addr)) {
2777 /* 2816 /*
2778 * Accept public action frames even when the 2817 * Accept public action frames even when the
2779 * BSSID doesn't match, this is used for P2P 2818 * BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2793 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2)) 2832 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
2794 return 0; 2833 return 0;
2795 break; 2834 break;
2835 case NL80211_IFTYPE_P2P_DEVICE:
2836 if (!ieee80211_is_public_action(hdr, skb->len) &&
2837 !ieee80211_is_probe_req(hdr->frame_control) &&
2838 !ieee80211_is_probe_resp(hdr->frame_control) &&
2839 !ieee80211_is_beacon(hdr->frame_control))
2840 return 0;
2841 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2842 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2843 break;
2796 default: 2844 default:
2797 /* should never get here */ 2845 /* should never get here */
2798 WARN_ON(1); 2846 WARN_ON_ONCE(1);
2799 break; 2847 break;
2800 } 2848 }
2801 2849
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 839dd9737989..c4cdbde24fd3 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -407,7 +407,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
407 enum ieee80211_band band = local->hw.conf.channel->band; 407 enum ieee80211_band band = local->hw.conf.channel->band;
408 408
409 sdata = rcu_dereference_protected(local->scan_sdata, 409 sdata = rcu_dereference_protected(local->scan_sdata,
410 lockdep_is_held(&local->mtx));; 410 lockdep_is_held(&local->mtx));
411 411
412 for (i = 0; i < local->scan_req->n_ssids; i++) 412 for (i = 0; i < local->scan_req->n_ssids; i++)
413 ieee80211_send_probe_req( 413 ieee80211_send_probe_req(
@@ -416,7 +416,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
416 local->scan_req->ssids[i].ssid_len, 416 local->scan_req->ssids[i].ssid_len,
417 local->scan_req->ie, local->scan_req->ie_len, 417 local->scan_req->ie, local->scan_req->ie_len,
418 local->scan_req->rates[band], false, 418 local->scan_req->rates[band], false,
419 local->scan_req->no_cck); 419 local->scan_req->no_cck,
420 local->hw.conf.channel);
420 421
421 /* 422 /*
422 * After sending probe requests, wait for probe responses 423 * After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
479 if (local->ops->hw_scan) { 480 if (local->ops->hw_scan) {
480 __set_bit(SCAN_HW_SCANNING, &local->scanning); 481 __set_bit(SCAN_HW_SCANNING, &local->scanning);
481 } else if ((req->n_channels == 1) && 482 } else if ((req->n_channels == 1) &&
482 (req->channels[0]->center_freq == 483 (req->channels[0] == local->oper_channel)) {
483 local->hw.conf.channel->center_freq)) { 484 /*
484 485 * If we are scanning only on the operating channel
485 /* If we are scanning only on the current channel, then 486 * then we do not need to stop normal activities
486 * we do not need to stop normal activities
487 */ 487 */
488 unsigned long next_delay; 488 unsigned long next_delay;
489 489
@@ -917,6 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
917 struct cfg80211_sched_scan_request *req) 917 struct cfg80211_sched_scan_request *req)
918{ 918{
919 struct ieee80211_local *local = sdata->local; 919 struct ieee80211_local *local = sdata->local;
920 struct ieee80211_sched_scan_ies sched_scan_ies;
920 int ret, i; 921 int ret, i;
921 922
922 mutex_lock(&local->mtx); 923 mutex_lock(&local->mtx);
@@ -935,33 +936,28 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
935 if (!local->hw.wiphy->bands[i]) 936 if (!local->hw.wiphy->bands[i])
936 continue; 937 continue;
937 938
938 local->sched_scan_ies.ie[i] = kzalloc(2 + 939 sched_scan_ies.ie[i] = kzalloc(2 + IEEE80211_MAX_SSID_LEN +
939 IEEE80211_MAX_SSID_LEN + 940 local->scan_ies_len +
940 local->scan_ies_len + 941 req->ie_len,
941 req->ie_len, 942 GFP_KERNEL);
942 GFP_KERNEL); 943 if (!sched_scan_ies.ie[i]) {
943 if (!local->sched_scan_ies.ie[i]) {
944 ret = -ENOMEM; 944 ret = -ENOMEM;
945 goto out_free; 945 goto out_free;
946 } 946 }
947 947
948 local->sched_scan_ies.len[i] = 948 sched_scan_ies.len[i] =
949 ieee80211_build_preq_ies(local, 949 ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
950 local->sched_scan_ies.ie[i],
951 req->ie, req->ie_len, i, 950 req->ie, req->ie_len, i,
952 (u32) -1, 0); 951 (u32) -1, 0);
953 } 952 }
954 953
955 ret = drv_sched_scan_start(local, sdata, req, 954 ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
956 &local->sched_scan_ies); 955 if (ret == 0)
957 if (ret == 0) {
958 rcu_assign_pointer(local->sched_scan_sdata, sdata); 956 rcu_assign_pointer(local->sched_scan_sdata, sdata);
959 goto out;
960 }
961 957
962out_free: 958out_free:
963 while (i > 0) 959 while (i > 0)
964 kfree(local->sched_scan_ies.ie[--i]); 960 kfree(sched_scan_ies.ie[--i]);
965out: 961out:
966 mutex_unlock(&local->mtx); 962 mutex_unlock(&local->mtx);
967 return ret; 963 return ret;
@@ -970,7 +966,7 @@ out:
970int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) 966int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
971{ 967{
972 struct ieee80211_local *local = sdata->local; 968 struct ieee80211_local *local = sdata->local;
973 int ret = 0, i; 969 int ret = 0;
974 970
975 mutex_lock(&local->mtx); 971 mutex_lock(&local->mtx);
976 972
@@ -979,12 +975,9 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
979 goto out; 975 goto out;
980 } 976 }
981 977
982 if (rcu_access_pointer(local->sched_scan_sdata)) { 978 if (rcu_access_pointer(local->sched_scan_sdata))
983 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
984 kfree(local->sched_scan_ies.ie[i]);
985
986 drv_sched_scan_stop(local, sdata); 979 drv_sched_scan_stop(local, sdata);
987 } 980
988out: 981out:
989 mutex_unlock(&local->mtx); 982 mutex_unlock(&local->mtx);
990 983
@@ -1006,7 +999,6 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1006 struct ieee80211_local *local = 999 struct ieee80211_local *local =
1007 container_of(work, struct ieee80211_local, 1000 container_of(work, struct ieee80211_local,
1008 sched_scan_stopped_work); 1001 sched_scan_stopped_work);
1009 int i;
1010 1002
1011 mutex_lock(&local->mtx); 1003 mutex_lock(&local->mtx);
1012 1004
@@ -1015,9 +1007,6 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
1015 return; 1007 return;
1016 } 1008 }
1017 1009
1018 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1019 kfree(local->sched_scan_ies.ie[i]);
1020
1021 rcu_assign_pointer(local->sched_scan_sdata, NULL); 1010 rcu_assign_pointer(local->sched_scan_sdata, NULL);
1022 1011
1023 mutex_unlock(&local->mtx); 1012 mutex_unlock(&local->mtx);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 06fa75ceb025..797dd36a220d 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -91,6 +91,70 @@ static int sta_info_hash_del(struct ieee80211_local *local,
91 return -ENOENT; 91 return -ENOENT;
92} 92}
93 93
94static void free_sta_work(struct work_struct *wk)
95{
96 struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
97 int ac, i;
98 struct tid_ampdu_tx *tid_tx;
99 struct ieee80211_sub_if_data *sdata = sta->sdata;
100 struct ieee80211_local *local = sdata->local;
101
102 /*
103 * At this point, when being called as call_rcu callback,
104 * neither mac80211 nor the driver can reference this
105 * sta struct any more except by still existing timers
106 * associated with this station that we clean up below.
107 */
108
109 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
110 BUG_ON(!sdata->bss);
111
112 clear_sta_flag(sta, WLAN_STA_PS_STA);
113
114 atomic_dec(&sdata->bss->num_sta_ps);
115 sta_info_recalc_tim(sta);
116 }
117
118 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
119 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
120 __skb_queue_purge(&sta->ps_tx_buf[ac]);
121 __skb_queue_purge(&sta->tx_filtered[ac]);
122 }
123
124#ifdef CONFIG_MAC80211_MESH
125 if (ieee80211_vif_is_mesh(&sdata->vif)) {
126 mesh_accept_plinks_update(sdata);
127 mesh_plink_deactivate(sta);
128 del_timer_sync(&sta->plink_timer);
129 }
130#endif
131
132 cancel_work_sync(&sta->drv_unblock_wk);
133
134 /*
135 * Destroy aggregation state here. It would be nice to wait for the
136 * driver to finish aggregation stop and then clean up, but for now
137 * drivers have to handle aggregation stop being requested, followed
138 * directly by station destruction.
139 */
140 for (i = 0; i < STA_TID_NUM; i++) {
141 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
142 if (!tid_tx)
143 continue;
144 __skb_queue_purge(&tid_tx->pending);
145 kfree(tid_tx);
146 }
147
148 sta_info_free(local, sta);
149}
150
151static void free_sta_rcu(struct rcu_head *h)
152{
153 struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
154
155 ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
156}
157
94/* protected by RCU */ 158/* protected by RCU */
95struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 159struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
96 const u8 *addr) 160 const u8 *addr)
@@ -241,6 +305,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
241 305
242 spin_lock_init(&sta->lock); 306 spin_lock_init(&sta->lock);
243 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 307 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
308 INIT_WORK(&sta->free_sta_wk, free_sta_work);
244 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 309 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
245 mutex_init(&sta->ampdu_mlme.mtx); 310 mutex_init(&sta->ampdu_mlme.mtx);
246 311
@@ -654,8 +719,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
654{ 719{
655 struct ieee80211_local *local; 720 struct ieee80211_local *local;
656 struct ieee80211_sub_if_data *sdata; 721 struct ieee80211_sub_if_data *sdata;
657 int ret, i, ac; 722 int ret, i;
658 struct tid_ampdu_tx *tid_tx;
659 723
660 might_sleep(); 724 might_sleep();
661 725
@@ -674,7 +738,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
674 * will be sufficient. 738 * will be sufficient.
675 */ 739 */
676 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 740 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
677 ieee80211_sta_tear_down_BA_sessions(sta, true); 741 ieee80211_sta_tear_down_BA_sessions(sta, false);
678 742
679 ret = sta_info_hash_del(local, sta); 743 ret = sta_info_hash_del(local, sta);
680 if (ret) 744 if (ret)
@@ -711,65 +775,14 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
711 WARN_ON_ONCE(ret != 0); 775 WARN_ON_ONCE(ret != 0);
712 } 776 }
713 777
714 /*
715 * At this point, after we wait for an RCU grace period,
716 * neither mac80211 nor the driver can reference this
717 * sta struct any more except by still existing timers
718 * associated with this station that we clean up below.
719 */
720 synchronize_rcu();
721
722 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
723 BUG_ON(!sdata->bss);
724
725 clear_sta_flag(sta, WLAN_STA_PS_STA);
726
727 atomic_dec(&sdata->bss->num_sta_ps);
728 sta_info_recalc_tim(sta);
729 }
730
731 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
732 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
733 __skb_queue_purge(&sta->ps_tx_buf[ac]);
734 __skb_queue_purge(&sta->tx_filtered[ac]);
735 }
736
737#ifdef CONFIG_MAC80211_MESH
738 if (ieee80211_vif_is_mesh(&sdata->vif))
739 mesh_accept_plinks_update(sdata);
740#endif
741
742 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); 778 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
743 779
744 cancel_work_sync(&sta->drv_unblock_wk);
745
746 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL); 780 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
747 781
748 rate_control_remove_sta_debugfs(sta); 782 rate_control_remove_sta_debugfs(sta);
749 ieee80211_sta_debugfs_remove(sta); 783 ieee80211_sta_debugfs_remove(sta);
750 784
751#ifdef CONFIG_MAC80211_MESH 785 call_rcu(&sta->rcu_head, free_sta_rcu);
752 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
753 mesh_plink_deactivate(sta);
754 del_timer_sync(&sta->plink_timer);
755 }
756#endif
757
758 /*
759 * Destroy aggregation state here. It would be nice to wait for the
760 * driver to finish aggregation stop and then clean up, but for now
761 * drivers have to handle aggregation stop being requested, followed
762 * directly by station destruction.
763 */
764 for (i = 0; i < STA_TID_NUM; i++) {
765 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
766 if (!tid_tx)
767 continue;
768 __skb_queue_purge(&tid_tx->pending);
769 kfree(tid_tx);
770 }
771
772 sta_info_free(local, sta);
773 786
774 return 0; 787 return 0;
775} 788}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index a470e1123a55..c88f161f8118 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -287,6 +287,7 @@ struct sta_ampdu_mlme {
287struct sta_info { 287struct sta_info {
288 /* General information, mostly static */ 288 /* General information, mostly static */
289 struct list_head list; 289 struct list_head list;
290 struct rcu_head rcu_head;
290 struct sta_info __rcu *hnext; 291 struct sta_info __rcu *hnext;
291 struct ieee80211_local *local; 292 struct ieee80211_local *local;
292 struct ieee80211_sub_if_data *sdata; 293 struct ieee80211_sub_if_data *sdata;
@@ -297,6 +298,7 @@ struct sta_info {
297 spinlock_t lock; 298 spinlock_t lock;
298 299
299 struct work_struct drv_unblock_wk; 300 struct work_struct drv_unblock_wk;
301 struct work_struct free_sta_wk;
300 302
301 u16 listen_interval; 303 u16 listen_interval;
302 304
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8cd72914cdaf..2ce89732d0f2 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -517,21 +517,41 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
517 517
518 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 518 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
519 u64 cookie = (unsigned long)skb; 519 u64 cookie = (unsigned long)skb;
520 bool found = false;
521
520 acked = info->flags & IEEE80211_TX_STAT_ACK; 522 acked = info->flags & IEEE80211_TX_STAT_ACK;
521 523
522 /* 524 rcu_read_lock();
523 * TODO: When we have non-netdev frame TX, 525
524 * we cannot use skb->dev->ieee80211_ptr 526 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
525 */ 527 if (!sdata->dev)
528 continue;
529
530 if (skb->dev != sdata->dev)
531 continue;
526 532
527 if (ieee80211_is_nullfunc(hdr->frame_control) || 533 found = true;
528 ieee80211_is_qos_nullfunc(hdr->frame_control)) 534 break;
529 cfg80211_probe_status(skb->dev, hdr->addr1, 535 }
536
537 if (!skb->dev) {
538 sdata = rcu_dereference(local->p2p_sdata);
539 if (sdata)
540 found = true;
541 }
542
543 if (!found)
544 skb->dev = NULL;
545 else if (ieee80211_is_nullfunc(hdr->frame_control) ||
546 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
547 cfg80211_probe_status(sdata->dev, hdr->addr1,
530 cookie, acked, GFP_ATOMIC); 548 cookie, acked, GFP_ATOMIC);
531 else 549 } else {
532 cfg80211_mgmt_tx_status( 550 cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
533 skb->dev->ieee80211_ptr, cookie, skb->data, 551 skb->len, acked, GFP_ATOMIC);
534 skb->len, acked, GFP_ATOMIC); 552 }
553
554 rcu_read_unlock();
535 } 555 }
536 556
537 if (unlikely(info->ack_frame_id)) { 557 if (unlikely(info->ack_frame_id)) {
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c6d33b55b2df..18d9c8a52e9e 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -24,7 +24,7 @@
24 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 24 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ 25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
26 __entry->p2p = sdata->vif.p2p; \ 26 __entry->p2p = sdata->vif.p2p; \
27 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 27 __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
28#define VIF_PR_FMT " vif:%s(%d%s)" 28#define VIF_PR_FMT " vif:%s(%d%s)"
29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" 29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
30 30
@@ -274,9 +274,12 @@ TRACE_EVENT(drv_config,
274 __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; 274 __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
275 __entry->max_sleep_period = local->hw.conf.max_sleep_period; 275 __entry->max_sleep_period = local->hw.conf.max_sleep_period;
276 __entry->listen_interval = local->hw.conf.listen_interval; 276 __entry->listen_interval = local->hw.conf.listen_interval;
277 __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count; 277 __entry->long_frame_max_tx_count =
278 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; 278 local->hw.conf.long_frame_max_tx_count;
279 __entry->center_freq = local->hw.conf.channel->center_freq; 279 __entry->short_frame_max_tx_count =
280 local->hw.conf.short_frame_max_tx_count;
281 __entry->center_freq = local->hw.conf.channel ?
282 local->hw.conf.channel->center_freq : 0;
280 __entry->channel_type = local->hw.conf.channel_type; 283 __entry->channel_type = local->hw.conf.channel_type;
281 __entry->smps = local->hw.conf.smps_mode; 284 __entry->smps = local->hw.conf.smps_mode;
282 ), 285 ),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c5e8c9c31f76..e0e0d1d0e830 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -55,7 +55,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
55 if (WARN_ON_ONCE(info->control.rates[0].idx < 0)) 55 if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
56 return 0; 56 return 0;
57 57
58 sband = local->hw.wiphy->bands[tx->channel->band]; 58 sband = local->hw.wiphy->bands[info->band];
59 txrate = &sband->bitrates[info->control.rates[0].idx]; 59 txrate = &sband->bitrates[info->control.rates[0].idx];
60 60
61 erp = txrate->flags & IEEE80211_RATE_ERP_G; 61 erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -580,7 +580,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
580 tx->key = NULL; 580 tx->key = NULL;
581 else 581 else
582 skip_hw = (tx->key->conf.flags & 582 skip_hw = (tx->key->conf.flags &
583 IEEE80211_KEY_FLAG_SW_MGMT) && 583 IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
584 ieee80211_is_mgmt(hdr->frame_control); 584 ieee80211_is_mgmt(hdr->frame_control);
585 break; 585 break;
586 case WLAN_CIPHER_SUITE_AES_CMAC: 586 case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -615,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
615 615
616 memset(&txrc, 0, sizeof(txrc)); 616 memset(&txrc, 0, sizeof(txrc));
617 617
618 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 618 sband = tx->local->hw.wiphy->bands[info->band];
619 619
620 len = min_t(u32, tx->skb->len + FCS_LEN, 620 len = min_t(u32, tx->skb->len + FCS_LEN,
621 tx->local->hw.wiphy->frag_threshold); 621 tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
626 txrc.bss_conf = &tx->sdata->vif.bss_conf; 626 txrc.bss_conf = &tx->sdata->vif.bss_conf;
627 txrc.skb = tx->skb; 627 txrc.skb = tx->skb;
628 txrc.reported_rate.idx = -1; 628 txrc.reported_rate.idx = -1;
629 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band]; 629 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
630 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) 630 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
631 txrc.max_rate_idx = -1; 631 txrc.max_rate_idx = -1;
632 else 632 else
633 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 633 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
634 memcpy(txrc.rate_idx_mcs_mask, 634 memcpy(txrc.rate_idx_mcs_mask,
635 tx->sdata->rc_rateidx_mcs_mask[tx->channel->band], 635 tx->sdata->rc_rateidx_mcs_mask[info->band],
636 sizeof(txrc.rate_idx_mcs_mask)); 636 sizeof(txrc.rate_idx_mcs_mask));
637 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || 637 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
638 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || 638 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
667 "scanning and associated. Target station: " 667 "scanning and associated. Target station: "
668 "%pM on %d GHz band\n", 668 "%pM on %d GHz band\n",
669 tx->sdata->name, hdr->addr1, 669 tx->sdata->name, hdr->addr1,
670 tx->channel->band ? 5 : 2)) 670 info->band ? 5 : 2))
671 return TX_DROP; 671 return TX_DROP;
672 672
673 /* 673 /*
@@ -1131,7 +1131,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1131 tx->skb = skb; 1131 tx->skb = skb;
1132 tx->local = local; 1132 tx->local = local;
1133 tx->sdata = sdata; 1133 tx->sdata = sdata;
1134 tx->channel = local->hw.conf.channel;
1135 __skb_queue_head_init(&tx->skbs); 1134 __skb_queue_head_init(&tx->skbs);
1136 1135
1137 /* 1136 /*
@@ -1204,6 +1203,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1204 struct sk_buff_head *skbs, 1203 struct sk_buff_head *skbs,
1205 bool txpending) 1204 bool txpending)
1206{ 1205{
1206 struct ieee80211_tx_control control;
1207 struct sk_buff *skb, *tmp; 1207 struct sk_buff *skb, *tmp;
1208 unsigned long flags; 1208 unsigned long flags;
1209 1209
@@ -1240,10 +1240,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1240 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1240 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1241 1241
1242 info->control.vif = vif; 1242 info->control.vif = vif;
1243 info->control.sta = sta; 1243 control.sta = sta;
1244 1244
1245 __skb_unlink(skb, skbs); 1245 __skb_unlink(skb, skbs);
1246 drv_tx(local, skb); 1246 drv_tx(local, &control, skb);
1247 } 1247 }
1248 1248
1249 return true; 1249 return true;
@@ -1399,8 +1399,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1399 goto out; 1399 goto out;
1400 } 1400 }
1401 1401
1402 tx.channel = local->hw.conf.channel; 1402 info->band = local->hw.conf.channel->band;
1403 info->band = tx.channel->band;
1404 1403
1405 /* set up hw_queue value early */ 1404 /* set up hw_queue value early */
1406 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || 1405 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1720 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1719 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1721 struct ieee80211_local *local = sdata->local; 1720 struct ieee80211_local *local = sdata->local;
1722 struct ieee80211_tx_info *info; 1721 struct ieee80211_tx_info *info;
1723 int ret = NETDEV_TX_BUSY, head_need; 1722 int head_need;
1724 u16 ethertype, hdrlen, meshhdrlen = 0; 1723 u16 ethertype, hdrlen, meshhdrlen = 0;
1725 __le16 fc; 1724 __le16 fc;
1726 struct ieee80211_hdr hdr; 1725 struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1736 u32 info_flags = 0; 1735 u32 info_flags = 0;
1737 u16 info_id = 0; 1736 u16 info_id = 0;
1738 1737
1739 if (unlikely(skb->len < ETH_HLEN)) { 1738 if (unlikely(skb->len < ETH_HLEN))
1740 ret = NETDEV_TX_OK;
1741 goto fail; 1739 goto fail;
1742 }
1743 1740
1744 /* convert Ethernet header to proper 802.11 header (based on 1741 /* convert Ethernet header to proper 802.11 header (based on
1745 * operation mode) */ 1742 * operation mode) */
@@ -1787,7 +1784,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1787 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { 1784 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1788 /* Do not send frames with mesh_ttl == 0 */ 1785 /* Do not send frames with mesh_ttl == 0 */
1789 sdata->u.mesh.mshstats.dropped_frames_ttl++; 1786 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1790 ret = NETDEV_TX_OK;
1791 goto fail; 1787 goto fail;
1792 } 1788 }
1793 rcu_read_lock(); 1789 rcu_read_lock();
@@ -1874,10 +1870,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1874 1870
1875 if (tdls_direct) { 1871 if (tdls_direct) {
1876 /* link during setup - throw out frames to peer */ 1872 /* link during setup - throw out frames to peer */
1877 if (!tdls_auth) { 1873 if (!tdls_auth)
1878 ret = NETDEV_TX_OK;
1879 goto fail; 1874 goto fail;
1880 }
1881 1875
1882 /* DA SA BSSID */ 1876 /* DA SA BSSID */
1883 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1877 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1911,7 +1905,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1911 hdrlen = 24; 1905 hdrlen = 24;
1912 break; 1906 break;
1913 default: 1907 default:
1914 ret = NETDEV_TX_OK;
1915 goto fail; 1908 goto fail;
1916 } 1909 }
1917 1910
@@ -1956,7 +1949,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1956 1949
1957 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); 1950 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
1958 1951
1959 ret = NETDEV_TX_OK;
1960 goto fail; 1952 goto fail;
1961 } 1953 }
1962 1954
@@ -2011,10 +2003,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2011 skb = skb_clone(skb, GFP_ATOMIC); 2003 skb = skb_clone(skb, GFP_ATOMIC);
2012 kfree_skb(tmp_skb); 2004 kfree_skb(tmp_skb);
2013 2005
2014 if (!skb) { 2006 if (!skb)
2015 ret = NETDEV_TX_OK;
2016 goto fail; 2007 goto fail;
2017 }
2018 } 2008 }
2019 2009
2020 hdr.frame_control = fc; 2010 hdr.frame_control = fc;
@@ -2117,10 +2107,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2117 return NETDEV_TX_OK; 2107 return NETDEV_TX_OK;
2118 2108
2119 fail: 2109 fail:
2120 if (ret == NETDEV_TX_OK) 2110 dev_kfree_skb(skb);
2121 dev_kfree_skb(skb); 2111 return NETDEV_TX_OK;
2122
2123 return ret;
2124} 2112}
2125 2113
2126 2114
@@ -2295,12 +2283,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2295 struct ieee80211_sub_if_data *sdata = NULL; 2283 struct ieee80211_sub_if_data *sdata = NULL;
2296 struct ieee80211_if_ap *ap = NULL; 2284 struct ieee80211_if_ap *ap = NULL;
2297 struct beacon_data *beacon; 2285 struct beacon_data *beacon;
2298 struct ieee80211_supported_band *sband; 2286 enum ieee80211_band band = local->oper_channel->band;
2299 enum ieee80211_band band = local->hw.conf.channel->band;
2300 struct ieee80211_tx_rate_control txrc; 2287 struct ieee80211_tx_rate_control txrc;
2301 2288
2302 sband = local->hw.wiphy->bands[band];
2303
2304 rcu_read_lock(); 2289 rcu_read_lock();
2305 2290
2306 sdata = vif_to_sdata(vif); 2291 sdata = vif_to_sdata(vif);
@@ -2410,7 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2410 memset(mgmt, 0, hdr_len); 2395 memset(mgmt, 0, hdr_len);
2411 mgmt->frame_control = 2396 mgmt->frame_control =
2412 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2397 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2413 memset(mgmt->da, 0xff, ETH_ALEN); 2398 eth_broadcast_addr(mgmt->da);
2414 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 2399 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2415 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 2400 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2416 mgmt->u.beacon.beacon_int = 2401 mgmt->u.beacon.beacon_int =
@@ -2422,9 +2407,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2422 *pos++ = WLAN_EID_SSID; 2407 *pos++ = WLAN_EID_SSID;
2423 *pos++ = 0x0; 2408 *pos++ = 0x0;
2424 2409
2425 if (ieee80211_add_srates_ie(sdata, skb, true) || 2410 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
2426 mesh_add_ds_params_ie(skb, sdata) || 2411 mesh_add_ds_params_ie(skb, sdata) ||
2427 ieee80211_add_ext_srates_ie(sdata, skb, true) || 2412 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
2428 mesh_add_rsn_ie(skb, sdata) || 2413 mesh_add_rsn_ie(skb, sdata) ||
2429 mesh_add_ht_cap_ie(skb, sdata) || 2414 mesh_add_ht_cap_ie(skb, sdata) ||
2430 mesh_add_ht_oper_ie(skb, sdata) || 2415 mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2447,12 +2432,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2447 2432
2448 memset(&txrc, 0, sizeof(txrc)); 2433 memset(&txrc, 0, sizeof(txrc));
2449 txrc.hw = hw; 2434 txrc.hw = hw;
2450 txrc.sband = sband; 2435 txrc.sband = local->hw.wiphy->bands[band];
2451 txrc.bss_conf = &sdata->vif.bss_conf; 2436 txrc.bss_conf = &sdata->vif.bss_conf;
2452 txrc.skb = skb; 2437 txrc.skb = skb;
2453 txrc.reported_rate.idx = -1; 2438 txrc.reported_rate.idx = -1;
2454 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; 2439 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2455 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) 2440 if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
2456 txrc.max_rate_idx = -1; 2441 txrc.max_rate_idx = -1;
2457 else 2442 else
2458 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 2443 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2476,7 +2461,8 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
2476 struct ieee80211_vif *vif) 2461 struct ieee80211_vif *vif)
2477{ 2462{
2478 struct ieee80211_if_ap *ap = NULL; 2463 struct ieee80211_if_ap *ap = NULL;
2479 struct sk_buff *presp = NULL, *skb = NULL; 2464 struct sk_buff *skb = NULL;
2465 struct probe_resp *presp = NULL;
2480 struct ieee80211_hdr *hdr; 2466 struct ieee80211_hdr *hdr;
2481 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 2467 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2482 2468
@@ -2490,10 +2476,12 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
2490 if (!presp) 2476 if (!presp)
2491 goto out; 2477 goto out;
2492 2478
2493 skb = skb_copy(presp, GFP_ATOMIC); 2479 skb = dev_alloc_skb(presp->len);
2494 if (!skb) 2480 if (!skb)
2495 goto out; 2481 goto out;
2496 2482
2483 memcpy(skb_put(skb, presp->len), presp->data, presp->len);
2484
2497 hdr = (struct ieee80211_hdr *) skb->data; 2485 hdr = (struct ieee80211_hdr *) skb->data;
2498 memset(hdr->addr1, 0, sizeof(hdr->addr1)); 2486 memset(hdr->addr1, 0, sizeof(hdr->addr1));
2499 2487
@@ -2604,9 +2592,9 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2604 memset(hdr, 0, sizeof(*hdr)); 2592 memset(hdr, 0, sizeof(*hdr));
2605 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2593 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2606 IEEE80211_STYPE_PROBE_REQ); 2594 IEEE80211_STYPE_PROBE_REQ);
2607 memset(hdr->addr1, 0xff, ETH_ALEN); 2595 eth_broadcast_addr(hdr->addr1);
2608 memcpy(hdr->addr2, vif->addr, ETH_ALEN); 2596 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2609 memset(hdr->addr3, 0xff, ETH_ALEN); 2597 eth_broadcast_addr(hdr->addr3);
2610 2598
2611 pos = skb_put(skb, ie_ssid_len); 2599 pos = skb_put(skb, ie_ssid_len);
2612 *pos++ = WLAN_EID_SSID; 2600 *pos++ = WLAN_EID_SSID;
@@ -2703,8 +2691,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2703 info = IEEE80211_SKB_CB(skb); 2691 info = IEEE80211_SKB_CB(skb);
2704 2692
2705 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2693 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2706 tx.channel = local->hw.conf.channel; 2694 info->band = local->oper_channel->band;
2707 info->band = tx.channel->band;
2708 2695
2709 if (invoke_tx_handlers(&tx)) 2696 if (invoke_tx_handlers(&tx))
2710 skb = NULL; 2697 skb = NULL;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 39b82fee4904..22ca35054dd0 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -276,6 +276,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
276 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 276 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
277 int ac; 277 int ac;
278 278
279 if (!sdata->dev)
280 continue;
281
279 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) 282 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
280 continue; 283 continue;
281 284
@@ -364,6 +367,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
364 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 367 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
365 int ac; 368 int ac;
366 369
370 if (!sdata->dev)
371 continue;
372
367 for (ac = 0; ac < n_acs; ac++) { 373 for (ac = 0; ac < n_acs; ac++) {
368 if (sdata->vif.hw_queue[ac] == queue || 374 if (sdata->vif.hw_queue[ac] == queue ||
369 sdata->vif.cab_queue == queue) 375 sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
768 elem_parse_failed = true; 774 elem_parse_failed = true;
769 break; 775 break;
770 case WLAN_EID_CHANNEL_SWITCH: 776 case WLAN_EID_CHANNEL_SWITCH:
771 elems->ch_switch_elem = pos; 777 if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
772 elems->ch_switch_elem_len = elen; 778 elem_parse_failed = true;
779 break;
780 }
781 elems->ch_switch_ie = (void *)pos;
773 break; 782 break;
774 case WLAN_EID_QUIET: 783 case WLAN_EID_QUIET:
775 if (!elems->quiet_elem) { 784 if (!elems->quiet_elem) {
@@ -783,8 +792,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
783 elems->country_elem_len = elen; 792 elems->country_elem_len = elen;
784 break; 793 break;
785 case WLAN_EID_PWR_CONSTRAINT: 794 case WLAN_EID_PWR_CONSTRAINT:
795 if (elen != 1) {
796 elem_parse_failed = true;
797 break;
798 }
786 elems->pwr_constr_elem = pos; 799 elems->pwr_constr_elem = pos;
787 elems->pwr_constr_elem_len = elen;
788 break; 800 break;
789 case WLAN_EID_TIMEOUT_INTERVAL: 801 case WLAN_EID_TIMEOUT_INTERVAL:
790 elems->timeout_int = pos; 802 elems->timeout_int = pos;
@@ -832,7 +844,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
832 844
833 memset(&qparam, 0, sizeof(qparam)); 845 memset(&qparam, 0, sizeof(qparam));
834 846
835 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 847 use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
836 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 848 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
837 849
838 /* 850 /*
@@ -899,7 +911,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
899 drv_conf_tx(local, sdata, ac, &qparam); 911 drv_conf_tx(local, sdata, ac, &qparam);
900 } 912 }
901 913
902 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 914 if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
915 sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
903 sdata->vif.bss_conf.qos = enable_qos; 916 sdata->vif.bss_conf.qos = enable_qos;
904 if (bss_notify) 917 if (bss_notify)
905 ieee80211_bss_info_change_notify(sdata, 918 ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +932,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
919 if ((supp_rates[i] & 0x7f) * 5 > 110) 932 if ((supp_rates[i] & 0x7f) * 5 > 110)
920 have_higher_than_11mbit = 1; 933 have_higher_than_11mbit = 1;
921 934
922 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 935 if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
923 have_higher_than_11mbit) 936 have_higher_than_11mbit)
924 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; 937 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
925 else 938 else
@@ -994,6 +1007,45 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
994 ieee80211_tx_skb(sdata, skb); 1007 ieee80211_tx_skb(sdata, skb);
995} 1008}
996 1009
1010void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
1011 const u8 *bssid, u16 stype, u16 reason,
1012 bool send_frame, u8 *frame_buf)
1013{
1014 struct ieee80211_local *local = sdata->local;
1015 struct sk_buff *skb;
1016 struct ieee80211_mgmt *mgmt = (void *)frame_buf;
1017
1018 /* build frame */
1019 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
1020 mgmt->duration = 0; /* initialize only */
1021 mgmt->seq_ctrl = 0; /* initialize only */
1022 memcpy(mgmt->da, bssid, ETH_ALEN);
1023 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
1024 memcpy(mgmt->bssid, bssid, ETH_ALEN);
1025 /* u.deauth.reason_code == u.disassoc.reason_code */
1026 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
1027
1028 if (send_frame) {
1029 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
1030 IEEE80211_DEAUTH_FRAME_LEN);
1031 if (!skb)
1032 return;
1033
1034 skb_reserve(skb, local->hw.extra_tx_headroom);
1035
1036 /* copy in frame */
1037 memcpy(skb_put(skb, IEEE80211_DEAUTH_FRAME_LEN),
1038 mgmt, IEEE80211_DEAUTH_FRAME_LEN);
1039
1040 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1041 !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
1042 IEEE80211_SKB_CB(skb)->flags |=
1043 IEEE80211_TX_INTFL_DONT_ENCRYPT;
1044
1045 ieee80211_tx_skb(sdata, skb);
1046 }
1047}
1048
997int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1049int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
998 const u8 *ie, size_t ie_len, 1050 const u8 *ie, size_t ie_len,
999 enum ieee80211_band band, u32 rate_mask, 1051 enum ieee80211_band band, u32 rate_mask,
@@ -1100,6 +1152,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1100 1152
1101struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1153struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1102 u8 *dst, u32 ratemask, 1154 u8 *dst, u32 ratemask,
1155 struct ieee80211_channel *chan,
1103 const u8 *ssid, size_t ssid_len, 1156 const u8 *ssid, size_t ssid_len,
1104 const u8 *ie, size_t ie_len, 1157 const u8 *ie, size_t ie_len,
1105 bool directed) 1158 bool directed)
@@ -1109,7 +1162,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1109 struct ieee80211_mgmt *mgmt; 1162 struct ieee80211_mgmt *mgmt;
1110 size_t buf_len; 1163 size_t buf_len;
1111 u8 *buf; 1164 u8 *buf;
1112 u8 chan; 1165 u8 chan_no;
1113 1166
1114 /* FIXME: come up with a proper value */ 1167 /* FIXME: come up with a proper value */
1115 buf = kmalloc(200 + ie_len, GFP_KERNEL); 1168 buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1175,12 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1122 * badly-behaved APs don't respond when this parameter is included. 1175 * badly-behaved APs don't respond when this parameter is included.
1123 */ 1176 */
1124 if (directed) 1177 if (directed)
1125 chan = 0; 1178 chan_no = 0;
1126 else 1179 else
1127 chan = ieee80211_frequency_to_channel( 1180 chan_no = ieee80211_frequency_to_channel(chan->center_freq);
1128 local->hw.conf.channel->center_freq);
1129 1181
1130 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, 1182 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
1131 local->hw.conf.channel->band, 1183 ratemask, chan_no);
1132 ratemask, chan);
1133 1184
1134 skb = ieee80211_probereq_get(&local->hw, &sdata->vif, 1185 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
1135 ssid, ssid_len, 1186 ssid, ssid_len,
@@ -1154,11 +1205,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1154void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1205void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1155 const u8 *ssid, size_t ssid_len, 1206 const u8 *ssid, size_t ssid_len,
1156 const u8 *ie, size_t ie_len, 1207 const u8 *ie, size_t ie_len,
1157 u32 ratemask, bool directed, bool no_cck) 1208 u32 ratemask, bool directed, bool no_cck,
1209 struct ieee80211_channel *channel)
1158{ 1210{
1159 struct sk_buff *skb; 1211 struct sk_buff *skb;
1160 1212
1161 skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len, 1213 skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
1214 ssid, ssid_len,
1162 ie, ie_len, directed); 1215 ie, ie_len, directed);
1163 if (skb) { 1216 if (skb) {
1164 if (no_cck) 1217 if (no_cck)
@@ -1359,7 +1412,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1359 switch (sdata->vif.type) { 1412 switch (sdata->vif.type) {
1360 case NL80211_IFTYPE_STATION: 1413 case NL80211_IFTYPE_STATION:
1361 changed |= BSS_CHANGED_ASSOC | 1414 changed |= BSS_CHANGED_ASSOC |
1362 BSS_CHANGED_ARP_FILTER; 1415 BSS_CHANGED_ARP_FILTER |
1416 BSS_CHANGED_PS;
1363 mutex_lock(&sdata->u.mgd.mtx); 1417 mutex_lock(&sdata->u.mgd.mtx);
1364 ieee80211_bss_info_change_notify(sdata, changed); 1418 ieee80211_bss_info_change_notify(sdata, changed);
1365 mutex_unlock(&sdata->u.mgd.mtx); 1419 mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1439,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1385 case NL80211_IFTYPE_MONITOR: 1439 case NL80211_IFTYPE_MONITOR:
1386 /* ignore virtual */ 1440 /* ignore virtual */
1387 break; 1441 break;
1442 case NL80211_IFTYPE_P2P_DEVICE:
1443 changed = BSS_CHANGED_IDLE;
1444 break;
1388 case NL80211_IFTYPE_UNSPECIFIED: 1445 case NL80211_IFTYPE_UNSPECIFIED:
1389 case NUM_NL80211_IFTYPES: 1446 case NUM_NL80211_IFTYPES:
1390 case NL80211_IFTYPE_P2P_CLIENT: 1447 case NL80211_IFTYPE_P2P_CLIENT:
@@ -1549,14 +1606,13 @@ static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
1549 return 0; 1606 return 0;
1550} 1607}
1551 1608
1552/* must hold iflist_mtx */
1553void ieee80211_recalc_smps(struct ieee80211_local *local) 1609void ieee80211_recalc_smps(struct ieee80211_local *local)
1554{ 1610{
1555 struct ieee80211_sub_if_data *sdata; 1611 struct ieee80211_sub_if_data *sdata;
1556 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF; 1612 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
1557 int count = 0; 1613 int count = 0;
1558 1614
1559 lockdep_assert_held(&local->iflist_mtx); 1615 mutex_lock(&local->iflist_mtx);
1560 1616
1561 /* 1617 /*
1562 * This function could be improved to handle multiple 1618 * This function could be improved to handle multiple
@@ -1571,6 +1627,8 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
1571 list_for_each_entry(sdata, &local->interfaces, list) { 1627 list_for_each_entry(sdata, &local->interfaces, list) {
1572 if (!ieee80211_sdata_running(sdata)) 1628 if (!ieee80211_sdata_running(sdata))
1573 continue; 1629 continue;
1630 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
1631 continue;
1574 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1632 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1575 goto set; 1633 goto set;
1576 1634
@@ -1583,12 +1641,14 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
1583 } 1641 }
1584 1642
1585 if (smps_mode == local->smps_mode) 1643 if (smps_mode == local->smps_mode)
1586 return; 1644 goto unlock;
1587 1645
1588 set: 1646 set:
1589 local->smps_mode = smps_mode; 1647 local->smps_mode = smps_mode;
1590 /* changed flag is auto-detected for this */ 1648 /* changed flag is auto-detected for this */
1591 ieee80211_hw_config(local, 0); 1649 ieee80211_hw_config(local, 0);
1650 unlock:
1651 mutex_unlock(&local->iflist_mtx);
1592} 1652}
1593 1653
1594static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) 1654static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
@@ -1809,7 +1869,8 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
1809} 1869}
1810 1870
1811int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, 1871int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1812 struct sk_buff *skb, bool need_basic) 1872 struct sk_buff *skb, bool need_basic,
1873 enum ieee80211_band band)
1813{ 1874{
1814 struct ieee80211_local *local = sdata->local; 1875 struct ieee80211_local *local = sdata->local;
1815 struct ieee80211_supported_band *sband; 1876 struct ieee80211_supported_band *sband;
@@ -1817,7 +1878,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1817 u8 i, rates, *pos; 1878 u8 i, rates, *pos;
1818 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 1879 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
1819 1880
1820 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1881 sband = local->hw.wiphy->bands[band];
1821 rates = sband->n_bitrates; 1882 rates = sband->n_bitrates;
1822 if (rates > 8) 1883 if (rates > 8)
1823 rates = 8; 1884 rates = 8;
@@ -1840,7 +1901,8 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1840} 1901}
1841 1902
1842int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, 1903int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1843 struct sk_buff *skb, bool need_basic) 1904 struct sk_buff *skb, bool need_basic,
1905 enum ieee80211_band band)
1844{ 1906{
1845 struct ieee80211_local *local = sdata->local; 1907 struct ieee80211_local *local = sdata->local;
1846 struct ieee80211_supported_band *sband; 1908 struct ieee80211_supported_band *sband;
@@ -1848,7 +1910,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1848 u8 i, exrates, *pos; 1910 u8 i, exrates, *pos;
1849 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 1911 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
1850 1912
1851 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1913 sband = local->hw.wiphy->bands[band];
1852 exrates = sband->n_bitrates; 1914 exrates = sband->n_bitrates;
1853 if (exrates > 8) 1915 if (exrates > 8)
1854 exrates -= 8; 1916 exrates -= 8;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c19b214ffd57..fefa514b9917 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -356,6 +356,55 @@ config NETFILTER_NETLINK_QUEUE_CT
356 If this option is enabled, NFQUEUE can include Connection Tracking 356 If this option is enabled, NFQUEUE can include Connection Tracking
357 information together with the packet is the enqueued via NFNETLINK. 357 information together with the packet is the enqueued via NFNETLINK.
358 358
359config NF_NAT
360 tristate
361
362config NF_NAT_NEEDED
363 bool
364 depends on NF_NAT
365 default y
366
367config NF_NAT_PROTO_DCCP
368 tristate
369 depends on NF_NAT && NF_CT_PROTO_DCCP
370 default NF_NAT && NF_CT_PROTO_DCCP
371
372config NF_NAT_PROTO_UDPLITE
373 tristate
374 depends on NF_NAT && NF_CT_PROTO_UDPLITE
375 default NF_NAT && NF_CT_PROTO_UDPLITE
376
377config NF_NAT_PROTO_SCTP
378 tristate
379 default NF_NAT && NF_CT_PROTO_SCTP
380 depends on NF_NAT && NF_CT_PROTO_SCTP
381 select LIBCRC32C
382
383config NF_NAT_AMANDA
384 tristate
385 depends on NF_CONNTRACK && NF_NAT
386 default NF_NAT && NF_CONNTRACK_AMANDA
387
388config NF_NAT_FTP
389 tristate
390 depends on NF_CONNTRACK && NF_NAT
391 default NF_NAT && NF_CONNTRACK_FTP
392
393config NF_NAT_IRC
394 tristate
395 depends on NF_CONNTRACK && NF_NAT
396 default NF_NAT && NF_CONNTRACK_IRC
397
398config NF_NAT_SIP
399 tristate
400 depends on NF_CONNTRACK && NF_NAT
401 default NF_NAT && NF_CONNTRACK_SIP
402
403config NF_NAT_TFTP
404 tristate
405 depends on NF_CONNTRACK && NF_NAT
406 default NF_NAT && NF_CONNTRACK_TFTP
407
359endif # NF_CONNTRACK 408endif # NF_CONNTRACK
360 409
361# transparent proxy support 410# transparent proxy support
@@ -599,6 +648,16 @@ config NETFILTER_XT_TARGET_MARK
599 (e.g. when running oldconfig). It selects 648 (e.g. when running oldconfig). It selects
600 CONFIG_NETFILTER_XT_MARK (combined mark/MARK module). 649 CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
601 650
651config NETFILTER_XT_TARGET_NETMAP
652 tristate '"NETMAP" target support'
653 depends on NF_NAT
654 ---help---
655 NETMAP is an implementation of static 1:1 NAT mapping of network
656 addresses. It maps the network address part, while keeping the host
657 address part intact.
658
659 To compile it as a module, choose M here. If unsure, say N.
660
602config NETFILTER_XT_TARGET_NFLOG 661config NETFILTER_XT_TARGET_NFLOG
603 tristate '"NFLOG" target support' 662 tristate '"NFLOG" target support'
604 default m if NETFILTER_ADVANCED=n 663 default m if NETFILTER_ADVANCED=n
@@ -621,19 +680,6 @@ config NETFILTER_XT_TARGET_NFQUEUE
621 680
622 To compile it as a module, choose M here. If unsure, say N. 681 To compile it as a module, choose M here. If unsure, say N.
623 682
624config NETFILTER_XT_TARGET_NOTRACK
625 tristate '"NOTRACK" target support'
626 depends on IP_NF_RAW || IP6_NF_RAW
627 depends on NF_CONNTRACK
628 help
629 The NOTRACK target allows a select rule to specify
630 which packets *not* to enter the conntrack/NAT
631 subsystem with all the consequences (no ICMP error tracking,
632 no protocol helpers for the selected packets).
633
634 If you want to compile it as a module, say M here and read
635 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
636
637config NETFILTER_XT_TARGET_RATEEST 683config NETFILTER_XT_TARGET_RATEEST
638 tristate '"RATEEST" target support' 684 tristate '"RATEEST" target support'
639 depends on NETFILTER_ADVANCED 685 depends on NETFILTER_ADVANCED
@@ -644,6 +690,17 @@ config NETFILTER_XT_TARGET_RATEEST
644 690
645 To compile it as a module, choose M here. If unsure, say N. 691 To compile it as a module, choose M here. If unsure, say N.
646 692
693config NETFILTER_XT_TARGET_REDIRECT
694 tristate "REDIRECT target support"
695 depends on NF_NAT
696 ---help---
697 REDIRECT is a special case of NAT: all incoming connections are
698 mapped onto the incoming interface's address, causing the packets to
699 come to the local machine instead of passing through. This is
700 useful for transparent proxies.
701
702 To compile it as a module, choose M here. If unsure, say N.
703
647config NETFILTER_XT_TARGET_TEE 704config NETFILTER_XT_TARGET_TEE
648 tristate '"TEE" - packet cloning to alternate destination' 705 tristate '"TEE" - packet cloning to alternate destination'
649 depends on NETFILTER_ADVANCED 706 depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1c5160f2278e..32596978df1d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -43,6 +43,23 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
43obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o 43obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
44obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o 44obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
45 45
46nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
47 nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
48
49obj-$(CONFIG_NF_NAT) += nf_nat.o
50
51# NAT protocols (nf_nat)
52obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
53obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
54obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
55
56# NAT helpers
57obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
58obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
59obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
60obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
61obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
62
46# transparent proxy support 63# transparent proxy support
47obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o 64obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
48 65
@@ -53,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
53obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o 70obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
54obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o 71obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
55obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o 72obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
73obj-$(CONFIG_NF_NAT) += xt_nat.o
56 74
57# targets 75# targets
58obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o 76obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
@@ -65,10 +83,11 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
65obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o 83obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
66obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 84obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
67obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o 85obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
86obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
68obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 87obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
69obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o 88obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
70obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
71obj-$(CONFIG_NETFILTER_XT_TARGET_RATEEST) += xt_RATEEST.o 89obj-$(CONFIG_NETFILTER_XT_TARGET_RATEEST) += xt_RATEEST.o
90obj-$(CONFIG_NETFILTER_XT_TARGET_REDIRECT) += xt_REDIRECT.o
72obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o 91obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
73obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o 92obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o
74obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o 93obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 0bc6b60db4df..68912dadf13d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -126,7 +126,7 @@ unsigned int nf_iterate(struct list_head *head,
126 unsigned int hook, 126 unsigned int hook,
127 const struct net_device *indev, 127 const struct net_device *indev,
128 const struct net_device *outdev, 128 const struct net_device *outdev,
129 struct list_head **i, 129 struct nf_hook_ops **elemp,
130 int (*okfn)(struct sk_buff *), 130 int (*okfn)(struct sk_buff *),
131 int hook_thresh) 131 int hook_thresh)
132{ 132{
@@ -136,22 +136,20 @@ unsigned int nf_iterate(struct list_head *head,
136 * The caller must not block between calls to this 136 * The caller must not block between calls to this
137 * function because of risk of continuing from deleted element. 137 * function because of risk of continuing from deleted element.
138 */ 138 */
139 list_for_each_continue_rcu(*i, head) { 139 list_for_each_entry_continue_rcu((*elemp), head, list) {
140 struct nf_hook_ops *elem = (struct nf_hook_ops *)*i; 140 if (hook_thresh > (*elemp)->priority)
141
142 if (hook_thresh > elem->priority)
143 continue; 141 continue;
144 142
145 /* Optimization: we don't need to hold module 143 /* Optimization: we don't need to hold module
146 reference here, since function can't sleep. --RR */ 144 reference here, since function can't sleep. --RR */
147repeat: 145repeat:
148 verdict = elem->hook(hook, skb, indev, outdev, okfn); 146 verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
149 if (verdict != NF_ACCEPT) { 147 if (verdict != NF_ACCEPT) {
150#ifdef CONFIG_NETFILTER_DEBUG 148#ifdef CONFIG_NETFILTER_DEBUG
151 if (unlikely((verdict & NF_VERDICT_MASK) 149 if (unlikely((verdict & NF_VERDICT_MASK)
152 > NF_MAX_VERDICT)) { 150 > NF_MAX_VERDICT)) {
153 NFDEBUG("Evil return from %p(%u).\n", 151 NFDEBUG("Evil return from %p(%u).\n",
154 elem->hook, hook); 152 (*elemp)->hook, hook);
155 continue; 153 continue;
156 } 154 }
157#endif 155#endif
@@ -172,14 +170,14 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
172 int (*okfn)(struct sk_buff *), 170 int (*okfn)(struct sk_buff *),
173 int hook_thresh) 171 int hook_thresh)
174{ 172{
175 struct list_head *elem; 173 struct nf_hook_ops *elem;
176 unsigned int verdict; 174 unsigned int verdict;
177 int ret = 0; 175 int ret = 0;
178 176
179 /* We may already have this, but read-locks nest anyway */ 177 /* We may already have this, but read-locks nest anyway */
180 rcu_read_lock(); 178 rcu_read_lock();
181 179
182 elem = &nf_hooks[pf][hook]; 180 elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
183next_hook: 181next_hook:
184 verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev, 182 verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
185 outdev, &elem, okfn, hook_thresh); 183 outdev, &elem, okfn, hook_thresh);
@@ -273,6 +271,11 @@ EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
273 271
274#endif /* CONFIG_NF_CONNTRACK */ 272#endif /* CONFIG_NF_CONNTRACK */
275 273
274#ifdef CONFIG_NF_NAT_NEEDED
275void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
276EXPORT_SYMBOL(nf_nat_decode_session_hook);
277#endif
278
276#ifdef CONFIG_PROC_FS 279#ifdef CONFIG_PROC_FS
277struct proc_dir_entry *proc_net_netfilter; 280struct proc_dir_entry *proc_net_netfilter;
278EXPORT_SYMBOL(proc_net_netfilter); 281EXPORT_SYMBOL(proc_net_netfilter);
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 7e1b061aeeba..4a92fd47bd4c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -27,9 +27,12 @@
27#define IP_SET_BITMAP_TIMEOUT 27#define IP_SET_BITMAP_TIMEOUT
28#include <linux/netfilter/ipset/ip_set_timeout.h> 28#include <linux/netfilter/ipset/ip_set_timeout.h>
29 29
30#define REVISION_MIN 0
31#define REVISION_MAX 0
32
30MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 34MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
32MODULE_DESCRIPTION("bitmap:ip type of IP sets"); 35IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
33MODULE_ALIAS("ip_set_bitmap:ip"); 36MODULE_ALIAS("ip_set_bitmap:ip");
34 37
35/* Type structure */ 38/* Type structure */
@@ -284,7 +287,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
284 } else if (tb[IPSET_ATTR_CIDR]) { 287 } else if (tb[IPSET_ATTR_CIDR]) {
285 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 288 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
286 289
287 if (cidr > 32) 290 if (!cidr || cidr > 32)
288 return -IPSET_ERR_INVALID_CIDR; 291 return -IPSET_ERR_INVALID_CIDR;
289 ip_set_mask_from_to(ip, ip_to, cidr); 292 ip_set_mask_from_to(ip, ip_to, cidr);
290 } else 293 } else
@@ -454,7 +457,8 @@ static int
454bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags) 457bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
455{ 458{
456 struct bitmap_ip *map; 459 struct bitmap_ip *map;
457 u32 first_ip, last_ip, hosts, elements; 460 u32 first_ip, last_ip, hosts;
461 u64 elements;
458 u8 netmask = 32; 462 u8 netmask = 32;
459 int ret; 463 int ret;
460 464
@@ -497,7 +501,7 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
497 501
498 if (netmask == 32) { 502 if (netmask == 32) {
499 hosts = 1; 503 hosts = 1;
500 elements = last_ip - first_ip + 1; 504 elements = (u64)last_ip - first_ip + 1;
501 } else { 505 } else {
502 u8 mask_bits; 506 u8 mask_bits;
503 u32 mask; 507 u32 mask;
@@ -515,7 +519,8 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
515 if (elements > IPSET_BITMAP_MAX_RANGE + 1) 519 if (elements > IPSET_BITMAP_MAX_RANGE + 1)
516 return -IPSET_ERR_BITMAP_RANGE_SIZE; 520 return -IPSET_ERR_BITMAP_RANGE_SIZE;
517 521
518 pr_debug("hosts %u, elements %u\n", hosts, elements); 522 pr_debug("hosts %u, elements %llu\n",
523 hosts, (unsigned long long)elements);
519 524
520 map = kzalloc(sizeof(*map), GFP_KERNEL); 525 map = kzalloc(sizeof(*map), GFP_KERNEL);
521 if (!map) 526 if (!map)
@@ -554,8 +559,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
554 .features = IPSET_TYPE_IP, 559 .features = IPSET_TYPE_IP,
555 .dimension = IPSET_DIM_ONE, 560 .dimension = IPSET_DIM_ONE,
556 .family = NFPROTO_IPV4, 561 .family = NFPROTO_IPV4,
557 .revision_min = 0, 562 .revision_min = REVISION_MIN,
558 .revision_max = 0, 563 .revision_max = REVISION_MAX,
559 .create = bitmap_ip_create, 564 .create = bitmap_ip_create,
560 .create_policy = { 565 .create_policy = {
561 [IPSET_ATTR_IP] = { .type = NLA_NESTED }, 566 [IPSET_ATTR_IP] = { .type = NLA_NESTED },
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index d7eaf10edb6d..0f92dc24cb89 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -26,9 +26,12 @@
26#include <linux/netfilter/ipset/ip_set_timeout.h> 26#include <linux/netfilter/ipset/ip_set_timeout.h>
27#include <linux/netfilter/ipset/ip_set_bitmap.h> 27#include <linux/netfilter/ipset/ip_set_bitmap.h>
28 28
29#define REVISION_MIN 0
30#define REVISION_MAX 0
31
29MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 33MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
31MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets"); 34IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
32MODULE_ALIAS("ip_set_bitmap:ip,mac"); 35MODULE_ALIAS("ip_set_bitmap:ip,mac");
33 36
34enum { 37enum {
@@ -320,11 +323,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,
320 (elem->match == MAC_FILLED && 323 (elem->match == MAC_FILLED &&
321 nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, 324 nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
322 elem->ether))) 325 elem->ether)))
323 goto nla_put_failure; 326 goto nla_put_failure;
324 timeout = elem->match == MAC_UNSET ? elem->timeout 327 timeout = elem->match == MAC_UNSET ? elem->timeout
325 : ip_set_timeout_get(elem->timeout); 328 : ip_set_timeout_get(elem->timeout);
326 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout))) 329 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
327 goto nla_put_failure; 330 goto nla_put_failure;
328 ipset_nest_end(skb, nested); 331 ipset_nest_end(skb, nested);
329 } 332 }
330 ipset_nest_end(skb, atd); 333 ipset_nest_end(skb, atd);
@@ -557,7 +560,8 @@ static int
557bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[], 560bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
558 u32 flags) 561 u32 flags)
559{ 562{
560 u32 first_ip, last_ip, elements; 563 u32 first_ip, last_ip;
564 u64 elements;
561 struct bitmap_ipmac *map; 565 struct bitmap_ipmac *map;
562 int ret; 566 int ret;
563 567
@@ -588,7 +592,7 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
588 } else 592 } else
589 return -IPSET_ERR_PROTOCOL; 593 return -IPSET_ERR_PROTOCOL;
590 594
591 elements = last_ip - first_ip + 1; 595 elements = (u64)last_ip - first_ip + 1;
592 596
593 if (elements > IPSET_BITMAP_MAX_RANGE + 1) 597 if (elements > IPSET_BITMAP_MAX_RANGE + 1)
594 return -IPSET_ERR_BITMAP_RANGE_SIZE; 598 return -IPSET_ERR_BITMAP_RANGE_SIZE;
@@ -629,8 +633,8 @@ static struct ip_set_type bitmap_ipmac_type = {
629 .features = IPSET_TYPE_IP | IPSET_TYPE_MAC, 633 .features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
630 .dimension = IPSET_DIM_TWO, 634 .dimension = IPSET_DIM_TWO,
631 .family = NFPROTO_IPV4, 635 .family = NFPROTO_IPV4,
632 .revision_min = 0, 636 .revision_min = REVISION_MIN,
633 .revision_max = 0, 637 .revision_max = REVISION_MAX,
634 .create = bitmap_ipmac_create, 638 .create = bitmap_ipmac_create,
635 .create_policy = { 639 .create_policy = {
636 [IPSET_ATTR_IP] = { .type = NLA_NESTED }, 640 [IPSET_ATTR_IP] = { .type = NLA_NESTED },
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index b9f1fce7053b..e6b2db76f4c3 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -22,9 +22,12 @@
22#define IP_SET_BITMAP_TIMEOUT 22#define IP_SET_BITMAP_TIMEOUT
23#include <linux/netfilter/ipset/ip_set_timeout.h> 23#include <linux/netfilter/ipset/ip_set_timeout.h>
24 24
25#define REVISION_MIN 0
26#define REVISION_MAX 0
27
25MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
26MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 29MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
27MODULE_DESCRIPTION("bitmap:port type of IP sets"); 30IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
28MODULE_ALIAS("ip_set_bitmap:port"); 31MODULE_ALIAS("ip_set_bitmap:port");
29 32
30/* Type structure */ 33/* Type structure */
@@ -487,8 +490,8 @@ static struct ip_set_type bitmap_port_type = {
487 .features = IPSET_TYPE_PORT, 490 .features = IPSET_TYPE_PORT,
488 .dimension = IPSET_DIM_ONE, 491 .dimension = IPSET_DIM_ONE,
489 .family = NFPROTO_UNSPEC, 492 .family = NFPROTO_UNSPEC,
490 .revision_min = 0, 493 .revision_min = REVISION_MIN,
491 .revision_max = 0, 494 .revision_max = REVISION_MAX,
492 .create = bitmap_port_create, 495 .create = bitmap_port_create,
493 .create_policy = { 496 .create_policy = {
494 [IPSET_ATTR_PORT] = { .type = NLA_U16 }, 497 [IPSET_ATTR_PORT] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 9730882697aa..778465f217fa 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -69,7 +69,8 @@ find_set_type(const char *name, u8 family, u8 revision)
69 69
70 list_for_each_entry_rcu(type, &ip_set_type_list, list) 70 list_for_each_entry_rcu(type, &ip_set_type_list, list)
71 if (STREQ(type->name, name) && 71 if (STREQ(type->name, name) &&
72 (type->family == family || type->family == NFPROTO_UNSPEC) && 72 (type->family == family ||
73 type->family == NFPROTO_UNSPEC) &&
73 revision >= type->revision_min && 74 revision >= type->revision_min &&
74 revision <= type->revision_max) 75 revision <= type->revision_max)
75 return type; 76 return type;
@@ -149,7 +150,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
149 rcu_read_lock(); 150 rcu_read_lock();
150 list_for_each_entry_rcu(type, &ip_set_type_list, list) 151 list_for_each_entry_rcu(type, &ip_set_type_list, list)
151 if (STREQ(type->name, name) && 152 if (STREQ(type->name, name) &&
152 (type->family == family || type->family == NFPROTO_UNSPEC)) { 153 (type->family == family ||
154 type->family == NFPROTO_UNSPEC)) {
153 found = true; 155 found = true;
154 if (type->revision_min < *min) 156 if (type->revision_min < *min)
155 *min = type->revision_min; 157 *min = type->revision_min;
@@ -368,6 +370,12 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
368 set->variant->kadt(set, skb, par, IPSET_ADD, opt); 370 set->variant->kadt(set, skb, par, IPSET_ADD, opt);
369 write_unlock_bh(&set->lock); 371 write_unlock_bh(&set->lock);
370 ret = 1; 372 ret = 1;
373 } else {
374 /* --return-nomatch: invert matched element */
375 if ((opt->flags & IPSET_RETURN_NOMATCH) &&
376 (set->type->features & IPSET_TYPE_NOMATCH) &&
377 (ret > 0 || ret == -ENOTEMPTY))
378 ret = -ret;
371 } 379 }
372 380
373 /* Convert error codes to nomatch */ 381 /* Convert error codes to nomatch */
@@ -563,13 +571,13 @@ flag_exist(const struct nlmsghdr *nlh)
563} 571}
564 572
565static struct nlmsghdr * 573static struct nlmsghdr *
566start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags, 574start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
567 enum ipset_cmd cmd) 575 enum ipset_cmd cmd)
568{ 576{
569 struct nlmsghdr *nlh; 577 struct nlmsghdr *nlh;
570 struct nfgenmsg *nfmsg; 578 struct nfgenmsg *nfmsg;
571 579
572 nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8), 580 nlh = nlmsg_put(skb, portid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
573 sizeof(*nfmsg), flags); 581 sizeof(*nfmsg), flags);
574 if (nlh == NULL) 582 if (nlh == NULL)
575 return NULL; 583 return NULL;
@@ -721,7 +729,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
721 * by the nfnl mutex. Find the first free index in ip_set_list 729 * by the nfnl mutex. Find the first free index in ip_set_list
722 * and check clashing. 730 * and check clashing.
723 */ 731 */
724 if ((ret = find_free_id(set->name, &index, &clash)) != 0) { 732 ret = find_free_id(set->name, &index, &clash);
733 if (ret != 0) {
725 /* If this is the same set and requested, ignore error */ 734 /* If this is the same set and requested, ignore error */
726 if (ret == -EEXIST && 735 if (ret == -EEXIST &&
727 (flags & IPSET_FLAG_EXIST) && 736 (flags & IPSET_FLAG_EXIST) &&
@@ -1045,7 +1054,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
1045 ip_set_id_t index = IPSET_INVALID_ID, max; 1054 ip_set_id_t index = IPSET_INVALID_ID, max;
1046 struct ip_set *set = NULL; 1055 struct ip_set *set = NULL;
1047 struct nlmsghdr *nlh = NULL; 1056 struct nlmsghdr *nlh = NULL;
1048 unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0; 1057 unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
1049 u32 dump_type, dump_flags; 1058 u32 dump_type, dump_flags;
1050 int ret = 0; 1059 int ret = 0;
1051 1060
@@ -1093,7 +1102,7 @@ dump_last:
1093 pr_debug("reference set\n"); 1102 pr_debug("reference set\n");
1094 __ip_set_get(index); 1103 __ip_set_get(index);
1095 } 1104 }
1096 nlh = start_msg(skb, NETLINK_CB(cb->skb).pid, 1105 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
1097 cb->nlh->nlmsg_seq, flags, 1106 cb->nlh->nlmsg_seq, flags,
1098 IPSET_CMD_LIST); 1107 IPSET_CMD_LIST);
1099 if (!nlh) { 1108 if (!nlh) {
@@ -1226,7 +1235,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1226 skb2 = nlmsg_new(payload, GFP_KERNEL); 1235 skb2 = nlmsg_new(payload, GFP_KERNEL);
1227 if (skb2 == NULL) 1236 if (skb2 == NULL)
1228 return -ENOMEM; 1237 return -ENOMEM;
1229 rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid, 1238 rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
1230 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); 1239 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
1231 errmsg = nlmsg_data(rep); 1240 errmsg = nlmsg_data(rep);
1232 errmsg->error = ret; 1241 errmsg->error = ret;
@@ -1241,7 +1250,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1241 1250
1242 *errline = lineno; 1251 *errline = lineno;
1243 1252
1244 netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1253 netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1245 /* Signal netlink not to send its ACK/errmsg. */ 1254 /* Signal netlink not to send its ACK/errmsg. */
1246 return -EINTR; 1255 return -EINTR;
1247 } 1256 }
@@ -1416,7 +1425,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
1416 if (skb2 == NULL) 1425 if (skb2 == NULL)
1417 return -ENOMEM; 1426 return -ENOMEM;
1418 1427
1419 nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 1428 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1420 IPSET_CMD_HEADER); 1429 IPSET_CMD_HEADER);
1421 if (!nlh2) 1430 if (!nlh2)
1422 goto nlmsg_failure; 1431 goto nlmsg_failure;
@@ -1428,7 +1437,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
1428 goto nla_put_failure; 1437 goto nla_put_failure;
1429 nlmsg_end(skb2, nlh2); 1438 nlmsg_end(skb2, nlh2);
1430 1439
1431 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1440 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1432 if (ret < 0) 1441 if (ret < 0)
1433 return ret; 1442 return ret;
1434 1443
@@ -1476,7 +1485,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
1476 if (skb2 == NULL) 1485 if (skb2 == NULL)
1477 return -ENOMEM; 1486 return -ENOMEM;
1478 1487
1479 nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 1488 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1480 IPSET_CMD_TYPE); 1489 IPSET_CMD_TYPE);
1481 if (!nlh2) 1490 if (!nlh2)
1482 goto nlmsg_failure; 1491 goto nlmsg_failure;
@@ -1489,7 +1498,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
1489 nlmsg_end(skb2, nlh2); 1498 nlmsg_end(skb2, nlh2);
1490 1499
1491 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); 1500 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
1492 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1501 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1493 if (ret < 0) 1502 if (ret < 0)
1494 return ret; 1503 return ret;
1495 1504
@@ -1525,7 +1534,7 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
1525 if (skb2 == NULL) 1534 if (skb2 == NULL)
1526 return -ENOMEM; 1535 return -ENOMEM;
1527 1536
1528 nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 1537 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1529 IPSET_CMD_PROTOCOL); 1538 IPSET_CMD_PROTOCOL);
1530 if (!nlh2) 1539 if (!nlh2)
1531 goto nlmsg_failure; 1540 goto nlmsg_failure;
@@ -1533,7 +1542,7 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
1533 goto nla_put_failure; 1542 goto nla_put_failure;
1534 nlmsg_end(skb2, nlh2); 1543 nlmsg_end(skb2, nlh2);
1535 1544
1536 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1545 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1537 if (ret < 0) 1546 if (ret < 0)
1538 return ret; 1547 return ret;
1539 1548
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index a68dbd4f1e4e..ec3dba5dcd62 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -24,9 +24,12 @@
24#include <linux/netfilter/ipset/ip_set_timeout.h> 24#include <linux/netfilter/ipset/ip_set_timeout.h>
25#include <linux/netfilter/ipset/ip_set_hash.h> 25#include <linux/netfilter/ipset/ip_set_hash.h>
26 26
27#define REVISION_MIN 0
28#define REVISION_MAX 0
29
27MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
28MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 31MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
29MODULE_DESCRIPTION("hash:ip type of IP sets"); 32IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
30MODULE_ALIAS("ip_set_hash:ip"); 33MODULE_ALIAS("ip_set_hash:ip");
31 34
32/* Type specific function prefix */ 35/* Type specific function prefix */
@@ -114,7 +117,7 @@ nla_put_failure:
114static inline void 117static inline void
115hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d) 118hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d)
116{ 119{
117 h->next.ip = ntohl(d->ip); 120 h->next.ip = d->ip;
118} 121}
119 122
120static int 123static int
@@ -179,7 +182,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
179 } else if (tb[IPSET_ATTR_CIDR]) { 182 } else if (tb[IPSET_ATTR_CIDR]) {
180 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 183 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
181 184
182 if (cidr > 32) 185 if (!cidr || cidr > 32)
183 return -IPSET_ERR_INVALID_CIDR; 186 return -IPSET_ERR_INVALID_CIDR;
184 ip_set_mask_from_to(ip, ip_to, cidr); 187 ip_set_mask_from_to(ip, ip_to, cidr);
185 } else 188 } else
@@ -188,7 +191,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
188 hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); 191 hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
189 192
190 if (retried) 193 if (retried)
191 ip = h->next.ip; 194 ip = ntohl(h->next.ip);
192 for (; !before(ip_to, ip); ip += hosts) { 195 for (; !before(ip_to, ip); ip += hosts) {
193 nip = htonl(ip); 196 nip = htonl(ip);
194 if (nip == 0) 197 if (nip == 0)
@@ -452,8 +455,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
452 .features = IPSET_TYPE_IP, 455 .features = IPSET_TYPE_IP,
453 .dimension = IPSET_DIM_ONE, 456 .dimension = IPSET_DIM_ONE,
454 .family = NFPROTO_UNSPEC, 457 .family = NFPROTO_UNSPEC,
455 .revision_min = 0, 458 .revision_min = REVISION_MIN,
456 .revision_max = 0, 459 .revision_max = REVISION_MAX,
457 .create = hash_ip_create, 460 .create = hash_ip_create,
458 .create_policy = { 461 .create_policy = {
459 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 462 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 92722bb82eea..0171f7502fa5 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -25,9 +25,12 @@
25#include <linux/netfilter/ipset/ip_set_getport.h> 25#include <linux/netfilter/ipset/ip_set_getport.h>
26#include <linux/netfilter/ipset/ip_set_hash.h> 26#include <linux/netfilter/ipset/ip_set_hash.h>
27 27
28#define REVISION_MIN 0
29#define REVISION_MAX 1 /* SCTP and UDPLITE support added */
30
28MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
29MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 32MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
30MODULE_DESCRIPTION("hash:ip,port type of IP sets"); 33IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
31MODULE_ALIAS("ip_set_hash:ip,port"); 34MODULE_ALIAS("ip_set_hash:ip,port");
32 35
33/* Type specific function prefix */ 36/* Type specific function prefix */
@@ -130,8 +133,8 @@ static inline void
130hash_ipport4_data_next(struct ip_set_hash *h, 133hash_ipport4_data_next(struct ip_set_hash *h,
131 const struct hash_ipport4_elem *d) 134 const struct hash_ipport4_elem *d)
132{ 135{
133 h->next.ip = ntohl(d->ip); 136 h->next.ip = d->ip;
134 h->next.port = ntohs(d->port); 137 h->next.port = d->port;
135} 138}
136 139
137static int 140static int
@@ -217,7 +220,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
217 } else if (tb[IPSET_ATTR_CIDR]) { 220 } else if (tb[IPSET_ATTR_CIDR]) {
218 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 221 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
219 222
220 if (cidr > 32) 223 if (!cidr || cidr > 32)
221 return -IPSET_ERR_INVALID_CIDR; 224 return -IPSET_ERR_INVALID_CIDR;
222 ip_set_mask_from_to(ip, ip_to, cidr); 225 ip_set_mask_from_to(ip, ip_to, cidr);
223 } else 226 } else
@@ -231,9 +234,10 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
231 } 234 }
232 235
233 if (retried) 236 if (retried)
234 ip = h->next.ip; 237 ip = ntohl(h->next.ip);
235 for (; !before(ip_to, ip); ip++) { 238 for (; !before(ip_to, ip); ip++) {
236 p = retried && ip == h->next.ip ? h->next.port : port; 239 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
240 : port;
237 for (; p <= port_to; p++) { 241 for (; p <= port_to; p++) {
238 data.ip = htonl(ip); 242 data.ip = htonl(ip);
239 data.port = htons(p); 243 data.port = htons(p);
@@ -349,7 +353,7 @@ static inline void
349hash_ipport6_data_next(struct ip_set_hash *h, 353hash_ipport6_data_next(struct ip_set_hash *h,
350 const struct hash_ipport6_elem *d) 354 const struct hash_ipport6_elem *d)
351{ 355{
352 h->next.port = ntohs(d->port); 356 h->next.port = d->port;
353} 357}
354 358
355static int 359static int
@@ -431,7 +435,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
431 swap(port, port_to); 435 swap(port, port_to);
432 436
433 if (retried) 437 if (retried)
434 port = h->next.port; 438 port = ntohs(h->next.port);
435 for (; port <= port_to; port++) { 439 for (; port <= port_to; port++) {
436 data.port = htons(port); 440 data.port = htons(port);
437 ret = adtfn(set, &data, timeout, flags); 441 ret = adtfn(set, &data, timeout, flags);
@@ -522,8 +526,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
522 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 526 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
523 .dimension = IPSET_DIM_TWO, 527 .dimension = IPSET_DIM_TWO,
524 .family = NFPROTO_UNSPEC, 528 .family = NFPROTO_UNSPEC,
525 .revision_min = 0, 529 .revision_min = REVISION_MIN,
526 .revision_max = 1, /* SCTP and UDPLITE support added */ 530 .revision_max = REVISION_MAX,
527 .create = hash_ipport_create, 531 .create = hash_ipport_create,
528 .create_policy = { 532 .create_policy = {
529 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 533 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 0637ce096def..6344ef551ec8 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -25,9 +25,12 @@
25#include <linux/netfilter/ipset/ip_set_getport.h> 25#include <linux/netfilter/ipset/ip_set_getport.h>
26#include <linux/netfilter/ipset/ip_set_hash.h> 26#include <linux/netfilter/ipset/ip_set_hash.h>
27 27
28#define REVISION_MIN 0
29#define REVISION_MAX 1 /* SCTP and UDPLITE support added */
30
28MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
29MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 32MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
30MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets"); 33IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
31MODULE_ALIAS("ip_set_hash:ip,port,ip"); 34MODULE_ALIAS("ip_set_hash:ip,port,ip");
32 35
33/* Type specific function prefix */ 36/* Type specific function prefix */
@@ -133,8 +136,8 @@ static inline void
133hash_ipportip4_data_next(struct ip_set_hash *h, 136hash_ipportip4_data_next(struct ip_set_hash *h,
134 const struct hash_ipportip4_elem *d) 137 const struct hash_ipportip4_elem *d)
135{ 138{
136 h->next.ip = ntohl(d->ip); 139 h->next.ip = d->ip;
137 h->next.port = ntohs(d->port); 140 h->next.port = d->port;
138} 141}
139 142
140static int 143static int
@@ -225,7 +228,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
225 } else if (tb[IPSET_ATTR_CIDR]) { 228 } else if (tb[IPSET_ATTR_CIDR]) {
226 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 229 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
227 230
228 if (cidr > 32) 231 if (!cidr || cidr > 32)
229 return -IPSET_ERR_INVALID_CIDR; 232 return -IPSET_ERR_INVALID_CIDR;
230 ip_set_mask_from_to(ip, ip_to, cidr); 233 ip_set_mask_from_to(ip, ip_to, cidr);
231 } else 234 } else
@@ -239,9 +242,10 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
239 } 242 }
240 243
241 if (retried) 244 if (retried)
242 ip = h->next.ip; 245 ip = ntohl(h->next.ip);
243 for (; !before(ip_to, ip); ip++) { 246 for (; !before(ip_to, ip); ip++) {
244 p = retried && ip == h->next.ip ? h->next.port : port; 247 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
248 : port;
245 for (; p <= port_to; p++) { 249 for (; p <= port_to; p++) {
246 data.ip = htonl(ip); 250 data.ip = htonl(ip);
247 data.port = htons(p); 251 data.port = htons(p);
@@ -362,7 +366,7 @@ static inline void
362hash_ipportip6_data_next(struct ip_set_hash *h, 366hash_ipportip6_data_next(struct ip_set_hash *h,
363 const struct hash_ipportip6_elem *d) 367 const struct hash_ipportip6_elem *d)
364{ 368{
365 h->next.port = ntohs(d->port); 369 h->next.port = d->port;
366} 370}
367 371
368static int 372static int
@@ -449,7 +453,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
449 swap(port, port_to); 453 swap(port, port_to);
450 454
451 if (retried) 455 if (retried)
452 port = h->next.port; 456 port = ntohs(h->next.port);
453 for (; port <= port_to; port++) { 457 for (; port <= port_to; port++) {
454 data.port = htons(port); 458 data.port = htons(port);
455 ret = adtfn(set, &data, timeout, flags); 459 ret = adtfn(set, &data, timeout, flags);
@@ -540,8 +544,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
540 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 544 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
541 .dimension = IPSET_DIM_THREE, 545 .dimension = IPSET_DIM_THREE,
542 .family = NFPROTO_UNSPEC, 546 .family = NFPROTO_UNSPEC,
543 .revision_min = 0, 547 .revision_min = REVISION_MIN,
544 .revision_max = 1, /* SCTP and UDPLITE support added */ 548 .revision_max = REVISION_MAX,
545 .create = hash_ipportip_create, 549 .create = hash_ipportip_create,
546 .create_policy = { 550 .create_policy = {
547 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 551 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 1ce21ca976e1..cb71f9a774e7 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -25,9 +25,14 @@
25#include <linux/netfilter/ipset/ip_set_getport.h> 25#include <linux/netfilter/ipset/ip_set_getport.h>
26#include <linux/netfilter/ipset/ip_set_hash.h> 26#include <linux/netfilter/ipset/ip_set_hash.h>
27 27
28#define REVISION_MIN 0
29/* 1 SCTP and UDPLITE support added */
30/* 2 Range as input support for IPv4 added */
31#define REVISION_MAX 3 /* nomatch flag support added */
32
28MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
29MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 34MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
30MODULE_DESCRIPTION("hash:ip,port,net type of IP sets"); 35IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
31MODULE_ALIAS("ip_set_hash:ip,port,net"); 36MODULE_ALIAS("ip_set_hash:ip,port,net");
32 37
33/* Type specific function prefix */ 38/* Type specific function prefix */
@@ -99,10 +104,10 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
99 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); 104 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
100} 105}
101 106
102static inline bool 107static inline int
103hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem) 108hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
104{ 109{
105 return !elem->nomatch; 110 return elem->nomatch ? -ENOTEMPTY : 1;
106} 111}
107 112
108static inline void 113static inline void
@@ -173,9 +178,9 @@ static inline void
173hash_ipportnet4_data_next(struct ip_set_hash *h, 178hash_ipportnet4_data_next(struct ip_set_hash *h,
174 const struct hash_ipportnet4_elem *d) 179 const struct hash_ipportnet4_elem *d)
175{ 180{
176 h->next.ip = ntohl(d->ip); 181 h->next.ip = d->ip;
177 h->next.port = ntohs(d->port); 182 h->next.port = d->port;
178 h->next.ip2 = ntohl(d->ip2); 183 h->next.ip2 = d->ip2;
179} 184}
180 185
181static int 186static int
@@ -290,7 +295,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
290 } else if (tb[IPSET_ATTR_CIDR]) { 295 } else if (tb[IPSET_ATTR_CIDR]) {
291 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 296 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
292 297
293 if (cidr > 32) 298 if (!cidr || cidr > 32)
294 return -IPSET_ERR_INVALID_CIDR; 299 return -IPSET_ERR_INVALID_CIDR;
295 ip_set_mask_from_to(ip, ip_to, cidr); 300 ip_set_mask_from_to(ip, ip_to, cidr);
296 } 301 }
@@ -314,14 +319,17 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
314 } 319 }
315 320
316 if (retried) 321 if (retried)
317 ip = h->next.ip; 322 ip = ntohl(h->next.ip);
318 for (; !before(ip_to, ip); ip++) { 323 for (; !before(ip_to, ip); ip++) {
319 data.ip = htonl(ip); 324 data.ip = htonl(ip);
320 p = retried && ip == h->next.ip ? h->next.port : port; 325 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
326 : port;
321 for (; p <= port_to; p++) { 327 for (; p <= port_to; p++) {
322 data.port = htons(p); 328 data.port = htons(p);
323 ip2 = retried && ip == h->next.ip && p == h->next.port 329 ip2 = retried
324 ? h->next.ip2 : ip2_from; 330 && ip == ntohl(h->next.ip)
331 && p == ntohs(h->next.port)
332 ? ntohl(h->next.ip2) : ip2_from;
325 while (!after(ip2, ip2_to)) { 333 while (!after(ip2, ip2_to)) {
326 data.ip2 = htonl(ip2); 334 data.ip2 = htonl(ip2);
327 ip2_last = ip_set_range_to_cidr(ip2, ip2_to, 335 ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
@@ -403,10 +411,10 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
403 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); 411 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
404} 412}
405 413
406static inline bool 414static inline int
407hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) 415hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
408{ 416{
409 return !elem->nomatch; 417 return elem->nomatch ? -ENOTEMPTY : 1;
410} 418}
411 419
412static inline void 420static inline void
@@ -486,7 +494,7 @@ static inline void
486hash_ipportnet6_data_next(struct ip_set_hash *h, 494hash_ipportnet6_data_next(struct ip_set_hash *h,
487 const struct hash_ipportnet6_elem *d) 495 const struct hash_ipportnet6_elem *d)
488{ 496{
489 h->next.port = ntohs(d->port); 497 h->next.port = d->port;
490} 498}
491 499
492static int 500static int
@@ -598,7 +606,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
598 swap(port, port_to); 606 swap(port, port_to);
599 607
600 if (retried) 608 if (retried)
601 port = h->next.port; 609 port = ntohs(h->next.port);
602 for (; port <= port_to; port++) { 610 for (; port <= port_to; port++) {
603 data.port = htons(port); 611 data.port = htons(port);
604 ret = adtfn(set, &data, timeout, flags); 612 ret = adtfn(set, &data, timeout, flags);
@@ -689,13 +697,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
689static struct ip_set_type hash_ipportnet_type __read_mostly = { 697static struct ip_set_type hash_ipportnet_type __read_mostly = {
690 .name = "hash:ip,port,net", 698 .name = "hash:ip,port,net",
691 .protocol = IPSET_PROTOCOL, 699 .protocol = IPSET_PROTOCOL,
692 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 700 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
701 IPSET_TYPE_NOMATCH,
693 .dimension = IPSET_DIM_THREE, 702 .dimension = IPSET_DIM_THREE,
694 .family = NFPROTO_UNSPEC, 703 .family = NFPROTO_UNSPEC,
695 .revision_min = 0, 704 .revision_min = REVISION_MIN,
696 /* 1 SCTP and UDPLITE support added */ 705 .revision_max = REVISION_MAX,
697 /* 2 Range as input support for IPv4 added */
698 .revision_max = 3, /* nomatch flag support added */
699 .create = hash_ipportnet_create, 706 .create = hash_ipportnet_create,
700 .create_policy = { 707 .create_policy = {
701 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 708 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c57a6a09906d..29e94b981f3f 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -23,9 +23,13 @@
23#include <linux/netfilter/ipset/ip_set_timeout.h> 23#include <linux/netfilter/ipset/ip_set_timeout.h>
24#include <linux/netfilter/ipset/ip_set_hash.h> 24#include <linux/netfilter/ipset/ip_set_hash.h>
25 25
26#define REVISION_MIN 0
27/* 1 Range as input support for IPv4 added */
28#define REVISION_MAX 2 /* nomatch flag support added */
29
26MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
27MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 31MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
28MODULE_DESCRIPTION("hash:net type of IP sets"); 32IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
29MODULE_ALIAS("ip_set_hash:net"); 33MODULE_ALIAS("ip_set_hash:net");
30 34
31/* Type specific function prefix */ 35/* Type specific function prefix */
@@ -86,10 +90,10 @@ hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
86 dst->nomatch = flags & IPSET_FLAG_NOMATCH; 90 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
87} 91}
88 92
89static inline bool 93static inline int
90hash_net4_data_match(const struct hash_net4_elem *elem) 94hash_net4_data_match(const struct hash_net4_elem *elem)
91{ 95{
92 return !elem->nomatch; 96 return elem->nomatch ? -ENOTEMPTY : 1;
93} 97}
94 98
95static inline void 99static inline void
@@ -152,7 +156,7 @@ static inline void
152hash_net4_data_next(struct ip_set_hash *h, 156hash_net4_data_next(struct ip_set_hash *h,
153 const struct hash_net4_elem *d) 157 const struct hash_net4_elem *d)
154{ 158{
155 h->next.ip = ntohl(d->ip); 159 h->next.ip = d->ip;
156} 160}
157 161
158static int 162static int
@@ -235,7 +239,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
235 return -IPSET_ERR_HASH_RANGE; 239 return -IPSET_ERR_HASH_RANGE;
236 } 240 }
237 if (retried) 241 if (retried)
238 ip = h->next.ip; 242 ip = ntohl(h->next.ip);
239 while (!after(ip, ip_to)) { 243 while (!after(ip, ip_to)) {
240 data.ip = htonl(ip); 244 data.ip = htonl(ip);
241 last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); 245 last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
@@ -307,10 +311,10 @@ hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
307 dst->nomatch = flags & IPSET_FLAG_NOMATCH; 311 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
308} 312}
309 313
310static inline bool 314static inline int
311hash_net6_data_match(const struct hash_net6_elem *elem) 315hash_net6_data_match(const struct hash_net6_elem *elem)
312{ 316{
313 return !elem->nomatch; 317 return elem->nomatch ? -ENOTEMPTY : 1;
314} 318}
315 319
316static inline void 320static inline void
@@ -532,12 +536,11 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
532static struct ip_set_type hash_net_type __read_mostly = { 536static struct ip_set_type hash_net_type __read_mostly = {
533 .name = "hash:net", 537 .name = "hash:net",
534 .protocol = IPSET_PROTOCOL, 538 .protocol = IPSET_PROTOCOL,
535 .features = IPSET_TYPE_IP, 539 .features = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
536 .dimension = IPSET_DIM_ONE, 540 .dimension = IPSET_DIM_ONE,
537 .family = NFPROTO_UNSPEC, 541 .family = NFPROTO_UNSPEC,
538 .revision_min = 0, 542 .revision_min = REVISION_MIN,
539 /* = 1 Range as input support for IPv4 added */ 543 .revision_max = REVISION_MAX,
540 .revision_max = 2, /* nomatch flag support added */
541 .create = hash_net_create, 544 .create = hash_net_create,
542 .create_policy = { 545 .create_policy = {
543 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 546 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index d5d3607ae7bc..b9a63381e349 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -24,9 +24,13 @@
24#include <linux/netfilter/ipset/ip_set_timeout.h> 24#include <linux/netfilter/ipset/ip_set_timeout.h>
25#include <linux/netfilter/ipset/ip_set_hash.h> 25#include <linux/netfilter/ipset/ip_set_hash.h>
26 26
27#define REVISION_MIN 0
28/* 1 nomatch flag support added */
29#define REVISION_MAX 2 /* /0 support added */
30
27MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
28MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 32MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
29MODULE_DESCRIPTION("hash:net,iface type of IP sets"); 33IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
30MODULE_ALIAS("ip_set_hash:net,iface"); 34MODULE_ALIAS("ip_set_hash:net,iface");
31 35
32/* Interface name rbtree */ 36/* Interface name rbtree */
@@ -140,7 +144,7 @@ struct hash_netiface4_elem_hashed {
140 u8 physdev; 144 u8 physdev;
141 u8 cidr; 145 u8 cidr;
142 u8 nomatch; 146 u8 nomatch;
143 u8 padding; 147 u8 elem;
144}; 148};
145 149
146#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) 150#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
@@ -151,7 +155,7 @@ struct hash_netiface4_elem {
151 u8 physdev; 155 u8 physdev;
152 u8 cidr; 156 u8 cidr;
153 u8 nomatch; 157 u8 nomatch;
154 u8 padding; 158 u8 elem;
155 const char *iface; 159 const char *iface;
156}; 160};
157 161
@@ -161,7 +165,7 @@ struct hash_netiface4_telem {
161 u8 physdev; 165 u8 physdev;
162 u8 cidr; 166 u8 cidr;
163 u8 nomatch; 167 u8 nomatch;
164 u8 padding; 168 u8 elem;
165 const char *iface; 169 const char *iface;
166 unsigned long timeout; 170 unsigned long timeout;
167}; 171};
@@ -181,18 +185,14 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
181static inline bool 185static inline bool
182hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem) 186hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
183{ 187{
184 return elem->cidr == 0; 188 return elem->elem == 0;
185} 189}
186 190
187static inline void 191static inline void
188hash_netiface4_data_copy(struct hash_netiface4_elem *dst, 192hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
189 const struct hash_netiface4_elem *src) 193 const struct hash_netiface4_elem *src)
190{ 194{
191 dst->ip = src->ip; 195 memcpy(dst, src, sizeof(*dst));
192 dst->cidr = src->cidr;
193 dst->physdev = src->physdev;
194 dst->iface = src->iface;
195 dst->nomatch = src->nomatch;
196} 196}
197 197
198static inline void 198static inline void
@@ -201,10 +201,10 @@ hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
201 dst->nomatch = flags & IPSET_FLAG_NOMATCH; 201 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
202} 202}
203 203
204static inline bool 204static inline int
205hash_netiface4_data_match(const struct hash_netiface4_elem *elem) 205hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
206{ 206{
207 return !elem->nomatch; 207 return elem->nomatch ? -ENOTEMPTY : 1;
208} 208}
209 209
210static inline void 210static inline void
@@ -217,7 +217,7 @@ hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
217static inline void 217static inline void
218hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem) 218hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem)
219{ 219{
220 elem->cidr = 0; 220 elem->elem = 0;
221} 221}
222 222
223static bool 223static bool
@@ -277,7 +277,7 @@ static inline void
277hash_netiface4_data_next(struct ip_set_hash *h, 277hash_netiface4_data_next(struct ip_set_hash *h,
278 const struct hash_netiface4_elem *d) 278 const struct hash_netiface4_elem *d)
279{ 279{
280 h->next.ip = ntohl(d->ip); 280 h->next.ip = d->ip;
281} 281}
282 282
283static int 283static int
@@ -288,7 +288,8 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
288 struct ip_set_hash *h = set->data; 288 struct ip_set_hash *h = set->data;
289 ipset_adtfn adtfn = set->variant->adt[adt]; 289 ipset_adtfn adtfn = set->variant->adt[adt];
290 struct hash_netiface4_elem data = { 290 struct hash_netiface4_elem data = {
291 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 291 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
292 .elem = 1,
292 }; 293 };
293 int ret; 294 int ret;
294 295
@@ -339,7 +340,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
339{ 340{
340 struct ip_set_hash *h = set->data; 341 struct ip_set_hash *h = set->data;
341 ipset_adtfn adtfn = set->variant->adt[adt]; 342 ipset_adtfn adtfn = set->variant->adt[adt];
342 struct hash_netiface4_elem data = { .cidr = HOST_MASK }; 343 struct hash_netiface4_elem data = { .cidr = HOST_MASK, .elem = 1 };
343 u32 ip = 0, ip_to, last; 344 u32 ip = 0, ip_to, last;
344 u32 timeout = h->timeout; 345 u32 timeout = h->timeout;
345 char iface[IFNAMSIZ]; 346 char iface[IFNAMSIZ];
@@ -360,7 +361,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
360 361
361 if (tb[IPSET_ATTR_CIDR]) { 362 if (tb[IPSET_ATTR_CIDR]) {
362 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 363 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
363 if (!data.cidr || data.cidr > HOST_MASK) 364 if (data.cidr > HOST_MASK)
364 return -IPSET_ERR_INVALID_CIDR; 365 return -IPSET_ERR_INVALID_CIDR;
365 } 366 }
366 367
@@ -389,7 +390,6 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
389 if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH)) 390 if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
390 flags |= (cadt_flags << 16); 391 flags |= (cadt_flags << 16);
391 } 392 }
392
393 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 393 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
394 data.ip = htonl(ip & ip_set_hostmask(data.cidr)); 394 data.ip = htonl(ip & ip_set_hostmask(data.cidr));
395 ret = adtfn(set, &data, timeout, flags); 395 ret = adtfn(set, &data, timeout, flags);
@@ -409,7 +409,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
409 } 409 }
410 410
411 if (retried) 411 if (retried)
412 ip = h->next.ip; 412 ip = ntohl(h->next.ip);
413 while (!after(ip, ip_to)) { 413 while (!after(ip, ip_to)) {
414 data.ip = htonl(ip); 414 data.ip = htonl(ip);
415 last = ip_set_range_to_cidr(ip, ip_to, &data.cidr); 415 last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
@@ -442,7 +442,7 @@ struct hash_netiface6_elem_hashed {
442 u8 physdev; 442 u8 physdev;
443 u8 cidr; 443 u8 cidr;
444 u8 nomatch; 444 u8 nomatch;
445 u8 padding; 445 u8 elem;
446}; 446};
447 447
448#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) 448#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
@@ -452,7 +452,7 @@ struct hash_netiface6_elem {
452 u8 physdev; 452 u8 physdev;
453 u8 cidr; 453 u8 cidr;
454 u8 nomatch; 454 u8 nomatch;
455 u8 padding; 455 u8 elem;
456 const char *iface; 456 const char *iface;
457}; 457};
458 458
@@ -461,7 +461,7 @@ struct hash_netiface6_telem {
461 u8 physdev; 461 u8 physdev;
462 u8 cidr; 462 u8 cidr;
463 u8 nomatch; 463 u8 nomatch;
464 u8 padding; 464 u8 elem;
465 const char *iface; 465 const char *iface;
466 unsigned long timeout; 466 unsigned long timeout;
467}; 467};
@@ -481,7 +481,7 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
481static inline bool 481static inline bool
482hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem) 482hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem)
483{ 483{
484 return elem->cidr == 0; 484 return elem->elem == 0;
485} 485}
486 486
487static inline void 487static inline void
@@ -497,16 +497,16 @@ hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
497 dst->nomatch = flags & IPSET_FLAG_NOMATCH; 497 dst->nomatch = flags & IPSET_FLAG_NOMATCH;
498} 498}
499 499
500static inline bool 500static inline int
501hash_netiface6_data_match(const struct hash_netiface6_elem *elem) 501hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
502{ 502{
503 return !elem->nomatch; 503 return elem->nomatch ? -ENOTEMPTY : 1;
504} 504}
505 505
506static inline void 506static inline void
507hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) 507hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
508{ 508{
509 elem->cidr = 0; 509 elem->elem = 0;
510} 510}
511 511
512static inline void 512static inline void
@@ -590,7 +590,8 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
590 struct ip_set_hash *h = set->data; 590 struct ip_set_hash *h = set->data;
591 ipset_adtfn adtfn = set->variant->adt[adt]; 591 ipset_adtfn adtfn = set->variant->adt[adt];
592 struct hash_netiface6_elem data = { 592 struct hash_netiface6_elem data = {
593 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK 593 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
594 .elem = 1,
594 }; 595 };
595 int ret; 596 int ret;
596 597
@@ -637,7 +638,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
637{ 638{
638 struct ip_set_hash *h = set->data; 639 struct ip_set_hash *h = set->data;
639 ipset_adtfn adtfn = set->variant->adt[adt]; 640 ipset_adtfn adtfn = set->variant->adt[adt];
640 struct hash_netiface6_elem data = { .cidr = HOST_MASK }; 641 struct hash_netiface6_elem data = { .cidr = HOST_MASK, .elem = 1 };
641 u32 timeout = h->timeout; 642 u32 timeout = h->timeout;
642 char iface[IFNAMSIZ]; 643 char iface[IFNAMSIZ];
643 int ret; 644 int ret;
@@ -659,7 +660,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
659 660
660 if (tb[IPSET_ATTR_CIDR]) 661 if (tb[IPSET_ATTR_CIDR])
661 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); 662 data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
662 if (!data.cidr || data.cidr > HOST_MASK) 663 if (data.cidr > HOST_MASK)
663 return -IPSET_ERR_INVALID_CIDR; 664 return -IPSET_ERR_INVALID_CIDR;
664 ip6_netmask(&data.ip, data.cidr); 665 ip6_netmask(&data.ip, data.cidr);
665 666
@@ -773,11 +774,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
773static struct ip_set_type hash_netiface_type __read_mostly = { 774static struct ip_set_type hash_netiface_type __read_mostly = {
774 .name = "hash:net,iface", 775 .name = "hash:net,iface",
775 .protocol = IPSET_PROTOCOL, 776 .protocol = IPSET_PROTOCOL,
776 .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE, 777 .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE |
778 IPSET_TYPE_NOMATCH,
777 .dimension = IPSET_DIM_TWO, 779 .dimension = IPSET_DIM_TWO,
778 .family = NFPROTO_UNSPEC, 780 .family = NFPROTO_UNSPEC,
779 .revision_min = 0, 781 .revision_min = REVISION_MIN,
780 .revision_max = 1, /* nomatch flag support added */ 782 .revision_max = REVISION_MAX,
781 .create = hash_netiface_create, 783 .create = hash_netiface_create,
782 .create_policy = { 784 .create_policy = {
783 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 785 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index fc3143a2d41b..7ef700de596c 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -24,9 +24,14 @@
24#include <linux/netfilter/ipset/ip_set_getport.h> 24#include <linux/netfilter/ipset/ip_set_getport.h>
25#include <linux/netfilter/ipset/ip_set_hash.h> 25#include <linux/netfilter/ipset/ip_set_hash.h>
26 26
27#define REVISION_MIN 0
28/* 1 SCTP and UDPLITE support added */
29/* 2 Range as input support for IPv4 added */
30#define REVISION_MAX 3 /* nomatch flag support added */
31
27MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
28MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 33MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
29MODULE_DESCRIPTION("hash:net,port type of IP sets"); 34IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
30MODULE_ALIAS("ip_set_hash:net,port"); 35MODULE_ALIAS("ip_set_hash:net,port");
31 36
32/* Type specific function prefix */ 37/* Type specific function prefix */
@@ -99,10 +104,10 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
99 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); 104 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
100} 105}
101 106
102static inline bool 107static inline int
103hash_netport4_data_match(const struct hash_netport4_elem *elem) 108hash_netport4_data_match(const struct hash_netport4_elem *elem)
104{ 109{
105 return !elem->nomatch; 110 return elem->nomatch ? -ENOTEMPTY : 1;
106} 111}
107 112
108static inline void 113static inline void
@@ -171,8 +176,8 @@ static inline void
171hash_netport4_data_next(struct ip_set_hash *h, 176hash_netport4_data_next(struct ip_set_hash *h,
172 const struct hash_netport4_elem *d) 177 const struct hash_netport4_elem *d)
173{ 178{
174 h->next.ip = ntohl(d->ip); 179 h->next.ip = d->ip;
175 h->next.port = ntohs(d->port); 180 h->next.port = d->port;
176} 181}
177 182
178static int 183static int
@@ -289,12 +294,13 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
289 } 294 }
290 295
291 if (retried) 296 if (retried)
292 ip = h->next.ip; 297 ip = ntohl(h->next.ip);
293 while (!after(ip, ip_to)) { 298 while (!after(ip, ip_to)) {
294 data.ip = htonl(ip); 299 data.ip = htonl(ip);
295 last = ip_set_range_to_cidr(ip, ip_to, &cidr); 300 last = ip_set_range_to_cidr(ip, ip_to, &cidr);
296 data.cidr = cidr - 1; 301 data.cidr = cidr - 1;
297 p = retried && ip == h->next.ip ? h->next.port : port; 302 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
303 : port;
298 for (; p <= port_to; p++) { 304 for (; p <= port_to; p++) {
299 data.port = htons(p); 305 data.port = htons(p);
300 ret = adtfn(set, &data, timeout, flags); 306 ret = adtfn(set, &data, timeout, flags);
@@ -369,10 +375,10 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
369 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); 375 dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
370} 376}
371 377
372static inline bool 378static inline int
373hash_netport6_data_match(const struct hash_netport6_elem *elem) 379hash_netport6_data_match(const struct hash_netport6_elem *elem)
374{ 380{
375 return !elem->nomatch; 381 return elem->nomatch ? -ENOTEMPTY : 1;
376} 382}
377 383
378static inline void 384static inline void
@@ -450,7 +456,7 @@ static inline void
450hash_netport6_data_next(struct ip_set_hash *h, 456hash_netport6_data_next(struct ip_set_hash *h,
451 const struct hash_netport6_elem *d) 457 const struct hash_netport6_elem *d)
452{ 458{
453 h->next.port = ntohs(d->port); 459 h->next.port = d->port;
454} 460}
455 461
456static int 462static int
@@ -554,7 +560,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
554 swap(port, port_to); 560 swap(port, port_to);
555 561
556 if (retried) 562 if (retried)
557 port = h->next.port; 563 port = ntohs(h->next.port);
558 for (; port <= port_to; port++) { 564 for (; port <= port_to; port++) {
559 data.port = htons(port); 565 data.port = htons(port);
560 ret = adtfn(set, &data, timeout, flags); 566 ret = adtfn(set, &data, timeout, flags);
@@ -644,13 +650,11 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
644static struct ip_set_type hash_netport_type __read_mostly = { 650static struct ip_set_type hash_netport_type __read_mostly = {
645 .name = "hash:net,port", 651 .name = "hash:net,port",
646 .protocol = IPSET_PROTOCOL, 652 .protocol = IPSET_PROTOCOL,
647 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 653 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
648 .dimension = IPSET_DIM_TWO, 654 .dimension = IPSET_DIM_TWO,
649 .family = NFPROTO_UNSPEC, 655 .family = NFPROTO_UNSPEC,
650 .revision_min = 0, 656 .revision_min = REVISION_MIN,
651 /* 1 SCTP and UDPLITE support added */ 657 .revision_max = REVISION_MAX,
652 /* 2, Range as input support for IPv4 added */
653 .revision_max = 3, /* nomatch flag support added */
654 .create = hash_netport_create, 658 .create = hash_netport_create,
655 .create_policy = { 659 .create_policy = {
656 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 660 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 6cb1225765f9..8371c2bac2e4 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -16,9 +16,12 @@
16#include <linux/netfilter/ipset/ip_set_timeout.h> 16#include <linux/netfilter/ipset/ip_set_timeout.h>
17#include <linux/netfilter/ipset/ip_set_list.h> 17#include <linux/netfilter/ipset/ip_set_list.h>
18 18
19#define REVISION_MIN 0
20#define REVISION_MAX 0
21
19MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
20MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); 23MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
21MODULE_DESCRIPTION("list:set type of IP sets"); 24IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
22MODULE_ALIAS("ip_set_list:set"); 25MODULE_ALIAS("ip_set_list:set");
23 26
24/* Member elements without and with timeout */ 27/* Member elements without and with timeout */
@@ -579,8 +582,8 @@ static struct ip_set_type list_set_type __read_mostly = {
579 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST, 582 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
580 .dimension = IPSET_DIM_ONE, 583 .dimension = IPSET_DIM_ONE,
581 .family = NFPROTO_UNSPEC, 584 .family = NFPROTO_UNSPEC,
582 .revision_min = 0, 585 .revision_min = REVISION_MIN,
583 .revision_max = 0, 586 .revision_max = REVISION_MAX,
584 .create = list_set_create, 587 .create = list_set_create,
585 .create_policy = { 588 .create_policy = {
586 [IPSET_ATTR_SIZE] = { .type = NLA_U32 }, 589 [IPSET_ATTR_SIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index f9871385a65e..8b2cffdfdd99 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -250,7 +250,8 @@ comment 'IPVS application helper'
250 250
251config IP_VS_FTP 251config IP_VS_FTP
252 tristate "FTP protocol helper" 252 tristate "FTP protocol helper"
253 depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT 253 depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT && \
254 NF_CONNTRACK_FTP
254 select IP_VS_NFCT 255 select IP_VS_NFCT
255 ---help--- 256 ---help---
256 FTP is a protocol that transfers IP address and/or port number in 257 FTP is a protocol that transfers IP address and/or port number in
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 64f9e8f13207..9713e6e86d47 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -180,22 +180,38 @@ register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
180} 180}
181 181
182 182
183/* 183/* Register application for netns */
184 * ip_vs_app registration routine 184struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
185 */
186int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
187{ 185{
188 struct netns_ipvs *ipvs = net_ipvs(net); 186 struct netns_ipvs *ipvs = net_ipvs(net);
189 /* increase the module use count */ 187 struct ip_vs_app *a;
190 ip_vs_use_count_inc(); 188 int err = 0;
189
190 if (!ipvs)
191 return ERR_PTR(-ENOENT);
191 192
192 mutex_lock(&__ip_vs_app_mutex); 193 mutex_lock(&__ip_vs_app_mutex);
193 194
194 list_add(&app->a_list, &ipvs->app_list); 195 list_for_each_entry(a, &ipvs->app_list, a_list) {
196 if (!strcmp(app->name, a->name)) {
197 err = -EEXIST;
198 goto out_unlock;
199 }
200 }
201 a = kmemdup(app, sizeof(*app), GFP_KERNEL);
202 if (!a) {
203 err = -ENOMEM;
204 goto out_unlock;
205 }
206 INIT_LIST_HEAD(&a->incs_list);
207 list_add(&a->a_list, &ipvs->app_list);
208 /* increase the module use count */
209 ip_vs_use_count_inc();
195 210
211out_unlock:
196 mutex_unlock(&__ip_vs_app_mutex); 212 mutex_unlock(&__ip_vs_app_mutex);
197 213
198 return 0; 214 return err ? ERR_PTR(err) : a;
199} 215}
200 216
201 217
@@ -205,20 +221,29 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
205 */ 221 */
206void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app) 222void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
207{ 223{
208 struct ip_vs_app *inc, *nxt; 224 struct netns_ipvs *ipvs = net_ipvs(net);
225 struct ip_vs_app *a, *anxt, *inc, *nxt;
226
227 if (!ipvs)
228 return;
209 229
210 mutex_lock(&__ip_vs_app_mutex); 230 mutex_lock(&__ip_vs_app_mutex);
211 231
212 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) { 232 list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
213 ip_vs_app_inc_release(net, inc); 233 if (app && strcmp(app->name, a->name))
214 } 234 continue;
235 list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
236 ip_vs_app_inc_release(net, inc);
237 }
215 238
216 list_del(&app->a_list); 239 list_del(&a->a_list);
240 kfree(a);
217 241
218 mutex_unlock(&__ip_vs_app_mutex); 242 /* decrease the module use count */
243 ip_vs_use_count_dec();
244 }
219 245
220 /* decrease the module use count */ 246 mutex_unlock(&__ip_vs_app_mutex);
221 ip_vs_use_count_dec();
222} 247}
223 248
224 249
@@ -586,5 +611,6 @@ int __net_init ip_vs_app_net_init(struct net *net)
586 611
587void __net_exit ip_vs_app_net_cleanup(struct net *net) 612void __net_exit ip_vs_app_net_cleanup(struct net *net)
588{ 613{
614 unregister_ip_vs_app(net, NULL /* all */);
589 proc_net_remove(net, "ip_vs_app"); 615 proc_net_remove(net, "ip_vs_app");
590} 616}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b54eccef40b5..58918e20f9d5 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1303,7 +1303,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1303 struct ip_vs_conn *cp; 1303 struct ip_vs_conn *cp;
1304 struct ip_vs_protocol *pp; 1304 struct ip_vs_protocol *pp;
1305 struct ip_vs_proto_data *pd; 1305 struct ip_vs_proto_data *pd;
1306 unsigned int offset, ihl, verdict; 1306 unsigned int offset, offset2, ihl, verdict;
1307 bool ipip;
1307 1308
1308 *related = 1; 1309 *related = 1;
1309 1310
@@ -1345,6 +1346,21 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1345 1346
1346 net = skb_net(skb); 1347 net = skb_net(skb);
1347 1348
1349 /* Special case for errors for IPIP packets */
1350 ipip = false;
1351 if (cih->protocol == IPPROTO_IPIP) {
1352 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1353 return NF_ACCEPT;
1354 /* Error for our IPIP must arrive at LOCAL_IN */
1355 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1356 return NF_ACCEPT;
1357 offset += cih->ihl * 4;
1358 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1359 if (cih == NULL)
1360 return NF_ACCEPT; /* The packet looks wrong, ignore */
1361 ipip = true;
1362 }
1363
1348 pd = ip_vs_proto_data_get(net, cih->protocol); 1364 pd = ip_vs_proto_data_get(net, cih->protocol);
1349 if (!pd) 1365 if (!pd)
1350 return NF_ACCEPT; 1366 return NF_ACCEPT;
@@ -1358,11 +1374,14 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1358 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset, 1374 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1359 "Checking incoming ICMP for"); 1375 "Checking incoming ICMP for");
1360 1376
1377 offset2 = offset;
1361 offset += cih->ihl * 4; 1378 offset += cih->ihl * 4;
1362 1379
1363 ip_vs_fill_iphdr(AF_INET, cih, &ciph); 1380 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
1364 /* The embedded headers contain source and dest in reverse order */ 1381 /* The embedded headers contain source and dest in reverse order.
1365 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1); 1382 * For IPIP this is error for request, not for reply.
1383 */
1384 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1);
1366 if (!cp) 1385 if (!cp)
1367 return NF_ACCEPT; 1386 return NF_ACCEPT;
1368 1387
@@ -1376,6 +1395,57 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1376 goto out; 1395 goto out;
1377 } 1396 }
1378 1397
1398 if (ipip) {
1399 __be32 info = ic->un.gateway;
1400
1401 /* Update the MTU */
1402 if (ic->type == ICMP_DEST_UNREACH &&
1403 ic->code == ICMP_FRAG_NEEDED) {
1404 struct ip_vs_dest *dest = cp->dest;
1405 u32 mtu = ntohs(ic->un.frag.mtu);
1406
1407 /* Strip outer IP and ICMP, go to IPIP header */
1408 __skb_pull(skb, ihl + sizeof(_icmph));
1409 offset2 -= ihl + sizeof(_icmph);
1410 skb_reset_network_header(skb);
1411 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1412 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1413 rcu_read_lock();
1414 ipv4_update_pmtu(skb, dev_net(skb->dev),
1415 mtu, 0, 0, 0, 0);
1416 rcu_read_unlock();
1417 /* Client uses PMTUD? */
1418 if (!(cih->frag_off & htons(IP_DF)))
1419 goto ignore_ipip;
1420 /* Prefer the resulting PMTU */
1421 if (dest) {
1422 spin_lock(&dest->dst_lock);
1423 if (dest->dst_cache)
1424 mtu = dst_mtu(dest->dst_cache);
1425 spin_unlock(&dest->dst_lock);
1426 }
1427 if (mtu > 68 + sizeof(struct iphdr))
1428 mtu -= sizeof(struct iphdr);
1429 info = htonl(mtu);
1430 }
1431 /* Strip outer IP, ICMP and IPIP, go to IP header of
1432 * original request.
1433 */
1434 __skb_pull(skb, offset2);
1435 skb_reset_network_header(skb);
1436 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1437 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1438 ic->type, ic->code, ntohl(info));
1439 icmp_send(skb, ic->type, ic->code, info);
1440 /* ICMP can be shorter but anyways, account it */
1441 ip_vs_out_stats(cp, skb);
1442
1443ignore_ipip:
1444 consume_skb(skb);
1445 verdict = NF_STOLEN;
1446 goto out;
1447 }
1448
1379 /* do the statistics and put it back */ 1449 /* do the statistics and put it back */
1380 ip_vs_in_stats(cp, skb); 1450 ip_vs_in_stats(cp, skb);
1381 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1451 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index f51013c07b9f..7e7198b51c06 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -539,8 +539,7 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
539 * Remove it from the rs_table table. 539 * Remove it from the rs_table table.
540 */ 540 */
541 if (!list_empty(&dest->d_list)) { 541 if (!list_empty(&dest->d_list)) {
542 list_del(&dest->d_list); 542 list_del_init(&dest->d_list);
543 INIT_LIST_HEAD(&dest->d_list);
544 } 543 }
545 544
546 return 1; 545 return 1;
@@ -1803,6 +1802,12 @@ static struct ctl_table vs_vars[] = {
1803 .mode = 0644, 1802 .mode = 0644,
1804 .proc_handler = proc_dointvec, 1803 .proc_handler = proc_dointvec,
1805 }, 1804 },
1805 {
1806 .procname = "pmtu_disc",
1807 .maxlen = sizeof(int),
1808 .mode = 0644,
1809 .proc_handler = proc_dointvec,
1810 },
1806#ifdef CONFIG_IP_VS_DEBUG 1811#ifdef CONFIG_IP_VS_DEBUG
1807 { 1812 {
1808 .procname = "debug_level", 1813 .procname = "debug_level",
@@ -2933,7 +2938,7 @@ static int ip_vs_genl_dump_service(struct sk_buff *skb,
2933{ 2938{
2934 void *hdr; 2939 void *hdr;
2935 2940
2936 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 2941 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2937 &ip_vs_genl_family, NLM_F_MULTI, 2942 &ip_vs_genl_family, NLM_F_MULTI,
2938 IPVS_CMD_NEW_SERVICE); 2943 IPVS_CMD_NEW_SERVICE);
2939 if (!hdr) 2944 if (!hdr)
@@ -3122,7 +3127,7 @@ static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
3122{ 3127{
3123 void *hdr; 3128 void *hdr;
3124 3129
3125 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 3130 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3126 &ip_vs_genl_family, NLM_F_MULTI, 3131 &ip_vs_genl_family, NLM_F_MULTI,
3127 IPVS_CMD_NEW_DEST); 3132 IPVS_CMD_NEW_DEST);
3128 if (!hdr) 3133 if (!hdr)
@@ -3251,7 +3256,7 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
3251 struct netlink_callback *cb) 3256 struct netlink_callback *cb)
3252{ 3257{
3253 void *hdr; 3258 void *hdr;
3254 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 3259 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3255 &ip_vs_genl_family, NLM_F_MULTI, 3260 &ip_vs_genl_family, NLM_F_MULTI,
3256 IPVS_CMD_NEW_DAEMON); 3261 IPVS_CMD_NEW_DAEMON);
3257 if (!hdr) 3262 if (!hdr)
@@ -3678,7 +3683,7 @@ static void ip_vs_genl_unregister(void)
3678 * per netns intit/exit func. 3683 * per netns intit/exit func.
3679 */ 3684 */
3680#ifdef CONFIG_SYSCTL 3685#ifdef CONFIG_SYSCTL
3681int __net_init ip_vs_control_net_init_sysctl(struct net *net) 3686static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3682{ 3687{
3683 int idx; 3688 int idx;
3684 struct netns_ipvs *ipvs = net_ipvs(net); 3689 struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3729,6 +3734,8 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3729 ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3); 3734 ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
3730 tbl[idx++].data = &ipvs->sysctl_sync_retries; 3735 tbl[idx++].data = &ipvs->sysctl_sync_retries;
3731 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; 3736 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
3737 ipvs->sysctl_pmtu_disc = 1;
3738 tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
3732 3739
3733 3740
3734 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); 3741 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
@@ -3746,7 +3753,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3746 return 0; 3753 return 0;
3747} 3754}
3748 3755
3749void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) 3756static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3750{ 3757{
3751 struct netns_ipvs *ipvs = net_ipvs(net); 3758 struct netns_ipvs *ipvs = net_ipvs(net);
3752 3759
@@ -3757,8 +3764,8 @@ void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3757 3764
3758#else 3765#else
3759 3766
3760int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; } 3767static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
3761void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { } 3768static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
3762 3769
3763#endif 3770#endif
3764 3771
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index b20b29c903ef..4f53a5f04437 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -268,6 +268,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
268 * packet. 268 * packet.
269 */ 269 */
270 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 270 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
271 iph->ihl * 4,
271 start-data, end-start, 272 start-data, end-start,
272 buf, buf_len); 273 buf, buf_len);
273 if (ret) { 274 if (ret) {
@@ -441,16 +442,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
441 442
442 if (!ipvs) 443 if (!ipvs)
443 return -ENOENT; 444 return -ENOENT;
444 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
445 if (!app)
446 return -ENOMEM;
447 INIT_LIST_HEAD(&app->a_list);
448 INIT_LIST_HEAD(&app->incs_list);
449 ipvs->ftp_app = app;
450 445
451 ret = register_ip_vs_app(net, app); 446 app = register_ip_vs_app(net, &ip_vs_ftp);
452 if (ret) 447 if (IS_ERR(app))
453 goto err_exit; 448 return PTR_ERR(app);
454 449
455 for (i = 0; i < ports_count; i++) { 450 for (i = 0; i < ports_count; i++) {
456 if (!ports[i]) 451 if (!ports[i])
@@ -464,9 +459,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
464 return 0; 459 return 0;
465 460
466err_unreg: 461err_unreg:
467 unregister_ip_vs_app(net, app); 462 unregister_ip_vs_app(net, &ip_vs_ftp);
468err_exit:
469 kfree(ipvs->ftp_app);
470 return ret; 463 return ret;
471} 464}
472/* 465/*
@@ -474,10 +467,7 @@ err_exit:
474 */ 467 */
475static void __ip_vs_ftp_exit(struct net *net) 468static void __ip_vs_ftp_exit(struct net *net)
476{ 469{
477 struct netns_ipvs *ipvs = net_ipvs(net); 470 unregister_ip_vs_app(net, &ip_vs_ftp);
478
479 unregister_ip_vs_app(net, ipvs->ftp_app);
480 kfree(ipvs->ftp_app);
481} 471}
482 472
483static struct pernet_operations ip_vs_ftp_ops = { 473static struct pernet_operations ip_vs_ftp_ops = {
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 65b616ae1716..56f6d5d81a77 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -49,6 +49,7 @@ enum {
49 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to 49 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
50 * local 50 * local
51 */ 51 */
52 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
52}; 53};
53 54
54/* 55/*
@@ -84,6 +85,58 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
84 return dst; 85 return dst;
85} 86}
86 87
88static inline bool
89__mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
90{
91 if (IP6CB(skb)->frag_max_size) {
92 /* frag_max_size tell us that, this packet have been
93 * defragmented by netfilter IPv6 conntrack module.
94 */
95 if (IP6CB(skb)->frag_max_size > mtu)
96 return true; /* largest fragment violate MTU */
97 }
98 else if (skb->len > mtu && !skb_is_gso(skb)) {
99 return true; /* Packet size violate MTU size */
100 }
101 return false;
102}
103
104/* Get route to daddr, update *saddr, optionally bind route to saddr */
105static struct rtable *do_output_route4(struct net *net, __be32 daddr,
106 u32 rtos, int rt_mode, __be32 *saddr)
107{
108 struct flowi4 fl4;
109 struct rtable *rt;
110 int loop = 0;
111
112 memset(&fl4, 0, sizeof(fl4));
113 fl4.daddr = daddr;
114 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
115 fl4.flowi4_tos = rtos;
116
117retry:
118 rt = ip_route_output_key(net, &fl4);
119 if (IS_ERR(rt)) {
120 /* Invalid saddr ? */
121 if (PTR_ERR(rt) == -EINVAL && *saddr &&
122 rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
123 *saddr = 0;
124 flowi4_update_output(&fl4, 0, rtos, daddr, 0);
125 goto retry;
126 }
127 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
128 return NULL;
129 } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
130 ip_rt_put(rt);
131 *saddr = fl4.saddr;
132 flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr);
133 loop++;
134 goto retry;
135 }
136 *saddr = fl4.saddr;
137 return rt;
138}
139
87/* Get route to destination or remote server */ 140/* Get route to destination or remote server */
88static struct rtable * 141static struct rtable *
89__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, 142__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -98,20 +151,13 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
98 spin_lock(&dest->dst_lock); 151 spin_lock(&dest->dst_lock);
99 if (!(rt = (struct rtable *) 152 if (!(rt = (struct rtable *)
100 __ip_vs_dst_check(dest, rtos))) { 153 __ip_vs_dst_check(dest, rtos))) {
101 struct flowi4 fl4; 154 rt = do_output_route4(net, dest->addr.ip, rtos,
102 155 rt_mode, &dest->dst_saddr.ip);
103 memset(&fl4, 0, sizeof(fl4)); 156 if (!rt) {
104 fl4.daddr = dest->addr.ip;
105 fl4.flowi4_tos = rtos;
106 rt = ip_route_output_key(net, &fl4);
107 if (IS_ERR(rt)) {
108 spin_unlock(&dest->dst_lock); 157 spin_unlock(&dest->dst_lock);
109 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
110 &dest->addr.ip);
111 return NULL; 158 return NULL;
112 } 159 }
113 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0); 160 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
114 dest->dst_saddr.ip = fl4.saddr;
115 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, " 161 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
116 "rtos=%X\n", 162 "rtos=%X\n",
117 &dest->addr.ip, &dest->dst_saddr.ip, 163 &dest->addr.ip, &dest->dst_saddr.ip,
@@ -122,19 +168,17 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
122 *ret_saddr = dest->dst_saddr.ip; 168 *ret_saddr = dest->dst_saddr.ip;
123 spin_unlock(&dest->dst_lock); 169 spin_unlock(&dest->dst_lock);
124 } else { 170 } else {
125 struct flowi4 fl4; 171 __be32 saddr = htonl(INADDR_ANY);
126 172
127 memset(&fl4, 0, sizeof(fl4)); 173 /* For such unconfigured boxes avoid many route lookups
128 fl4.daddr = daddr; 174 * for performance reasons because we do not remember saddr
129 fl4.flowi4_tos = rtos; 175 */
130 rt = ip_route_output_key(net, &fl4); 176 rt_mode &= ~IP_VS_RT_MODE_CONNECT;
131 if (IS_ERR(rt)) { 177 rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr);
132 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 178 if (!rt)
133 &daddr);
134 return NULL; 179 return NULL;
135 }
136 if (ret_saddr) 180 if (ret_saddr)
137 *ret_saddr = fl4.saddr; 181 *ret_saddr = saddr;
138 } 182 }
139 183
140 local = rt->rt_flags & RTCF_LOCAL; 184 local = rt->rt_flags & RTCF_LOCAL;
@@ -331,6 +375,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
331 old_dst = dest->dst_cache; 375 old_dst = dest->dst_cache;
332 dest->dst_cache = NULL; 376 dest->dst_cache = NULL;
333 dst_release(old_dst); 377 dst_release(old_dst);
378 dest->dst_saddr.ip = 0;
334} 379}
335 380
336#define IP_VS_XMIT_TUNNEL(skb, cp) \ 381#define IP_VS_XMIT_TUNNEL(skb, cp) \
@@ -462,7 +507,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
462 507
463 /* MTU checking */ 508 /* MTU checking */
464 mtu = dst_mtu(&rt->dst); 509 mtu = dst_mtu(&rt->dst);
465 if (skb->len > mtu && !skb_is_gso(skb)) { 510 if (__mtu_check_toobig_v6(skb, mtu)) {
466 if (!skb->dev) { 511 if (!skb->dev) {
467 struct net *net = dev_net(skb_dst(skb)->dev); 512 struct net *net = dev_net(skb_dst(skb)->dev);
468 513
@@ -683,7 +728,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
683 728
684 /* MTU checking */ 729 /* MTU checking */
685 mtu = dst_mtu(&rt->dst); 730 mtu = dst_mtu(&rt->dst);
686 if (skb->len > mtu && !skb_is_gso(skb)) { 731 if (__mtu_check_toobig_v6(skb, mtu)) {
687 if (!skb->dev) { 732 if (!skb->dev) {
688 struct net *net = dev_net(skb_dst(skb)->dev); 733 struct net *net = dev_net(skb_dst(skb)->dev);
689 734
@@ -766,12 +811,13 @@ int
766ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 811ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
767 struct ip_vs_protocol *pp) 812 struct ip_vs_protocol *pp)
768{ 813{
814 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
769 struct rtable *rt; /* Route to the other host */ 815 struct rtable *rt; /* Route to the other host */
770 __be32 saddr; /* Source for tunnel */ 816 __be32 saddr; /* Source for tunnel */
771 struct net_device *tdev; /* Device to other host */ 817 struct net_device *tdev; /* Device to other host */
772 struct iphdr *old_iph = ip_hdr(skb); 818 struct iphdr *old_iph = ip_hdr(skb);
773 u8 tos = old_iph->tos; 819 u8 tos = old_iph->tos;
774 __be16 df = old_iph->frag_off; 820 __be16 df;
775 struct iphdr *iph; /* Our new IP header */ 821 struct iphdr *iph; /* Our new IP header */
776 unsigned int max_headroom; /* The extra header space needed */ 822 unsigned int max_headroom; /* The extra header space needed */
777 int mtu; 823 int mtu;
@@ -781,7 +827,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
781 827
782 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 828 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
783 RT_TOS(tos), IP_VS_RT_MODE_LOCAL | 829 RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
784 IP_VS_RT_MODE_NON_LOCAL, 830 IP_VS_RT_MODE_NON_LOCAL |
831 IP_VS_RT_MODE_CONNECT,
785 &saddr))) 832 &saddr)))
786 goto tx_error_icmp; 833 goto tx_error_icmp;
787 if (rt->rt_flags & RTCF_LOCAL) { 834 if (rt->rt_flags & RTCF_LOCAL) {
@@ -796,13 +843,13 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
796 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); 843 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
797 goto tx_error_put; 844 goto tx_error_put;
798 } 845 }
799 if (skb_dst(skb)) 846 if (rt_is_output_route(skb_rtable(skb)))
800 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 847 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
801 848
802 df |= (old_iph->frag_off & htons(IP_DF)); 849 /* Copy DF, reset fragment offset and MF */
850 df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
803 851
804 if ((old_iph->frag_off & htons(IP_DF) && 852 if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
805 mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
806 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); 853 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
807 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 854 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
808 goto tx_error_put; 855 goto tx_error_put;
@@ -915,8 +962,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
915 if (skb_dst(skb)) 962 if (skb_dst(skb))
916 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 963 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
917 964
918 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) && 965 /* MTU checking: Notice that 'mtu' have been adjusted before hand */
919 !skb_is_gso(skb)) { 966 if (__mtu_check_toobig_v6(skb, mtu)) {
920 if (!skb->dev) { 967 if (!skb->dev) {
921 struct net *net = dev_net(skb_dst(skb)->dev); 968 struct net *net = dev_net(skb_dst(skb)->dev);
922 969
@@ -1082,7 +1129,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1082 1129
1083 /* MTU checking */ 1130 /* MTU checking */
1084 mtu = dst_mtu(&rt->dst); 1131 mtu = dst_mtu(&rt->dst);
1085 if (skb->len > mtu) { 1132 if (__mtu_check_toobig_v6(skb, mtu)) {
1086 if (!skb->dev) { 1133 if (!skb->dev) {
1087 struct net *net = dev_net(skb_dst(skb)->dev); 1134 struct net *net = dev_net(skb_dst(skb)->dev);
1088 1135
@@ -1318,7 +1365,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1318 1365
1319 /* MTU checking */ 1366 /* MTU checking */
1320 mtu = dst_mtu(&rt->dst); 1367 mtu = dst_mtu(&rt->dst);
1321 if (skb->len > mtu && !skb_is_gso(skb)) { 1368 if (__mtu_check_toobig_v6(skb, mtu)) {
1322 if (!skb->dev) { 1369 if (!skb->dev) {
1323 struct net *net = dev_net(skb_dst(skb)->dev); 1370 struct net *net = dev_net(skb_dst(skb)->dev);
1324 1371
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index f2de8c55ac50..c514fe6033d2 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -40,6 +40,7 @@ MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)");
40 40
41unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, 41unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
42 enum ip_conntrack_info ctinfo, 42 enum ip_conntrack_info ctinfo,
43 unsigned int protoff,
43 unsigned int matchoff, 44 unsigned int matchoff,
44 unsigned int matchlen, 45 unsigned int matchlen,
45 struct nf_conntrack_expect *exp) 46 struct nf_conntrack_expect *exp)
@@ -155,8 +156,8 @@ static int amanda_help(struct sk_buff *skb,
155 156
156 nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); 157 nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
157 if (nf_nat_amanda && ct->status & IPS_NAT_MASK) 158 if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
158 ret = nf_nat_amanda(skb, ctinfo, off - dataoff, 159 ret = nf_nat_amanda(skb, ctinfo, protoff,
159 len, exp); 160 off - dataoff, len, exp);
160 else if (nf_ct_expect_related(exp) != 0) 161 else if (nf_ct_expect_related(exp) != 0)
161 ret = NF_DROP; 162 ret = NF_DROP;
162 nf_ct_expect_put(exp); 163 nf_ct_expect_put(exp);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2ceec64b19f9..0f241be28f9e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -55,6 +55,12 @@ int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
55 const struct nlattr *attr) __read_mostly; 55 const struct nlattr *attr) __read_mostly;
56EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); 56EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
57 57
58int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
59 struct nf_conn *ct,
60 enum ip_conntrack_info ctinfo,
61 unsigned int protoff);
62EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
63
58DEFINE_SPINLOCK(nf_conntrack_lock); 64DEFINE_SPINLOCK(nf_conntrack_lock);
59EXPORT_SYMBOL_GPL(nf_conntrack_lock); 65EXPORT_SYMBOL_GPL(nf_conntrack_lock);
60 66
@@ -930,7 +936,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
930 enum ip_conntrack_info ctinfo; 936 enum ip_conntrack_info ctinfo;
931 struct nf_conntrack_l3proto *l3proto; 937 struct nf_conntrack_l3proto *l3proto;
932 struct nf_conntrack_l4proto *l4proto; 938 struct nf_conntrack_l4proto *l4proto;
933 struct nf_conn_timeout *timeout_ext;
934 unsigned int *timeouts; 939 unsigned int *timeouts;
935 unsigned int dataoff; 940 unsigned int dataoff;
936 u_int8_t protonum; 941 u_int8_t protonum;
@@ -997,11 +1002,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
997 NF_CT_ASSERT(skb->nfct); 1002 NF_CT_ASSERT(skb->nfct);
998 1003
999 /* Decide what timeout policy we want to apply to this flow. */ 1004 /* Decide what timeout policy we want to apply to this flow. */
1000 timeout_ext = nf_ct_timeout_find(ct); 1005 timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1001 if (timeout_ext)
1002 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
1003 else
1004 timeouts = l4proto->get_timeouts(net);
1005 1006
1006 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); 1007 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1007 if (ret <= 0) { 1008 if (ret <= 0) {
@@ -1223,6 +1224,8 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1223 spin_lock_bh(&nf_conntrack_lock); 1224 spin_lock_bh(&nf_conntrack_lock);
1224 for (; *bucket < net->ct.htable_size; (*bucket)++) { 1225 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1225 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1226 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1227 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1228 continue;
1226 ct = nf_ct_tuplehash_to_ctrack(h); 1229 ct = nf_ct_tuplehash_to_ctrack(h);
1227 if (iter(ct, data)) 1230 if (iter(ct, data))
1228 goto found; 1231 goto found;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index e7be79e640de..de9781b6464f 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -61,7 +61,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
61 goto out_unlock; 61 goto out_unlock;
62 62
63 item.ct = ct; 63 item.ct = ct;
64 item.pid = 0; 64 item.portid = 0;
65 item.report = 0; 65 item.report = 0;
66 66
67 ret = notify->fcn(events | missed, &item); 67 ret = notify->fcn(events | missed, &item);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 4bb771d1f57a..1ce3befb7c8a 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -48,6 +48,7 @@ module_param(loose, bool, 0600);
48unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, 48unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
49 enum ip_conntrack_info ctinfo, 49 enum ip_conntrack_info ctinfo,
50 enum nf_ct_ftp_type type, 50 enum nf_ct_ftp_type type,
51 unsigned int protoff,
51 unsigned int matchoff, 52 unsigned int matchoff,
52 unsigned int matchlen, 53 unsigned int matchlen,
53 struct nf_conntrack_expect *exp); 54 struct nf_conntrack_expect *exp);
@@ -395,6 +396,12 @@ static int help(struct sk_buff *skb,
395 396
396 /* Look up to see if we're just after a \n. */ 397 /* Look up to see if we're just after a \n. */
397 if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) { 398 if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
399 /* We're picking up this, clear flags and let it continue */
400 if (unlikely(ct_ftp_info->flags[dir] & NF_CT_FTP_SEQ_PICKUP)) {
401 ct_ftp_info->flags[dir] ^= NF_CT_FTP_SEQ_PICKUP;
402 goto skip_nl_seq;
403 }
404
398 /* Now if this ends in \n, update ftp info. */ 405 /* Now if this ends in \n, update ftp info. */
399 pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n", 406 pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
400 ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)", 407 ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
@@ -405,6 +412,7 @@ static int help(struct sk_buff *skb,
405 goto out_update_nl; 412 goto out_update_nl;
406 } 413 }
407 414
415skip_nl_seq:
408 /* Initialize IP/IPv6 addr to expected address (it's not mentioned 416 /* Initialize IP/IPv6 addr to expected address (it's not mentioned
409 in EPSV responses) */ 417 in EPSV responses) */
410 cmd.l3num = nf_ct_l3num(ct); 418 cmd.l3num = nf_ct_l3num(ct);
@@ -489,7 +497,7 @@ static int help(struct sk_buff *skb,
489 nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook); 497 nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
490 if (nf_nat_ftp && ct->status & IPS_NAT_MASK) 498 if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
491 ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype, 499 ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
492 matchoff, matchlen, exp); 500 protoff, matchoff, matchlen, exp);
493 else { 501 else {
494 /* Can't expect this? Best to drop packet now. */ 502 /* Can't expect this? Best to drop packet now. */
495 if (nf_ct_expect_related(exp) != 0) 503 if (nf_ct_expect_related(exp) != 0)
@@ -511,6 +519,19 @@ out_update_nl:
511 return ret; 519 return ret;
512} 520}
513 521
522static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
523{
524 struct nf_ct_ftp_master *ftp = nfct_help_data(ct);
525
526 /* This conntrack has been injected from user-space, always pick up
527 * sequence tracking. Otherwise, the first FTP command after the
528 * failover breaks.
529 */
530 ftp->flags[IP_CT_DIR_ORIGINAL] |= NF_CT_FTP_SEQ_PICKUP;
531 ftp->flags[IP_CT_DIR_REPLY] |= NF_CT_FTP_SEQ_PICKUP;
532 return 0;
533}
534
514static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly; 535static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
515 536
516static const struct nf_conntrack_expect_policy ftp_exp_policy = { 537static const struct nf_conntrack_expect_policy ftp_exp_policy = {
@@ -560,6 +581,7 @@ static int __init nf_conntrack_ftp_init(void)
560 ftp[i][j].expect_policy = &ftp_exp_policy; 581 ftp[i][j].expect_policy = &ftp_exp_policy;
561 ftp[i][j].me = THIS_MODULE; 582 ftp[i][j].me = THIS_MODULE;
562 ftp[i][j].help = help; 583 ftp[i][j].help = help;
584 ftp[i][j].from_nlattr = nf_ct_ftp_from_nlattr;
563 if (ports[i] == FTP_PORT) 585 if (ports[i] == FTP_PORT)
564 sprintf(ftp[i][j].name, "ftp"); 586 sprintf(ftp[i][j].name, "ftp");
565 else 587 else
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 4283b207e63b..1b30b0dee708 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -49,12 +49,12 @@ MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
49 "(determined by routing information)"); 49 "(determined by routing information)");
50 50
51/* Hooks for NAT */ 51/* Hooks for NAT */
52int (*set_h245_addr_hook) (struct sk_buff *skb, 52int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
53 unsigned char **data, int dataoff, 53 unsigned char **data, int dataoff,
54 H245_TransportAddress *taddr, 54 H245_TransportAddress *taddr,
55 union nf_inet_addr *addr, __be16 port) 55 union nf_inet_addr *addr, __be16 port)
56 __read_mostly; 56 __read_mostly;
57int (*set_h225_addr_hook) (struct sk_buff *skb, 57int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
58 unsigned char **data, int dataoff, 58 unsigned char **data, int dataoff,
59 TransportAddress *taddr, 59 TransportAddress *taddr,
60 union nf_inet_addr *addr, __be16 port) 60 union nf_inet_addr *addr, __be16 port)
@@ -62,16 +62,17 @@ int (*set_h225_addr_hook) (struct sk_buff *skb,
62int (*set_sig_addr_hook) (struct sk_buff *skb, 62int (*set_sig_addr_hook) (struct sk_buff *skb,
63 struct nf_conn *ct, 63 struct nf_conn *ct,
64 enum ip_conntrack_info ctinfo, 64 enum ip_conntrack_info ctinfo,
65 unsigned char **data, 65 unsigned int protoff, unsigned char **data,
66 TransportAddress *taddr, int count) __read_mostly; 66 TransportAddress *taddr, int count) __read_mostly;
67int (*set_ras_addr_hook) (struct sk_buff *skb, 67int (*set_ras_addr_hook) (struct sk_buff *skb,
68 struct nf_conn *ct, 68 struct nf_conn *ct,
69 enum ip_conntrack_info ctinfo, 69 enum ip_conntrack_info ctinfo,
70 unsigned char **data, 70 unsigned int protoff, unsigned char **data,
71 TransportAddress *taddr, int count) __read_mostly; 71 TransportAddress *taddr, int count) __read_mostly;
72int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, 72int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
73 struct nf_conn *ct, 73 struct nf_conn *ct,
74 enum ip_conntrack_info ctinfo, 74 enum ip_conntrack_info ctinfo,
75 unsigned int protoff,
75 unsigned char **data, int dataoff, 76 unsigned char **data, int dataoff,
76 H245_TransportAddress *taddr, 77 H245_TransportAddress *taddr,
77 __be16 port, __be16 rtp_port, 78 __be16 port, __be16 rtp_port,
@@ -80,24 +81,28 @@ int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
80int (*nat_t120_hook) (struct sk_buff *skb, 81int (*nat_t120_hook) (struct sk_buff *skb,
81 struct nf_conn *ct, 82 struct nf_conn *ct,
82 enum ip_conntrack_info ctinfo, 83 enum ip_conntrack_info ctinfo,
84 unsigned int protoff,
83 unsigned char **data, int dataoff, 85 unsigned char **data, int dataoff,
84 H245_TransportAddress *taddr, __be16 port, 86 H245_TransportAddress *taddr, __be16 port,
85 struct nf_conntrack_expect *exp) __read_mostly; 87 struct nf_conntrack_expect *exp) __read_mostly;
86int (*nat_h245_hook) (struct sk_buff *skb, 88int (*nat_h245_hook) (struct sk_buff *skb,
87 struct nf_conn *ct, 89 struct nf_conn *ct,
88 enum ip_conntrack_info ctinfo, 90 enum ip_conntrack_info ctinfo,
91 unsigned int protoff,
89 unsigned char **data, int dataoff, 92 unsigned char **data, int dataoff,
90 TransportAddress *taddr, __be16 port, 93 TransportAddress *taddr, __be16 port,
91 struct nf_conntrack_expect *exp) __read_mostly; 94 struct nf_conntrack_expect *exp) __read_mostly;
92int (*nat_callforwarding_hook) (struct sk_buff *skb, 95int (*nat_callforwarding_hook) (struct sk_buff *skb,
93 struct nf_conn *ct, 96 struct nf_conn *ct,
94 enum ip_conntrack_info ctinfo, 97 enum ip_conntrack_info ctinfo,
98 unsigned int protoff,
95 unsigned char **data, int dataoff, 99 unsigned char **data, int dataoff,
96 TransportAddress *taddr, __be16 port, 100 TransportAddress *taddr, __be16 port,
97 struct nf_conntrack_expect *exp) __read_mostly; 101 struct nf_conntrack_expect *exp) __read_mostly;
98int (*nat_q931_hook) (struct sk_buff *skb, 102int (*nat_q931_hook) (struct sk_buff *skb,
99 struct nf_conn *ct, 103 struct nf_conn *ct,
100 enum ip_conntrack_info ctinfo, 104 enum ip_conntrack_info ctinfo,
105 unsigned int protoff,
101 unsigned char **data, TransportAddress *taddr, int idx, 106 unsigned char **data, TransportAddress *taddr, int idx,
102 __be16 port, struct nf_conntrack_expect *exp) 107 __be16 port, struct nf_conntrack_expect *exp)
103 __read_mostly; 108 __read_mostly;
@@ -251,6 +256,7 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
251/****************************************************************************/ 256/****************************************************************************/
252static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, 257static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
253 enum ip_conntrack_info ctinfo, 258 enum ip_conntrack_info ctinfo,
259 unsigned int protoff,
254 unsigned char **data, int dataoff, 260 unsigned char **data, int dataoff,
255 H245_TransportAddress *taddr) 261 H245_TransportAddress *taddr)
256{ 262{
@@ -295,9 +301,10 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
295 &ct->tuplehash[!dir].tuple.dst.u3, 301 &ct->tuplehash[!dir].tuple.dst.u3,
296 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 302 sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
297 (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) && 303 (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
304 nf_ct_l3num(ct) == NFPROTO_IPV4 &&
298 ct->status & IPS_NAT_MASK) { 305 ct->status & IPS_NAT_MASK) {
299 /* NAT needed */ 306 /* NAT needed */
300 ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff, 307 ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
301 taddr, port, rtp_port, rtp_exp, rtcp_exp); 308 taddr, port, rtp_port, rtp_exp, rtcp_exp);
302 } else { /* Conntrack only */ 309 } else { /* Conntrack only */
303 if (nf_ct_expect_related(rtp_exp) == 0) { 310 if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -324,6 +331,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
324static int expect_t120(struct sk_buff *skb, 331static int expect_t120(struct sk_buff *skb,
325 struct nf_conn *ct, 332 struct nf_conn *ct,
326 enum ip_conntrack_info ctinfo, 333 enum ip_conntrack_info ctinfo,
334 unsigned int protoff,
327 unsigned char **data, int dataoff, 335 unsigned char **data, int dataoff,
328 H245_TransportAddress *taddr) 336 H245_TransportAddress *taddr)
329{ 337{
@@ -353,9 +361,10 @@ static int expect_t120(struct sk_buff *skb,
353 &ct->tuplehash[!dir].tuple.dst.u3, 361 &ct->tuplehash[!dir].tuple.dst.u3,
354 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 362 sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
355 (nat_t120 = rcu_dereference(nat_t120_hook)) && 363 (nat_t120 = rcu_dereference(nat_t120_hook)) &&
364 nf_ct_l3num(ct) == NFPROTO_IPV4 &&
356 ct->status & IPS_NAT_MASK) { 365 ct->status & IPS_NAT_MASK) {
357 /* NAT needed */ 366 /* NAT needed */
358 ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr, 367 ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
359 port, exp); 368 port, exp);
360 } else { /* Conntrack only */ 369 } else { /* Conntrack only */
361 if (nf_ct_expect_related(exp) == 0) { 370 if (nf_ct_expect_related(exp) == 0) {
@@ -374,6 +383,7 @@ static int expect_t120(struct sk_buff *skb,
374static int process_h245_channel(struct sk_buff *skb, 383static int process_h245_channel(struct sk_buff *skb,
375 struct nf_conn *ct, 384 struct nf_conn *ct,
376 enum ip_conntrack_info ctinfo, 385 enum ip_conntrack_info ctinfo,
386 unsigned int protoff,
377 unsigned char **data, int dataoff, 387 unsigned char **data, int dataoff,
378 H2250LogicalChannelParameters *channel) 388 H2250LogicalChannelParameters *channel)
379{ 389{
@@ -381,7 +391,7 @@ static int process_h245_channel(struct sk_buff *skb,
381 391
382 if (channel->options & eH2250LogicalChannelParameters_mediaChannel) { 392 if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
383 /* RTP */ 393 /* RTP */
384 ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, 394 ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
385 &channel->mediaChannel); 395 &channel->mediaChannel);
386 if (ret < 0) 396 if (ret < 0)
387 return -1; 397 return -1;
@@ -390,7 +400,7 @@ static int process_h245_channel(struct sk_buff *skb,
390 if (channel-> 400 if (channel->
391 options & eH2250LogicalChannelParameters_mediaControlChannel) { 401 options & eH2250LogicalChannelParameters_mediaControlChannel) {
392 /* RTCP */ 402 /* RTCP */
393 ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, 403 ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
394 &channel->mediaControlChannel); 404 &channel->mediaControlChannel);
395 if (ret < 0) 405 if (ret < 0)
396 return -1; 406 return -1;
@@ -402,6 +412,7 @@ static int process_h245_channel(struct sk_buff *skb,
402/****************************************************************************/ 412/****************************************************************************/
403static int process_olc(struct sk_buff *skb, struct nf_conn *ct, 413static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
404 enum ip_conntrack_info ctinfo, 414 enum ip_conntrack_info ctinfo,
415 unsigned int protoff,
405 unsigned char **data, int dataoff, 416 unsigned char **data, int dataoff,
406 OpenLogicalChannel *olc) 417 OpenLogicalChannel *olc)
407{ 418{
@@ -412,7 +423,8 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
412 if (olc->forwardLogicalChannelParameters.multiplexParameters.choice == 423 if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
413 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters) 424 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
414 { 425 {
415 ret = process_h245_channel(skb, ct, ctinfo, data, dataoff, 426 ret = process_h245_channel(skb, ct, ctinfo,
427 protoff, data, dataoff,
416 &olc-> 428 &olc->
417 forwardLogicalChannelParameters. 429 forwardLogicalChannelParameters.
418 multiplexParameters. 430 multiplexParameters.
@@ -430,7 +442,8 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
430 eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) 442 eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
431 { 443 {
432 ret = 444 ret =
433 process_h245_channel(skb, ct, ctinfo, data, dataoff, 445 process_h245_channel(skb, ct, ctinfo,
446 protoff, data, dataoff,
434 &olc-> 447 &olc->
435 reverseLogicalChannelParameters. 448 reverseLogicalChannelParameters.
436 multiplexParameters. 449 multiplexParameters.
@@ -448,7 +461,7 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
448 t120.choice == eDataProtocolCapability_separateLANStack && 461 t120.choice == eDataProtocolCapability_separateLANStack &&
449 olc->separateStack.networkAddress.choice == 462 olc->separateStack.networkAddress.choice ==
450 eNetworkAccessParameters_networkAddress_localAreaAddress) { 463 eNetworkAccessParameters_networkAddress_localAreaAddress) {
451 ret = expect_t120(skb, ct, ctinfo, data, dataoff, 464 ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
452 &olc->separateStack.networkAddress. 465 &olc->separateStack.networkAddress.
453 localAreaAddress); 466 localAreaAddress);
454 if (ret < 0) 467 if (ret < 0)
@@ -461,7 +474,7 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
461/****************************************************************************/ 474/****************************************************************************/
462static int process_olca(struct sk_buff *skb, struct nf_conn *ct, 475static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
463 enum ip_conntrack_info ctinfo, 476 enum ip_conntrack_info ctinfo,
464 unsigned char **data, int dataoff, 477 unsigned int protoff, unsigned char **data, int dataoff,
465 OpenLogicalChannelAck *olca) 478 OpenLogicalChannelAck *olca)
466{ 479{
467 H2250LogicalChannelAckParameters *ack; 480 H2250LogicalChannelAckParameters *ack;
@@ -477,7 +490,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
477 choice == 490 choice ==
478 eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) 491 eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
479 { 492 {
480 ret = process_h245_channel(skb, ct, ctinfo, data, dataoff, 493 ret = process_h245_channel(skb, ct, ctinfo,
494 protoff, data, dataoff,
481 &olca-> 495 &olca->
482 reverseLogicalChannelParameters. 496 reverseLogicalChannelParameters.
483 multiplexParameters. 497 multiplexParameters.
@@ -496,7 +510,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
496 if (ack->options & 510 if (ack->options &
497 eH2250LogicalChannelAckParameters_mediaChannel) { 511 eH2250LogicalChannelAckParameters_mediaChannel) {
498 /* RTP */ 512 /* RTP */
499 ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, 513 ret = expect_rtp_rtcp(skb, ct, ctinfo,
514 protoff, data, dataoff,
500 &ack->mediaChannel); 515 &ack->mediaChannel);
501 if (ret < 0) 516 if (ret < 0)
502 return -1; 517 return -1;
@@ -505,7 +520,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
505 if (ack->options & 520 if (ack->options &
506 eH2250LogicalChannelAckParameters_mediaControlChannel) { 521 eH2250LogicalChannelAckParameters_mediaControlChannel) {
507 /* RTCP */ 522 /* RTCP */
508 ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff, 523 ret = expect_rtp_rtcp(skb, ct, ctinfo,
524 protoff, data, dataoff,
509 &ack->mediaControlChannel); 525 &ack->mediaControlChannel);
510 if (ret < 0) 526 if (ret < 0)
511 return -1; 527 return -1;
@@ -515,7 +531,7 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
515 if ((olca->options & eOpenLogicalChannelAck_separateStack) && 531 if ((olca->options & eOpenLogicalChannelAck_separateStack) &&
516 olca->separateStack.networkAddress.choice == 532 olca->separateStack.networkAddress.choice ==
517 eNetworkAccessParameters_networkAddress_localAreaAddress) { 533 eNetworkAccessParameters_networkAddress_localAreaAddress) {
518 ret = expect_t120(skb, ct, ctinfo, data, dataoff, 534 ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
519 &olca->separateStack.networkAddress. 535 &olca->separateStack.networkAddress.
520 localAreaAddress); 536 localAreaAddress);
521 if (ret < 0) 537 if (ret < 0)
@@ -528,14 +544,15 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
528/****************************************************************************/ 544/****************************************************************************/
529static int process_h245(struct sk_buff *skb, struct nf_conn *ct, 545static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
530 enum ip_conntrack_info ctinfo, 546 enum ip_conntrack_info ctinfo,
531 unsigned char **data, int dataoff, 547 unsigned int protoff, unsigned char **data, int dataoff,
532 MultimediaSystemControlMessage *mscm) 548 MultimediaSystemControlMessage *mscm)
533{ 549{
534 switch (mscm->choice) { 550 switch (mscm->choice) {
535 case eMultimediaSystemControlMessage_request: 551 case eMultimediaSystemControlMessage_request:
536 if (mscm->request.choice == 552 if (mscm->request.choice ==
537 eRequestMessage_openLogicalChannel) { 553 eRequestMessage_openLogicalChannel) {
538 return process_olc(skb, ct, ctinfo, data, dataoff, 554 return process_olc(skb, ct, ctinfo,
555 protoff, data, dataoff,
539 &mscm->request.openLogicalChannel); 556 &mscm->request.openLogicalChannel);
540 } 557 }
541 pr_debug("nf_ct_h323: H.245 Request %d\n", 558 pr_debug("nf_ct_h323: H.245 Request %d\n",
@@ -544,7 +561,8 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
544 case eMultimediaSystemControlMessage_response: 561 case eMultimediaSystemControlMessage_response:
545 if (mscm->response.choice == 562 if (mscm->response.choice ==
546 eResponseMessage_openLogicalChannelAck) { 563 eResponseMessage_openLogicalChannelAck) {
547 return process_olca(skb, ct, ctinfo, data, dataoff, 564 return process_olca(skb, ct, ctinfo,
565 protoff, data, dataoff,
548 &mscm->response. 566 &mscm->response.
549 openLogicalChannelAck); 567 openLogicalChannelAck);
550 } 568 }
@@ -595,7 +613,8 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
595 } 613 }
596 614
597 /* Process H.245 signal */ 615 /* Process H.245 signal */
598 if (process_h245(skb, ct, ctinfo, &data, dataoff, &mscm) < 0) 616 if (process_h245(skb, ct, ctinfo, protoff,
617 &data, dataoff, &mscm) < 0)
599 goto drop; 618 goto drop;
600 } 619 }
601 620
@@ -659,7 +678,7 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
659/****************************************************************************/ 678/****************************************************************************/
660static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, 679static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
661 enum ip_conntrack_info ctinfo, 680 enum ip_conntrack_info ctinfo,
662 unsigned char **data, int dataoff, 681 unsigned int protoff, unsigned char **data, int dataoff,
663 TransportAddress *taddr) 682 TransportAddress *taddr)
664{ 683{
665 int dir = CTINFO2DIR(ctinfo); 684 int dir = CTINFO2DIR(ctinfo);
@@ -688,9 +707,10 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
688 &ct->tuplehash[!dir].tuple.dst.u3, 707 &ct->tuplehash[!dir].tuple.dst.u3,
689 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 708 sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
690 (nat_h245 = rcu_dereference(nat_h245_hook)) && 709 (nat_h245 = rcu_dereference(nat_h245_hook)) &&
710 nf_ct_l3num(ct) == NFPROTO_IPV4 &&
691 ct->status & IPS_NAT_MASK) { 711 ct->status & IPS_NAT_MASK) {
692 /* NAT needed */ 712 /* NAT needed */
693 ret = nat_h245(skb, ct, ctinfo, data, dataoff, taddr, 713 ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
694 port, exp); 714 port, exp);
695 } else { /* Conntrack only */ 715 } else { /* Conntrack only */
696 if (nf_ct_expect_related(exp) == 0) { 716 if (nf_ct_expect_related(exp) == 0) {
@@ -776,6 +796,7 @@ static int callforward_do_filter(const union nf_inet_addr *src,
776static int expect_callforwarding(struct sk_buff *skb, 796static int expect_callforwarding(struct sk_buff *skb,
777 struct nf_conn *ct, 797 struct nf_conn *ct,
778 enum ip_conntrack_info ctinfo, 798 enum ip_conntrack_info ctinfo,
799 unsigned int protoff,
779 unsigned char **data, int dataoff, 800 unsigned char **data, int dataoff,
780 TransportAddress *taddr) 801 TransportAddress *taddr)
781{ 802{
@@ -811,9 +832,11 @@ static int expect_callforwarding(struct sk_buff *skb,
811 &ct->tuplehash[!dir].tuple.dst.u3, 832 &ct->tuplehash[!dir].tuple.dst.u3,
812 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 833 sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
813 (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) && 834 (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
835 nf_ct_l3num(ct) == NFPROTO_IPV4 &&
814 ct->status & IPS_NAT_MASK) { 836 ct->status & IPS_NAT_MASK) {
815 /* Need NAT */ 837 /* Need NAT */
816 ret = nat_callforwarding(skb, ct, ctinfo, data, dataoff, 838 ret = nat_callforwarding(skb, ct, ctinfo,
839 protoff, data, dataoff,
817 taddr, port, exp); 840 taddr, port, exp);
818 } else { /* Conntrack only */ 841 } else { /* Conntrack only */
819 if (nf_ct_expect_related(exp) == 0) { 842 if (nf_ct_expect_related(exp) == 0) {
@@ -831,6 +854,7 @@ static int expect_callforwarding(struct sk_buff *skb,
831/****************************************************************************/ 854/****************************************************************************/
832static int process_setup(struct sk_buff *skb, struct nf_conn *ct, 855static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
833 enum ip_conntrack_info ctinfo, 856 enum ip_conntrack_info ctinfo,
857 unsigned int protoff,
834 unsigned char **data, int dataoff, 858 unsigned char **data, int dataoff,
835 Setup_UUIE *setup) 859 Setup_UUIE *setup)
836{ 860{
@@ -844,7 +868,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
844 pr_debug("nf_ct_q931: Setup\n"); 868 pr_debug("nf_ct_q931: Setup\n");
845 869
846 if (setup->options & eSetup_UUIE_h245Address) { 870 if (setup->options & eSetup_UUIE_h245Address) {
847 ret = expect_h245(skb, ct, ctinfo, data, dataoff, 871 ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
848 &setup->h245Address); 872 &setup->h245Address);
849 if (ret < 0) 873 if (ret < 0)
850 return -1; 874 return -1;
@@ -852,14 +876,15 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
852 876
853 set_h225_addr = rcu_dereference(set_h225_addr_hook); 877 set_h225_addr = rcu_dereference(set_h225_addr_hook);
854 if ((setup->options & eSetup_UUIE_destCallSignalAddress) && 878 if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
855 (set_h225_addr) && ct->status & IPS_NAT_MASK && 879 (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
880 ct->status & IPS_NAT_MASK &&
856 get_h225_addr(ct, *data, &setup->destCallSignalAddress, 881 get_h225_addr(ct, *data, &setup->destCallSignalAddress,
857 &addr, &port) && 882 &addr, &port) &&
858 memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { 883 memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
859 pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n", 884 pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
860 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3, 885 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
861 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); 886 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
862 ret = set_h225_addr(skb, data, dataoff, 887 ret = set_h225_addr(skb, protoff, data, dataoff,
863 &setup->destCallSignalAddress, 888 &setup->destCallSignalAddress,
864 &ct->tuplehash[!dir].tuple.src.u3, 889 &ct->tuplehash[!dir].tuple.src.u3,
865 ct->tuplehash[!dir].tuple.src.u.tcp.port); 890 ct->tuplehash[!dir].tuple.src.u.tcp.port);
@@ -868,14 +893,15 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
868 } 893 }
869 894
870 if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && 895 if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
871 (set_h225_addr) && ct->status & IPS_NAT_MASK && 896 (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
897 ct->status & IPS_NAT_MASK &&
872 get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, 898 get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
873 &addr, &port) && 899 &addr, &port) &&
874 memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { 900 memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
875 pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n", 901 pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
876 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3, 902 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
877 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); 903 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
878 ret = set_h225_addr(skb, data, dataoff, 904 ret = set_h225_addr(skb, protoff, data, dataoff,
879 &setup->sourceCallSignalAddress, 905 &setup->sourceCallSignalAddress,
880 &ct->tuplehash[!dir].tuple.dst.u3, 906 &ct->tuplehash[!dir].tuple.dst.u3,
881 ct->tuplehash[!dir].tuple.dst.u.tcp.port); 907 ct->tuplehash[!dir].tuple.dst.u.tcp.port);
@@ -885,7 +911,8 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
885 911
886 if (setup->options & eSetup_UUIE_fastStart) { 912 if (setup->options & eSetup_UUIE_fastStart) {
887 for (i = 0; i < setup->fastStart.count; i++) { 913 for (i = 0; i < setup->fastStart.count; i++) {
888 ret = process_olc(skb, ct, ctinfo, data, dataoff, 914 ret = process_olc(skb, ct, ctinfo,
915 protoff, data, dataoff,
889 &setup->fastStart.item[i]); 916 &setup->fastStart.item[i]);
890 if (ret < 0) 917 if (ret < 0)
891 return -1; 918 return -1;
@@ -899,6 +926,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
899static int process_callproceeding(struct sk_buff *skb, 926static int process_callproceeding(struct sk_buff *skb,
900 struct nf_conn *ct, 927 struct nf_conn *ct,
901 enum ip_conntrack_info ctinfo, 928 enum ip_conntrack_info ctinfo,
929 unsigned int protoff,
902 unsigned char **data, int dataoff, 930 unsigned char **data, int dataoff,
903 CallProceeding_UUIE *callproc) 931 CallProceeding_UUIE *callproc)
904{ 932{
@@ -908,7 +936,7 @@ static int process_callproceeding(struct sk_buff *skb,
908 pr_debug("nf_ct_q931: CallProceeding\n"); 936 pr_debug("nf_ct_q931: CallProceeding\n");
909 937
910 if (callproc->options & eCallProceeding_UUIE_h245Address) { 938 if (callproc->options & eCallProceeding_UUIE_h245Address) {
911 ret = expect_h245(skb, ct, ctinfo, data, dataoff, 939 ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
912 &callproc->h245Address); 940 &callproc->h245Address);
913 if (ret < 0) 941 if (ret < 0)
914 return -1; 942 return -1;
@@ -916,7 +944,8 @@ static int process_callproceeding(struct sk_buff *skb,
916 944
917 if (callproc->options & eCallProceeding_UUIE_fastStart) { 945 if (callproc->options & eCallProceeding_UUIE_fastStart) {
918 for (i = 0; i < callproc->fastStart.count; i++) { 946 for (i = 0; i < callproc->fastStart.count; i++) {
919 ret = process_olc(skb, ct, ctinfo, data, dataoff, 947 ret = process_olc(skb, ct, ctinfo,
948 protoff, data, dataoff,
920 &callproc->fastStart.item[i]); 949 &callproc->fastStart.item[i]);
921 if (ret < 0) 950 if (ret < 0)
922 return -1; 951 return -1;
@@ -929,6 +958,7 @@ static int process_callproceeding(struct sk_buff *skb,
929/****************************************************************************/ 958/****************************************************************************/
930static int process_connect(struct sk_buff *skb, struct nf_conn *ct, 959static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
931 enum ip_conntrack_info ctinfo, 960 enum ip_conntrack_info ctinfo,
961 unsigned int protoff,
932 unsigned char **data, int dataoff, 962 unsigned char **data, int dataoff,
933 Connect_UUIE *connect) 963 Connect_UUIE *connect)
934{ 964{
@@ -938,7 +968,7 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
938 pr_debug("nf_ct_q931: Connect\n"); 968 pr_debug("nf_ct_q931: Connect\n");
939 969
940 if (connect->options & eConnect_UUIE_h245Address) { 970 if (connect->options & eConnect_UUIE_h245Address) {
941 ret = expect_h245(skb, ct, ctinfo, data, dataoff, 971 ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
942 &connect->h245Address); 972 &connect->h245Address);
943 if (ret < 0) 973 if (ret < 0)
944 return -1; 974 return -1;
@@ -946,7 +976,8 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
946 976
947 if (connect->options & eConnect_UUIE_fastStart) { 977 if (connect->options & eConnect_UUIE_fastStart) {
948 for (i = 0; i < connect->fastStart.count; i++) { 978 for (i = 0; i < connect->fastStart.count; i++) {
949 ret = process_olc(skb, ct, ctinfo, data, dataoff, 979 ret = process_olc(skb, ct, ctinfo,
980 protoff, data, dataoff,
950 &connect->fastStart.item[i]); 981 &connect->fastStart.item[i]);
951 if (ret < 0) 982 if (ret < 0)
952 return -1; 983 return -1;
@@ -959,6 +990,7 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
959/****************************************************************************/ 990/****************************************************************************/
960static int process_alerting(struct sk_buff *skb, struct nf_conn *ct, 991static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
961 enum ip_conntrack_info ctinfo, 992 enum ip_conntrack_info ctinfo,
993 unsigned int protoff,
962 unsigned char **data, int dataoff, 994 unsigned char **data, int dataoff,
963 Alerting_UUIE *alert) 995 Alerting_UUIE *alert)
964{ 996{
@@ -968,7 +1000,7 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
968 pr_debug("nf_ct_q931: Alerting\n"); 1000 pr_debug("nf_ct_q931: Alerting\n");
969 1001
970 if (alert->options & eAlerting_UUIE_h245Address) { 1002 if (alert->options & eAlerting_UUIE_h245Address) {
971 ret = expect_h245(skb, ct, ctinfo, data, dataoff, 1003 ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
972 &alert->h245Address); 1004 &alert->h245Address);
973 if (ret < 0) 1005 if (ret < 0)
974 return -1; 1006 return -1;
@@ -976,7 +1008,8 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
976 1008
977 if (alert->options & eAlerting_UUIE_fastStart) { 1009 if (alert->options & eAlerting_UUIE_fastStart) {
978 for (i = 0; i < alert->fastStart.count; i++) { 1010 for (i = 0; i < alert->fastStart.count; i++) {
979 ret = process_olc(skb, ct, ctinfo, data, dataoff, 1011 ret = process_olc(skb, ct, ctinfo,
1012 protoff, data, dataoff,
980 &alert->fastStart.item[i]); 1013 &alert->fastStart.item[i]);
981 if (ret < 0) 1014 if (ret < 0)
982 return -1; 1015 return -1;
@@ -989,6 +1022,7 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
989/****************************************************************************/ 1022/****************************************************************************/
990static int process_facility(struct sk_buff *skb, struct nf_conn *ct, 1023static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
991 enum ip_conntrack_info ctinfo, 1024 enum ip_conntrack_info ctinfo,
1025 unsigned int protoff,
992 unsigned char **data, int dataoff, 1026 unsigned char **data, int dataoff,
993 Facility_UUIE *facility) 1027 Facility_UUIE *facility)
994{ 1028{
@@ -999,15 +1033,15 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
999 1033
1000 if (facility->reason.choice == eFacilityReason_callForwarded) { 1034 if (facility->reason.choice == eFacilityReason_callForwarded) {
1001 if (facility->options & eFacility_UUIE_alternativeAddress) 1035 if (facility->options & eFacility_UUIE_alternativeAddress)
1002 return expect_callforwarding(skb, ct, ctinfo, data, 1036 return expect_callforwarding(skb, ct, ctinfo,
1003 dataoff, 1037 protoff, data, dataoff,
1004 &facility-> 1038 &facility->
1005 alternativeAddress); 1039 alternativeAddress);
1006 return 0; 1040 return 0;
1007 } 1041 }
1008 1042
1009 if (facility->options & eFacility_UUIE_h245Address) { 1043 if (facility->options & eFacility_UUIE_h245Address) {
1010 ret = expect_h245(skb, ct, ctinfo, data, dataoff, 1044 ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
1011 &facility->h245Address); 1045 &facility->h245Address);
1012 if (ret < 0) 1046 if (ret < 0)
1013 return -1; 1047 return -1;
@@ -1015,7 +1049,8 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
1015 1049
1016 if (facility->options & eFacility_UUIE_fastStart) { 1050 if (facility->options & eFacility_UUIE_fastStart) {
1017 for (i = 0; i < facility->fastStart.count; i++) { 1051 for (i = 0; i < facility->fastStart.count; i++) {
1018 ret = process_olc(skb, ct, ctinfo, data, dataoff, 1052 ret = process_olc(skb, ct, ctinfo,
1053 protoff, data, dataoff,
1019 &facility->fastStart.item[i]); 1054 &facility->fastStart.item[i]);
1020 if (ret < 0) 1055 if (ret < 0)
1021 return -1; 1056 return -1;
@@ -1028,6 +1063,7 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
1028/****************************************************************************/ 1063/****************************************************************************/
1029static int process_progress(struct sk_buff *skb, struct nf_conn *ct, 1064static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
1030 enum ip_conntrack_info ctinfo, 1065 enum ip_conntrack_info ctinfo,
1066 unsigned int protoff,
1031 unsigned char **data, int dataoff, 1067 unsigned char **data, int dataoff,
1032 Progress_UUIE *progress) 1068 Progress_UUIE *progress)
1033{ 1069{
@@ -1037,7 +1073,7 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
1037 pr_debug("nf_ct_q931: Progress\n"); 1073 pr_debug("nf_ct_q931: Progress\n");
1038 1074
1039 if (progress->options & eProgress_UUIE_h245Address) { 1075 if (progress->options & eProgress_UUIE_h245Address) {
1040 ret = expect_h245(skb, ct, ctinfo, data, dataoff, 1076 ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
1041 &progress->h245Address); 1077 &progress->h245Address);
1042 if (ret < 0) 1078 if (ret < 0)
1043 return -1; 1079 return -1;
@@ -1045,7 +1081,8 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
1045 1081
1046 if (progress->options & eProgress_UUIE_fastStart) { 1082 if (progress->options & eProgress_UUIE_fastStart) {
1047 for (i = 0; i < progress->fastStart.count; i++) { 1083 for (i = 0; i < progress->fastStart.count; i++) {
1048 ret = process_olc(skb, ct, ctinfo, data, dataoff, 1084 ret = process_olc(skb, ct, ctinfo,
1085 protoff, data, dataoff,
1049 &progress->fastStart.item[i]); 1086 &progress->fastStart.item[i]);
1050 if (ret < 0) 1087 if (ret < 0)
1051 return -1; 1088 return -1;
@@ -1058,7 +1095,8 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
1058/****************************************************************************/ 1095/****************************************************************************/
1059static int process_q931(struct sk_buff *skb, struct nf_conn *ct, 1096static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
1060 enum ip_conntrack_info ctinfo, 1097 enum ip_conntrack_info ctinfo,
1061 unsigned char **data, int dataoff, Q931 *q931) 1098 unsigned int protoff, unsigned char **data, int dataoff,
1099 Q931 *q931)
1062{ 1100{
1063 H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu; 1101 H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
1064 int i; 1102 int i;
@@ -1066,28 +1104,29 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
1066 1104
1067 switch (pdu->h323_message_body.choice) { 1105 switch (pdu->h323_message_body.choice) {
1068 case eH323_UU_PDU_h323_message_body_setup: 1106 case eH323_UU_PDU_h323_message_body_setup:
1069 ret = process_setup(skb, ct, ctinfo, data, dataoff, 1107 ret = process_setup(skb, ct, ctinfo, protoff, data, dataoff,
1070 &pdu->h323_message_body.setup); 1108 &pdu->h323_message_body.setup);
1071 break; 1109 break;
1072 case eH323_UU_PDU_h323_message_body_callProceeding: 1110 case eH323_UU_PDU_h323_message_body_callProceeding:
1073 ret = process_callproceeding(skb, ct, ctinfo, data, dataoff, 1111 ret = process_callproceeding(skb, ct, ctinfo,
1112 protoff, data, dataoff,
1074 &pdu->h323_message_body. 1113 &pdu->h323_message_body.
1075 callProceeding); 1114 callProceeding);
1076 break; 1115 break;
1077 case eH323_UU_PDU_h323_message_body_connect: 1116 case eH323_UU_PDU_h323_message_body_connect:
1078 ret = process_connect(skb, ct, ctinfo, data, dataoff, 1117 ret = process_connect(skb, ct, ctinfo, protoff, data, dataoff,
1079 &pdu->h323_message_body.connect); 1118 &pdu->h323_message_body.connect);
1080 break; 1119 break;
1081 case eH323_UU_PDU_h323_message_body_alerting: 1120 case eH323_UU_PDU_h323_message_body_alerting:
1082 ret = process_alerting(skb, ct, ctinfo, data, dataoff, 1121 ret = process_alerting(skb, ct, ctinfo, protoff, data, dataoff,
1083 &pdu->h323_message_body.alerting); 1122 &pdu->h323_message_body.alerting);
1084 break; 1123 break;
1085 case eH323_UU_PDU_h323_message_body_facility: 1124 case eH323_UU_PDU_h323_message_body_facility:
1086 ret = process_facility(skb, ct, ctinfo, data, dataoff, 1125 ret = process_facility(skb, ct, ctinfo, protoff, data, dataoff,
1087 &pdu->h323_message_body.facility); 1126 &pdu->h323_message_body.facility);
1088 break; 1127 break;
1089 case eH323_UU_PDU_h323_message_body_progress: 1128 case eH323_UU_PDU_h323_message_body_progress:
1090 ret = process_progress(skb, ct, ctinfo, data, dataoff, 1129 ret = process_progress(skb, ct, ctinfo, protoff, data, dataoff,
1091 &pdu->h323_message_body.progress); 1130 &pdu->h323_message_body.progress);
1092 break; 1131 break;
1093 default: 1132 default:
@@ -1101,7 +1140,8 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
1101 1140
1102 if (pdu->options & eH323_UU_PDU_h245Control) { 1141 if (pdu->options & eH323_UU_PDU_h245Control) {
1103 for (i = 0; i < pdu->h245Control.count; i++) { 1142 for (i = 0; i < pdu->h245Control.count; i++) {
1104 ret = process_h245(skb, ct, ctinfo, data, dataoff, 1143 ret = process_h245(skb, ct, ctinfo,
1144 protoff, data, dataoff,
1105 &pdu->h245Control.item[i]); 1145 &pdu->h245Control.item[i]);
1106 if (ret < 0) 1146 if (ret < 0)
1107 return -1; 1147 return -1;
@@ -1146,7 +1186,8 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1146 } 1186 }
1147 1187
1148 /* Process Q.931 signal */ 1188 /* Process Q.931 signal */
1149 if (process_q931(skb, ct, ctinfo, &data, dataoff, &q931) < 0) 1189 if (process_q931(skb, ct, ctinfo, protoff,
1190 &data, dataoff, &q931) < 0)
1150 goto drop; 1191 goto drop;
1151 } 1192 }
1152 1193
@@ -1243,7 +1284,7 @@ static int set_expect_timeout(struct nf_conntrack_expect *exp,
1243/****************************************************************************/ 1284/****************************************************************************/
1244static int expect_q931(struct sk_buff *skb, struct nf_conn *ct, 1285static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1245 enum ip_conntrack_info ctinfo, 1286 enum ip_conntrack_info ctinfo,
1246 unsigned char **data, 1287 unsigned int protoff, unsigned char **data,
1247 TransportAddress *taddr, int count) 1288 TransportAddress *taddr, int count)
1248{ 1289{
1249 struct nf_ct_h323_master *info = nfct_help_data(ct); 1290 struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1278,8 +1319,10 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1278 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */ 1319 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */
1279 1320
1280 nat_q931 = rcu_dereference(nat_q931_hook); 1321 nat_q931 = rcu_dereference(nat_q931_hook);
1281 if (nat_q931 && ct->status & IPS_NAT_MASK) { /* Need NAT */ 1322 if (nat_q931 && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1282 ret = nat_q931(skb, ct, ctinfo, data, taddr, i, port, exp); 1323 ct->status & IPS_NAT_MASK) { /* Need NAT */
1324 ret = nat_q931(skb, ct, ctinfo, protoff, data,
1325 taddr, i, port, exp);
1283 } else { /* Conntrack only */ 1326 } else { /* Conntrack only */
1284 if (nf_ct_expect_related(exp) == 0) { 1327 if (nf_ct_expect_related(exp) == 0) {
1285 pr_debug("nf_ct_ras: expect Q.931 "); 1328 pr_debug("nf_ct_ras: expect Q.931 ");
@@ -1299,6 +1342,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
1299/****************************************************************************/ 1342/****************************************************************************/
1300static int process_grq(struct sk_buff *skb, struct nf_conn *ct, 1343static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
1301 enum ip_conntrack_info ctinfo, 1344 enum ip_conntrack_info ctinfo,
1345 unsigned int protoff,
1302 unsigned char **data, GatekeeperRequest *grq) 1346 unsigned char **data, GatekeeperRequest *grq)
1303{ 1347{
1304 typeof(set_ras_addr_hook) set_ras_addr; 1348 typeof(set_ras_addr_hook) set_ras_addr;
@@ -1306,8 +1350,9 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
1306 pr_debug("nf_ct_ras: GRQ\n"); 1350 pr_debug("nf_ct_ras: GRQ\n");
1307 1351
1308 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1352 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1309 if (set_ras_addr && ct->status & IPS_NAT_MASK) /* NATed */ 1353 if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1310 return set_ras_addr(skb, ct, ctinfo, data, 1354 ct->status & IPS_NAT_MASK) /* NATed */
1355 return set_ras_addr(skb, ct, ctinfo, protoff, data,
1311 &grq->rasAddress, 1); 1356 &grq->rasAddress, 1);
1312 return 0; 1357 return 0;
1313} 1358}
@@ -1315,6 +1360,7 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
1315/****************************************************************************/ 1360/****************************************************************************/
1316static int process_gcf(struct sk_buff *skb, struct nf_conn *ct, 1361static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
1317 enum ip_conntrack_info ctinfo, 1362 enum ip_conntrack_info ctinfo,
1363 unsigned int protoff,
1318 unsigned char **data, GatekeeperConfirm *gcf) 1364 unsigned char **data, GatekeeperConfirm *gcf)
1319{ 1365{
1320 int dir = CTINFO2DIR(ctinfo); 1366 int dir = CTINFO2DIR(ctinfo);
@@ -1359,6 +1405,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
1359/****************************************************************************/ 1405/****************************************************************************/
1360static int process_rrq(struct sk_buff *skb, struct nf_conn *ct, 1406static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
1361 enum ip_conntrack_info ctinfo, 1407 enum ip_conntrack_info ctinfo,
1408 unsigned int protoff,
1362 unsigned char **data, RegistrationRequest *rrq) 1409 unsigned char **data, RegistrationRequest *rrq)
1363{ 1410{
1364 struct nf_ct_h323_master *info = nfct_help_data(ct); 1411 struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1367,15 +1414,16 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
1367 1414
1368 pr_debug("nf_ct_ras: RRQ\n"); 1415 pr_debug("nf_ct_ras: RRQ\n");
1369 1416
1370 ret = expect_q931(skb, ct, ctinfo, data, 1417 ret = expect_q931(skb, ct, ctinfo, protoff, data,
1371 rrq->callSignalAddress.item, 1418 rrq->callSignalAddress.item,
1372 rrq->callSignalAddress.count); 1419 rrq->callSignalAddress.count);
1373 if (ret < 0) 1420 if (ret < 0)
1374 return -1; 1421 return -1;
1375 1422
1376 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1423 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1377 if (set_ras_addr && ct->status & IPS_NAT_MASK) { 1424 if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1378 ret = set_ras_addr(skb, ct, ctinfo, data, 1425 ct->status & IPS_NAT_MASK) {
1426 ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
1379 rrq->rasAddress.item, 1427 rrq->rasAddress.item,
1380 rrq->rasAddress.count); 1428 rrq->rasAddress.count);
1381 if (ret < 0) 1429 if (ret < 0)
@@ -1394,6 +1442,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
1394/****************************************************************************/ 1442/****************************************************************************/
1395static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, 1443static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1396 enum ip_conntrack_info ctinfo, 1444 enum ip_conntrack_info ctinfo,
1445 unsigned int protoff,
1397 unsigned char **data, RegistrationConfirm *rcf) 1446 unsigned char **data, RegistrationConfirm *rcf)
1398{ 1447{
1399 struct nf_ct_h323_master *info = nfct_help_data(ct); 1448 struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1405,8 +1454,9 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1405 pr_debug("nf_ct_ras: RCF\n"); 1454 pr_debug("nf_ct_ras: RCF\n");
1406 1455
1407 set_sig_addr = rcu_dereference(set_sig_addr_hook); 1456 set_sig_addr = rcu_dereference(set_sig_addr_hook);
1408 if (set_sig_addr && ct->status & IPS_NAT_MASK) { 1457 if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1409 ret = set_sig_addr(skb, ct, ctinfo, data, 1458 ct->status & IPS_NAT_MASK) {
1459 ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
1410 rcf->callSignalAddress.item, 1460 rcf->callSignalAddress.item,
1411 rcf->callSignalAddress.count); 1461 rcf->callSignalAddress.count);
1412 if (ret < 0) 1462 if (ret < 0)
@@ -1443,6 +1493,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1443/****************************************************************************/ 1493/****************************************************************************/
1444static int process_urq(struct sk_buff *skb, struct nf_conn *ct, 1494static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
1445 enum ip_conntrack_info ctinfo, 1495 enum ip_conntrack_info ctinfo,
1496 unsigned int protoff,
1446 unsigned char **data, UnregistrationRequest *urq) 1497 unsigned char **data, UnregistrationRequest *urq)
1447{ 1498{
1448 struct nf_ct_h323_master *info = nfct_help_data(ct); 1499 struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1453,8 +1504,9 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
1453 pr_debug("nf_ct_ras: URQ\n"); 1504 pr_debug("nf_ct_ras: URQ\n");
1454 1505
1455 set_sig_addr = rcu_dereference(set_sig_addr_hook); 1506 set_sig_addr = rcu_dereference(set_sig_addr_hook);
1456 if (set_sig_addr && ct->status & IPS_NAT_MASK) { 1507 if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1457 ret = set_sig_addr(skb, ct, ctinfo, data, 1508 ct->status & IPS_NAT_MASK) {
1509 ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
1458 urq->callSignalAddress.item, 1510 urq->callSignalAddress.item,
1459 urq->callSignalAddress.count); 1511 urq->callSignalAddress.count);
1460 if (ret < 0) 1512 if (ret < 0)
@@ -1475,6 +1527,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
1475/****************************************************************************/ 1527/****************************************************************************/
1476static int process_arq(struct sk_buff *skb, struct nf_conn *ct, 1528static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
1477 enum ip_conntrack_info ctinfo, 1529 enum ip_conntrack_info ctinfo,
1530 unsigned int protoff,
1478 unsigned char **data, AdmissionRequest *arq) 1531 unsigned char **data, AdmissionRequest *arq)
1479{ 1532{
1480 const struct nf_ct_h323_master *info = nfct_help_data(ct); 1533 const struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1491,9 +1544,10 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
1491 &addr, &port) && 1544 &addr, &port) &&
1492 !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && 1545 !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
1493 port == info->sig_port[dir] && 1546 port == info->sig_port[dir] &&
1547 nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1494 set_h225_addr && ct->status & IPS_NAT_MASK) { 1548 set_h225_addr && ct->status & IPS_NAT_MASK) {
1495 /* Answering ARQ */ 1549 /* Answering ARQ */
1496 return set_h225_addr(skb, data, 0, 1550 return set_h225_addr(skb, protoff, data, 0,
1497 &arq->destCallSignalAddress, 1551 &arq->destCallSignalAddress,
1498 &ct->tuplehash[!dir].tuple.dst.u3, 1552 &ct->tuplehash[!dir].tuple.dst.u3,
1499 info->sig_port[!dir]); 1553 info->sig_port[!dir]);
@@ -1503,9 +1557,10 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
1503 get_h225_addr(ct, *data, &arq->srcCallSignalAddress, 1557 get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
1504 &addr, &port) && 1558 &addr, &port) &&
1505 !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && 1559 !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
1506 set_h225_addr && ct->status & IPS_NAT_MASK) { 1560 set_h225_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1561 ct->status & IPS_NAT_MASK) {
1507 /* Calling ARQ */ 1562 /* Calling ARQ */
1508 return set_h225_addr(skb, data, 0, 1563 return set_h225_addr(skb, protoff, data, 0,
1509 &arq->srcCallSignalAddress, 1564 &arq->srcCallSignalAddress,
1510 &ct->tuplehash[!dir].tuple.dst.u3, 1565 &ct->tuplehash[!dir].tuple.dst.u3,
1511 port); 1566 port);
@@ -1517,6 +1572,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
1517/****************************************************************************/ 1572/****************************************************************************/
1518static int process_acf(struct sk_buff *skb, struct nf_conn *ct, 1573static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1519 enum ip_conntrack_info ctinfo, 1574 enum ip_conntrack_info ctinfo,
1575 unsigned int protoff,
1520 unsigned char **data, AdmissionConfirm *acf) 1576 unsigned char **data, AdmissionConfirm *acf)
1521{ 1577{
1522 int dir = CTINFO2DIR(ctinfo); 1578 int dir = CTINFO2DIR(ctinfo);
@@ -1535,8 +1591,9 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1535 if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) { 1591 if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
1536 /* Answering ACF */ 1592 /* Answering ACF */
1537 set_sig_addr = rcu_dereference(set_sig_addr_hook); 1593 set_sig_addr = rcu_dereference(set_sig_addr_hook);
1538 if (set_sig_addr && ct->status & IPS_NAT_MASK) 1594 if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1539 return set_sig_addr(skb, ct, ctinfo, data, 1595 ct->status & IPS_NAT_MASK)
1596 return set_sig_addr(skb, ct, ctinfo, protoff, data,
1540 &acf->destCallSignalAddress, 1); 1597 &acf->destCallSignalAddress, 1);
1541 return 0; 1598 return 0;
1542 } 1599 }
@@ -1564,6 +1621,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
1564/****************************************************************************/ 1621/****************************************************************************/
1565static int process_lrq(struct sk_buff *skb, struct nf_conn *ct, 1622static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
1566 enum ip_conntrack_info ctinfo, 1623 enum ip_conntrack_info ctinfo,
1624 unsigned int protoff,
1567 unsigned char **data, LocationRequest *lrq) 1625 unsigned char **data, LocationRequest *lrq)
1568{ 1626{
1569 typeof(set_ras_addr_hook) set_ras_addr; 1627 typeof(set_ras_addr_hook) set_ras_addr;
@@ -1571,8 +1629,9 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
1571 pr_debug("nf_ct_ras: LRQ\n"); 1629 pr_debug("nf_ct_ras: LRQ\n");
1572 1630
1573 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1631 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1574 if (set_ras_addr && ct->status & IPS_NAT_MASK) 1632 if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1575 return set_ras_addr(skb, ct, ctinfo, data, 1633 ct->status & IPS_NAT_MASK)
1634 return set_ras_addr(skb, ct, ctinfo, protoff, data,
1576 &lrq->replyAddress, 1); 1635 &lrq->replyAddress, 1);
1577 return 0; 1636 return 0;
1578} 1637}
@@ -1580,6 +1639,7 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
1580/****************************************************************************/ 1639/****************************************************************************/
1581static int process_lcf(struct sk_buff *skb, struct nf_conn *ct, 1640static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
1582 enum ip_conntrack_info ctinfo, 1641 enum ip_conntrack_info ctinfo,
1642 unsigned int protoff,
1583 unsigned char **data, LocationConfirm *lcf) 1643 unsigned char **data, LocationConfirm *lcf)
1584{ 1644{
1585 int dir = CTINFO2DIR(ctinfo); 1645 int dir = CTINFO2DIR(ctinfo);
@@ -1619,6 +1679,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
1619/****************************************************************************/ 1679/****************************************************************************/
1620static int process_irr(struct sk_buff *skb, struct nf_conn *ct, 1680static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
1621 enum ip_conntrack_info ctinfo, 1681 enum ip_conntrack_info ctinfo,
1682 unsigned int protoff,
1622 unsigned char **data, InfoRequestResponse *irr) 1683 unsigned char **data, InfoRequestResponse *irr)
1623{ 1684{
1624 int ret; 1685 int ret;
@@ -1628,16 +1689,18 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
1628 pr_debug("nf_ct_ras: IRR\n"); 1689 pr_debug("nf_ct_ras: IRR\n");
1629 1690
1630 set_ras_addr = rcu_dereference(set_ras_addr_hook); 1691 set_ras_addr = rcu_dereference(set_ras_addr_hook);
1631 if (set_ras_addr && ct->status & IPS_NAT_MASK) { 1692 if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1632 ret = set_ras_addr(skb, ct, ctinfo, data, 1693 ct->status & IPS_NAT_MASK) {
1694 ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
1633 &irr->rasAddress, 1); 1695 &irr->rasAddress, 1);
1634 if (ret < 0) 1696 if (ret < 0)
1635 return -1; 1697 return -1;
1636 } 1698 }
1637 1699
1638 set_sig_addr = rcu_dereference(set_sig_addr_hook); 1700 set_sig_addr = rcu_dereference(set_sig_addr_hook);
1639 if (set_sig_addr && ct->status & IPS_NAT_MASK) { 1701 if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
1640 ret = set_sig_addr(skb, ct, ctinfo, data, 1702 ct->status & IPS_NAT_MASK) {
1703 ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
1641 irr->callSignalAddress.item, 1704 irr->callSignalAddress.item,
1642 irr->callSignalAddress.count); 1705 irr->callSignalAddress.count);
1643 if (ret < 0) 1706 if (ret < 0)
@@ -1650,38 +1713,39 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
1650/****************************************************************************/ 1713/****************************************************************************/
1651static int process_ras(struct sk_buff *skb, struct nf_conn *ct, 1714static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
1652 enum ip_conntrack_info ctinfo, 1715 enum ip_conntrack_info ctinfo,
1716 unsigned int protoff,
1653 unsigned char **data, RasMessage *ras) 1717 unsigned char **data, RasMessage *ras)
1654{ 1718{
1655 switch (ras->choice) { 1719 switch (ras->choice) {
1656 case eRasMessage_gatekeeperRequest: 1720 case eRasMessage_gatekeeperRequest:
1657 return process_grq(skb, ct, ctinfo, data, 1721 return process_grq(skb, ct, ctinfo, protoff, data,
1658 &ras->gatekeeperRequest); 1722 &ras->gatekeeperRequest);
1659 case eRasMessage_gatekeeperConfirm: 1723 case eRasMessage_gatekeeperConfirm:
1660 return process_gcf(skb, ct, ctinfo, data, 1724 return process_gcf(skb, ct, ctinfo, protoff, data,
1661 &ras->gatekeeperConfirm); 1725 &ras->gatekeeperConfirm);
1662 case eRasMessage_registrationRequest: 1726 case eRasMessage_registrationRequest:
1663 return process_rrq(skb, ct, ctinfo, data, 1727 return process_rrq(skb, ct, ctinfo, protoff, data,
1664 &ras->registrationRequest); 1728 &ras->registrationRequest);
1665 case eRasMessage_registrationConfirm: 1729 case eRasMessage_registrationConfirm:
1666 return process_rcf(skb, ct, ctinfo, data, 1730 return process_rcf(skb, ct, ctinfo, protoff, data,
1667 &ras->registrationConfirm); 1731 &ras->registrationConfirm);
1668 case eRasMessage_unregistrationRequest: 1732 case eRasMessage_unregistrationRequest:
1669 return process_urq(skb, ct, ctinfo, data, 1733 return process_urq(skb, ct, ctinfo, protoff, data,
1670 &ras->unregistrationRequest); 1734 &ras->unregistrationRequest);
1671 case eRasMessage_admissionRequest: 1735 case eRasMessage_admissionRequest:
1672 return process_arq(skb, ct, ctinfo, data, 1736 return process_arq(skb, ct, ctinfo, protoff, data,
1673 &ras->admissionRequest); 1737 &ras->admissionRequest);
1674 case eRasMessage_admissionConfirm: 1738 case eRasMessage_admissionConfirm:
1675 return process_acf(skb, ct, ctinfo, data, 1739 return process_acf(skb, ct, ctinfo, protoff, data,
1676 &ras->admissionConfirm); 1740 &ras->admissionConfirm);
1677 case eRasMessage_locationRequest: 1741 case eRasMessage_locationRequest:
1678 return process_lrq(skb, ct, ctinfo, data, 1742 return process_lrq(skb, ct, ctinfo, protoff, data,
1679 &ras->locationRequest); 1743 &ras->locationRequest);
1680 case eRasMessage_locationConfirm: 1744 case eRasMessage_locationConfirm:
1681 return process_lcf(skb, ct, ctinfo, data, 1745 return process_lcf(skb, ct, ctinfo, protoff, data,
1682 &ras->locationConfirm); 1746 &ras->locationConfirm);
1683 case eRasMessage_infoRequestResponse: 1747 case eRasMessage_infoRequestResponse:
1684 return process_irr(skb, ct, ctinfo, data, 1748 return process_irr(skb, ct, ctinfo, protoff, data,
1685 &ras->infoRequestResponse); 1749 &ras->infoRequestResponse);
1686 default: 1750 default:
1687 pr_debug("nf_ct_ras: RAS message %d\n", ras->choice); 1751 pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
@@ -1721,7 +1785,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
1721 } 1785 }
1722 1786
1723 /* Process RAS message */ 1787 /* Process RAS message */
1724 if (process_ras(skb, ct, ctinfo, &data, &ras) < 0) 1788 if (process_ras(skb, ct, ctinfo, protoff, &data, &ras) < 0)
1725 goto drop; 1789 goto drop;
1726 1790
1727 accept: 1791 accept:
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 009c52cfd1ec..3b20aa77cfc8 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -33,6 +33,7 @@ static DEFINE_SPINLOCK(irc_buffer_lock);
33 33
34unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, 34unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
35 enum ip_conntrack_info ctinfo, 35 enum ip_conntrack_info ctinfo,
36 unsigned int protoff,
36 unsigned int matchoff, 37 unsigned int matchoff,
37 unsigned int matchlen, 38 unsigned int matchlen,
38 struct nf_conntrack_expect *exp) __read_mostly; 39 struct nf_conntrack_expect *exp) __read_mostly;
@@ -205,7 +206,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
205 206
206 nf_nat_irc = rcu_dereference(nf_nat_irc_hook); 207 nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
207 if (nf_nat_irc && ct->status & IPS_NAT_MASK) 208 if (nf_nat_irc && ct->status & IPS_NAT_MASK)
208 ret = nf_nat_irc(skb, ctinfo, 209 ret = nf_nat_irc(skb, ctinfo, protoff,
209 addr_beg_p - ib_ptr, 210 addr_beg_p - ib_ptr,
210 addr_end_p - addr_beg_p, 211 addr_end_p - addr_beg_p,
211 exp); 212 exp);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9807f3278fcb..7bbfb3deea30 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,7 +45,7 @@
45#include <net/netfilter/nf_conntrack_timestamp.h> 45#include <net/netfilter/nf_conntrack_timestamp.h>
46#ifdef CONFIG_NF_NAT_NEEDED 46#ifdef CONFIG_NF_NAT_NEEDED
47#include <net/netfilter/nf_nat_core.h> 47#include <net/netfilter/nf_nat_core.h>
48#include <net/netfilter/nf_nat_protocol.h> 48#include <net/netfilter/nf_nat_l4proto.h>
49#include <net/netfilter/nf_nat_helper.h> 49#include <net/netfilter/nf_nat_helper.h>
50#endif 50#endif
51 51
@@ -418,16 +418,16 @@ nla_put_failure:
418} 418}
419 419
420static int 420static int
421ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, 421ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
422 struct nf_conn *ct) 422 struct nf_conn *ct)
423{ 423{
424 struct nlmsghdr *nlh; 424 struct nlmsghdr *nlh;
425 struct nfgenmsg *nfmsg; 425 struct nfgenmsg *nfmsg;
426 struct nlattr *nest_parms; 426 struct nlattr *nest_parms;
427 unsigned int flags = pid ? NLM_F_MULTI : 0, event; 427 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
428 428
429 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW); 429 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
430 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 430 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
431 if (nlh == NULL) 431 if (nlh == NULL)
432 goto nlmsg_failure; 432 goto nlmsg_failure;
433 433
@@ -604,7 +604,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
604 goto errout; 604 goto errout;
605 605
606 type |= NFNL_SUBSYS_CTNETLINK << 8; 606 type |= NFNL_SUBSYS_CTNETLINK << 8;
607 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags); 607 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
608 if (nlh == NULL) 608 if (nlh == NULL)
609 goto nlmsg_failure; 609 goto nlmsg_failure;
610 610
@@ -680,7 +680,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
680 rcu_read_unlock(); 680 rcu_read_unlock();
681 681
682 nlmsg_end(skb, nlh); 682 nlmsg_end(skb, nlh);
683 err = nfnetlink_send(skb, net, item->pid, group, item->report, 683 err = nfnetlink_send(skb, net, item->portid, group, item->report,
684 GFP_ATOMIC); 684 GFP_ATOMIC);
685 if (err == -ENOBUFS || err == -EAGAIN) 685 if (err == -ENOBUFS || err == -EAGAIN)
686 return -ENOBUFS; 686 return -ENOBUFS;
@@ -757,7 +757,7 @@ restart:
757#endif 757#endif
758 rcu_read_lock(); 758 rcu_read_lock();
759 res = 759 res =
760 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 760 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
761 cb->nlh->nlmsg_seq, 761 cb->nlh->nlmsg_seq,
762 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 762 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
763 ct); 763 ct);
@@ -961,7 +961,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
961 else { 961 else {
962 /* Flush the whole table */ 962 /* Flush the whole table */
963 nf_conntrack_flush_report(net, 963 nf_conntrack_flush_report(net,
964 NETLINK_CB(skb).pid, 964 NETLINK_CB(skb).portid,
965 nlmsg_report(nlh)); 965 nlmsg_report(nlh));
966 return 0; 966 return 0;
967 } 967 }
@@ -985,7 +985,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
985 985
986 if (del_timer(&ct->timeout)) { 986 if (del_timer(&ct->timeout)) {
987 if (nf_conntrack_event_report(IPCT_DESTROY, ct, 987 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
988 NETLINK_CB(skb).pid, 988 NETLINK_CB(skb).portid,
989 nlmsg_report(nlh)) < 0) { 989 nlmsg_report(nlh)) < 0) {
990 nf_ct_delete_from_lists(ct); 990 nf_ct_delete_from_lists(ct);
991 /* we failed to report the event, try later */ 991 /* we failed to report the event, try later */
@@ -1069,14 +1069,14 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
1069 } 1069 }
1070 1070
1071 rcu_read_lock(); 1071 rcu_read_lock();
1072 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 1072 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1073 NFNL_MSG_TYPE(nlh->nlmsg_type), ct); 1073 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1074 rcu_read_unlock(); 1074 rcu_read_unlock();
1075 nf_ct_put(ct); 1075 nf_ct_put(ct);
1076 if (err <= 0) 1076 if (err <= 0)
1077 goto free; 1077 goto free;
1078 1078
1079 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1079 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1080 if (err < 0) 1080 if (err < 0)
1081 goto out; 1081 goto out;
1082 1082
@@ -1096,13 +1096,14 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
1096 const struct nlattr *attr) 1096 const struct nlattr *attr)
1097{ 1097{
1098 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup; 1098 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1099 int err;
1099 1100
1100 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook); 1101 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1101 if (!parse_nat_setup) { 1102 if (!parse_nat_setup) {
1102#ifdef CONFIG_MODULES 1103#ifdef CONFIG_MODULES
1103 rcu_read_unlock(); 1104 rcu_read_unlock();
1104 nfnl_unlock(); 1105 nfnl_unlock();
1105 if (request_module("nf-nat-ipv4") < 0) { 1106 if (request_module("nf-nat") < 0) {
1106 nfnl_lock(); 1107 nfnl_lock();
1107 rcu_read_lock(); 1108 rcu_read_lock();
1108 return -EOPNOTSUPP; 1109 return -EOPNOTSUPP;
@@ -1115,7 +1116,23 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
1115 return -EOPNOTSUPP; 1116 return -EOPNOTSUPP;
1116 } 1117 }
1117 1118
1118 return parse_nat_setup(ct, manip, attr); 1119 err = parse_nat_setup(ct, manip, attr);
1120 if (err == -EAGAIN) {
1121#ifdef CONFIG_MODULES
1122 rcu_read_unlock();
1123 nfnl_unlock();
1124 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1125 nfnl_lock();
1126 rcu_read_lock();
1127 return -EOPNOTSUPP;
1128 }
1129 nfnl_lock();
1130 rcu_read_lock();
1131#else
1132 err = -EOPNOTSUPP;
1133#endif
1134 }
1135 return err;
1119} 1136}
1120#endif 1137#endif
1121 1138
@@ -1221,7 +1238,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1221 if (help) { 1238 if (help) {
1222 if (help->helper == helper) { 1239 if (help->helper == helper) {
1223 /* update private helper data if allowed. */ 1240 /* update private helper data if allowed. */
1224 if (helper->from_nlattr && helpinfo) 1241 if (helper->from_nlattr)
1225 helper->from_nlattr(helpinfo, ct); 1242 helper->from_nlattr(helpinfo, ct);
1226 return 0; 1243 return 0;
1227 } else 1244 } else
@@ -1450,7 +1467,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
1450 goto err2; 1467 goto err2;
1451 } 1468 }
1452 /* set private helper data if allowed. */ 1469 /* set private helper data if allowed. */
1453 if (helper->from_nlattr && helpinfo) 1470 if (helper->from_nlattr)
1454 helper->from_nlattr(helpinfo, ct); 1471 helper->from_nlattr(helpinfo, ct);
1455 1472
1456 /* not in hash table yet so not strictly necessary */ 1473 /* not in hash table yet so not strictly necessary */
@@ -1596,7 +1613,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1596 (1 << IPCT_PROTOINFO) | 1613 (1 << IPCT_PROTOINFO) |
1597 (1 << IPCT_NATSEQADJ) | 1614 (1 << IPCT_NATSEQADJ) |
1598 (1 << IPCT_MARK) | events, 1615 (1 << IPCT_MARK) | events,
1599 ct, NETLINK_CB(skb).pid, 1616 ct, NETLINK_CB(skb).portid,
1600 nlmsg_report(nlh)); 1617 nlmsg_report(nlh));
1601 nf_ct_put(ct); 1618 nf_ct_put(ct);
1602 } 1619 }
@@ -1618,7 +1635,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1618 (1 << IPCT_PROTOINFO) | 1635 (1 << IPCT_PROTOINFO) |
1619 (1 << IPCT_NATSEQADJ) | 1636 (1 << IPCT_NATSEQADJ) |
1620 (1 << IPCT_MARK), 1637 (1 << IPCT_MARK),
1621 ct, NETLINK_CB(skb).pid, 1638 ct, NETLINK_CB(skb).portid,
1622 nlmsg_report(nlh)); 1639 nlmsg_report(nlh));
1623 } 1640 }
1624 } 1641 }
@@ -1628,15 +1645,15 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1628} 1645}
1629 1646
1630static int 1647static int
1631ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 1648ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1632 __u16 cpu, const struct ip_conntrack_stat *st) 1649 __u16 cpu, const struct ip_conntrack_stat *st)
1633{ 1650{
1634 struct nlmsghdr *nlh; 1651 struct nlmsghdr *nlh;
1635 struct nfgenmsg *nfmsg; 1652 struct nfgenmsg *nfmsg;
1636 unsigned int flags = pid ? NLM_F_MULTI : 0, event; 1653 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1637 1654
1638 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU); 1655 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1639 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 1656 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1640 if (nlh == NULL) 1657 if (nlh == NULL)
1641 goto nlmsg_failure; 1658 goto nlmsg_failure;
1642 1659
@@ -1688,7 +1705,7 @@ ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1688 1705
1689 st = per_cpu_ptr(net->ct.stat, cpu); 1706 st = per_cpu_ptr(net->ct.stat, cpu);
1690 if (ctnetlink_ct_stat_cpu_fill_info(skb, 1707 if (ctnetlink_ct_stat_cpu_fill_info(skb,
1691 NETLINK_CB(cb->skb).pid, 1708 NETLINK_CB(cb->skb).portid,
1692 cb->nlh->nlmsg_seq, 1709 cb->nlh->nlmsg_seq,
1693 cpu, st) < 0) 1710 cpu, st) < 0)
1694 break; 1711 break;
@@ -1714,16 +1731,16 @@ ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1714} 1731}
1715 1732
1716static int 1733static int
1717ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, 1734ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
1718 struct net *net) 1735 struct net *net)
1719{ 1736{
1720 struct nlmsghdr *nlh; 1737 struct nlmsghdr *nlh;
1721 struct nfgenmsg *nfmsg; 1738 struct nfgenmsg *nfmsg;
1722 unsigned int flags = pid ? NLM_F_MULTI : 0, event; 1739 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1723 unsigned int nr_conntracks = atomic_read(&net->ct.count); 1740 unsigned int nr_conntracks = atomic_read(&net->ct.count);
1724 1741
1725 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS); 1742 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1726 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 1743 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1727 if (nlh == NULL) 1744 if (nlh == NULL)
1728 goto nlmsg_failure; 1745 goto nlmsg_failure;
1729 1746
@@ -1756,14 +1773,14 @@ ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
1756 if (skb2 == NULL) 1773 if (skb2 == NULL)
1757 return -ENOMEM; 1774 return -ENOMEM;
1758 1775
1759 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid, 1776 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
1760 nlh->nlmsg_seq, 1777 nlh->nlmsg_seq,
1761 NFNL_MSG_TYPE(nlh->nlmsg_type), 1778 NFNL_MSG_TYPE(nlh->nlmsg_type),
1762 sock_net(skb->sk)); 1779 sock_net(skb->sk));
1763 if (err <= 0) 1780 if (err <= 0)
1764 goto free; 1781 goto free;
1765 1782
1766 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1783 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1767 if (err < 0) 1784 if (err < 0)
1768 goto out; 1785 goto out;
1769 1786
@@ -1979,6 +1996,8 @@ nla_put_failure:
1979 return -1; 1996 return -1;
1980} 1997}
1981 1998
1999static const union nf_inet_addr any_addr;
2000
1982static int 2001static int
1983ctnetlink_exp_dump_expect(struct sk_buff *skb, 2002ctnetlink_exp_dump_expect(struct sk_buff *skb,
1984 const struct nf_conntrack_expect *exp) 2003 const struct nf_conntrack_expect *exp)
@@ -2005,7 +2024,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
2005 goto nla_put_failure; 2024 goto nla_put_failure;
2006 2025
2007#ifdef CONFIG_NF_NAT_NEEDED 2026#ifdef CONFIG_NF_NAT_NEEDED
2008 if (exp->saved_ip || exp->saved_proto.all) { 2027 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2028 exp->saved_proto.all) {
2009 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED); 2029 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2010 if (!nest_parms) 2030 if (!nest_parms)
2011 goto nla_put_failure; 2031 goto nla_put_failure;
@@ -2014,7 +2034,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
2014 goto nla_put_failure; 2034 goto nla_put_failure;
2015 2035
2016 nat_tuple.src.l3num = nf_ct_l3num(master); 2036 nat_tuple.src.l3num = nf_ct_l3num(master);
2017 nat_tuple.src.u3.ip = exp->saved_ip; 2037 nat_tuple.src.u3 = exp->saved_addr;
2018 nat_tuple.dst.protonum = nf_ct_protonum(master); 2038 nat_tuple.dst.protonum = nf_ct_protonum(master);
2019 nat_tuple.src.u = exp->saved_proto; 2039 nat_tuple.src.u = exp->saved_proto;
2020 2040
@@ -2050,15 +2070,15 @@ nla_put_failure:
2050} 2070}
2051 2071
2052static int 2072static int
2053ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, 2073ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2054 int event, const struct nf_conntrack_expect *exp) 2074 int event, const struct nf_conntrack_expect *exp)
2055{ 2075{
2056 struct nlmsghdr *nlh; 2076 struct nlmsghdr *nlh;
2057 struct nfgenmsg *nfmsg; 2077 struct nfgenmsg *nfmsg;
2058 unsigned int flags = pid ? NLM_F_MULTI : 0; 2078 unsigned int flags = portid ? NLM_F_MULTI : 0;
2059 2079
2060 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8; 2080 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2061 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 2081 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2062 if (nlh == NULL) 2082 if (nlh == NULL)
2063 goto nlmsg_failure; 2083 goto nlmsg_failure;
2064 2084
@@ -2109,7 +2129,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2109 goto errout; 2129 goto errout;
2110 2130
2111 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8; 2131 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2112 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags); 2132 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2113 if (nlh == NULL) 2133 if (nlh == NULL)
2114 goto nlmsg_failure; 2134 goto nlmsg_failure;
2115 2135
@@ -2124,7 +2144,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2124 rcu_read_unlock(); 2144 rcu_read_unlock();
2125 2145
2126 nlmsg_end(skb, nlh); 2146 nlmsg_end(skb, nlh);
2127 nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC); 2147 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2128 return 0; 2148 return 0;
2129 2149
2130nla_put_failure: 2150nla_put_failure:
@@ -2167,7 +2187,7 @@ restart:
2167 cb->args[1] = 0; 2187 cb->args[1] = 0;
2168 } 2188 }
2169 if (ctnetlink_exp_fill_info(skb, 2189 if (ctnetlink_exp_fill_info(skb,
2170 NETLINK_CB(cb->skb).pid, 2190 NETLINK_CB(cb->skb).portid,
2171 cb->nlh->nlmsg_seq, 2191 cb->nlh->nlmsg_seq,
2172 IPCTNL_MSG_EXP_NEW, 2192 IPCTNL_MSG_EXP_NEW,
2173 exp) < 0) { 2193 exp) < 0) {
@@ -2260,14 +2280,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2260 } 2280 }
2261 2281
2262 rcu_read_lock(); 2282 rcu_read_lock();
2263 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, 2283 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
2264 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp); 2284 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2265 rcu_read_unlock(); 2285 rcu_read_unlock();
2266 nf_ct_expect_put(exp); 2286 nf_ct_expect_put(exp);
2267 if (err <= 0) 2287 if (err <= 0)
2268 goto free; 2288 goto free;
2269 2289
2270 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 2290 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2271 if (err < 0) 2291 if (err < 0)
2272 goto out; 2292 goto out;
2273 2293
@@ -2321,7 +2341,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2321 /* after list removal, usage count == 1 */ 2341 /* after list removal, usage count == 1 */
2322 spin_lock_bh(&nf_conntrack_lock); 2342 spin_lock_bh(&nf_conntrack_lock);
2323 if (del_timer(&exp->timeout)) { 2343 if (del_timer(&exp->timeout)) {
2324 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid, 2344 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
2325 nlmsg_report(nlh)); 2345 nlmsg_report(nlh));
2326 nf_ct_expect_put(exp); 2346 nf_ct_expect_put(exp);
2327 } 2347 }
@@ -2343,7 +2363,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2343 if (!strcmp(m_help->helper->name, name) && 2363 if (!strcmp(m_help->helper->name, name) &&
2344 del_timer(&exp->timeout)) { 2364 del_timer(&exp->timeout)) {
2345 nf_ct_unlink_expect_report(exp, 2365 nf_ct_unlink_expect_report(exp,
2346 NETLINK_CB(skb).pid, 2366 NETLINK_CB(skb).portid,
2347 nlmsg_report(nlh)); 2367 nlmsg_report(nlh));
2348 nf_ct_expect_put(exp); 2368 nf_ct_expect_put(exp);
2349 } 2369 }
@@ -2359,7 +2379,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2359 hnode) { 2379 hnode) {
2360 if (del_timer(&exp->timeout)) { 2380 if (del_timer(&exp->timeout)) {
2361 nf_ct_unlink_expect_report(exp, 2381 nf_ct_unlink_expect_report(exp,
2362 NETLINK_CB(skb).pid, 2382 NETLINK_CB(skb).portid,
2363 nlmsg_report(nlh)); 2383 nlmsg_report(nlh));
2364 nf_ct_expect_put(exp); 2384 nf_ct_expect_put(exp);
2365 } 2385 }
@@ -2410,7 +2430,7 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
2410 if (err < 0) 2430 if (err < 0)
2411 return err; 2431 return err;
2412 2432
2413 exp->saved_ip = nat_tuple.src.u3.ip; 2433 exp->saved_addr = nat_tuple.src.u3;
2414 exp->saved_proto = nat_tuple.src.u; 2434 exp->saved_proto = nat_tuple.src.u;
2415 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR])); 2435 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2416 2436
@@ -2424,7 +2444,7 @@ static int
2424ctnetlink_create_expect(struct net *net, u16 zone, 2444ctnetlink_create_expect(struct net *net, u16 zone,
2425 const struct nlattr * const cda[], 2445 const struct nlattr * const cda[],
2426 u_int8_t u3, 2446 u_int8_t u3,
2427 u32 pid, int report) 2447 u32 portid, int report)
2428{ 2448{
2429 struct nf_conntrack_tuple tuple, mask, master_tuple; 2449 struct nf_conntrack_tuple tuple, mask, master_tuple;
2430 struct nf_conntrack_tuple_hash *h = NULL; 2450 struct nf_conntrack_tuple_hash *h = NULL;
@@ -2537,7 +2557,7 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2537 if (err < 0) 2557 if (err < 0)
2538 goto err_out; 2558 goto err_out;
2539 } 2559 }
2540 err = nf_ct_expect_related_report(exp, pid, report); 2560 err = nf_ct_expect_related_report(exp, portid, report);
2541err_out: 2561err_out:
2542 nf_ct_expect_put(exp); 2562 nf_ct_expect_put(exp);
2543out: 2563out:
@@ -2580,7 +2600,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2580 if (nlh->nlmsg_flags & NLM_F_CREATE) { 2600 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2581 err = ctnetlink_create_expect(net, zone, cda, 2601 err = ctnetlink_create_expect(net, zone, cda,
2582 u3, 2602 u3,
2583 NETLINK_CB(skb).pid, 2603 NETLINK_CB(skb).portid,
2584 nlmsg_report(nlh)); 2604 nlmsg_report(nlh));
2585 } 2605 }
2586 return err; 2606 return err;
@@ -2595,15 +2615,15 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2595} 2615}
2596 2616
2597static int 2617static int
2598ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu, 2618ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
2599 const struct ip_conntrack_stat *st) 2619 const struct ip_conntrack_stat *st)
2600{ 2620{
2601 struct nlmsghdr *nlh; 2621 struct nlmsghdr *nlh;
2602 struct nfgenmsg *nfmsg; 2622 struct nfgenmsg *nfmsg;
2603 unsigned int flags = pid ? NLM_F_MULTI : 0, event; 2623 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2604 2624
2605 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU); 2625 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
2606 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 2626 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2607 if (nlh == NULL) 2627 if (nlh == NULL)
2608 goto nlmsg_failure; 2628 goto nlmsg_failure;
2609 2629
@@ -2642,7 +2662,7 @@ ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2642 continue; 2662 continue;
2643 2663
2644 st = per_cpu_ptr(net->ct.stat, cpu); 2664 st = per_cpu_ptr(net->ct.stat, cpu);
2645 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid, 2665 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
2646 cb->nlh->nlmsg_seq, 2666 cb->nlh->nlmsg_seq,
2647 cpu, st) < 0) 2667 cpu, st) < 0)
2648 break; 2668 break;
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 6fed9ec35248..cc7669ef0b95 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -45,14 +45,14 @@ static DEFINE_SPINLOCK(nf_pptp_lock);
45int 45int
46(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, 46(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
47 struct nf_conn *ct, enum ip_conntrack_info ctinfo, 47 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
48 struct PptpControlHeader *ctlh, 48 unsigned int protoff, struct PptpControlHeader *ctlh,
49 union pptp_ctrl_union *pptpReq) __read_mostly; 49 union pptp_ctrl_union *pptpReq) __read_mostly;
50EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound); 50EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound);
51 51
52int 52int
53(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, 53(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
54 struct nf_conn *ct, enum ip_conntrack_info ctinfo, 54 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
55 struct PptpControlHeader *ctlh, 55 unsigned int protoff, struct PptpControlHeader *ctlh,
56 union pptp_ctrl_union *pptpReq) __read_mostly; 56 union pptp_ctrl_union *pptpReq) __read_mostly;
57EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound); 57EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound);
58 58
@@ -262,7 +262,7 @@ out_unexpect_orig:
262} 262}
263 263
264static inline int 264static inline int
265pptp_inbound_pkt(struct sk_buff *skb, 265pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
266 struct PptpControlHeader *ctlh, 266 struct PptpControlHeader *ctlh,
267 union pptp_ctrl_union *pptpReq, 267 union pptp_ctrl_union *pptpReq,
268 unsigned int reqlen, 268 unsigned int reqlen,
@@ -376,7 +376,8 @@ pptp_inbound_pkt(struct sk_buff *skb,
376 376
377 nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound); 377 nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound);
378 if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK) 378 if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK)
379 return nf_nat_pptp_inbound(skb, ct, ctinfo, ctlh, pptpReq); 379 return nf_nat_pptp_inbound(skb, ct, ctinfo,
380 protoff, ctlh, pptpReq);
380 return NF_ACCEPT; 381 return NF_ACCEPT;
381 382
382invalid: 383invalid:
@@ -389,7 +390,7 @@ invalid:
389} 390}
390 391
391static inline int 392static inline int
392pptp_outbound_pkt(struct sk_buff *skb, 393pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
393 struct PptpControlHeader *ctlh, 394 struct PptpControlHeader *ctlh,
394 union pptp_ctrl_union *pptpReq, 395 union pptp_ctrl_union *pptpReq,
395 unsigned int reqlen, 396 unsigned int reqlen,
@@ -471,7 +472,8 @@ pptp_outbound_pkt(struct sk_buff *skb,
471 472
472 nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound); 473 nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound);
473 if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK) 474 if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK)
474 return nf_nat_pptp_outbound(skb, ct, ctinfo, ctlh, pptpReq); 475 return nf_nat_pptp_outbound(skb, ct, ctinfo,
476 protoff, ctlh, pptpReq);
475 return NF_ACCEPT; 477 return NF_ACCEPT;
476 478
477invalid: 479invalid:
@@ -570,11 +572,11 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
570 * established from PNS->PAC. However, RFC makes no guarantee */ 572 * established from PNS->PAC. However, RFC makes no guarantee */
571 if (dir == IP_CT_DIR_ORIGINAL) 573 if (dir == IP_CT_DIR_ORIGINAL)
572 /* client -> server (PNS -> PAC) */ 574 /* client -> server (PNS -> PAC) */
573 ret = pptp_outbound_pkt(skb, ctlh, pptpReq, reqlen, ct, 575 ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
574 ctinfo); 576 ctinfo);
575 else 577 else
576 /* server -> client (PAC -> PNS) */ 578 /* server -> client (PAC -> PNS) */
577 ret = pptp_inbound_pkt(skb, ctlh, pptpReq, reqlen, ct, 579 ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
578 ctinfo); 580 ctinfo);
579 pr_debug("sstate: %d->%d, cstate: %d->%d\n", 581 pr_debug("sstate: %d->%d, cstate: %d->%d\n",
580 oldsstate, info->sstate, oldcstate, info->cstate); 582 oldsstate, info->sstate, oldcstate, info->cstate);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 0dc63854390f..51e928db48c8 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -21,7 +21,6 @@
21#include <linux/notifier.h> 21#include <linux/notifier.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/rtnetlink.h>
25 24
26#include <net/netfilter/nf_conntrack.h> 25#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_l3proto.h> 26#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -294,9 +293,7 @@ void nf_conntrack_l3proto_unregister(struct net *net,
294 nf_ct_l3proto_unregister_sysctl(net, proto); 293 nf_ct_l3proto_unregister_sysctl(net, proto);
295 294
296 /* Remove all contrack entries for this protocol */ 295 /* Remove all contrack entries for this protocol */
297 rtnl_lock();
298 nf_ct_iterate_cleanup(net, kill_l3proto, proto); 296 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
299 rtnl_unlock();
300} 297}
301EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 298EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
302 299
@@ -502,9 +499,7 @@ void nf_conntrack_l4proto_unregister(struct net *net,
502 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto); 499 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
503 500
504 /* Remove all contrack entries for this protocol */ 501 /* Remove all contrack entries for this protocol */
505 rtnl_lock();
506 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); 502 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
507 rtnl_unlock();
508} 503}
509EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 504EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
510 505
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index e046b3756aab..61f9285111d1 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -502,10 +502,10 @@ static inline s16 nat_offset(const struct nf_conn *ct,
502 502
503 return get_offset != NULL ? get_offset(ct, dir, seq) : 0; 503 return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
504} 504}
505#define NAT_OFFSET(pf, ct, dir, seq) \ 505#define NAT_OFFSET(ct, dir, seq) \
506 (pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0) 506 (nat_offset(ct, dir, seq))
507#else 507#else
508#define NAT_OFFSET(pf, ct, dir, seq) 0 508#define NAT_OFFSET(ct, dir, seq) 0
509#endif 509#endif
510 510
511static bool tcp_in_window(const struct nf_conn *ct, 511static bool tcp_in_window(const struct nf_conn *ct,
@@ -538,7 +538,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
538 tcp_sack(skb, dataoff, tcph, &sack); 538 tcp_sack(skb, dataoff, tcph, &sack);
539 539
540 /* Take into account NAT sequence number mangling */ 540 /* Take into account NAT sequence number mangling */
541 receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1); 541 receiver_offset = NAT_OFFSET(ct, !dir, ack - 1);
542 ack -= receiver_offset; 542 ack -= receiver_offset;
543 sack -= receiver_offset; 543 sack -= receiver_offset;
544 544
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 5c0a112aeee6..df8f4f284481 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -52,15 +52,17 @@ module_param(sip_direct_media, int, 0600);
52MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " 52MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
53 "endpoints only (default 1)"); 53 "endpoints only (default 1)");
54 54
55unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff, 55unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
56 const char **dptr, 56 unsigned int dataoff, const char **dptr,
57 unsigned int *datalen) __read_mostly; 57 unsigned int *datalen) __read_mostly;
58EXPORT_SYMBOL_GPL(nf_nat_sip_hook); 58EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
59 59
60void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly; 60void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
61 s16 off) __read_mostly;
61EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook); 62EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
62 63
63unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, 64unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
65 unsigned int protoff,
64 unsigned int dataoff, 66 unsigned int dataoff,
65 const char **dptr, 67 const char **dptr,
66 unsigned int *datalen, 68 unsigned int *datalen,
@@ -69,7 +71,8 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
69 unsigned int matchlen) __read_mostly; 71 unsigned int matchlen) __read_mostly;
70EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook); 72EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
71 73
72unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff, 74unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
75 unsigned int dataoff,
73 const char **dptr, 76 const char **dptr,
74 unsigned int *datalen, 77 unsigned int *datalen,
75 unsigned int sdpoff, 78 unsigned int sdpoff,
@@ -79,7 +82,8 @@ unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
79 __read_mostly; 82 __read_mostly;
80EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook); 83EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
81 84
82unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff, 85unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
86 unsigned int dataoff,
83 const char **dptr, 87 const char **dptr,
84 unsigned int *datalen, 88 unsigned int *datalen,
85 unsigned int matchoff, 89 unsigned int matchoff,
@@ -88,6 +92,7 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
88EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook); 92EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
89 93
90unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, 94unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
95 unsigned int protoff,
91 unsigned int dataoff, 96 unsigned int dataoff,
92 const char **dptr, 97 const char **dptr,
93 unsigned int *datalen, 98 unsigned int *datalen,
@@ -96,7 +101,8 @@ unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
96 __read_mostly; 101 __read_mostly;
97EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook); 102EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
98 103
99unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff, 104unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
105 unsigned int dataoff,
100 const char **dptr, 106 const char **dptr,
101 unsigned int *datalen, 107 unsigned int *datalen,
102 struct nf_conntrack_expect *rtp_exp, 108 struct nf_conntrack_expect *rtp_exp,
@@ -737,13 +743,18 @@ static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
737 * be tolerant and also accept records terminated with a single newline 743 * be tolerant and also accept records terminated with a single newline
738 * character". We handle both cases. 744 * character". We handle both cases.
739 */ 745 */
740static const struct sip_header ct_sdp_hdrs[] = { 746static const struct sip_header ct_sdp_hdrs_v4[] = {
741 [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), 747 [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
742 [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len), 748 [SDP_HDR_OWNER] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
743 [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len), 749 [SDP_HDR_CONNECTION] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
744 [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len), 750 [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
745 [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len), 751};
746 [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), 752
753static const struct sip_header ct_sdp_hdrs_v6[] = {
754 [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
755 [SDP_HDR_OWNER] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
756 [SDP_HDR_CONNECTION] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
757 [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
747}; 758};
748 759
749/* Linear string search within SDP header values */ 760/* Linear string search within SDP header values */
@@ -769,11 +780,14 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
769 enum sdp_header_types term, 780 enum sdp_header_types term,
770 unsigned int *matchoff, unsigned int *matchlen) 781 unsigned int *matchoff, unsigned int *matchlen)
771{ 782{
772 const struct sip_header *hdr = &ct_sdp_hdrs[type]; 783 const struct sip_header *hdrs, *hdr, *thdr;
773 const struct sip_header *thdr = &ct_sdp_hdrs[term];
774 const char *start = dptr, *limit = dptr + datalen; 784 const char *start = dptr, *limit = dptr + datalen;
775 int shift = 0; 785 int shift = 0;
776 786
787 hdrs = nf_ct_l3num(ct) == NFPROTO_IPV4 ? ct_sdp_hdrs_v4 : ct_sdp_hdrs_v6;
788 hdr = &hdrs[type];
789 thdr = &hdrs[term];
790
777 for (dptr += dataoff; dptr < limit; dptr++) { 791 for (dptr += dataoff; dptr < limit; dptr++) {
778 /* Find beginning of line */ 792 /* Find beginning of line */
779 if (*dptr != '\r' && *dptr != '\n') 793 if (*dptr != '\r' && *dptr != '\n')
@@ -883,7 +897,8 @@ static void flush_expectations(struct nf_conn *ct, bool media)
883 spin_unlock_bh(&nf_conntrack_lock); 897 spin_unlock_bh(&nf_conntrack_lock);
884} 898}
885 899
886static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff, 900static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
901 unsigned int dataoff,
887 const char **dptr, unsigned int *datalen, 902 const char **dptr, unsigned int *datalen,
888 union nf_inet_addr *daddr, __be16 port, 903 union nf_inet_addr *daddr, __be16 port,
889 enum sip_expectation_classes class, 904 enum sip_expectation_classes class,
@@ -939,12 +954,12 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
939 exp->class != class) 954 exp->class != class)
940 break; 955 break;
941#ifdef CONFIG_NF_NAT_NEEDED 956#ifdef CONFIG_NF_NAT_NEEDED
942 if (exp->tuple.src.l3num == AF_INET && !direct_rtp && 957 if (!direct_rtp &&
943 (exp->saved_ip != exp->tuple.dst.u3.ip || 958 (!nf_inet_addr_cmp(&exp->saved_addr, &exp->tuple.dst.u3) ||
944 exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) && 959 exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) &&
945 ct->status & IPS_NAT_MASK) { 960 ct->status & IPS_NAT_MASK) {
946 daddr->ip = exp->saved_ip; 961 *daddr = exp->saved_addr;
947 tuple.dst.u3.ip = exp->saved_ip; 962 tuple.dst.u3 = exp->saved_addr;
948 tuple.dst.u.udp.port = exp->saved_proto.udp.port; 963 tuple.dst.u.udp.port = exp->saved_proto.udp.port;
949 direct_rtp = 1; 964 direct_rtp = 1;
950 } else 965 } else
@@ -960,7 +975,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
960 if (direct_rtp) { 975 if (direct_rtp) {
961 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); 976 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
962 if (nf_nat_sdp_port && 977 if (nf_nat_sdp_port &&
963 !nf_nat_sdp_port(skb, dataoff, dptr, datalen, 978 !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
964 mediaoff, medialen, ntohs(rtp_port))) 979 mediaoff, medialen, ntohs(rtp_port)))
965 goto err1; 980 goto err1;
966 } 981 }
@@ -982,7 +997,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
982 997
983 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); 998 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
984 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) 999 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
985 ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen, 1000 ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
986 rtp_exp, rtcp_exp, 1001 rtp_exp, rtcp_exp,
987 mediaoff, medialen, daddr); 1002 mediaoff, medialen, daddr);
988 else { 1003 else {
@@ -1023,7 +1038,8 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
1023 return NULL; 1038 return NULL;
1024} 1039}
1025 1040
1026static int process_sdp(struct sk_buff *skb, unsigned int dataoff, 1041static int process_sdp(struct sk_buff *skb, unsigned int protoff,
1042 unsigned int dataoff,
1027 const char **dptr, unsigned int *datalen, 1043 const char **dptr, unsigned int *datalen,
1028 unsigned int cseq) 1044 unsigned int cseq)
1029{ 1045{
@@ -1036,15 +1052,12 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
1036 unsigned int i; 1052 unsigned int i;
1037 union nf_inet_addr caddr, maddr, rtp_addr; 1053 union nf_inet_addr caddr, maddr, rtp_addr;
1038 unsigned int port; 1054 unsigned int port;
1039 enum sdp_header_types c_hdr;
1040 const struct sdp_media_type *t; 1055 const struct sdp_media_type *t;
1041 int ret = NF_ACCEPT; 1056 int ret = NF_ACCEPT;
1042 typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr; 1057 typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
1043 typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session; 1058 typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
1044 1059
1045 nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook); 1060 nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
1046 c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 :
1047 SDP_HDR_CONNECTION_IP6;
1048 1061
1049 /* Find beginning of session description */ 1062 /* Find beginning of session description */
1050 if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen, 1063 if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1058,7 +1071,7 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
1058 * the end of the session description. */ 1071 * the end of the session description. */
1059 caddr_len = 0; 1072 caddr_len = 0;
1060 if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen, 1073 if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen,
1061 c_hdr, SDP_HDR_MEDIA, 1074 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
1062 &matchoff, &matchlen, &caddr) > 0) 1075 &matchoff, &matchlen, &caddr) > 0)
1063 caddr_len = matchlen; 1076 caddr_len = matchlen;
1064 1077
@@ -1088,7 +1101,7 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
1088 /* The media description overrides the session description. */ 1101 /* The media description overrides the session description. */
1089 maddr_len = 0; 1102 maddr_len = 0;
1090 if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen, 1103 if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen,
1091 c_hdr, SDP_HDR_MEDIA, 1104 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
1092 &matchoff, &matchlen, &maddr) > 0) { 1105 &matchoff, &matchlen, &maddr) > 0) {
1093 maddr_len = matchlen; 1106 maddr_len = matchlen;
1094 memcpy(&rtp_addr, &maddr, sizeof(rtp_addr)); 1107 memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
@@ -1097,7 +1110,8 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
1097 else 1110 else
1098 return NF_DROP; 1111 return NF_DROP;
1099 1112
1100 ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen, 1113 ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
1114 dptr, datalen,
1101 &rtp_addr, htons(port), t->class, 1115 &rtp_addr, htons(port), t->class,
1102 mediaoff, medialen); 1116 mediaoff, medialen);
1103 if (ret != NF_ACCEPT) 1117 if (ret != NF_ACCEPT)
@@ -1105,8 +1119,9 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
1105 1119
1106 /* Update media connection address if present */ 1120 /* Update media connection address if present */
1107 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { 1121 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
1108 ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen, 1122 ret = nf_nat_sdp_addr(skb, protoff, dataoff,
1109 mediaoff, c_hdr, SDP_HDR_MEDIA, 1123 dptr, datalen, mediaoff,
1124 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
1110 &rtp_addr); 1125 &rtp_addr);
1111 if (ret != NF_ACCEPT) 1126 if (ret != NF_ACCEPT)
1112 return ret; 1127 return ret;
@@ -1117,12 +1132,13 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
1117 /* Update session connection and owner addresses */ 1132 /* Update session connection and owner addresses */
1118 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook); 1133 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
1119 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) 1134 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
1120 ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff, 1135 ret = nf_nat_sdp_session(skb, protoff, dataoff,
1121 &rtp_addr); 1136 dptr, datalen, sdpoff, &rtp_addr);
1122 1137
1123 return ret; 1138 return ret;
1124} 1139}
1125static int process_invite_response(struct sk_buff *skb, unsigned int dataoff, 1140static int process_invite_response(struct sk_buff *skb, unsigned int protoff,
1141 unsigned int dataoff,
1126 const char **dptr, unsigned int *datalen, 1142 const char **dptr, unsigned int *datalen,
1127 unsigned int cseq, unsigned int code) 1143 unsigned int cseq, unsigned int code)
1128{ 1144{
@@ -1132,13 +1148,14 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
1132 1148
1133 if ((code >= 100 && code <= 199) || 1149 if ((code >= 100 && code <= 199) ||
1134 (code >= 200 && code <= 299)) 1150 (code >= 200 && code <= 299))
1135 return process_sdp(skb, dataoff, dptr, datalen, cseq); 1151 return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
1136 else if (ct_sip_info->invite_cseq == cseq) 1152 else if (ct_sip_info->invite_cseq == cseq)
1137 flush_expectations(ct, true); 1153 flush_expectations(ct, true);
1138 return NF_ACCEPT; 1154 return NF_ACCEPT;
1139} 1155}
1140 1156
1141static int process_update_response(struct sk_buff *skb, unsigned int dataoff, 1157static int process_update_response(struct sk_buff *skb, unsigned int protoff,
1158 unsigned int dataoff,
1142 const char **dptr, unsigned int *datalen, 1159 const char **dptr, unsigned int *datalen,
1143 unsigned int cseq, unsigned int code) 1160 unsigned int cseq, unsigned int code)
1144{ 1161{
@@ -1148,13 +1165,14 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
1148 1165
1149 if ((code >= 100 && code <= 199) || 1166 if ((code >= 100 && code <= 199) ||
1150 (code >= 200 && code <= 299)) 1167 (code >= 200 && code <= 299))
1151 return process_sdp(skb, dataoff, dptr, datalen, cseq); 1168 return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
1152 else if (ct_sip_info->invite_cseq == cseq) 1169 else if (ct_sip_info->invite_cseq == cseq)
1153 flush_expectations(ct, true); 1170 flush_expectations(ct, true);
1154 return NF_ACCEPT; 1171 return NF_ACCEPT;
1155} 1172}
1156 1173
1157static int process_prack_response(struct sk_buff *skb, unsigned int dataoff, 1174static int process_prack_response(struct sk_buff *skb, unsigned int protoff,
1175 unsigned int dataoff,
1158 const char **dptr, unsigned int *datalen, 1176 const char **dptr, unsigned int *datalen,
1159 unsigned int cseq, unsigned int code) 1177 unsigned int cseq, unsigned int code)
1160{ 1178{
@@ -1164,13 +1182,14 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
1164 1182
1165 if ((code >= 100 && code <= 199) || 1183 if ((code >= 100 && code <= 199) ||
1166 (code >= 200 && code <= 299)) 1184 (code >= 200 && code <= 299))
1167 return process_sdp(skb, dataoff, dptr, datalen, cseq); 1185 return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
1168 else if (ct_sip_info->invite_cseq == cseq) 1186 else if (ct_sip_info->invite_cseq == cseq)
1169 flush_expectations(ct, true); 1187 flush_expectations(ct, true);
1170 return NF_ACCEPT; 1188 return NF_ACCEPT;
1171} 1189}
1172 1190
1173static int process_invite_request(struct sk_buff *skb, unsigned int dataoff, 1191static int process_invite_request(struct sk_buff *skb, unsigned int protoff,
1192 unsigned int dataoff,
1174 const char **dptr, unsigned int *datalen, 1193 const char **dptr, unsigned int *datalen,
1175 unsigned int cseq) 1194 unsigned int cseq)
1176{ 1195{
@@ -1180,13 +1199,14 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
1180 unsigned int ret; 1199 unsigned int ret;
1181 1200
1182 flush_expectations(ct, true); 1201 flush_expectations(ct, true);
1183 ret = process_sdp(skb, dataoff, dptr, datalen, cseq); 1202 ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
1184 if (ret == NF_ACCEPT) 1203 if (ret == NF_ACCEPT)
1185 ct_sip_info->invite_cseq = cseq; 1204 ct_sip_info->invite_cseq = cseq;
1186 return ret; 1205 return ret;
1187} 1206}
1188 1207
1189static int process_bye_request(struct sk_buff *skb, unsigned int dataoff, 1208static int process_bye_request(struct sk_buff *skb, unsigned int protoff,
1209 unsigned int dataoff,
1190 const char **dptr, unsigned int *datalen, 1210 const char **dptr, unsigned int *datalen,
1191 unsigned int cseq) 1211 unsigned int cseq)
1192{ 1212{
@@ -1201,7 +1221,8 @@ static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
1201 * signalling connections. The expectation is marked inactive and is activated 1221 * signalling connections. The expectation is marked inactive and is activated
1202 * when receiving a response indicating success from the registrar. 1222 * when receiving a response indicating success from the registrar.
1203 */ 1223 */
1204static int process_register_request(struct sk_buff *skb, unsigned int dataoff, 1224static int process_register_request(struct sk_buff *skb, unsigned int protoff,
1225 unsigned int dataoff,
1205 const char **dptr, unsigned int *datalen, 1226 const char **dptr, unsigned int *datalen,
1206 unsigned int cseq) 1227 unsigned int cseq)
1207{ 1228{
@@ -1276,8 +1297,8 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
1276 1297
1277 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook); 1298 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
1278 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK) 1299 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
1279 ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp, 1300 ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
1280 matchoff, matchlen); 1301 exp, matchoff, matchlen);
1281 else { 1302 else {
1282 if (nf_ct_expect_related(exp) != 0) 1303 if (nf_ct_expect_related(exp) != 0)
1283 ret = NF_DROP; 1304 ret = NF_DROP;
@@ -1292,7 +1313,8 @@ store_cseq:
1292 return ret; 1313 return ret;
1293} 1314}
1294 1315
1295static int process_register_response(struct sk_buff *skb, unsigned int dataoff, 1316static int process_register_response(struct sk_buff *skb, unsigned int protoff,
1317 unsigned int dataoff,
1296 const char **dptr, unsigned int *datalen, 1318 const char **dptr, unsigned int *datalen,
1297 unsigned int cseq, unsigned int code) 1319 unsigned int cseq, unsigned int code)
1298{ 1320{
@@ -1374,7 +1396,8 @@ static const struct sip_handler sip_handlers[] = {
1374 SIP_HANDLER("REGISTER", process_register_request, process_register_response), 1396 SIP_HANDLER("REGISTER", process_register_request, process_register_response),
1375}; 1397};
1376 1398
1377static int process_sip_response(struct sk_buff *skb, unsigned int dataoff, 1399static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
1400 unsigned int dataoff,
1378 const char **dptr, unsigned int *datalen) 1401 const char **dptr, unsigned int *datalen)
1379{ 1402{
1380 enum ip_conntrack_info ctinfo; 1403 enum ip_conntrack_info ctinfo;
@@ -1405,13 +1428,14 @@ static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
1405 if (*datalen < matchend + handler->len || 1428 if (*datalen < matchend + handler->len ||
1406 strnicmp(*dptr + matchend, handler->method, handler->len)) 1429 strnicmp(*dptr + matchend, handler->method, handler->len))
1407 continue; 1430 continue;
1408 return handler->response(skb, dataoff, dptr, datalen, 1431 return handler->response(skb, protoff, dataoff, dptr, datalen,
1409 cseq, code); 1432 cseq, code);
1410 } 1433 }
1411 return NF_ACCEPT; 1434 return NF_ACCEPT;
1412} 1435}
1413 1436
1414static int process_sip_request(struct sk_buff *skb, unsigned int dataoff, 1437static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1438 unsigned int dataoff,
1415 const char **dptr, unsigned int *datalen) 1439 const char **dptr, unsigned int *datalen)
1416{ 1440{
1417 enum ip_conntrack_info ctinfo; 1441 enum ip_conntrack_info ctinfo;
@@ -1436,26 +1460,28 @@ static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
1436 if (!cseq) 1460 if (!cseq)
1437 return NF_DROP; 1461 return NF_DROP;
1438 1462
1439 return handler->request(skb, dataoff, dptr, datalen, cseq); 1463 return handler->request(skb, protoff, dataoff, dptr, datalen,
1464 cseq);
1440 } 1465 }
1441 return NF_ACCEPT; 1466 return NF_ACCEPT;
1442} 1467}
1443 1468
1444static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct, 1469static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
1445 unsigned int dataoff, const char **dptr, 1470 unsigned int protoff, unsigned int dataoff,
1446 unsigned int *datalen) 1471 const char **dptr, unsigned int *datalen)
1447{ 1472{
1448 typeof(nf_nat_sip_hook) nf_nat_sip; 1473 typeof(nf_nat_sip_hook) nf_nat_sip;
1449 int ret; 1474 int ret;
1450 1475
1451 if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) 1476 if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
1452 ret = process_sip_request(skb, dataoff, dptr, datalen); 1477 ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
1453 else 1478 else
1454 ret = process_sip_response(skb, dataoff, dptr, datalen); 1479 ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
1455 1480
1456 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { 1481 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1457 nf_nat_sip = rcu_dereference(nf_nat_sip_hook); 1482 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
1458 if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen)) 1483 if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
1484 dptr, datalen))
1459 ret = NF_DROP; 1485 ret = NF_DROP;
1460 } 1486 }
1461 1487
@@ -1523,7 +1549,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1523 if (msglen > datalen) 1549 if (msglen > datalen)
1524 return NF_DROP; 1550 return NF_DROP;
1525 1551
1526 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); 1552 ret = process_sip_msg(skb, ct, protoff, dataoff,
1553 &dptr, &msglen);
1527 if (ret != NF_ACCEPT) 1554 if (ret != NF_ACCEPT)
1528 break; 1555 break;
1529 diff = msglen - origlen; 1556 diff = msglen - origlen;
@@ -1537,7 +1564,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1537 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { 1564 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1538 nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook); 1565 nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
1539 if (nf_nat_sip_seq_adjust) 1566 if (nf_nat_sip_seq_adjust)
1540 nf_nat_sip_seq_adjust(skb, tdiff); 1567 nf_nat_sip_seq_adjust(skb, protoff, tdiff);
1541 } 1568 }
1542 1569
1543 return ret; 1570 return ret;
@@ -1564,7 +1591,7 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
1564 if (datalen < strlen("SIP/2.0 200")) 1591 if (datalen < strlen("SIP/2.0 200"))
1565 return NF_ACCEPT; 1592 return NF_ACCEPT;
1566 1593
1567 return process_sip_msg(skb, ct, dataoff, &dptr, &datalen); 1594 return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen);
1568} 1595}
1569 1596
1570static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; 1597static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 770f76432ad0..3deec997be89 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,13 +18,13 @@ extern unsigned int nf_iterate(struct list_head *head,
18 unsigned int hook, 18 unsigned int hook,
19 const struct net_device *indev, 19 const struct net_device *indev,
20 const struct net_device *outdev, 20 const struct net_device *outdev,
21 struct list_head **i, 21 struct nf_hook_ops **elemp,
22 int (*okfn)(struct sk_buff *), 22 int (*okfn)(struct sk_buff *),
23 int hook_thresh); 23 int hook_thresh);
24 24
25/* nf_queue.c */ 25/* nf_queue.c */
26extern int nf_queue(struct sk_buff *skb, 26extern int nf_queue(struct sk_buff *skb,
27 struct list_head *elem, 27 struct nf_hook_ops *elem,
28 u_int8_t pf, unsigned int hook, 28 u_int8_t pf, unsigned int hook,
29 struct net_device *indev, 29 struct net_device *indev,
30 struct net_device *outdev, 30 struct net_device *outdev,
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c
index 3c04d24e2976..42d337881171 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/netfilter/nf_nat_amanda.c
@@ -16,7 +16,6 @@
16#include <net/netfilter/nf_conntrack_helper.h> 16#include <net/netfilter/nf_conntrack_helper.h>
17#include <net/netfilter/nf_conntrack_expect.h> 17#include <net/netfilter/nf_conntrack_expect.h>
18#include <net/netfilter/nf_nat_helper.h> 18#include <net/netfilter/nf_nat_helper.h>
19#include <net/netfilter/nf_nat_rule.h>
20#include <linux/netfilter/nf_conntrack_amanda.h> 19#include <linux/netfilter/nf_conntrack_amanda.h>
21 20
22MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); 21MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
@@ -26,6 +25,7 @@ MODULE_ALIAS("ip_nat_amanda");
26 25
27static unsigned int help(struct sk_buff *skb, 26static unsigned int help(struct sk_buff *skb,
28 enum ip_conntrack_info ctinfo, 27 enum ip_conntrack_info ctinfo,
28 unsigned int protoff,
29 unsigned int matchoff, 29 unsigned int matchoff,
30 unsigned int matchlen, 30 unsigned int matchlen,
31 struct nf_conntrack_expect *exp) 31 struct nf_conntrack_expect *exp)
@@ -61,7 +61,7 @@ static unsigned int help(struct sk_buff *skb,
61 61
62 sprintf(buffer, "%u", port); 62 sprintf(buffer, "%u", port);
63 ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo, 63 ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
64 matchoff, matchlen, 64 protoff, matchoff, matchlen,
65 buffer, strlen(buffer)); 65 buffer, strlen(buffer));
66 if (ret != NF_ACCEPT) 66 if (ret != NF_ACCEPT)
67 nf_ct_unexpect_related(exp); 67 nf_ct_unexpect_related(exp);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 44b082fd48ab..5f2f9109f461 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -1,7 +1,7 @@
1/* NAT for netfilter; shared with compatibility layer. */ 1/*
2 2 * (C) 1999-2001 Paul `Rusty' Russell
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> 3 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
4 * (C) 2011 Patrick McHardy <kaber@trash.net>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -13,38 +13,105 @@
13#include <linux/timer.h> 13#include <linux/timer.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/gfp.h> 15#include <linux/gfp.h>
16#include <net/checksum.h> 16#include <net/xfrm.h>
17#include <net/icmp.h>
18#include <net/ip.h>
19#include <net/tcp.h> /* For tcp_prot in getorigdst */
20#include <linux/icmp.h>
21#include <linux/udp.h>
22#include <linux/jhash.h> 17#include <linux/jhash.h>
18#include <linux/rtnetlink.h>
23 19
24#include <linux/netfilter_ipv4.h>
25#include <net/netfilter/nf_conntrack.h> 20#include <net/netfilter/nf_conntrack.h>
26#include <net/netfilter/nf_conntrack_core.h> 21#include <net/netfilter/nf_conntrack_core.h>
27#include <net/netfilter/nf_nat.h> 22#include <net/netfilter/nf_nat.h>
28#include <net/netfilter/nf_nat_protocol.h> 23#include <net/netfilter/nf_nat_l3proto.h>
24#include <net/netfilter/nf_nat_l4proto.h>
29#include <net/netfilter/nf_nat_core.h> 25#include <net/netfilter/nf_nat_core.h>
30#include <net/netfilter/nf_nat_helper.h> 26#include <net/netfilter/nf_nat_helper.h>
31#include <net/netfilter/nf_conntrack_helper.h> 27#include <net/netfilter/nf_conntrack_helper.h>
32#include <net/netfilter/nf_conntrack_l3proto.h> 28#include <net/netfilter/nf_conntrack_l3proto.h>
33#include <net/netfilter/nf_conntrack_zones.h> 29#include <net/netfilter/nf_conntrack_zones.h>
30#include <linux/netfilter/nf_nat.h>
34 31
35static DEFINE_SPINLOCK(nf_nat_lock); 32static DEFINE_SPINLOCK(nf_nat_lock);
36 33
37static struct nf_conntrack_l3proto *l3proto __read_mostly; 34static DEFINE_MUTEX(nf_nat_proto_mutex);
38 35static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
39#define MAX_IP_NAT_PROTO 256 36 __read_mostly;
40static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO] 37static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
41 __read_mostly; 38 __read_mostly;
42 39
43static inline const struct nf_nat_protocol * 40
44__nf_nat_proto_find(u_int8_t protonum) 41inline const struct nf_nat_l3proto *
42__nf_nat_l3proto_find(u8 family)
43{
44 return rcu_dereference(nf_nat_l3protos[family]);
45}
46
47inline const struct nf_nat_l4proto *
48__nf_nat_l4proto_find(u8 family, u8 protonum)
49{
50 return rcu_dereference(nf_nat_l4protos[family][protonum]);
51}
52EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
53
54#ifdef CONFIG_XFRM
55static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
56{
57 const struct nf_nat_l3proto *l3proto;
58 const struct nf_conn *ct;
59 enum ip_conntrack_info ctinfo;
60 enum ip_conntrack_dir dir;
61 unsigned long statusbit;
62 u8 family;
63
64 ct = nf_ct_get(skb, &ctinfo);
65 if (ct == NULL)
66 return;
67
68 family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
69 rcu_read_lock();
70 l3proto = __nf_nat_l3proto_find(family);
71 if (l3proto == NULL)
72 goto out;
73
74 dir = CTINFO2DIR(ctinfo);
75 if (dir == IP_CT_DIR_ORIGINAL)
76 statusbit = IPS_DST_NAT;
77 else
78 statusbit = IPS_SRC_NAT;
79
80 l3proto->decode_session(skb, ct, dir, statusbit, fl);
81out:
82 rcu_read_unlock();
83}
84
85int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
45{ 86{
46 return rcu_dereference(nf_nat_protos[protonum]); 87 struct flowi fl;
88 unsigned int hh_len;
89 struct dst_entry *dst;
90
91 if (xfrm_decode_session(skb, &fl, family) < 0)
92 return -1;
93
94 dst = skb_dst(skb);
95 if (dst->xfrm)
96 dst = ((struct xfrm_dst *)dst)->route;
97 dst_hold(dst);
98
99 dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
100 if (IS_ERR(dst))
101 return -1;
102
103 skb_dst_drop(skb);
104 skb_dst_set(skb, dst);
105
106 /* Change in oif may mean change in hh_len. */
107 hh_len = skb_dst(skb)->dev->hard_header_len;
108 if (skb_headroom(skb) < hh_len &&
109 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
110 return -1;
111 return 0;
47} 112}
113EXPORT_SYMBOL(nf_xfrm_me_harder);
114#endif /* CONFIG_XFRM */
48 115
49/* We keep an extra hash for each conntrack, for fast searching. */ 116/* We keep an extra hash for each conntrack, for fast searching. */
50static inline unsigned int 117static inline unsigned int
@@ -54,10 +121,9 @@ hash_by_src(const struct net *net, u16 zone,
54 unsigned int hash; 121 unsigned int hash;
55 122
56 /* Original src, to ensure we map it consistently if poss. */ 123 /* Original src, to ensure we map it consistently if poss. */
57 hash = jhash_3words((__force u32)tuple->src.u3.ip, 124 hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
58 (__force u32)tuple->src.u.all ^ zone, 125 tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
59 tuple->dst.protonum, nf_conntrack_hash_rnd); 126 return ((u64)hash * net->ct.nat_htable_size) >> 32;
60 return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
61} 127}
62 128
63/* Is this tuple already taken? (not by us) */ 129/* Is this tuple already taken? (not by us) */
@@ -66,10 +132,11 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
66 const struct nf_conn *ignored_conntrack) 132 const struct nf_conn *ignored_conntrack)
67{ 133{
68 /* Conntrack tracking doesn't keep track of outgoing tuples; only 134 /* Conntrack tracking doesn't keep track of outgoing tuples; only
69 incoming ones. NAT means they don't have a fixed mapping, 135 * incoming ones. NAT means they don't have a fixed mapping,
70 so we invert the tuple and look for the incoming reply. 136 * so we invert the tuple and look for the incoming reply.
71 137 *
72 We could keep a separate hash if this proves too slow. */ 138 * We could keep a separate hash if this proves too slow.
139 */
73 struct nf_conntrack_tuple reply; 140 struct nf_conntrack_tuple reply;
74 141
75 nf_ct_invert_tuplepr(&reply, tuple); 142 nf_ct_invert_tuplepr(&reply, tuple);
@@ -78,31 +145,26 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
78EXPORT_SYMBOL(nf_nat_used_tuple); 145EXPORT_SYMBOL(nf_nat_used_tuple);
79 146
80/* If we source map this tuple so reply looks like reply_tuple, will 147/* If we source map this tuple so reply looks like reply_tuple, will
81 * that meet the constraints of range. */ 148 * that meet the constraints of range.
82static int 149 */
83in_range(const struct nf_conntrack_tuple *tuple, 150static int in_range(const struct nf_nat_l3proto *l3proto,
84 const struct nf_nat_ipv4_range *range) 151 const struct nf_nat_l4proto *l4proto,
152 const struct nf_conntrack_tuple *tuple,
153 const struct nf_nat_range *range)
85{ 154{
86 const struct nf_nat_protocol *proto;
87 int ret = 0;
88
89 /* If we are supposed to map IPs, then we must be in the 155 /* If we are supposed to map IPs, then we must be in the
90 range specified, otherwise let this drag us onto a new src IP. */ 156 * range specified, otherwise let this drag us onto a new src IP.
91 if (range->flags & NF_NAT_RANGE_MAP_IPS) { 157 */
92 if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) || 158 if (range->flags & NF_NAT_RANGE_MAP_IPS &&
93 ntohl(tuple->src.u3.ip) > ntohl(range->max_ip)) 159 !l3proto->in_range(tuple, range))
94 return 0; 160 return 0;
95 }
96 161
97 rcu_read_lock();
98 proto = __nf_nat_proto_find(tuple->dst.protonum);
99 if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) || 162 if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
100 proto->in_range(tuple, NF_NAT_MANIP_SRC, 163 l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
101 &range->min, &range->max)) 164 &range->min_proto, &range->max_proto))
102 ret = 1; 165 return 1;
103 rcu_read_unlock();
104 166
105 return ret; 167 return 0;
106} 168}
107 169
108static inline int 170static inline int
@@ -113,24 +175,25 @@ same_src(const struct nf_conn *ct,
113 175
114 t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 176 t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
115 return (t->dst.protonum == tuple->dst.protonum && 177 return (t->dst.protonum == tuple->dst.protonum &&
116 t->src.u3.ip == tuple->src.u3.ip && 178 nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
117 t->src.u.all == tuple->src.u.all); 179 t->src.u.all == tuple->src.u.all);
118} 180}
119 181
120/* Only called for SRC manip */ 182/* Only called for SRC manip */
121static int 183static int
122find_appropriate_src(struct net *net, u16 zone, 184find_appropriate_src(struct net *net, u16 zone,
185 const struct nf_nat_l3proto *l3proto,
186 const struct nf_nat_l4proto *l4proto,
123 const struct nf_conntrack_tuple *tuple, 187 const struct nf_conntrack_tuple *tuple,
124 struct nf_conntrack_tuple *result, 188 struct nf_conntrack_tuple *result,
125 const struct nf_nat_ipv4_range *range) 189 const struct nf_nat_range *range)
126{ 190{
127 unsigned int h = hash_by_src(net, zone, tuple); 191 unsigned int h = hash_by_src(net, zone, tuple);
128 const struct nf_conn_nat *nat; 192 const struct nf_conn_nat *nat;
129 const struct nf_conn *ct; 193 const struct nf_conn *ct;
130 const struct hlist_node *n; 194 const struct hlist_node *n;
131 195
132 rcu_read_lock(); 196 hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
133 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
134 ct = nat->ct; 197 ct = nat->ct;
135 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) { 198 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
136 /* Copy source part from reply tuple. */ 199 /* Copy source part from reply tuple. */
@@ -138,119 +201,148 @@ find_appropriate_src(struct net *net, u16 zone,
138 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 201 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
139 result->dst = tuple->dst; 202 result->dst = tuple->dst;
140 203
141 if (in_range(result, range)) { 204 if (in_range(l3proto, l4proto, result, range))
142 rcu_read_unlock();
143 return 1; 205 return 1;
144 }
145 } 206 }
146 } 207 }
147 rcu_read_unlock();
148 return 0; 208 return 0;
149} 209}
150 210
151/* For [FUTURE] fragmentation handling, we want the least-used 211/* For [FUTURE] fragmentation handling, we want the least-used
152 src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus 212 * src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
153 if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports 213 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
154 1-65535, we don't do pro-rata allocation based on ports; we choose 214 * 1-65535, we don't do pro-rata allocation based on ports; we choose
155 the ip with the lowest src-ip/dst-ip/proto usage. 215 * the ip with the lowest src-ip/dst-ip/proto usage.
156*/ 216 */
157static void 217static void
158find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, 218find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
159 const struct nf_nat_ipv4_range *range, 219 const struct nf_nat_range *range,
160 const struct nf_conn *ct, 220 const struct nf_conn *ct,
161 enum nf_nat_manip_type maniptype) 221 enum nf_nat_manip_type maniptype)
162{ 222{
163 __be32 *var_ipp; 223 union nf_inet_addr *var_ipp;
224 unsigned int i, max;
164 /* Host order */ 225 /* Host order */
165 u_int32_t minip, maxip, j; 226 u32 minip, maxip, j, dist;
227 bool full_range;
166 228
167 /* No IP mapping? Do nothing. */ 229 /* No IP mapping? Do nothing. */
168 if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) 230 if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
169 return; 231 return;
170 232
171 if (maniptype == NF_NAT_MANIP_SRC) 233 if (maniptype == NF_NAT_MANIP_SRC)
172 var_ipp = &tuple->src.u3.ip; 234 var_ipp = &tuple->src.u3;
173 else 235 else
174 var_ipp = &tuple->dst.u3.ip; 236 var_ipp = &tuple->dst.u3;
175 237
176 /* Fast path: only one choice. */ 238 /* Fast path: only one choice. */
177 if (range->min_ip == range->max_ip) { 239 if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
178 *var_ipp = range->min_ip; 240 *var_ipp = range->min_addr;
179 return; 241 return;
180 } 242 }
181 243
244 if (nf_ct_l3num(ct) == NFPROTO_IPV4)
245 max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
246 else
247 max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;
248
182 /* Hashing source and destination IPs gives a fairly even 249 /* Hashing source and destination IPs gives a fairly even
183 * spread in practice (if there are a small number of IPs 250 * spread in practice (if there are a small number of IPs
184 * involved, there usually aren't that many connections 251 * involved, there usually aren't that many connections
185 * anyway). The consistency means that servers see the same 252 * anyway). The consistency means that servers see the same
186 * client coming from the same IP (some Internet Banking sites 253 * client coming from the same IP (some Internet Banking sites
187 * like this), even across reboots. */ 254 * like this), even across reboots.
188 minip = ntohl(range->min_ip); 255 */
189 maxip = ntohl(range->max_ip); 256 j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
190 j = jhash_2words((__force u32)tuple->src.u3.ip, 257 range->flags & NF_NAT_RANGE_PERSISTENT ?
191 range->flags & NF_NAT_RANGE_PERSISTENT ? 258 0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
192 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0); 259
193 j = ((u64)j * (maxip - minip + 1)) >> 32; 260 full_range = false;
194 *var_ipp = htonl(minip + j); 261 for (i = 0; i <= max; i++) {
262 /* If first bytes of the address are at the maximum, use the
263 * distance. Otherwise use the full range.
264 */
265 if (!full_range) {
266 minip = ntohl((__force __be32)range->min_addr.all[i]);
267 maxip = ntohl((__force __be32)range->max_addr.all[i]);
268 dist = maxip - minip + 1;
269 } else {
270 minip = 0;
271 dist = ~0;
272 }
273
274 var_ipp->all[i] = (__force __u32)
275 htonl(minip + (((u64)j * dist) >> 32));
276 if (var_ipp->all[i] != range->max_addr.all[i])
277 full_range = true;
278
279 if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
280 j ^= (__force u32)tuple->dst.u3.all[i];
281 }
195} 282}
196 283
197/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING, 284/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
198 * we change the source to map into the range. For NF_INET_PRE_ROUTING 285 * we change the source to map into the range. For NF_INET_PRE_ROUTING
199 * and NF_INET_LOCAL_OUT, we change the destination to map into the 286 * and NF_INET_LOCAL_OUT, we change the destination to map into the
200 * range. It might not be possible to get a unique tuple, but we try. 287 * range. It might not be possible to get a unique tuple, but we try.
201 * At worst (or if we race), we will end up with a final duplicate in 288 * At worst (or if we race), we will end up with a final duplicate in
202 * __ip_conntrack_confirm and drop the packet. */ 289 * __ip_conntrack_confirm and drop the packet. */
203static void 290static void
204get_unique_tuple(struct nf_conntrack_tuple *tuple, 291get_unique_tuple(struct nf_conntrack_tuple *tuple,
205 const struct nf_conntrack_tuple *orig_tuple, 292 const struct nf_conntrack_tuple *orig_tuple,
206 const struct nf_nat_ipv4_range *range, 293 const struct nf_nat_range *range,
207 struct nf_conn *ct, 294 struct nf_conn *ct,
208 enum nf_nat_manip_type maniptype) 295 enum nf_nat_manip_type maniptype)
209{ 296{
297 const struct nf_nat_l3proto *l3proto;
298 const struct nf_nat_l4proto *l4proto;
210 struct net *net = nf_ct_net(ct); 299 struct net *net = nf_ct_net(ct);
211 const struct nf_nat_protocol *proto;
212 u16 zone = nf_ct_zone(ct); 300 u16 zone = nf_ct_zone(ct);
213 301
214 /* 1) If this srcip/proto/src-proto-part is currently mapped, 302 rcu_read_lock();
215 and that same mapping gives a unique tuple within the given 303 l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
216 range, use that. 304 l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
305 orig_tuple->dst.protonum);
217 306
218 This is only required for source (ie. NAT/masq) mappings. 307 /* 1) If this srcip/proto/src-proto-part is currently mapped,
219 So far, we don't do local source mappings, so multiple 308 * and that same mapping gives a unique tuple within the given
220 manips not an issue. */ 309 * range, use that.
310 *
311 * This is only required for source (ie. NAT/masq) mappings.
312 * So far, we don't do local source mappings, so multiple
313 * manips not an issue.
314 */
221 if (maniptype == NF_NAT_MANIP_SRC && 315 if (maniptype == NF_NAT_MANIP_SRC &&
222 !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) { 316 !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
223 /* try the original tuple first */ 317 /* try the original tuple first */
224 if (in_range(orig_tuple, range)) { 318 if (in_range(l3proto, l4proto, orig_tuple, range)) {
225 if (!nf_nat_used_tuple(orig_tuple, ct)) { 319 if (!nf_nat_used_tuple(orig_tuple, ct)) {
226 *tuple = *orig_tuple; 320 *tuple = *orig_tuple;
227 return; 321 goto out;
228 } 322 }
229 } else if (find_appropriate_src(net, zone, orig_tuple, tuple, 323 } else if (find_appropriate_src(net, zone, l3proto, l4proto,
230 range)) { 324 orig_tuple, tuple, range)) {
231 pr_debug("get_unique_tuple: Found current src map\n"); 325 pr_debug("get_unique_tuple: Found current src map\n");
232 if (!nf_nat_used_tuple(tuple, ct)) 326 if (!nf_nat_used_tuple(tuple, ct))
233 return; 327 goto out;
234 } 328 }
235 } 329 }
236 330
237 /* 2) Select the least-used IP/proto combination in the given 331 /* 2) Select the least-used IP/proto combination in the given range */
238 range. */
239 *tuple = *orig_tuple; 332 *tuple = *orig_tuple;
240 find_best_ips_proto(zone, tuple, range, ct, maniptype); 333 find_best_ips_proto(zone, tuple, range, ct, maniptype);
241 334
242 /* 3) The per-protocol part of the manip is made to map into 335 /* 3) The per-protocol part of the manip is made to map into
243 the range to make a unique tuple. */ 336 * the range to make a unique tuple.
244 337 */
245 rcu_read_lock();
246 proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
247 338
248 /* Only bother mapping if it's not already in range and unique */ 339 /* Only bother mapping if it's not already in range and unique */
249 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) { 340 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
250 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 341 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
251 if (proto->in_range(tuple, maniptype, &range->min, 342 if (l4proto->in_range(tuple, maniptype,
252 &range->max) && 343 &range->min_proto,
253 (range->min.all == range->max.all || 344 &range->max_proto) &&
345 (range->min_proto.all == range->max_proto.all ||
254 !nf_nat_used_tuple(tuple, ct))) 346 !nf_nat_used_tuple(tuple, ct)))
255 goto out; 347 goto out;
256 } else if (!nf_nat_used_tuple(tuple, ct)) { 348 } else if (!nf_nat_used_tuple(tuple, ct)) {
@@ -259,14 +351,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
259 } 351 }
260 352
261 /* Last change: get protocol to try to obtain unique tuple. */ 353 /* Last change: get protocol to try to obtain unique tuple. */
262 proto->unique_tuple(tuple, range, maniptype, ct); 354 l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
263out: 355out:
264 rcu_read_unlock(); 356 rcu_read_unlock();
265} 357}
266 358
267unsigned int 359unsigned int
268nf_nat_setup_info(struct nf_conn *ct, 360nf_nat_setup_info(struct nf_conn *ct,
269 const struct nf_nat_ipv4_range *range, 361 const struct nf_nat_range *range,
270 enum nf_nat_manip_type maniptype) 362 enum nf_nat_manip_type maniptype)
271{ 363{
272 struct net *net = nf_ct_net(ct); 364 struct net *net = nf_ct_net(ct);
@@ -288,10 +380,10 @@ nf_nat_setup_info(struct nf_conn *ct,
288 BUG_ON(nf_nat_initialized(ct, maniptype)); 380 BUG_ON(nf_nat_initialized(ct, maniptype));
289 381
290 /* What we've got will look like inverse of reply. Normally 382 /* What we've got will look like inverse of reply. Normally
291 this is what is in the conntrack, except for prior 383 * this is what is in the conntrack, except for prior
292 manipulations (future optimization: if num_manips == 0, 384 * manipulations (future optimization: if num_manips == 0,
293 orig_tp = 385 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
294 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */ 386 */
295 nf_ct_invert_tuplepr(&curr_tuple, 387 nf_ct_invert_tuplepr(&curr_tuple,
296 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 388 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
297 389
@@ -317,11 +409,11 @@ nf_nat_setup_info(struct nf_conn *ct,
317 srchash = hash_by_src(net, nf_ct_zone(ct), 409 srchash = hash_by_src(net, nf_ct_zone(ct),
318 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 410 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
319 spin_lock_bh(&nf_nat_lock); 411 spin_lock_bh(&nf_nat_lock);
320 /* nf_conntrack_alter_reply might re-allocate extension area */ 412 /* nf_conntrack_alter_reply might re-allocate extension aera */
321 nat = nfct_nat(ct); 413 nat = nfct_nat(ct);
322 nat->ct = ct; 414 nat->ct = ct;
323 hlist_add_head_rcu(&nat->bysource, 415 hlist_add_head_rcu(&nat->bysource,
324 &net->ipv4.nat_bysource[srchash]); 416 &net->ct.nat_bysource[srchash]);
325 spin_unlock_bh(&nf_nat_lock); 417 spin_unlock_bh(&nf_nat_lock);
326 } 418 }
327 419
@@ -335,47 +427,14 @@ nf_nat_setup_info(struct nf_conn *ct,
335} 427}
336EXPORT_SYMBOL(nf_nat_setup_info); 428EXPORT_SYMBOL(nf_nat_setup_info);
337 429
338/* Returns true if succeeded. */
339static bool
340manip_pkt(u_int16_t proto,
341 struct sk_buff *skb,
342 unsigned int iphdroff,
343 const struct nf_conntrack_tuple *target,
344 enum nf_nat_manip_type maniptype)
345{
346 struct iphdr *iph;
347 const struct nf_nat_protocol *p;
348
349 if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
350 return false;
351
352 iph = (void *)skb->data + iphdroff;
353
354 /* Manipulate protcol part. */
355
356 /* rcu_read_lock()ed by nf_hook_slow */
357 p = __nf_nat_proto_find(proto);
358 if (!p->manip_pkt(skb, iphdroff, target, maniptype))
359 return false;
360
361 iph = (void *)skb->data + iphdroff;
362
363 if (maniptype == NF_NAT_MANIP_SRC) {
364 csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
365 iph->saddr = target->src.u3.ip;
366 } else {
367 csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
368 iph->daddr = target->dst.u3.ip;
369 }
370 return true;
371}
372
373/* Do packet manipulations according to nf_nat_setup_info. */ 430/* Do packet manipulations according to nf_nat_setup_info. */
374unsigned int nf_nat_packet(struct nf_conn *ct, 431unsigned int nf_nat_packet(struct nf_conn *ct,
375 enum ip_conntrack_info ctinfo, 432 enum ip_conntrack_info ctinfo,
376 unsigned int hooknum, 433 unsigned int hooknum,
377 struct sk_buff *skb) 434 struct sk_buff *skb)
378{ 435{
436 const struct nf_nat_l3proto *l3proto;
437 const struct nf_nat_l4proto *l4proto;
379 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 438 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
380 unsigned long statusbit; 439 unsigned long statusbit;
381 enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum); 440 enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
@@ -396,129 +455,176 @@ unsigned int nf_nat_packet(struct nf_conn *ct,
396 /* We are aiming to look like inverse of other direction. */ 455 /* We are aiming to look like inverse of other direction. */
397 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 456 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
398 457
399 if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype)) 458 l3proto = __nf_nat_l3proto_find(target.src.l3num);
459 l4proto = __nf_nat_l4proto_find(target.src.l3num,
460 target.dst.protonum);
461 if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
400 return NF_DROP; 462 return NF_DROP;
401 } 463 }
402 return NF_ACCEPT; 464 return NF_ACCEPT;
403} 465}
404EXPORT_SYMBOL_GPL(nf_nat_packet); 466EXPORT_SYMBOL_GPL(nf_nat_packet);
405 467
406/* Dir is direction ICMP is coming from (opposite to packet it contains) */ 468struct nf_nat_proto_clean {
407int nf_nat_icmp_reply_translation(struct nf_conn *ct, 469 u8 l3proto;
408 enum ip_conntrack_info ctinfo, 470 u8 l4proto;
409 unsigned int hooknum, 471 bool hash;
410 struct sk_buff *skb) 472};
473
474/* Clear NAT section of all conntracks, in case we're loaded again. */
475static int nf_nat_proto_clean(struct nf_conn *i, void *data)
411{ 476{
412 struct { 477 const struct nf_nat_proto_clean *clean = data;
413 struct icmphdr icmp; 478 struct nf_conn_nat *nat = nfct_nat(i);
414 struct iphdr ip;
415 } *inside;
416 struct nf_conntrack_tuple target;
417 int hdrlen = ip_hdrlen(skb);
418 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
419 unsigned long statusbit;
420 enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
421 479
422 if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) 480 if (!nat)
423 return 0; 481 return 0;
424 482 if (!(i->status & IPS_SRC_NAT_DONE))
425 inside = (void *)skb->data + hdrlen; 483 return 0;
426 484 if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
427 /* We're actually going to mangle it beyond trivial checksum 485 (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
428 adjustment, so make sure the current checksum is correct. */
429 if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
430 return 0; 486 return 0;
431 487
432 /* Must be RELATED */ 488 if (clean->hash) {
433 NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED || 489 spin_lock_bh(&nf_nat_lock);
434 skb->nfctinfo == IP_CT_RELATED_REPLY); 490 hlist_del_rcu(&nat->bysource);
435 491 spin_unlock_bh(&nf_nat_lock);
436 /* Redirects on non-null nats must be dropped, else they'll 492 } else {
437 start talking to each other without our translation, and be 493 memset(nat, 0, sizeof(*nat));
438 confused... --RR */ 494 i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
439 if (inside->icmp.type == ICMP_REDIRECT) { 495 IPS_SEQ_ADJUST);
440 /* If NAT isn't finished, assume it and drop. */
441 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
442 return 0;
443
444 if (ct->status & IPS_NAT_MASK)
445 return 0;
446 } 496 }
497 return 0;
498}
447 499
448 if (manip == NF_NAT_MANIP_SRC) 500static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
449 statusbit = IPS_SRC_NAT; 501{
450 else 502 struct nf_nat_proto_clean clean = {
451 statusbit = IPS_DST_NAT; 503 .l3proto = l3proto,
452 504 .l4proto = l4proto,
453 /* Invert if this is reply dir. */ 505 };
454 if (dir == IP_CT_DIR_REPLY) 506 struct net *net;
455 statusbit ^= IPS_NAT_MASK; 507
456 508 rtnl_lock();
457 if (!(ct->status & statusbit)) 509 /* Step 1 - remove from bysource hash */
458 return 1; 510 clean.hash = true;
459 511 for_each_net(net)
460 pr_debug("icmp_reply_translation: translating error %p manip %u " 512 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
461 "dir %s\n", skb, manip, 513 synchronize_rcu();
462 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
463
464 /* Change inner back to look like incoming packet. We do the
465 opposite manip on this hook to normal, because it might not
466 pass all hooks (locally-generated ICMP). Consider incoming
467 packet: PREROUTING (DST manip), routing produces ICMP, goes
468 through POSTROUTING (which must correct the DST manip). */
469 if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
470 &ct->tuplehash[!dir].tuple, !manip))
471 return 0;
472 514
473 if (skb->ip_summed != CHECKSUM_PARTIAL) { 515 /* Step 2 - clean NAT section */
474 /* Reloading "inside" here since manip_pkt inner. */ 516 clean.hash = false;
475 inside = (void *)skb->data + hdrlen; 517 for_each_net(net)
476 inside->icmp.checksum = 0; 518 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
477 inside->icmp.checksum = 519 rtnl_unlock();
478 csum_fold(skb_checksum(skb, hdrlen, 520}
479 skb->len - hdrlen, 0));
480 }
481 521
482 /* Change outer to look the reply to an incoming packet 522static void nf_nat_l3proto_clean(u8 l3proto)
483 * (proto 0 means don't invert per-proto part). */ 523{
484 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 524 struct nf_nat_proto_clean clean = {
485 if (!manip_pkt(0, skb, 0, &target, manip)) 525 .l3proto = l3proto,
486 return 0; 526 };
527 struct net *net;
528
529 rtnl_lock();
530 /* Step 1 - remove from bysource hash */
531 clean.hash = true;
532 for_each_net(net)
533 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
534 synchronize_rcu();
487 535
488 return 1; 536 /* Step 2 - clean NAT section */
537 clean.hash = false;
538 for_each_net(net)
539 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
540 rtnl_unlock();
489} 541}
490EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
491 542
492/* Protocol registration. */ 543/* Protocol registration. */
493int nf_nat_protocol_register(const struct nf_nat_protocol *proto) 544int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
494{ 545{
546 const struct nf_nat_l4proto **l4protos;
547 unsigned int i;
495 int ret = 0; 548 int ret = 0;
496 549
497 spin_lock_bh(&nf_nat_lock); 550 mutex_lock(&nf_nat_proto_mutex);
551 if (nf_nat_l4protos[l3proto] == NULL) {
552 l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
553 GFP_KERNEL);
554 if (l4protos == NULL) {
555 ret = -ENOMEM;
556 goto out;
557 }
558
559 for (i = 0; i < IPPROTO_MAX; i++)
560 RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);
561
562 /* Before making proto_array visible to lockless readers,
563 * we must make sure its content is committed to memory.
564 */
565 smp_wmb();
566
567 nf_nat_l4protos[l3proto] = l4protos;
568 }
569
498 if (rcu_dereference_protected( 570 if (rcu_dereference_protected(
499 nf_nat_protos[proto->protonum], 571 nf_nat_l4protos[l3proto][l4proto->l4proto],
500 lockdep_is_held(&nf_nat_lock) 572 lockdep_is_held(&nf_nat_proto_mutex)
501 ) != &nf_nat_unknown_protocol) { 573 ) != &nf_nat_l4proto_unknown) {
502 ret = -EBUSY; 574 ret = -EBUSY;
503 goto out; 575 goto out;
504 } 576 }
505 RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto); 577 RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
506 out: 578 out:
507 spin_unlock_bh(&nf_nat_lock); 579 mutex_unlock(&nf_nat_proto_mutex);
508 return ret; 580 return ret;
509} 581}
510EXPORT_SYMBOL(nf_nat_protocol_register); 582EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
511 583
512/* No one stores the protocol anywhere; simply delete it. */ 584/* No one stores the protocol anywhere; simply delete it. */
513void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto) 585void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
514{ 586{
515 spin_lock_bh(&nf_nat_lock); 587 mutex_lock(&nf_nat_proto_mutex);
516 RCU_INIT_POINTER(nf_nat_protos[proto->protonum], 588 RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
517 &nf_nat_unknown_protocol); 589 &nf_nat_l4proto_unknown);
518 spin_unlock_bh(&nf_nat_lock); 590 mutex_unlock(&nf_nat_proto_mutex);
519 synchronize_rcu(); 591 synchronize_rcu();
592
593 nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
594}
595EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
596
597int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
598{
599 int err;
600
601 err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
602 if (err < 0)
603 return err;
604
605 mutex_lock(&nf_nat_proto_mutex);
606 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
607 &nf_nat_l4proto_tcp);
608 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
609 &nf_nat_l4proto_udp);
610 mutex_unlock(&nf_nat_proto_mutex);
611
612 RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
613 return 0;
614}
615EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);
616
617void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
618{
619 mutex_lock(&nf_nat_proto_mutex);
620 RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
621 mutex_unlock(&nf_nat_proto_mutex);
622 synchronize_rcu();
623
624 nf_nat_l3proto_clean(l3proto->l3proto);
625 nf_ct_l3proto_module_put(l3proto->l3proto);
520} 626}
521EXPORT_SYMBOL(nf_nat_protocol_unregister); 627EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
522 628
523/* No one using conntrack by the time this called. */ 629/* No one using conntrack by the time this called. */
524static void nf_nat_cleanup_conntrack(struct nf_conn *ct) 630static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
@@ -570,34 +676,36 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
570 676
571static int nfnetlink_parse_nat_proto(struct nlattr *attr, 677static int nfnetlink_parse_nat_proto(struct nlattr *attr,
572 const struct nf_conn *ct, 678 const struct nf_conn *ct,
573 struct nf_nat_ipv4_range *range) 679 struct nf_nat_range *range)
574{ 680{
575 struct nlattr *tb[CTA_PROTONAT_MAX+1]; 681 struct nlattr *tb[CTA_PROTONAT_MAX+1];
576 const struct nf_nat_protocol *npt; 682 const struct nf_nat_l4proto *l4proto;
577 int err; 683 int err;
578 684
579 err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy); 685 err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
580 if (err < 0) 686 if (err < 0)
581 return err; 687 return err;
582 688
583 rcu_read_lock(); 689 l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
584 npt = __nf_nat_proto_find(nf_ct_protonum(ct)); 690 if (l4proto->nlattr_to_range)
585 if (npt->nlattr_to_range) 691 err = l4proto->nlattr_to_range(tb, range);
586 err = npt->nlattr_to_range(tb, range); 692
587 rcu_read_unlock();
588 return err; 693 return err;
589} 694}
590 695
591static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { 696static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
592 [CTA_NAT_MINIP] = { .type = NLA_U32 }, 697 [CTA_NAT_V4_MINIP] = { .type = NLA_U32 },
593 [CTA_NAT_MAXIP] = { .type = NLA_U32 }, 698 [CTA_NAT_V4_MAXIP] = { .type = NLA_U32 },
699 [CTA_NAT_V6_MINIP] = { .len = sizeof(struct in6_addr) },
700 [CTA_NAT_V6_MAXIP] = { .len = sizeof(struct in6_addr) },
594 [CTA_NAT_PROTO] = { .type = NLA_NESTED }, 701 [CTA_NAT_PROTO] = { .type = NLA_NESTED },
595}; 702};
596 703
597static int 704static int
598nfnetlink_parse_nat(const struct nlattr *nat, 705nfnetlink_parse_nat(const struct nlattr *nat,
599 const struct nf_conn *ct, struct nf_nat_ipv4_range *range) 706 const struct nf_conn *ct, struct nf_nat_range *range)
600{ 707{
708 const struct nf_nat_l3proto *l3proto;
601 struct nlattr *tb[CTA_NAT_MAX+1]; 709 struct nlattr *tb[CTA_NAT_MAX+1];
602 int err; 710 int err;
603 711
@@ -607,25 +715,23 @@ nfnetlink_parse_nat(const struct nlattr *nat,
607 if (err < 0) 715 if (err < 0)
608 return err; 716 return err;
609 717
610 if (tb[CTA_NAT_MINIP]) 718 rcu_read_lock();
611 range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]); 719 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
612 720 if (l3proto == NULL) {
613 if (!tb[CTA_NAT_MAXIP]) 721 err = -EAGAIN;
614 range->max_ip = range->min_ip; 722 goto out;
615 else 723 }
616 range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]); 724 err = l3proto->nlattr_to_range(tb, range);
617 725 if (err < 0)
618 if (range->min_ip) 726 goto out;
619 range->flags |= NF_NAT_RANGE_MAP_IPS;
620 727
621 if (!tb[CTA_NAT_PROTO]) 728 if (!tb[CTA_NAT_PROTO])
622 return 0; 729 goto out;
623 730
624 err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); 731 err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
625 if (err < 0) 732out:
626 return err; 733 rcu_read_unlock();
627 734 return err;
628 return 0;
629} 735}
630 736
631static int 737static int
@@ -633,10 +739,12 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
633 enum nf_nat_manip_type manip, 739 enum nf_nat_manip_type manip,
634 const struct nlattr *attr) 740 const struct nlattr *attr)
635{ 741{
636 struct nf_nat_ipv4_range range; 742 struct nf_nat_range range;
743 int err;
637 744
638 if (nfnetlink_parse_nat(attr, ct, &range) < 0) 745 err = nfnetlink_parse_nat(attr, ct, &range);
639 return -EINVAL; 746 if (err < 0)
747 return err;
640 if (nf_nat_initialized(ct, manip)) 748 if (nf_nat_initialized(ct, manip))
641 return -EEXIST; 749 return -EEXIST;
642 750
@@ -655,30 +763,20 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
655static int __net_init nf_nat_net_init(struct net *net) 763static int __net_init nf_nat_net_init(struct net *net)
656{ 764{
657 /* Leave them the same for the moment. */ 765 /* Leave them the same for the moment. */
658 net->ipv4.nat_htable_size = net->ct.htable_size; 766 net->ct.nat_htable_size = net->ct.htable_size;
659 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0); 767 net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
660 if (!net->ipv4.nat_bysource) 768 if (!net->ct.nat_bysource)
661 return -ENOMEM; 769 return -ENOMEM;
662 return 0; 770 return 0;
663} 771}
664 772
665/* Clear NAT section of all conntracks, in case we're loaded again. */
666static int clean_nat(struct nf_conn *i, void *data)
667{
668 struct nf_conn_nat *nat = nfct_nat(i);
669
670 if (!nat)
671 return 0;
672 memset(nat, 0, sizeof(*nat));
673 i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
674 return 0;
675}
676
677static void __net_exit nf_nat_net_exit(struct net *net) 773static void __net_exit nf_nat_net_exit(struct net *net)
678{ 774{
679 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 775 struct nf_nat_proto_clean clean = {};
776
777 nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
680 synchronize_rcu(); 778 synchronize_rcu();
681 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size); 779 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
682} 780}
683 781
684static struct pernet_operations nf_nat_net_ops = { 782static struct pernet_operations nf_nat_net_ops = {
@@ -697,11 +795,8 @@ static struct nfq_ct_nat_hook nfq_ct_nat = {
697 795
698static int __init nf_nat_init(void) 796static int __init nf_nat_init(void)
699{ 797{
700 size_t i;
701 int ret; 798 int ret;
702 799
703 need_ipv4_conntrack();
704
705 ret = nf_ct_extend_register(&nat_extend); 800 ret = nf_ct_extend_register(&nat_extend);
706 if (ret < 0) { 801 if (ret < 0) {
707 printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); 802 printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
@@ -712,22 +807,11 @@ static int __init nf_nat_init(void)
712 if (ret < 0) 807 if (ret < 0)
713 goto cleanup_extend; 808 goto cleanup_extend;
714 809
715 /* Sew in builtin protocols. */ 810 nf_ct_helper_expectfn_register(&follow_master_nat);
716 spin_lock_bh(&nf_nat_lock);
717 for (i = 0; i < MAX_IP_NAT_PROTO; i++)
718 RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
719 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
720 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
721 RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
722 spin_unlock_bh(&nf_nat_lock);
723 811
724 /* Initialize fake conntrack so that NAT will skip it */ 812 /* Initialize fake conntrack so that NAT will skip it */
725 nf_ct_untracked_status_or(IPS_NAT_DONE_MASK); 813 nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
726 814
727 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
728
729 nf_ct_helper_expectfn_register(&follow_master_nat);
730
731 BUG_ON(nf_nat_seq_adjust_hook != NULL); 815 BUG_ON(nf_nat_seq_adjust_hook != NULL);
732 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); 816 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
733 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 817 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -736,6 +820,10 @@ static int __init nf_nat_init(void)
736 BUG_ON(nf_ct_nat_offset != NULL); 820 BUG_ON(nf_ct_nat_offset != NULL);
737 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset); 821 RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
738 RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat); 822 RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
823#ifdef CONFIG_XFRM
824 BUG_ON(nf_nat_decode_session_hook != NULL);
825 RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
826#endif
739 return 0; 827 return 0;
740 828
741 cleanup_extend: 829 cleanup_extend:
@@ -745,19 +833,24 @@ static int __init nf_nat_init(void)
745 833
746static void __exit nf_nat_cleanup(void) 834static void __exit nf_nat_cleanup(void)
747{ 835{
836 unsigned int i;
837
748 unregister_pernet_subsys(&nf_nat_net_ops); 838 unregister_pernet_subsys(&nf_nat_net_ops);
749 nf_ct_l3proto_put(l3proto);
750 nf_ct_extend_unregister(&nat_extend); 839 nf_ct_extend_unregister(&nat_extend);
751 nf_ct_helper_expectfn_unregister(&follow_master_nat); 840 nf_ct_helper_expectfn_unregister(&follow_master_nat);
752 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); 841 RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
753 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); 842 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
754 RCU_INIT_POINTER(nf_ct_nat_offset, NULL); 843 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
755 RCU_INIT_POINTER(nfq_ct_nat_hook, NULL); 844 RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
845#ifdef CONFIG_XFRM
846 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
847#endif
848 for (i = 0; i < NFPROTO_NUMPROTO; i++)
849 kfree(nf_nat_l4protos[i]);
756 synchronize_net(); 850 synchronize_net();
757} 851}
758 852
759MODULE_LICENSE("GPL"); 853MODULE_LICENSE("GPL");
760MODULE_ALIAS("nf-nat-ipv4");
761 854
762module_init(nf_nat_init); 855module_init(nf_nat_init);
763module_exit(nf_nat_cleanup); 856module_exit(nf_nat_cleanup);
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
index e462a957d080..e839b97b2863 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/netfilter/nf_nat_ftp.c
@@ -10,12 +10,11 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
13#include <linux/ip.h> 13#include <linux/inet.h>
14#include <linux/tcp.h> 14#include <linux/tcp.h>
15#include <linux/netfilter_ipv4.h> 15#include <linux/netfilter_ipv4.h>
16#include <net/netfilter/nf_nat.h> 16#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_helper.h> 17#include <net/netfilter/nf_nat_helper.h>
18#include <net/netfilter/nf_nat_rule.h>
19#include <net/netfilter/nf_conntrack_helper.h> 18#include <net/netfilter/nf_conntrack_helper.h>
20#include <net/netfilter/nf_conntrack_expect.h> 19#include <net/netfilter/nf_conntrack_expect.h>
21#include <linux/netfilter/nf_conntrack_ftp.h> 20#include <linux/netfilter/nf_conntrack_ftp.h>
@@ -27,22 +26,27 @@ MODULE_ALIAS("ip_nat_ftp");
27 26
28/* FIXME: Time out? --RR */ 27/* FIXME: Time out? --RR */
29 28
30static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type, 29static int nf_nat_ftp_fmt_cmd(struct nf_conn *ct, enum nf_ct_ftp_type type,
31 char *buffer, size_t buflen, 30 char *buffer, size_t buflen,
32 __be32 addr, u16 port) 31 union nf_inet_addr *addr, u16 port)
33{ 32{
34 switch (type) { 33 switch (type) {
35 case NF_CT_FTP_PORT: 34 case NF_CT_FTP_PORT:
36 case NF_CT_FTP_PASV: 35 case NF_CT_FTP_PASV:
37 return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u", 36 return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
38 ((unsigned char *)&addr)[0], 37 ((unsigned char *)&addr->ip)[0],
39 ((unsigned char *)&addr)[1], 38 ((unsigned char *)&addr->ip)[1],
40 ((unsigned char *)&addr)[2], 39 ((unsigned char *)&addr->ip)[2],
41 ((unsigned char *)&addr)[3], 40 ((unsigned char *)&addr->ip)[3],
42 port >> 8, 41 port >> 8,
43 port & 0xFF); 42 port & 0xFF);
44 case NF_CT_FTP_EPRT: 43 case NF_CT_FTP_EPRT:
45 return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port); 44 if (nf_ct_l3num(ct) == NFPROTO_IPV4)
45 return snprintf(buffer, buflen, "|1|%pI4|%u|",
46 &addr->ip, port);
47 else
48 return snprintf(buffer, buflen, "|2|%pI6|%u|",
49 &addr->ip6, port);
46 case NF_CT_FTP_EPSV: 50 case NF_CT_FTP_EPSV:
47 return snprintf(buffer, buflen, "|||%u|", port); 51 return snprintf(buffer, buflen, "|||%u|", port);
48 } 52 }
@@ -55,21 +59,22 @@ static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
55static unsigned int nf_nat_ftp(struct sk_buff *skb, 59static unsigned int nf_nat_ftp(struct sk_buff *skb,
56 enum ip_conntrack_info ctinfo, 60 enum ip_conntrack_info ctinfo,
57 enum nf_ct_ftp_type type, 61 enum nf_ct_ftp_type type,
62 unsigned int protoff,
58 unsigned int matchoff, 63 unsigned int matchoff,
59 unsigned int matchlen, 64 unsigned int matchlen,
60 struct nf_conntrack_expect *exp) 65 struct nf_conntrack_expect *exp)
61{ 66{
62 __be32 newip; 67 union nf_inet_addr newaddr;
63 u_int16_t port; 68 u_int16_t port;
64 int dir = CTINFO2DIR(ctinfo); 69 int dir = CTINFO2DIR(ctinfo);
65 struct nf_conn *ct = exp->master; 70 struct nf_conn *ct = exp->master;
66 char buffer[sizeof("|1|255.255.255.255|65535|")]; 71 char buffer[sizeof("|1||65535|") + INET6_ADDRSTRLEN];
67 unsigned int buflen; 72 unsigned int buflen;
68 73
69 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); 74 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
70 75
71 /* Connection will come from wherever this packet goes, hence !dir */ 76 /* Connection will come from wherever this packet goes, hence !dir */
72 newip = ct->tuplehash[!dir].tuple.dst.u3.ip; 77 newaddr = ct->tuplehash[!dir].tuple.dst.u3;
73 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; 78 exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
74 exp->dir = !dir; 79 exp->dir = !dir;
75 80
@@ -94,13 +99,14 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
94 if (port == 0) 99 if (port == 0)
95 return NF_DROP; 100 return NF_DROP;
96 101
97 buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port); 102 buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer),
103 &newaddr, port);
98 if (!buflen) 104 if (!buflen)
99 goto out; 105 goto out;
100 106
101 pr_debug("calling nf_nat_mangle_tcp_packet\n"); 107 pr_debug("calling nf_nat_mangle_tcp_packet\n");
102 108
103 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 109 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
104 matchlen, buffer, buflen)) 110 matchlen, buffer, buflen))
105 goto out; 111 goto out;
106 112
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 2e59ad0b90ca..23c2b38676a6 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -1,4 +1,4 @@
1/* ip_nat_helper.c - generic support functions for NAT helpers 1/* nf_nat_helper.c - generic support functions for NAT helpers
2 * 2 *
3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org> 3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org> 4 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
@@ -9,23 +9,19 @@
9 */ 9 */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12#include <linux/kmod.h>
13#include <linux/types.h> 12#include <linux/types.h>
14#include <linux/timer.h>
15#include <linux/skbuff.h> 13#include <linux/skbuff.h>
16#include <linux/tcp.h> 14#include <linux/tcp.h>
17#include <linux/udp.h> 15#include <linux/udp.h>
18#include <net/checksum.h>
19#include <net/tcp.h> 16#include <net/tcp.h>
20#include <net/route.h>
21 17
22#include <linux/netfilter_ipv4.h>
23#include <net/netfilter/nf_conntrack.h> 18#include <net/netfilter/nf_conntrack.h>
24#include <net/netfilter/nf_conntrack_helper.h> 19#include <net/netfilter/nf_conntrack_helper.h>
25#include <net/netfilter/nf_conntrack_ecache.h> 20#include <net/netfilter/nf_conntrack_ecache.h>
26#include <net/netfilter/nf_conntrack_expect.h> 21#include <net/netfilter/nf_conntrack_expect.h>
27#include <net/netfilter/nf_nat.h> 22#include <net/netfilter/nf_nat.h>
28#include <net/netfilter/nf_nat_protocol.h> 23#include <net/netfilter/nf_nat_l3proto.h>
24#include <net/netfilter/nf_nat_l4proto.h>
29#include <net/netfilter/nf_nat_core.h> 25#include <net/netfilter/nf_nat_core.h>
30#include <net/netfilter/nf_nat_helper.h> 26#include <net/netfilter/nf_nat_helper.h>
31 27
@@ -90,7 +86,6 @@ s16 nf_nat_get_offset(const struct nf_conn *ct,
90 86
91 return offset; 87 return offset;
92} 88}
93EXPORT_SYMBOL_GPL(nf_nat_get_offset);
94 89
95/* Frobs data inside this packet, which is linear. */ 90/* Frobs data inside this packet, which is linear. */
96static void mangle_contents(struct sk_buff *skb, 91static void mangle_contents(struct sk_buff *skb,
@@ -125,9 +120,13 @@ static void mangle_contents(struct sk_buff *skb,
125 __skb_trim(skb, skb->len + rep_len - match_len); 120 __skb_trim(skb, skb->len + rep_len - match_len);
126 } 121 }
127 122
128 /* fix IP hdr checksum information */ 123 if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
129 ip_hdr(skb)->tot_len = htons(skb->len); 124 /* fix IP hdr checksum information */
130 ip_send_check(ip_hdr(skb)); 125 ip_hdr(skb)->tot_len = htons(skb->len);
126 ip_send_check(ip_hdr(skb));
127 } else
128 ipv6_hdr(skb)->payload_len =
129 htons(skb->len - sizeof(struct ipv6hdr));
131} 130}
132 131
133/* Unusual, but possible case. */ 132/* Unusual, but possible case. */
@@ -166,35 +165,6 @@ void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
166} 165}
167EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust); 166EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
168 167
169static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
170 int datalen, __sum16 *check, int oldlen)
171{
172 struct rtable *rt = skb_rtable(skb);
173
174 if (skb->ip_summed != CHECKSUM_PARTIAL) {
175 if (!(rt->rt_flags & RTCF_LOCAL) &&
176 (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
177 skb->ip_summed = CHECKSUM_PARTIAL;
178 skb->csum_start = skb_headroom(skb) +
179 skb_network_offset(skb) +
180 iph->ihl * 4;
181 skb->csum_offset = (void *)check - data;
182 *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
183 datalen, iph->protocol, 0);
184 } else {
185 *check = 0;
186 *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
187 datalen, iph->protocol,
188 csum_partial(data, datalen,
189 0));
190 if (iph->protocol == IPPROTO_UDP && !*check)
191 *check = CSUM_MANGLED_0;
192 }
193 } else
194 inet_proto_csum_replace2(check, skb,
195 htons(oldlen), htons(datalen), 1);
196}
197
198/* Generic function for mangling variable-length address changes inside 168/* Generic function for mangling variable-length address changes inside
199 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX 169 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
200 * command in FTP). 170 * command in FTP).
@@ -206,12 +176,13 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
206int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, 176int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
207 struct nf_conn *ct, 177 struct nf_conn *ct,
208 enum ip_conntrack_info ctinfo, 178 enum ip_conntrack_info ctinfo,
179 unsigned int protoff,
209 unsigned int match_offset, 180 unsigned int match_offset,
210 unsigned int match_len, 181 unsigned int match_len,
211 const char *rep_buffer, 182 const char *rep_buffer,
212 unsigned int rep_len, bool adjust) 183 unsigned int rep_len, bool adjust)
213{ 184{
214 struct iphdr *iph; 185 const struct nf_nat_l3proto *l3proto;
215 struct tcphdr *tcph; 186 struct tcphdr *tcph;
216 int oldlen, datalen; 187 int oldlen, datalen;
217 188
@@ -225,15 +196,17 @@ int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
225 196
226 SKB_LINEAR_ASSERT(skb); 197 SKB_LINEAR_ASSERT(skb);
227 198
228 iph = ip_hdr(skb); 199 tcph = (void *)skb->data + protoff;
229 tcph = (void *)iph + iph->ihl*4;
230 200
231 oldlen = skb->len - iph->ihl*4; 201 oldlen = skb->len - protoff;
232 mangle_contents(skb, iph->ihl*4 + tcph->doff*4, 202 mangle_contents(skb, protoff + tcph->doff*4,
233 match_offset, match_len, rep_buffer, rep_len); 203 match_offset, match_len, rep_buffer, rep_len);
234 204
235 datalen = skb->len - iph->ihl*4; 205 datalen = skb->len - protoff;
236 nf_nat_csum(skb, iph, tcph, datalen, &tcph->check, oldlen); 206
207 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
208 l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check,
209 datalen, oldlen);
237 210
238 if (adjust && rep_len != match_len) 211 if (adjust && rep_len != match_len)
239 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq, 212 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
@@ -257,12 +230,13 @@ int
257nf_nat_mangle_udp_packet(struct sk_buff *skb, 230nf_nat_mangle_udp_packet(struct sk_buff *skb,
258 struct nf_conn *ct, 231 struct nf_conn *ct,
259 enum ip_conntrack_info ctinfo, 232 enum ip_conntrack_info ctinfo,
233 unsigned int protoff,
260 unsigned int match_offset, 234 unsigned int match_offset,
261 unsigned int match_len, 235 unsigned int match_len,
262 const char *rep_buffer, 236 const char *rep_buffer,
263 unsigned int rep_len) 237 unsigned int rep_len)
264{ 238{
265 struct iphdr *iph; 239 const struct nf_nat_l3proto *l3proto;
266 struct udphdr *udph; 240 struct udphdr *udph;
267 int datalen, oldlen; 241 int datalen, oldlen;
268 242
@@ -274,22 +248,23 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
274 !enlarge_skb(skb, rep_len - match_len)) 248 !enlarge_skb(skb, rep_len - match_len))
275 return 0; 249 return 0;
276 250
277 iph = ip_hdr(skb); 251 udph = (void *)skb->data + protoff;
278 udph = (void *)iph + iph->ihl*4;
279 252
280 oldlen = skb->len - iph->ihl*4; 253 oldlen = skb->len - protoff;
281 mangle_contents(skb, iph->ihl*4 + sizeof(*udph), 254 mangle_contents(skb, protoff + sizeof(*udph),
282 match_offset, match_len, rep_buffer, rep_len); 255 match_offset, match_len, rep_buffer, rep_len);
283 256
284 /* update the length of the UDP packet */ 257 /* update the length of the UDP packet */
285 datalen = skb->len - iph->ihl*4; 258 datalen = skb->len - protoff;
286 udph->len = htons(datalen); 259 udph->len = htons(datalen);
287 260
288 /* fix udp checksum if udp checksum was previously calculated */ 261 /* fix udp checksum if udp checksum was previously calculated */
289 if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL) 262 if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
290 return 1; 263 return 1;
291 264
292 nf_nat_csum(skb, iph, udph, datalen, &udph->check, oldlen); 265 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
266 l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check,
267 datalen, oldlen);
293 268
294 return 1; 269 return 1;
295} 270}
@@ -341,6 +316,7 @@ sack_adjust(struct sk_buff *skb,
341/* TCP SACK sequence number adjustment */ 316/* TCP SACK sequence number adjustment */
342static inline unsigned int 317static inline unsigned int
343nf_nat_sack_adjust(struct sk_buff *skb, 318nf_nat_sack_adjust(struct sk_buff *skb,
319 unsigned int protoff,
344 struct tcphdr *tcph, 320 struct tcphdr *tcph,
345 struct nf_conn *ct, 321 struct nf_conn *ct,
346 enum ip_conntrack_info ctinfo) 322 enum ip_conntrack_info ctinfo)
@@ -348,8 +324,8 @@ nf_nat_sack_adjust(struct sk_buff *skb,
348 unsigned int dir, optoff, optend; 324 unsigned int dir, optoff, optend;
349 struct nf_conn_nat *nat = nfct_nat(ct); 325 struct nf_conn_nat *nat = nfct_nat(ct);
350 326
351 optoff = ip_hdrlen(skb) + sizeof(struct tcphdr); 327 optoff = protoff + sizeof(struct tcphdr);
352 optend = ip_hdrlen(skb) + tcph->doff * 4; 328 optend = protoff + tcph->doff * 4;
353 329
354 if (!skb_make_writable(skb, optend)) 330 if (!skb_make_writable(skb, optend))
355 return 0; 331 return 0;
@@ -387,7 +363,8 @@ nf_nat_sack_adjust(struct sk_buff *skb,
387int 363int
388nf_nat_seq_adjust(struct sk_buff *skb, 364nf_nat_seq_adjust(struct sk_buff *skb,
389 struct nf_conn *ct, 365 struct nf_conn *ct,
390 enum ip_conntrack_info ctinfo) 366 enum ip_conntrack_info ctinfo,
367 unsigned int protoff)
391{ 368{
392 struct tcphdr *tcph; 369 struct tcphdr *tcph;
393 int dir; 370 int dir;
@@ -401,10 +378,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
401 this_way = &nat->seq[dir]; 378 this_way = &nat->seq[dir];
402 other_way = &nat->seq[!dir]; 379 other_way = &nat->seq[!dir];
403 380
404 if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph))) 381 if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
405 return 0; 382 return 0;
406 383
407 tcph = (void *)skb->data + ip_hdrlen(skb); 384 tcph = (void *)skb->data + protoff;
408 if (after(ntohl(tcph->seq), this_way->correction_pos)) 385 if (after(ntohl(tcph->seq), this_way->correction_pos))
409 seqoff = this_way->offset_after; 386 seqoff = this_way->offset_after;
410 else 387 else
@@ -429,7 +406,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
429 tcph->seq = newseq; 406 tcph->seq = newseq;
430 tcph->ack_seq = newack; 407 tcph->ack_seq = newack;
431 408
432 return nf_nat_sack_adjust(skb, tcph, ct, ctinfo); 409 return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
433} 410}
434 411
435/* Setup NAT on this expected conntrack so it follows master. */ 412/* Setup NAT on this expected conntrack so it follows master. */
@@ -437,22 +414,22 @@ nf_nat_seq_adjust(struct sk_buff *skb,
437void nf_nat_follow_master(struct nf_conn *ct, 414void nf_nat_follow_master(struct nf_conn *ct,
438 struct nf_conntrack_expect *exp) 415 struct nf_conntrack_expect *exp)
439{ 416{
440 struct nf_nat_ipv4_range range; 417 struct nf_nat_range range;
441 418
442 /* This must be a fresh one. */ 419 /* This must be a fresh one. */
443 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 420 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
444 421
445 /* Change src to where master sends to */ 422 /* Change src to where master sends to */
446 range.flags = NF_NAT_RANGE_MAP_IPS; 423 range.flags = NF_NAT_RANGE_MAP_IPS;
447 range.min_ip = range.max_ip 424 range.min_addr = range.max_addr
448 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; 425 = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
449 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 426 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
450 427
451 /* For DST manip, map port here to where it's expected. */ 428 /* For DST manip, map port here to where it's expected. */
452 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 429 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
453 range.min = range.max = exp->saved_proto; 430 range.min_proto = range.max_proto = exp->saved_proto;
454 range.min_ip = range.max_ip 431 range.min_addr = range.max_addr
455 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; 432 = ct->master->tuplehash[!exp->dir].tuple.src.u3;
456 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 433 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
457} 434}
458EXPORT_SYMBOL(nf_nat_follow_master); 435EXPORT_SYMBOL(nf_nat_follow_master);
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index 979ae165f4ef..1fedee6e7fb6 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -17,7 +17,6 @@
17 17
18#include <net/netfilter/nf_nat.h> 18#include <net/netfilter/nf_nat.h>
19#include <net/netfilter/nf_nat_helper.h> 19#include <net/netfilter/nf_nat_helper.h>
20#include <net/netfilter/nf_nat_rule.h>
21#include <net/netfilter/nf_conntrack_helper.h> 20#include <net/netfilter/nf_conntrack_helper.h>
22#include <net/netfilter/nf_conntrack_expect.h> 21#include <net/netfilter/nf_conntrack_expect.h>
23#include <linux/netfilter/nf_conntrack_irc.h> 22#include <linux/netfilter/nf_conntrack_irc.h>
@@ -29,12 +28,12 @@ MODULE_ALIAS("ip_nat_irc");
29 28
30static unsigned int help(struct sk_buff *skb, 29static unsigned int help(struct sk_buff *skb,
31 enum ip_conntrack_info ctinfo, 30 enum ip_conntrack_info ctinfo,
31 unsigned int protoff,
32 unsigned int matchoff, 32 unsigned int matchoff,
33 unsigned int matchlen, 33 unsigned int matchlen,
34 struct nf_conntrack_expect *exp) 34 struct nf_conntrack_expect *exp)
35{ 35{
36 char buffer[sizeof("4294967296 65635")]; 36 char buffer[sizeof("4294967296 65635")];
37 u_int32_t ip;
38 u_int16_t port; 37 u_int16_t port;
39 unsigned int ret; 38 unsigned int ret;
40 39
@@ -60,13 +59,8 @@ static unsigned int help(struct sk_buff *skb,
60 if (port == 0) 59 if (port == 0)
61 return NF_DROP; 60 return NF_DROP;
62 61
63 ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
64 sprintf(buffer, "%u %u", ip, port);
65 pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
66 buffer, &ip, port);
67
68 ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, 62 ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
69 matchoff, matchlen, buffer, 63 protoff, matchoff, matchlen, buffer,
70 strlen(buffer)); 64 strlen(buffer));
71 if (ret != NF_ACCEPT) 65 if (ret != NF_ACCEPT)
72 nf_ct_unexpect_related(exp); 66 nf_ct_unexpect_related(exp);
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index 9993bc93e102..9baaf734c142 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -9,20 +9,18 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/random.h> 11#include <linux/random.h>
12#include <linux/ip.h>
13
14#include <linux/netfilter.h> 12#include <linux/netfilter.h>
15#include <linux/export.h> 13#include <linux/export.h>
16#include <net/secure_seq.h> 14
17#include <net/netfilter/nf_nat.h> 15#include <net/netfilter/nf_nat.h>
18#include <net/netfilter/nf_nat_core.h> 16#include <net/netfilter/nf_nat_core.h>
19#include <net/netfilter/nf_nat_rule.h> 17#include <net/netfilter/nf_nat_l3proto.h>
20#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_l4proto.h>
21 19
22bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, 20bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
23 enum nf_nat_manip_type maniptype, 21 enum nf_nat_manip_type maniptype,
24 const union nf_conntrack_man_proto *min, 22 const union nf_conntrack_man_proto *min,
25 const union nf_conntrack_man_proto *max) 23 const union nf_conntrack_man_proto *max)
26{ 24{
27 __be16 port; 25 __be16 port;
28 26
@@ -34,13 +32,14 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
34 return ntohs(port) >= ntohs(min->all) && 32 return ntohs(port) >= ntohs(min->all) &&
35 ntohs(port) <= ntohs(max->all); 33 ntohs(port) <= ntohs(max->all);
36} 34}
37EXPORT_SYMBOL_GPL(nf_nat_proto_in_range); 35EXPORT_SYMBOL_GPL(nf_nat_l4proto_in_range);
38 36
39void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, 37void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
40 const struct nf_nat_ipv4_range *range, 38 struct nf_conntrack_tuple *tuple,
41 enum nf_nat_manip_type maniptype, 39 const struct nf_nat_range *range,
42 const struct nf_conn *ct, 40 enum nf_nat_manip_type maniptype,
43 u_int16_t *rover) 41 const struct nf_conn *ct,
42 u16 *rover)
44{ 43{
45 unsigned int range_size, min, i; 44 unsigned int range_size, min, i;
46 __be16 *portptr; 45 __be16 *portptr;
@@ -71,15 +70,14 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
71 range_size = 65535 - 1024 + 1; 70 range_size = 65535 - 1024 + 1;
72 } 71 }
73 } else { 72 } else {
74 min = ntohs(range->min.all); 73 min = ntohs(range->min_proto.all);
75 range_size = ntohs(range->max.all) - min + 1; 74 range_size = ntohs(range->max_proto.all) - min + 1;
76 } 75 }
77 76
78 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) 77 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
79 off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip, 78 off = l3proto->secure_port(tuple, maniptype == NF_NAT_MANIP_SRC
80 maniptype == NF_NAT_MANIP_SRC 79 ? tuple->dst.u.all
81 ? tuple->dst.u.all 80 : tuple->src.u.all);
82 : tuple->src.u.all);
83 else 81 else
84 off = *rover; 82 off = *rover;
85 83
@@ -93,22 +91,22 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
93 } 91 }
94 return; 92 return;
95} 93}
96EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple); 94EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
97 95
98#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 96#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
99int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], 97int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
100 struct nf_nat_ipv4_range *range) 98 struct nf_nat_range *range)
101{ 99{
102 if (tb[CTA_PROTONAT_PORT_MIN]) { 100 if (tb[CTA_PROTONAT_PORT_MIN]) {
103 range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]); 101 range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
104 range->max.all = range->min.tcp.port; 102 range->max_proto.all = range->min_proto.all;
105 range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 103 range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
106 } 104 }
107 if (tb[CTA_PROTONAT_PORT_MAX]) { 105 if (tb[CTA_PROTONAT_PORT_MAX]) {
108 range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]); 106 range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
109 range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 107 range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
110 } 108 }
111 return 0; 109 return 0;
112} 110}
113EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range); 111EXPORT_SYMBOL_GPL(nf_nat_l4proto_nlattr_to_range);
114#endif 112#endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
index 3f67138d187c..c8be2cdac0bf 100644
--- a/net/ipv4/netfilter/nf_nat_proto_dccp.c
+++ b/net/netfilter/nf_nat_proto_dccp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * DCCP NAT protocol helper 2 * DCCP NAT protocol helper
3 * 3 *
4 * Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net> 4 * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -13,35 +13,34 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/ip.h>
17#include <linux/dccp.h> 16#include <linux/dccp.h>
18 17
19#include <net/netfilter/nf_conntrack.h> 18#include <net/netfilter/nf_conntrack.h>
20#include <net/netfilter/nf_nat.h> 19#include <net/netfilter/nf_nat.h>
21#include <net/netfilter/nf_nat_protocol.h> 20#include <net/netfilter/nf_nat_l3proto.h>
21#include <net/netfilter/nf_nat_l4proto.h>
22 22
23static u_int16_t dccp_port_rover; 23static u_int16_t dccp_port_rover;
24 24
25static void 25static void
26dccp_unique_tuple(struct nf_conntrack_tuple *tuple, 26dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
27 const struct nf_nat_ipv4_range *range, 27 struct nf_conntrack_tuple *tuple,
28 const struct nf_nat_range *range,
28 enum nf_nat_manip_type maniptype, 29 enum nf_nat_manip_type maniptype,
29 const struct nf_conn *ct) 30 const struct nf_conn *ct)
30{ 31{
31 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 32 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
32 &dccp_port_rover); 33 &dccp_port_rover);
33} 34}
34 35
35static bool 36static bool
36dccp_manip_pkt(struct sk_buff *skb, 37dccp_manip_pkt(struct sk_buff *skb,
37 unsigned int iphdroff, 38 const struct nf_nat_l3proto *l3proto,
39 unsigned int iphdroff, unsigned int hdroff,
38 const struct nf_conntrack_tuple *tuple, 40 const struct nf_conntrack_tuple *tuple,
39 enum nf_nat_manip_type maniptype) 41 enum nf_nat_manip_type maniptype)
40{ 42{
41 const struct iphdr *iph = (const void *)(skb->data + iphdroff);
42 struct dccp_hdr *hdr; 43 struct dccp_hdr *hdr;
43 unsigned int hdroff = iphdroff + iph->ihl * 4;
44 __be32 oldip, newip;
45 __be16 *portptr, oldport, newport; 44 __be16 *portptr, oldport, newport;
46 int hdrsize = 8; /* DCCP connection tracking guarantees this much */ 45 int hdrsize = 8; /* DCCP connection tracking guarantees this much */
47 46
@@ -51,17 +50,12 @@ dccp_manip_pkt(struct sk_buff *skb,
51 if (!skb_make_writable(skb, hdroff + hdrsize)) 50 if (!skb_make_writable(skb, hdroff + hdrsize))
52 return false; 51 return false;
53 52
54 iph = (struct iphdr *)(skb->data + iphdroff);
55 hdr = (struct dccp_hdr *)(skb->data + hdroff); 53 hdr = (struct dccp_hdr *)(skb->data + hdroff);
56 54
57 if (maniptype == NF_NAT_MANIP_SRC) { 55 if (maniptype == NF_NAT_MANIP_SRC) {
58 oldip = iph->saddr;
59 newip = tuple->src.u3.ip;
60 newport = tuple->src.u.dccp.port; 56 newport = tuple->src.u.dccp.port;
61 portptr = &hdr->dccph_sport; 57 portptr = &hdr->dccph_sport;
62 } else { 58 } else {
63 oldip = iph->daddr;
64 newip = tuple->dst.u3.ip;
65 newport = tuple->dst.u.dccp.port; 59 newport = tuple->dst.u.dccp.port;
66 portptr = &hdr->dccph_dport; 60 portptr = &hdr->dccph_dport;
67 } 61 }
@@ -72,30 +66,46 @@ dccp_manip_pkt(struct sk_buff *skb,
72 if (hdrsize < sizeof(*hdr)) 66 if (hdrsize < sizeof(*hdr))
73 return true; 67 return true;
74 68
75 inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1); 69 l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
70 tuple, maniptype);
76 inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport, 71 inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
77 0); 72 0);
78 return true; 73 return true;
79} 74}
80 75
81static const struct nf_nat_protocol nf_nat_protocol_dccp = { 76static const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
82 .protonum = IPPROTO_DCCP, 77 .l4proto = IPPROTO_DCCP,
83 .manip_pkt = dccp_manip_pkt, 78 .manip_pkt = dccp_manip_pkt,
84 .in_range = nf_nat_proto_in_range, 79 .in_range = nf_nat_l4proto_in_range,
85 .unique_tuple = dccp_unique_tuple, 80 .unique_tuple = dccp_unique_tuple,
86#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 81#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
87 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 82 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
88#endif 83#endif
89}; 84};
90 85
91static int __init nf_nat_proto_dccp_init(void) 86static int __init nf_nat_proto_dccp_init(void)
92{ 87{
93 return nf_nat_protocol_register(&nf_nat_protocol_dccp); 88 int err;
89
90 err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
91 if (err < 0)
92 goto err1;
93 err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
94 if (err < 0)
95 goto err2;
96 return 0;
97
98err2:
99 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
100err1:
101 return err;
94} 102}
95 103
96static void __exit nf_nat_proto_dccp_fini(void) 104static void __exit nf_nat_proto_dccp_fini(void)
97{ 105{
98 nf_nat_protocol_unregister(&nf_nat_protocol_dccp); 106 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
107 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
108
99} 109}
100 110
101module_init(nf_nat_proto_dccp_init); 111module_init(nf_nat_proto_dccp_init);
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 3cce9b6c1c29..e64faa5ca893 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -8,53 +8,46 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/ip.h>
12#include <linux/sctp.h> 11#include <linux/sctp.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <net/sctp/checksum.h> 13#include <net/sctp/checksum.h>
15 14
16#include <net/netfilter/nf_nat_protocol.h> 15#include <net/netfilter/nf_nat_l4proto.h>
17 16
18static u_int16_t nf_sctp_port_rover; 17static u_int16_t nf_sctp_port_rover;
19 18
20static void 19static void
21sctp_unique_tuple(struct nf_conntrack_tuple *tuple, 20sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
22 const struct nf_nat_ipv4_range *range, 21 struct nf_conntrack_tuple *tuple,
22 const struct nf_nat_range *range,
23 enum nf_nat_manip_type maniptype, 23 enum nf_nat_manip_type maniptype,
24 const struct nf_conn *ct) 24 const struct nf_conn *ct)
25{ 25{
26 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 26 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
27 &nf_sctp_port_rover); 27 &nf_sctp_port_rover);
28} 28}
29 29
30static bool 30static bool
31sctp_manip_pkt(struct sk_buff *skb, 31sctp_manip_pkt(struct sk_buff *skb,
32 unsigned int iphdroff, 32 const struct nf_nat_l3proto *l3proto,
33 unsigned int iphdroff, unsigned int hdroff,
33 const struct nf_conntrack_tuple *tuple, 34 const struct nf_conntrack_tuple *tuple,
34 enum nf_nat_manip_type maniptype) 35 enum nf_nat_manip_type maniptype)
35{ 36{
36 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
37 struct sk_buff *frag; 37 struct sk_buff *frag;
38 sctp_sctphdr_t *hdr; 38 sctp_sctphdr_t *hdr;
39 unsigned int hdroff = iphdroff + iph->ihl*4;
40 __be32 oldip, newip;
41 __be32 crc32; 39 __be32 crc32;
42 40
43 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 41 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
44 return false; 42 return false;
45 43
46 iph = (struct iphdr *)(skb->data + iphdroff);
47 hdr = (struct sctphdr *)(skb->data + hdroff); 44 hdr = (struct sctphdr *)(skb->data + hdroff);
48 45
49 if (maniptype == NF_NAT_MANIP_SRC) { 46 if (maniptype == NF_NAT_MANIP_SRC) {
50 /* Get rid of src ip and src pt */ 47 /* Get rid of src port */
51 oldip = iph->saddr;
52 newip = tuple->src.u3.ip;
53 hdr->source = tuple->src.u.sctp.port; 48 hdr->source = tuple->src.u.sctp.port;
54 } else { 49 } else {
55 /* Get rid of dst ip and dst pt */ 50 /* Get rid of dst port */
56 oldip = iph->daddr;
57 newip = tuple->dst.u3.ip;
58 hdr->dest = tuple->dst.u.sctp.port; 51 hdr->dest = tuple->dst.u.sctp.port;
59 } 52 }
60 53
@@ -68,24 +61,38 @@ sctp_manip_pkt(struct sk_buff *skb,
68 return true; 61 return true;
69} 62}
70 63
71static const struct nf_nat_protocol nf_nat_protocol_sctp = { 64static const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
72 .protonum = IPPROTO_SCTP, 65 .l4proto = IPPROTO_SCTP,
73 .manip_pkt = sctp_manip_pkt, 66 .manip_pkt = sctp_manip_pkt,
74 .in_range = nf_nat_proto_in_range, 67 .in_range = nf_nat_l4proto_in_range,
75 .unique_tuple = sctp_unique_tuple, 68 .unique_tuple = sctp_unique_tuple,
76#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 69#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
77 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 70 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
78#endif 71#endif
79}; 72};
80 73
81static int __init nf_nat_proto_sctp_init(void) 74static int __init nf_nat_proto_sctp_init(void)
82{ 75{
83 return nf_nat_protocol_register(&nf_nat_protocol_sctp); 76 int err;
77
78 err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
79 if (err < 0)
80 goto err1;
81 err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_sctp);
82 if (err < 0)
83 goto err2;
84 return 0;
85
86err2:
87 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
88err1:
89 return err;
84} 90}
85 91
86static void __exit nf_nat_proto_sctp_exit(void) 92static void __exit nf_nat_proto_sctp_exit(void)
87{ 93{
88 nf_nat_protocol_unregister(&nf_nat_protocol_sctp); 94 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_sctp);
95 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
89} 96}
90 97
91module_init(nf_nat_proto_sctp_init); 98module_init(nf_nat_proto_sctp_init);
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
index 9fb4b4e72bbf..83ec8a6e4c36 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/netfilter/nf_nat_proto_tcp.c
@@ -9,37 +9,36 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/ip.h>
13#include <linux/tcp.h> 12#include <linux/tcp.h>
14 13
15#include <linux/netfilter.h> 14#include <linux/netfilter.h>
16#include <linux/netfilter/nfnetlink_conntrack.h> 15#include <linux/netfilter/nfnetlink_conntrack.h>
17#include <net/netfilter/nf_nat.h> 16#include <net/netfilter/nf_nat.h>
18#include <net/netfilter/nf_nat_rule.h> 17#include <net/netfilter/nf_nat_l3proto.h>
19#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_l4proto.h>
20#include <net/netfilter/nf_nat_core.h> 19#include <net/netfilter/nf_nat_core.h>
21 20
22static u_int16_t tcp_port_rover; 21static u16 tcp_port_rover;
23 22
24static void 23static void
25tcp_unique_tuple(struct nf_conntrack_tuple *tuple, 24tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
26 const struct nf_nat_ipv4_range *range, 25 struct nf_conntrack_tuple *tuple,
26 const struct nf_nat_range *range,
27 enum nf_nat_manip_type maniptype, 27 enum nf_nat_manip_type maniptype,
28 const struct nf_conn *ct) 28 const struct nf_conn *ct)
29{ 29{
30 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover); 30 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
31 &tcp_port_rover);
31} 32}
32 33
33static bool 34static bool
34tcp_manip_pkt(struct sk_buff *skb, 35tcp_manip_pkt(struct sk_buff *skb,
35 unsigned int iphdroff, 36 const struct nf_nat_l3proto *l3proto,
37 unsigned int iphdroff, unsigned int hdroff,
36 const struct nf_conntrack_tuple *tuple, 38 const struct nf_conntrack_tuple *tuple,
37 enum nf_nat_manip_type maniptype) 39 enum nf_nat_manip_type maniptype)
38{ 40{
39 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
40 struct tcphdr *hdr; 41 struct tcphdr *hdr;
41 unsigned int hdroff = iphdroff + iph->ihl*4;
42 __be32 oldip, newip;
43 __be16 *portptr, newport, oldport; 42 __be16 *portptr, newport, oldport;
44 int hdrsize = 8; /* TCP connection tracking guarantees this much */ 43 int hdrsize = 8; /* TCP connection tracking guarantees this much */
45 44
@@ -52,19 +51,14 @@ tcp_manip_pkt(struct sk_buff *skb,
52 if (!skb_make_writable(skb, hdroff + hdrsize)) 51 if (!skb_make_writable(skb, hdroff + hdrsize))
53 return false; 52 return false;
54 53
55 iph = (struct iphdr *)(skb->data + iphdroff);
56 hdr = (struct tcphdr *)(skb->data + hdroff); 54 hdr = (struct tcphdr *)(skb->data + hdroff);
57 55
58 if (maniptype == NF_NAT_MANIP_SRC) { 56 if (maniptype == NF_NAT_MANIP_SRC) {
59 /* Get rid of src ip and src pt */ 57 /* Get rid of src port */
60 oldip = iph->saddr;
61 newip = tuple->src.u3.ip;
62 newport = tuple->src.u.tcp.port; 58 newport = tuple->src.u.tcp.port;
63 portptr = &hdr->source; 59 portptr = &hdr->source;
64 } else { 60 } else {
65 /* Get rid of dst ip and dst pt */ 61 /* Get rid of dst port */
66 oldip = iph->daddr;
67 newip = tuple->dst.u3.ip;
68 newport = tuple->dst.u.tcp.port; 62 newport = tuple->dst.u.tcp.port;
69 portptr = &hdr->dest; 63 portptr = &hdr->dest;
70 } 64 }
@@ -75,17 +69,17 @@ tcp_manip_pkt(struct sk_buff *skb,
75 if (hdrsize < sizeof(*hdr)) 69 if (hdrsize < sizeof(*hdr))
76 return true; 70 return true;
77 71
78 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); 72 l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
79 inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0); 73 inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
80 return true; 74 return true;
81} 75}
82 76
83const struct nf_nat_protocol nf_nat_protocol_tcp = { 77const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
84 .protonum = IPPROTO_TCP, 78 .l4proto = IPPROTO_TCP,
85 .manip_pkt = tcp_manip_pkt, 79 .manip_pkt = tcp_manip_pkt,
86 .in_range = nf_nat_proto_in_range, 80 .in_range = nf_nat_l4proto_in_range,
87 .unique_tuple = tcp_unique_tuple, 81 .unique_tuple = tcp_unique_tuple,
88#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 82#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
89 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 83 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
90#endif 84#endif
91}; 85};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
index 9883336e628f..7df613fb34a2 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/netfilter/nf_nat_proto_udp.c
@@ -9,59 +9,53 @@
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/export.h> 10#include <linux/export.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/ip.h>
13#include <linux/udp.h> 12#include <linux/udp.h>
14 13
15#include <linux/netfilter.h> 14#include <linux/netfilter.h>
16#include <net/netfilter/nf_nat.h> 15#include <net/netfilter/nf_nat.h>
17#include <net/netfilter/nf_nat_core.h> 16#include <net/netfilter/nf_nat_core.h>
18#include <net/netfilter/nf_nat_rule.h> 17#include <net/netfilter/nf_nat_l3proto.h>
19#include <net/netfilter/nf_nat_protocol.h> 18#include <net/netfilter/nf_nat_l4proto.h>
20 19
21static u_int16_t udp_port_rover; 20static u16 udp_port_rover;
22 21
23static void 22static void
24udp_unique_tuple(struct nf_conntrack_tuple *tuple, 23udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
25 const struct nf_nat_ipv4_range *range, 24 struct nf_conntrack_tuple *tuple,
25 const struct nf_nat_range *range,
26 enum nf_nat_manip_type maniptype, 26 enum nf_nat_manip_type maniptype,
27 const struct nf_conn *ct) 27 const struct nf_conn *ct)
28{ 28{
29 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover); 29 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
30 &udp_port_rover);
30} 31}
31 32
32static bool 33static bool
33udp_manip_pkt(struct sk_buff *skb, 34udp_manip_pkt(struct sk_buff *skb,
34 unsigned int iphdroff, 35 const struct nf_nat_l3proto *l3proto,
36 unsigned int iphdroff, unsigned int hdroff,
35 const struct nf_conntrack_tuple *tuple, 37 const struct nf_conntrack_tuple *tuple,
36 enum nf_nat_manip_type maniptype) 38 enum nf_nat_manip_type maniptype)
37{ 39{
38 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
39 struct udphdr *hdr; 40 struct udphdr *hdr;
40 unsigned int hdroff = iphdroff + iph->ihl*4;
41 __be32 oldip, newip;
42 __be16 *portptr, newport; 41 __be16 *portptr, newport;
43 42
44 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 43 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
45 return false; 44 return false;
46
47 iph = (struct iphdr *)(skb->data + iphdroff);
48 hdr = (struct udphdr *)(skb->data + hdroff); 45 hdr = (struct udphdr *)(skb->data + hdroff);
49 46
50 if (maniptype == NF_NAT_MANIP_SRC) { 47 if (maniptype == NF_NAT_MANIP_SRC) {
51 /* Get rid of src ip and src pt */ 48 /* Get rid of src port */
52 oldip = iph->saddr;
53 newip = tuple->src.u3.ip;
54 newport = tuple->src.u.udp.port; 49 newport = tuple->src.u.udp.port;
55 portptr = &hdr->source; 50 portptr = &hdr->source;
56 } else { 51 } else {
57 /* Get rid of dst ip and dst pt */ 52 /* Get rid of dst port */
58 oldip = iph->daddr;
59 newip = tuple->dst.u3.ip;
60 newport = tuple->dst.u.udp.port; 53 newport = tuple->dst.u.udp.port;
61 portptr = &hdr->dest; 54 portptr = &hdr->dest;
62 } 55 }
63 if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) { 56 if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
64 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); 57 l3proto->csum_update(skb, iphdroff, &hdr->check,
58 tuple, maniptype);
65 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 59 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
66 0); 60 0);
67 if (!hdr->check) 61 if (!hdr->check)
@@ -71,12 +65,12 @@ udp_manip_pkt(struct sk_buff *skb,
71 return true; 65 return true;
72} 66}
73 67
74const struct nf_nat_protocol nf_nat_protocol_udp = { 68const struct nf_nat_l4proto nf_nat_l4proto_udp = {
75 .protonum = IPPROTO_UDP, 69 .l4proto = IPPROTO_UDP,
76 .manip_pkt = udp_manip_pkt, 70 .manip_pkt = udp_manip_pkt,
77 .in_range = nf_nat_proto_in_range, 71 .in_range = nf_nat_l4proto_in_range,
78 .unique_tuple = udp_unique_tuple, 72 .unique_tuple = udp_unique_tuple,
79#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 73#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
80 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 74 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
81#endif 75#endif
82}; 76};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c
index d24d10a7beb2..776a0d1317b1 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/netfilter/nf_nat_proto_udplite.c
@@ -9,59 +9,53 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/ip.h>
13#include <linux/udp.h> 12#include <linux/udp.h>
14 13
15#include <linux/netfilter.h> 14#include <linux/netfilter.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <net/netfilter/nf_nat.h> 16#include <net/netfilter/nf_nat.h>
18#include <net/netfilter/nf_nat_protocol.h> 17#include <net/netfilter/nf_nat_l3proto.h>
18#include <net/netfilter/nf_nat_l4proto.h>
19 19
20static u_int16_t udplite_port_rover; 20static u16 udplite_port_rover;
21 21
22static void 22static void
23udplite_unique_tuple(struct nf_conntrack_tuple *tuple, 23udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
24 const struct nf_nat_ipv4_range *range, 24 struct nf_conntrack_tuple *tuple,
25 const struct nf_nat_range *range,
25 enum nf_nat_manip_type maniptype, 26 enum nf_nat_manip_type maniptype,
26 const struct nf_conn *ct) 27 const struct nf_conn *ct)
27{ 28{
28 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 29 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
29 &udplite_port_rover); 30 &udplite_port_rover);
30} 31}
31 32
32static bool 33static bool
33udplite_manip_pkt(struct sk_buff *skb, 34udplite_manip_pkt(struct sk_buff *skb,
34 unsigned int iphdroff, 35 const struct nf_nat_l3proto *l3proto,
36 unsigned int iphdroff, unsigned int hdroff,
35 const struct nf_conntrack_tuple *tuple, 37 const struct nf_conntrack_tuple *tuple,
36 enum nf_nat_manip_type maniptype) 38 enum nf_nat_manip_type maniptype)
37{ 39{
38 const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
39 struct udphdr *hdr; 40 struct udphdr *hdr;
40 unsigned int hdroff = iphdroff + iph->ihl*4;
41 __be32 oldip, newip;
42 __be16 *portptr, newport; 41 __be16 *portptr, newport;
43 42
44 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 43 if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
45 return false; 44 return false;
46 45
47 iph = (struct iphdr *)(skb->data + iphdroff);
48 hdr = (struct udphdr *)(skb->data + hdroff); 46 hdr = (struct udphdr *)(skb->data + hdroff);
49 47
50 if (maniptype == NF_NAT_MANIP_SRC) { 48 if (maniptype == NF_NAT_MANIP_SRC) {
51 /* Get rid of src ip and src pt */ 49 /* Get rid of source port */
52 oldip = iph->saddr;
53 newip = tuple->src.u3.ip;
54 newport = tuple->src.u.udp.port; 50 newport = tuple->src.u.udp.port;
55 portptr = &hdr->source; 51 portptr = &hdr->source;
56 } else { 52 } else {
57 /* Get rid of dst ip and dst pt */ 53 /* Get rid of dst port */
58 oldip = iph->daddr;
59 newip = tuple->dst.u3.ip;
60 newport = tuple->dst.u.udp.port; 54 newport = tuple->dst.u.udp.port;
61 portptr = &hdr->dest; 55 portptr = &hdr->dest;
62 } 56 }
63 57
64 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); 58 l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
65 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0); 59 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
66 if (!hdr->check) 60 if (!hdr->check)
67 hdr->check = CSUM_MANGLED_0; 61 hdr->check = CSUM_MANGLED_0;
@@ -70,24 +64,38 @@ udplite_manip_pkt(struct sk_buff *skb,
70 return true; 64 return true;
71} 65}
72 66
73static const struct nf_nat_protocol nf_nat_protocol_udplite = { 67static const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
74 .protonum = IPPROTO_UDPLITE, 68 .l4proto = IPPROTO_UDPLITE,
75 .manip_pkt = udplite_manip_pkt, 69 .manip_pkt = udplite_manip_pkt,
76 .in_range = nf_nat_proto_in_range, 70 .in_range = nf_nat_l4proto_in_range,
77 .unique_tuple = udplite_unique_tuple, 71 .unique_tuple = udplite_unique_tuple,
78#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 72#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
79 .nlattr_to_range = nf_nat_proto_nlattr_to_range, 73 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
80#endif 74#endif
81}; 75};
82 76
83static int __init nf_nat_proto_udplite_init(void) 77static int __init nf_nat_proto_udplite_init(void)
84{ 78{
85 return nf_nat_protocol_register(&nf_nat_protocol_udplite); 79 int err;
80
81 err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
82 if (err < 0)
83 goto err1;
84 err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_udplite);
85 if (err < 0)
86 goto err2;
87 return 0;
88
89err2:
90 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
91err1:
92 return err;
86} 93}
87 94
88static void __exit nf_nat_proto_udplite_fini(void) 95static void __exit nf_nat_proto_udplite_fini(void)
89{ 96{
90 nf_nat_protocol_unregister(&nf_nat_protocol_udplite); 97 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_udplite);
98 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
91} 99}
92 100
93module_init(nf_nat_proto_udplite_init); 101module_init(nf_nat_proto_udplite_init);
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/netfilter/nf_nat_proto_unknown.c
index e0afe8112b1c..6e494d584412 100644
--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/netfilter/nf_nat_proto_unknown.c
@@ -15,8 +15,7 @@
15 15
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <net/netfilter/nf_nat.h> 17#include <net/netfilter/nf_nat.h>
18#include <net/netfilter/nf_nat_rule.h> 18#include <net/netfilter/nf_nat_l4proto.h>
19#include <net/netfilter/nf_nat_protocol.h>
20 19
21static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, 20static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
22 enum nf_nat_manip_type manip_type, 21 enum nf_nat_manip_type manip_type,
@@ -26,26 +25,29 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
26 return true; 25 return true;
27} 26}
28 27
29static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple, 28static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto,
30 const struct nf_nat_ipv4_range *range, 29 struct nf_conntrack_tuple *tuple,
30 const struct nf_nat_range *range,
31 enum nf_nat_manip_type maniptype, 31 enum nf_nat_manip_type maniptype,
32 const struct nf_conn *ct) 32 const struct nf_conn *ct)
33{ 33{
34 /* Sorry: we can't help you; if it's not unique, we can't frob 34 /* Sorry: we can't help you; if it's not unique, we can't frob
35 anything. */ 35 * anything.
36 */
36 return; 37 return;
37} 38}
38 39
39static bool 40static bool
40unknown_manip_pkt(struct sk_buff *skb, 41unknown_manip_pkt(struct sk_buff *skb,
41 unsigned int iphdroff, 42 const struct nf_nat_l3proto *l3proto,
43 unsigned int iphdroff, unsigned int hdroff,
42 const struct nf_conntrack_tuple *tuple, 44 const struct nf_conntrack_tuple *tuple,
43 enum nf_nat_manip_type maniptype) 45 enum nf_nat_manip_type maniptype)
44{ 46{
45 return true; 47 return true;
46} 48}
47 49
48const struct nf_nat_protocol nf_nat_unknown_protocol = { 50const struct nf_nat_l4proto nf_nat_l4proto_unknown = {
49 .manip_pkt = unknown_manip_pkt, 51 .manip_pkt = unknown_manip_pkt,
50 .in_range = unknown_in_range, 52 .in_range = unknown_in_range,
51 .unique_tuple = unknown_unique_tuple, 53 .unique_tuple = unknown_unique_tuple,
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 9c87cde28ff8..16303c752213 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -3,7 +3,7 @@
3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> 3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
4 * based on RR's ip_nat_ftp.c and other modules. 4 * based on RR's ip_nat_ftp.c and other modules.
5 * (C) 2007 United Security Providers 5 * (C) 2007 United Security Providers
6 * (C) 2007, 2008 Patrick McHardy <kaber@trash.net> 6 * (C) 2007, 2008, 2011, 2012 Patrick McHardy <kaber@trash.net>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -12,14 +12,12 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/ip.h> 15#include <linux/inet.h>
16#include <net/ip.h>
17#include <linux/udp.h> 16#include <linux/udp.h>
18#include <linux/tcp.h> 17#include <linux/tcp.h>
19 18
20#include <net/netfilter/nf_nat.h> 19#include <net/netfilter/nf_nat.h>
21#include <net/netfilter/nf_nat_helper.h> 20#include <net/netfilter/nf_nat_helper.h>
22#include <net/netfilter/nf_nat_rule.h>
23#include <net/netfilter/nf_conntrack_helper.h> 21#include <net/netfilter/nf_conntrack_helper.h>
24#include <net/netfilter/nf_conntrack_expect.h> 22#include <net/netfilter/nf_conntrack_expect.h>
25#include <linux/netfilter/nf_conntrack_sip.h> 23#include <linux/netfilter/nf_conntrack_sip.h>
@@ -30,7 +28,8 @@ MODULE_DESCRIPTION("SIP NAT helper");
30MODULE_ALIAS("ip_nat_sip"); 28MODULE_ALIAS("ip_nat_sip");
31 29
32 30
33static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff, 31static unsigned int mangle_packet(struct sk_buff *skb, unsigned int protoff,
32 unsigned int dataoff,
34 const char **dptr, unsigned int *datalen, 33 const char **dptr, unsigned int *datalen,
35 unsigned int matchoff, unsigned int matchlen, 34 unsigned int matchoff, unsigned int matchlen,
36 const char *buffer, unsigned int buflen) 35 const char *buffer, unsigned int buflen)
@@ -41,20 +40,20 @@ static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
41 unsigned int baseoff; 40 unsigned int baseoff;
42 41
43 if (nf_ct_protonum(ct) == IPPROTO_TCP) { 42 if (nf_ct_protonum(ct) == IPPROTO_TCP) {
44 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb)); 43 th = (struct tcphdr *)(skb->data + protoff);
45 baseoff = ip_hdrlen(skb) + th->doff * 4; 44 baseoff = protoff + th->doff * 4;
46 matchoff += dataoff - baseoff; 45 matchoff += dataoff - baseoff;
47 46
48 if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 47 if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
49 matchoff, matchlen, 48 protoff, matchoff, matchlen,
50 buffer, buflen, false)) 49 buffer, buflen, false))
51 return 0; 50 return 0;
52 } else { 51 } else {
53 baseoff = ip_hdrlen(skb) + sizeof(struct udphdr); 52 baseoff = protoff + sizeof(struct udphdr);
54 matchoff += dataoff - baseoff; 53 matchoff += dataoff - baseoff;
55 54
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, 55 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
57 matchoff, matchlen, 56 protoff, matchoff, matchlen,
58 buffer, buflen)) 57 buffer, buflen))
59 return 0; 58 return 0;
60 } 59 }
@@ -65,7 +64,30 @@ static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
65 return 1; 64 return 1;
66} 65}
67 66
68static int map_addr(struct sk_buff *skb, unsigned int dataoff, 67static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer,
68 const union nf_inet_addr *addr, bool delim)
69{
70 if (nf_ct_l3num(ct) == NFPROTO_IPV4)
71 return sprintf(buffer, "%pI4", &addr->ip);
72 else {
73 if (delim)
74 return sprintf(buffer, "[%pI6c]", &addr->ip6);
75 else
76 return sprintf(buffer, "%pI6c", &addr->ip6);
77 }
78}
79
80static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer,
81 const union nf_inet_addr *addr, u16 port)
82{
83 if (nf_ct_l3num(ct) == NFPROTO_IPV4)
84 return sprintf(buffer, "%pI4:%u", &addr->ip, port);
85 else
86 return sprintf(buffer, "[%pI6c]:%u", &addr->ip6, port);
87}
88
89static int map_addr(struct sk_buff *skb, unsigned int protoff,
90 unsigned int dataoff,
69 const char **dptr, unsigned int *datalen, 91 const char **dptr, unsigned int *datalen,
70 unsigned int matchoff, unsigned int matchlen, 92 unsigned int matchoff, unsigned int matchlen,
71 union nf_inet_addr *addr, __be16 port) 93 union nf_inet_addr *addr, __be16 port)
@@ -73,32 +95,32 @@ static int map_addr(struct sk_buff *skb, unsigned int dataoff,
73 enum ip_conntrack_info ctinfo; 95 enum ip_conntrack_info ctinfo;
74 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 96 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
75 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 97 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
76 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 98 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
77 unsigned int buflen; 99 unsigned int buflen;
78 __be32 newaddr; 100 union nf_inet_addr newaddr;
79 __be16 newport; 101 __be16 newport;
80 102
81 if (ct->tuplehash[dir].tuple.src.u3.ip == addr->ip && 103 if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, addr) &&
82 ct->tuplehash[dir].tuple.src.u.udp.port == port) { 104 ct->tuplehash[dir].tuple.src.u.udp.port == port) {
83 newaddr = ct->tuplehash[!dir].tuple.dst.u3.ip; 105 newaddr = ct->tuplehash[!dir].tuple.dst.u3;
84 newport = ct->tuplehash[!dir].tuple.dst.u.udp.port; 106 newport = ct->tuplehash[!dir].tuple.dst.u.udp.port;
85 } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip && 107 } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
86 ct->tuplehash[dir].tuple.dst.u.udp.port == port) { 108 ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
87 newaddr = ct->tuplehash[!dir].tuple.src.u3.ip; 109 newaddr = ct->tuplehash[!dir].tuple.src.u3;
88 newport = ct->tuplehash[!dir].tuple.src.u.udp.port; 110 newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
89 } else 111 } else
90 return 1; 112 return 1;
91 113
92 if (newaddr == addr->ip && newport == port) 114 if (nf_inet_addr_cmp(&newaddr, addr) && newport == port)
93 return 1; 115 return 1;
94 116
95 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); 117 buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport));
96 118 return mangle_packet(skb, protoff, dataoff, dptr, datalen,
97 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, 119 matchoff, matchlen, buffer, buflen);
98 buffer, buflen);
99} 120}
100 121
101static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff, 122static int map_sip_addr(struct sk_buff *skb, unsigned int protoff,
123 unsigned int dataoff,
102 const char **dptr, unsigned int *datalen, 124 const char **dptr, unsigned int *datalen,
103 enum sip_header_types type) 125 enum sip_header_types type)
104{ 126{
@@ -111,11 +133,12 @@ static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
111 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, 133 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
112 &matchoff, &matchlen, &addr, &port) <= 0) 134 &matchoff, &matchlen, &addr, &port) <= 0)
113 return 1; 135 return 1;
114 return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, 136 return map_addr(skb, protoff, dataoff, dptr, datalen,
115 &addr, port); 137 matchoff, matchlen, &addr, port);
116} 138}
117 139
118static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, 140static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
141 unsigned int dataoff,
119 const char **dptr, unsigned int *datalen) 142 const char **dptr, unsigned int *datalen)
120{ 143{
121 enum ip_conntrack_info ctinfo; 144 enum ip_conntrack_info ctinfo;
@@ -132,8 +155,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
132 if (ct_sip_parse_request(ct, *dptr, *datalen, 155 if (ct_sip_parse_request(ct, *dptr, *datalen,
133 &matchoff, &matchlen, 156 &matchoff, &matchlen,
134 &addr, &port) > 0 && 157 &addr, &port) > 0 &&
135 !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, 158 !map_addr(skb, protoff, dataoff, dptr, datalen,
136 &addr, port)) 159 matchoff, matchlen, &addr, port))
137 return NF_DROP; 160 return NF_DROP;
138 request = 1; 161 request = 1;
139 } else 162 } else
@@ -149,23 +172,25 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
149 hdr, NULL, &matchoff, &matchlen, 172 hdr, NULL, &matchoff, &matchlen,
150 &addr, &port) > 0) { 173 &addr, &port) > 0) {
151 unsigned int olen, matchend, poff, plen, buflen, n; 174 unsigned int olen, matchend, poff, plen, buflen, n;
152 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 175 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
153 176
154 /* We're only interested in headers related to this 177 /* We're only interested in headers related to this
155 * connection */ 178 * connection */
156 if (request) { 179 if (request) {
157 if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip || 180 if (!nf_inet_addr_cmp(&addr,
181 &ct->tuplehash[dir].tuple.src.u3) ||
158 port != ct->tuplehash[dir].tuple.src.u.udp.port) 182 port != ct->tuplehash[dir].tuple.src.u.udp.port)
159 goto next; 183 goto next;
160 } else { 184 } else {
161 if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip || 185 if (!nf_inet_addr_cmp(&addr,
186 &ct->tuplehash[dir].tuple.dst.u3) ||
162 port != ct->tuplehash[dir].tuple.dst.u.udp.port) 187 port != ct->tuplehash[dir].tuple.dst.u.udp.port)
163 goto next; 188 goto next;
164 } 189 }
165 190
166 olen = *datalen; 191 olen = *datalen;
167 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, 192 if (!map_addr(skb, protoff, dataoff, dptr, datalen,
168 &addr, port)) 193 matchoff, matchlen, &addr, port))
169 return NF_DROP; 194 return NF_DROP;
170 195
171 matchend = matchoff + matchlen + *datalen - olen; 196 matchend = matchoff + matchlen + *datalen - olen;
@@ -175,11 +200,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
175 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, 200 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
176 "maddr=", &poff, &plen, 201 "maddr=", &poff, &plen,
177 &addr, true) > 0 && 202 &addr, true) > 0 &&
178 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && 203 nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) &&
179 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { 204 !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3)) {
180 buflen = sprintf(buffer, "%pI4", 205 buflen = sip_sprintf_addr(ct, buffer,
181 &ct->tuplehash[!dir].tuple.dst.u3.ip); 206 &ct->tuplehash[!dir].tuple.dst.u3,
182 if (!mangle_packet(skb, dataoff, dptr, datalen, 207 true);
208 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
183 poff, plen, buffer, buflen)) 209 poff, plen, buffer, buflen))
184 return NF_DROP; 210 return NF_DROP;
185 } 211 }
@@ -189,11 +215,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
189 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, 215 if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
190 "received=", &poff, &plen, 216 "received=", &poff, &plen,
191 &addr, false) > 0 && 217 &addr, false) > 0 &&
192 addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && 218 nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.dst.u3) &&
193 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { 219 !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.src.u3)) {
194 buflen = sprintf(buffer, "%pI4", 220 buflen = sip_sprintf_addr(ct, buffer,
195 &ct->tuplehash[!dir].tuple.src.u3.ip); 221 &ct->tuplehash[!dir].tuple.src.u3,
196 if (!mangle_packet(skb, dataoff, dptr, datalen, 222 false);
223 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
197 poff, plen, buffer, buflen)) 224 poff, plen, buffer, buflen))
198 return NF_DROP; 225 return NF_DROP;
199 } 226 }
@@ -207,7 +234,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
207 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { 234 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
208 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; 235 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
209 buflen = sprintf(buffer, "%u", ntohs(p)); 236 buflen = sprintf(buffer, "%u", ntohs(p));
210 if (!mangle_packet(skb, dataoff, dptr, datalen, 237 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
211 poff, plen, buffer, buflen)) 238 poff, plen, buffer, buflen))
212 return NF_DROP; 239 return NF_DROP;
213 } 240 }
@@ -221,19 +248,21 @@ next:
221 SIP_HDR_CONTACT, &in_header, 248 SIP_HDR_CONTACT, &in_header,
222 &matchoff, &matchlen, 249 &matchoff, &matchlen,
223 &addr, &port) > 0) { 250 &addr, &port) > 0) {
224 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, 251 if (!map_addr(skb, protoff, dataoff, dptr, datalen,
252 matchoff, matchlen,
225 &addr, port)) 253 &addr, port))
226 return NF_DROP; 254 return NF_DROP;
227 } 255 }
228 256
229 if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) || 257 if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) ||
230 !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO)) 258 !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO))
231 return NF_DROP; 259 return NF_DROP;
232 260
233 return NF_ACCEPT; 261 return NF_ACCEPT;
234} 262}
235 263
236static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off) 264static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
265 s16 off)
237{ 266{
238 enum ip_conntrack_info ctinfo; 267 enum ip_conntrack_info ctinfo;
239 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 268 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
@@ -242,37 +271,38 @@ static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
242 if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0) 271 if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
243 return; 272 return;
244 273
245 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb)); 274 th = (struct tcphdr *)(skb->data + protoff);
246 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off); 275 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
247} 276}
248 277
249/* Handles expected signalling connections and media streams */ 278/* Handles expected signalling connections and media streams */
250static void ip_nat_sip_expected(struct nf_conn *ct, 279static void nf_nat_sip_expected(struct nf_conn *ct,
251 struct nf_conntrack_expect *exp) 280 struct nf_conntrack_expect *exp)
252{ 281{
253 struct nf_nat_ipv4_range range; 282 struct nf_nat_range range;
254 283
255 /* This must be a fresh one. */ 284 /* This must be a fresh one. */
256 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 285 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
257 286
258 /* For DST manip, map port here to where it's expected. */ 287 /* For DST manip, map port here to where it's expected. */
259 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED); 288 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
260 range.min = range.max = exp->saved_proto; 289 range.min_proto = range.max_proto = exp->saved_proto;
261 range.min_ip = range.max_ip = exp->saved_ip; 290 range.min_addr = range.max_addr = exp->saved_addr;
262 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 291 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
263 292
264 /* Change src to where master sends to, but only if the connection 293 /* Change src to where master sends to, but only if the connection
265 * actually came from the same source. */ 294 * actually came from the same source. */
266 if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 295 if (nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
267 ct->master->tuplehash[exp->dir].tuple.src.u3.ip) { 296 &ct->master->tuplehash[exp->dir].tuple.src.u3)) {
268 range.flags = NF_NAT_RANGE_MAP_IPS; 297 range.flags = NF_NAT_RANGE_MAP_IPS;
269 range.min_ip = range.max_ip 298 range.min_addr = range.max_addr
270 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; 299 = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
271 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); 300 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
272 } 301 }
273} 302}
274 303
275static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff, 304static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
305 unsigned int dataoff,
276 const char **dptr, unsigned int *datalen, 306 const char **dptr, unsigned int *datalen,
277 struct nf_conntrack_expect *exp, 307 struct nf_conntrack_expect *exp,
278 unsigned int matchoff, 308 unsigned int matchoff,
@@ -281,16 +311,17 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
281 enum ip_conntrack_info ctinfo; 311 enum ip_conntrack_info ctinfo;
282 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 312 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
283 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 313 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
284 __be32 newip; 314 union nf_inet_addr newaddr;
285 u_int16_t port; 315 u_int16_t port;
286 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 316 char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
287 unsigned int buflen; 317 unsigned int buflen;
288 318
289 /* Connection will come from reply */ 319 /* Connection will come from reply */
290 if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) 320 if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
291 newip = exp->tuple.dst.u3.ip; 321 &ct->tuplehash[!dir].tuple.dst.u3))
322 newaddr = exp->tuple.dst.u3;
292 else 323 else
293 newip = ct->tuplehash[!dir].tuple.dst.u3.ip; 324 newaddr = ct->tuplehash[!dir].tuple.dst.u3;
294 325
295 /* If the signalling port matches the connection's source port in the 326 /* If the signalling port matches the connection's source port in the
296 * original direction, try to use the destination port in the opposite 327 * original direction, try to use the destination port in the opposite
@@ -301,11 +332,11 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
301 else 332 else
302 port = ntohs(exp->tuple.dst.u.udp.port); 333 port = ntohs(exp->tuple.dst.u.udp.port);
303 334
304 exp->saved_ip = exp->tuple.dst.u3.ip; 335 exp->saved_addr = exp->tuple.dst.u3;
305 exp->tuple.dst.u3.ip = newip; 336 exp->tuple.dst.u3 = newaddr;
306 exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port; 337 exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
307 exp->dir = !dir; 338 exp->dir = !dir;
308 exp->expectfn = ip_nat_sip_expected; 339 exp->expectfn = nf_nat_sip_expected;
309 340
310 for (; port != 0; port++) { 341 for (; port != 0; port++) {
311 int ret; 342 int ret;
@@ -323,10 +354,10 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
323 if (port == 0) 354 if (port == 0)
324 return NF_DROP; 355 return NF_DROP;
325 356
326 if (exp->tuple.dst.u3.ip != exp->saved_ip || 357 if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) ||
327 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { 358 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
328 buflen = sprintf(buffer, "%pI4:%u", &newip, port); 359 buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port);
329 if (!mangle_packet(skb, dataoff, dptr, datalen, 360 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
330 matchoff, matchlen, buffer, buflen)) 361 matchoff, matchlen, buffer, buflen))
331 goto err; 362 goto err;
332 } 363 }
@@ -337,7 +368,8 @@ err:
337 return NF_DROP; 368 return NF_DROP;
338} 369}
339 370
340static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff, 371static int mangle_content_len(struct sk_buff *skb, unsigned int protoff,
372 unsigned int dataoff,
341 const char **dptr, unsigned int *datalen) 373 const char **dptr, unsigned int *datalen)
342{ 374{
343 enum ip_conntrack_info ctinfo; 375 enum ip_conntrack_info ctinfo;
@@ -359,11 +391,12 @@ static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
359 return 0; 391 return 0;
360 392
361 buflen = sprintf(buffer, "%u", c_len); 393 buflen = sprintf(buffer, "%u", c_len);
362 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, 394 return mangle_packet(skb, protoff, dataoff, dptr, datalen,
363 buffer, buflen); 395 matchoff, matchlen, buffer, buflen);
364} 396}
365 397
366static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff, 398static int mangle_sdp_packet(struct sk_buff *skb, unsigned int protoff,
399 unsigned int dataoff,
367 const char **dptr, unsigned int *datalen, 400 const char **dptr, unsigned int *datalen,
368 unsigned int sdpoff, 401 unsigned int sdpoff,
369 enum sdp_header_types type, 402 enum sdp_header_types type,
@@ -377,29 +410,33 @@ static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
377 if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term, 410 if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
378 &matchoff, &matchlen) <= 0) 411 &matchoff, &matchlen) <= 0)
379 return -ENOENT; 412 return -ENOENT;
380 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, 413 return mangle_packet(skb, protoff, dataoff, dptr, datalen,
381 buffer, buflen) ? 0 : -EINVAL; 414 matchoff, matchlen, buffer, buflen) ? 0 : -EINVAL;
382} 415}
383 416
384static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff, 417static unsigned int nf_nat_sdp_addr(struct sk_buff *skb, unsigned int protoff,
418 unsigned int dataoff,
385 const char **dptr, unsigned int *datalen, 419 const char **dptr, unsigned int *datalen,
386 unsigned int sdpoff, 420 unsigned int sdpoff,
387 enum sdp_header_types type, 421 enum sdp_header_types type,
388 enum sdp_header_types term, 422 enum sdp_header_types term,
389 const union nf_inet_addr *addr) 423 const union nf_inet_addr *addr)
390{ 424{
391 char buffer[sizeof("nnn.nnn.nnn.nnn")]; 425 enum ip_conntrack_info ctinfo;
426 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
427 char buffer[INET6_ADDRSTRLEN];
392 unsigned int buflen; 428 unsigned int buflen;
393 429
394 buflen = sprintf(buffer, "%pI4", &addr->ip); 430 buflen = sip_sprintf_addr(ct, buffer, addr, false);
395 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term, 431 if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen,
396 buffer, buflen)) 432 sdpoff, type, term, buffer, buflen))
397 return 0; 433 return 0;
398 434
399 return mangle_content_len(skb, dataoff, dptr, datalen); 435 return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
400} 436}
401 437
402static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff, 438static unsigned int nf_nat_sdp_port(struct sk_buff *skb, unsigned int protoff,
439 unsigned int dataoff,
403 const char **dptr, unsigned int *datalen, 440 const char **dptr, unsigned int *datalen,
404 unsigned int matchoff, 441 unsigned int matchoff,
405 unsigned int matchlen, 442 unsigned int matchlen,
@@ -409,30 +446,32 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
409 unsigned int buflen; 446 unsigned int buflen;
410 447
411 buflen = sprintf(buffer, "%u", port); 448 buflen = sprintf(buffer, "%u", port);
412 if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, 449 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
413 buffer, buflen)) 450 matchoff, matchlen, buffer, buflen))
414 return 0; 451 return 0;
415 452
416 return mangle_content_len(skb, dataoff, dptr, datalen); 453 return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
417} 454}
418 455
419static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff, 456static unsigned int nf_nat_sdp_session(struct sk_buff *skb, unsigned int protoff,
457 unsigned int dataoff,
420 const char **dptr, unsigned int *datalen, 458 const char **dptr, unsigned int *datalen,
421 unsigned int sdpoff, 459 unsigned int sdpoff,
422 const union nf_inet_addr *addr) 460 const union nf_inet_addr *addr)
423{ 461{
424 char buffer[sizeof("nnn.nnn.nnn.nnn")]; 462 enum ip_conntrack_info ctinfo;
463 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
464 char buffer[INET6_ADDRSTRLEN];
425 unsigned int buflen; 465 unsigned int buflen;
426 466
427 /* Mangle session description owner and contact addresses */ 467 /* Mangle session description owner and contact addresses */
428 buflen = sprintf(buffer, "%pI4", &addr->ip); 468 buflen = sip_sprintf_addr(ct, buffer, addr, false);
429 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, 469 if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
430 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, 470 SDP_HDR_OWNER, SDP_HDR_MEDIA, buffer, buflen))
431 buffer, buflen))
432 return 0; 471 return 0;
433 472
434 switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, 473 switch (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
435 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA, 474 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
436 buffer, buflen)) { 475 buffer, buflen)) {
437 case 0: 476 case 0:
438 /* 477 /*
@@ -448,12 +487,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff
448 return 0; 487 return 0;
449 } 488 }
450 489
451 return mangle_content_len(skb, dataoff, dptr, datalen); 490 return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
452} 491}
453 492
454/* So, this packet has hit the connection tracking matching code. 493/* So, this packet has hit the connection tracking matching code.
455 Mangle it, and change the expectation to match the new version. */ 494 Mangle it, and change the expectation to match the new version. */
456static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, 495static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
496 unsigned int dataoff,
457 const char **dptr, unsigned int *datalen, 497 const char **dptr, unsigned int *datalen,
458 struct nf_conntrack_expect *rtp_exp, 498 struct nf_conntrack_expect *rtp_exp,
459 struct nf_conntrack_expect *rtcp_exp, 499 struct nf_conntrack_expect *rtcp_exp,
@@ -467,23 +507,23 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
467 u_int16_t port; 507 u_int16_t port;
468 508
469 /* Connection will come from reply */ 509 /* Connection will come from reply */
470 if (ct->tuplehash[dir].tuple.src.u3.ip == 510 if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
471 ct->tuplehash[!dir].tuple.dst.u3.ip) 511 &ct->tuplehash[!dir].tuple.dst.u3))
472 rtp_addr->ip = rtp_exp->tuple.dst.u3.ip; 512 *rtp_addr = rtp_exp->tuple.dst.u3;
473 else 513 else
474 rtp_addr->ip = ct->tuplehash[!dir].tuple.dst.u3.ip; 514 *rtp_addr = ct->tuplehash[!dir].tuple.dst.u3;
475 515
476 rtp_exp->saved_ip = rtp_exp->tuple.dst.u3.ip; 516 rtp_exp->saved_addr = rtp_exp->tuple.dst.u3;
477 rtp_exp->tuple.dst.u3.ip = rtp_addr->ip; 517 rtp_exp->tuple.dst.u3 = *rtp_addr;
478 rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port; 518 rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
479 rtp_exp->dir = !dir; 519 rtp_exp->dir = !dir;
480 rtp_exp->expectfn = ip_nat_sip_expected; 520 rtp_exp->expectfn = nf_nat_sip_expected;
481 521
482 rtcp_exp->saved_ip = rtcp_exp->tuple.dst.u3.ip; 522 rtcp_exp->saved_addr = rtcp_exp->tuple.dst.u3;
483 rtcp_exp->tuple.dst.u3.ip = rtp_addr->ip; 523 rtcp_exp->tuple.dst.u3 = *rtp_addr;
484 rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port; 524 rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
485 rtcp_exp->dir = !dir; 525 rtcp_exp->dir = !dir;
486 rtcp_exp->expectfn = ip_nat_sip_expected; 526 rtcp_exp->expectfn = nf_nat_sip_expected;
487 527
488 /* Try to get same pair of ports: if not, try to change them. */ 528 /* Try to get same pair of ports: if not, try to change them. */
489 for (port = ntohs(rtp_exp->tuple.dst.u.udp.port); 529 for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
@@ -517,7 +557,7 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
517 557
518 /* Update media port. */ 558 /* Update media port. */
519 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && 559 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
520 !ip_nat_sdp_port(skb, dataoff, dptr, datalen, 560 !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
521 mediaoff, medialen, port)) 561 mediaoff, medialen, port))
522 goto err2; 562 goto err2;
523 563
@@ -531,8 +571,8 @@ err1:
531} 571}
532 572
533static struct nf_ct_helper_expectfn sip_nat = { 573static struct nf_ct_helper_expectfn sip_nat = {
534 .name = "sip", 574 .name = "sip",
535 .expectfn = ip_nat_sip_expected, 575 .expectfn = nf_nat_sip_expected,
536}; 576};
537 577
538static void __exit nf_nat_sip_fini(void) 578static void __exit nf_nat_sip_fini(void)
@@ -557,13 +597,13 @@ static int __init nf_nat_sip_init(void)
557 BUG_ON(nf_nat_sdp_port_hook != NULL); 597 BUG_ON(nf_nat_sdp_port_hook != NULL);
558 BUG_ON(nf_nat_sdp_session_hook != NULL); 598 BUG_ON(nf_nat_sdp_session_hook != NULL);
559 BUG_ON(nf_nat_sdp_media_hook != NULL); 599 BUG_ON(nf_nat_sdp_media_hook != NULL);
560 RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip); 600 RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
561 RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); 601 RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
562 RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect); 602 RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
563 RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); 603 RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
564 RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port); 604 RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
565 RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session); 605 RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
566 RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media); 606 RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
567 nf_ct_helper_expectfn_register(&sip_nat); 607 nf_ct_helper_expectfn_register(&sip_nat);
568 return 0; 608 return 0;
569} 609}
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/netfilter/nf_nat_tftp.c
index 9dbb8d284f99..ccabbda71a3e 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/netfilter/nf_nat_tftp.c
@@ -11,7 +11,6 @@
11#include <net/netfilter/nf_conntrack_helper.h> 11#include <net/netfilter/nf_conntrack_helper.h>
12#include <net/netfilter/nf_conntrack_expect.h> 12#include <net/netfilter/nf_conntrack_expect.h>
13#include <net/netfilter/nf_nat_helper.h> 13#include <net/netfilter/nf_nat_helper.h>
14#include <net/netfilter/nf_nat_rule.h>
15#include <linux/netfilter/nf_conntrack_tftp.h> 14#include <linux/netfilter/nf_conntrack_tftp.h>
16 15
17MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); 16MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index ce60cf0f6c11..8d2cf9ec37a8 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -118,7 +118,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
118 * through nf_reinject(). 118 * through nf_reinject().
119 */ 119 */
120static int __nf_queue(struct sk_buff *skb, 120static int __nf_queue(struct sk_buff *skb,
121 struct list_head *elem, 121 struct nf_hook_ops *elem,
122 u_int8_t pf, unsigned int hook, 122 u_int8_t pf, unsigned int hook,
123 struct net_device *indev, 123 struct net_device *indev,
124 struct net_device *outdev, 124 struct net_device *outdev,
@@ -155,7 +155,7 @@ static int __nf_queue(struct sk_buff *skb,
155 155
156 *entry = (struct nf_queue_entry) { 156 *entry = (struct nf_queue_entry) {
157 .skb = skb, 157 .skb = skb,
158 .elem = list_entry(elem, struct nf_hook_ops, list), 158 .elem = elem,
159 .pf = pf, 159 .pf = pf,
160 .hook = hook, 160 .hook = hook,
161 .indev = indev, 161 .indev = indev,
@@ -225,7 +225,7 @@ static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
225#endif 225#endif
226 226
227int nf_queue(struct sk_buff *skb, 227int nf_queue(struct sk_buff *skb,
228 struct list_head *elem, 228 struct nf_hook_ops *elem,
229 u_int8_t pf, unsigned int hook, 229 u_int8_t pf, unsigned int hook,
230 struct net_device *indev, 230 struct net_device *indev,
231 struct net_device *outdev, 231 struct net_device *outdev,
@@ -287,7 +287,7 @@ int nf_queue(struct sk_buff *skb,
287void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) 287void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
288{ 288{
289 struct sk_buff *skb = entry->skb; 289 struct sk_buff *skb = entry->skb;
290 struct list_head *elem = &entry->elem->list; 290 struct nf_hook_ops *elem = entry->elem;
291 const struct nf_afinfo *afinfo; 291 const struct nf_afinfo *afinfo;
292 int err; 292 int err;
293 293
@@ -297,7 +297,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
297 297
298 /* Continue traversal iff userspace said ok... */ 298 /* Continue traversal iff userspace said ok... */
299 if (verdict == NF_REPEAT) { 299 if (verdict == NF_REPEAT) {
300 elem = elem->prev; 300 elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
301 verdict = NF_ACCEPT; 301 verdict = NF_ACCEPT;
302 } 302 }
303 303
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index a26503342e71..ffb92c03a358 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -241,7 +241,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
241#endif 241#endif
242 }; 242 };
243 243
244 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg); 244 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
245 if (!nfnl) 245 if (!nfnl)
246 return -ENOMEM; 246 return -ENOMEM;
247 net->nfnl_stash = nfnl; 247 net->nfnl_stash = nfnl;
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index b2e7310ca0b8..589d686f0b4c 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -79,11 +79,11 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
79 79
80 if (tb[NFACCT_BYTES]) { 80 if (tb[NFACCT_BYTES]) {
81 atomic64_set(&nfacct->bytes, 81 atomic64_set(&nfacct->bytes,
82 be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES]))); 82 be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
83 } 83 }
84 if (tb[NFACCT_PKTS]) { 84 if (tb[NFACCT_PKTS]) {
85 atomic64_set(&nfacct->pkts, 85 atomic64_set(&nfacct->pkts,
86 be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS]))); 86 be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
87 } 87 }
88 atomic_set(&nfacct->refcnt, 1); 88 atomic_set(&nfacct->refcnt, 1);
89 list_add_tail_rcu(&nfacct->head, &nfnl_acct_list); 89 list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
@@ -91,16 +91,16 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
91} 91}
92 92
93static int 93static int
94nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, 94nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
95 int event, struct nf_acct *acct) 95 int event, struct nf_acct *acct)
96{ 96{
97 struct nlmsghdr *nlh; 97 struct nlmsghdr *nlh;
98 struct nfgenmsg *nfmsg; 98 struct nfgenmsg *nfmsg;
99 unsigned int flags = pid ? NLM_F_MULTI : 0; 99 unsigned int flags = portid ? NLM_F_MULTI : 0;
100 u64 pkts, bytes; 100 u64 pkts, bytes;
101 101
102 event |= NFNL_SUBSYS_ACCT << 8; 102 event |= NFNL_SUBSYS_ACCT << 8;
103 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 103 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
104 if (nlh == NULL) 104 if (nlh == NULL)
105 goto nlmsg_failure; 105 goto nlmsg_failure;
106 106
@@ -150,7 +150,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
150 if (last && cur != last) 150 if (last && cur != last)
151 continue; 151 continue;
152 152
153 if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid, 153 if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
154 cb->nlh->nlmsg_seq, 154 cb->nlh->nlmsg_seq,
155 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 155 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
156 NFNL_MSG_ACCT_NEW, cur) < 0) { 156 NFNL_MSG_ACCT_NEW, cur) < 0) {
@@ -195,7 +195,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
195 break; 195 break;
196 } 196 }
197 197
198 ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid, 198 ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).portid,
199 nlh->nlmsg_seq, 199 nlh->nlmsg_seq,
200 NFNL_MSG_TYPE(nlh->nlmsg_type), 200 NFNL_MSG_TYPE(nlh->nlmsg_type),
201 NFNL_MSG_ACCT_NEW, cur); 201 NFNL_MSG_ACCT_NEW, cur);
@@ -203,7 +203,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
203 kfree_skb(skb2); 203 kfree_skb(skb2);
204 break; 204 break;
205 } 205 }
206 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid, 206 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
207 MSG_DONTWAIT); 207 MSG_DONTWAIT);
208 if (ret > 0) 208 if (ret > 0)
209 ret = 0; 209 ret = 0;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index d6836193d479..945950a8b1f1 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -74,7 +74,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
74 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) 74 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
75 return -EINVAL; 75 return -EINVAL;
76 76
77 tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM])); 77 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
78 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); 78 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
79 79
80 return 0; 80 return 0;
@@ -85,6 +85,9 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
85{ 85{
86 const struct nf_conn_help *help = nfct_help(ct); 86 const struct nf_conn_help *help = nfct_help(ct);
87 87
88 if (attr == NULL)
89 return -EINVAL;
90
88 if (help->helper->data_len == 0) 91 if (help->helper->data_len == 0)
89 return -EINVAL; 92 return -EINVAL;
90 93
@@ -395,16 +398,16 @@ nla_put_failure:
395} 398}
396 399
397static int 400static int
398nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, 401nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
399 int event, struct nf_conntrack_helper *helper) 402 int event, struct nf_conntrack_helper *helper)
400{ 403{
401 struct nlmsghdr *nlh; 404 struct nlmsghdr *nlh;
402 struct nfgenmsg *nfmsg; 405 struct nfgenmsg *nfmsg;
403 unsigned int flags = pid ? NLM_F_MULTI : 0; 406 unsigned int flags = portid ? NLM_F_MULTI : 0;
404 int status; 407 int status;
405 408
406 event |= NFNL_SUBSYS_CTHELPER << 8; 409 event |= NFNL_SUBSYS_CTHELPER << 8;
407 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 410 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
408 if (nlh == NULL) 411 if (nlh == NULL)
409 goto nlmsg_failure; 412 goto nlmsg_failure;
410 413
@@ -468,7 +471,7 @@ restart:
468 cb->args[1] = 0; 471 cb->args[1] = 0;
469 } 472 }
470 if (nfnl_cthelper_fill_info(skb, 473 if (nfnl_cthelper_fill_info(skb,
471 NETLINK_CB(cb->skb).pid, 474 NETLINK_CB(cb->skb).portid,
472 cb->nlh->nlmsg_seq, 475 cb->nlh->nlmsg_seq,
473 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 476 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
474 NFNL_MSG_CTHELPER_NEW, cur) < 0) { 477 NFNL_MSG_CTHELPER_NEW, cur) < 0) {
@@ -538,7 +541,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
538 break; 541 break;
539 } 542 }
540 543
541 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid, 544 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
542 nlh->nlmsg_seq, 545 nlh->nlmsg_seq,
543 NFNL_MSG_TYPE(nlh->nlmsg_type), 546 NFNL_MSG_TYPE(nlh->nlmsg_type),
544 NFNL_MSG_CTHELPER_NEW, cur); 547 NFNL_MSG_CTHELPER_NEW, cur);
@@ -547,7 +550,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
547 break; 550 break;
548 } 551 }
549 552
550 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid, 553 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
551 MSG_DONTWAIT); 554 MSG_DONTWAIT);
552 if (ret > 0) 555 if (ret > 0)
553 ret = 0; 556 ret = 0;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index cdecbc8fe965..8847b4d8be06 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -155,16 +155,16 @@ err_proto_put:
155} 155}
156 156
157static int 157static int
158ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type, 158ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
159 int event, struct ctnl_timeout *timeout) 159 int event, struct ctnl_timeout *timeout)
160{ 160{
161 struct nlmsghdr *nlh; 161 struct nlmsghdr *nlh;
162 struct nfgenmsg *nfmsg; 162 struct nfgenmsg *nfmsg;
163 unsigned int flags = pid ? NLM_F_MULTI : 0; 163 unsigned int flags = portid ? NLM_F_MULTI : 0;
164 struct nf_conntrack_l4proto *l4proto = timeout->l4proto; 164 struct nf_conntrack_l4proto *l4proto = timeout->l4proto;
165 165
166 event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8; 166 event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
167 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags); 167 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
168 if (nlh == NULL) 168 if (nlh == NULL)
169 goto nlmsg_failure; 169 goto nlmsg_failure;
170 170
@@ -222,7 +222,7 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
222 if (last && cur != last) 222 if (last && cur != last)
223 continue; 223 continue;
224 224
225 if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid, 225 if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
226 cb->nlh->nlmsg_seq, 226 cb->nlh->nlmsg_seq,
227 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 227 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
228 IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) { 228 IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
@@ -268,7 +268,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
268 break; 268 break;
269 } 269 }
270 270
271 ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid, 271 ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).portid,
272 nlh->nlmsg_seq, 272 nlh->nlmsg_seq,
273 NFNL_MSG_TYPE(nlh->nlmsg_type), 273 NFNL_MSG_TYPE(nlh->nlmsg_type),
274 IPCTNL_MSG_TIMEOUT_NEW, cur); 274 IPCTNL_MSG_TIMEOUT_NEW, cur);
@@ -276,7 +276,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
276 kfree_skb(skb2); 276 kfree_skb(skb2);
277 break; 277 break;
278 } 278 }
279 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, 279 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
280 MSG_DONTWAIT); 280 MSG_DONTWAIT);
281 if (ret > 0) 281 if (ret > 0)
282 ret = 0; 282 ret = 0;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 8cfc401e197e..9f199f2e31fa 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -56,7 +56,7 @@ struct nfulnl_instance {
56 struct sk_buff *skb; /* pre-allocatd skb */ 56 struct sk_buff *skb; /* pre-allocatd skb */
57 struct timer_list timer; 57 struct timer_list timer;
58 struct user_namespace *peer_user_ns; /* User namespace of the peer process */ 58 struct user_namespace *peer_user_ns; /* User namespace of the peer process */
59 int peer_pid; /* PID of the peer process */ 59 int peer_portid; /* PORTID of the peer process */
60 60
61 /* configurable parameters */ 61 /* configurable parameters */
62 unsigned int flushtimeout; /* timeout until queue flush */ 62 unsigned int flushtimeout; /* timeout until queue flush */
@@ -133,7 +133,7 @@ instance_put(struct nfulnl_instance *inst)
133static void nfulnl_timer(unsigned long data); 133static void nfulnl_timer(unsigned long data);
134 134
135static struct nfulnl_instance * 135static struct nfulnl_instance *
136instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns) 136instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
137{ 137{
138 struct nfulnl_instance *inst; 138 struct nfulnl_instance *inst;
139 int err; 139 int err;
@@ -164,7 +164,7 @@ instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
164 setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); 164 setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
165 165
166 inst->peer_user_ns = user_ns; 166 inst->peer_user_ns = user_ns;
167 inst->peer_pid = pid; 167 inst->peer_portid = portid;
168 inst->group_num = group_num; 168 inst->group_num = group_num;
169 169
170 inst->qthreshold = NFULNL_QTHRESH_DEFAULT; 170 inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
@@ -336,7 +336,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
336 if (!nlh) 336 if (!nlh)
337 goto out; 337 goto out;
338 } 338 }
339 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid, 339 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_portid,
340 MSG_DONTWAIT); 340 MSG_DONTWAIT);
341 341
342 inst->qlen = 0; 342 inst->qlen = 0;
@@ -704,7 +704,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
704 if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { 704 if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
705 int i; 705 int i;
706 706
707 /* destroy all instances for this pid */ 707 /* destroy all instances for this portid */
708 spin_lock_bh(&instances_lock); 708 spin_lock_bh(&instances_lock);
709 for (i = 0; i < INSTANCE_BUCKETS; i++) { 709 for (i = 0; i < INSTANCE_BUCKETS; i++) {
710 struct hlist_node *tmp, *t2; 710 struct hlist_node *tmp, *t2;
@@ -713,7 +713,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
713 713
714 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { 714 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
715 if ((net_eq(n->net, &init_net)) && 715 if ((net_eq(n->net, &init_net)) &&
716 (n->pid == inst->peer_pid)) 716 (n->portid == inst->peer_portid))
717 __instance_destroy(inst); 717 __instance_destroy(inst);
718 } 718 }
719 } 719 }
@@ -775,7 +775,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
775 } 775 }
776 776
777 inst = instance_lookup_get(group_num); 777 inst = instance_lookup_get(group_num);
778 if (inst && inst->peer_pid != NETLINK_CB(skb).pid) { 778 if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
779 ret = -EPERM; 779 ret = -EPERM;
780 goto out_put; 780 goto out_put;
781 } 781 }
@@ -789,7 +789,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
789 } 789 }
790 790
791 inst = instance_create(group_num, 791 inst = instance_create(group_num,
792 NETLINK_CB(skb).pid, 792 NETLINK_CB(skb).portid,
793 sk_user_ns(NETLINK_CB(skb).ssk)); 793 sk_user_ns(NETLINK_CB(skb).ssk));
794 if (IS_ERR(inst)) { 794 if (IS_ERR(inst)) {
795 ret = PTR_ERR(inst); 795 ret = PTR_ERR(inst);
@@ -948,7 +948,7 @@ static int seq_show(struct seq_file *s, void *v)
948 948
949 return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n", 949 return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
950 inst->group_num, 950 inst->group_num,
951 inst->peer_pid, inst->qlen, 951 inst->peer_portid, inst->qlen,
952 inst->copy_mode, inst->copy_range, 952 inst->copy_mode, inst->copy_range,
953 inst->flushtimeout, atomic_read(&inst->use)); 953 inst->flushtimeout, atomic_read(&inst->use));
954} 954}
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index c0496a55ad0c..e12d44e75b21 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -44,7 +44,7 @@ struct nfqnl_instance {
44 struct hlist_node hlist; /* global list of queues */ 44 struct hlist_node hlist; /* global list of queues */
45 struct rcu_head rcu; 45 struct rcu_head rcu;
46 46
47 int peer_pid; 47 int peer_portid;
48 unsigned int queue_maxlen; 48 unsigned int queue_maxlen;
49 unsigned int copy_range; 49 unsigned int copy_range;
50 unsigned int queue_dropped; 50 unsigned int queue_dropped;
@@ -92,7 +92,7 @@ instance_lookup(u_int16_t queue_num)
92} 92}
93 93
94static struct nfqnl_instance * 94static struct nfqnl_instance *
95instance_create(u_int16_t queue_num, int pid) 95instance_create(u_int16_t queue_num, int portid)
96{ 96{
97 struct nfqnl_instance *inst; 97 struct nfqnl_instance *inst;
98 unsigned int h; 98 unsigned int h;
@@ -111,7 +111,7 @@ instance_create(u_int16_t queue_num, int pid)
111 } 111 }
112 112
113 inst->queue_num = queue_num; 113 inst->queue_num = queue_num;
114 inst->peer_pid = pid; 114 inst->peer_portid = portid;
115 inst->queue_maxlen = NFQNL_QMAX_DEFAULT; 115 inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
116 inst->copy_range = 0xfffff; 116 inst->copy_range = 0xfffff;
117 inst->copy_mode = NFQNL_COPY_NONE; 117 inst->copy_mode = NFQNL_COPY_NONE;
@@ -225,7 +225,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
225{ 225{
226 sk_buff_data_t old_tail; 226 sk_buff_data_t old_tail;
227 size_t size; 227 size_t size;
228 size_t data_len = 0; 228 size_t data_len = 0, cap_len = 0;
229 struct sk_buff *skb; 229 struct sk_buff *skb;
230 struct nlattr *nla; 230 struct nlattr *nla;
231 struct nfqnl_msg_packet_hdr *pmsg; 231 struct nfqnl_msg_packet_hdr *pmsg;
@@ -247,7 +247,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
247#endif 247#endif
248 + nla_total_size(sizeof(u_int32_t)) /* mark */ 248 + nla_total_size(sizeof(u_int32_t)) /* mark */
249 + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) 249 + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
250 + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); 250 + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)
251 + nla_total_size(sizeof(u_int32_t))); /* cap_len */
251 252
252 outdev = entry->outdev; 253 outdev = entry->outdev;
253 254
@@ -266,6 +267,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
266 data_len = entskb->len; 267 data_len = entskb->len;
267 268
268 size += nla_total_size(data_len); 269 size += nla_total_size(data_len);
270 cap_len = entskb->len;
269 break; 271 break;
270 } 272 }
271 273
@@ -402,12 +404,14 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
402 if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0) 404 if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
403 goto nla_put_failure; 405 goto nla_put_failure;
404 406
407 if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
408 goto nla_put_failure;
409
405 nlh->nlmsg_len = skb->tail - old_tail; 410 nlh->nlmsg_len = skb->tail - old_tail;
406 return skb; 411 return skb;
407 412
408nla_put_failure: 413nla_put_failure:
409 if (skb) 414 kfree_skb(skb);
410 kfree_skb(skb);
411 net_err_ratelimited("nf_queue: error creating packet message\n"); 415 net_err_ratelimited("nf_queue: error creating packet message\n");
412 return NULL; 416 return NULL;
413} 417}
@@ -440,7 +444,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
440 } 444 }
441 spin_lock_bh(&queue->lock); 445 spin_lock_bh(&queue->lock);
442 446
443 if (!queue->peer_pid) { 447 if (!queue->peer_portid) {
444 err = -EINVAL; 448 err = -EINVAL;
445 goto err_out_free_nskb; 449 goto err_out_free_nskb;
446 } 450 }
@@ -459,7 +463,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
459 *packet_id_ptr = htonl(entry->id); 463 *packet_id_ptr = htonl(entry->id);
460 464
461 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 465 /* nfnetlink_unicast will either free the nskb or add it to a socket */
462 err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); 466 err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT);
463 if (err < 0) { 467 if (err < 0) {
464 queue->queue_user_dropped++; 468 queue->queue_user_dropped++;
465 goto err_out_unlock; 469 goto err_out_unlock;
@@ -527,9 +531,13 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
527 531
528 case NFQNL_COPY_PACKET: 532 case NFQNL_COPY_PACKET:
529 queue->copy_mode = mode; 533 queue->copy_mode = mode;
530 /* we're using struct nlattr which has 16bit nla_len */ 534 /* We're using struct nlattr which has 16bit nla_len. Note that
531 if (range > 0xffff) 535 * nla_len includes the header length. Thus, the maximum packet
532 queue->copy_range = 0xffff; 536 * length that we support is 65531 bytes. We send truncated
537 * packets if the specified length is larger than that.
538 */
539 if (range > 0xffff - NLA_HDRLEN)
540 queue->copy_range = 0xffff - NLA_HDRLEN;
533 else 541 else
534 queue->copy_range = range; 542 queue->copy_range = range;
535 break; 543 break;
@@ -616,7 +624,7 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
616 if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { 624 if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
617 int i; 625 int i;
618 626
619 /* destroy all instances for this pid */ 627 /* destroy all instances for this portid */
620 spin_lock(&instances_lock); 628 spin_lock(&instances_lock);
621 for (i = 0; i < INSTANCE_BUCKETS; i++) { 629 for (i = 0; i < INSTANCE_BUCKETS; i++) {
622 struct hlist_node *tmp, *t2; 630 struct hlist_node *tmp, *t2;
@@ -625,7 +633,7 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
625 633
626 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { 634 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
627 if ((n->net == &init_net) && 635 if ((n->net == &init_net) &&
628 (n->pid == inst->peer_pid)) 636 (n->portid == inst->peer_portid))
629 __instance_destroy(inst); 637 __instance_destroy(inst);
630 } 638 }
631 } 639 }
@@ -650,7 +658,7 @@ static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
650 [NFQA_MARK] = { .type = NLA_U32 }, 658 [NFQA_MARK] = { .type = NLA_U32 },
651}; 659};
652 660
653static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid) 661static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
654{ 662{
655 struct nfqnl_instance *queue; 663 struct nfqnl_instance *queue;
656 664
@@ -658,7 +666,7 @@ static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
658 if (!queue) 666 if (!queue)
659 return ERR_PTR(-ENODEV); 667 return ERR_PTR(-ENODEV);
660 668
661 if (queue->peer_pid != nlpid) 669 if (queue->peer_portid != nlportid)
662 return ERR_PTR(-EPERM); 670 return ERR_PTR(-EPERM);
663 671
664 return queue; 672 return queue;
@@ -698,7 +706,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
698 LIST_HEAD(batch_list); 706 LIST_HEAD(batch_list);
699 u16 queue_num = ntohs(nfmsg->res_id); 707 u16 queue_num = ntohs(nfmsg->res_id);
700 708
701 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); 709 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
702 if (IS_ERR(queue)) 710 if (IS_ERR(queue))
703 return PTR_ERR(queue); 711 return PTR_ERR(queue);
704 712
@@ -749,7 +757,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
749 queue = instance_lookup(queue_num); 757 queue = instance_lookup(queue_num);
750 if (!queue) 758 if (!queue)
751 759
752 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); 760 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
753 if (IS_ERR(queue)) 761 if (IS_ERR(queue))
754 return PTR_ERR(queue); 762 return PTR_ERR(queue);
755 763
@@ -832,7 +840,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
832 840
833 rcu_read_lock(); 841 rcu_read_lock();
834 queue = instance_lookup(queue_num); 842 queue = instance_lookup(queue_num);
835 if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { 843 if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
836 ret = -EPERM; 844 ret = -EPERM;
837 goto err_out_unlock; 845 goto err_out_unlock;
838 } 846 }
@@ -844,7 +852,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
844 ret = -EBUSY; 852 ret = -EBUSY;
845 goto err_out_unlock; 853 goto err_out_unlock;
846 } 854 }
847 queue = instance_create(queue_num, NETLINK_CB(skb).pid); 855 queue = instance_create(queue_num, NETLINK_CB(skb).portid);
848 if (IS_ERR(queue)) { 856 if (IS_ERR(queue)) {
849 ret = PTR_ERR(queue); 857 ret = PTR_ERR(queue);
850 goto err_out_unlock; 858 goto err_out_unlock;
@@ -1016,7 +1024,7 @@ static int seq_show(struct seq_file *s, void *v)
1016 1024
1017 return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", 1025 return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
1018 inst->queue_num, 1026 inst->queue_num,
1019 inst->peer_pid, inst->queue_total, 1027 inst->peer_portid, inst->queue_total,
1020 inst->copy_mode, inst->copy_range, 1028 inst->copy_mode, inst->copy_range,
1021 inst->queue_dropped, inst->queue_user_dropped, 1029 inst->queue_dropped, inst->queue_user_dropped,
1022 inst->id_sequence, 1); 1030 inst->id_sequence, 1);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 116018560c60..16c712563860 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -72,14 +72,44 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
72 return 0; 72 return 0;
73} 73}
74 74
75static int
76xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
77 const struct xt_tgchk_param *par)
78{
79 struct nf_conntrack_helper *helper;
80 struct nf_conn_help *help;
81 u8 proto;
82
83 proto = xt_ct_find_proto(par);
84 if (!proto) {
85 pr_info("You must specify a L4 protocol, and not use "
86 "inversions on it.\n");
87 return -ENOENT;
88 }
89
90 helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
91 proto);
92 if (helper == NULL) {
93 pr_info("No such helper \"%s\"\n", helper_name);
94 return -ENOENT;
95 }
96
97 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
98 if (help == NULL) {
99 module_put(helper->me);
100 return -ENOMEM;
101 }
102
103 help->helper = helper;
104 return 0;
105}
106
75static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par) 107static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
76{ 108{
77 struct xt_ct_target_info *info = par->targinfo; 109 struct xt_ct_target_info *info = par->targinfo;
78 struct nf_conntrack_tuple t; 110 struct nf_conntrack_tuple t;
79 struct nf_conn_help *help;
80 struct nf_conn *ct; 111 struct nf_conn *ct;
81 int ret = 0; 112 int ret;
82 u8 proto;
83 113
84 if (info->flags & ~XT_CT_NOTRACK) 114 if (info->flags & ~XT_CT_NOTRACK)
85 return -EINVAL; 115 return -EINVAL;
@@ -112,31 +142,9 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
112 goto err3; 142 goto err3;
113 143
114 if (info->helper[0]) { 144 if (info->helper[0]) {
115 struct nf_conntrack_helper *helper; 145 ret = xt_ct_set_helper(ct, info->helper, par);
116 146 if (ret < 0)
117 ret = -ENOENT;
118 proto = xt_ct_find_proto(par);
119 if (!proto) {
120 pr_info("You must specify a L4 protocol, "
121 "and not use inversions on it.\n");
122 goto err3;
123 }
124
125 ret = -ENOENT;
126 helper = nf_conntrack_helper_try_module_get(info->helper,
127 par->family,
128 proto);
129 if (helper == NULL) {
130 pr_info("No such helper \"%s\"\n", info->helper);
131 goto err3;
132 }
133
134 ret = -ENOMEM;
135 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
136 if (help == NULL)
137 goto err3; 147 goto err3;
138
139 help->helper = helper;
140 } 148 }
141 149
142 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 150 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -164,17 +172,77 @@ static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
164} 172}
165#endif 173#endif
166 174
175static int
176xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
177 const char *timeout_name)
178{
179#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
180 typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
181 struct ctnl_timeout *timeout;
182 struct nf_conn_timeout *timeout_ext;
183 const struct ipt_entry *e = par->entryinfo;
184 struct nf_conntrack_l4proto *l4proto;
185 int ret = 0;
186
187 rcu_read_lock();
188 timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
189 if (timeout_find_get == NULL) {
190 ret = -ENOENT;
191 pr_info("Timeout policy base is empty\n");
192 goto out;
193 }
194
195 if (e->ip.invflags & IPT_INV_PROTO) {
196 ret = -EINVAL;
197 pr_info("You cannot use inversion on L4 protocol\n");
198 goto out;
199 }
200
201 timeout = timeout_find_get(timeout_name);
202 if (timeout == NULL) {
203 ret = -ENOENT;
204 pr_info("No such timeout policy \"%s\"\n", timeout_name);
205 goto out;
206 }
207
208 if (timeout->l3num != par->family) {
209 ret = -EINVAL;
210 pr_info("Timeout policy `%s' can only be used by L3 protocol "
211 "number %d\n", timeout_name, timeout->l3num);
212 goto err_put_timeout;
213 }
214 /* Make sure the timeout policy matches any existing protocol tracker,
215 * otherwise default to generic.
216 */
217 l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
218 if (timeout->l4proto->l4proto != l4proto->l4proto) {
219 ret = -EINVAL;
220 pr_info("Timeout policy `%s' can only be used by L4 protocol "
221 "number %d\n",
222 timeout_name, timeout->l4proto->l4proto);
223 goto err_put_timeout;
224 }
225 timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
226 if (timeout_ext == NULL)
227 ret = -ENOMEM;
228
229err_put_timeout:
230 __xt_ct_tg_timeout_put(timeout);
231out:
232 rcu_read_unlock();
233 return ret;
234#else
235 return -EOPNOTSUPP;
236#endif
237}
238
167static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) 239static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
168{ 240{
169 struct xt_ct_target_info_v1 *info = par->targinfo; 241 struct xt_ct_target_info_v1 *info = par->targinfo;
170 struct nf_conntrack_tuple t; 242 struct nf_conntrack_tuple t;
171 struct nf_conn_help *help;
172 struct nf_conn *ct; 243 struct nf_conn *ct;
173 int ret = 0; 244 int ret;
174 u8 proto; 245
175#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
176 struct ctnl_timeout *timeout;
177#endif
178 if (info->flags & ~XT_CT_NOTRACK) 246 if (info->flags & ~XT_CT_NOTRACK)
179 return -EINVAL; 247 return -EINVAL;
180 248
@@ -206,93 +274,16 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
206 goto err3; 274 goto err3;
207 275
208 if (info->helper[0]) { 276 if (info->helper[0]) {
209 struct nf_conntrack_helper *helper; 277 ret = xt_ct_set_helper(ct, info->helper, par);
210 278 if (ret < 0)
211 ret = -ENOENT;
212 proto = xt_ct_find_proto(par);
213 if (!proto) {
214 pr_info("You must specify a L4 protocol, "
215 "and not use inversions on it.\n");
216 goto err3;
217 }
218
219 ret = -ENOENT;
220 helper = nf_conntrack_helper_try_module_get(info->helper,
221 par->family,
222 proto);
223 if (helper == NULL) {
224 pr_info("No such helper \"%s\"\n", info->helper);
225 goto err3;
226 }
227
228 ret = -ENOMEM;
229 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
230 if (help == NULL)
231 goto err3; 279 goto err3;
232
233 help->helper = helper;
234 } 280 }
235 281
236#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
237 if (info->timeout[0]) { 282 if (info->timeout[0]) {
238 typeof(nf_ct_timeout_find_get_hook) timeout_find_get; 283 ret = xt_ct_set_timeout(ct, par, info->timeout);
239 struct nf_conn_timeout *timeout_ext; 284 if (ret < 0)
240 285 goto err3;
241 rcu_read_lock();
242 timeout_find_get =
243 rcu_dereference(nf_ct_timeout_find_get_hook);
244
245 if (timeout_find_get) {
246 const struct ipt_entry *e = par->entryinfo;
247 struct nf_conntrack_l4proto *l4proto;
248
249 if (e->ip.invflags & IPT_INV_PROTO) {
250 ret = -EINVAL;
251 pr_info("You cannot use inversion on "
252 "L4 protocol\n");
253 goto err4;
254 }
255 timeout = timeout_find_get(info->timeout);
256 if (timeout == NULL) {
257 ret = -ENOENT;
258 pr_info("No such timeout policy \"%s\"\n",
259 info->timeout);
260 goto err4;
261 }
262 if (timeout->l3num != par->family) {
263 ret = -EINVAL;
264 pr_info("Timeout policy `%s' can only be "
265 "used by L3 protocol number %d\n",
266 info->timeout, timeout->l3num);
267 goto err5;
268 }
269 /* Make sure the timeout policy matches any existing
270 * protocol tracker, otherwise default to generic.
271 */
272 l4proto = __nf_ct_l4proto_find(par->family,
273 e->ip.proto);
274 if (timeout->l4proto->l4proto != l4proto->l4proto) {
275 ret = -EINVAL;
276 pr_info("Timeout policy `%s' can only be "
277 "used by L4 protocol number %d\n",
278 info->timeout,
279 timeout->l4proto->l4proto);
280 goto err5;
281 }
282 timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
283 GFP_ATOMIC);
284 if (timeout_ext == NULL) {
285 ret = -ENOMEM;
286 goto err5;
287 }
288 } else {
289 ret = -ENOENT;
290 pr_info("Timeout policy base is empty\n");
291 goto err4;
292 }
293 rcu_read_unlock();
294 } 286 }
295#endif
296 287
297 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 288 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
298 __set_bit(IPS_CONFIRMED_BIT, &ct->status); 289 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
@@ -300,12 +291,6 @@ out:
300 info->ct = ct; 291 info->ct = ct;
301 return 0; 292 return 0;
302 293
303#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
304err5:
305 __xt_ct_tg_timeout_put(timeout);
306err4:
307 rcu_read_unlock();
308#endif
309err3: 294err3:
310 nf_conntrack_free(ct); 295 nf_conntrack_free(ct);
311err2: 296err2:
@@ -330,15 +315,30 @@ static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
330 nf_ct_put(info->ct); 315 nf_ct_put(info->ct);
331} 316}
332 317
333static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par) 318static void xt_ct_destroy_timeout(struct nf_conn *ct)
334{ 319{
335 struct xt_ct_target_info_v1 *info = par->targinfo;
336 struct nf_conn *ct = info->ct;
337 struct nf_conn_help *help;
338#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 320#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
339 struct nf_conn_timeout *timeout_ext; 321 struct nf_conn_timeout *timeout_ext;
340 typeof(nf_ct_timeout_put_hook) timeout_put; 322 typeof(nf_ct_timeout_put_hook) timeout_put;
323
324 rcu_read_lock();
325 timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
326
327 if (timeout_put) {
328 timeout_ext = nf_ct_timeout_find(ct);
329 if (timeout_ext)
330 timeout_put(timeout_ext->timeout);
331 }
332 rcu_read_unlock();
341#endif 333#endif
334}
335
336static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
337{
338 struct xt_ct_target_info_v1 *info = par->targinfo;
339 struct nf_conn *ct = info->ct;
340 struct nf_conn_help *help;
341
342 if (!nf_ct_is_untracked(ct)) { 342 if (!nf_ct_is_untracked(ct)) {
343 help = nfct_help(ct); 343 help = nfct_help(ct);
344 if (help) 344 if (help)
@@ -346,17 +346,7 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
346 346
347 nf_ct_l3proto_module_put(par->family); 347 nf_ct_l3proto_module_put(par->family);
348 348
349#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 349 xt_ct_destroy_timeout(ct);
350 rcu_read_lock();
351 timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
352
353 if (timeout_put) {
354 timeout_ext = nf_ct_timeout_find(ct);
355 if (timeout_ext)
356 timeout_put(timeout_ext->timeout);
357 }
358 rcu_read_unlock();
359#endif
360 } 350 }
361 nf_ct_put(info->ct); 351 nf_ct_put(info->ct);
362} 352}
diff --git a/net/netfilter/xt_NETMAP.c b/net/netfilter/xt_NETMAP.c
new file mode 100644
index 000000000000..b253e07cb1c5
--- /dev/null
+++ b/net/netfilter/xt_NETMAP.c
@@ -0,0 +1,165 @@
1/*
2 * (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
3 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/ip.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/netdevice.h>
14#include <linux/ipv6.h>
15#include <linux/netfilter.h>
16#include <linux/netfilter_ipv4.h>
17#include <linux/netfilter_ipv6.h>
18#include <linux/netfilter/x_tables.h>
19#include <net/netfilter/nf_nat.h>
20
21static unsigned int
22netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
23{
24 const struct nf_nat_range *range = par->targinfo;
25 struct nf_nat_range newrange;
26 struct nf_conn *ct;
27 enum ip_conntrack_info ctinfo;
28 union nf_inet_addr new_addr, netmask;
29 unsigned int i;
30
31 ct = nf_ct_get(skb, &ctinfo);
32 for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
33 netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
34 range->max_addr.ip6[i]);
35
36 if (par->hooknum == NF_INET_PRE_ROUTING ||
37 par->hooknum == NF_INET_LOCAL_OUT)
38 new_addr.in6 = ipv6_hdr(skb)->daddr;
39 else
40 new_addr.in6 = ipv6_hdr(skb)->saddr;
41
42 for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
43 new_addr.ip6[i] &= ~netmask.ip6[i];
44 new_addr.ip6[i] |= range->min_addr.ip6[i] &
45 netmask.ip6[i];
46 }
47
48 newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
49 newrange.min_addr = new_addr;
50 newrange.max_addr = new_addr;
51 newrange.min_proto = range->min_proto;
52 newrange.max_proto = range->max_proto;
53
54 return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
55}
56
57static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
58{
59 const struct nf_nat_range *range = par->targinfo;
60
61 if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
62 return -EINVAL;
63 return 0;
64}
65
66static unsigned int
67netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
68{
69 struct nf_conn *ct;
70 enum ip_conntrack_info ctinfo;
71 __be32 new_ip, netmask;
72 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
73 struct nf_nat_range newrange;
74
75 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
76 par->hooknum == NF_INET_POST_ROUTING ||
77 par->hooknum == NF_INET_LOCAL_OUT ||
78 par->hooknum == NF_INET_LOCAL_IN);
79 ct = nf_ct_get(skb, &ctinfo);
80
81 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
82
83 if (par->hooknum == NF_INET_PRE_ROUTING ||
84 par->hooknum == NF_INET_LOCAL_OUT)
85 new_ip = ip_hdr(skb)->daddr & ~netmask;
86 else
87 new_ip = ip_hdr(skb)->saddr & ~netmask;
88 new_ip |= mr->range[0].min_ip & netmask;
89
90 memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
91 memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
92 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
93 newrange.min_addr.ip = new_ip;
94 newrange.max_addr.ip = new_ip;
95 newrange.min_proto = mr->range[0].min;
96 newrange.max_proto = mr->range[0].max;
97
98 /* Hand modified range to generic setup. */
99 return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
100}
101
102static int netmap_tg4_check(const struct xt_tgchk_param *par)
103{
104 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
105
106 if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
107 pr_debug("bad MAP_IPS.\n");
108 return -EINVAL;
109 }
110 if (mr->rangesize != 1) {
111 pr_debug("bad rangesize %u.\n", mr->rangesize);
112 return -EINVAL;
113 }
114 return 0;
115}
116
117static struct xt_target netmap_tg_reg[] __read_mostly = {
118 {
119 .name = "NETMAP",
120 .family = NFPROTO_IPV6,
121 .revision = 0,
122 .target = netmap_tg6,
123 .targetsize = sizeof(struct nf_nat_range),
124 .table = "nat",
125 .hooks = (1 << NF_INET_PRE_ROUTING) |
126 (1 << NF_INET_POST_ROUTING) |
127 (1 << NF_INET_LOCAL_OUT) |
128 (1 << NF_INET_LOCAL_IN),
129 .checkentry = netmap_tg6_checkentry,
130 .me = THIS_MODULE,
131 },
132 {
133 .name = "NETMAP",
134 .family = NFPROTO_IPV4,
135 .revision = 0,
136 .target = netmap_tg4,
137 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
138 .table = "nat",
139 .hooks = (1 << NF_INET_PRE_ROUTING) |
140 (1 << NF_INET_POST_ROUTING) |
141 (1 << NF_INET_LOCAL_OUT) |
142 (1 << NF_INET_LOCAL_IN),
143 .checkentry = netmap_tg4_check,
144 .me = THIS_MODULE,
145 },
146};
147
148static int __init netmap_tg_init(void)
149{
150 return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
151}
152
153static void netmap_tg_exit(void)
154{
155 xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
156}
157
158module_init(netmap_tg_init);
159module_exit(netmap_tg_exit);
160
161MODULE_LICENSE("GPL");
162MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
163MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
164MODULE_ALIAS("ip6t_NETMAP");
165MODULE_ALIAS("ipt_NETMAP");
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 7babe7d68716..817f9e9f2b16 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -43,7 +43,7 @@ static u32 hash_v4(const struct sk_buff *skb)
43 const struct iphdr *iph = ip_hdr(skb); 43 const struct iphdr *iph = ip_hdr(skb);
44 44
45 /* packets in either direction go into same queue */ 45 /* packets in either direction go into same queue */
46 if (iph->saddr < iph->daddr) 46 if ((__force u32)iph->saddr < (__force u32)iph->daddr)
47 return jhash_3words((__force u32)iph->saddr, 47 return jhash_3words((__force u32)iph->saddr,
48 (__force u32)iph->daddr, iph->protocol, jhash_initval); 48 (__force u32)iph->daddr, iph->protocol, jhash_initval);
49 49
@@ -57,7 +57,8 @@ static u32 hash_v6(const struct sk_buff *skb)
57 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 57 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
58 u32 a, b, c; 58 u32 a, b, c;
59 59
60 if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) { 60 if ((__force u32)ip6h->saddr.s6_addr32[3] <
61 (__force u32)ip6h->daddr.s6_addr32[3]) {
61 a = (__force u32) ip6h->saddr.s6_addr32[3]; 62 a = (__force u32) ip6h->saddr.s6_addr32[3];
62 b = (__force u32) ip6h->daddr.s6_addr32[3]; 63 b = (__force u32) ip6h->daddr.s6_addr32[3];
63 } else { 64 } else {
@@ -65,7 +66,8 @@ static u32 hash_v6(const struct sk_buff *skb)
65 a = (__force u32) ip6h->daddr.s6_addr32[3]; 66 a = (__force u32) ip6h->daddr.s6_addr32[3];
66 } 67 }
67 68
68 if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1]) 69 if ((__force u32)ip6h->saddr.s6_addr32[1] <
70 (__force u32)ip6h->daddr.s6_addr32[1])
69 c = (__force u32) ip6h->saddr.s6_addr32[1]; 71 c = (__force u32) ip6h->saddr.s6_addr32[1];
70 else 72 else
71 c = (__force u32) ip6h->daddr.s6_addr32[1]; 73 c = (__force u32) ip6h->daddr.s6_addr32[1];
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
deleted file mode 100644
index 9d782181b6c8..000000000000
--- a/net/netfilter/xt_NOTRACK.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/* This is a module which is used for setting up fake conntracks
2 * on packets so that they are not seen by the conntrack/NAT code.
3 */
4#include <linux/module.h>
5#include <linux/skbuff.h>
6
7#include <linux/netfilter/x_tables.h>
8#include <net/netfilter/nf_conntrack.h>
9
10MODULE_DESCRIPTION("Xtables: Disabling connection tracking for packets");
11MODULE_LICENSE("GPL");
12MODULE_ALIAS("ipt_NOTRACK");
13MODULE_ALIAS("ip6t_NOTRACK");
14
15static unsigned int
16notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
17{
18 /* Previously seen (loopback)? Ignore. */
19 if (skb->nfct != NULL)
20 return XT_CONTINUE;
21
22 /* Attach fake conntrack entry.
23 If there is a real ct entry correspondig to this packet,
24 it'll hang aroun till timing out. We don't deal with it
25 for performance reasons. JK */
26 skb->nfct = &nf_ct_untracked_get()->ct_general;
27 skb->nfctinfo = IP_CT_NEW;
28 nf_conntrack_get(skb->nfct);
29
30 return XT_CONTINUE;
31}
32
33static struct xt_target notrack_tg_reg __read_mostly = {
34 .name = "NOTRACK",
35 .revision = 0,
36 .family = NFPROTO_UNSPEC,
37 .target = notrack_tg,
38 .table = "raw",
39 .me = THIS_MODULE,
40};
41
42static int __init notrack_tg_init(void)
43{
44 return xt_register_target(&notrack_tg_reg);
45}
46
47static void __exit notrack_tg_exit(void)
48{
49 xt_unregister_target(&notrack_tg_reg);
50}
51
52module_init(notrack_tg_init);
53module_exit(notrack_tg_exit);
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
new file mode 100644
index 000000000000..22a10309297c
--- /dev/null
+++ b/net/netfilter/xt_REDIRECT.c
@@ -0,0 +1,190 @@
1/*
2 * (C) 1999-2001 Paul `Rusty' Russell
3 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
4 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
11 * NAT funded by Astaro.
12 */
13
14#include <linux/if.h>
15#include <linux/inetdevice.h>
16#include <linux/ip.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/netfilter.h>
21#include <linux/types.h>
22#include <linux/netfilter_ipv4.h>
23#include <linux/netfilter_ipv6.h>
24#include <linux/netfilter/x_tables.h>
25#include <net/addrconf.h>
26#include <net/checksum.h>
27#include <net/protocol.h>
28#include <net/netfilter/nf_nat.h>
29
30static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
31
32static unsigned int
33redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
34{
35 const struct nf_nat_range *range = par->targinfo;
36 struct nf_nat_range newrange;
37 struct in6_addr newdst;
38 enum ip_conntrack_info ctinfo;
39 struct nf_conn *ct;
40
41 ct = nf_ct_get(skb, &ctinfo);
42 if (par->hooknum == NF_INET_LOCAL_OUT)
43 newdst = loopback_addr;
44 else {
45 struct inet6_dev *idev;
46 struct inet6_ifaddr *ifa;
47 bool addr = false;
48
49 rcu_read_lock();
50 idev = __in6_dev_get(skb->dev);
51 if (idev != NULL) {
52 list_for_each_entry(ifa, &idev->addr_list, if_list) {
53 newdst = ifa->addr;
54 addr = true;
55 break;
56 }
57 }
58 rcu_read_unlock();
59
60 if (!addr)
61 return NF_DROP;
62 }
63
64 newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
65 newrange.min_addr.in6 = newdst;
66 newrange.max_addr.in6 = newdst;
67 newrange.min_proto = range->min_proto;
68 newrange.max_proto = range->max_proto;
69
70 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
71}
72
73static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
74{
75 const struct nf_nat_range *range = par->targinfo;
76
77 if (range->flags & NF_NAT_RANGE_MAP_IPS)
78 return -EINVAL;
79 return 0;
80}
81
82/* FIXME: Take multiple ranges --RR */
83static int redirect_tg4_check(const struct xt_tgchk_param *par)
84{
85 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
86
87 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
88 pr_debug("bad MAP_IPS.\n");
89 return -EINVAL;
90 }
91 if (mr->rangesize != 1) {
92 pr_debug("bad rangesize %u.\n", mr->rangesize);
93 return -EINVAL;
94 }
95 return 0;
96}
97
98static unsigned int
99redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
100{
101 struct nf_conn *ct;
102 enum ip_conntrack_info ctinfo;
103 __be32 newdst;
104 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
105 struct nf_nat_range newrange;
106
107 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
108 par->hooknum == NF_INET_LOCAL_OUT);
109
110 ct = nf_ct_get(skb, &ctinfo);
111 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
112
113 /* Local packets: make them go to loopback */
114 if (par->hooknum == NF_INET_LOCAL_OUT)
115 newdst = htonl(0x7F000001);
116 else {
117 struct in_device *indev;
118 struct in_ifaddr *ifa;
119
120 newdst = 0;
121
122 rcu_read_lock();
123 indev = __in_dev_get_rcu(skb->dev);
124 if (indev && (ifa = indev->ifa_list))
125 newdst = ifa->ifa_local;
126 rcu_read_unlock();
127
128 if (!newdst)
129 return NF_DROP;
130 }
131
132 /* Transfer from original range. */
133 memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
134 memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
135 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
136 newrange.min_addr.ip = newdst;
137 newrange.max_addr.ip = newdst;
138 newrange.min_proto = mr->range[0].min;
139 newrange.max_proto = mr->range[0].max;
140
141 /* Hand modified range to generic setup. */
142 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
143}
144
145static struct xt_target redirect_tg_reg[] __read_mostly = {
146 {
147 .name = "REDIRECT",
148 .family = NFPROTO_IPV6,
149 .revision = 0,
150 .table = "nat",
151 .checkentry = redirect_tg6_checkentry,
152 .target = redirect_tg6,
153 .targetsize = sizeof(struct nf_nat_range),
154 .hooks = (1 << NF_INET_PRE_ROUTING) |
155 (1 << NF_INET_LOCAL_OUT),
156 .me = THIS_MODULE,
157 },
158 {
159 .name = "REDIRECT",
160 .family = NFPROTO_IPV4,
161 .revision = 0,
162 .table = "nat",
163 .target = redirect_tg4,
164 .checkentry = redirect_tg4_check,
165 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
166 .hooks = (1 << NF_INET_PRE_ROUTING) |
167 (1 << NF_INET_LOCAL_OUT),
168 .me = THIS_MODULE,
169 },
170};
171
172static int __init redirect_tg_init(void)
173{
174 return xt_register_targets(redirect_tg_reg,
175 ARRAY_SIZE(redirect_tg_reg));
176}
177
178static void __exit redirect_tg_exit(void)
179{
180 xt_unregister_targets(redirect_tg_reg, ARRAY_SIZE(redirect_tg_reg));
181}
182
183module_init(redirect_tg_init);
184module_exit(redirect_tg_exit);
185
186MODULE_LICENSE("GPL");
187MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
188MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
189MODULE_ALIAS("ip6t_REDIRECT");
190MODULE_ALIAS("ipt_REDIRECT");
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
new file mode 100644
index 000000000000..81aafa8e4fef
--- /dev/null
+++ b/net/netfilter/xt_nat.c
@@ -0,0 +1,170 @@
1/*
2 * (C) 1999-2001 Paul `Rusty' Russell
3 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
4 * (C) 2011 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/netfilter.h>
14#include <linux/netfilter/x_tables.h>
15#include <net/netfilter/nf_nat_core.h>
16
17static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
18{
19 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
20
21 if (mr->rangesize != 1) {
22 pr_info("%s: multiple ranges no longer supported\n",
23 par->target->name);
24 return -EINVAL;
25 }
26 return 0;
27}
28
29static void xt_nat_convert_range(struct nf_nat_range *dst,
30 const struct nf_nat_ipv4_range *src)
31{
32 memset(&dst->min_addr, 0, sizeof(dst->min_addr));
33 memset(&dst->max_addr, 0, sizeof(dst->max_addr));
34
35 dst->flags = src->flags;
36 dst->min_addr.ip = src->min_ip;
37 dst->max_addr.ip = src->max_ip;
38 dst->min_proto = src->min;
39 dst->max_proto = src->max;
40}
41
42static unsigned int
43xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
44{
45 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
46 struct nf_nat_range range;
47 enum ip_conntrack_info ctinfo;
48 struct nf_conn *ct;
49
50 ct = nf_ct_get(skb, &ctinfo);
51 NF_CT_ASSERT(ct != NULL &&
52 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
53 ctinfo == IP_CT_RELATED_REPLY));
54
55 xt_nat_convert_range(&range, &mr->range[0]);
56 return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
57}
58
59static unsigned int
60xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
61{
62 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
63 struct nf_nat_range range;
64 enum ip_conntrack_info ctinfo;
65 struct nf_conn *ct;
66
67 ct = nf_ct_get(skb, &ctinfo);
68 NF_CT_ASSERT(ct != NULL &&
69 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
70
71 xt_nat_convert_range(&range, &mr->range[0]);
72 return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
73}
74
75static unsigned int
76xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
77{
78 const struct nf_nat_range *range = par->targinfo;
79 enum ip_conntrack_info ctinfo;
80 struct nf_conn *ct;
81
82 ct = nf_ct_get(skb, &ctinfo);
83 NF_CT_ASSERT(ct != NULL &&
84 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
85 ctinfo == IP_CT_RELATED_REPLY));
86
87 return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
88}
89
90static unsigned int
91xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
92{
93 const struct nf_nat_range *range = par->targinfo;
94 enum ip_conntrack_info ctinfo;
95 struct nf_conn *ct;
96
97 ct = nf_ct_get(skb, &ctinfo);
98 NF_CT_ASSERT(ct != NULL &&
99 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
100
101 return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST);
102}
103
104static struct xt_target xt_nat_target_reg[] __read_mostly = {
105 {
106 .name = "SNAT",
107 .revision = 0,
108 .checkentry = xt_nat_checkentry_v0,
109 .target = xt_snat_target_v0,
110 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
111 .family = NFPROTO_IPV4,
112 .table = "nat",
113 .hooks = (1 << NF_INET_POST_ROUTING) |
114 (1 << NF_INET_LOCAL_OUT),
115 .me = THIS_MODULE,
116 },
117 {
118 .name = "DNAT",
119 .revision = 0,
120 .checkentry = xt_nat_checkentry_v0,
121 .target = xt_dnat_target_v0,
122 .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
123 .family = NFPROTO_IPV4,
124 .table = "nat",
125 .hooks = (1 << NF_INET_PRE_ROUTING) |
126 (1 << NF_INET_LOCAL_IN),
127 .me = THIS_MODULE,
128 },
129 {
130 .name = "SNAT",
131 .revision = 1,
132 .target = xt_snat_target_v1,
133 .targetsize = sizeof(struct nf_nat_range),
134 .table = "nat",
135 .hooks = (1 << NF_INET_POST_ROUTING) |
136 (1 << NF_INET_LOCAL_OUT),
137 .me = THIS_MODULE,
138 },
139 {
140 .name = "DNAT",
141 .revision = 1,
142 .target = xt_dnat_target_v1,
143 .targetsize = sizeof(struct nf_nat_range),
144 .table = "nat",
145 .hooks = (1 << NF_INET_PRE_ROUTING) |
146 (1 << NF_INET_LOCAL_IN),
147 .me = THIS_MODULE,
148 },
149};
150
151static int __init xt_nat_init(void)
152{
153 return xt_register_targets(xt_nat_target_reg,
154 ARRAY_SIZE(xt_nat_target_reg));
155}
156
157static void __exit xt_nat_exit(void)
158{
159 xt_unregister_targets(xt_nat_target_reg, ARRAY_SIZE(xt_nat_target_reg));
160}
161
162module_init(xt_nat_init);
163module_exit(xt_nat_exit);
164
165MODULE_LICENSE("GPL");
166MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
167MODULE_ALIAS("ipt_SNAT");
168MODULE_ALIAS("ipt_DNAT");
169MODULE_ALIAS("ip6t_SNAT");
170MODULE_ALIAS("ip6t_DNAT");
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 846f895cb656..a5e673d32bda 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -269,7 +269,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
269 mss <<= 8; 269 mss <<= 8;
270 mss |= optp[2]; 270 mss |= optp[2];
271 271
272 mss = ntohs(mss); 272 mss = ntohs((__force __be16)mss);
273 break; 273 break;
274 case OSFOPT_TS: 274 case OSFOPT_TS:
275 loop_cont = 1; 275 loop_cont = 1;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index c6f7db720d84..865a9e54f3ad 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -356,6 +356,27 @@ static struct xt_match set_matches[] __read_mostly = {
356 .destroy = set_match_v1_destroy, 356 .destroy = set_match_v1_destroy,
357 .me = THIS_MODULE 357 .me = THIS_MODULE
358 }, 358 },
359 /* --return-nomatch flag support */
360 {
361 .name = "set",
362 .family = NFPROTO_IPV4,
363 .revision = 2,
364 .match = set_match_v1,
365 .matchsize = sizeof(struct xt_set_info_match_v1),
366 .checkentry = set_match_v1_checkentry,
367 .destroy = set_match_v1_destroy,
368 .me = THIS_MODULE
369 },
370 {
371 .name = "set",
372 .family = NFPROTO_IPV6,
373 .revision = 2,
374 .match = set_match_v1,
375 .matchsize = sizeof(struct xt_set_info_match_v1),
376 .checkentry = set_match_v1_checkentry,
377 .destroy = set_match_v1_destroy,
378 .me = THIS_MODULE
379 },
359}; 380};
360 381
361static struct xt_target set_targets[] __read_mostly = { 382static struct xt_target set_targets[] __read_mostly = {
@@ -389,6 +410,7 @@ static struct xt_target set_targets[] __read_mostly = {
389 .destroy = set_target_v1_destroy, 410 .destroy = set_target_v1_destroy,
390 .me = THIS_MODULE 411 .me = THIS_MODULE
391 }, 412 },
413 /* --timeout and --exist flags support */
392 { 414 {
393 .name = "SET", 415 .name = "SET",
394 .revision = 2, 416 .revision = 2,
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 9ea482d08cf7..63b2bdb59e95 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -108,9 +108,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
108 const struct iphdr *iph = ip_hdr(skb); 108 const struct iphdr *iph = ip_hdr(skb);
109 struct udphdr _hdr, *hp = NULL; 109 struct udphdr _hdr, *hp = NULL;
110 struct sock *sk; 110 struct sock *sk;
111 __be32 daddr, saddr; 111 __be32 uninitialized_var(daddr), uninitialized_var(saddr);
112 __be16 dport, sport; 112 __be16 uninitialized_var(dport), uninitialized_var(sport);
113 u8 protocol; 113 u8 uninitialized_var(protocol);
114#ifdef XT_SOCKET_HAVE_CONNTRACK 114#ifdef XT_SOCKET_HAVE_CONNTRACK
115 struct nf_conn const *ct; 115 struct nf_conn const *ct;
116 enum ip_conntrack_info ctinfo; 116 enum ip_conntrack_info ctinfo;
@@ -261,9 +261,9 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
261 struct ipv6hdr *iph = ipv6_hdr(skb); 261 struct ipv6hdr *iph = ipv6_hdr(skb);
262 struct udphdr _hdr, *hp = NULL; 262 struct udphdr _hdr, *hp = NULL;
263 struct sock *sk; 263 struct sock *sk;
264 struct in6_addr *daddr, *saddr; 264 struct in6_addr *daddr = NULL, *saddr = NULL;
265 __be16 dport, sport; 265 __be16 uninitialized_var(dport), uninitialized_var(sport);
266 int thoff = 0, tproto; 266 int thoff = 0, uninitialized_var(tproto);
267 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; 267 const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
268 268
269 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); 269 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index c48975ff8ea2..0ae55a36f492 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -42,6 +42,7 @@ static const u_int16_t days_since_leapyear[] = {
42 */ 42 */
43enum { 43enum {
44 DSE_FIRST = 2039, 44 DSE_FIRST = 2039,
45 SECONDS_PER_DAY = 86400,
45}; 46};
46static const u_int16_t days_since_epoch[] = { 47static const u_int16_t days_since_epoch[] = {
47 /* 2039 - 2030 */ 48 /* 2039 - 2030 */
@@ -78,7 +79,7 @@ static inline unsigned int localtime_1(struct xtm *r, time_t time)
78 unsigned int v, w; 79 unsigned int v, w;
79 80
80 /* Each day has 86400s, so finding the hour/minute is actually easy. */ 81 /* Each day has 86400s, so finding the hour/minute is actually easy. */
81 v = time % 86400; 82 v = time % SECONDS_PER_DAY;
82 r->second = v % 60; 83 r->second = v % 60;
83 w = v / 60; 84 w = v / 60;
84 r->minute = w % 60; 85 r->minute = w % 60;
@@ -199,6 +200,18 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
199 if (packet_time < info->daytime_start && 200 if (packet_time < info->daytime_start &&
200 packet_time > info->daytime_stop) 201 packet_time > info->daytime_stop)
201 return false; 202 return false;
203
204 /** if user asked to ignore 'next day', then e.g.
205 * '1 PM Wed, August 1st' should be treated
206 * like 'Tue 1 PM July 31st'.
207 *
208 * This also causes
209 * 'Monday, "23:00 to 01:00", to match for 2 hours, starting
210 * Monday 23:00 to Tuesday 01:00.
211 */
212 if ((info->flags & XT_TIME_CONTIGUOUS) &&
213 packet_time <= info->daytime_stop)
214 stamp -= SECONDS_PER_DAY;
202 } 215 }
203 216
204 localtime_2(&current_time, stamp); 217 localtime_2(&current_time, stamp);
@@ -227,6 +240,15 @@ static int time_mt_check(const struct xt_mtchk_param *par)
227 return -EDOM; 240 return -EDOM;
228 } 241 }
229 242
243 if (info->flags & ~XT_TIME_ALL_FLAGS) {
244 pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS);
245 return -EINVAL;
246 }
247
248 if ((info->flags & XT_TIME_CONTIGUOUS) &&
249 info->daytime_start < info->daytime_stop)
250 return -EINVAL;
251
230 return 0; 252 return 0;
231} 253}
232 254
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 6bf878335d94..c15042f987bd 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -627,7 +627,7 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg)
627 struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg; 627 struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg;
628 void *data; 628 void *data;
629 629
630 data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, 630 data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
631 cb_arg->seq, &netlbl_cipsov4_gnl_family, 631 cb_arg->seq, &netlbl_cipsov4_gnl_family,
632 NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL); 632 NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL);
633 if (data == NULL) 633 if (data == NULL)
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 4809e2e48b02..c5384ffc6146 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -448,7 +448,7 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg)
448 struct netlbl_domhsh_walk_arg *cb_arg = arg; 448 struct netlbl_domhsh_walk_arg *cb_arg = arg;
449 void *data; 449 void *data;
450 450
451 data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, 451 data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
452 cb_arg->seq, &netlbl_mgmt_gnl_family, 452 cb_arg->seq, &netlbl_mgmt_gnl_family,
453 NLM_F_MULTI, NLBL_MGMT_C_LISTALL); 453 NLM_F_MULTI, NLBL_MGMT_C_LISTALL);
454 if (data == NULL) 454 if (data == NULL)
@@ -613,7 +613,7 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb,
613 int ret_val = -ENOMEM; 613 int ret_val = -ENOMEM;
614 void *data; 614 void *data;
615 615
616 data = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 616 data = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
617 &netlbl_mgmt_gnl_family, NLM_F_MULTI, 617 &netlbl_mgmt_gnl_family, NLM_F_MULTI,
618 NLBL_MGMT_C_PROTOCOLS); 618 NLBL_MGMT_C_PROTOCOLS);
619 if (data == NULL) 619 if (data == NULL)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 729a345c75a4..847d495cd4de 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1096,7 +1096,7 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
1096 char *secctx; 1096 char *secctx;
1097 u32 secctx_len; 1097 u32 secctx_len;
1098 1098
1099 data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, 1099 data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
1100 cb_arg->seq, &netlbl_unlabel_gnl_family, 1100 cb_arg->seq, &netlbl_unlabel_gnl_family,
1101 NLM_F_MULTI, cmd); 1101 NLM_F_MULTI, cmd);
1102 if (data == NULL) 1102 if (data == NULL)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 382119917166..0f2e3ad69c47 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -67,8 +67,8 @@
67struct netlink_sock { 67struct netlink_sock {
68 /* struct sock has to be the first member of netlink_sock */ 68 /* struct sock has to be the first member of netlink_sock */
69 struct sock sk; 69 struct sock sk;
70 u32 pid; 70 u32 portid;
71 u32 dst_pid; 71 u32 dst_portid;
72 u32 dst_group; 72 u32 dst_group;
73 u32 flags; 73 u32 flags;
74 u32 subscriptions; 74 u32 subscriptions;
@@ -104,7 +104,7 @@ static inline int netlink_is_kernel(struct sock *sk)
104 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; 104 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
105} 105}
106 106
107struct nl_pid_hash { 107struct nl_portid_hash {
108 struct hlist_head *table; 108 struct hlist_head *table;
109 unsigned long rehash_time; 109 unsigned long rehash_time;
110 110
@@ -118,10 +118,10 @@ struct nl_pid_hash {
118}; 118};
119 119
120struct netlink_table { 120struct netlink_table {
121 struct nl_pid_hash hash; 121 struct nl_portid_hash hash;
122 struct hlist_head mc_list; 122 struct hlist_head mc_list;
123 struct listeners __rcu *listeners; 123 struct listeners __rcu *listeners;
124 unsigned int nl_nonroot; 124 unsigned int flags;
125 unsigned int groups; 125 unsigned int groups;
126 struct mutex *cb_mutex; 126 struct mutex *cb_mutex;
127 struct module *module; 127 struct module *module;
@@ -145,9 +145,9 @@ static inline u32 netlink_group_mask(u32 group)
145 return group ? 1 << (group - 1) : 0; 145 return group ? 1 << (group - 1) : 0;
146} 146}
147 147
148static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) 148static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
149{ 149{
150 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; 150 return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
151} 151}
152 152
153static void netlink_destroy_callback(struct netlink_callback *cb) 153static void netlink_destroy_callback(struct netlink_callback *cb)
@@ -239,17 +239,17 @@ netlink_unlock_table(void)
239 wake_up(&nl_table_wait); 239 wake_up(&nl_table_wait);
240} 240}
241 241
242static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid) 242static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
243{ 243{
244 struct nl_pid_hash *hash = &nl_table[protocol].hash; 244 struct nl_portid_hash *hash = &nl_table[protocol].hash;
245 struct hlist_head *head; 245 struct hlist_head *head;
246 struct sock *sk; 246 struct sock *sk;
247 struct hlist_node *node; 247 struct hlist_node *node;
248 248
249 read_lock(&nl_table_lock); 249 read_lock(&nl_table_lock);
250 head = nl_pid_hashfn(hash, pid); 250 head = nl_portid_hashfn(hash, portid);
251 sk_for_each(sk, node, head) { 251 sk_for_each(sk, node, head) {
252 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) { 252 if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
253 sock_hold(sk); 253 sock_hold(sk);
254 goto found; 254 goto found;
255 } 255 }
@@ -260,7 +260,7 @@ found:
260 return sk; 260 return sk;
261} 261}
262 262
263static struct hlist_head *nl_pid_hash_zalloc(size_t size) 263static struct hlist_head *nl_portid_hash_zalloc(size_t size)
264{ 264{
265 if (size <= PAGE_SIZE) 265 if (size <= PAGE_SIZE)
266 return kzalloc(size, GFP_ATOMIC); 266 return kzalloc(size, GFP_ATOMIC);
@@ -270,7 +270,7 @@ static struct hlist_head *nl_pid_hash_zalloc(size_t size)
270 get_order(size)); 270 get_order(size));
271} 271}
272 272
273static void nl_pid_hash_free(struct hlist_head *table, size_t size) 273static void nl_portid_hash_free(struct hlist_head *table, size_t size)
274{ 274{
275 if (size <= PAGE_SIZE) 275 if (size <= PAGE_SIZE)
276 kfree(table); 276 kfree(table);
@@ -278,7 +278,7 @@ static void nl_pid_hash_free(struct hlist_head *table, size_t size)
278 free_pages((unsigned long)table, get_order(size)); 278 free_pages((unsigned long)table, get_order(size));
279} 279}
280 280
281static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow) 281static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
282{ 282{
283 unsigned int omask, mask, shift; 283 unsigned int omask, mask, shift;
284 size_t osize, size; 284 size_t osize, size;
@@ -296,7 +296,7 @@ static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
296 size *= 2; 296 size *= 2;
297 } 297 }
298 298
299 table = nl_pid_hash_zalloc(size); 299 table = nl_portid_hash_zalloc(size);
300 if (!table) 300 if (!table)
301 return 0; 301 return 0;
302 302
@@ -311,23 +311,23 @@ static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
311 struct hlist_node *node, *tmp; 311 struct hlist_node *node, *tmp;
312 312
313 sk_for_each_safe(sk, node, tmp, &otable[i]) 313 sk_for_each_safe(sk, node, tmp, &otable[i])
314 __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid)); 314 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
315 } 315 }
316 316
317 nl_pid_hash_free(otable, osize); 317 nl_portid_hash_free(otable, osize);
318 hash->rehash_time = jiffies + 10 * 60 * HZ; 318 hash->rehash_time = jiffies + 10 * 60 * HZ;
319 return 1; 319 return 1;
320} 320}
321 321
322static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len) 322static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
323{ 323{
324 int avg = hash->entries >> hash->shift; 324 int avg = hash->entries >> hash->shift;
325 325
326 if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1)) 326 if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
327 return 1; 327 return 1;
328 328
329 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) { 329 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
330 nl_pid_hash_rehash(hash, 0); 330 nl_portid_hash_rehash(hash, 0);
331 return 1; 331 return 1;
332 } 332 }
333 333
@@ -356,9 +356,9 @@ netlink_update_listeners(struct sock *sk)
356 * makes sure updates are visible before bind or setsockopt return. */ 356 * makes sure updates are visible before bind or setsockopt return. */
357} 357}
358 358
359static int netlink_insert(struct sock *sk, struct net *net, u32 pid) 359static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
360{ 360{
361 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; 361 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
362 struct hlist_head *head; 362 struct hlist_head *head;
363 int err = -EADDRINUSE; 363 int err = -EADDRINUSE;
364 struct sock *osk; 364 struct sock *osk;
@@ -366,10 +366,10 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
366 int len; 366 int len;
367 367
368 netlink_table_grab(); 368 netlink_table_grab();
369 head = nl_pid_hashfn(hash, pid); 369 head = nl_portid_hashfn(hash, portid);
370 len = 0; 370 len = 0;
371 sk_for_each(osk, node, head) { 371 sk_for_each(osk, node, head) {
372 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid)) 372 if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
373 break; 373 break;
374 len++; 374 len++;
375 } 375 }
@@ -377,17 +377,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
377 goto err; 377 goto err;
378 378
379 err = -EBUSY; 379 err = -EBUSY;
380 if (nlk_sk(sk)->pid) 380 if (nlk_sk(sk)->portid)
381 goto err; 381 goto err;
382 382
383 err = -ENOMEM; 383 err = -ENOMEM;
384 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX)) 384 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
385 goto err; 385 goto err;
386 386
387 if (len && nl_pid_hash_dilute(hash, len)) 387 if (len && nl_portid_hash_dilute(hash, len))
388 head = nl_pid_hashfn(hash, pid); 388 head = nl_portid_hashfn(hash, portid);
389 hash->entries++; 389 hash->entries++;
390 nlk_sk(sk)->pid = pid; 390 nlk_sk(sk)->portid = portid;
391 sk_add_node(sk, head); 391 sk_add_node(sk, head);
392 err = 0; 392 err = 0;
393 393
@@ -518,11 +518,11 @@ static int netlink_release(struct socket *sock)
518 518
519 skb_queue_purge(&sk->sk_write_queue); 519 skb_queue_purge(&sk->sk_write_queue);
520 520
521 if (nlk->pid) { 521 if (nlk->portid) {
522 struct netlink_notify n = { 522 struct netlink_notify n = {
523 .net = sock_net(sk), 523 .net = sock_net(sk),
524 .protocol = sk->sk_protocol, 524 .protocol = sk->sk_protocol,
525 .pid = nlk->pid, 525 .portid = nlk->portid,
526 }; 526 };
527 atomic_notifier_call_chain(&netlink_chain, 527 atomic_notifier_call_chain(&netlink_chain,
528 NETLINK_URELEASE, &n); 528 NETLINK_URELEASE, &n);
@@ -536,6 +536,8 @@ static int netlink_release(struct socket *sock)
536 if (--nl_table[sk->sk_protocol].registered == 0) { 536 if (--nl_table[sk->sk_protocol].registered == 0) {
537 kfree(nl_table[sk->sk_protocol].listeners); 537 kfree(nl_table[sk->sk_protocol].listeners);
538 nl_table[sk->sk_protocol].module = NULL; 538 nl_table[sk->sk_protocol].module = NULL;
539 nl_table[sk->sk_protocol].bind = NULL;
540 nl_table[sk->sk_protocol].flags = 0;
539 nl_table[sk->sk_protocol].registered = 0; 541 nl_table[sk->sk_protocol].registered = 0;
540 } 542 }
541 } else if (nlk->subscriptions) { 543 } else if (nlk->subscriptions) {
@@ -557,24 +559,24 @@ static int netlink_autobind(struct socket *sock)
557{ 559{
558 struct sock *sk = sock->sk; 560 struct sock *sk = sock->sk;
559 struct net *net = sock_net(sk); 561 struct net *net = sock_net(sk);
560 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; 562 struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
561 struct hlist_head *head; 563 struct hlist_head *head;
562 struct sock *osk; 564 struct sock *osk;
563 struct hlist_node *node; 565 struct hlist_node *node;
564 s32 pid = task_tgid_vnr(current); 566 s32 portid = task_tgid_vnr(current);
565 int err; 567 int err;
566 static s32 rover = -4097; 568 static s32 rover = -4097;
567 569
568retry: 570retry:
569 cond_resched(); 571 cond_resched();
570 netlink_table_grab(); 572 netlink_table_grab();
571 head = nl_pid_hashfn(hash, pid); 573 head = nl_portid_hashfn(hash, portid);
572 sk_for_each(osk, node, head) { 574 sk_for_each(osk, node, head) {
573 if (!net_eq(sock_net(osk), net)) 575 if (!net_eq(sock_net(osk), net))
574 continue; 576 continue;
575 if (nlk_sk(osk)->pid == pid) { 577 if (nlk_sk(osk)->portid == portid) {
576 /* Bind collision, search negative pid values. */ 578 /* Bind collision, search negative portid values. */
577 pid = rover--; 579 portid = rover--;
578 if (rover > -4097) 580 if (rover > -4097)
579 rover = -4097; 581 rover = -4097;
580 netlink_table_ungrab(); 582 netlink_table_ungrab();
@@ -583,7 +585,7 @@ retry:
583 } 585 }
584 netlink_table_ungrab(); 586 netlink_table_ungrab();
585 587
586 err = netlink_insert(sk, net, pid); 588 err = netlink_insert(sk, net, portid);
587 if (err == -EADDRINUSE) 589 if (err == -EADDRINUSE)
588 goto retry; 590 goto retry;
589 591
@@ -596,7 +598,7 @@ retry:
596 598
597static inline int netlink_capable(const struct socket *sock, unsigned int flag) 599static inline int netlink_capable(const struct socket *sock, unsigned int flag)
598{ 600{
599 return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) || 601 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
600 capable(CAP_NET_ADMIN); 602 capable(CAP_NET_ADMIN);
601} 603}
602 604
@@ -659,15 +661,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
659 661
660 /* Only superuser is allowed to listen multicasts */ 662 /* Only superuser is allowed to listen multicasts */
661 if (nladdr->nl_groups) { 663 if (nladdr->nl_groups) {
662 if (!netlink_capable(sock, NL_NONROOT_RECV)) 664 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
663 return -EPERM; 665 return -EPERM;
664 err = netlink_realloc_groups(sk); 666 err = netlink_realloc_groups(sk);
665 if (err) 667 if (err)
666 return err; 668 return err;
667 } 669 }
668 670
669 if (nlk->pid) { 671 if (nlk->portid) {
670 if (nladdr->nl_pid != nlk->pid) 672 if (nladdr->nl_pid != nlk->portid)
671 return -EINVAL; 673 return -EINVAL;
672 } else { 674 } else {
673 err = nladdr->nl_pid ? 675 err = nladdr->nl_pid ?
@@ -713,7 +715,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
713 715
714 if (addr->sa_family == AF_UNSPEC) { 716 if (addr->sa_family == AF_UNSPEC) {
715 sk->sk_state = NETLINK_UNCONNECTED; 717 sk->sk_state = NETLINK_UNCONNECTED;
716 nlk->dst_pid = 0; 718 nlk->dst_portid = 0;
717 nlk->dst_group = 0; 719 nlk->dst_group = 0;
718 return 0; 720 return 0;
719 } 721 }
@@ -721,15 +723,15 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
721 return -EINVAL; 723 return -EINVAL;
722 724
723 /* Only superuser is allowed to send multicasts */ 725 /* Only superuser is allowed to send multicasts */
724 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND)) 726 if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
725 return -EPERM; 727 return -EPERM;
726 728
727 if (!nlk->pid) 729 if (!nlk->portid)
728 err = netlink_autobind(sock); 730 err = netlink_autobind(sock);
729 731
730 if (err == 0) { 732 if (err == 0) {
731 sk->sk_state = NETLINK_CONNECTED; 733 sk->sk_state = NETLINK_CONNECTED;
732 nlk->dst_pid = nladdr->nl_pid; 734 nlk->dst_portid = nladdr->nl_pid;
733 nlk->dst_group = ffs(nladdr->nl_groups); 735 nlk->dst_group = ffs(nladdr->nl_groups);
734 } 736 }
735 737
@@ -748,10 +750,10 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
748 *addr_len = sizeof(*nladdr); 750 *addr_len = sizeof(*nladdr);
749 751
750 if (peer) { 752 if (peer) {
751 nladdr->nl_pid = nlk->dst_pid; 753 nladdr->nl_pid = nlk->dst_portid;
752 nladdr->nl_groups = netlink_group_mask(nlk->dst_group); 754 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
753 } else { 755 } else {
754 nladdr->nl_pid = nlk->pid; 756 nladdr->nl_pid = nlk->portid;
755 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0; 757 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
756 } 758 }
757 return 0; 759 return 0;
@@ -770,19 +772,19 @@ static void netlink_overrun(struct sock *sk)
770 atomic_inc(&sk->sk_drops); 772 atomic_inc(&sk->sk_drops);
771} 773}
772 774
773static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) 775static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
774{ 776{
775 struct sock *sock; 777 struct sock *sock;
776 struct netlink_sock *nlk; 778 struct netlink_sock *nlk;
777 779
778 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid); 780 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
779 if (!sock) 781 if (!sock)
780 return ERR_PTR(-ECONNREFUSED); 782 return ERR_PTR(-ECONNREFUSED);
781 783
782 /* Don't bother queuing skb if kernel socket has no input function */ 784 /* Don't bother queuing skb if kernel socket has no input function */
783 nlk = nlk_sk(sock); 785 nlk = nlk_sk(sock);
784 if (sock->sk_state == NETLINK_CONNECTED && 786 if (sock->sk_state == NETLINK_CONNECTED &&
785 nlk->dst_pid != nlk_sk(ssk)->pid) { 787 nlk->dst_portid != nlk_sk(ssk)->portid) {
786 sock_put(sock); 788 sock_put(sock);
787 return ERR_PTR(-ECONNREFUSED); 789 return ERR_PTR(-ECONNREFUSED);
788 } 790 }
@@ -933,7 +935,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
933} 935}
934 936
935int netlink_unicast(struct sock *ssk, struct sk_buff *skb, 937int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
936 u32 pid, int nonblock) 938 u32 portid, int nonblock)
937{ 939{
938 struct sock *sk; 940 struct sock *sk;
939 int err; 941 int err;
@@ -943,7 +945,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
943 945
944 timeo = sock_sndtimeo(ssk, nonblock); 946 timeo = sock_sndtimeo(ssk, nonblock);
945retry: 947retry:
946 sk = netlink_getsockbypid(ssk, pid); 948 sk = netlink_getsockbyportid(ssk, portid);
947 if (IS_ERR(sk)) { 949 if (IS_ERR(sk)) {
948 kfree_skb(skb); 950 kfree_skb(skb);
949 return PTR_ERR(sk); 951 return PTR_ERR(sk);
@@ -1003,7 +1005,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1003struct netlink_broadcast_data { 1005struct netlink_broadcast_data {
1004 struct sock *exclude_sk; 1006 struct sock *exclude_sk;
1005 struct net *net; 1007 struct net *net;
1006 u32 pid; 1008 u32 portid;
1007 u32 group; 1009 u32 group;
1008 int failure; 1010 int failure;
1009 int delivery_failure; 1011 int delivery_failure;
@@ -1024,7 +1026,7 @@ static int do_one_broadcast(struct sock *sk,
1024 if (p->exclude_sk == sk) 1026 if (p->exclude_sk == sk)
1025 goto out; 1027 goto out;
1026 1028
1027 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 1029 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1028 !test_bit(p->group - 1, nlk->groups)) 1030 !test_bit(p->group - 1, nlk->groups))
1029 goto out; 1031 goto out;
1030 1032
@@ -1076,7 +1078,7 @@ out:
1076 return 0; 1078 return 0;
1077} 1079}
1078 1080
1079int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, 1081int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1080 u32 group, gfp_t allocation, 1082 u32 group, gfp_t allocation,
1081 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), 1083 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1082 void *filter_data) 1084 void *filter_data)
@@ -1090,7 +1092,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1090 1092
1091 info.exclude_sk = ssk; 1093 info.exclude_sk = ssk;
1092 info.net = net; 1094 info.net = net;
1093 info.pid = pid; 1095 info.portid = portid;
1094 info.group = group; 1096 info.group = group;
1095 info.failure = 0; 1097 info.failure = 0;
1096 info.delivery_failure = 0; 1098 info.delivery_failure = 0;
@@ -1128,17 +1130,17 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1128} 1130}
1129EXPORT_SYMBOL(netlink_broadcast_filtered); 1131EXPORT_SYMBOL(netlink_broadcast_filtered);
1130 1132
1131int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, 1133int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1132 u32 group, gfp_t allocation) 1134 u32 group, gfp_t allocation)
1133{ 1135{
1134 return netlink_broadcast_filtered(ssk, skb, pid, group, allocation, 1136 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1135 NULL, NULL); 1137 NULL, NULL);
1136} 1138}
1137EXPORT_SYMBOL(netlink_broadcast); 1139EXPORT_SYMBOL(netlink_broadcast);
1138 1140
1139struct netlink_set_err_data { 1141struct netlink_set_err_data {
1140 struct sock *exclude_sk; 1142 struct sock *exclude_sk;
1141 u32 pid; 1143 u32 portid;
1142 u32 group; 1144 u32 group;
1143 int code; 1145 int code;
1144}; 1146};
@@ -1154,7 +1156,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1154 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) 1156 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1155 goto out; 1157 goto out;
1156 1158
1157 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 1159 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1158 !test_bit(p->group - 1, nlk->groups)) 1160 !test_bit(p->group - 1, nlk->groups))
1159 goto out; 1161 goto out;
1160 1162
@@ -1172,14 +1174,14 @@ out:
1172/** 1174/**
1173 * netlink_set_err - report error to broadcast listeners 1175 * netlink_set_err - report error to broadcast listeners
1174 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() 1176 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1175 * @pid: the PID of a process that we want to skip (if any) 1177 * @portid: the PORTID of a process that we want to skip (if any)
1176 * @groups: the broadcast group that will notice the error 1178 * @groups: the broadcast group that will notice the error
1177 * @code: error code, must be negative (as usual in kernelspace) 1179 * @code: error code, must be negative (as usual in kernelspace)
1178 * 1180 *
1179 * This function returns the number of broadcast listeners that have set the 1181 * This function returns the number of broadcast listeners that have set the
1180 * NETLINK_RECV_NO_ENOBUFS socket option. 1182 * NETLINK_RECV_NO_ENOBUFS socket option.
1181 */ 1183 */
1182int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) 1184int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1183{ 1185{
1184 struct netlink_set_err_data info; 1186 struct netlink_set_err_data info;
1185 struct hlist_node *node; 1187 struct hlist_node *node;
@@ -1187,7 +1189,7 @@ int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1187 int ret = 0; 1189 int ret = 0;
1188 1190
1189 info.exclude_sk = ssk; 1191 info.exclude_sk = ssk;
1190 info.pid = pid; 1192 info.portid = portid;
1191 info.group = group; 1193 info.group = group;
1192 /* sk->sk_err wants a positive error value */ 1194 /* sk->sk_err wants a positive error value */
1193 info.code = -code; 1195 info.code = -code;
@@ -1244,7 +1246,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
1244 break; 1246 break;
1245 case NETLINK_ADD_MEMBERSHIP: 1247 case NETLINK_ADD_MEMBERSHIP:
1246 case NETLINK_DROP_MEMBERSHIP: { 1248 case NETLINK_DROP_MEMBERSHIP: {
1247 if (!netlink_capable(sock, NL_NONROOT_RECV)) 1249 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
1248 return -EPERM; 1250 return -EPERM;
1249 err = netlink_realloc_groups(sk); 1251 err = netlink_realloc_groups(sk);
1250 if (err) 1252 if (err)
@@ -1352,7 +1354,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1352 struct sock *sk = sock->sk; 1354 struct sock *sk = sock->sk;
1353 struct netlink_sock *nlk = nlk_sk(sk); 1355 struct netlink_sock *nlk = nlk_sk(sk);
1354 struct sockaddr_nl *addr = msg->msg_name; 1356 struct sockaddr_nl *addr = msg->msg_name;
1355 u32 dst_pid; 1357 u32 dst_portid;
1356 u32 dst_group; 1358 u32 dst_group;
1357 struct sk_buff *skb; 1359 struct sk_buff *skb;
1358 int err; 1360 int err;
@@ -1372,18 +1374,18 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1372 err = -EINVAL; 1374 err = -EINVAL;
1373 if (addr->nl_family != AF_NETLINK) 1375 if (addr->nl_family != AF_NETLINK)
1374 goto out; 1376 goto out;
1375 dst_pid = addr->nl_pid; 1377 dst_portid = addr->nl_pid;
1376 dst_group = ffs(addr->nl_groups); 1378 dst_group = ffs(addr->nl_groups);
1377 err = -EPERM; 1379 err = -EPERM;
1378 if ((dst_group || dst_pid) && 1380 if ((dst_group || dst_portid) &&
1379 !netlink_capable(sock, NL_NONROOT_SEND)) 1381 !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
1380 goto out; 1382 goto out;
1381 } else { 1383 } else {
1382 dst_pid = nlk->dst_pid; 1384 dst_portid = nlk->dst_portid;
1383 dst_group = nlk->dst_group; 1385 dst_group = nlk->dst_group;
1384 } 1386 }
1385 1387
1386 if (!nlk->pid) { 1388 if (!nlk->portid) {
1387 err = netlink_autobind(sock); 1389 err = netlink_autobind(sock);
1388 if (err) 1390 if (err)
1389 goto out; 1391 goto out;
@@ -1397,9 +1399,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1397 if (skb == NULL) 1399 if (skb == NULL)
1398 goto out; 1400 goto out;
1399 1401
1400 NETLINK_CB(skb).pid = nlk->pid; 1402 NETLINK_CB(skb).portid = nlk->portid;
1401 NETLINK_CB(skb).dst_group = dst_group; 1403 NETLINK_CB(skb).dst_group = dst_group;
1402 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1404 NETLINK_CB(skb).creds = siocb->scm->creds;
1403 1405
1404 err = -EFAULT; 1406 err = -EFAULT;
1405 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1407 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1415,9 +1417,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1415 1417
1416 if (dst_group) { 1418 if (dst_group) {
1417 atomic_inc(&skb->users); 1419 atomic_inc(&skb->users);
1418 netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL); 1420 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1419 } 1421 }
1420 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); 1422 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1421 1423
1422out: 1424out:
1423 scm_destroy(siocb->scm); 1425 scm_destroy(siocb->scm);
@@ -1480,7 +1482,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1480 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; 1482 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
1481 addr->nl_family = AF_NETLINK; 1483 addr->nl_family = AF_NETLINK;
1482 addr->nl_pad = 0; 1484 addr->nl_pad = 0;
1483 addr->nl_pid = NETLINK_CB(skb).pid; 1485 addr->nl_pid = NETLINK_CB(skb).portid;
1484 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group); 1486 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1485 msg->msg_namelen = sizeof(*addr); 1487 msg->msg_namelen = sizeof(*addr);
1486 } 1488 }
@@ -1524,9 +1526,8 @@ static void netlink_data_ready(struct sock *sk, int len)
1524 */ 1526 */
1525 1527
1526struct sock * 1528struct sock *
1527netlink_kernel_create(struct net *net, int unit, 1529__netlink_kernel_create(struct net *net, int unit, struct module *module,
1528 struct module *module, 1530 struct netlink_kernel_cfg *cfg)
1529 struct netlink_kernel_cfg *cfg)
1530{ 1531{
1531 struct socket *sock; 1532 struct socket *sock;
1532 struct sock *sk; 1533 struct sock *sk;
@@ -1580,7 +1581,10 @@ netlink_kernel_create(struct net *net, int unit,
1580 rcu_assign_pointer(nl_table[unit].listeners, listeners); 1581 rcu_assign_pointer(nl_table[unit].listeners, listeners);
1581 nl_table[unit].cb_mutex = cb_mutex; 1582 nl_table[unit].cb_mutex = cb_mutex;
1582 nl_table[unit].module = module; 1583 nl_table[unit].module = module;
1583 nl_table[unit].bind = cfg ? cfg->bind : NULL; 1584 if (cfg) {
1585 nl_table[unit].bind = cfg->bind;
1586 nl_table[unit].flags = cfg->flags;
1587 }
1584 nl_table[unit].registered = 1; 1588 nl_table[unit].registered = 1;
1585 } else { 1589 } else {
1586 kfree(listeners); 1590 kfree(listeners);
@@ -1598,8 +1602,7 @@ out_sock_release_nosk:
1598 sock_release(sock); 1602 sock_release(sock);
1599 return NULL; 1603 return NULL;
1600} 1604}
1601EXPORT_SYMBOL(netlink_kernel_create); 1605EXPORT_SYMBOL(__netlink_kernel_create);
1602
1603 1606
1604void 1607void
1605netlink_kernel_release(struct sock *sk) 1608netlink_kernel_release(struct sock *sk)
@@ -1679,15 +1682,8 @@ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
1679 netlink_table_ungrab(); 1682 netlink_table_ungrab();
1680} 1683}
1681 1684
1682void netlink_set_nonroot(int protocol, unsigned int flags)
1683{
1684 if ((unsigned int)protocol < MAX_LINKS)
1685 nl_table[protocol].nl_nonroot = flags;
1686}
1687EXPORT_SYMBOL(netlink_set_nonroot);
1688
1689struct nlmsghdr * 1685struct nlmsghdr *
1690__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) 1686__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
1691{ 1687{
1692 struct nlmsghdr *nlh; 1688 struct nlmsghdr *nlh;
1693 int size = NLMSG_LENGTH(len); 1689 int size = NLMSG_LENGTH(len);
@@ -1696,7 +1692,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1696 nlh->nlmsg_type = type; 1692 nlh->nlmsg_type = type;
1697 nlh->nlmsg_len = size; 1693 nlh->nlmsg_len = size;
1698 nlh->nlmsg_flags = flags; 1694 nlh->nlmsg_flags = flags;
1699 nlh->nlmsg_pid = pid; 1695 nlh->nlmsg_pid = portid;
1700 nlh->nlmsg_seq = seq; 1696 nlh->nlmsg_seq = seq;
1701 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0) 1697 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
1702 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size); 1698 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
@@ -1792,7 +1788,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1792 atomic_inc(&skb->users); 1788 atomic_inc(&skb->users);
1793 cb->skb = skb; 1789 cb->skb = skb;
1794 1790
1795 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid); 1791 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1796 if (sk == NULL) { 1792 if (sk == NULL) {
1797 netlink_destroy_callback(cb); 1793 netlink_destroy_callback(cb);
1798 return -ECONNREFUSED; 1794 return -ECONNREFUSED;
@@ -1840,7 +1836,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1840 1836
1841 sk = netlink_lookup(sock_net(in_skb->sk), 1837 sk = netlink_lookup(sock_net(in_skb->sk),
1842 in_skb->sk->sk_protocol, 1838 in_skb->sk->sk_protocol,
1843 NETLINK_CB(in_skb).pid); 1839 NETLINK_CB(in_skb).portid);
1844 if (sk) { 1840 if (sk) {
1845 sk->sk_err = ENOBUFS; 1841 sk->sk_err = ENOBUFS;
1846 sk->sk_error_report(sk); 1842 sk->sk_error_report(sk);
@@ -1849,12 +1845,12 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1849 return; 1845 return;
1850 } 1846 }
1851 1847
1852 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 1848 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
1853 NLMSG_ERROR, payload, 0); 1849 NLMSG_ERROR, payload, 0);
1854 errmsg = nlmsg_data(rep); 1850 errmsg = nlmsg_data(rep);
1855 errmsg->error = err; 1851 errmsg->error = err;
1856 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh)); 1852 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
1857 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); 1853 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1858} 1854}
1859EXPORT_SYMBOL(netlink_ack); 1855EXPORT_SYMBOL(netlink_ack);
1860 1856
@@ -1904,33 +1900,33 @@ EXPORT_SYMBOL(netlink_rcv_skb);
1904 * nlmsg_notify - send a notification netlink message 1900 * nlmsg_notify - send a notification netlink message
1905 * @sk: netlink socket to use 1901 * @sk: netlink socket to use
1906 * @skb: notification message 1902 * @skb: notification message
1907 * @pid: destination netlink pid for reports or 0 1903 * @portid: destination netlink portid for reports or 0
1908 * @group: destination multicast group or 0 1904 * @group: destination multicast group or 0
1909 * @report: 1 to report back, 0 to disable 1905 * @report: 1 to report back, 0 to disable
1910 * @flags: allocation flags 1906 * @flags: allocation flags
1911 */ 1907 */
1912int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, 1908int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
1913 unsigned int group, int report, gfp_t flags) 1909 unsigned int group, int report, gfp_t flags)
1914{ 1910{
1915 int err = 0; 1911 int err = 0;
1916 1912
1917 if (group) { 1913 if (group) {
1918 int exclude_pid = 0; 1914 int exclude_portid = 0;
1919 1915
1920 if (report) { 1916 if (report) {
1921 atomic_inc(&skb->users); 1917 atomic_inc(&skb->users);
1922 exclude_pid = pid; 1918 exclude_portid = portid;
1923 } 1919 }
1924 1920
1925 /* errors reported via destination sk->sk_err, but propagate 1921 /* errors reported via destination sk->sk_err, but propagate
1926 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */ 1922 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
1927 err = nlmsg_multicast(sk, skb, exclude_pid, group, flags); 1923 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
1928 } 1924 }
1929 1925
1930 if (report) { 1926 if (report) {
1931 int err2; 1927 int err2;
1932 1928
1933 err2 = nlmsg_unicast(sk, skb, pid); 1929 err2 = nlmsg_unicast(sk, skb, portid);
1934 if (!err || err == -ESRCH) 1930 if (!err || err == -ESRCH)
1935 err = err2; 1931 err = err2;
1936 } 1932 }
@@ -1955,7 +1951,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1955 loff_t off = 0; 1951 loff_t off = 0;
1956 1952
1957 for (i = 0; i < MAX_LINKS; i++) { 1953 for (i = 0; i < MAX_LINKS; i++) {
1958 struct nl_pid_hash *hash = &nl_table[i].hash; 1954 struct nl_portid_hash *hash = &nl_table[i].hash;
1959 1955
1960 for (j = 0; j <= hash->mask; j++) { 1956 for (j = 0; j <= hash->mask; j++) {
1961 sk_for_each(s, node, &hash->table[j]) { 1957 sk_for_each(s, node, &hash->table[j]) {
@@ -2003,7 +1999,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2003 j = iter->hash_idx + 1; 1999 j = iter->hash_idx + 1;
2004 2000
2005 do { 2001 do {
2006 struct nl_pid_hash *hash = &nl_table[i].hash; 2002 struct nl_portid_hash *hash = &nl_table[i].hash;
2007 2003
2008 for (; j <= hash->mask; j++) { 2004 for (; j <= hash->mask; j++) {
2009 s = sk_head(&hash->table[j]); 2005 s = sk_head(&hash->table[j]);
@@ -2042,7 +2038,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
2042 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", 2038 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2043 s, 2039 s,
2044 s->sk_protocol, 2040 s->sk_protocol,
2045 nlk->pid, 2041 nlk->portid,
2046 nlk->groups ? (u32)nlk->groups[0] : 0, 2042 nlk->groups ? (u32)nlk->groups[0] : 0,
2047 sk_rmem_alloc_get(s), 2043 sk_rmem_alloc_get(s),
2048 sk_wmem_alloc_get(s), 2044 sk_wmem_alloc_get(s),
@@ -2150,7 +2146,7 @@ static void __init netlink_add_usersock_entry(void)
2150 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); 2146 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2151 nl_table[NETLINK_USERSOCK].module = THIS_MODULE; 2147 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2152 nl_table[NETLINK_USERSOCK].registered = 1; 2148 nl_table[NETLINK_USERSOCK].registered = 1;
2153 nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND; 2149 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
2154 2150
2155 netlink_table_ungrab(); 2151 netlink_table_ungrab();
2156} 2152}
@@ -2187,12 +2183,12 @@ static int __init netlink_proto_init(void)
2187 order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1; 2183 order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
2188 2184
2189 for (i = 0; i < MAX_LINKS; i++) { 2185 for (i = 0; i < MAX_LINKS; i++) {
2190 struct nl_pid_hash *hash = &nl_table[i].hash; 2186 struct nl_portid_hash *hash = &nl_table[i].hash;
2191 2187
2192 hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table)); 2188 hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
2193 if (!hash->table) { 2189 if (!hash->table) {
2194 while (i-- > 0) 2190 while (i-- > 0)
2195 nl_pid_hash_free(nl_table[i].hash.table, 2191 nl_portid_hash_free(nl_table[i].hash.table,
2196 1 * sizeof(*hash->table)); 2192 1 * sizeof(*hash->table));
2197 kfree(nl_table); 2193 kfree(nl_table);
2198 goto panic; 2194 goto panic;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index fda497412fc3..f2aabb6f4105 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL(genl_unregister_family);
501/** 501/**
502 * genlmsg_put - Add generic netlink header to netlink message 502 * genlmsg_put - Add generic netlink header to netlink message
503 * @skb: socket buffer holding the message 503 * @skb: socket buffer holding the message
504 * @pid: netlink pid the message is addressed to 504 * @portid: netlink portid the message is addressed to
505 * @seq: sequence number (usually the one of the sender) 505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family 506 * @family: generic netlink family
507 * @flags: netlink message flags 507 * @flags: netlink message flags
@@ -509,13 +509,13 @@ EXPORT_SYMBOL(genl_unregister_family);
509 * 509 *
510 * Returns pointer to user specific header 510 * Returns pointer to user specific header
511 */ 511 */
512void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, 512void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
513 struct genl_family *family, int flags, u8 cmd) 513 struct genl_family *family, int flags, u8 cmd)
514{ 514{
515 struct nlmsghdr *nlh; 515 struct nlmsghdr *nlh;
516 struct genlmsghdr *hdr; 516 struct genlmsghdr *hdr;
517 517
518 nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN + 518 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
519 family->hdrsize, flags); 519 family->hdrsize, flags);
520 if (nlh == NULL) 520 if (nlh == NULL)
521 return NULL; 521 return NULL;
@@ -585,7 +585,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
585 } 585 }
586 586
587 info.snd_seq = nlh->nlmsg_seq; 587 info.snd_seq = nlh->nlmsg_seq;
588 info.snd_pid = NETLINK_CB(skb).pid; 588 info.snd_portid = NETLINK_CB(skb).portid;
589 info.nlhdr = nlh; 589 info.nlhdr = nlh;
590 info.genlhdr = nlmsg_data(nlh); 590 info.genlhdr = nlmsg_data(nlh);
591 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN; 591 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
@@ -626,12 +626,12 @@ static struct genl_family genl_ctrl = {
626 .netnsok = true, 626 .netnsok = true,
627}; 627};
628 628
629static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, 629static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
630 u32 flags, struct sk_buff *skb, u8 cmd) 630 u32 flags, struct sk_buff *skb, u8 cmd)
631{ 631{
632 void *hdr; 632 void *hdr;
633 633
634 hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd); 634 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
635 if (hdr == NULL) 635 if (hdr == NULL)
636 return -1; 636 return -1;
637 637
@@ -701,7 +701,7 @@ nla_put_failure:
701 return -EMSGSIZE; 701 return -EMSGSIZE;
702} 702}
703 703
704static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, 704static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
705 u32 seq, u32 flags, struct sk_buff *skb, 705 u32 seq, u32 flags, struct sk_buff *skb,
706 u8 cmd) 706 u8 cmd)
707{ 707{
@@ -709,7 +709,7 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
709 struct nlattr *nla_grps; 709 struct nlattr *nla_grps;
710 struct nlattr *nest; 710 struct nlattr *nest;
711 711
712 hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd); 712 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
713 if (hdr == NULL) 713 if (hdr == NULL)
714 return -1; 714 return -1;
715 715
@@ -756,7 +756,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
756 continue; 756 continue;
757 if (++n < fams_to_skip) 757 if (++n < fams_to_skip)
758 continue; 758 continue;
759 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid, 759 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
760 cb->nlh->nlmsg_seq, NLM_F_MULTI, 760 cb->nlh->nlmsg_seq, NLM_F_MULTI,
761 skb, CTRL_CMD_NEWFAMILY) < 0) 761 skb, CTRL_CMD_NEWFAMILY) < 0)
762 goto errout; 762 goto errout;
@@ -773,7 +773,7 @@ errout:
773} 773}
774 774
775static struct sk_buff *ctrl_build_family_msg(struct genl_family *family, 775static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
776 u32 pid, int seq, u8 cmd) 776 u32 portid, int seq, u8 cmd)
777{ 777{
778 struct sk_buff *skb; 778 struct sk_buff *skb;
779 int err; 779 int err;
@@ -782,7 +782,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
782 if (skb == NULL) 782 if (skb == NULL)
783 return ERR_PTR(-ENOBUFS); 783 return ERR_PTR(-ENOBUFS);
784 784
785 err = ctrl_fill_info(family, pid, seq, 0, skb, cmd); 785 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
786 if (err < 0) { 786 if (err < 0) {
787 nlmsg_free(skb); 787 nlmsg_free(skb);
788 return ERR_PTR(err); 788 return ERR_PTR(err);
@@ -792,7 +792,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
792} 792}
793 793
794static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp, 794static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
795 u32 pid, int seq, u8 cmd) 795 u32 portid, int seq, u8 cmd)
796{ 796{
797 struct sk_buff *skb; 797 struct sk_buff *skb;
798 int err; 798 int err;
@@ -801,7 +801,7 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
801 if (skb == NULL) 801 if (skb == NULL)
802 return ERR_PTR(-ENOBUFS); 802 return ERR_PTR(-ENOBUFS);
803 803
804 err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd); 804 err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd);
805 if (err < 0) { 805 if (err < 0) {
806 nlmsg_free(skb); 806 nlmsg_free(skb);
807 return ERR_PTR(err); 807 return ERR_PTR(err);
@@ -853,7 +853,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
853 return -ENOENT; 853 return -ENOENT;
854 } 854 }
855 855
856 msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq, 856 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
857 CTRL_CMD_NEWFAMILY); 857 CTRL_CMD_NEWFAMILY);
858 if (IS_ERR(msg)) 858 if (IS_ERR(msg))
859 return PTR_ERR(msg); 859 return PTR_ERR(msg);
@@ -918,11 +918,11 @@ static int __net_init genl_pernet_init(struct net *net)
918 struct netlink_kernel_cfg cfg = { 918 struct netlink_kernel_cfg cfg = {
919 .input = genl_rcv, 919 .input = genl_rcv,
920 .cb_mutex = &genl_mutex, 920 .cb_mutex = &genl_mutex,
921 .flags = NL_CFG_F_NONROOT_RECV,
921 }; 922 };
922 923
923 /* we'll bump the group number right afterwards */ 924 /* we'll bump the group number right afterwards */
924 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 925 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
925 THIS_MODULE, &cfg);
926 926
927 if (!net->genl_sock && net_eq(net, &init_net)) 927 if (!net->genl_sock && net_eq(net, &init_net))
928 panic("GENL: Cannot initialize generic netlink\n"); 928 panic("GENL: Cannot initialize generic netlink\n");
@@ -955,8 +955,6 @@ static int __init genl_init(void)
955 if (err < 0) 955 if (err < 0)
956 goto problem; 956 goto problem;
957 957
958 netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
959
960 err = register_pernet_subsys(&genl_pernet_ops); 958 err = register_pernet_subsys(&genl_pernet_ops);
961 if (err) 959 if (err)
962 goto problem; 960 goto problem;
@@ -973,7 +971,7 @@ problem:
973 971
974subsys_initcall(genl_init); 972subsys_initcall(genl_init);
975 973
976static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group, 974static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
977 gfp_t flags) 975 gfp_t flags)
978{ 976{
979 struct sk_buff *tmp; 977 struct sk_buff *tmp;
@@ -988,7 +986,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
988 goto error; 986 goto error;
989 } 987 }
990 err = nlmsg_multicast(prev->genl_sock, tmp, 988 err = nlmsg_multicast(prev->genl_sock, tmp,
991 pid, group, flags); 989 portid, group, flags);
992 if (err) 990 if (err)
993 goto error; 991 goto error;
994 } 992 }
@@ -996,20 +994,20 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
996 prev = net; 994 prev = net;
997 } 995 }
998 996
999 return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags); 997 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1000 error: 998 error:
1001 kfree_skb(skb); 999 kfree_skb(skb);
1002 return err; 1000 return err;
1003} 1001}
1004 1002
1005int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group, 1003int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group,
1006 gfp_t flags) 1004 gfp_t flags)
1007{ 1005{
1008 return genlmsg_mcast(skb, pid, group, flags); 1006 return genlmsg_mcast(skb, portid, group, flags);
1009} 1007}
1010EXPORT_SYMBOL(genlmsg_multicast_allns); 1008EXPORT_SYMBOL(genlmsg_multicast_allns);
1011 1009
1012void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, 1010void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
1013 struct nlmsghdr *nlh, gfp_t flags) 1011 struct nlmsghdr *nlh, gfp_t flags)
1014{ 1012{
1015 struct sock *sk = net->genl_sock; 1013 struct sock *sk = net->genl_sock;
@@ -1018,6 +1016,6 @@ void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
1018 if (nlh) 1016 if (nlh)
1019 report = nlmsg_report(nlh); 1017 report = nlmsg_report(nlh);
1020 1018
1021 nlmsg_notify(sk, skb, pid, group, report, flags); 1019 nlmsg_notify(sk, skb, portid, group, report, flags);
1022} 1020}
1023EXPORT_SYMBOL(genl_notify); 1021EXPORT_SYMBOL(genl_notify);
diff --git a/net/nfc/core.c b/net/nfc/core.c
index ff749794bc5b..c9eacc1f145f 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -679,7 +679,7 @@ static void nfc_release(struct device *d)
679 679
680 if (dev->ops->check_presence) { 680 if (dev->ops->check_presence) {
681 del_timer_sync(&dev->check_pres_timer); 681 del_timer_sync(&dev->check_pres_timer);
682 destroy_workqueue(dev->check_pres_wq); 682 cancel_work_sync(&dev->check_pres_work);
683 } 683 }
684 684
685 nfc_genl_data_exit(&dev->genl_data); 685 nfc_genl_data_exit(&dev->genl_data);
@@ -715,7 +715,7 @@ static void nfc_check_pres_timeout(unsigned long data)
715{ 715{
716 struct nfc_dev *dev = (struct nfc_dev *)data; 716 struct nfc_dev *dev = (struct nfc_dev *)data;
717 717
718 queue_work(dev->check_pres_wq, &dev->check_pres_work); 718 queue_work(system_nrt_wq, &dev->check_pres_work);
719} 719}
720 720
721struct class nfc_class = { 721struct class nfc_class = {
@@ -784,20 +784,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
784 dev->targets_generation = 1; 784 dev->targets_generation = 1;
785 785
786 if (ops->check_presence) { 786 if (ops->check_presence) {
787 char name[32];
788 init_timer(&dev->check_pres_timer); 787 init_timer(&dev->check_pres_timer);
789 dev->check_pres_timer.data = (unsigned long)dev; 788 dev->check_pres_timer.data = (unsigned long)dev;
790 dev->check_pres_timer.function = nfc_check_pres_timeout; 789 dev->check_pres_timer.function = nfc_check_pres_timeout;
791 790
792 INIT_WORK(&dev->check_pres_work, nfc_check_pres_work); 791 INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
793 snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
794 dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
795 WQ_UNBOUND |
796 WQ_MEM_RECLAIM, 1);
797 if (dev->check_pres_wq == NULL) {
798 kfree(dev);
799 return NULL;
800 }
801 } 792 }
802 793
803 return dev; 794 return dev;
diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile
index f9c44b2fb065..c5dbb6891b24 100644
--- a/net/nfc/hci/Makefile
+++ b/net/nfc/hci/Makefile
@@ -4,5 +4,5 @@
4 4
5obj-$(CONFIG_NFC_HCI) += hci.o 5obj-$(CONFIG_NFC_HCI) += hci.o
6 6
7hci-y := core.o hcp.o command.o 7hci-y := core.o hcp.o command.o llc.o llc_nop.o
8hci-$(CONFIG_NFC_SHDLC) += shdlc.o 8hci-$(CONFIG_NFC_SHDLC) += llc_shdlc.o
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 46362ef979db..71c6a7086b8f 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -28,10 +28,29 @@
28 28
29#include "hci.h" 29#include "hci.h"
30 30
31static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, int err, 31static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
32 struct sk_buff *skb, void *cb_data) 32 const u8 *param, size_t param_len,
33 data_exchange_cb_t cb, void *cb_context)
33{ 34{
34 struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data; 35 pr_debug("exec cmd async through pipe=%d, cmd=%d, plen=%zd\n", pipe,
36 cmd, param_len);
37
38 /* TODO: Define hci cmd execution delay. Should it be the same
39 * for all commands?
40 */
41 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
42 param, param_len, cb, cb_context, 3000);
43}
44
45/*
46 * HCI command execution completion callback.
47 * err will be a standard linux error (may be converted from HCI response)
48 * skb contains the response data and must be disposed, or may be NULL if
49 * an error occured
50 */
51static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err)
52{
53 struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)context;
35 54
36 pr_debug("HCI Cmd completed with result=%d\n", err); 55 pr_debug("HCI Cmd completed with result=%d\n", err);
37 56
@@ -55,7 +74,8 @@ static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
55 hcp_ew.exec_complete = false; 74 hcp_ew.exec_complete = false;
56 hcp_ew.result_skb = NULL; 75 hcp_ew.result_skb = NULL;
57 76
58 pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); 77 pr_debug("exec cmd sync through pipe=%d, cmd=%d, plen=%zd\n", pipe,
78 cmd, param_len);
59 79
60 /* TODO: Define hci cmd execution delay. Should it be the same 80 /* TODO: Define hci cmd execution delay. Should it be the same
61 * for all commands? 81 * for all commands?
@@ -133,6 +153,23 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
133} 153}
134EXPORT_SYMBOL(nfc_hci_send_cmd); 154EXPORT_SYMBOL(nfc_hci_send_cmd);
135 155
156int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
157 const u8 *param, size_t param_len,
158 data_exchange_cb_t cb, void *cb_context)
159{
160 u8 pipe;
161
162 pr_debug("\n");
163
164 pipe = hdev->gate2pipe[gate];
165 if (pipe == NFC_HCI_INVALID_PIPE)
166 return -EADDRNOTAVAIL;
167
168 return nfc_hci_execute_cmd_async(hdev, pipe, cmd, param, param_len,
169 cb, cb_context);
170}
171EXPORT_SYMBOL(nfc_hci_send_cmd_async);
172
136int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, 173int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
137 const u8 *param, size_t param_len) 174 const u8 *param, size_t param_len)
138{ 175{
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 1ac7b3fac6c9..d378d93de62e 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -26,6 +26,7 @@
26 26
27#include <net/nfc/nfc.h> 27#include <net/nfc/nfc.h>
28#include <net/nfc/hci.h> 28#include <net/nfc/hci.h>
29#include <net/nfc/llc.h>
29 30
30#include "hci.h" 31#include "hci.h"
31 32
@@ -57,12 +58,11 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
57 if (hdev->cmd_pending_msg) { 58 if (hdev->cmd_pending_msg) {
58 if (timer_pending(&hdev->cmd_timer) == 0) { 59 if (timer_pending(&hdev->cmd_timer) == 0) {
59 if (hdev->cmd_pending_msg->cb) 60 if (hdev->cmd_pending_msg->cb)
60 hdev->cmd_pending_msg->cb(hdev, 61 hdev->cmd_pending_msg->cb(hdev->
61 -ETIME,
62 NULL,
63 hdev->
64 cmd_pending_msg-> 62 cmd_pending_msg->
65 cb_context); 63 cb_context,
64 NULL,
65 -ETIME);
66 kfree(hdev->cmd_pending_msg); 66 kfree(hdev->cmd_pending_msg);
67 hdev->cmd_pending_msg = NULL; 67 hdev->cmd_pending_msg = NULL;
68 } else 68 } else
@@ -78,12 +78,12 @@ next_msg:
78 78
79 pr_debug("msg_tx_queue has a cmd to send\n"); 79 pr_debug("msg_tx_queue has a cmd to send\n");
80 while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) { 80 while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
81 r = hdev->ops->xmit(hdev, skb); 81 r = nfc_llc_xmit_from_hci(hdev->llc, skb);
82 if (r < 0) { 82 if (r < 0) {
83 kfree_skb(skb); 83 kfree_skb(skb);
84 skb_queue_purge(&msg->msg_frags); 84 skb_queue_purge(&msg->msg_frags);
85 if (msg->cb) 85 if (msg->cb)
86 msg->cb(hdev, r, NULL, msg->cb_context); 86 msg->cb(msg->cb_context, NULL, r);
87 kfree(msg); 87 kfree(msg);
88 break; 88 break;
89 } 89 }
@@ -133,15 +133,15 @@ static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
133 del_timer_sync(&hdev->cmd_timer); 133 del_timer_sync(&hdev->cmd_timer);
134 134
135 if (hdev->cmd_pending_msg->cb) 135 if (hdev->cmd_pending_msg->cb)
136 hdev->cmd_pending_msg->cb(hdev, err, skb, 136 hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context,
137 hdev->cmd_pending_msg->cb_context); 137 skb, err);
138 else 138 else
139 kfree_skb(skb); 139 kfree_skb(skb);
140 140
141 kfree(hdev->cmd_pending_msg); 141 kfree(hdev->cmd_pending_msg);
142 hdev->cmd_pending_msg = NULL; 142 hdev->cmd_pending_msg = NULL;
143 143
144 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); 144 queue_work(system_nrt_wq, &hdev->msg_tx_work);
145} 145}
146 146
147void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, 147void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
@@ -326,7 +326,7 @@ static void nfc_hci_cmd_timeout(unsigned long data)
326{ 326{
327 struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data; 327 struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;
328 328
329 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); 329 queue_work(system_nrt_wq, &hdev->msg_tx_work);
330} 330}
331 331
332static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count, 332static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
@@ -398,8 +398,7 @@ disconnect_all:
398 nfc_hci_disconnect_all_gates(hdev); 398 nfc_hci_disconnect_all_gates(hdev);
399 399
400exit: 400exit:
401 if (skb) 401 kfree_skb(skb);
402 kfree_skb(skb);
403 402
404 return r; 403 return r;
405} 404}
@@ -470,29 +469,38 @@ static int hci_dev_up(struct nfc_dev *nfc_dev)
470 return r; 469 return r;
471 } 470 }
472 471
472 r = nfc_llc_start(hdev->llc);
473 if (r < 0)
474 goto exit_close;
475
473 r = hci_dev_session_init(hdev); 476 r = hci_dev_session_init(hdev);
474 if (r < 0) 477 if (r < 0)
475 goto exit; 478 goto exit_llc;
476 479
477 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 480 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
478 NFC_HCI_EVT_END_OPERATION, NULL, 0); 481 NFC_HCI_EVT_END_OPERATION, NULL, 0);
479 if (r < 0) 482 if (r < 0)
480 goto exit; 483 goto exit_llc;
481 484
482 if (hdev->ops->hci_ready) { 485 if (hdev->ops->hci_ready) {
483 r = hdev->ops->hci_ready(hdev); 486 r = hdev->ops->hci_ready(hdev);
484 if (r < 0) 487 if (r < 0)
485 goto exit; 488 goto exit_llc;
486 } 489 }
487 490
488 r = hci_dev_version(hdev); 491 r = hci_dev_version(hdev);
489 if (r < 0) 492 if (r < 0)
490 goto exit; 493 goto exit_llc;
494
495 return 0;
496
497exit_llc:
498 nfc_llc_stop(hdev->llc);
499
500exit_close:
501 if (hdev->ops->close)
502 hdev->ops->close(hdev);
491 503
492exit:
493 if (r < 0)
494 if (hdev->ops->close)
495 hdev->ops->close(hdev);
496 return r; 504 return r;
497} 505}
498 506
@@ -500,6 +508,8 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
500{ 508{
501 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 509 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
502 510
511 nfc_llc_stop(hdev->llc);
512
503 if (hdev->ops->close) 513 if (hdev->ops->close)
504 hdev->ops->close(hdev); 514 hdev->ops->close(hdev);
505 515
@@ -539,13 +549,37 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
539{ 549{
540} 550}
541 551
552#define HCI_CB_TYPE_TRANSCEIVE 1
553
554static void hci_transceive_cb(void *context, struct sk_buff *skb, int err)
555{
556 struct nfc_hci_dev *hdev = context;
557
558 switch (hdev->async_cb_type) {
559 case HCI_CB_TYPE_TRANSCEIVE:
560 /*
561 * TODO: Check RF Error indicator to make sure data is valid.
562 * It seems that HCI cmd can complete without error, but data
563 * can be invalid if an RF error occured? Ignore for now.
564 */
565 if (err == 0)
566 skb_trim(skb, skb->len - 1); /* RF Err ind */
567
568 hdev->async_cb(hdev->async_cb_context, skb, err);
569 break;
570 default:
571 if (err == 0)
572 kfree_skb(skb);
573 break;
574 }
575}
576
542static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, 577static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
543 struct sk_buff *skb, data_exchange_cb_t cb, 578 struct sk_buff *skb, data_exchange_cb_t cb,
544 void *cb_context) 579 void *cb_context)
545{ 580{
546 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 581 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
547 int r; 582 int r;
548 struct sk_buff *res_skb = NULL;
549 583
550 pr_debug("target_idx=%d\n", target->idx); 584 pr_debug("target_idx=%d\n", target->idx);
551 585
@@ -553,40 +587,37 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
553 case NFC_HCI_RF_READER_A_GATE: 587 case NFC_HCI_RF_READER_A_GATE:
554 case NFC_HCI_RF_READER_B_GATE: 588 case NFC_HCI_RF_READER_B_GATE:
555 if (hdev->ops->data_exchange) { 589 if (hdev->ops->data_exchange) {
556 r = hdev->ops->data_exchange(hdev, target, skb, 590 r = hdev->ops->data_exchange(hdev, target, skb, cb,
557 &res_skb); 591 cb_context);
558 if (r <= 0) /* handled */ 592 if (r <= 0) /* handled */
559 break; 593 break;
560 } 594 }
561 595
562 *skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */ 596 *skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */
563 r = nfc_hci_send_cmd(hdev, target->hci_reader_gate, 597
564 NFC_HCI_WR_XCHG_DATA, 598 hdev->async_cb_type = HCI_CB_TYPE_TRANSCEIVE;
565 skb->data, skb->len, &res_skb); 599 hdev->async_cb = cb;
566 /* 600 hdev->async_cb_context = cb_context;
567 * TODO: Check RF Error indicator to make sure data is valid. 601
568 * It seems that HCI cmd can complete without error, but data 602 r = nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
569 * can be invalid if an RF error occured? Ignore for now. 603 NFC_HCI_WR_XCHG_DATA, skb->data,
570 */ 604 skb->len, hci_transceive_cb, hdev);
571 if (r == 0)
572 skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
573 break; 605 break;
574 default: 606 default:
575 if (hdev->ops->data_exchange) { 607 if (hdev->ops->data_exchange) {
576 r = hdev->ops->data_exchange(hdev, target, skb, 608 r = hdev->ops->data_exchange(hdev, target, skb, cb,
577 &res_skb); 609 cb_context);
578 if (r == 1) 610 if (r == 1)
579 r = -ENOTSUPP; 611 r = -ENOTSUPP;
580 } 612 }
581 else 613 else
582 r = -ENOTSUPP; 614 r = -ENOTSUPP;
615 break;
583 } 616 }
584 617
585 kfree_skb(skb); 618 kfree_skb(skb);
586 619
587 cb(cb_context, res_skb, r); 620 return r;
588
589 return 0;
590} 621}
591 622
592static int hci_check_presence(struct nfc_dev *nfc_dev, 623static int hci_check_presence(struct nfc_dev *nfc_dev,
@@ -600,6 +631,93 @@ static int hci_check_presence(struct nfc_dev *nfc_dev,
600 return 0; 631 return 0;
601} 632}
602 633
634static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
635{
636 mutex_lock(&hdev->msg_tx_mutex);
637
638 if (hdev->cmd_pending_msg == NULL) {
639 nfc_driver_failure(hdev->ndev, err);
640 goto exit;
641 }
642
643 __nfc_hci_cmd_completion(hdev, err, NULL);
644
645exit:
646 mutex_unlock(&hdev->msg_tx_mutex);
647}
648
649static void nfc_hci_llc_failure(struct nfc_hci_dev *hdev, int err)
650{
651 nfc_hci_failure(hdev, err);
652}
653
654static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
655{
656 struct hcp_packet *packet;
657 u8 type;
658 u8 instruction;
659 struct sk_buff *hcp_skb;
660 u8 pipe;
661 struct sk_buff *frag_skb;
662 int msg_len;
663
664 packet = (struct hcp_packet *)skb->data;
665 if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
666 skb_queue_tail(&hdev->rx_hcp_frags, skb);
667 return;
668 }
669
670 /* it's the last fragment. Does it need re-aggregation? */
671 if (skb_queue_len(&hdev->rx_hcp_frags)) {
672 pipe = packet->header & NFC_HCI_FRAGMENT;
673 skb_queue_tail(&hdev->rx_hcp_frags, skb);
674
675 msg_len = 0;
676 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
677 msg_len += (frag_skb->len -
678 NFC_HCI_HCP_PACKET_HEADER_LEN);
679 }
680
681 hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
682 msg_len, GFP_KERNEL);
683 if (hcp_skb == NULL) {
684 nfc_hci_failure(hdev, -ENOMEM);
685 return;
686 }
687
688 *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
689
690 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
691 msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
692 memcpy(skb_put(hcp_skb, msg_len),
693 frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
694 msg_len);
695 }
696
697 skb_queue_purge(&hdev->rx_hcp_frags);
698 } else {
699 packet->header &= NFC_HCI_FRAGMENT;
700 hcp_skb = skb;
701 }
702
703 /* if this is a response, dispatch immediately to
704 * unblock waiting cmd context. Otherwise, enqueue to dispatch
705 * in separate context where handler can also execute command.
706 */
707 packet = (struct hcp_packet *)hcp_skb->data;
708 type = HCP_MSG_GET_TYPE(packet->message.header);
709 if (type == NFC_HCI_HCP_RESPONSE) {
710 pipe = packet->header;
711 instruction = HCP_MSG_GET_CMD(packet->message.header);
712 skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
713 NFC_HCI_HCP_MESSAGE_HEADER_LEN);
714 nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
715 } else {
716 skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
717 queue_work(system_nrt_wq, &hdev->msg_rx_work);
718 }
719}
720
603static struct nfc_ops hci_nfc_ops = { 721static struct nfc_ops hci_nfc_ops = {
604 .dev_up = hci_dev_up, 722 .dev_up = hci_dev_up,
605 .dev_down = hci_dev_down, 723 .dev_down = hci_dev_down,
@@ -614,6 +732,7 @@ static struct nfc_ops hci_nfc_ops = {
614struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, 732struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
615 struct nfc_hci_init_data *init_data, 733 struct nfc_hci_init_data *init_data,
616 u32 protocols, 734 u32 protocols,
735 const char *llc_name,
617 int tx_headroom, 736 int tx_headroom,
618 int tx_tailroom, 737 int tx_tailroom,
619 int max_link_payload) 738 int max_link_payload)
@@ -630,10 +749,19 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
630 if (hdev == NULL) 749 if (hdev == NULL)
631 return NULL; 750 return NULL;
632 751
752 hdev->llc = nfc_llc_allocate(llc_name, hdev, ops->xmit,
753 nfc_hci_recv_from_llc, tx_headroom,
754 tx_tailroom, nfc_hci_llc_failure);
755 if (hdev->llc == NULL) {
756 kfree(hdev);
757 return NULL;
758 }
759
633 hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, 760 hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
634 tx_headroom + HCI_CMDS_HEADROOM, 761 tx_headroom + HCI_CMDS_HEADROOM,
635 tx_tailroom); 762 tx_tailroom);
636 if (!hdev->ndev) { 763 if (!hdev->ndev) {
764 nfc_llc_free(hdev->llc);
637 kfree(hdev); 765 kfree(hdev);
638 return NULL; 766 return NULL;
639 } 767 }
@@ -653,29 +781,18 @@ EXPORT_SYMBOL(nfc_hci_allocate_device);
653void nfc_hci_free_device(struct nfc_hci_dev *hdev) 781void nfc_hci_free_device(struct nfc_hci_dev *hdev)
654{ 782{
655 nfc_free_device(hdev->ndev); 783 nfc_free_device(hdev->ndev);
784 nfc_llc_free(hdev->llc);
656 kfree(hdev); 785 kfree(hdev);
657} 786}
658EXPORT_SYMBOL(nfc_hci_free_device); 787EXPORT_SYMBOL(nfc_hci_free_device);
659 788
660int nfc_hci_register_device(struct nfc_hci_dev *hdev) 789int nfc_hci_register_device(struct nfc_hci_dev *hdev)
661{ 790{
662 struct device *dev = &hdev->ndev->dev;
663 const char *devname = dev_name(dev);
664 char name[32];
665 int r = 0;
666
667 mutex_init(&hdev->msg_tx_mutex); 791 mutex_init(&hdev->msg_tx_mutex);
668 792
669 INIT_LIST_HEAD(&hdev->msg_tx_queue); 793 INIT_LIST_HEAD(&hdev->msg_tx_queue);
670 794
671 INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work); 795 INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
672 snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
673 hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
674 WQ_MEM_RECLAIM, 1);
675 if (hdev->msg_tx_wq == NULL) {
676 r = -ENOMEM;
677 goto exit;
678 }
679 796
680 init_timer(&hdev->cmd_timer); 797 init_timer(&hdev->cmd_timer);
681 hdev->cmd_timer.data = (unsigned long)hdev; 798 hdev->cmd_timer.data = (unsigned long)hdev;
@@ -684,27 +801,10 @@ int nfc_hci_register_device(struct nfc_hci_dev *hdev)
684 skb_queue_head_init(&hdev->rx_hcp_frags); 801 skb_queue_head_init(&hdev->rx_hcp_frags);
685 802
686 INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work); 803 INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
687 snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
688 hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
689 WQ_MEM_RECLAIM, 1);
690 if (hdev->msg_rx_wq == NULL) {
691 r = -ENOMEM;
692 goto exit;
693 }
694 804
695 skb_queue_head_init(&hdev->msg_rx_queue); 805 skb_queue_head_init(&hdev->msg_rx_queue);
696 806
697 r = nfc_register_device(hdev->ndev); 807 return nfc_register_device(hdev->ndev);
698
699exit:
700 if (r < 0) {
701 if (hdev->msg_tx_wq)
702 destroy_workqueue(hdev->msg_tx_wq);
703 if (hdev->msg_rx_wq)
704 destroy_workqueue(hdev->msg_rx_wq);
705 }
706
707 return r;
708} 808}
709EXPORT_SYMBOL(nfc_hci_register_device); 809EXPORT_SYMBOL(nfc_hci_register_device);
710 810
@@ -725,9 +825,8 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
725 825
726 nfc_unregister_device(hdev->ndev); 826 nfc_unregister_device(hdev->ndev);
727 827
728 destroy_workqueue(hdev->msg_tx_wq); 828 cancel_work_sync(&hdev->msg_tx_work);
729 829 cancel_work_sync(&hdev->msg_rx_work);
730 destroy_workqueue(hdev->msg_rx_wq);
731} 830}
732EXPORT_SYMBOL(nfc_hci_unregister_device); 831EXPORT_SYMBOL(nfc_hci_unregister_device);
733 832
@@ -743,93 +842,30 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
743} 842}
744EXPORT_SYMBOL(nfc_hci_get_clientdata); 843EXPORT_SYMBOL(nfc_hci_get_clientdata);
745 844
746static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
747{
748 mutex_lock(&hdev->msg_tx_mutex);
749
750 if (hdev->cmd_pending_msg == NULL) {
751 nfc_driver_failure(hdev->ndev, err);
752 goto exit;
753 }
754
755 __nfc_hci_cmd_completion(hdev, err, NULL);
756
757exit:
758 mutex_unlock(&hdev->msg_tx_mutex);
759}
760
761void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err) 845void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
762{ 846{
763 nfc_hci_failure(hdev, err); 847 nfc_hci_failure(hdev, err);
764} 848}
765EXPORT_SYMBOL(nfc_hci_driver_failure); 849EXPORT_SYMBOL(nfc_hci_driver_failure);
766 850
767void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) 851void inline nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
768{ 852{
769 struct hcp_packet *packet; 853 nfc_llc_rcv_from_drv(hdev->llc, skb);
770 u8 type; 854}
771 u8 instruction; 855EXPORT_SYMBOL(nfc_hci_recv_frame);
772 struct sk_buff *hcp_skb;
773 u8 pipe;
774 struct sk_buff *frag_skb;
775 int msg_len;
776
777 packet = (struct hcp_packet *)skb->data;
778 if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
779 skb_queue_tail(&hdev->rx_hcp_frags, skb);
780 return;
781 }
782
783 /* it's the last fragment. Does it need re-aggregation? */
784 if (skb_queue_len(&hdev->rx_hcp_frags)) {
785 pipe = packet->header & NFC_HCI_FRAGMENT;
786 skb_queue_tail(&hdev->rx_hcp_frags, skb);
787
788 msg_len = 0;
789 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
790 msg_len += (frag_skb->len -
791 NFC_HCI_HCP_PACKET_HEADER_LEN);
792 }
793
794 hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
795 msg_len, GFP_KERNEL);
796 if (hcp_skb == NULL) {
797 nfc_hci_failure(hdev, -ENOMEM);
798 return;
799 }
800
801 *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
802
803 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
804 msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
805 memcpy(skb_put(hcp_skb, msg_len),
806 frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
807 msg_len);
808 }
809 856
810 skb_queue_purge(&hdev->rx_hcp_frags); 857static int __init nfc_hci_init(void)
811 } else { 858{
812 packet->header &= NFC_HCI_FRAGMENT; 859 return nfc_llc_init();
813 hcp_skb = skb; 860}
814 }
815 861
816 /* if this is a response, dispatch immediately to 862static void __exit nfc_hci_exit(void)
817 * unblock waiting cmd context. Otherwise, enqueue to dispatch 863{
818 * in separate context where handler can also execute command. 864 nfc_llc_exit();
819 */
820 packet = (struct hcp_packet *)hcp_skb->data;
821 type = HCP_MSG_GET_TYPE(packet->message.header);
822 if (type == NFC_HCI_HCP_RESPONSE) {
823 pipe = packet->header;
824 instruction = HCP_MSG_GET_CMD(packet->message.header);
825 skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
826 NFC_HCI_HCP_MESSAGE_HEADER_LEN);
827 nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
828 } else {
829 skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
830 queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
831 }
832} 865}
833EXPORT_SYMBOL(nfc_hci_recv_frame); 866
867subsys_initcall(nfc_hci_init);
868module_exit(nfc_hci_exit);
834 869
835MODULE_LICENSE("GPL"); 870MODULE_LICENSE("GPL");
871MODULE_DESCRIPTION("NFC HCI Core");
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
index fa9a21e92239..b274d12c18ac 100644
--- a/net/nfc/hci/hci.h
+++ b/net/nfc/hci/hci.h
@@ -20,6 +20,8 @@
20#ifndef __LOCAL_HCI_H 20#ifndef __LOCAL_HCI_H
21#define __LOCAL_HCI_H 21#define __LOCAL_HCI_H
22 22
23#include <net/nfc/hci.h>
24
23struct gate_pipe_map { 25struct gate_pipe_map {
24 u8 gate; 26 u8 gate;
25 u8 pipe; 27 u8 pipe;
@@ -35,15 +37,6 @@ struct hcp_packet {
35 struct hcp_message message; 37 struct hcp_message message;
36} __packed; 38} __packed;
37 39
38/*
39 * HCI command execution completion callback.
40 * result will be a standard linux error (may be converted from HCI response)
41 * skb contains the response data and must be disposed, or may be NULL if
42 * an error occured
43 */
44typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, int result,
45 struct sk_buff *skb, void *cb_data);
46
47struct hcp_exec_waiter { 40struct hcp_exec_waiter {
48 wait_queue_head_t *wq; 41 wait_queue_head_t *wq;
49 bool exec_complete; 42 bool exec_complete;
@@ -55,7 +48,7 @@ struct hci_msg {
55 struct list_head msg_l; 48 struct list_head msg_l;
56 struct sk_buff_head msg_frags; 49 struct sk_buff_head msg_frags;
57 bool wait_response; 50 bool wait_response;
58 hci_cmd_cb_t cb; 51 data_exchange_cb_t cb;
59 void *cb_context; 52 void *cb_context;
60 unsigned long completion_delay; 53 unsigned long completion_delay;
61}; 54};
@@ -83,7 +76,7 @@ struct hci_create_pipe_resp {
83int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, 76int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
84 u8 type, u8 instruction, 77 u8 type, u8 instruction,
85 const u8 *payload, size_t payload_len, 78 const u8 *payload, size_t payload_len,
86 hci_cmd_cb_t cb, void *cb_data, 79 data_exchange_cb_t cb, void *cb_context,
87 unsigned long completion_delay); 80 unsigned long completion_delay);
88 81
89u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe); 82u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index f4dad1a89740..208eedd07ee3 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -35,7 +35,7 @@
35int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, 35int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
36 u8 type, u8 instruction, 36 u8 type, u8 instruction,
37 const u8 *payload, size_t payload_len, 37 const u8 *payload, size_t payload_len,
38 hci_cmd_cb_t cb, void *cb_data, 38 data_exchange_cb_t cb, void *cb_context,
39 unsigned long completion_delay) 39 unsigned long completion_delay)
40{ 40{
41 struct nfc_dev *ndev = hdev->ndev; 41 struct nfc_dev *ndev = hdev->ndev;
@@ -52,7 +52,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
52 skb_queue_head_init(&cmd->msg_frags); 52 skb_queue_head_init(&cmd->msg_frags);
53 cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false; 53 cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false;
54 cmd->cb = cb; 54 cmd->cb = cb;
55 cmd->cb_context = cb_data; 55 cmd->cb_context = cb_context;
56 cmd->completion_delay = completion_delay; 56 cmd->completion_delay = completion_delay;
57 57
58 hci_len = payload_len + 1; 58 hci_len = payload_len + 1;
@@ -108,7 +108,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
108 list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue); 108 list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
109 mutex_unlock(&hdev->msg_tx_mutex); 109 mutex_unlock(&hdev->msg_tx_mutex);
110 110
111 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work); 111 queue_work(system_nrt_wq, &hdev->msg_tx_work);
112 112
113 return 0; 113 return 0;
114 114
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
new file mode 100644
index 000000000000..ae1205ded87f
--- /dev/null
+++ b/net/nfc/hci/llc.c
@@ -0,0 +1,170 @@
1/*
2 * Link Layer Control manager
3 *
4 * Copyright (C) 2012 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <net/nfc/llc.h>
22
23#include "llc.h"
24
25static struct list_head llc_engines;
26
27int nfc_llc_init(void)
28{
29 int r;
30
31 INIT_LIST_HEAD(&llc_engines);
32
33 r = nfc_llc_nop_register();
34 if (r)
35 goto exit;
36
37 r = nfc_llc_shdlc_register();
38 if (r)
39 goto exit;
40
41 return 0;
42
43exit:
44 nfc_llc_exit();
45 return r;
46}
47
48void nfc_llc_exit(void)
49{
50 struct nfc_llc_engine *llc_engine, *n;
51
52 list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
53 list_del(&llc_engine->entry);
54 kfree(llc_engine->name);
55 kfree(llc_engine);
56 }
57}
58
59int nfc_llc_register(const char *name, struct nfc_llc_ops *ops)
60{
61 struct nfc_llc_engine *llc_engine;
62
63 llc_engine = kzalloc(sizeof(struct nfc_llc_engine), GFP_KERNEL);
64 if (llc_engine == NULL)
65 return -ENOMEM;
66
67 llc_engine->name = kstrdup(name, GFP_KERNEL);
68 if (llc_engine->name == NULL) {
69 kfree(llc_engine);
70 return -ENOMEM;
71 }
72 llc_engine->ops = ops;
73
74 INIT_LIST_HEAD(&llc_engine->entry);
75 list_add_tail (&llc_engine->entry, &llc_engines);
76
77 return 0;
78}
79
80static struct nfc_llc_engine *nfc_llc_name_to_engine(const char *name)
81{
82 struct nfc_llc_engine *llc_engine;
83
84 list_for_each_entry(llc_engine, &llc_engines, entry) {
85 if (strcmp(llc_engine->name, name) == 0)
86 return llc_engine;
87 }
88
89 return NULL;
90}
91
92void nfc_llc_unregister(const char *name)
93{
94 struct nfc_llc_engine *llc_engine;
95
96 llc_engine = nfc_llc_name_to_engine(name);
97 if (llc_engine == NULL)
98 return;
99
100 list_del(&llc_engine->entry);
101 kfree(llc_engine->name);
102 kfree(llc_engine);
103}
104
105struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
106 xmit_to_drv_t xmit_to_drv,
107 rcv_to_hci_t rcv_to_hci, int tx_headroom,
108 int tx_tailroom, llc_failure_t llc_failure)
109{
110 struct nfc_llc_engine *llc_engine;
111 struct nfc_llc *llc;
112
113 llc_engine = nfc_llc_name_to_engine(name);
114 if (llc_engine == NULL)
115 return NULL;
116
117 llc = kzalloc(sizeof(struct nfc_llc), GFP_KERNEL);
118 if (llc == NULL)
119 return NULL;
120
121 llc->data = llc_engine->ops->init(hdev, xmit_to_drv, rcv_to_hci,
122 tx_headroom, tx_tailroom,
123 &llc->rx_headroom, &llc->rx_tailroom,
124 llc_failure);
125 if (llc->data == NULL) {
126 kfree(llc);
127 return NULL;
128 }
129 llc->ops = llc_engine->ops;
130
131 return llc;
132}
133
134void nfc_llc_free(struct nfc_llc *llc)
135{
136 llc->ops->deinit(llc);
137 kfree(llc);
138}
139
140inline void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
141 int *rx_tailroom)
142{
143 *rx_headroom = llc->rx_headroom;
144 *rx_tailroom = llc->rx_tailroom;
145}
146
147inline int nfc_llc_start(struct nfc_llc *llc)
148{
149 return llc->ops->start(llc);
150}
151
152inline int nfc_llc_stop(struct nfc_llc *llc)
153{
154 return llc->ops->stop(llc);
155}
156
157inline void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
158{
159 llc->ops->rcv_from_drv(llc, skb);
160}
161
162inline int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
163{
164 return llc->ops->xmit_from_hci(llc, skb);
165}
166
167inline void *nfc_llc_get_data(struct nfc_llc *llc)
168{
169 return llc->data;
170}
diff --git a/net/nfc/hci/llc.h b/net/nfc/hci/llc.h
new file mode 100644
index 000000000000..7be0b7f3ceb6
--- /dev/null
+++ b/net/nfc/hci/llc.h
@@ -0,0 +1,69 @@
1/*
2 * Link Layer Control manager
3 *
4 * Copyright (C) 2012 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef __LOCAL_LLC_H_
22#define __LOCAL_LLC_H_
23
24#include <net/nfc/hci.h>
25#include <net/nfc/llc.h>
26#include <linux/skbuff.h>
27
28struct nfc_llc_ops {
29 void *(*init) (struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
30 rcv_to_hci_t rcv_to_hci, int tx_headroom,
31 int tx_tailroom, int *rx_headroom, int *rx_tailroom,
32 llc_failure_t llc_failure);
33 void (*deinit) (struct nfc_llc *llc);
34 int (*start) (struct nfc_llc *llc);
35 int (*stop) (struct nfc_llc *llc);
36 void (*rcv_from_drv) (struct nfc_llc *llc, struct sk_buff *skb);
37 int (*xmit_from_hci) (struct nfc_llc *llc, struct sk_buff *skb);
38};
39
40struct nfc_llc_engine {
41 const char *name;
42 struct nfc_llc_ops *ops;
43 struct list_head entry;
44};
45
46struct nfc_llc {
47 void *data;
48 struct nfc_llc_ops *ops;
49 int rx_headroom;
50 int rx_tailroom;
51};
52
53void *nfc_llc_get_data(struct nfc_llc *llc);
54
55int nfc_llc_register(const char *name, struct nfc_llc_ops *ops);
56void nfc_llc_unregister(const char *name);
57
58int nfc_llc_nop_register(void);
59
60#if defined(CONFIG_NFC_SHDLC)
61int nfc_llc_shdlc_register(void);
62#else
63static inline int nfc_llc_shdlc_register(void)
64{
65 return 0;
66}
67#endif
68
69#endif /* __LOCAL_LLC_H_ */
diff --git a/net/nfc/hci/llc_nop.c b/net/nfc/hci/llc_nop.c
new file mode 100644
index 000000000000..87b10291b40f
--- /dev/null
+++ b/net/nfc/hci/llc_nop.c
@@ -0,0 +1,99 @@
1/*
2 * nop (passthrough) Link Layer Control
3 *
4 * Copyright (C) 2012 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/types.h>
22
23#include "llc.h"
24
25struct llc_nop {
26 struct nfc_hci_dev *hdev;
27 xmit_to_drv_t xmit_to_drv;
28 rcv_to_hci_t rcv_to_hci;
29 int tx_headroom;
30 int tx_tailroom;
31 llc_failure_t llc_failure;
32};
33
34static void *llc_nop_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
35 rcv_to_hci_t rcv_to_hci, int tx_headroom,
36 int tx_tailroom, int *rx_headroom, int *rx_tailroom,
37 llc_failure_t llc_failure)
38{
39 struct llc_nop *llc_nop;
40
41 *rx_headroom = 0;
42 *rx_tailroom = 0;
43
44 llc_nop = kzalloc(sizeof(struct llc_nop), GFP_KERNEL);
45 if (llc_nop == NULL)
46 return NULL;
47
48 llc_nop->hdev = hdev;
49 llc_nop->xmit_to_drv = xmit_to_drv;
50 llc_nop->rcv_to_hci = rcv_to_hci;
51 llc_nop->tx_headroom = tx_headroom;
52 llc_nop->tx_tailroom = tx_tailroom;
53 llc_nop->llc_failure = llc_failure;
54
55 return llc_nop;
56}
57
58static void llc_nop_deinit(struct nfc_llc *llc)
59{
60 kfree(nfc_llc_get_data(llc));
61}
62
63static int llc_nop_start(struct nfc_llc *llc)
64{
65 return 0;
66}
67
68static int llc_nop_stop(struct nfc_llc *llc)
69{
70 return 0;
71}
72
73static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
74{
75 struct llc_nop *llc_nop = nfc_llc_get_data(llc);
76
77 llc_nop->rcv_to_hci(llc_nop->hdev, skb);
78}
79
80static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
81{
82 struct llc_nop *llc_nop = nfc_llc_get_data(llc);
83
84 return llc_nop->xmit_to_drv(llc_nop->hdev, skb);
85}
86
87static struct nfc_llc_ops llc_nop_ops = {
88 .init = llc_nop_init,
89 .deinit = llc_nop_deinit,
90 .start = llc_nop_start,
91 .stop = llc_nop_stop,
92 .rcv_from_drv = llc_nop_rcv_from_drv,
93 .xmit_from_hci = llc_nop_xmit_from_hci,
94};
95
96int nfc_llc_nop_register(void)
97{
98 return nfc_llc_register(LLC_NOP_NAME, &llc_nop_ops);
99}
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/llc_shdlc.c
index 6f840c18c892..8f69d791dcb3 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * shdlc Link Layer Control
3 *
2 * Copyright (C) 2012 Intel Corporation. All rights reserved. 4 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 * 5 *
4 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify it
5 * it under the terms of the GNU General Public License as published by 7 * under the terms and conditions of the GNU General Public License,
6 * the Free Software Foundation; either version 2 of the License, or 8 * version 2, as published by the Free Software Foundation.
7 * (at your option) any later version.
8 * 9 *
9 * This program is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -19,18 +20,65 @@
19 20
20#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__ 21#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
21 22
23#include <linux/types.h>
22#include <linux/sched.h> 24#include <linux/sched.h>
23#include <linux/export.h>
24#include <linux/wait.h> 25#include <linux/wait.h>
25#include <linux/crc-ccitt.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28 28
29#include <net/nfc/hci.h> 29#include "llc.h"
30#include <net/nfc/shdlc.h> 30
31enum shdlc_state {
32 SHDLC_DISCONNECTED = 0,
33 SHDLC_CONNECTING = 1,
34 SHDLC_NEGOTIATING = 2,
35 SHDLC_HALF_CONNECTED = 3,
36 SHDLC_CONNECTED = 4
37};
38
39struct llc_shdlc {
40 struct nfc_hci_dev *hdev;
41 xmit_to_drv_t xmit_to_drv;
42 rcv_to_hci_t rcv_to_hci;
43
44 struct mutex state_mutex;
45 enum shdlc_state state;
46 int hard_fault;
47
48 wait_queue_head_t *connect_wq;
49 int connect_tries;
50 int connect_result;
51 struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
52
53 u8 w; /* window size */
54 bool srej_support;
55
56 struct timer_list t1_timer; /* send ack timeout */
57 bool t1_active;
58
59 struct timer_list t2_timer; /* guard/retransmit timeout */
60 bool t2_active;
61
62 int ns; /* next seq num for send */
63 int nr; /* next expected seq num for receive */
64 int dnr; /* oldest sent unacked seq num */
65
66 struct sk_buff_head rcv_q;
67
68 struct sk_buff_head send_q;
69 bool rnr; /* other side is not ready to receive */
70
71 struct sk_buff_head ack_pending_q;
72
73 struct work_struct sm_work;
74
75 int tx_headroom;
76 int tx_tailroom;
77
78 llc_failure_t llc_failure;
79};
31 80
32#define SHDLC_LLC_HEAD_ROOM 2 81#define SHDLC_LLC_HEAD_ROOM 2
33#define SHDLC_LLC_TAIL_ROOM 2
34 82
35#define SHDLC_MAX_WINDOW 4 83#define SHDLC_MAX_WINDOW 4
36#define SHDLC_SREJ_SUPPORT false 84#define SHDLC_SREJ_SUPPORT false
@@ -71,7 +119,7 @@ do { \
71} while (0) 119} while (0)
72 120
73/* checks x < y <= z modulo 8 */ 121/* checks x < y <= z modulo 8 */
74static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z) 122static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
75{ 123{
76 if (x < z) 124 if (x < z)
77 return ((x < y) && (y <= z)) ? true : false; 125 return ((x < y) && (y <= z)) ? true : false;
@@ -80,7 +128,7 @@ static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
80} 128}
81 129
82/* checks x <= y < z modulo 8 */ 130/* checks x <= y < z modulo 8 */
83static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z) 131static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
84{ 132{
85 if (x <= z) 133 if (x <= z)
86 return ((x <= y) && (y < z)) ? true : false; 134 return ((x <= y) && (y < z)) ? true : false;
@@ -88,36 +136,21 @@ static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
88 return ((y >= x) || (y < z)) ? true : false; 136 return ((y >= x) || (y < z)) ? true : false;
89} 137}
90 138
91static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc, 139static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
92 int payload_len) 140 int payload_len)
93{ 141{
94 struct sk_buff *skb; 142 struct sk_buff *skb;
95 143
96 skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM + 144 skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
97 shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM + 145 shdlc->tx_tailroom + payload_len, GFP_KERNEL);
98 payload_len, GFP_KERNEL);
99 if (skb) 146 if (skb)
100 skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM); 147 skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
101 148
102 return skb; 149 return skb;
103} 150}
104 151
105static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
106{
107 u16 crc;
108 int len;
109
110 len = skb->len + 2;
111 *skb_push(skb, 1) = len;
112
113 crc = crc_ccitt(0xffff, skb->data, skb->len);
114 crc = ~crc;
115 *skb_put(skb, 1) = crc & 0xff;
116 *skb_put(skb, 1) = crc >> 8;
117}
118
119/* immediately sends an S frame. */ 152/* immediately sends an S frame. */
120static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc, 153static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
121 enum sframe_type sframe_type, int nr) 154 enum sframe_type sframe_type, int nr)
122{ 155{
123 int r; 156 int r;
@@ -125,15 +158,13 @@ static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
125 158
126 pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr); 159 pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
127 160
128 skb = nfc_shdlc_alloc_skb(shdlc, 0); 161 skb = llc_shdlc_alloc_skb(shdlc, 0);
129 if (skb == NULL) 162 if (skb == NULL)
130 return -ENOMEM; 163 return -ENOMEM;
131 164
132 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr; 165 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
133 166
134 nfc_shdlc_add_len_crc(skb); 167 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
135
136 r = shdlc->ops->xmit(shdlc, skb);
137 168
138 kfree_skb(skb); 169 kfree_skb(skb);
139 170
@@ -141,7 +172,7 @@ static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
141} 172}
142 173
143/* immediately sends an U frame. skb may contain optional payload */ 174/* immediately sends an U frame. skb may contain optional payload */
144static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc, 175static int llc_shdlc_send_u_frame(struct llc_shdlc *shdlc,
145 struct sk_buff *skb, 176 struct sk_buff *skb,
146 enum uframe_modifier uframe_modifier) 177 enum uframe_modifier uframe_modifier)
147{ 178{
@@ -151,9 +182,7 @@ static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
151 182
152 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier; 183 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
153 184
154 nfc_shdlc_add_len_crc(skb); 185 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
155
156 r = shdlc->ops->xmit(shdlc, skb);
157 186
158 kfree_skb(skb); 187 kfree_skb(skb);
159 188
@@ -164,7 +193,7 @@ static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
164 * Free ack_pending frames until y_nr - 1, and reset t2 according to 193 * Free ack_pending frames until y_nr - 1, and reset t2 according to
165 * the remaining oldest ack_pending frame sent time 194 * the remaining oldest ack_pending frame sent time
166 */ 195 */
167static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr) 196static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
168{ 197{
169 struct sk_buff *skb; 198 struct sk_buff *skb;
170 int dnr = shdlc->dnr; /* MUST initially be < y_nr */ 199 int dnr = shdlc->dnr; /* MUST initially be < y_nr */
@@ -204,7 +233,7 @@ static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
204 * Receive validated frames from lower layer. skb contains HCI payload only. 233 * Receive validated frames from lower layer. skb contains HCI payload only.
205 * Handle according to algorithm at spec:10.8.2 234 * Handle according to algorithm at spec:10.8.2
206 */ 235 */
207static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc, 236static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
208 struct sk_buff *skb, int ns, int nr) 237 struct sk_buff *skb, int ns, int nr)
209{ 238{
210 int x_ns = ns; 239 int x_ns = ns;
@@ -216,66 +245,64 @@ static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
216 goto exit; 245 goto exit;
217 246
218 if (x_ns != shdlc->nr) { 247 if (x_ns != shdlc->nr) {
219 nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr); 248 llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
220 goto exit; 249 goto exit;
221 } 250 }
222 251
223 if (shdlc->t1_active == false) { 252 if (shdlc->t1_active == false) {
224 shdlc->t1_active = true; 253 shdlc->t1_active = true;
225 mod_timer(&shdlc->t1_timer, 254 mod_timer(&shdlc->t1_timer, jiffies +
226 msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w))); 255 msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
227 pr_debug("(re)Start T1(send ack)\n"); 256 pr_debug("(re)Start T1(send ack)\n");
228 } 257 }
229 258
230 if (skb->len) { 259 if (skb->len) {
231 nfc_hci_recv_frame(shdlc->hdev, skb); 260 shdlc->rcv_to_hci(shdlc->hdev, skb);
232 skb = NULL; 261 skb = NULL;
233 } 262 }
234 263
235 shdlc->nr = (shdlc->nr + 1) % 8; 264 shdlc->nr = (shdlc->nr + 1) % 8;
236 265
237 if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { 266 if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
238 nfc_shdlc_reset_t2(shdlc, y_nr); 267 llc_shdlc_reset_t2(shdlc, y_nr);
239 268
240 shdlc->dnr = y_nr; 269 shdlc->dnr = y_nr;
241 } 270 }
242 271
243exit: 272exit:
244 if (skb) 273 kfree_skb(skb);
245 kfree_skb(skb);
246} 274}
247 275
248static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr) 276static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
249{ 277{
250 pr_debug("remote acked up to frame %d excluded\n", y_nr); 278 pr_debug("remote acked up to frame %d excluded\n", y_nr);
251 279
252 if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { 280 if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
253 nfc_shdlc_reset_t2(shdlc, y_nr); 281 llc_shdlc_reset_t2(shdlc, y_nr);
254 shdlc->dnr = y_nr; 282 shdlc->dnr = y_nr;
255 } 283 }
256} 284}
257 285
258static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc) 286static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
259{ 287{
260 struct sk_buff *skb; 288 struct sk_buff *skb;
261 289
262 pr_debug("ns reset to %d\n", shdlc->dnr); 290 pr_debug("ns reset to %d\n", shdlc->dnr);
263 291
264 while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) { 292 while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
265 skb_pull(skb, 2); /* remove len+control */ 293 skb_pull(skb, 1); /* remove control field */
266 skb_trim(skb, skb->len - 2); /* remove crc */
267 skb_queue_head(&shdlc->send_q, skb); 294 skb_queue_head(&shdlc->send_q, skb);
268 } 295 }
269 shdlc->ns = shdlc->dnr; 296 shdlc->ns = shdlc->dnr;
270} 297}
271 298
272static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr) 299static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
273{ 300{
274 struct sk_buff *skb; 301 struct sk_buff *skb;
275 302
276 pr_debug("remote asks retransmition from frame %d\n", y_nr); 303 pr_debug("remote asks retransmition from frame %d\n", y_nr);
277 304
278 if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) { 305 if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
279 if (shdlc->t2_active) { 306 if (shdlc->t2_active) {
280 del_timer_sync(&shdlc->t2_timer); 307 del_timer_sync(&shdlc->t2_timer);
281 shdlc->t2_active = false; 308 shdlc->t2_active = false;
@@ -289,12 +316,12 @@ static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
289 } 316 }
290 } 317 }
291 318
292 nfc_shdlc_requeue_ack_pending(shdlc); 319 llc_shdlc_requeue_ack_pending(shdlc);
293 } 320 }
294} 321}
295 322
296/* See spec RR:10.8.3 REJ:10.8.4 */ 323/* See spec RR:10.8.3 REJ:10.8.4 */
297static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc, 324static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
298 enum sframe_type s_frame_type, int nr) 325 enum sframe_type s_frame_type, int nr)
299{ 326{
300 struct sk_buff *skb; 327 struct sk_buff *skb;
@@ -304,21 +331,21 @@ static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
304 331
305 switch (s_frame_type) { 332 switch (s_frame_type) {
306 case S_FRAME_RR: 333 case S_FRAME_RR:
307 nfc_shdlc_rcv_ack(shdlc, nr); 334 llc_shdlc_rcv_ack(shdlc, nr);
308 if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */ 335 if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */
309 shdlc->rnr = false; 336 shdlc->rnr = false;
310 if (shdlc->send_q.qlen == 0) { 337 if (shdlc->send_q.qlen == 0) {
311 skb = nfc_shdlc_alloc_skb(shdlc, 0); 338 skb = llc_shdlc_alloc_skb(shdlc, 0);
312 if (skb) 339 if (skb)
313 skb_queue_tail(&shdlc->send_q, skb); 340 skb_queue_tail(&shdlc->send_q, skb);
314 } 341 }
315 } 342 }
316 break; 343 break;
317 case S_FRAME_REJ: 344 case S_FRAME_REJ:
318 nfc_shdlc_rcv_rej(shdlc, nr); 345 llc_shdlc_rcv_rej(shdlc, nr);
319 break; 346 break;
320 case S_FRAME_RNR: 347 case S_FRAME_RNR:
321 nfc_shdlc_rcv_ack(shdlc, nr); 348 llc_shdlc_rcv_ack(shdlc, nr);
322 shdlc->rnr = true; 349 shdlc->rnr = true;
323 break; 350 break;
324 default: 351 default:
@@ -326,7 +353,7 @@ static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
326 } 353 }
327} 354}
328 355
329static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r) 356static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
330{ 357{
331 pr_debug("result=%d\n", r); 358 pr_debug("result=%d\n", r);
332 359
@@ -337,7 +364,7 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
337 shdlc->nr = 0; 364 shdlc->nr = 0;
338 shdlc->dnr = 0; 365 shdlc->dnr = 0;
339 366
340 shdlc->state = SHDLC_CONNECTED; 367 shdlc->state = SHDLC_HALF_CONNECTED;
341 } else { 368 } else {
342 shdlc->state = SHDLC_DISCONNECTED; 369 shdlc->state = SHDLC_DISCONNECTED;
343 } 370 }
@@ -347,36 +374,36 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
347 wake_up(shdlc->connect_wq); 374 wake_up(shdlc->connect_wq);
348} 375}
349 376
350static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc) 377static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
351{ 378{
352 struct sk_buff *skb; 379 struct sk_buff *skb;
353 380
354 pr_debug("\n"); 381 pr_debug("\n");
355 382
356 skb = nfc_shdlc_alloc_skb(shdlc, 2); 383 skb = llc_shdlc_alloc_skb(shdlc, 2);
357 if (skb == NULL) 384 if (skb == NULL)
358 return -ENOMEM; 385 return -ENOMEM;
359 386
360 *skb_put(skb, 1) = SHDLC_MAX_WINDOW; 387 *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
361 *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0; 388 *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
362 389
363 return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET); 390 return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
364} 391}
365 392
366static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc) 393static int llc_shdlc_connect_send_ua(struct llc_shdlc *shdlc)
367{ 394{
368 struct sk_buff *skb; 395 struct sk_buff *skb;
369 396
370 pr_debug("\n"); 397 pr_debug("\n");
371 398
372 skb = nfc_shdlc_alloc_skb(shdlc, 0); 399 skb = llc_shdlc_alloc_skb(shdlc, 0);
373 if (skb == NULL) 400 if (skb == NULL)
374 return -ENOMEM; 401 return -ENOMEM;
375 402
376 return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA); 403 return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
377} 404}
378 405
379static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc, 406static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
380 struct sk_buff *skb, 407 struct sk_buff *skb,
381 enum uframe_modifier u_frame_modifier) 408 enum uframe_modifier u_frame_modifier)
382{ 409{
@@ -388,8 +415,13 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
388 415
389 switch (u_frame_modifier) { 416 switch (u_frame_modifier) {
390 case U_FRAME_RSET: 417 case U_FRAME_RSET:
391 if (shdlc->state == SHDLC_NEGOCIATING) { 418 switch (shdlc->state) {
392 /* we sent RSET, but chip wants to negociate */ 419 case SHDLC_NEGOTIATING:
420 case SHDLC_CONNECTING:
421 /*
422 * We sent RSET, but chip wants to negociate or we
423 * got RSET before we managed to send out our.
424 */
393 if (skb->len > 0) 425 if (skb->len > 0)
394 w = skb->data[0]; 426 w = skb->data[0];
395 427
@@ -401,22 +433,34 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
401 (SHDLC_SREJ_SUPPORT || (srej_support == false))) { 433 (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
402 shdlc->w = w; 434 shdlc->w = w;
403 shdlc->srej_support = srej_support; 435 shdlc->srej_support = srej_support;
404 r = nfc_shdlc_connect_send_ua(shdlc); 436 r = llc_shdlc_connect_send_ua(shdlc);
405 nfc_shdlc_connect_complete(shdlc, r); 437 llc_shdlc_connect_complete(shdlc, r);
406 } 438 }
407 } else if (shdlc->state == SHDLC_CONNECTED) { 439 break;
440 case SHDLC_HALF_CONNECTED:
441 /*
442 * Chip resent RSET due to its timeout - Ignote it
443 * as we already sent UA.
444 */
445 break;
446 case SHDLC_CONNECTED:
408 /* 447 /*
409 * Chip wants to reset link. This is unexpected and 448 * Chip wants to reset link. This is unexpected and
410 * unsupported. 449 * unsupported.
411 */ 450 */
412 shdlc->hard_fault = -ECONNRESET; 451 shdlc->hard_fault = -ECONNRESET;
452 break;
453 default:
454 break;
413 } 455 }
414 break; 456 break;
415 case U_FRAME_UA: 457 case U_FRAME_UA:
416 if ((shdlc->state == SHDLC_CONNECTING && 458 if ((shdlc->state == SHDLC_CONNECTING &&
417 shdlc->connect_tries > 0) || 459 shdlc->connect_tries > 0) ||
418 (shdlc->state == SHDLC_NEGOCIATING)) 460 (shdlc->state == SHDLC_NEGOTIATING)) {
419 nfc_shdlc_connect_complete(shdlc, 0); 461 llc_shdlc_connect_complete(shdlc, 0);
462 shdlc->state = SHDLC_CONNECTED;
463 }
420 break; 464 break;
421 default: 465 default:
422 break; 466 break;
@@ -425,7 +469,7 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
425 kfree_skb(skb); 469 kfree_skb(skb);
426} 470}
427 471
428static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc) 472static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
429{ 473{
430 struct sk_buff *skb; 474 struct sk_buff *skb;
431 u8 control; 475 u8 control;
@@ -443,19 +487,25 @@ static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
443 switch (control & SHDLC_CONTROL_HEAD_MASK) { 487 switch (control & SHDLC_CONTROL_HEAD_MASK) {
444 case SHDLC_CONTROL_HEAD_I: 488 case SHDLC_CONTROL_HEAD_I:
445 case SHDLC_CONTROL_HEAD_I2: 489 case SHDLC_CONTROL_HEAD_I2:
490 if (shdlc->state == SHDLC_HALF_CONNECTED)
491 shdlc->state = SHDLC_CONNECTED;
492
446 ns = (control & SHDLC_CONTROL_NS_MASK) >> 3; 493 ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
447 nr = control & SHDLC_CONTROL_NR_MASK; 494 nr = control & SHDLC_CONTROL_NR_MASK;
448 nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr); 495 llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
449 break; 496 break;
450 case SHDLC_CONTROL_HEAD_S: 497 case SHDLC_CONTROL_HEAD_S:
498 if (shdlc->state == SHDLC_HALF_CONNECTED)
499 shdlc->state = SHDLC_CONNECTED;
500
451 s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3; 501 s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
452 nr = control & SHDLC_CONTROL_NR_MASK; 502 nr = control & SHDLC_CONTROL_NR_MASK;
453 nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr); 503 llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
454 kfree_skb(skb); 504 kfree_skb(skb);
455 break; 505 break;
456 case SHDLC_CONTROL_HEAD_U: 506 case SHDLC_CONTROL_HEAD_U:
457 u_frame_modifier = control & SHDLC_CONTROL_M_MASK; 507 u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
458 nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier); 508 llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
459 break; 509 break;
460 default: 510 default:
461 pr_err("UNKNOWN Control=%d\n", control); 511 pr_err("UNKNOWN Control=%d\n", control);
@@ -465,7 +515,7 @@ static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
465 } 515 }
466} 516}
467 517
468static int nfc_shdlc_w_used(int ns, int dnr) 518static int llc_shdlc_w_used(int ns, int dnr)
469{ 519{
470 int unack_count; 520 int unack_count;
471 521
@@ -478,7 +528,7 @@ static int nfc_shdlc_w_used(int ns, int dnr)
478} 528}
479 529
480/* Send frames according to algorithm at spec:10.8.1 */ 530/* Send frames according to algorithm at spec:10.8.1 */
481static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc) 531static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
482{ 532{
483 struct sk_buff *skb; 533 struct sk_buff *skb;
484 int r; 534 int r;
@@ -489,7 +539,7 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
489 ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n", 539 ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
490 shdlc->send_q.qlen, shdlc->ns, shdlc->dnr, 540 shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
491 shdlc->rnr == false ? "false" : "true", 541 shdlc->rnr == false ? "false" : "true",
492 shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr), 542 shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
493 shdlc->ack_pending_q.qlen); 543 shdlc->ack_pending_q.qlen);
494 544
495 while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w && 545 while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
@@ -508,11 +558,9 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
508 558
509 pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns, 559 pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
510 shdlc->nr); 560 shdlc->nr);
511 /* SHDLC_DUMP_SKB("shdlc frame written", skb); */ 561 SHDLC_DUMP_SKB("shdlc frame written", skb);
512
513 nfc_shdlc_add_len_crc(skb);
514 562
515 r = shdlc->ops->xmit(shdlc, skb); 563 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
516 if (r < 0) { 564 if (r < 0) {
517 shdlc->hard_fault = r; 565 shdlc->hard_fault = r;
518 break; 566 break;
@@ -534,36 +582,36 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
534 } 582 }
535} 583}
536 584
537static void nfc_shdlc_connect_timeout(unsigned long data) 585static void llc_shdlc_connect_timeout(unsigned long data)
538{ 586{
539 struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; 587 struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
540 588
541 pr_debug("\n"); 589 pr_debug("\n");
542 590
543 queue_work(shdlc->sm_wq, &shdlc->sm_work); 591 queue_work(system_nrt_wq, &shdlc->sm_work);
544} 592}
545 593
546static void nfc_shdlc_t1_timeout(unsigned long data) 594static void llc_shdlc_t1_timeout(unsigned long data)
547{ 595{
548 struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; 596 struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
549 597
550 pr_debug("SoftIRQ: need to send ack\n"); 598 pr_debug("SoftIRQ: need to send ack\n");
551 599
552 queue_work(shdlc->sm_wq, &shdlc->sm_work); 600 queue_work(system_nrt_wq, &shdlc->sm_work);
553} 601}
554 602
555static void nfc_shdlc_t2_timeout(unsigned long data) 603static void llc_shdlc_t2_timeout(unsigned long data)
556{ 604{
557 struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data; 605 struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
558 606
559 pr_debug("SoftIRQ: need to retransmit\n"); 607 pr_debug("SoftIRQ: need to retransmit\n");
560 608
561 queue_work(shdlc->sm_wq, &shdlc->sm_work); 609 queue_work(system_nrt_wq, &shdlc->sm_work);
562} 610}
563 611
564static void nfc_shdlc_sm_work(struct work_struct *work) 612static void llc_shdlc_sm_work(struct work_struct *work)
565{ 613{
566 struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work); 614 struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
567 int r; 615 int r;
568 616
569 pr_debug("\n"); 617 pr_debug("\n");
@@ -578,46 +626,47 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
578 break; 626 break;
579 case SHDLC_CONNECTING: 627 case SHDLC_CONNECTING:
580 if (shdlc->hard_fault) { 628 if (shdlc->hard_fault) {
581 nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault); 629 llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
582 break; 630 break;
583 } 631 }
584 632
585 if (shdlc->connect_tries++ < 5) 633 if (shdlc->connect_tries++ < 5)
586 r = nfc_shdlc_connect_initiate(shdlc); 634 r = llc_shdlc_connect_initiate(shdlc);
587 else 635 else
588 r = -ETIME; 636 r = -ETIME;
589 if (r < 0) 637 if (r < 0)
590 nfc_shdlc_connect_complete(shdlc, r); 638 llc_shdlc_connect_complete(shdlc, r);
591 else { 639 else {
592 mod_timer(&shdlc->connect_timer, jiffies + 640 mod_timer(&shdlc->connect_timer, jiffies +
593 msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); 641 msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
594 642
595 shdlc->state = SHDLC_NEGOCIATING; 643 shdlc->state = SHDLC_NEGOTIATING;
596 } 644 }
597 break; 645 break;
598 case SHDLC_NEGOCIATING: 646 case SHDLC_NEGOTIATING:
599 if (timer_pending(&shdlc->connect_timer) == 0) { 647 if (timer_pending(&shdlc->connect_timer) == 0) {
600 shdlc->state = SHDLC_CONNECTING; 648 shdlc->state = SHDLC_CONNECTING;
601 queue_work(shdlc->sm_wq, &shdlc->sm_work); 649 queue_work(system_nrt_wq, &shdlc->sm_work);
602 } 650 }
603 651
604 nfc_shdlc_handle_rcv_queue(shdlc); 652 llc_shdlc_handle_rcv_queue(shdlc);
605 653
606 if (shdlc->hard_fault) { 654 if (shdlc->hard_fault) {
607 nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault); 655 llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
608 break; 656 break;
609 } 657 }
610 break; 658 break;
659 case SHDLC_HALF_CONNECTED:
611 case SHDLC_CONNECTED: 660 case SHDLC_CONNECTED:
612 nfc_shdlc_handle_rcv_queue(shdlc); 661 llc_shdlc_handle_rcv_queue(shdlc);
613 nfc_shdlc_handle_send_queue(shdlc); 662 llc_shdlc_handle_send_queue(shdlc);
614 663
615 if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) { 664 if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
616 pr_debug 665 pr_debug
617 ("Handle T1(send ack) elapsed (T1 now inactive)\n"); 666 ("Handle T1(send ack) elapsed (T1 now inactive)\n");
618 667
619 shdlc->t1_active = false; 668 shdlc->t1_active = false;
620 r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR, 669 r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
621 shdlc->nr); 670 shdlc->nr);
622 if (r < 0) 671 if (r < 0)
623 shdlc->hard_fault = r; 672 shdlc->hard_fault = r;
@@ -629,12 +678,12 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
629 678
630 shdlc->t2_active = false; 679 shdlc->t2_active = false;
631 680
632 nfc_shdlc_requeue_ack_pending(shdlc); 681 llc_shdlc_requeue_ack_pending(shdlc);
633 nfc_shdlc_handle_send_queue(shdlc); 682 llc_shdlc_handle_send_queue(shdlc);
634 } 683 }
635 684
636 if (shdlc->hard_fault) { 685 if (shdlc->hard_fault) {
637 nfc_hci_driver_failure(shdlc->hdev, shdlc->hard_fault); 686 shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
638 } 687 }
639 break; 688 break;
640 default: 689 default:
@@ -647,7 +696,7 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
647 * Called from syscall context to establish shdlc link. Sleeps until 696 * Called from syscall context to establish shdlc link. Sleeps until
648 * link is ready or failure. 697 * link is ready or failure.
649 */ 698 */
650static int nfc_shdlc_connect(struct nfc_shdlc *shdlc) 699static int llc_shdlc_connect(struct llc_shdlc *shdlc)
651{ 700{
652 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq); 701 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
653 702
@@ -662,14 +711,14 @@ static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
662 711
663 mutex_unlock(&shdlc->state_mutex); 712 mutex_unlock(&shdlc->state_mutex);
664 713
665 queue_work(shdlc->sm_wq, &shdlc->sm_work); 714 queue_work(system_nrt_wq, &shdlc->sm_work);
666 715
667 wait_event(connect_wq, shdlc->connect_result != 1); 716 wait_event(connect_wq, shdlc->connect_result != 1);
668 717
669 return shdlc->connect_result; 718 return shdlc->connect_result;
670} 719}
671 720
672static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc) 721static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
673{ 722{
674 pr_debug("\n"); 723 pr_debug("\n");
675 724
@@ -679,7 +728,7 @@ static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
679 728
680 mutex_unlock(&shdlc->state_mutex); 729 mutex_unlock(&shdlc->state_mutex);
681 730
682 queue_work(shdlc->sm_wq, &shdlc->sm_work); 731 queue_work(system_nrt_wq, &shdlc->sm_work);
683} 732}
684 733
685/* 734/*
@@ -687,7 +736,7 @@ static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
687 * skb contains only LLC header and payload. 736 * skb contains only LLC header and payload.
688 * If skb == NULL, it is a notification that the link below is dead. 737 * If skb == NULL, it is a notification that the link below is dead.
689 */ 738 */
690void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb) 739static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
691{ 740{
692 if (skb == NULL) { 741 if (skb == NULL) {
693 pr_err("NULL Frame -> link is dead\n"); 742 pr_err("NULL Frame -> link is dead\n");
@@ -697,176 +746,37 @@ void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
697 skb_queue_tail(&shdlc->rcv_q, skb); 746 skb_queue_tail(&shdlc->rcv_q, skb);
698 } 747 }
699 748
700 queue_work(shdlc->sm_wq, &shdlc->sm_work); 749 queue_work(system_nrt_wq, &shdlc->sm_work);
701}
702EXPORT_SYMBOL(nfc_shdlc_recv_frame);
703
704static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
705{
706 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
707 int r;
708
709 pr_debug("\n");
710
711 if (shdlc->ops->open) {
712 r = shdlc->ops->open(shdlc);
713 if (r < 0)
714 return r;
715 }
716
717 r = nfc_shdlc_connect(shdlc);
718 if (r < 0 && shdlc->ops->close)
719 shdlc->ops->close(shdlc);
720
721 return r;
722}
723
724static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
725{
726 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
727
728 pr_debug("\n");
729
730 nfc_shdlc_disconnect(shdlc);
731
732 if (shdlc->ops->close)
733 shdlc->ops->close(shdlc);
734} 750}
735 751
736static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev) 752static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
753 rcv_to_hci_t rcv_to_hci, int tx_headroom,
754 int tx_tailroom, int *rx_headroom, int *rx_tailroom,
755 llc_failure_t llc_failure)
737{ 756{
738 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev); 757 struct llc_shdlc *shdlc;
739 int r = 0;
740
741 pr_debug("\n");
742 758
743 if (shdlc->ops->hci_ready) 759 *rx_headroom = SHDLC_LLC_HEAD_ROOM;
744 r = shdlc->ops->hci_ready(shdlc); 760 *rx_tailroom = 0;
745
746 return r;
747}
748
749static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
750{
751 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
752
753 SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);
754
755 skb_queue_tail(&shdlc->send_q, skb);
756 761
757 queue_work(shdlc->sm_wq, &shdlc->sm_work); 762 shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
758
759 return 0;
760}
761
762static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
763 u32 im_protocols, u32 tm_protocols)
764{
765 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
766
767 pr_debug("\n");
768
769 if (shdlc->ops->start_poll)
770 return shdlc->ops->start_poll(shdlc,
771 im_protocols, tm_protocols);
772
773 return 0;
774}
775
776static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
777 struct nfc_target *target)
778{
779 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
780
781 if (shdlc->ops->target_from_gate)
782 return shdlc->ops->target_from_gate(shdlc, gate, target);
783
784 return -EPERM;
785}
786
787static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
788 u8 gate,
789 struct nfc_target *target)
790{
791 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
792
793 pr_debug("\n");
794
795 if (shdlc->ops->complete_target_discovered)
796 return shdlc->ops->complete_target_discovered(shdlc, gate,
797 target);
798
799 return 0;
800}
801
802static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
803 struct nfc_target *target,
804 struct sk_buff *skb,
805 struct sk_buff **res_skb)
806{
807 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
808
809 if (shdlc->ops->data_exchange)
810 return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
811
812 return -EPERM;
813}
814
815static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
816 struct nfc_target *target)
817{
818 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
819
820 if (shdlc->ops->check_presence)
821 return shdlc->ops->check_presence(shdlc, target);
822
823 return 0;
824}
825
826static struct nfc_hci_ops shdlc_ops = {
827 .open = nfc_shdlc_open,
828 .close = nfc_shdlc_close,
829 .hci_ready = nfc_shdlc_hci_ready,
830 .xmit = nfc_shdlc_xmit,
831 .start_poll = nfc_shdlc_start_poll,
832 .target_from_gate = nfc_shdlc_target_from_gate,
833 .complete_target_discovered = nfc_shdlc_complete_target_discovered,
834 .data_exchange = nfc_shdlc_data_exchange,
835 .check_presence = nfc_shdlc_check_presence,
836};
837
838struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
839 struct nfc_hci_init_data *init_data,
840 u32 protocols,
841 int tx_headroom, int tx_tailroom,
842 int max_link_payload, const char *devname)
843{
844 struct nfc_shdlc *shdlc;
845 int r;
846 char name[32];
847
848 if (ops->xmit == NULL)
849 return NULL;
850
851 shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
852 if (shdlc == NULL) 763 if (shdlc == NULL)
853 return NULL; 764 return NULL;
854 765
855 mutex_init(&shdlc->state_mutex); 766 mutex_init(&shdlc->state_mutex);
856 shdlc->ops = ops;
857 shdlc->state = SHDLC_DISCONNECTED; 767 shdlc->state = SHDLC_DISCONNECTED;
858 768
859 init_timer(&shdlc->connect_timer); 769 init_timer(&shdlc->connect_timer);
860 shdlc->connect_timer.data = (unsigned long)shdlc; 770 shdlc->connect_timer.data = (unsigned long)shdlc;
861 shdlc->connect_timer.function = nfc_shdlc_connect_timeout; 771 shdlc->connect_timer.function = llc_shdlc_connect_timeout;
862 772
863 init_timer(&shdlc->t1_timer); 773 init_timer(&shdlc->t1_timer);
864 shdlc->t1_timer.data = (unsigned long)shdlc; 774 shdlc->t1_timer.data = (unsigned long)shdlc;
865 shdlc->t1_timer.function = nfc_shdlc_t1_timeout; 775 shdlc->t1_timer.function = llc_shdlc_t1_timeout;
866 776
867 init_timer(&shdlc->t2_timer); 777 init_timer(&shdlc->t2_timer);
868 shdlc->t2_timer.data = (unsigned long)shdlc; 778 shdlc->t2_timer.data = (unsigned long)shdlc;
869 shdlc->t2_timer.function = nfc_shdlc_t2_timeout; 779 shdlc->t2_timer.function = llc_shdlc_t2_timeout;
870 780
871 shdlc->w = SHDLC_MAX_WINDOW; 781 shdlc->w = SHDLC_MAX_WINDOW;
872 shdlc->srej_support = SHDLC_SREJ_SUPPORT; 782 shdlc->srej_support = SHDLC_SREJ_SUPPORT;
@@ -875,77 +785,73 @@ struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
875 skb_queue_head_init(&shdlc->send_q); 785 skb_queue_head_init(&shdlc->send_q);
876 skb_queue_head_init(&shdlc->ack_pending_q); 786 skb_queue_head_init(&shdlc->ack_pending_q);
877 787
878 INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work); 788 INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);
879 snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
880 shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
881 WQ_MEM_RECLAIM, 1);
882 if (shdlc->sm_wq == NULL)
883 goto err_allocwq;
884 789
885 shdlc->client_headroom = tx_headroom; 790 shdlc->hdev = hdev;
886 shdlc->client_tailroom = tx_tailroom; 791 shdlc->xmit_to_drv = xmit_to_drv;
887 792 shdlc->rcv_to_hci = rcv_to_hci;
888 shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols, 793 shdlc->tx_headroom = tx_headroom;
889 tx_headroom + SHDLC_LLC_HEAD_ROOM, 794 shdlc->tx_tailroom = tx_tailroom;
890 tx_tailroom + SHDLC_LLC_TAIL_ROOM, 795 shdlc->llc_failure = llc_failure;
891 max_link_payload);
892 if (shdlc->hdev == NULL)
893 goto err_allocdev;
894
895 nfc_hci_set_clientdata(shdlc->hdev, shdlc);
896
897 r = nfc_hci_register_device(shdlc->hdev);
898 if (r < 0)
899 goto err_regdev;
900 796
901 return shdlc; 797 return shdlc;
798}
902 799
903err_regdev: 800static void llc_shdlc_deinit(struct nfc_llc *llc)
904 nfc_hci_free_device(shdlc->hdev); 801{
802 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
905 803
906err_allocdev: 804 skb_queue_purge(&shdlc->rcv_q);
907 destroy_workqueue(shdlc->sm_wq); 805 skb_queue_purge(&shdlc->send_q);
806 skb_queue_purge(&shdlc->ack_pending_q);
908 807
909err_allocwq:
910 kfree(shdlc); 808 kfree(shdlc);
911
912 return NULL;
913} 809}
914EXPORT_SYMBOL(nfc_shdlc_allocate);
915 810
916void nfc_shdlc_free(struct nfc_shdlc *shdlc) 811static int llc_shdlc_start(struct nfc_llc *llc)
917{ 812{
918 pr_debug("\n"); 813 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
919 814
920 nfc_hci_unregister_device(shdlc->hdev); 815 return llc_shdlc_connect(shdlc);
921 nfc_hci_free_device(shdlc->hdev); 816}
922 817
923 destroy_workqueue(shdlc->sm_wq); 818static int llc_shdlc_stop(struct nfc_llc *llc)
819{
820 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
924 821
925 skb_queue_purge(&shdlc->rcv_q); 822 llc_shdlc_disconnect(shdlc);
926 skb_queue_purge(&shdlc->send_q);
927 skb_queue_purge(&shdlc->ack_pending_q);
928 823
929 kfree(shdlc); 824 return 0;
930} 825}
931EXPORT_SYMBOL(nfc_shdlc_free);
932 826
933void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata) 827static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
934{ 828{
935 pr_debug("\n"); 829 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
936 830
937 shdlc->clientdata = clientdata; 831 llc_shdlc_recv_frame(shdlc, skb);
938} 832}
939EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
940 833
941void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc) 834static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
942{ 835{
943 return shdlc->clientdata; 836 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
837
838 skb_queue_tail(&shdlc->send_q, skb);
839
840 queue_work(system_nrt_wq, &shdlc->sm_work);
841
842 return 0;
944} 843}
945EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
946 844
947struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc) 845static struct nfc_llc_ops llc_shdlc_ops = {
846 .init = llc_shdlc_init,
847 .deinit = llc_shdlc_deinit,
848 .start = llc_shdlc_start,
849 .stop = llc_shdlc_stop,
850 .rcv_from_drv = llc_shdlc_rcv_from_drv,
851 .xmit_from_hci = llc_shdlc_xmit_from_hci,
852};
853
854int nfc_llc_shdlc_register(void)
948{ 855{
949 return shdlc->hdev; 856 return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops);
950} 857}
951EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index b982b5b890d7..c45ccd6c094c 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -312,6 +312,8 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
312 312
313 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); 313 skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
314 314
315 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX);
316
315 return nfc_data_exchange(dev, local->target_idx, skb, 317 return nfc_data_exchange(dev, local->target_idx, skb,
316 nfc_llcp_recv, local); 318 nfc_llcp_recv, local);
317} 319}
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 82f0f7588b46..c12c5ef3d036 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -56,7 +56,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
56 sk_for_each_safe(sk, node, tmp, &local->sockets.head) { 56 sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
57 llcp_sock = nfc_llcp_sock(sk); 57 llcp_sock = nfc_llcp_sock(sk);
58 58
59 lock_sock(sk); 59 bh_lock_sock(sk);
60 60
61 if (sk->sk_state == LLCP_CONNECTED) 61 if (sk->sk_state == LLCP_CONNECTED)
62 nfc_put_device(llcp_sock->dev); 62 nfc_put_device(llcp_sock->dev);
@@ -68,26 +68,26 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
68 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, 68 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
69 accept_queue) { 69 accept_queue) {
70 accept_sk = &lsk->sk; 70 accept_sk = &lsk->sk;
71 lock_sock(accept_sk); 71 bh_lock_sock(accept_sk);
72 72
73 nfc_llcp_accept_unlink(accept_sk); 73 nfc_llcp_accept_unlink(accept_sk);
74 74
75 accept_sk->sk_state = LLCP_CLOSED; 75 accept_sk->sk_state = LLCP_CLOSED;
76 76
77 release_sock(accept_sk); 77 bh_unlock_sock(accept_sk);
78 78
79 sock_orphan(accept_sk); 79 sock_orphan(accept_sk);
80 } 80 }
81 81
82 if (listen == true) { 82 if (listen == true) {
83 release_sock(sk); 83 bh_unlock_sock(sk);
84 continue; 84 continue;
85 } 85 }
86 } 86 }
87 87
88 sk->sk_state = LLCP_CLOSED; 88 sk->sk_state = LLCP_CLOSED;
89 89
90 release_sock(sk); 90 bh_unlock_sock(sk);
91 91
92 sock_orphan(sk); 92 sock_orphan(sk);
93 93
@@ -114,9 +114,9 @@ static void local_release(struct kref *ref)
114 nfc_llcp_socket_release(local, false); 114 nfc_llcp_socket_release(local, false);
115 del_timer_sync(&local->link_timer); 115 del_timer_sync(&local->link_timer);
116 skb_queue_purge(&local->tx_queue); 116 skb_queue_purge(&local->tx_queue);
117 destroy_workqueue(local->tx_wq); 117 cancel_work_sync(&local->tx_work);
118 destroy_workqueue(local->rx_wq); 118 cancel_work_sync(&local->rx_work);
119 destroy_workqueue(local->timeout_wq); 119 cancel_work_sync(&local->timeout_work);
120 kfree_skb(local->rx_pending); 120 kfree_skb(local->rx_pending);
121 kfree(local); 121 kfree(local);
122} 122}
@@ -181,7 +181,7 @@ static void nfc_llcp_symm_timer(unsigned long data)
181 181
182 pr_err("SYMM timeout\n"); 182 pr_err("SYMM timeout\n");
183 183
184 queue_work(local->timeout_wq, &local->timeout_work); 184 queue_work(system_nrt_wq, &local->timeout_work);
185} 185}
186 186
187struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) 187struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
@@ -426,6 +426,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
426 u8 *miux_tlv, miux_length; 426 u8 *miux_tlv, miux_length;
427 __be16 miux; 427 __be16 miux;
428 u8 gb_len = 0; 428 u8 gb_len = 0;
429 int ret = 0;
429 430
430 version = LLCP_VERSION_11; 431 version = LLCP_VERSION_11;
431 version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, 432 version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
@@ -450,8 +451,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
450 gb_len += ARRAY_SIZE(llcp_magic); 451 gb_len += ARRAY_SIZE(llcp_magic);
451 452
452 if (gb_len > NFC_MAX_GT_LEN) { 453 if (gb_len > NFC_MAX_GT_LEN) {
453 kfree(version_tlv); 454 ret = -EINVAL;
454 return -EINVAL; 455 goto out;
455 } 456 }
456 457
457 gb_cur = local->gb; 458 gb_cur = local->gb;
@@ -471,12 +472,15 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
471 memcpy(gb_cur, miux_tlv, miux_length); 472 memcpy(gb_cur, miux_tlv, miux_length);
472 gb_cur += miux_length; 473 gb_cur += miux_length;
473 474
475 local->gb_len = gb_len;
476
477out:
474 kfree(version_tlv); 478 kfree(version_tlv);
475 kfree(lto_tlv); 479 kfree(lto_tlv);
480 kfree(wks_tlv);
481 kfree(miux_tlv);
476 482
477 local->gb_len = gb_len; 483 return ret;
478
479 return 0;
480} 484}
481 485
482u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len) 486u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
@@ -554,6 +558,46 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
554 sock->recv_ack_n = (sock->recv_n - 1) % 16; 558 sock->recv_ack_n = (sock->recv_n - 1) % 16;
555} 559}
556 560
561void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
562 struct sk_buff *skb, u8 direction)
563{
564 struct hlist_node *node;
565 struct sk_buff *skb_copy = NULL, *nskb;
566 struct sock *sk;
567 u8 *data;
568
569 read_lock(&local->raw_sockets.lock);
570
571 sk_for_each(sk, node, &local->raw_sockets.head) {
572 if (sk->sk_state != LLCP_BOUND)
573 continue;
574
575 if (skb_copy == NULL) {
576 skb_copy = __pskb_copy(skb, NFC_LLCP_RAW_HEADER_SIZE,
577 GFP_ATOMIC);
578
579 if (skb_copy == NULL)
580 continue;
581
582 data = skb_push(skb_copy, NFC_LLCP_RAW_HEADER_SIZE);
583
584 data[0] = local->dev ? local->dev->idx : 0xFF;
585 data[1] = direction;
586 }
587
588 nskb = skb_clone(skb_copy, GFP_ATOMIC);
589 if (!nskb)
590 continue;
591
592 if (sock_queue_rcv_skb(sk, nskb))
593 kfree_skb(nskb);
594 }
595
596 read_unlock(&local->raw_sockets.lock);
597
598 kfree_skb(skb_copy);
599}
600
557static void nfc_llcp_tx_work(struct work_struct *work) 601static void nfc_llcp_tx_work(struct work_struct *work)
558{ 602{
559 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, 603 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
@@ -574,6 +618,9 @@ static void nfc_llcp_tx_work(struct work_struct *work)
574 DUMP_PREFIX_OFFSET, 16, 1, 618 DUMP_PREFIX_OFFSET, 16, 1,
575 skb->data, skb->len, true); 619 skb->data, skb->len, true);
576 620
621 nfc_llcp_send_to_raw_sock(local, skb,
622 NFC_LLCP_DIRECTION_TX);
623
577 ret = nfc_data_exchange(local->dev, local->target_idx, 624 ret = nfc_data_exchange(local->dev, local->target_idx,
578 skb, nfc_llcp_recv, local); 625 skb, nfc_llcp_recv, local);
579 626
@@ -1018,6 +1065,8 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1018 print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET, 1065 print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
1019 16, 1, skb->data, skb->len, true); 1066 16, 1, skb->data, skb->len, true);
1020 1067
1068 nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX);
1069
1021 switch (ptype) { 1070 switch (ptype) {
1022 case LLCP_PDU_SYMM: 1071 case LLCP_PDU_SYMM:
1023 pr_debug("SYMM\n"); 1072 pr_debug("SYMM\n");
@@ -1052,7 +1101,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1052 1101
1053 } 1102 }
1054 1103
1055 queue_work(local->tx_wq, &local->tx_work); 1104 queue_work(system_nrt_wq, &local->tx_work);
1056 kfree_skb(local->rx_pending); 1105 kfree_skb(local->rx_pending);
1057 local->rx_pending = NULL; 1106 local->rx_pending = NULL;
1058 1107
@@ -1071,7 +1120,7 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
1071 1120
1072 local->rx_pending = skb_get(skb); 1121 local->rx_pending = skb_get(skb);
1073 del_timer(&local->link_timer); 1122 del_timer(&local->link_timer);
1074 queue_work(local->rx_wq, &local->rx_work); 1123 queue_work(system_nrt_wq, &local->rx_work);
1075 1124
1076 return; 1125 return;
1077} 1126}
@@ -1086,7 +1135,7 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
1086 1135
1087 local->rx_pending = skb_get(skb); 1136 local->rx_pending = skb_get(skb);
1088 del_timer(&local->link_timer); 1137 del_timer(&local->link_timer);
1089 queue_work(local->rx_wq, &local->rx_work); 1138 queue_work(system_nrt_wq, &local->rx_work);
1090 1139
1091 return 0; 1140 return 0;
1092} 1141}
@@ -1121,7 +1170,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
1121 if (rf_mode == NFC_RF_INITIATOR) { 1170 if (rf_mode == NFC_RF_INITIATOR) {
1122 pr_debug("Queueing Tx work\n"); 1171 pr_debug("Queueing Tx work\n");
1123 1172
1124 queue_work(local->tx_wq, &local->tx_work); 1173 queue_work(system_nrt_wq, &local->tx_work);
1125 } else { 1174 } else {
1126 mod_timer(&local->link_timer, 1175 mod_timer(&local->link_timer,
1127 jiffies + msecs_to_jiffies(local->remote_lto)); 1176 jiffies + msecs_to_jiffies(local->remote_lto));
@@ -1130,10 +1179,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
1130 1179
1131int nfc_llcp_register_device(struct nfc_dev *ndev) 1180int nfc_llcp_register_device(struct nfc_dev *ndev)
1132{ 1181{
1133 struct device *dev = &ndev->dev;
1134 struct nfc_llcp_local *local; 1182 struct nfc_llcp_local *local;
1135 char name[32];
1136 int err;
1137 1183
1138 local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL); 1184 local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL);
1139 if (local == NULL) 1185 if (local == NULL)
@@ -1149,41 +1195,15 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
1149 1195
1150 skb_queue_head_init(&local->tx_queue); 1196 skb_queue_head_init(&local->tx_queue);
1151 INIT_WORK(&local->tx_work, nfc_llcp_tx_work); 1197 INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
1152 snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
1153 local->tx_wq =
1154 alloc_workqueue(name,
1155 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1156 1);
1157 if (local->tx_wq == NULL) {
1158 err = -ENOMEM;
1159 goto err_local;
1160 }
1161 1198
1162 local->rx_pending = NULL; 1199 local->rx_pending = NULL;
1163 INIT_WORK(&local->rx_work, nfc_llcp_rx_work); 1200 INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
1164 snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
1165 local->rx_wq =
1166 alloc_workqueue(name,
1167 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1168 1);
1169 if (local->rx_wq == NULL) {
1170 err = -ENOMEM;
1171 goto err_tx_wq;
1172 }
1173 1201
1174 INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work); 1202 INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
1175 snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
1176 local->timeout_wq =
1177 alloc_workqueue(name,
1178 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1179 1);
1180 if (local->timeout_wq == NULL) {
1181 err = -ENOMEM;
1182 goto err_rx_wq;
1183 }
1184 1203
1185 local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock); 1204 rwlock_init(&local->sockets.lock);
1186 local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock); 1205 rwlock_init(&local->connecting_sockets.lock);
1206 rwlock_init(&local->raw_sockets.lock);
1187 1207
1188 nfc_llcp_build_gb(local); 1208 nfc_llcp_build_gb(local);
1189 1209
@@ -1193,17 +1213,6 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
1193 list_add(&llcp_devices, &local->list); 1213 list_add(&llcp_devices, &local->list);
1194 1214
1195 return 0; 1215 return 0;
1196
1197err_rx_wq:
1198 destroy_workqueue(local->rx_wq);
1199
1200err_tx_wq:
1201 destroy_workqueue(local->tx_wq);
1202
1203err_local:
1204 kfree(local);
1205
1206 return 0;
1207} 1216}
1208 1217
1209void nfc_llcp_unregister_device(struct nfc_dev *dev) 1218void nfc_llcp_unregister_device(struct nfc_dev *dev)
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 83b8bba5a280..fdb2d24e60bd 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -56,12 +56,9 @@ struct nfc_llcp_local {
56 56
57 struct timer_list link_timer; 57 struct timer_list link_timer;
58 struct sk_buff_head tx_queue; 58 struct sk_buff_head tx_queue;
59 struct workqueue_struct *tx_wq;
60 struct work_struct tx_work; 59 struct work_struct tx_work;
61 struct workqueue_struct *rx_wq;
62 struct work_struct rx_work; 60 struct work_struct rx_work;
63 struct sk_buff *rx_pending; 61 struct sk_buff *rx_pending;
64 struct workqueue_struct *timeout_wq;
65 struct work_struct timeout_work; 62 struct work_struct timeout_work;
66 63
67 u32 target_idx; 64 u32 target_idx;
@@ -89,6 +86,7 @@ struct nfc_llcp_local {
89 /* sockets array */ 86 /* sockets array */
90 struct llcp_sock_list sockets; 87 struct llcp_sock_list sockets;
91 struct llcp_sock_list connecting_sockets; 88 struct llcp_sock_list connecting_sockets;
89 struct llcp_sock_list raw_sockets;
92}; 90};
93 91
94struct nfc_llcp_sock { 92struct nfc_llcp_sock {
@@ -187,6 +185,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
187u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local); 185u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
188void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap); 186void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
189int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock); 187int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock);
188void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
189 struct sk_buff *skb, u8 direction);
190 190
191/* Sock API */ 191/* Sock API */
192struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp); 192struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index ddeb9aa398f0..40f056debf9a 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -142,6 +142,60 @@ error:
142 return ret; 142 return ret;
143} 143}
144 144
145static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
146 int alen)
147{
148 struct sock *sk = sock->sk;
149 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
150 struct nfc_llcp_local *local;
151 struct nfc_dev *dev;
152 struct sockaddr_nfc_llcp llcp_addr;
153 int len, ret = 0;
154
155 if (!addr || addr->sa_family != AF_NFC)
156 return -EINVAL;
157
158 pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
159
160 memset(&llcp_addr, 0, sizeof(llcp_addr));
161 len = min_t(unsigned int, sizeof(llcp_addr), alen);
162 memcpy(&llcp_addr, addr, len);
163
164 lock_sock(sk);
165
166 if (sk->sk_state != LLCP_CLOSED) {
167 ret = -EBADFD;
168 goto error;
169 }
170
171 dev = nfc_get_device(llcp_addr.dev_idx);
172 if (dev == NULL) {
173 ret = -ENODEV;
174 goto error;
175 }
176
177 local = nfc_llcp_find_local(dev);
178 if (local == NULL) {
179 ret = -ENODEV;
180 goto put_dev;
181 }
182
183 llcp_sock->dev = dev;
184 llcp_sock->local = nfc_llcp_local_get(local);
185 llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
186
187 nfc_llcp_sock_link(&local->raw_sockets, sk);
188
189 sk->sk_state = LLCP_BOUND;
190
191put_dev:
192 nfc_put_device(dev);
193
194error:
195 release_sock(sk);
196 return ret;
197}
198
145static int llcp_sock_listen(struct socket *sock, int backlog) 199static int llcp_sock_listen(struct socket *sock, int backlog)
146{ 200{
147 struct sock *sk = sock->sk; 201 struct sock *sk = sock->sk;
@@ -300,9 +354,6 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
300 pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx, 354 pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
301 llcp_sock->dsap, llcp_sock->ssap); 355 llcp_sock->dsap, llcp_sock->ssap);
302 356
303 if (llcp_sock == NULL || llcp_sock->dev == NULL)
304 return -EBADFD;
305
306 uaddr->sa_family = AF_NFC; 357 uaddr->sa_family = AF_NFC;
307 358
308 *len = sizeof(struct sockaddr_nfc_llcp); 359 *len = sizeof(struct sockaddr_nfc_llcp);
@@ -421,7 +472,10 @@ static int llcp_sock_release(struct socket *sock)
421 472
422 release_sock(sk); 473 release_sock(sk);
423 474
424 nfc_llcp_sock_unlink(&local->sockets, sk); 475 if (sock->type == SOCK_RAW)
476 nfc_llcp_sock_unlink(&local->raw_sockets, sk);
477 else
478 nfc_llcp_sock_unlink(&local->sockets, sk);
425 479
426out: 480out:
427 sock_orphan(sk); 481 sock_orphan(sk);
@@ -617,7 +671,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
617 if (!(flags & MSG_PEEK)) { 671 if (!(flags & MSG_PEEK)) {
618 672
619 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 673 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
620 if (sk->sk_type == SOCK_STREAM) { 674 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_RAW) {
621 skb_pull(skb, copied); 675 skb_pull(skb, copied);
622 if (skb->len) { 676 if (skb->len) {
623 skb_queue_head(&sk->sk_receive_queue, skb); 677 skb_queue_head(&sk->sk_receive_queue, skb);
@@ -658,6 +712,26 @@ static const struct proto_ops llcp_sock_ops = {
658 .mmap = sock_no_mmap, 712 .mmap = sock_no_mmap,
659}; 713};
660 714
715static const struct proto_ops llcp_rawsock_ops = {
716 .family = PF_NFC,
717 .owner = THIS_MODULE,
718 .bind = llcp_raw_sock_bind,
719 .connect = sock_no_connect,
720 .release = llcp_sock_release,
721 .socketpair = sock_no_socketpair,
722 .accept = sock_no_accept,
723 .getname = llcp_sock_getname,
724 .poll = llcp_sock_poll,
725 .ioctl = sock_no_ioctl,
726 .listen = sock_no_listen,
727 .shutdown = sock_no_shutdown,
728 .setsockopt = sock_no_setsockopt,
729 .getsockopt = sock_no_getsockopt,
730 .sendmsg = sock_no_sendmsg,
731 .recvmsg = llcp_sock_recvmsg,
732 .mmap = sock_no_mmap,
733};
734
661static void llcp_sock_destruct(struct sock *sk) 735static void llcp_sock_destruct(struct sock *sk)
662{ 736{
663 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 737 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -735,10 +809,15 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
735 809
736 pr_debug("%p\n", sock); 810 pr_debug("%p\n", sock);
737 811
738 if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM) 812 if (sock->type != SOCK_STREAM &&
813 sock->type != SOCK_DGRAM &&
814 sock->type != SOCK_RAW)
739 return -ESOCKTNOSUPPORT; 815 return -ESOCKTNOSUPPORT;
740 816
741 sock->ops = &llcp_sock_ops; 817 if (sock->type == SOCK_RAW)
818 sock->ops = &llcp_rawsock_ops;
819 else
820 sock->ops = &llcp_sock_ops;
742 821
743 sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC); 822 sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
744 if (sk == NULL) 823 if (sk == NULL)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index f81efe13985a..acf9abb7d99b 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -176,6 +176,27 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
176 (1 + ((*num) * sizeof(struct disc_map_config))), &cmd); 176 (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
177} 177}
178 178
179struct nci_set_config_param {
180 __u8 id;
181 size_t len;
182 __u8 *val;
183};
184
185static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
186{
187 struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
188 struct nci_core_set_config_cmd cmd;
189
190 BUG_ON(param->len > NCI_MAX_PARAM_LEN);
191
192 cmd.num_params = 1;
193 cmd.param.id = param->id;
194 cmd.param.len = param->len;
195 memcpy(cmd.param.val, param->val, param->len);
196
197 nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
198}
199
179static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) 200static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
180{ 201{
181 struct nci_rf_disc_cmd cmd; 202 struct nci_rf_disc_cmd cmd;
@@ -388,6 +409,32 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
388 return nci_close_device(ndev); 409 return nci_close_device(ndev);
389} 410}
390 411
412static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
413{
414 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
415 struct nci_set_config_param param;
416 __u8 local_gb[NFC_MAX_GT_LEN];
417 int i, rc = 0;
418
419 param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
420 if ((param.val == NULL) || (param.len == 0))
421 return rc;
422
423 if (param.len > NCI_MAX_PARAM_LEN)
424 return -EINVAL;
425
426 for (i = 0; i < param.len; i++)
427 local_gb[param.len-1-i] = param.val[i];
428
429 param.id = NCI_PN_ATR_REQ_GEN_BYTES;
430 param.val = local_gb;
431
432 rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param,
433 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
434
435 return rc;
436}
437
391static int nci_start_poll(struct nfc_dev *nfc_dev, 438static int nci_start_poll(struct nfc_dev *nfc_dev,
392 __u32 im_protocols, __u32 tm_protocols) 439 __u32 im_protocols, __u32 tm_protocols)
393{ 440{
@@ -415,6 +462,14 @@ static int nci_start_poll(struct nfc_dev *nfc_dev,
415 return -EBUSY; 462 return -EBUSY;
416 } 463 }
417 464
465 if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
466 rc = nci_set_local_general_bytes(nfc_dev);
467 if (rc) {
468 pr_err("failed to set local general bytes\n");
469 return rc;
470 }
471 }
472
418 rc = nci_request(ndev, nci_rf_discover_req, im_protocols, 473 rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
419 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); 474 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
420 475
@@ -509,7 +564,7 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
509{ 564{
510 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 565 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
511 566
512 pr_debug("target_idx %d\n", target->idx); 567 pr_debug("entry\n");
513 568
514 if (!ndev->target_active_prot) { 569 if (!ndev->target_active_prot) {
515 pr_err("unable to deactivate target, no active target\n"); 570 pr_err("unable to deactivate target, no active target\n");
@@ -524,6 +579,38 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
524 } 579 }
525} 580}
526 581
582
583static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
584 __u8 comm_mode, __u8 *gb, size_t gb_len)
585{
586 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
587 int rc;
588
589 pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);
590
591 rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
592 if (rc)
593 return rc;
594
595 rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
596 ndev->remote_gb_len);
597 if (!rc)
598 rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
599 NFC_RF_INITIATOR);
600
601 return rc;
602}
603
604static int nci_dep_link_down(struct nfc_dev *nfc_dev)
605{
606 pr_debug("entry\n");
607
608 nci_deactivate_target(nfc_dev, NULL);
609
610 return 0;
611}
612
613
527static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, 614static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
528 struct sk_buff *skb, 615 struct sk_buff *skb,
529 data_exchange_cb_t cb, void *cb_context) 616 data_exchange_cb_t cb, void *cb_context)
@@ -557,6 +644,8 @@ static struct nfc_ops nci_nfc_ops = {
557 .dev_down = nci_dev_down, 644 .dev_down = nci_dev_down,
558 .start_poll = nci_start_poll, 645 .start_poll = nci_start_poll,
559 .stop_poll = nci_stop_poll, 646 .stop_poll = nci_stop_poll,
647 .dep_link_up = nci_dep_link_up,
648 .dep_link_down = nci_dep_link_down,
560 .activate_target = nci_activate_target, 649 .activate_target = nci_activate_target,
561 .deactivate_target = nci_deactivate_target, 650 .deactivate_target = nci_deactivate_target,
562 .im_transceive = nci_transceive, 651 .im_transceive = nci_transceive,
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index af7a93b04393..b2aa98ef0927 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -176,6 +176,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
176 protocol = NFC_PROTO_ISO14443_B_MASK; 176 protocol = NFC_PROTO_ISO14443_B_MASK;
177 else if (rf_protocol == NCI_RF_PROTOCOL_T3T) 177 else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
178 protocol = NFC_PROTO_FELICA_MASK; 178 protocol = NFC_PROTO_FELICA_MASK;
179 else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP)
180 protocol = NFC_PROTO_NFC_DEP_MASK;
179 else 181 else
180 protocol = 0; 182 protocol = 0;
181 183
@@ -361,6 +363,33 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
361 return NCI_STATUS_OK; 363 return NCI_STATUS_OK;
362} 364}
363 365
366static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
367 struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
368{
369 struct activation_params_poll_nfc_dep *poll;
370 int i;
371
372 switch (ntf->activation_rf_tech_and_mode) {
373 case NCI_NFC_A_PASSIVE_POLL_MODE:
374 case NCI_NFC_F_PASSIVE_POLL_MODE:
375 poll = &ntf->activation_params.poll_nfc_dep;
376 poll->atr_res_len = min_t(__u8, *data++, 63);
377 pr_debug("atr_res_len %d\n", poll->atr_res_len);
378 if (poll->atr_res_len > 0) {
379 for (i = 0; i < poll->atr_res_len; i++)
380 poll->atr_res[poll->atr_res_len-1-i] = data[i];
381 }
382 break;
383
384 default:
385 pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
386 ntf->activation_rf_tech_and_mode);
387 return NCI_STATUS_RF_PROTOCOL_ERROR;
388 }
389
390 return NCI_STATUS_OK;
391}
392
364static void nci_target_auto_activated(struct nci_dev *ndev, 393static void nci_target_auto_activated(struct nci_dev *ndev,
365 struct nci_rf_intf_activated_ntf *ntf) 394 struct nci_rf_intf_activated_ntf *ntf)
366{ 395{
@@ -454,6 +483,11 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
454 &ntf, data); 483 &ntf, data);
455 break; 484 break;
456 485
486 case NCI_RF_INTERFACE_NFC_DEP:
487 err = nci_extract_activation_params_nfc_dep(ndev,
488 &ntf, data);
489 break;
490
457 case NCI_RF_INTERFACE_FRAME: 491 case NCI_RF_INTERFACE_FRAME:
458 /* no activation params */ 492 /* no activation params */
459 break; 493 break;
@@ -473,6 +507,24 @@ exit:
473 507
474 /* set the available credits to initial value */ 508 /* set the available credits to initial value */
475 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); 509 atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
510
511 /* store general bytes to be reported later in dep_link_up */
512 if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) {
513 ndev->remote_gb_len = 0;
514
515 if (ntf.activation_params_len > 0) {
516 /* ATR_RES general bytes at offset 15 */
517 ndev->remote_gb_len = min_t(__u8,
518 (ntf.activation_params
519 .poll_nfc_dep.atr_res_len
520 - NFC_ATR_RES_GT_OFFSET),
521 NFC_MAX_GT_LEN);
522 memcpy(ndev->remote_gb,
523 (ntf.activation_params.poll_nfc_dep
524 .atr_res + NFC_ATR_RES_GT_OFFSET),
525 ndev->remote_gb_len);
526 }
527 }
476 } 528 }
477 529
478 if (atomic_read(&ndev->state) == NCI_DISCOVERY) { 530 if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 3003c3390e49..dd072f38ad00 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -119,6 +119,16 @@ exit:
119 nci_req_complete(ndev, rsp_1->status); 119 nci_req_complete(ndev, rsp_1->status);
120} 120}
121 121
122static void nci_core_set_config_rsp_packet(struct nci_dev *ndev,
123 struct sk_buff *skb)
124{
125 struct nci_core_set_config_rsp *rsp = (void *) skb->data;
126
127 pr_debug("status 0x%x\n", rsp->status);
128
129 nci_req_complete(ndev, rsp->status);
130}
131
122static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, 132static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
123 struct sk_buff *skb) 133 struct sk_buff *skb)
124{ 134{
@@ -194,6 +204,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
194 nci_core_init_rsp_packet(ndev, skb); 204 nci_core_init_rsp_packet(ndev, skb);
195 break; 205 break;
196 206
207 case NCI_OP_CORE_SET_CONFIG_RSP:
208 nci_core_set_config_rsp_packet(ndev, skb);
209 break;
210
197 case NCI_OP_RF_DISCOVER_MAP_RSP: 211 case NCI_OP_RF_DISCOVER_MAP_RSP:
198 nci_rf_disc_map_rsp_packet(ndev, skb); 212 nci_rf_disc_map_rsp_packet(ndev, skb);
199 break; 213 break;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 4c51714ee741..c1b5285cbde7 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -58,7 +58,7 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
58{ 58{
59 void *hdr; 59 void *hdr;
60 60
61 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 61 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
62 &nfc_genl_family, flags, NFC_CMD_GET_TARGET); 62 &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
63 if (!hdr) 63 if (!hdr)
64 return -EMSGSIZE; 64 return -EMSGSIZE;
@@ -165,7 +165,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
165 struct sk_buff *msg; 165 struct sk_buff *msg;
166 void *hdr; 166 void *hdr;
167 167
168 dev->genl_data.poll_req_pid = 0; 168 dev->genl_data.poll_req_portid = 0;
169 169
170 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 170 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
171 if (!msg) 171 if (!msg)
@@ -347,13 +347,13 @@ free_msg:
347} 347}
348 348
349static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, 349static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
350 u32 pid, u32 seq, 350 u32 portid, u32 seq,
351 struct netlink_callback *cb, 351 struct netlink_callback *cb,
352 int flags) 352 int flags)
353{ 353{
354 void *hdr; 354 void *hdr;
355 355
356 hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags, 356 hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags,
357 NFC_CMD_GET_DEVICE); 357 NFC_CMD_GET_DEVICE);
358 if (!hdr) 358 if (!hdr)
359 return -EMSGSIZE; 359 return -EMSGSIZE;
@@ -401,7 +401,7 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
401 while (dev) { 401 while (dev) {
402 int rc; 402 int rc;
403 403
404 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid, 404 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid,
405 cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); 405 cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
406 if (rc < 0) 406 if (rc < 0)
407 break; 407 break;
@@ -520,7 +520,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
520 goto out_putdev; 520 goto out_putdev;
521 } 521 }
522 522
523 rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq, 523 rc = nfc_genl_send_device(msg, dev, info->snd_portid, info->snd_seq,
524 NULL, 0); 524 NULL, 0);
525 if (rc < 0) 525 if (rc < 0)
526 goto out_free; 526 goto out_free;
@@ -611,7 +611,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
611 611
612 rc = nfc_start_poll(dev, im_protocols, tm_protocols); 612 rc = nfc_start_poll(dev, im_protocols, tm_protocols);
613 if (!rc) 613 if (!rc)
614 dev->genl_data.poll_req_pid = info->snd_pid; 614 dev->genl_data.poll_req_portid = info->snd_portid;
615 615
616 mutex_unlock(&dev->genl_data.genl_data_mutex); 616 mutex_unlock(&dev->genl_data.genl_data_mutex);
617 617
@@ -645,13 +645,13 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
645 645
646 mutex_lock(&dev->genl_data.genl_data_mutex); 646 mutex_lock(&dev->genl_data.genl_data_mutex);
647 647
648 if (dev->genl_data.poll_req_pid != info->snd_pid) { 648 if (dev->genl_data.poll_req_portid != info->snd_portid) {
649 rc = -EBUSY; 649 rc = -EBUSY;
650 goto out; 650 goto out;
651 } 651 }
652 652
653 rc = nfc_stop_poll(dev); 653 rc = nfc_stop_poll(dev);
654 dev->genl_data.poll_req_pid = 0; 654 dev->genl_data.poll_req_portid = 0;
655 655
656out: 656out:
657 mutex_unlock(&dev->genl_data.genl_data_mutex); 657 mutex_unlock(&dev->genl_data.genl_data_mutex);
@@ -761,38 +761,70 @@ static struct genl_ops nfc_genl_ops[] = {
761 }, 761 },
762}; 762};
763 763
764static int nfc_genl_rcv_nl_event(struct notifier_block *this, 764
765 unsigned long event, void *ptr) 765struct urelease_work {
766 struct work_struct w;
767 int portid;
768};
769
770static void nfc_urelease_event_work(struct work_struct *work)
766{ 771{
767 struct netlink_notify *n = ptr; 772 struct urelease_work *w = container_of(work, struct urelease_work, w);
768 struct class_dev_iter iter; 773 struct class_dev_iter iter;
769 struct nfc_dev *dev; 774 struct nfc_dev *dev;
770 775
771 if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC) 776 pr_debug("portid %d\n", w->portid);
772 goto out;
773 777
774 pr_debug("NETLINK_URELEASE event from id %d\n", n->pid); 778 mutex_lock(&nfc_devlist_mutex);
775 779
776 nfc_device_iter_init(&iter); 780 nfc_device_iter_init(&iter);
777 dev = nfc_device_iter_next(&iter); 781 dev = nfc_device_iter_next(&iter);
778 782
779 while (dev) { 783 while (dev) {
780 if (dev->genl_data.poll_req_pid == n->pid) { 784 mutex_lock(&dev->genl_data.genl_data_mutex);
785
786 if (dev->genl_data.poll_req_portid == w->portid) {
781 nfc_stop_poll(dev); 787 nfc_stop_poll(dev);
782 dev->genl_data.poll_req_pid = 0; 788 dev->genl_data.poll_req_portid = 0;
783 } 789 }
790
791 mutex_unlock(&dev->genl_data.genl_data_mutex);
792
784 dev = nfc_device_iter_next(&iter); 793 dev = nfc_device_iter_next(&iter);
785 } 794 }
786 795
787 nfc_device_iter_exit(&iter); 796 nfc_device_iter_exit(&iter);
788 797
798 mutex_unlock(&nfc_devlist_mutex);
799
800 kfree(w);
801}
802
803static int nfc_genl_rcv_nl_event(struct notifier_block *this,
804 unsigned long event, void *ptr)
805{
806 struct netlink_notify *n = ptr;
807 struct urelease_work *w;
808
809 if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
810 goto out;
811
812 pr_debug("NETLINK_URELEASE event from id %d\n", n->portid);
813
814 w = kmalloc(sizeof(*w), GFP_ATOMIC);
815 if (w) {
816 INIT_WORK((struct work_struct *) w, nfc_urelease_event_work);
817 w->portid = n->portid;
818 schedule_work((struct work_struct *) w);
819 }
820
789out: 821out:
790 return NOTIFY_DONE; 822 return NOTIFY_DONE;
791} 823}
792 824
793void nfc_genl_data_init(struct nfc_genl_data *genl_data) 825void nfc_genl_data_init(struct nfc_genl_data *genl_data)
794{ 826{
795 genl_data->poll_req_pid = 0; 827 genl_data->poll_req_portid = 0;
796 mutex_init(&genl_data->genl_data_mutex); 828 mutex_init(&genl_data->genl_data_mutex);
797} 829}
798 830
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 954405ceae9e..08114478cb85 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -266,7 +266,7 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
266 if (unlikely(!skb)) 266 if (unlikely(!skb))
267 return -ENOMEM; 267 return -ENOMEM;
268 268
269 vport = rcu_dereference(dp->ports[out_port]); 269 vport = ovs_vport_rcu(dp, out_port);
270 if (unlikely(!vport)) { 270 if (unlikely(!vport)) {
271 kfree_skb(skb); 271 kfree_skb(skb);
272 return -ENODEV; 272 return -ENODEV;
@@ -286,7 +286,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
286 upcall.cmd = OVS_PACKET_CMD_ACTION; 286 upcall.cmd = OVS_PACKET_CMD_ACTION;
287 upcall.key = &OVS_CB(skb)->flow->key; 287 upcall.key = &OVS_CB(skb)->flow->key;
288 upcall.userdata = NULL; 288 upcall.userdata = NULL;
289 upcall.pid = 0; 289 upcall.portid = 0;
290 290
291 for (a = nla_data(attr), rem = nla_len(attr); rem > 0; 291 for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
292 a = nla_next(a, &rem)) { 292 a = nla_next(a, &rem)) {
@@ -296,7 +296,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
296 break; 296 break;
297 297
298 case OVS_USERSPACE_ATTR_PID: 298 case OVS_USERSPACE_ATTR_PID:
299 upcall.pid = nla_get_u32(a); 299 upcall.portid = nla_get_u32(a);
300 break; 300 break;
301 } 301 }
302 } 302 }
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index cf58cedad083..4c4b62ccc7d7 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -49,12 +49,29 @@
49#include <linux/dmi.h> 49#include <linux/dmi.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <net/genetlink.h> 51#include <net/genetlink.h>
52#include <net/net_namespace.h>
53#include <net/netns/generic.h>
52 54
53#include "datapath.h" 55#include "datapath.h"
54#include "flow.h" 56#include "flow.h"
55#include "vport-internal_dev.h" 57#include "vport-internal_dev.h"
56 58
57/** 59/**
60 * struct ovs_net - Per net-namespace data for ovs.
61 * @dps: List of datapaths to enable dumping them all out.
62 * Protected by genl_mutex.
63 */
64struct ovs_net {
65 struct list_head dps;
66};
67
68static int ovs_net_id __read_mostly;
69
70#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
71static void rehash_flow_table(struct work_struct *work);
72static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
73
74/**
58 * DOC: Locking: 75 * DOC: Locking:
59 * 76 *
60 * Writes to device state (add/remove datapath, port, set operations on vports, 77 * Writes to device state (add/remove datapath, port, set operations on vports,
@@ -71,29 +88,21 @@
71 * each other. 88 * each other.
72 */ 89 */
73 90
74/* Global list of datapaths to enable dumping them all out.
75 * Protected by genl_mutex.
76 */
77static LIST_HEAD(dps);
78
79#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
80static void rehash_flow_table(struct work_struct *work);
81static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
82
83static struct vport *new_vport(const struct vport_parms *); 91static struct vport *new_vport(const struct vport_parms *);
84static int queue_gso_packets(int dp_ifindex, struct sk_buff *, 92static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
85 const struct dp_upcall_info *); 93 const struct dp_upcall_info *);
86static int queue_userspace_packet(int dp_ifindex, struct sk_buff *, 94static int queue_userspace_packet(struct net *, int dp_ifindex,
95 struct sk_buff *,
87 const struct dp_upcall_info *); 96 const struct dp_upcall_info *);
88 97
89/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */ 98/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
90static struct datapath *get_dp(int dp_ifindex) 99static struct datapath *get_dp(struct net *net, int dp_ifindex)
91{ 100{
92 struct datapath *dp = NULL; 101 struct datapath *dp = NULL;
93 struct net_device *dev; 102 struct net_device *dev;
94 103
95 rcu_read_lock(); 104 rcu_read_lock();
96 dev = dev_get_by_index_rcu(&init_net, dp_ifindex); 105 dev = dev_get_by_index_rcu(net, dp_ifindex);
97 if (dev) { 106 if (dev) {
98 struct vport *vport = ovs_internal_dev_get_vport(dev); 107 struct vport *vport = ovs_internal_dev_get_vport(dev);
99 if (vport) 108 if (vport)
@@ -107,7 +116,7 @@ static struct datapath *get_dp(int dp_ifindex)
107/* Must be called with rcu_read_lock or RTNL lock. */ 116/* Must be called with rcu_read_lock or RTNL lock. */
108const char *ovs_dp_name(const struct datapath *dp) 117const char *ovs_dp_name(const struct datapath *dp)
109{ 118{
110 struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]); 119 struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
111 return vport->ops->get_name(vport); 120 return vport->ops->get_name(vport);
112} 121}
113 122
@@ -118,7 +127,7 @@ static int get_dpifindex(struct datapath *dp)
118 127
119 rcu_read_lock(); 128 rcu_read_lock();
120 129
121 local = rcu_dereference(dp->ports[OVSP_LOCAL]); 130 local = ovs_vport_rcu(dp, OVSP_LOCAL);
122 if (local) 131 if (local)
123 ifindex = local->ops->get_ifindex(local); 132 ifindex = local->ops->get_ifindex(local);
124 else 133 else
@@ -135,9 +144,31 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
135 144
136 ovs_flow_tbl_destroy((__force struct flow_table *)dp->table); 145 ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
137 free_percpu(dp->stats_percpu); 146 free_percpu(dp->stats_percpu);
147 release_net(ovs_dp_get_net(dp));
148 kfree(dp->ports);
138 kfree(dp); 149 kfree(dp);
139} 150}
140 151
152static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
153 u16 port_no)
154{
155 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
156}
157
158struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
159{
160 struct vport *vport;
161 struct hlist_node *n;
162 struct hlist_head *head;
163
164 head = vport_hash_bucket(dp, port_no);
165 hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
166 if (vport->port_no == port_no)
167 return vport;
168 }
169 return NULL;
170}
171
141/* Called with RTNL lock and genl_lock. */ 172/* Called with RTNL lock and genl_lock. */
142static struct vport *new_vport(const struct vport_parms *parms) 173static struct vport *new_vport(const struct vport_parms *parms)
143{ 174{
@@ -146,9 +177,9 @@ static struct vport *new_vport(const struct vport_parms *parms)
146 vport = ovs_vport_add(parms); 177 vport = ovs_vport_add(parms);
147 if (!IS_ERR(vport)) { 178 if (!IS_ERR(vport)) {
148 struct datapath *dp = parms->dp; 179 struct datapath *dp = parms->dp;
180 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
149 181
150 rcu_assign_pointer(dp->ports[parms->port_no], vport); 182 hlist_add_head_rcu(&vport->dp_hash_node, head);
151 list_add(&vport->node, &dp->port_list);
152 } 183 }
153 184
154 return vport; 185 return vport;
@@ -160,8 +191,7 @@ void ovs_dp_detach_port(struct vport *p)
160 ASSERT_RTNL(); 191 ASSERT_RTNL();
161 192
162 /* First drop references to device. */ 193 /* First drop references to device. */
163 list_del(&p->node); 194 hlist_del_rcu(&p->dp_hash_node);
164 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
165 195
166 /* Then destroy it. */ 196 /* Then destroy it. */
167 ovs_vport_del(p); 197 ovs_vport_del(p);
@@ -195,7 +225,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
195 upcall.cmd = OVS_PACKET_CMD_MISS; 225 upcall.cmd = OVS_PACKET_CMD_MISS;
196 upcall.key = &key; 226 upcall.key = &key;
197 upcall.userdata = NULL; 227 upcall.userdata = NULL;
198 upcall.pid = p->upcall_pid; 228 upcall.portid = p->upcall_portid;
199 ovs_dp_upcall(dp, skb, &upcall); 229 ovs_dp_upcall(dp, skb, &upcall);
200 consume_skb(skb); 230 consume_skb(skb);
201 stats_counter = &stats->n_missed; 231 stats_counter = &stats->n_missed;
@@ -220,17 +250,18 @@ static struct genl_family dp_packet_genl_family = {
220 .hdrsize = sizeof(struct ovs_header), 250 .hdrsize = sizeof(struct ovs_header),
221 .name = OVS_PACKET_FAMILY, 251 .name = OVS_PACKET_FAMILY,
222 .version = OVS_PACKET_VERSION, 252 .version = OVS_PACKET_VERSION,
223 .maxattr = OVS_PACKET_ATTR_MAX 253 .maxattr = OVS_PACKET_ATTR_MAX,
254 .netnsok = true
224}; 255};
225 256
226int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, 257int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
227 const struct dp_upcall_info *upcall_info) 258 const struct dp_upcall_info *upcall_info)
228{ 259{
229 struct dp_stats_percpu *stats; 260 struct dp_stats_percpu *stats;
230 int dp_ifindex; 261 int dp_ifindex;
231 int err; 262 int err;
232 263
233 if (upcall_info->pid == 0) { 264 if (upcall_info->portid == 0) {
234 err = -ENOTCONN; 265 err = -ENOTCONN;
235 goto err; 266 goto err;
236 } 267 }
@@ -242,9 +273,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
242 } 273 }
243 274
244 if (!skb_is_gso(skb)) 275 if (!skb_is_gso(skb))
245 err = queue_userspace_packet(dp_ifindex, skb, upcall_info); 276 err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
246 else 277 else
247 err = queue_gso_packets(dp_ifindex, skb, upcall_info); 278 err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
248 if (err) 279 if (err)
249 goto err; 280 goto err;
250 281
@@ -260,7 +291,8 @@ err:
260 return err; 291 return err;
261} 292}
262 293
263static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb, 294static int queue_gso_packets(struct net *net, int dp_ifindex,
295 struct sk_buff *skb,
264 const struct dp_upcall_info *upcall_info) 296 const struct dp_upcall_info *upcall_info)
265{ 297{
266 unsigned short gso_type = skb_shinfo(skb)->gso_type; 298 unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -276,7 +308,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
276 /* Queue all of the segments. */ 308 /* Queue all of the segments. */
277 skb = segs; 309 skb = segs;
278 do { 310 do {
279 err = queue_userspace_packet(dp_ifindex, skb, upcall_info); 311 err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
280 if (err) 312 if (err)
281 break; 313 break;
282 314
@@ -306,7 +338,8 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
306 return err; 338 return err;
307} 339}
308 340
309static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, 341static int queue_userspace_packet(struct net *net, int dp_ifindex,
342 struct sk_buff *skb,
310 const struct dp_upcall_info *upcall_info) 343 const struct dp_upcall_info *upcall_info)
311{ 344{
312 struct ovs_header *upcall; 345 struct ovs_header *upcall;
@@ -362,7 +395,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
362 395
363 skb_copy_and_csum_dev(skb, nla_data(nla)); 396 skb_copy_and_csum_dev(skb, nla_data(nla));
364 397
365 err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid); 398 err = genlmsg_unicast(net, user_skb, upcall_info->portid);
366 399
367out: 400out:
368 kfree_skb(nskb); 401 kfree_skb(nskb);
@@ -370,15 +403,10 @@ out:
370} 403}
371 404
372/* Called with genl_mutex. */ 405/* Called with genl_mutex. */
373static int flush_flows(int dp_ifindex) 406static int flush_flows(struct datapath *dp)
374{ 407{
375 struct flow_table *old_table; 408 struct flow_table *old_table;
376 struct flow_table *new_table; 409 struct flow_table *new_table;
377 struct datapath *dp;
378
379 dp = get_dp(dp_ifindex);
380 if (!dp)
381 return -ENODEV;
382 410
383 old_table = genl_dereference(dp->table); 411 old_table = genl_dereference(dp->table);
384 new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); 412 new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
@@ -668,7 +696,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
668 packet->priority = flow->key.phy.priority; 696 packet->priority = flow->key.phy.priority;
669 697
670 rcu_read_lock(); 698 rcu_read_lock();
671 dp = get_dp(ovs_header->dp_ifindex); 699 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
672 err = -ENODEV; 700 err = -ENODEV;
673 if (!dp) 701 if (!dp)
674 goto err_unlock; 702 goto err_unlock;
@@ -742,7 +770,8 @@ static struct genl_family dp_flow_genl_family = {
742 .hdrsize = sizeof(struct ovs_header), 770 .hdrsize = sizeof(struct ovs_header),
743 .name = OVS_FLOW_FAMILY, 771 .name = OVS_FLOW_FAMILY,
744 .version = OVS_FLOW_VERSION, 772 .version = OVS_FLOW_VERSION,
745 .maxattr = OVS_FLOW_ATTR_MAX 773 .maxattr = OVS_FLOW_ATTR_MAX,
774 .netnsok = true
746}; 775};
747 776
748static struct genl_multicast_group ovs_dp_flow_multicast_group = { 777static struct genl_multicast_group ovs_dp_flow_multicast_group = {
@@ -751,7 +780,7 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
751 780
752/* Called with genl_lock. */ 781/* Called with genl_lock. */
753static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, 782static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
754 struct sk_buff *skb, u32 pid, 783 struct sk_buff *skb, u32 portid,
755 u32 seq, u32 flags, u8 cmd) 784 u32 seq, u32 flags, u8 cmd)
756{ 785{
757 const int skb_orig_len = skb->len; 786 const int skb_orig_len = skb->len;
@@ -766,7 +795,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
766 sf_acts = rcu_dereference_protected(flow->sf_acts, 795 sf_acts = rcu_dereference_protected(flow->sf_acts,
767 lockdep_genl_is_held()); 796 lockdep_genl_is_held());
768 797
769 ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd); 798 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
770 if (!ovs_header) 799 if (!ovs_header)
771 return -EMSGSIZE; 800 return -EMSGSIZE;
772 801
@@ -850,7 +879,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
850 879
851static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, 880static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
852 struct datapath *dp, 881 struct datapath *dp,
853 u32 pid, u32 seq, u8 cmd) 882 u32 portid, u32 seq, u8 cmd)
854{ 883{
855 struct sk_buff *skb; 884 struct sk_buff *skb;
856 int retval; 885 int retval;
@@ -859,7 +888,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
859 if (!skb) 888 if (!skb)
860 return ERR_PTR(-ENOMEM); 889 return ERR_PTR(-ENOMEM);
861 890
862 retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd); 891 retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
863 BUG_ON(retval < 0); 892 BUG_ON(retval < 0);
864 return skb; 893 return skb;
865} 894}
@@ -894,7 +923,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
894 goto error; 923 goto error;
895 } 924 }
896 925
897 dp = get_dp(ovs_header->dp_ifindex); 926 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
898 error = -ENODEV; 927 error = -ENODEV;
899 if (!dp) 928 if (!dp)
900 goto error; 929 goto error;
@@ -941,7 +970,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
941 flow->hash = ovs_flow_hash(&key, key_len); 970 flow->hash = ovs_flow_hash(&key, key_len);
942 ovs_flow_tbl_insert(table, flow); 971 ovs_flow_tbl_insert(table, flow);
943 972
944 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, 973 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
945 info->snd_seq, 974 info->snd_seq,
946 OVS_FLOW_CMD_NEW); 975 OVS_FLOW_CMD_NEW);
947 } else { 976 } else {
@@ -979,7 +1008,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
979 ovs_flow_deferred_free_acts(old_acts); 1008 ovs_flow_deferred_free_acts(old_acts);
980 } 1009 }
981 1010
982 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, 1011 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
983 info->snd_seq, OVS_FLOW_CMD_NEW); 1012 info->snd_seq, OVS_FLOW_CMD_NEW);
984 1013
985 /* Clear stats. */ 1014 /* Clear stats. */
@@ -991,11 +1020,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
991 } 1020 }
992 1021
993 if (!IS_ERR(reply)) 1022 if (!IS_ERR(reply))
994 genl_notify(reply, genl_info_net(info), info->snd_pid, 1023 genl_notify(reply, genl_info_net(info), info->snd_portid,
995 ovs_dp_flow_multicast_group.id, info->nlhdr, 1024 ovs_dp_flow_multicast_group.id, info->nlhdr,
996 GFP_KERNEL); 1025 GFP_KERNEL);
997 else 1026 else
998 netlink_set_err(init_net.genl_sock, 0, 1027 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
999 ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); 1028 ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
1000 return 0; 1029 return 0;
1001 1030
@@ -1023,7 +1052,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1023 if (err) 1052 if (err)
1024 return err; 1053 return err;
1025 1054
1026 dp = get_dp(ovs_header->dp_ifindex); 1055 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1027 if (!dp) 1056 if (!dp)
1028 return -ENODEV; 1057 return -ENODEV;
1029 1058
@@ -1032,7 +1061,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1032 if (!flow) 1061 if (!flow)
1033 return -ENOENT; 1062 return -ENOENT;
1034 1063
1035 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, 1064 reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1036 info->snd_seq, OVS_FLOW_CMD_NEW); 1065 info->snd_seq, OVS_FLOW_CMD_NEW);
1037 if (IS_ERR(reply)) 1066 if (IS_ERR(reply))
1038 return PTR_ERR(reply); 1067 return PTR_ERR(reply);
@@ -1052,16 +1081,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1052 int err; 1081 int err;
1053 int key_len; 1082 int key_len;
1054 1083
1084 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1085 if (!dp)
1086 return -ENODEV;
1087
1055 if (!a[OVS_FLOW_ATTR_KEY]) 1088 if (!a[OVS_FLOW_ATTR_KEY])
1056 return flush_flows(ovs_header->dp_ifindex); 1089 return flush_flows(dp);
1090
1057 err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); 1091 err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
1058 if (err) 1092 if (err)
1059 return err; 1093 return err;
1060 1094
1061 dp = get_dp(ovs_header->dp_ifindex);
1062 if (!dp)
1063 return -ENODEV;
1064
1065 table = genl_dereference(dp->table); 1095 table = genl_dereference(dp->table);
1066 flow = ovs_flow_tbl_lookup(table, &key, key_len); 1096 flow = ovs_flow_tbl_lookup(table, &key, key_len);
1067 if (!flow) 1097 if (!flow)
@@ -1073,13 +1103,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1073 1103
1074 ovs_flow_tbl_remove(table, flow); 1104 ovs_flow_tbl_remove(table, flow);
1075 1105
1076 err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid, 1106 err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
1077 info->snd_seq, 0, OVS_FLOW_CMD_DEL); 1107 info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1078 BUG_ON(err < 0); 1108 BUG_ON(err < 0);
1079 1109
1080 ovs_flow_deferred_free(flow); 1110 ovs_flow_deferred_free(flow);
1081 1111
1082 genl_notify(reply, genl_info_net(info), info->snd_pid, 1112 genl_notify(reply, genl_info_net(info), info->snd_portid,
1083 ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); 1113 ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1084 return 0; 1114 return 0;
1085} 1115}
@@ -1090,7 +1120,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1090 struct datapath *dp; 1120 struct datapath *dp;
1091 struct flow_table *table; 1121 struct flow_table *table;
1092 1122
1093 dp = get_dp(ovs_header->dp_ifindex); 1123 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1094 if (!dp) 1124 if (!dp)
1095 return -ENODEV; 1125 return -ENODEV;
1096 1126
@@ -1107,7 +1137,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1107 break; 1137 break;
1108 1138
1109 if (ovs_flow_cmd_fill_info(flow, dp, skb, 1139 if (ovs_flow_cmd_fill_info(flow, dp, skb,
1110 NETLINK_CB(cb->skb).pid, 1140 NETLINK_CB(cb->skb).portid,
1111 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1141 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1112 OVS_FLOW_CMD_NEW) < 0) 1142 OVS_FLOW_CMD_NEW) < 0)
1113 break; 1143 break;
@@ -1152,7 +1182,8 @@ static struct genl_family dp_datapath_genl_family = {
1152 .hdrsize = sizeof(struct ovs_header), 1182 .hdrsize = sizeof(struct ovs_header),
1153 .name = OVS_DATAPATH_FAMILY, 1183 .name = OVS_DATAPATH_FAMILY,
1154 .version = OVS_DATAPATH_VERSION, 1184 .version = OVS_DATAPATH_VERSION,
1155 .maxattr = OVS_DP_ATTR_MAX 1185 .maxattr = OVS_DP_ATTR_MAX,
1186 .netnsok = true
1156}; 1187};
1157 1188
1158static struct genl_multicast_group ovs_dp_datapath_multicast_group = { 1189static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
@@ -1160,13 +1191,13 @@ static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
1160}; 1191};
1161 1192
1162static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, 1193static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1163 u32 pid, u32 seq, u32 flags, u8 cmd) 1194 u32 portid, u32 seq, u32 flags, u8 cmd)
1164{ 1195{
1165 struct ovs_header *ovs_header; 1196 struct ovs_header *ovs_header;
1166 struct ovs_dp_stats dp_stats; 1197 struct ovs_dp_stats dp_stats;
1167 int err; 1198 int err;
1168 1199
1169 ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family, 1200 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1170 flags, cmd); 1201 flags, cmd);
1171 if (!ovs_header) 1202 if (!ovs_header)
1172 goto error; 1203 goto error;
@@ -1191,7 +1222,7 @@ error:
1191 return -EMSGSIZE; 1222 return -EMSGSIZE;
1192} 1223}
1193 1224
1194static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid, 1225static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
1195 u32 seq, u8 cmd) 1226 u32 seq, u8 cmd)
1196{ 1227{
1197 struct sk_buff *skb; 1228 struct sk_buff *skb;
@@ -1201,7 +1232,7 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1201 if (!skb) 1232 if (!skb)
1202 return ERR_PTR(-ENOMEM); 1233 return ERR_PTR(-ENOMEM);
1203 1234
1204 retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd); 1235 retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
1205 if (retval < 0) { 1236 if (retval < 0) {
1206 kfree_skb(skb); 1237 kfree_skb(skb);
1207 return ERR_PTR(retval); 1238 return ERR_PTR(retval);
@@ -1210,18 +1241,19 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
1210} 1241}
1211 1242
1212/* Called with genl_mutex and optionally with RTNL lock also. */ 1243/* Called with genl_mutex and optionally with RTNL lock also. */
1213static struct datapath *lookup_datapath(struct ovs_header *ovs_header, 1244static struct datapath *lookup_datapath(struct net *net,
1245 struct ovs_header *ovs_header,
1214 struct nlattr *a[OVS_DP_ATTR_MAX + 1]) 1246 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1215{ 1247{
1216 struct datapath *dp; 1248 struct datapath *dp;
1217 1249
1218 if (!a[OVS_DP_ATTR_NAME]) 1250 if (!a[OVS_DP_ATTR_NAME])
1219 dp = get_dp(ovs_header->dp_ifindex); 1251 dp = get_dp(net, ovs_header->dp_ifindex);
1220 else { 1252 else {
1221 struct vport *vport; 1253 struct vport *vport;
1222 1254
1223 rcu_read_lock(); 1255 rcu_read_lock();
1224 vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME])); 1256 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1225 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; 1257 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1226 rcu_read_unlock(); 1258 rcu_read_unlock();
1227 } 1259 }
@@ -1235,22 +1267,21 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1235 struct sk_buff *reply; 1267 struct sk_buff *reply;
1236 struct datapath *dp; 1268 struct datapath *dp;
1237 struct vport *vport; 1269 struct vport *vport;
1238 int err; 1270 struct ovs_net *ovs_net;
1271 int err, i;
1239 1272
1240 err = -EINVAL; 1273 err = -EINVAL;
1241 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) 1274 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1242 goto err; 1275 goto err;
1243 1276
1244 rtnl_lock(); 1277 rtnl_lock();
1245 err = -ENODEV;
1246 if (!try_module_get(THIS_MODULE))
1247 goto err_unlock_rtnl;
1248 1278
1249 err = -ENOMEM; 1279 err = -ENOMEM;
1250 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 1280 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1251 if (dp == NULL) 1281 if (dp == NULL)
1252 goto err_put_module; 1282 goto err_unlock_rtnl;
1253 INIT_LIST_HEAD(&dp->port_list); 1283
1284 ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1254 1285
1255 /* Allocate table. */ 1286 /* Allocate table. */
1256 err = -ENOMEM; 1287 err = -ENOMEM;
@@ -1264,13 +1295,23 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1264 goto err_destroy_table; 1295 goto err_destroy_table;
1265 } 1296 }
1266 1297
1298 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1299 GFP_KERNEL);
1300 if (!dp->ports) {
1301 err = -ENOMEM;
1302 goto err_destroy_percpu;
1303 }
1304
1305 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1306 INIT_HLIST_HEAD(&dp->ports[i]);
1307
1267 /* Set up our datapath device. */ 1308 /* Set up our datapath device. */
1268 parms.name = nla_data(a[OVS_DP_ATTR_NAME]); 1309 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1269 parms.type = OVS_VPORT_TYPE_INTERNAL; 1310 parms.type = OVS_VPORT_TYPE_INTERNAL;
1270 parms.options = NULL; 1311 parms.options = NULL;
1271 parms.dp = dp; 1312 parms.dp = dp;
1272 parms.port_no = OVSP_LOCAL; 1313 parms.port_no = OVSP_LOCAL;
1273 parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]); 1314 parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1274 1315
1275 vport = new_vport(&parms); 1316 vport = new_vport(&parms);
1276 if (IS_ERR(vport)) { 1317 if (IS_ERR(vport)) {
@@ -1278,64 +1319,59 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1278 if (err == -EBUSY) 1319 if (err == -EBUSY)
1279 err = -EEXIST; 1320 err = -EEXIST;
1280 1321
1281 goto err_destroy_percpu; 1322 goto err_destroy_ports_array;
1282 } 1323 }
1283 1324
1284 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, 1325 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1285 info->snd_seq, OVS_DP_CMD_NEW); 1326 info->snd_seq, OVS_DP_CMD_NEW);
1286 err = PTR_ERR(reply); 1327 err = PTR_ERR(reply);
1287 if (IS_ERR(reply)) 1328 if (IS_ERR(reply))
1288 goto err_destroy_local_port; 1329 goto err_destroy_local_port;
1289 1330
1290 list_add_tail(&dp->list_node, &dps); 1331 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1332 list_add_tail(&dp->list_node, &ovs_net->dps);
1291 rtnl_unlock(); 1333 rtnl_unlock();
1292 1334
1293 genl_notify(reply, genl_info_net(info), info->snd_pid, 1335 genl_notify(reply, genl_info_net(info), info->snd_portid,
1294 ovs_dp_datapath_multicast_group.id, info->nlhdr, 1336 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1295 GFP_KERNEL); 1337 GFP_KERNEL);
1296 return 0; 1338 return 0;
1297 1339
1298err_destroy_local_port: 1340err_destroy_local_port:
1299 ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); 1341 ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1342err_destroy_ports_array:
1343 kfree(dp->ports);
1300err_destroy_percpu: 1344err_destroy_percpu:
1301 free_percpu(dp->stats_percpu); 1345 free_percpu(dp->stats_percpu);
1302err_destroy_table: 1346err_destroy_table:
1303 ovs_flow_tbl_destroy(genl_dereference(dp->table)); 1347 ovs_flow_tbl_destroy(genl_dereference(dp->table));
1304err_free_dp: 1348err_free_dp:
1349 release_net(ovs_dp_get_net(dp));
1305 kfree(dp); 1350 kfree(dp);
1306err_put_module:
1307 module_put(THIS_MODULE);
1308err_unlock_rtnl: 1351err_unlock_rtnl:
1309 rtnl_unlock(); 1352 rtnl_unlock();
1310err: 1353err:
1311 return err; 1354 return err;
1312} 1355}
1313 1356
1314static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) 1357/* Called with genl_mutex. */
1358static void __dp_destroy(struct datapath *dp)
1315{ 1359{
1316 struct vport *vport, *next_vport; 1360 int i;
1317 struct sk_buff *reply;
1318 struct datapath *dp;
1319 int err;
1320 1361
1321 rtnl_lock(); 1362 rtnl_lock();
1322 dp = lookup_datapath(info->userhdr, info->attrs);
1323 err = PTR_ERR(dp);
1324 if (IS_ERR(dp))
1325 goto exit_unlock;
1326 1363
1327 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, 1364 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1328 info->snd_seq, OVS_DP_CMD_DEL); 1365 struct vport *vport;
1329 err = PTR_ERR(reply); 1366 struct hlist_node *node, *n;
1330 if (IS_ERR(reply))
1331 goto exit_unlock;
1332 1367
1333 list_for_each_entry_safe(vport, next_vport, &dp->port_list, node) 1368 hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
1334 if (vport->port_no != OVSP_LOCAL) 1369 if (vport->port_no != OVSP_LOCAL)
1335 ovs_dp_detach_port(vport); 1370 ovs_dp_detach_port(vport);
1371 }
1336 1372
1337 list_del(&dp->list_node); 1373 list_del(&dp->list_node);
1338 ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); 1374 ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1339 1375
1340 /* rtnl_unlock() will wait until all the references to devices that 1376 /* rtnl_unlock() will wait until all the references to devices that
1341 * are pending unregistration have been dropped. We do it here to 1377 * are pending unregistration have been dropped. We do it here to
@@ -1345,17 +1381,32 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1345 rtnl_unlock(); 1381 rtnl_unlock();
1346 1382
1347 call_rcu(&dp->rcu, destroy_dp_rcu); 1383 call_rcu(&dp->rcu, destroy_dp_rcu);
1348 module_put(THIS_MODULE); 1384}
1385
1386static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1387{
1388 struct sk_buff *reply;
1389 struct datapath *dp;
1390 int err;
1391
1392 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1393 err = PTR_ERR(dp);
1394 if (IS_ERR(dp))
1395 return err;
1349 1396
1350 genl_notify(reply, genl_info_net(info), info->snd_pid, 1397 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1398 info->snd_seq, OVS_DP_CMD_DEL);
1399 err = PTR_ERR(reply);
1400 if (IS_ERR(reply))
1401 return err;
1402
1403 __dp_destroy(dp);
1404
1405 genl_notify(reply, genl_info_net(info), info->snd_portid,
1351 ovs_dp_datapath_multicast_group.id, info->nlhdr, 1406 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1352 GFP_KERNEL); 1407 GFP_KERNEL);
1353 1408
1354 return 0; 1409 return 0;
1355
1356exit_unlock:
1357 rtnl_unlock();
1358 return err;
1359} 1410}
1360 1411
1361static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) 1412static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
@@ -1364,20 +1415,20 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1364 struct datapath *dp; 1415 struct datapath *dp;
1365 int err; 1416 int err;
1366 1417
1367 dp = lookup_datapath(info->userhdr, info->attrs); 1418 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1368 if (IS_ERR(dp)) 1419 if (IS_ERR(dp))
1369 return PTR_ERR(dp); 1420 return PTR_ERR(dp);
1370 1421
1371 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, 1422 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1372 info->snd_seq, OVS_DP_CMD_NEW); 1423 info->snd_seq, OVS_DP_CMD_NEW);
1373 if (IS_ERR(reply)) { 1424 if (IS_ERR(reply)) {
1374 err = PTR_ERR(reply); 1425 err = PTR_ERR(reply);
1375 netlink_set_err(init_net.genl_sock, 0, 1426 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1376 ovs_dp_datapath_multicast_group.id, err); 1427 ovs_dp_datapath_multicast_group.id, err);
1377 return 0; 1428 return 0;
1378 } 1429 }
1379 1430
1380 genl_notify(reply, genl_info_net(info), info->snd_pid, 1431 genl_notify(reply, genl_info_net(info), info->snd_portid,
1381 ovs_dp_datapath_multicast_group.id, info->nlhdr, 1432 ovs_dp_datapath_multicast_group.id, info->nlhdr,
1382 GFP_KERNEL); 1433 GFP_KERNEL);
1383 1434
@@ -1389,11 +1440,11 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1389 struct sk_buff *reply; 1440 struct sk_buff *reply;
1390 struct datapath *dp; 1441 struct datapath *dp;
1391 1442
1392 dp = lookup_datapath(info->userhdr, info->attrs); 1443 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1393 if (IS_ERR(dp)) 1444 if (IS_ERR(dp))
1394 return PTR_ERR(dp); 1445 return PTR_ERR(dp);
1395 1446
1396 reply = ovs_dp_cmd_build_info(dp, info->snd_pid, 1447 reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1397 info->snd_seq, OVS_DP_CMD_NEW); 1448 info->snd_seq, OVS_DP_CMD_NEW);
1398 if (IS_ERR(reply)) 1449 if (IS_ERR(reply))
1399 return PTR_ERR(reply); 1450 return PTR_ERR(reply);
@@ -1403,13 +1454,14 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1403 1454
1404static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) 1455static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1405{ 1456{
1457 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1406 struct datapath *dp; 1458 struct datapath *dp;
1407 int skip = cb->args[0]; 1459 int skip = cb->args[0];
1408 int i = 0; 1460 int i = 0;
1409 1461
1410 list_for_each_entry(dp, &dps, list_node) { 1462 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1411 if (i >= skip && 1463 if (i >= skip &&
1412 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid, 1464 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1413 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1465 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1414 OVS_DP_CMD_NEW) < 0) 1466 OVS_DP_CMD_NEW) < 0)
1415 break; 1467 break;
@@ -1459,7 +1511,8 @@ static struct genl_family dp_vport_genl_family = {
1459 .hdrsize = sizeof(struct ovs_header), 1511 .hdrsize = sizeof(struct ovs_header),
1460 .name = OVS_VPORT_FAMILY, 1512 .name = OVS_VPORT_FAMILY,
1461 .version = OVS_VPORT_VERSION, 1513 .version = OVS_VPORT_VERSION,
1462 .maxattr = OVS_VPORT_ATTR_MAX 1514 .maxattr = OVS_VPORT_ATTR_MAX,
1515 .netnsok = true
1463}; 1516};
1464 1517
1465struct genl_multicast_group ovs_dp_vport_multicast_group = { 1518struct genl_multicast_group ovs_dp_vport_multicast_group = {
@@ -1468,13 +1521,13 @@ struct genl_multicast_group ovs_dp_vport_multicast_group = {
1468 1521
1469/* Called with RTNL lock or RCU read lock. */ 1522/* Called with RTNL lock or RCU read lock. */
1470static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, 1523static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1471 u32 pid, u32 seq, u32 flags, u8 cmd) 1524 u32 portid, u32 seq, u32 flags, u8 cmd)
1472{ 1525{
1473 struct ovs_header *ovs_header; 1526 struct ovs_header *ovs_header;
1474 struct ovs_vport_stats vport_stats; 1527 struct ovs_vport_stats vport_stats;
1475 int err; 1528 int err;
1476 1529
1477 ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family, 1530 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1478 flags, cmd); 1531 flags, cmd);
1479 if (!ovs_header) 1532 if (!ovs_header)
1480 return -EMSGSIZE; 1533 return -EMSGSIZE;
@@ -1484,7 +1537,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1484 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) || 1537 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1485 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) || 1538 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1486 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) || 1539 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1487 nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid)) 1540 nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
1488 goto nla_put_failure; 1541 goto nla_put_failure;
1489 1542
1490 ovs_vport_get_stats(vport, &vport_stats); 1543 ovs_vport_get_stats(vport, &vport_stats);
@@ -1506,7 +1559,7 @@ error:
1506} 1559}
1507 1560
1508/* Called with RTNL lock or RCU read lock. */ 1561/* Called with RTNL lock or RCU read lock. */
1509struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid, 1562struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1510 u32 seq, u8 cmd) 1563 u32 seq, u8 cmd)
1511{ 1564{
1512 struct sk_buff *skb; 1565 struct sk_buff *skb;
@@ -1516,7 +1569,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1516 if (!skb) 1569 if (!skb)
1517 return ERR_PTR(-ENOMEM); 1570 return ERR_PTR(-ENOMEM);
1518 1571
1519 retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd); 1572 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1520 if (retval < 0) { 1573 if (retval < 0) {
1521 kfree_skb(skb); 1574 kfree_skb(skb);
1522 return ERR_PTR(retval); 1575 return ERR_PTR(retval);
@@ -1525,14 +1578,15 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
1525} 1578}
1526 1579
1527/* Called with RTNL lock or RCU read lock. */ 1580/* Called with RTNL lock or RCU read lock. */
1528static struct vport *lookup_vport(struct ovs_header *ovs_header, 1581static struct vport *lookup_vport(struct net *net,
1582 struct ovs_header *ovs_header,
1529 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) 1583 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1530{ 1584{
1531 struct datapath *dp; 1585 struct datapath *dp;
1532 struct vport *vport; 1586 struct vport *vport;
1533 1587
1534 if (a[OVS_VPORT_ATTR_NAME]) { 1588 if (a[OVS_VPORT_ATTR_NAME]) {
1535 vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); 1589 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1536 if (!vport) 1590 if (!vport)
1537 return ERR_PTR(-ENODEV); 1591 return ERR_PTR(-ENODEV);
1538 if (ovs_header->dp_ifindex && 1592 if (ovs_header->dp_ifindex &&
@@ -1545,11 +1599,11 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
1545 if (port_no >= DP_MAX_PORTS) 1599 if (port_no >= DP_MAX_PORTS)
1546 return ERR_PTR(-EFBIG); 1600 return ERR_PTR(-EFBIG);
1547 1601
1548 dp = get_dp(ovs_header->dp_ifindex); 1602 dp = get_dp(net, ovs_header->dp_ifindex);
1549 if (!dp) 1603 if (!dp)
1550 return ERR_PTR(-ENODEV); 1604 return ERR_PTR(-ENODEV);
1551 1605
1552 vport = rcu_dereference_rtnl(dp->ports[port_no]); 1606 vport = ovs_vport_rtnl_rcu(dp, port_no);
1553 if (!vport) 1607 if (!vport)
1554 return ERR_PTR(-ENOENT); 1608 return ERR_PTR(-ENOENT);
1555 return vport; 1609 return vport;
@@ -1574,7 +1628,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1574 goto exit; 1628 goto exit;
1575 1629
1576 rtnl_lock(); 1630 rtnl_lock();
1577 dp = get_dp(ovs_header->dp_ifindex); 1631 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1578 err = -ENODEV; 1632 err = -ENODEV;
1579 if (!dp) 1633 if (!dp)
1580 goto exit_unlock; 1634 goto exit_unlock;
@@ -1586,7 +1640,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1586 if (port_no >= DP_MAX_PORTS) 1640 if (port_no >= DP_MAX_PORTS)
1587 goto exit_unlock; 1641 goto exit_unlock;
1588 1642
1589 vport = rtnl_dereference(dp->ports[port_no]); 1643 vport = ovs_vport_rtnl_rcu(dp, port_no);
1590 err = -EBUSY; 1644 err = -EBUSY;
1591 if (vport) 1645 if (vport)
1592 goto exit_unlock; 1646 goto exit_unlock;
@@ -1596,7 +1650,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1596 err = -EFBIG; 1650 err = -EFBIG;
1597 goto exit_unlock; 1651 goto exit_unlock;
1598 } 1652 }
1599 vport = rtnl_dereference(dp->ports[port_no]); 1653 vport = ovs_vport_rtnl(dp, port_no);
1600 if (!vport) 1654 if (!vport)
1601 break; 1655 break;
1602 } 1656 }
@@ -1607,21 +1661,21 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1607 parms.options = a[OVS_VPORT_ATTR_OPTIONS]; 1661 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1608 parms.dp = dp; 1662 parms.dp = dp;
1609 parms.port_no = port_no; 1663 parms.port_no = port_no;
1610 parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); 1664 parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1611 1665
1612 vport = new_vport(&parms); 1666 vport = new_vport(&parms);
1613 err = PTR_ERR(vport); 1667 err = PTR_ERR(vport);
1614 if (IS_ERR(vport)) 1668 if (IS_ERR(vport))
1615 goto exit_unlock; 1669 goto exit_unlock;
1616 1670
1617 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1671 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1618 OVS_VPORT_CMD_NEW); 1672 OVS_VPORT_CMD_NEW);
1619 if (IS_ERR(reply)) { 1673 if (IS_ERR(reply)) {
1620 err = PTR_ERR(reply); 1674 err = PTR_ERR(reply);
1621 ovs_dp_detach_port(vport); 1675 ovs_dp_detach_port(vport);
1622 goto exit_unlock; 1676 goto exit_unlock;
1623 } 1677 }
1624 genl_notify(reply, genl_info_net(info), info->snd_pid, 1678 genl_notify(reply, genl_info_net(info), info->snd_portid,
1625 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); 1679 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1626 1680
1627exit_unlock: 1681exit_unlock:
@@ -1638,7 +1692,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1638 int err; 1692 int err;
1639 1693
1640 rtnl_lock(); 1694 rtnl_lock();
1641 vport = lookup_vport(info->userhdr, a); 1695 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1642 err = PTR_ERR(vport); 1696 err = PTR_ERR(vport);
1643 if (IS_ERR(vport)) 1697 if (IS_ERR(vport))
1644 goto exit_unlock; 1698 goto exit_unlock;
@@ -1653,17 +1707,17 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1653 if (err) 1707 if (err)
1654 goto exit_unlock; 1708 goto exit_unlock;
1655 if (a[OVS_VPORT_ATTR_UPCALL_PID]) 1709 if (a[OVS_VPORT_ATTR_UPCALL_PID])
1656 vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); 1710 vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
1657 1711
1658 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1712 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1659 OVS_VPORT_CMD_NEW); 1713 OVS_VPORT_CMD_NEW);
1660 if (IS_ERR(reply)) { 1714 if (IS_ERR(reply)) {
1661 netlink_set_err(init_net.genl_sock, 0, 1715 netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1662 ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); 1716 ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
1663 goto exit_unlock; 1717 goto exit_unlock;
1664 } 1718 }
1665 1719
1666 genl_notify(reply, genl_info_net(info), info->snd_pid, 1720 genl_notify(reply, genl_info_net(info), info->snd_portid,
1667 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); 1721 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1668 1722
1669exit_unlock: 1723exit_unlock:
@@ -1679,7 +1733,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1679 int err; 1733 int err;
1680 1734
1681 rtnl_lock(); 1735 rtnl_lock();
1682 vport = lookup_vport(info->userhdr, a); 1736 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1683 err = PTR_ERR(vport); 1737 err = PTR_ERR(vport);
1684 if (IS_ERR(vport)) 1738 if (IS_ERR(vport))
1685 goto exit_unlock; 1739 goto exit_unlock;
@@ -1689,7 +1743,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1689 goto exit_unlock; 1743 goto exit_unlock;
1690 } 1744 }
1691 1745
1692 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1746 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1693 OVS_VPORT_CMD_DEL); 1747 OVS_VPORT_CMD_DEL);
1694 err = PTR_ERR(reply); 1748 err = PTR_ERR(reply);
1695 if (IS_ERR(reply)) 1749 if (IS_ERR(reply))
@@ -1697,7 +1751,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1697 1751
1698 ovs_dp_detach_port(vport); 1752 ovs_dp_detach_port(vport);
1699 1753
1700 genl_notify(reply, genl_info_net(info), info->snd_pid, 1754 genl_notify(reply, genl_info_net(info), info->snd_portid,
1701 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); 1755 ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1702 1756
1703exit_unlock: 1757exit_unlock:
@@ -1714,12 +1768,12 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1714 int err; 1768 int err;
1715 1769
1716 rcu_read_lock(); 1770 rcu_read_lock();
1717 vport = lookup_vport(ovs_header, a); 1771 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1718 err = PTR_ERR(vport); 1772 err = PTR_ERR(vport);
1719 if (IS_ERR(vport)) 1773 if (IS_ERR(vport))
1720 goto exit_unlock; 1774 goto exit_unlock;
1721 1775
1722 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1776 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
1723 OVS_VPORT_CMD_NEW); 1777 OVS_VPORT_CMD_NEW);
1724 err = PTR_ERR(reply); 1778 err = PTR_ERR(reply);
1725 if (IS_ERR(reply)) 1779 if (IS_ERR(reply))
@@ -1738,54 +1792,39 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1738{ 1792{
1739 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); 1793 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1740 struct datapath *dp; 1794 struct datapath *dp;
1741 u32 port_no; 1795 int bucket = cb->args[0], skip = cb->args[1];
1742 int retval; 1796 int i, j = 0;
1743 1797
1744 dp = get_dp(ovs_header->dp_ifindex); 1798 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1745 if (!dp) 1799 if (!dp)
1746 return -ENODEV; 1800 return -ENODEV;
1747 1801
1748 rcu_read_lock(); 1802 rcu_read_lock();
1749 for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) { 1803 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1750 struct vport *vport; 1804 struct vport *vport;
1751 1805 struct hlist_node *n;
1752 vport = rcu_dereference(dp->ports[port_no]); 1806
1753 if (!vport) 1807 j = 0;
1754 continue; 1808 hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
1755 1809 if (j >= skip &&
1756 if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid, 1810 ovs_vport_cmd_fill_info(vport, skb,
1757 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1811 NETLINK_CB(cb->skb).portid,
1758 OVS_VPORT_CMD_NEW) < 0) 1812 cb->nlh->nlmsg_seq,
1759 break; 1813 NLM_F_MULTI,
1760 } 1814 OVS_VPORT_CMD_NEW) < 0)
1761 rcu_read_unlock(); 1815 goto out;
1762 1816
1763 cb->args[0] = port_no; 1817 j++;
1764 retval = skb->len;
1765
1766 return retval;
1767}
1768
1769static void rehash_flow_table(struct work_struct *work)
1770{
1771 struct datapath *dp;
1772
1773 genl_lock();
1774
1775 list_for_each_entry(dp, &dps, list_node) {
1776 struct flow_table *old_table = genl_dereference(dp->table);
1777 struct flow_table *new_table;
1778
1779 new_table = ovs_flow_tbl_rehash(old_table);
1780 if (!IS_ERR(new_table)) {
1781 rcu_assign_pointer(dp->table, new_table);
1782 ovs_flow_tbl_deferred_destroy(old_table);
1783 } 1818 }
1819 skip = 0;
1784 } 1820 }
1821out:
1822 rcu_read_unlock();
1785 1823
1786 genl_unlock(); 1824 cb->args[0] = i;
1825 cb->args[1] = j;
1787 1826
1788 schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); 1827 return skb->len;
1789} 1828}
1790 1829
1791static struct genl_ops dp_vport_genl_ops[] = { 1830static struct genl_ops dp_vport_genl_ops[] = {
@@ -1872,6 +1911,59 @@ error:
1872 return err; 1911 return err;
1873} 1912}
1874 1913
1914static void rehash_flow_table(struct work_struct *work)
1915{
1916 struct datapath *dp;
1917 struct net *net;
1918
1919 genl_lock();
1920 rtnl_lock();
1921 for_each_net(net) {
1922 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1923
1924 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1925 struct flow_table *old_table = genl_dereference(dp->table);
1926 struct flow_table *new_table;
1927
1928 new_table = ovs_flow_tbl_rehash(old_table);
1929 if (!IS_ERR(new_table)) {
1930 rcu_assign_pointer(dp->table, new_table);
1931 ovs_flow_tbl_deferred_destroy(old_table);
1932 }
1933 }
1934 }
1935 rtnl_unlock();
1936 genl_unlock();
1937
1938 schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
1939}
1940
1941static int __net_init ovs_init_net(struct net *net)
1942{
1943 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1944
1945 INIT_LIST_HEAD(&ovs_net->dps);
1946 return 0;
1947}
1948
1949static void __net_exit ovs_exit_net(struct net *net)
1950{
1951 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1952 struct datapath *dp, *dp_next;
1953
1954 genl_lock();
1955 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
1956 __dp_destroy(dp);
1957 genl_unlock();
1958}
1959
1960static struct pernet_operations ovs_net_ops = {
1961 .init = ovs_init_net,
1962 .exit = ovs_exit_net,
1963 .id = &ovs_net_id,
1964 .size = sizeof(struct ovs_net),
1965};
1966
1875static int __init dp_init(void) 1967static int __init dp_init(void)
1876{ 1968{
1877 struct sk_buff *dummy_skb; 1969 struct sk_buff *dummy_skb;
@@ -1889,10 +1981,14 @@ static int __init dp_init(void)
1889 if (err) 1981 if (err)
1890 goto error_flow_exit; 1982 goto error_flow_exit;
1891 1983
1892 err = register_netdevice_notifier(&ovs_dp_device_notifier); 1984 err = register_pernet_device(&ovs_net_ops);
1893 if (err) 1985 if (err)
1894 goto error_vport_exit; 1986 goto error_vport_exit;
1895 1987
1988 err = register_netdevice_notifier(&ovs_dp_device_notifier);
1989 if (err)
1990 goto error_netns_exit;
1991
1896 err = dp_register_genl(); 1992 err = dp_register_genl();
1897 if (err < 0) 1993 if (err < 0)
1898 goto error_unreg_notifier; 1994 goto error_unreg_notifier;
@@ -1903,6 +1999,8 @@ static int __init dp_init(void)
1903 1999
1904error_unreg_notifier: 2000error_unreg_notifier:
1905 unregister_netdevice_notifier(&ovs_dp_device_notifier); 2001 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2002error_netns_exit:
2003 unregister_pernet_device(&ovs_net_ops);
1906error_vport_exit: 2004error_vport_exit:
1907 ovs_vport_exit(); 2005 ovs_vport_exit();
1908error_flow_exit: 2006error_flow_exit:
@@ -1914,9 +2012,10 @@ error:
1914static void dp_cleanup(void) 2012static void dp_cleanup(void)
1915{ 2013{
1916 cancel_delayed_work_sync(&rehash_flow_wq); 2014 cancel_delayed_work_sync(&rehash_flow_wq);
1917 rcu_barrier();
1918 dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); 2015 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
1919 unregister_netdevice_notifier(&ovs_dp_device_notifier); 2016 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2017 unregister_pernet_device(&ovs_net_ops);
2018 rcu_barrier();
1920 ovs_vport_exit(); 2019 ovs_vport_exit();
1921 ovs_flow_exit(); 2020 ovs_flow_exit();
1922} 2021}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index c1105c147531..031dfbf37c93 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -27,10 +27,11 @@
27#include <linux/u64_stats_sync.h> 27#include <linux/u64_stats_sync.h>
28 28
29#include "flow.h" 29#include "flow.h"
30#include "vport.h"
30 31
31struct vport; 32#define DP_MAX_PORTS USHRT_MAX
33#define DP_VPORT_HASH_BUCKETS 1024
32 34
33#define DP_MAX_PORTS 1024
34#define SAMPLE_ACTION_DEPTH 3 35#define SAMPLE_ACTION_DEPTH 3
35 36
36/** 37/**
@@ -58,11 +59,10 @@ struct dp_stats_percpu {
58 * @list_node: Element in global 'dps' list. 59 * @list_node: Element in global 'dps' list.
59 * @n_flows: Number of flows currently in flow table. 60 * @n_flows: Number of flows currently in flow table.
60 * @table: Current flow table. Protected by genl_lock and RCU. 61 * @table: Current flow table. Protected by genl_lock and RCU.
61 * @ports: Map from port number to &struct vport. %OVSP_LOCAL port 62 * @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by
62 * always exists, other ports may be %NULL. Protected by RTNL and RCU. 63 * RTNL and RCU.
63 * @port_list: List of all ports in @ports in arbitrary order. RTNL required
64 * to iterate or modify.
65 * @stats_percpu: Per-CPU datapath statistics. 64 * @stats_percpu: Per-CPU datapath statistics.
65 * @net: Reference to net namespace.
66 * 66 *
67 * Context: See the comment on locking at the top of datapath.c for additional 67 * Context: See the comment on locking at the top of datapath.c for additional
68 * locking information. 68 * locking information.
@@ -75,13 +75,37 @@ struct datapath {
75 struct flow_table __rcu *table; 75 struct flow_table __rcu *table;
76 76
77 /* Switch ports. */ 77 /* Switch ports. */
78 struct vport __rcu *ports[DP_MAX_PORTS]; 78 struct hlist_head *ports;
79 struct list_head port_list;
80 79
81 /* Stats. */ 80 /* Stats. */
82 struct dp_stats_percpu __percpu *stats_percpu; 81 struct dp_stats_percpu __percpu *stats_percpu;
82
83#ifdef CONFIG_NET_NS
84 /* Network namespace ref. */
85 struct net *net;
86#endif
83}; 87};
84 88
89struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);
90
91static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
92{
93 WARN_ON_ONCE(!rcu_read_lock_held());
94 return ovs_lookup_vport(dp, port_no);
95}
96
97static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
98{
99 WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
100 return ovs_lookup_vport(dp, port_no);
101}
102
103static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
104{
105 ASSERT_RTNL();
106 return ovs_lookup_vport(dp, port_no);
107}
108
85/** 109/**
86 * struct ovs_skb_cb - OVS data in skb CB 110 * struct ovs_skb_cb - OVS data in skb CB
87 * @flow: The flow associated with this packet. May be %NULL if no flow. 111 * @flow: The flow associated with this packet. May be %NULL if no flow.
@@ -105,9 +129,19 @@ struct dp_upcall_info {
105 u8 cmd; 129 u8 cmd;
106 const struct sw_flow_key *key; 130 const struct sw_flow_key *key;
107 const struct nlattr *userdata; 131 const struct nlattr *userdata;
108 u32 pid; 132 u32 portid;
109}; 133};
110 134
135static inline struct net *ovs_dp_get_net(struct datapath *dp)
136{
137 return read_pnet(&dp->net);
138}
139
140static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
141{
142 write_pnet(&dp->net, net);
143}
144
111extern struct notifier_block ovs_dp_device_notifier; 145extern struct notifier_block ovs_dp_device_notifier;
112extern struct genl_multicast_group ovs_dp_vport_multicast_group; 146extern struct genl_multicast_group ovs_dp_vport_multicast_group;
113 147
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 36dcee8fc84a..5558350e0d33 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -41,19 +41,21 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
41 case NETDEV_UNREGISTER: 41 case NETDEV_UNREGISTER:
42 if (!ovs_is_internal_dev(dev)) { 42 if (!ovs_is_internal_dev(dev)) {
43 struct sk_buff *notify; 43 struct sk_buff *notify;
44 struct datapath *dp = vport->dp;
44 45
45 notify = ovs_vport_cmd_build_info(vport, 0, 0, 46 notify = ovs_vport_cmd_build_info(vport, 0, 0,
46 OVS_VPORT_CMD_DEL); 47 OVS_VPORT_CMD_DEL);
47 ovs_dp_detach_port(vport); 48 ovs_dp_detach_port(vport);
48 if (IS_ERR(notify)) { 49 if (IS_ERR(notify)) {
49 netlink_set_err(init_net.genl_sock, 0, 50 netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
50 ovs_dp_vport_multicast_group.id, 51 ovs_dp_vport_multicast_group.id,
51 PTR_ERR(notify)); 52 PTR_ERR(notify));
52 break; 53 break;
53 } 54 }
54 55
55 genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id, 56 genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
56 GFP_KERNEL); 57 ovs_dp_vport_multicast_group.id,
58 GFP_KERNEL);
57 } 59 }
58 break; 60 break;
59 } 61 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b7f38b161909..98c70630ad06 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -203,10 +203,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
203 int actions_len = nla_len(actions); 203 int actions_len = nla_len(actions);
204 struct sw_flow_actions *sfa; 204 struct sw_flow_actions *sfa;
205 205
206 /* At least DP_MAX_PORTS actions are required to be able to flood a 206 if (actions_len > MAX_ACTIONS_BUFSIZE)
207 * packet to every port. Factor of 2 allows for setting VLAN tags,
208 * etc. */
209 if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
210 return ERR_PTR(-EINVAL); 207 return ERR_PTR(-EINVAL);
211 208
212 sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); 209 sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
@@ -427,19 +424,11 @@ void ovs_flow_deferred_free(struct sw_flow *flow)
427 call_rcu(&flow->rcu, rcu_free_flow_callback); 424 call_rcu(&flow->rcu, rcu_free_flow_callback);
428} 425}
429 426
430/* RCU callback used by ovs_flow_deferred_free_acts. */
431static void rcu_free_acts_callback(struct rcu_head *rcu)
432{
433 struct sw_flow_actions *sf_acts = container_of(rcu,
434 struct sw_flow_actions, rcu);
435 kfree(sf_acts);
436}
437
438/* Schedules 'sf_acts' to be freed after the next RCU grace period. 427/* Schedules 'sf_acts' to be freed after the next RCU grace period.
439 * The caller must hold rcu_read_lock for this to be sensible. */ 428 * The caller must hold rcu_read_lock for this to be sensible. */
440void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts) 429void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
441{ 430{
442 call_rcu(&sf_acts->rcu, rcu_free_acts_callback); 431 kfree_rcu(sf_acts, rcu);
443} 432}
444 433
445static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) 434static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
@@ -1000,7 +989,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
1000 swkey->phy.in_port = in_port; 989 swkey->phy.in_port = in_port;
1001 attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); 990 attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
1002 } else { 991 } else {
1003 swkey->phy.in_port = USHRT_MAX; 992 swkey->phy.in_port = DP_MAX_PORTS;
1004 } 993 }
1005 994
1006 /* Data attributes. */ 995 /* Data attributes. */
@@ -1143,7 +1132,7 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
1143 const struct nlattr *nla; 1132 const struct nlattr *nla;
1144 int rem; 1133 int rem;
1145 1134
1146 *in_port = USHRT_MAX; 1135 *in_port = DP_MAX_PORTS;
1147 *priority = 0; 1136 *priority = 0;
1148 1137
1149 nla_for_each_nested(nla, attr, rem) { 1138 nla_for_each_nested(nla, attr, rem) {
@@ -1180,7 +1169,7 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1180 nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) 1169 nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
1181 goto nla_put_failure; 1170 goto nla_put_failure;
1182 1171
1183 if (swkey->phy.in_port != USHRT_MAX && 1172 if (swkey->phy.in_port != DP_MAX_PORTS &&
1184 nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) 1173 nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
1185 goto nla_put_failure; 1174 goto nla_put_failure;
1186 1175
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index c30df1a10c67..14a324eb017b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -43,7 +43,7 @@ struct sw_flow_actions {
43struct sw_flow_key { 43struct sw_flow_key {
44 struct { 44 struct {
45 u32 priority; /* Packet QoS priority. */ 45 u32 priority; /* Packet QoS priority. */
46 u16 in_port; /* Input switch port (or USHRT_MAX). */ 46 u16 in_port; /* Input switch port (or DP_MAX_PORTS). */
47 } phy; 47 } phy;
48 struct { 48 struct {
49 u8 src[ETH_ALEN]; /* Ethernet source address. */ 49 u8 src[ETH_ALEN]; /* Ethernet source address. */
@@ -163,6 +163,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
163int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, 163int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
164 const struct nlattr *); 164 const struct nlattr *);
165 165
166#define MAX_ACTIONS_BUFSIZE (16 * 1024)
166#define TBL_MIN_BUCKETS 1024 167#define TBL_MIN_BUCKETS 1024
167 168
168struct flow_table { 169struct flow_table {
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 4061b9ee07f7..5d460c37df07 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -144,7 +144,7 @@ static void do_setup(struct net_device *netdev)
144 netdev->tx_queue_len = 0; 144 netdev->tx_queue_len = 0;
145 145
146 netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | 146 netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
147 NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; 147 NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
148 148
149 netdev->vlan_features = netdev->features; 149 netdev->vlan_features = netdev->features;
150 netdev->features |= NETIF_F_HW_VLAN_TX; 150 netdev->features |= NETIF_F_HW_VLAN_TX;
@@ -175,9 +175,14 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
175 goto error_free_vport; 175 goto error_free_vport;
176 } 176 }
177 177
178 dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
178 internal_dev = internal_dev_priv(netdev_vport->dev); 179 internal_dev = internal_dev_priv(netdev_vport->dev);
179 internal_dev->vport = vport; 180 internal_dev->vport = vport;
180 181
182 /* Restrict bridge port to current netns. */
183 if (vport->port_no == OVSP_LOCAL)
184 netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
185
181 err = register_netdevice(netdev_vport->dev); 186 err = register_netdevice(netdev_vport->dev);
182 if (err) 187 if (err)
183 goto error_free_netdev; 188 goto error_free_netdev;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 6ea3551cc78c..3c1e58ba714b 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -83,7 +83,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
83 83
84 netdev_vport = netdev_vport_priv(vport); 84 netdev_vport = netdev_vport_priv(vport);
85 85
86 netdev_vport->dev = dev_get_by_name(&init_net, parms->name); 86 netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
87 if (!netdev_vport->dev) { 87 if (!netdev_vport->dev) {
88 err = -ENODEV; 88 err = -ENODEV;
89 goto error_free_vport; 89 goto error_free_vport;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6140336e79d7..03779e8a2622 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -16,10 +16,10 @@
16 * 02110-1301, USA 16 * 02110-1301, USA
17 */ 17 */
18 18
19#include <linux/dcache.h>
20#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
21#include <linux/if.h> 20#include <linux/if.h>
22#include <linux/if_vlan.h> 21#include <linux/if_vlan.h>
22#include <linux/jhash.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
@@ -27,7 +27,9 @@
27#include <linux/rcupdate.h> 27#include <linux/rcupdate.h>
28#include <linux/rtnetlink.h> 28#include <linux/rtnetlink.h>
29#include <linux/compat.h> 29#include <linux/compat.h>
30#include <net/net_namespace.h>
30 31
32#include "datapath.h"
31#include "vport.h" 33#include "vport.h"
32#include "vport-internal_dev.h" 34#include "vport-internal_dev.h"
33 35
@@ -67,9 +69,9 @@ void ovs_vport_exit(void)
67 kfree(dev_table); 69 kfree(dev_table);
68} 70}
69 71
70static struct hlist_head *hash_bucket(const char *name) 72static struct hlist_head *hash_bucket(struct net *net, const char *name)
71{ 73{
72 unsigned int hash = full_name_hash(name, strlen(name)); 74 unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
73 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; 75 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
74} 76}
75 77
@@ -80,14 +82,15 @@ static struct hlist_head *hash_bucket(const char *name)
80 * 82 *
81 * Must be called with RTNL or RCU read lock. 83 * Must be called with RTNL or RCU read lock.
82 */ 84 */
83struct vport *ovs_vport_locate(const char *name) 85struct vport *ovs_vport_locate(struct net *net, const char *name)
84{ 86{
85 struct hlist_head *bucket = hash_bucket(name); 87 struct hlist_head *bucket = hash_bucket(net, name);
86 struct vport *vport; 88 struct vport *vport;
87 struct hlist_node *node; 89 struct hlist_node *node;
88 90
89 hlist_for_each_entry_rcu(vport, node, bucket, hash_node) 91 hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
90 if (!strcmp(name, vport->ops->get_name(vport))) 92 if (!strcmp(name, vport->ops->get_name(vport)) &&
93 net_eq(ovs_dp_get_net(vport->dp), net))
91 return vport; 94 return vport;
92 95
93 return NULL; 96 return NULL;
@@ -122,8 +125,9 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
122 125
123 vport->dp = parms->dp; 126 vport->dp = parms->dp;
124 vport->port_no = parms->port_no; 127 vport->port_no = parms->port_no;
125 vport->upcall_pid = parms->upcall_pid; 128 vport->upcall_portid = parms->upcall_portid;
126 vport->ops = ops; 129 vport->ops = ops;
130 INIT_HLIST_NODE(&vport->dp_hash_node);
127 131
128 vport->percpu_stats = alloc_percpu(struct vport_percpu_stats); 132 vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
129 if (!vport->percpu_stats) { 133 if (!vport->percpu_stats) {
@@ -170,14 +174,17 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
170 174
171 for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) { 175 for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
172 if (vport_ops_list[i]->type == parms->type) { 176 if (vport_ops_list[i]->type == parms->type) {
177 struct hlist_head *bucket;
178
173 vport = vport_ops_list[i]->create(parms); 179 vport = vport_ops_list[i]->create(parms);
174 if (IS_ERR(vport)) { 180 if (IS_ERR(vport)) {
175 err = PTR_ERR(vport); 181 err = PTR_ERR(vport);
176 goto out; 182 goto out;
177 } 183 }
178 184
179 hlist_add_head_rcu(&vport->hash_node, 185 bucket = hash_bucket(ovs_dp_get_net(vport->dp),
180 hash_bucket(vport->ops->get_name(vport))); 186 vport->ops->get_name(vport));
187 hlist_add_head_rcu(&vport->hash_node, bucket);
181 return vport; 188 return vport;
182 } 189 }
183 } 190 }
@@ -391,7 +398,7 @@ void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
391 case VPORT_E_TX_ERROR: 398 case VPORT_E_TX_ERROR:
392 vport->err_stats.tx_errors++; 399 vport->err_stats.tx_errors++;
393 break; 400 break;
394 }; 401 }
395 402
396 spin_unlock(&vport->stats_lock); 403 spin_unlock(&vport->stats_lock);
397} 404}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index aac680ca2b06..3f7961ea3c56 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -20,6 +20,7 @@
20#define VPORT_H 1 20#define VPORT_H 1
21 21
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/netlink.h>
23#include <linux/openvswitch.h> 24#include <linux/openvswitch.h>
24#include <linux/skbuff.h> 25#include <linux/skbuff.h>
25#include <linux/spinlock.h> 26#include <linux/spinlock.h>
@@ -38,7 +39,7 @@ void ovs_vport_exit(void);
38struct vport *ovs_vport_add(const struct vport_parms *); 39struct vport *ovs_vport_add(const struct vport_parms *);
39void ovs_vport_del(struct vport *); 40void ovs_vport_del(struct vport *);
40 41
41struct vport *ovs_vport_locate(const char *name); 42struct vport *ovs_vport_locate(struct net *net, const char *name);
42 43
43void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *); 44void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
44 45
@@ -69,10 +70,10 @@ struct vport_err_stats {
69 * @rcu: RCU callback head for deferred destruction. 70 * @rcu: RCU callback head for deferred destruction.
70 * @port_no: Index into @dp's @ports array. 71 * @port_no: Index into @dp's @ports array.
71 * @dp: Datapath to which this port belongs. 72 * @dp: Datapath to which this port belongs.
72 * @node: Element in @dp's @port_list. 73 * @upcall_portid: The Netlink port to use for packets received on this port that
73 * @upcall_pid: The Netlink port to use for packets received on this port that
74 * miss the flow table. 74 * miss the flow table.
75 * @hash_node: Element in @dev_table hash table in vport.c. 75 * @hash_node: Element in @dev_table hash table in vport.c.
76 * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
76 * @ops: Class structure. 77 * @ops: Class structure.
77 * @percpu_stats: Points to per-CPU statistics used and maintained by vport 78 * @percpu_stats: Points to per-CPU statistics used and maintained by vport
78 * @stats_lock: Protects @err_stats; 79 * @stats_lock: Protects @err_stats;
@@ -82,10 +83,10 @@ struct vport {
82 struct rcu_head rcu; 83 struct rcu_head rcu;
83 u16 port_no; 84 u16 port_no;
84 struct datapath *dp; 85 struct datapath *dp;
85 struct list_head node; 86 u32 upcall_portid;
86 u32 upcall_pid;
87 87
88 struct hlist_node hash_node; 88 struct hlist_node hash_node;
89 struct hlist_node dp_hash_node;
89 const struct vport_ops *ops; 90 const struct vport_ops *ops;
90 91
91 struct vport_percpu_stats __percpu *percpu_stats; 92 struct vport_percpu_stats __percpu *percpu_stats;
@@ -112,7 +113,7 @@ struct vport_parms {
112 /* For ovs_vport_alloc(). */ 113 /* For ovs_vport_alloc(). */
113 struct datapath *dp; 114 struct datapath *dp;
114 u16 port_no; 115 u16 port_no;
115 u32 upcall_pid; 116 u32 upcall_portid;
116}; 117};
117 118
118/** 119/**
diff --git a/net/packet/Kconfig b/net/packet/Kconfig
index 0060e3b396b7..cc55b35f80e5 100644
--- a/net/packet/Kconfig
+++ b/net/packet/Kconfig
@@ -14,3 +14,11 @@ config PACKET
14 be called af_packet. 14 be called af_packet.
15 15
16 If unsure, say Y. 16 If unsure, say Y.
17
18config PACKET_DIAG
19 tristate "Packet: sockets monitoring interface"
20 depends on PACKET
21 default n
22 ---help---
23 Support for PF_PACKET sockets monitoring interface used by the ss tool.
24 If unsure, say Y.
diff --git a/net/packet/Makefile b/net/packet/Makefile
index 81183eabfdec..9df61347a3c3 100644
--- a/net/packet/Makefile
+++ b/net/packet/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-$(CONFIG_PACKET) += af_packet.o 5obj-$(CONFIG_PACKET) += af_packet.o
6obj-$(CONFIG_PACKET_DIAG) += af_packet_diag.o
7af_packet_diag-y += diag.o
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 048fba476aa5..94060edbbd70 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,6 +93,8 @@
93#include <net/inet_common.h> 93#include <net/inet_common.h>
94#endif 94#endif
95 95
96#include "internal.h"
97
96/* 98/*
97 Assumptions: 99 Assumptions:
98 - if device has no dev->hard_header routine, it adds and removes ll header 100 - if device has no dev->hard_header routine, it adds and removes ll header
@@ -146,14 +148,6 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
146 148
147/* Private packet socket structures. */ 149/* Private packet socket structures. */
148 150
149struct packet_mclist {
150 struct packet_mclist *next;
151 int ifindex;
152 int count;
153 unsigned short type;
154 unsigned short alen;
155 unsigned char addr[MAX_ADDR_LEN];
156};
157/* identical to struct packet_mreq except it has 151/* identical to struct packet_mreq except it has
158 * a longer address field. 152 * a longer address field.
159 */ 153 */
@@ -175,63 +169,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
175#define BLK_PLUS_PRIV(sz_of_priv) \ 169#define BLK_PLUS_PRIV(sz_of_priv) \
176 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) 170 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
177 171
178/* kbdq - kernel block descriptor queue */
179struct tpacket_kbdq_core {
180 struct pgv *pkbdq;
181 unsigned int feature_req_word;
182 unsigned int hdrlen;
183 unsigned char reset_pending_on_curr_blk;
184 unsigned char delete_blk_timer;
185 unsigned short kactive_blk_num;
186 unsigned short blk_sizeof_priv;
187
188 /* last_kactive_blk_num:
189 * trick to see if user-space has caught up
190 * in order to avoid refreshing timer when every single pkt arrives.
191 */
192 unsigned short last_kactive_blk_num;
193
194 char *pkblk_start;
195 char *pkblk_end;
196 int kblk_size;
197 unsigned int knum_blocks;
198 uint64_t knxt_seq_num;
199 char *prev;
200 char *nxt_offset;
201 struct sk_buff *skb;
202
203 atomic_t blk_fill_in_prog;
204
205 /* Default is set to 8ms */
206#define DEFAULT_PRB_RETIRE_TOV (8)
207
208 unsigned short retire_blk_tov;
209 unsigned short version;
210 unsigned long tov_in_jiffies;
211
212 /* timer to retire an outstanding block */
213 struct timer_list retire_blk_timer;
214};
215
216#define PGV_FROM_VMALLOC 1 172#define PGV_FROM_VMALLOC 1
217struct pgv {
218 char *buffer;
219};
220
221struct packet_ring_buffer {
222 struct pgv *pg_vec;
223 unsigned int head;
224 unsigned int frames_per_block;
225 unsigned int frame_size;
226 unsigned int frame_max;
227
228 unsigned int pg_vec_order;
229 unsigned int pg_vec_pages;
230 unsigned int pg_vec_len;
231
232 struct tpacket_kbdq_core prb_bdqc;
233 atomic_t pending;
234};
235 173
236#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) 174#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
237#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) 175#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
@@ -269,52 +207,6 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
269 struct tpacket3_hdr *); 207 struct tpacket3_hdr *);
270static void packet_flush_mclist(struct sock *sk); 208static void packet_flush_mclist(struct sock *sk);
271 209
272struct packet_fanout;
273struct packet_sock {
274 /* struct sock has to be the first member of packet_sock */
275 struct sock sk;
276 struct packet_fanout *fanout;
277 struct tpacket_stats stats;
278 union tpacket_stats_u stats_u;
279 struct packet_ring_buffer rx_ring;
280 struct packet_ring_buffer tx_ring;
281 int copy_thresh;
282 spinlock_t bind_lock;
283 struct mutex pg_vec_lock;
284 unsigned int running:1, /* prot_hook is attached*/
285 auxdata:1,
286 origdev:1,
287 has_vnet_hdr:1;
288 int ifindex; /* bound device */
289 __be16 num;
290 struct packet_mclist *mclist;
291 atomic_t mapped;
292 enum tpacket_versions tp_version;
293 unsigned int tp_hdrlen;
294 unsigned int tp_reserve;
295 unsigned int tp_loss:1;
296 unsigned int tp_tstamp;
297 struct packet_type prot_hook ____cacheline_aligned_in_smp;
298};
299
300#define PACKET_FANOUT_MAX 256
301
302struct packet_fanout {
303#ifdef CONFIG_NET_NS
304 struct net *net;
305#endif
306 unsigned int num_members;
307 u16 id;
308 u8 type;
309 u8 defrag;
310 atomic_t rr_cur;
311 struct list_head list;
312 struct sock *arr[PACKET_FANOUT_MAX];
313 spinlock_t lock;
314 atomic_t sk_ref;
315 struct packet_type prot_hook ____cacheline_aligned_in_smp;
316};
317
318struct packet_skb_cb { 210struct packet_skb_cb {
319 unsigned int origlen; 211 unsigned int origlen;
320 union { 212 union {
@@ -334,11 +226,6 @@ struct packet_skb_cb {
334 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \ 226 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
335 ((x)->kactive_blk_num+1) : 0) 227 ((x)->kactive_blk_num+1) : 0)
336 228
337static struct packet_sock *pkt_sk(struct sock *sk)
338{
339 return (struct packet_sock *)sk;
340}
341
342static void __fanout_unlink(struct sock *sk, struct packet_sock *po); 229static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
343static void __fanout_link(struct sock *sk, struct packet_sock *po); 230static void __fanout_link(struct sock *sk, struct packet_sock *po);
344 231
@@ -968,7 +855,8 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
968 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb); 855 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
969 ppd->tp_status = TP_STATUS_VLAN_VALID; 856 ppd->tp_status = TP_STATUS_VLAN_VALID;
970 } else { 857 } else {
971 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0; 858 ppd->hv1.tp_vlan_tci = 0;
859 ppd->tp_status = TP_STATUS_AVAILABLE;
972 } 860 }
973} 861}
974 862
@@ -1243,7 +1131,8 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1243 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1131 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1244} 1132}
1245 1133
1246static DEFINE_MUTEX(fanout_mutex); 1134DEFINE_MUTEX(fanout_mutex);
1135EXPORT_SYMBOL_GPL(fanout_mutex);
1247static LIST_HEAD(fanout_list); 1136static LIST_HEAD(fanout_list);
1248 1137
1249static void __fanout_link(struct sock *sk, struct packet_sock *po) 1138static void __fanout_link(struct sock *sk, struct packet_sock *po)
@@ -1364,9 +1253,9 @@ static void fanout_release(struct sock *sk)
1364 if (!f) 1253 if (!f)
1365 return; 1254 return;
1366 1255
1256 mutex_lock(&fanout_mutex);
1367 po->fanout = NULL; 1257 po->fanout = NULL;
1368 1258
1369 mutex_lock(&fanout_mutex);
1370 if (atomic_dec_and_test(&f->sk_ref)) { 1259 if (atomic_dec_and_test(&f->sk_ref)) {
1371 list_del(&f->list); 1260 list_del(&f->list);
1372 dev_remove_pack(&f->prot_hook); 1261 dev_remove_pack(&f->prot_hook);
@@ -2063,7 +1952,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2063 int tp_len, size_max; 1952 int tp_len, size_max;
2064 unsigned char *addr; 1953 unsigned char *addr;
2065 int len_sum = 0; 1954 int len_sum = 0;
2066 int status = 0; 1955 int status = TP_STATUS_AVAILABLE;
2067 int hlen, tlen; 1956 int hlen, tlen;
2068 1957
2069 mutex_lock(&po->pg_vec_lock); 1958 mutex_lock(&po->pg_vec_lock);
@@ -2428,10 +2317,13 @@ static int packet_release(struct socket *sock)
2428 net = sock_net(sk); 2317 net = sock_net(sk);
2429 po = pkt_sk(sk); 2318 po = pkt_sk(sk);
2430 2319
2431 spin_lock_bh(&net->packet.sklist_lock); 2320 mutex_lock(&net->packet.sklist_lock);
2432 sk_del_node_init_rcu(sk); 2321 sk_del_node_init_rcu(sk);
2322 mutex_unlock(&net->packet.sklist_lock);
2323
2324 preempt_disable();
2433 sock_prot_inuse_add(net, sk->sk_prot, -1); 2325 sock_prot_inuse_add(net, sk->sk_prot, -1);
2434 spin_unlock_bh(&net->packet.sklist_lock); 2326 preempt_enable();
2435 2327
2436 spin_lock(&po->bind_lock); 2328 spin_lock(&po->bind_lock);
2437 unregister_prot_hook(sk, false); 2329 unregister_prot_hook(sk, false);
@@ -2630,10 +2522,13 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2630 register_prot_hook(sk); 2522 register_prot_hook(sk);
2631 } 2523 }
2632 2524
2633 spin_lock_bh(&net->packet.sklist_lock); 2525 mutex_lock(&net->packet.sklist_lock);
2634 sk_add_node_rcu(sk, &net->packet.sklist); 2526 sk_add_node_rcu(sk, &net->packet.sklist);
2527 mutex_unlock(&net->packet.sklist_lock);
2528
2529 preempt_disable();
2635 sock_prot_inuse_add(net, &packet_proto, 1); 2530 sock_prot_inuse_add(net, &packet_proto, 1);
2636 spin_unlock_bh(&net->packet.sklist_lock); 2531 preempt_enable();
2637 2532
2638 return 0; 2533 return 0;
2639out: 2534out:
@@ -3886,7 +3781,7 @@ static const struct file_operations packet_seq_fops = {
3886 3781
3887static int __net_init packet_net_init(struct net *net) 3782static int __net_init packet_net_init(struct net *net)
3888{ 3783{
3889 spin_lock_init(&net->packet.sklist_lock); 3784 mutex_init(&net->packet.sklist_lock);
3890 INIT_HLIST_HEAD(&net->packet.sklist); 3785 INIT_HLIST_HEAD(&net->packet.sklist);
3891 3786
3892 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 3787 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
diff --git a/net/packet/diag.c b/net/packet/diag.c
new file mode 100644
index 000000000000..8db6e21c46bd
--- /dev/null
+++ b/net/packet/diag.c
@@ -0,0 +1,242 @@
1#include <linux/module.h>
2#include <linux/sock_diag.h>
3#include <linux/net.h>
4#include <linux/netdevice.h>
5#include <linux/packet_diag.h>
6#include <net/net_namespace.h>
7#include <net/sock.h>
8
9#include "internal.h"
10
11static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
12{
13 struct packet_diag_info pinfo;
14
15 pinfo.pdi_index = po->ifindex;
16 pinfo.pdi_version = po->tp_version;
17 pinfo.pdi_reserve = po->tp_reserve;
18 pinfo.pdi_copy_thresh = po->copy_thresh;
19 pinfo.pdi_tstamp = po->tp_tstamp;
20
21 pinfo.pdi_flags = 0;
22 if (po->running)
23 pinfo.pdi_flags |= PDI_RUNNING;
24 if (po->auxdata)
25 pinfo.pdi_flags |= PDI_AUXDATA;
26 if (po->origdev)
27 pinfo.pdi_flags |= PDI_ORIGDEV;
28 if (po->has_vnet_hdr)
29 pinfo.pdi_flags |= PDI_VNETHDR;
30 if (po->tp_loss)
31 pinfo.pdi_flags |= PDI_LOSS;
32
33 return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
34}
35
36static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
37{
38 struct nlattr *mca;
39 struct packet_mclist *ml;
40
41 mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
42 if (!mca)
43 return -EMSGSIZE;
44
45 rtnl_lock();
46 for (ml = po->mclist; ml; ml = ml->next) {
47 struct packet_diag_mclist *dml;
48
49 dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
50 if (!dml) {
51 rtnl_unlock();
52 nla_nest_cancel(nlskb, mca);
53 return -EMSGSIZE;
54 }
55
56 dml->pdmc_index = ml->ifindex;
57 dml->pdmc_type = ml->type;
58 dml->pdmc_alen = ml->alen;
59 dml->pdmc_count = ml->count;
60 BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
61 memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
62 }
63
64 rtnl_unlock();
65 nla_nest_end(nlskb, mca);
66
67 return 0;
68}
69
70static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
71 struct sk_buff *nlskb)
72{
73 struct packet_diag_ring pdr;
74
75 if (!ring->pg_vec || ((ver > TPACKET_V2) &&
76 (nl_type == PACKET_DIAG_TX_RING)))
77 return 0;
78
79 pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
80 pdr.pdr_block_nr = ring->pg_vec_len;
81 pdr.pdr_frame_size = ring->frame_size;
82 pdr.pdr_frame_nr = ring->frame_max + 1;
83
84 if (ver > TPACKET_V2) {
85 pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
86 pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
87 pdr.pdr_features = ring->prb_bdqc.feature_req_word;
88 } else {
89 pdr.pdr_retire_tmo = 0;
90 pdr.pdr_sizeof_priv = 0;
91 pdr.pdr_features = 0;
92 }
93
94 return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
95}
96
97static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
98{
99 int ret;
100
101 mutex_lock(&po->pg_vec_lock);
102 ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
103 PACKET_DIAG_RX_RING, skb);
104 if (!ret)
105 ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
106 PACKET_DIAG_TX_RING, skb);
107 mutex_unlock(&po->pg_vec_lock);
108
109 return ret;
110}
111
112static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
113{
114 int ret = 0;
115
116 mutex_lock(&fanout_mutex);
117 if (po->fanout) {
118 u32 val;
119
120 val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
121 ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
122 }
123 mutex_unlock(&fanout_mutex);
124
125 return ret;
126}
127
128static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct packet_diag_req *req,
129 u32 portid, u32 seq, u32 flags, int sk_ino)
130{
131 struct nlmsghdr *nlh;
132 struct packet_diag_msg *rp;
133 struct packet_sock *po = pkt_sk(sk);
134
135 nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
136 if (!nlh)
137 return -EMSGSIZE;
138
139 rp = nlmsg_data(nlh);
140 rp->pdiag_family = AF_PACKET;
141 rp->pdiag_type = sk->sk_type;
142 rp->pdiag_num = ntohs(po->num);
143 rp->pdiag_ino = sk_ino;
144 sock_diag_save_cookie(sk, rp->pdiag_cookie);
145
146 if ((req->pdiag_show & PACKET_SHOW_INFO) &&
147 pdiag_put_info(po, skb))
148 goto out_nlmsg_trim;
149
150 if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
151 pdiag_put_mclist(po, skb))
152 goto out_nlmsg_trim;
153
154 if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
155 pdiag_put_rings_cfg(po, skb))
156 goto out_nlmsg_trim;
157
158 if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
159 pdiag_put_fanout(po, skb))
160 goto out_nlmsg_trim;
161
162 return nlmsg_end(skb, nlh);
163
164out_nlmsg_trim:
165 nlmsg_cancel(skb, nlh);
166 return -EMSGSIZE;
167}
168
169static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
170{
171 int num = 0, s_num = cb->args[0];
172 struct packet_diag_req *req;
173 struct net *net;
174 struct sock *sk;
175 struct hlist_node *node;
176
177 net = sock_net(skb->sk);
178 req = nlmsg_data(cb->nlh);
179
180 mutex_lock(&net->packet.sklist_lock);
181 sk_for_each(sk, node, &net->packet.sklist) {
182 if (!net_eq(sock_net(sk), net))
183 continue;
184 if (num < s_num)
185 goto next;
186
187 if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).portid,
188 cb->nlh->nlmsg_seq, NLM_F_MULTI,
189 sock_i_ino(sk)) < 0)
190 goto done;
191next:
192 num++;
193 }
194done:
195 mutex_unlock(&net->packet.sklist_lock);
196 cb->args[0] = num;
197
198 return skb->len;
199}
200
201static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
202{
203 int hdrlen = sizeof(struct packet_diag_req);
204 struct net *net = sock_net(skb->sk);
205 struct packet_diag_req *req;
206
207 if (nlmsg_len(h) < hdrlen)
208 return -EINVAL;
209
210 req = nlmsg_data(h);
211 /* Make it possible to support protocol filtering later */
212 if (req->sdiag_protocol)
213 return -EINVAL;
214
215 if (h->nlmsg_flags & NLM_F_DUMP) {
216 struct netlink_dump_control c = {
217 .dump = packet_diag_dump,
218 };
219 return netlink_dump_start(net->diag_nlsk, skb, h, &c);
220 } else
221 return -EOPNOTSUPP;
222}
223
224static const struct sock_diag_handler packet_diag_handler = {
225 .family = AF_PACKET,
226 .dump = packet_diag_handler_dump,
227};
228
229static int __init packet_diag_init(void)
230{
231 return sock_diag_register(&packet_diag_handler);
232}
233
234static void __exit packet_diag_exit(void)
235{
236 sock_diag_unregister(&packet_diag_handler);
237}
238
239module_init(packet_diag_init);
240module_exit(packet_diag_exit);
241MODULE_LICENSE("GPL");
242MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
diff --git a/net/packet/internal.h b/net/packet/internal.h
new file mode 100644
index 000000000000..44945f6b7252
--- /dev/null
+++ b/net/packet/internal.h
@@ -0,0 +1,121 @@
1#ifndef __PACKET_INTERNAL_H__
2#define __PACKET_INTERNAL_H__
3
4struct packet_mclist {
5 struct packet_mclist *next;
6 int ifindex;
7 int count;
8 unsigned short type;
9 unsigned short alen;
10 unsigned char addr[MAX_ADDR_LEN];
11};
12
13/* kbdq - kernel block descriptor queue */
14struct tpacket_kbdq_core {
15 struct pgv *pkbdq;
16 unsigned int feature_req_word;
17 unsigned int hdrlen;
18 unsigned char reset_pending_on_curr_blk;
19 unsigned char delete_blk_timer;
20 unsigned short kactive_blk_num;
21 unsigned short blk_sizeof_priv;
22
23 /* last_kactive_blk_num:
24 * trick to see if user-space has caught up
25 * in order to avoid refreshing timer when every single pkt arrives.
26 */
27 unsigned short last_kactive_blk_num;
28
29 char *pkblk_start;
30 char *pkblk_end;
31 int kblk_size;
32 unsigned int knum_blocks;
33 uint64_t knxt_seq_num;
34 char *prev;
35 char *nxt_offset;
36 struct sk_buff *skb;
37
38 atomic_t blk_fill_in_prog;
39
40 /* Default is set to 8ms */
41#define DEFAULT_PRB_RETIRE_TOV (8)
42
43 unsigned short retire_blk_tov;
44 unsigned short version;
45 unsigned long tov_in_jiffies;
46
47 /* timer to retire an outstanding block */
48 struct timer_list retire_blk_timer;
49};
50
51struct pgv {
52 char *buffer;
53};
54
55struct packet_ring_buffer {
56 struct pgv *pg_vec;
57 unsigned int head;
58 unsigned int frames_per_block;
59 unsigned int frame_size;
60 unsigned int frame_max;
61
62 unsigned int pg_vec_order;
63 unsigned int pg_vec_pages;
64 unsigned int pg_vec_len;
65
66 struct tpacket_kbdq_core prb_bdqc;
67 atomic_t pending;
68};
69
70extern struct mutex fanout_mutex;
71#define PACKET_FANOUT_MAX 256
72
73struct packet_fanout {
74#ifdef CONFIG_NET_NS
75 struct net *net;
76#endif
77 unsigned int num_members;
78 u16 id;
79 u8 type;
80 u8 defrag;
81 atomic_t rr_cur;
82 struct list_head list;
83 struct sock *arr[PACKET_FANOUT_MAX];
84 spinlock_t lock;
85 atomic_t sk_ref;
86 struct packet_type prot_hook ____cacheline_aligned_in_smp;
87};
88
89struct packet_sock {
90 /* struct sock has to be the first member of packet_sock */
91 struct sock sk;
92 struct packet_fanout *fanout;
93 struct tpacket_stats stats;
94 union tpacket_stats_u stats_u;
95 struct packet_ring_buffer rx_ring;
96 struct packet_ring_buffer tx_ring;
97 int copy_thresh;
98 spinlock_t bind_lock;
99 struct mutex pg_vec_lock;
100 unsigned int running:1, /* prot_hook is attached*/
101 auxdata:1,
102 origdev:1,
103 has_vnet_hdr:1;
104 int ifindex; /* bound device */
105 __be16 num;
106 struct packet_mclist *mclist;
107 atomic_t mapped;
108 enum tpacket_versions tp_version;
109 unsigned int tp_hdrlen;
110 unsigned int tp_reserve;
111 unsigned int tp_loss:1;
112 unsigned int tp_tstamp;
113 struct packet_type prot_hook ____cacheline_aligned_in_smp;
114};
115
116static struct packet_sock *pkt_sk(struct sock *sk)
117{
118 return (struct packet_sock *)sk;
119}
120
121#endif
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 7dd762a464e5..83a8389619aa 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -33,7 +33,7 @@
33/* Device address handling */ 33/* Device address handling */
34 34
35static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, 35static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
36 u32 pid, u32 seq, int event); 36 u32 portid, u32 seq, int event);
37 37
38void phonet_address_notify(int event, struct net_device *dev, u8 addr) 38void phonet_address_notify(int event, struct net_device *dev, u8 addr)
39{ 39{
@@ -101,12 +101,12 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
101} 101}
102 102
103static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, 103static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
104 u32 pid, u32 seq, int event) 104 u32 portid, u32 seq, int event)
105{ 105{
106 struct ifaddrmsg *ifm; 106 struct ifaddrmsg *ifm;
107 struct nlmsghdr *nlh; 107 struct nlmsghdr *nlh;
108 108
109 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0); 109 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0);
110 if (nlh == NULL) 110 if (nlh == NULL)
111 return -EMSGSIZE; 111 return -EMSGSIZE;
112 112
@@ -148,7 +148,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
148 continue; 148 continue;
149 149
150 if (fill_addr(skb, pnd->netdev, addr << 2, 150 if (fill_addr(skb, pnd->netdev, addr << 2,
151 NETLINK_CB(cb->skb).pid, 151 NETLINK_CB(cb->skb).portid,
152 cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0) 152 cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
153 goto out; 153 goto out;
154 } 154 }
@@ -165,12 +165,12 @@ out:
165/* Routes handling */ 165/* Routes handling */
166 166
167static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, 167static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
168 u32 pid, u32 seq, int event) 168 u32 portid, u32 seq, int event)
169{ 169{
170 struct rtmsg *rtm; 170 struct rtmsg *rtm;
171 struct nlmsghdr *nlh; 171 struct nlmsghdr *nlh;
172 172
173 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), 0); 173 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0);
174 if (nlh == NULL) 174 if (nlh == NULL)
175 return -EMSGSIZE; 175 return -EMSGSIZE;
176 176
@@ -276,7 +276,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
276 276
277 if (addr_idx++ < addr_start_idx) 277 if (addr_idx++ < addr_start_idx)
278 continue; 278 continue;
279 if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).pid, 279 if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
280 cb->nlh->nlmsg_seq, RTM_NEWROUTE)) 280 cb->nlh->nlmsg_seq, RTM_NEWROUTE))
281 goto out; 281 goto out;
282 } 282 }
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index af95c8e058fc..a65ee78db0c5 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
43 struct rds_connection *conn; 43 struct rds_connection *conn;
44 struct rds_tcp_connection *tc; 44 struct rds_tcp_connection *tc;
45 45
46 read_lock_bh(&sk->sk_callback_lock); 46 read_lock(&sk->sk_callback_lock);
47 conn = sk->sk_user_data; 47 conn = sk->sk_user_data;
48 if (!conn) { 48 if (!conn) {
49 state_change = sk->sk_state_change; 49 state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
68 break; 68 break;
69 } 69 }
70out: 70out:
71 read_unlock_bh(&sk->sk_callback_lock); 71 read_unlock(&sk->sk_callback_lock);
72 state_change(sk); 72 state_change(sk);
73} 73}
74 74
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 72981375f47c..7787537e9c2e 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
114 114
115 rdsdebug("listen data ready sk %p\n", sk); 115 rdsdebug("listen data ready sk %p\n", sk);
116 116
117 read_lock_bh(&sk->sk_callback_lock); 117 read_lock(&sk->sk_callback_lock);
118 ready = sk->sk_user_data; 118 ready = sk->sk_user_data;
119 if (!ready) { /* check for teardown race */ 119 if (!ready) { /* check for teardown race */
120 ready = sk->sk_data_ready; 120 ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
131 queue_work(rds_wq, &rds_tcp_listen_work); 131 queue_work(rds_wq, &rds_tcp_listen_work);
132 132
133out: 133out:
134 read_unlock_bh(&sk->sk_callback_lock); 134 read_unlock(&sk->sk_callback_lock);
135 ready(sk, bytes); 135 ready(sk, bytes);
136} 136}
137 137
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 6243258f840f..4fac4f2bb9dc 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -322,7 +322,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
322 322
323 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 323 rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
324 324
325 read_lock_bh(&sk->sk_callback_lock); 325 read_lock(&sk->sk_callback_lock);
326 conn = sk->sk_user_data; 326 conn = sk->sk_user_data;
327 if (!conn) { /* check for teardown race */ 327 if (!conn) { /* check for teardown race */
328 ready = sk->sk_data_ready; 328 ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
336 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM) 336 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
338out: 338out:
339 read_unlock_bh(&sk->sk_callback_lock); 339 read_unlock(&sk->sk_callback_lock);
340 ready(sk, bytes); 340 ready(sk, bytes);
341} 341}
342 342
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 1b4fd68f0c7c..81cf5a4c5e40 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
174 struct rds_connection *conn; 174 struct rds_connection *conn;
175 struct rds_tcp_connection *tc; 175 struct rds_tcp_connection *tc;
176 176
177 read_lock_bh(&sk->sk_callback_lock); 177 read_lock(&sk->sk_callback_lock);
178 conn = sk->sk_user_data; 178 conn = sk->sk_user_data;
179 if (!conn) { 179 if (!conn) {
180 write_space = sk->sk_write_space; 180 write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
194 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 194 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
195 195
196out: 196out:
197 read_unlock_bh(&sk->sk_callback_lock); 197 read_unlock(&sk->sk_callback_lock);
198 198
199 /* 199 /*
200 * write_space is only called when data leaves tcp's send queue if 200 * write_space is only called when data leaves tcp's send queue if
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 752b72360ebc..a5c952741279 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -150,6 +150,20 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
150 rfkill_led_trigger_event(rfkill); 150 rfkill_led_trigger_event(rfkill);
151} 151}
152 152
153const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
154{
155 return rfkill->led_trigger.name;
156}
157EXPORT_SYMBOL(rfkill_get_led_trigger_name);
158
159void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
160{
161 BUG_ON(!rfkill);
162
163 rfkill->ledtrigname = name;
164}
165EXPORT_SYMBOL(rfkill_set_led_trigger_name);
166
153static int rfkill_led_trigger_register(struct rfkill *rfkill) 167static int rfkill_led_trigger_register(struct rfkill *rfkill)
154{ 168{
155 rfkill->led_trigger.name = rfkill->ledtrigname 169 rfkill->led_trigger.name = rfkill->ledtrigname
@@ -256,6 +270,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
256static void rfkill_set_block(struct rfkill *rfkill, bool blocked) 270static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
257{ 271{
258 unsigned long flags; 272 unsigned long flags;
273 bool prev, curr;
259 int err; 274 int err;
260 275
261 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) 276 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
@@ -270,6 +285,8 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
270 rfkill->ops->query(rfkill, rfkill->data); 285 rfkill->ops->query(rfkill, rfkill->data);
271 286
272 spin_lock_irqsave(&rfkill->lock, flags); 287 spin_lock_irqsave(&rfkill->lock, flags);
288 prev = rfkill->state & RFKILL_BLOCK_SW;
289
273 if (rfkill->state & RFKILL_BLOCK_SW) 290 if (rfkill->state & RFKILL_BLOCK_SW)
274 rfkill->state |= RFKILL_BLOCK_SW_PREV; 291 rfkill->state |= RFKILL_BLOCK_SW_PREV;
275 else 292 else
@@ -299,10 +316,13 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
299 } 316 }
300 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; 317 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
301 rfkill->state &= ~RFKILL_BLOCK_SW_PREV; 318 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
319 curr = rfkill->state & RFKILL_BLOCK_SW;
302 spin_unlock_irqrestore(&rfkill->lock, flags); 320 spin_unlock_irqrestore(&rfkill->lock, flags);
303 321
304 rfkill_led_trigger_event(rfkill); 322 rfkill_led_trigger_event(rfkill);
305 rfkill_event(rfkill); 323
324 if (prev != curr)
325 rfkill_event(rfkill);
306} 326}
307 327
308#ifdef CONFIG_RFKILL_INPUT 328#ifdef CONFIG_RFKILL_INPUT
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e3d2c78cb52c..102761d294cb 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -644,7 +644,7 @@ errout:
644} 644}
645 645
646static int 646static int
647tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, 647tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
648 u16 flags, int event, int bind, int ref) 648 u16 flags, int event, int bind, int ref)
649{ 649{
650 struct tcamsg *t; 650 struct tcamsg *t;
@@ -652,7 +652,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
652 unsigned char *b = skb_tail_pointer(skb); 652 unsigned char *b = skb_tail_pointer(skb);
653 struct nlattr *nest; 653 struct nlattr *nest;
654 654
655 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags); 655 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
656 if (!nlh) 656 if (!nlh)
657 goto out_nlmsg_trim; 657 goto out_nlmsg_trim;
658 t = nlmsg_data(nlh); 658 t = nlmsg_data(nlh);
@@ -678,7 +678,7 @@ out_nlmsg_trim:
678} 678}
679 679
680static int 680static int
681act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, 681act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
682 struct tc_action *a, int event) 682 struct tc_action *a, int event)
683{ 683{
684 struct sk_buff *skb; 684 struct sk_buff *skb;
@@ -686,16 +686,16 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
686 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 686 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
687 if (!skb) 687 if (!skb)
688 return -ENOBUFS; 688 return -ENOBUFS;
689 if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { 689 if (tca_get_fill(skb, a, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
690 kfree_skb(skb); 690 kfree_skb(skb);
691 return -EINVAL; 691 return -EINVAL;
692 } 692 }
693 693
694 return rtnl_unicast(skb, net, pid); 694 return rtnl_unicast(skb, net, portid);
695} 695}
696 696
697static struct tc_action * 697static struct tc_action *
698tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid) 698tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
699{ 699{
700 struct nlattr *tb[TCA_ACT_MAX + 1]; 700 struct nlattr *tb[TCA_ACT_MAX + 1];
701 struct tc_action *a; 701 struct tc_action *a;
@@ -762,7 +762,7 @@ static struct tc_action *create_a(int i)
762} 762}
763 763
764static int tca_action_flush(struct net *net, struct nlattr *nla, 764static int tca_action_flush(struct net *net, struct nlattr *nla,
765 struct nlmsghdr *n, u32 pid) 765 struct nlmsghdr *n, u32 portid)
766{ 766{
767 struct sk_buff *skb; 767 struct sk_buff *skb;
768 unsigned char *b; 768 unsigned char *b;
@@ -799,7 +799,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
799 if (a->ops == NULL) 799 if (a->ops == NULL)
800 goto err_out; 800 goto err_out;
801 801
802 nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); 802 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
803 if (!nlh) 803 if (!nlh)
804 goto out_module_put; 804 goto out_module_put;
805 t = nlmsg_data(nlh); 805 t = nlmsg_data(nlh);
@@ -823,7 +823,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
823 nlh->nlmsg_flags |= NLM_F_ROOT; 823 nlh->nlmsg_flags |= NLM_F_ROOT;
824 module_put(a->ops->owner); 824 module_put(a->ops->owner);
825 kfree(a); 825 kfree(a);
826 err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, 826 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
827 n->nlmsg_flags & NLM_F_ECHO); 827 n->nlmsg_flags & NLM_F_ECHO);
828 if (err > 0) 828 if (err > 0)
829 return 0; 829 return 0;
@@ -841,7 +841,7 @@ noflush_out:
841 841
842static int 842static int
843tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, 843tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
844 u32 pid, int event) 844 u32 portid, int event)
845{ 845{
846 int i, ret; 846 int i, ret;
847 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; 847 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
@@ -853,13 +853,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
853 853
854 if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { 854 if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
855 if (tb[1] != NULL) 855 if (tb[1] != NULL)
856 return tca_action_flush(net, tb[1], n, pid); 856 return tca_action_flush(net, tb[1], n, portid);
857 else 857 else
858 return -EINVAL; 858 return -EINVAL;
859 } 859 }
860 860
861 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { 861 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
862 act = tcf_action_get_1(tb[i], n, pid); 862 act = tcf_action_get_1(tb[i], n, portid);
863 if (IS_ERR(act)) { 863 if (IS_ERR(act)) {
864 ret = PTR_ERR(act); 864 ret = PTR_ERR(act);
865 goto err; 865 goto err;
@@ -874,7 +874,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
874 } 874 }
875 875
876 if (event == RTM_GETACTION) 876 if (event == RTM_GETACTION)
877 ret = act_get_notify(net, pid, n, head, event); 877 ret = act_get_notify(net, portid, n, head, event);
878 else { /* delete */ 878 else { /* delete */
879 struct sk_buff *skb; 879 struct sk_buff *skb;
880 880
@@ -884,7 +884,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
884 goto err; 884 goto err;
885 } 885 }
886 886
887 if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event, 887 if (tca_get_fill(skb, head, portid, n->nlmsg_seq, 0, event,
888 0, 1) <= 0) { 888 0, 1) <= 0) {
889 kfree_skb(skb); 889 kfree_skb(skb);
890 ret = -EINVAL; 890 ret = -EINVAL;
@@ -893,7 +893,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
893 893
894 /* now do the delete */ 894 /* now do the delete */
895 tcf_action_destroy(head, 0); 895 tcf_action_destroy(head, 0);
896 ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, 896 ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
897 n->nlmsg_flags & NLM_F_ECHO); 897 n->nlmsg_flags & NLM_F_ECHO);
898 if (ret > 0) 898 if (ret > 0)
899 return 0; 899 return 0;
@@ -905,7 +905,7 @@ err:
905} 905}
906 906
907static int tcf_add_notify(struct net *net, struct tc_action *a, 907static int tcf_add_notify(struct net *net, struct tc_action *a,
908 u32 pid, u32 seq, int event, u16 flags) 908 u32 portid, u32 seq, int event, u16 flags)
909{ 909{
910 struct tcamsg *t; 910 struct tcamsg *t;
911 struct nlmsghdr *nlh; 911 struct nlmsghdr *nlh;
@@ -920,7 +920,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
920 920
921 b = skb_tail_pointer(skb); 921 b = skb_tail_pointer(skb);
922 922
923 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags); 923 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
924 if (!nlh) 924 if (!nlh)
925 goto out_kfree_skb; 925 goto out_kfree_skb;
926 t = nlmsg_data(nlh); 926 t = nlmsg_data(nlh);
@@ -940,7 +940,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
940 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 940 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
941 NETLINK_CB(skb).dst_group = RTNLGRP_TC; 941 NETLINK_CB(skb).dst_group = RTNLGRP_TC;
942 942
943 err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO); 943 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
944 if (err > 0) 944 if (err > 0)
945 err = 0; 945 err = 0;
946 return err; 946 return err;
@@ -953,7 +953,7 @@ out_kfree_skb:
953 953
954static int 954static int
955tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, 955tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
956 u32 pid, int ovr) 956 u32 portid, int ovr)
957{ 957{
958 int ret = 0; 958 int ret = 0;
959 struct tc_action *act; 959 struct tc_action *act;
@@ -971,7 +971,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
971 /* dump then free all the actions after update; inserted policy 971 /* dump then free all the actions after update; inserted policy
972 * stays intact 972 * stays intact
973 */ 973 */
974 ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); 974 ret = tcf_add_notify(net, act, portid, seq, RTM_NEWACTION, n->nlmsg_flags);
975 for (a = act; a; a = act) { 975 for (a = act; a; a = act) {
976 act = a->next; 976 act = a->next;
977 kfree(a); 977 kfree(a);
@@ -984,7 +984,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
984{ 984{
985 struct net *net = sock_net(skb->sk); 985 struct net *net = sock_net(skb->sk);
986 struct nlattr *tca[TCA_ACT_MAX + 1]; 986 struct nlattr *tca[TCA_ACT_MAX + 1];
987 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 987 u32 portid = skb ? NETLINK_CB(skb).portid : 0;
988 int ret = 0, ovr = 0; 988 int ret = 0, ovr = 0;
989 989
990 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); 990 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
@@ -1008,17 +1008,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1008 if (n->nlmsg_flags & NLM_F_REPLACE) 1008 if (n->nlmsg_flags & NLM_F_REPLACE)
1009 ovr = 1; 1009 ovr = 1;
1010replay: 1010replay:
1011 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); 1011 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
1012 if (ret == -EAGAIN) 1012 if (ret == -EAGAIN)
1013 goto replay; 1013 goto replay;
1014 break; 1014 break;
1015 case RTM_DELACTION: 1015 case RTM_DELACTION:
1016 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, 1016 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1017 pid, RTM_DELACTION); 1017 portid, RTM_DELACTION);
1018 break; 1018 break;
1019 case RTM_GETACTION: 1019 case RTM_GETACTION:
1020 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, 1020 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1021 pid, RTM_GETACTION); 1021 portid, RTM_GETACTION);
1022 break; 1022 break;
1023 default: 1023 default:
1024 BUG(); 1024 BUG();
@@ -1085,7 +1085,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1085 goto out_module_put; 1085 goto out_module_put;
1086 } 1086 }
1087 1087
1088 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 1088 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1089 cb->nlh->nlmsg_type, sizeof(*t), 0); 1089 cb->nlh->nlmsg_type, sizeof(*t), 0);
1090 if (!nlh) 1090 if (!nlh)
1091 goto out_module_put; 1091 goto out_module_put;
@@ -1109,7 +1109,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1109 nla_nest_cancel(skb, nest); 1109 nla_nest_cancel(skb, nest);
1110 1110
1111 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1111 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1112 if (NETLINK_CB(cb->skb).pid && ret) 1112 if (NETLINK_CB(cb->skb).portid && ret)
1113 nlh->nlmsg_flags |= NLM_F_MULTI; 1113 nlh->nlmsg_flags |= NLM_F_MULTI;
1114 module_put(a_o->owner); 1114 module_put(a_o->owner);
1115 return skb->len; 1115 return skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index dc3ef5aef355..7ae02892437c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -343,13 +343,13 @@ errout:
343} 343}
344 344
345static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, 345static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
346 unsigned long fh, u32 pid, u32 seq, u16 flags, int event) 346 unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
347{ 347{
348 struct tcmsg *tcm; 348 struct tcmsg *tcm;
349 struct nlmsghdr *nlh; 349 struct nlmsghdr *nlh;
350 unsigned char *b = skb_tail_pointer(skb); 350 unsigned char *b = skb_tail_pointer(skb);
351 351
352 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags); 352 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
353 if (!nlh) 353 if (!nlh)
354 goto out_nlmsg_trim; 354 goto out_nlmsg_trim;
355 tcm = nlmsg_data(nlh); 355 tcm = nlmsg_data(nlh);
@@ -381,18 +381,18 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
381 unsigned long fh, int event) 381 unsigned long fh, int event)
382{ 382{
383 struct sk_buff *skb; 383 struct sk_buff *skb;
384 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 384 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
385 385
386 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 386 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
387 if (!skb) 387 if (!skb)
388 return -ENOBUFS; 388 return -ENOBUFS;
389 389
390 if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) { 390 if (tcf_fill_node(skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
391 kfree_skb(skb); 391 kfree_skb(skb);
392 return -EINVAL; 392 return -EINVAL;
393 } 393 }
394 394
395 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, 395 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
396 n->nlmsg_flags & NLM_F_ECHO); 396 n->nlmsg_flags & NLM_F_ECHO);
397} 397}
398 398
@@ -407,7 +407,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
407{ 407{
408 struct tcf_dump_args *a = (void *)arg; 408 struct tcf_dump_args *a = (void *)arg;
409 409
410 return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid, 410 return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
411 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER); 411 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
412} 412}
413 413
@@ -465,7 +465,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
465 if (t > s_t) 465 if (t > s_t)
466 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0])); 466 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
467 if (cb->args[1] == 0) { 467 if (cb->args[1] == 0) {
468 if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid, 468 if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).portid,
469 cb->nlh->nlmsg_seq, NLM_F_MULTI, 469 cb->nlh->nlmsg_seq, NLM_F_MULTI,
470 RTM_NEWTFILTER) <= 0) 470 RTM_NEWTFILTER) <= 0)
471 break; 471 break;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 4ab6e3325573..7c3de6ffa516 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -461,7 +461,7 @@ META_COLLECTOR(int_sk_sndtimeo)
461META_COLLECTOR(int_sk_sendmsg_off) 461META_COLLECTOR(int_sk_sendmsg_off)
462{ 462{
463 SKIP_NONLOCAL(skb); 463 SKIP_NONLOCAL(skb);
464 dst->value = skb->sk->sk_sndmsg_off; 464 dst->value = skb->sk->sk_frag.offset;
465} 465}
466 466
467META_COLLECTOR(int_sk_write_pend) 467META_COLLECTOR(int_sk_write_pend)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a08b4ab3e421..a18d975db59c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1185,7 +1185,7 @@ graft:
1185} 1185}
1186 1186
1187static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, 1187static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1188 u32 pid, u32 seq, u16 flags, int event) 1188 u32 portid, u32 seq, u16 flags, int event)
1189{ 1189{
1190 struct tcmsg *tcm; 1190 struct tcmsg *tcm;
1191 struct nlmsghdr *nlh; 1191 struct nlmsghdr *nlh;
@@ -1193,7 +1193,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1193 struct gnet_dump d; 1193 struct gnet_dump d;
1194 struct qdisc_size_table *stab; 1194 struct qdisc_size_table *stab;
1195 1195
1196 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags); 1196 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1197 if (!nlh) 1197 if (!nlh)
1198 goto out_nlmsg_trim; 1198 goto out_nlmsg_trim;
1199 tcm = nlmsg_data(nlh); 1199 tcm = nlmsg_data(nlh);
@@ -1248,25 +1248,25 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1248 struct Qdisc *old, struct Qdisc *new) 1248 struct Qdisc *old, struct Qdisc *new)
1249{ 1249{
1250 struct sk_buff *skb; 1250 struct sk_buff *skb;
1251 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1251 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1252 1252
1253 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1253 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1254 if (!skb) 1254 if (!skb)
1255 return -ENOBUFS; 1255 return -ENOBUFS;
1256 1256
1257 if (old && !tc_qdisc_dump_ignore(old)) { 1257 if (old && !tc_qdisc_dump_ignore(old)) {
1258 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 1258 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1259 0, RTM_DELQDISC) < 0) 1259 0, RTM_DELQDISC) < 0)
1260 goto err_out; 1260 goto err_out;
1261 } 1261 }
1262 if (new && !tc_qdisc_dump_ignore(new)) { 1262 if (new && !tc_qdisc_dump_ignore(new)) {
1263 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, 1263 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1264 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) 1264 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1265 goto err_out; 1265 goto err_out;
1266 } 1266 }
1267 1267
1268 if (skb->len) 1268 if (skb->len)
1269 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, 1269 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1270 n->nlmsg_flags & NLM_F_ECHO); 1270 n->nlmsg_flags & NLM_F_ECHO);
1271 1271
1272err_out: 1272err_out:
@@ -1289,7 +1289,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1289 q_idx++; 1289 q_idx++;
1290 } else { 1290 } else {
1291 if (!tc_qdisc_dump_ignore(q) && 1291 if (!tc_qdisc_dump_ignore(q) &&
1292 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, 1292 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1293 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) 1293 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1294 goto done; 1294 goto done;
1295 q_idx++; 1295 q_idx++;
@@ -1300,7 +1300,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1300 continue; 1300 continue;
1301 } 1301 }
1302 if (!tc_qdisc_dump_ignore(q) && 1302 if (!tc_qdisc_dump_ignore(q) &&
1303 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, 1303 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1304 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) 1304 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1305 goto done; 1305 goto done;
1306 q_idx++; 1306 q_idx++;
@@ -1375,7 +1375,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1375 const struct Qdisc_class_ops *cops; 1375 const struct Qdisc_class_ops *cops;
1376 unsigned long cl = 0; 1376 unsigned long cl = 0;
1377 unsigned long new_cl; 1377 unsigned long new_cl;
1378 u32 pid = tcm->tcm_parent; 1378 u32 portid = tcm->tcm_parent;
1379 u32 clid = tcm->tcm_handle; 1379 u32 clid = tcm->tcm_handle;
1380 u32 qid = TC_H_MAJ(clid); 1380 u32 qid = TC_H_MAJ(clid);
1381 int err; 1381 int err;
@@ -1403,8 +1403,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1403 1403
1404 /* Step 1. Determine qdisc handle X:0 */ 1404 /* Step 1. Determine qdisc handle X:0 */
1405 1405
1406 if (pid != TC_H_ROOT) { 1406 if (portid != TC_H_ROOT) {
1407 u32 qid1 = TC_H_MAJ(pid); 1407 u32 qid1 = TC_H_MAJ(portid);
1408 1408
1409 if (qid && qid1) { 1409 if (qid && qid1) {
1410 /* If both majors are known, they must be identical. */ 1410 /* If both majors are known, they must be identical. */
@@ -1418,10 +1418,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1418 /* Now qid is genuine qdisc handle consistent 1418 /* Now qid is genuine qdisc handle consistent
1419 * both with parent and child. 1419 * both with parent and child.
1420 * 1420 *
1421 * TC_H_MAJ(pid) still may be unspecified, complete it now. 1421 * TC_H_MAJ(portid) still may be unspecified, complete it now.
1422 */ 1422 */
1423 if (pid) 1423 if (portid)
1424 pid = TC_H_MAKE(qid, pid); 1424 portid = TC_H_MAKE(qid, portid);
1425 } else { 1425 } else {
1426 if (qid == 0) 1426 if (qid == 0)
1427 qid = dev->qdisc->handle; 1427 qid = dev->qdisc->handle;
@@ -1439,7 +1439,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1439 1439
1440 /* Now try to get class */ 1440 /* Now try to get class */
1441 if (clid == 0) { 1441 if (clid == 0) {
1442 if (pid == TC_H_ROOT) 1442 if (portid == TC_H_ROOT)
1443 clid = qid; 1443 clid = qid;
1444 } else 1444 } else
1445 clid = TC_H_MAKE(qid, clid); 1445 clid = TC_H_MAKE(qid, clid);
@@ -1478,7 +1478,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1478 new_cl = cl; 1478 new_cl = cl;
1479 err = -EOPNOTSUPP; 1479 err = -EOPNOTSUPP;
1480 if (cops->change) 1480 if (cops->change)
1481 err = cops->change(q, clid, pid, tca, &new_cl); 1481 err = cops->change(q, clid, portid, tca, &new_cl);
1482 if (err == 0) 1482 if (err == 0)
1483 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); 1483 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1484 1484
@@ -1492,7 +1492,7 @@ out:
1492 1492
1493static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, 1493static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1494 unsigned long cl, 1494 unsigned long cl,
1495 u32 pid, u32 seq, u16 flags, int event) 1495 u32 portid, u32 seq, u16 flags, int event)
1496{ 1496{
1497 struct tcmsg *tcm; 1497 struct tcmsg *tcm;
1498 struct nlmsghdr *nlh; 1498 struct nlmsghdr *nlh;
@@ -1500,7 +1500,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1500 struct gnet_dump d; 1500 struct gnet_dump d;
1501 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; 1501 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1502 1502
1503 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags); 1503 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1504 if (!nlh) 1504 if (!nlh)
1505 goto out_nlmsg_trim; 1505 goto out_nlmsg_trim;
1506 tcm = nlmsg_data(nlh); 1506 tcm = nlmsg_data(nlh);
@@ -1540,18 +1540,18 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
1540 unsigned long cl, int event) 1540 unsigned long cl, int event)
1541{ 1541{
1542 struct sk_buff *skb; 1542 struct sk_buff *skb;
1543 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1543 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1544 1544
1545 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1545 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1546 if (!skb) 1546 if (!skb)
1547 return -ENOBUFS; 1547 return -ENOBUFS;
1548 1548
1549 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) { 1549 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1550 kfree_skb(skb); 1550 kfree_skb(skb);
1551 return -EINVAL; 1551 return -EINVAL;
1552 } 1552 }
1553 1553
1554 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, 1554 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1555 n->nlmsg_flags & NLM_F_ECHO); 1555 n->nlmsg_flags & NLM_F_ECHO);
1556} 1556}
1557 1557
@@ -1565,7 +1565,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
1565{ 1565{
1566 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg; 1566 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1567 1567
1568 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid, 1568 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1569 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS); 1569 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1570} 1570}
1571 1571
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 9ce0b4fe23ff..71e50c80315f 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -352,7 +352,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
352{ 352{
353 struct drr_sched *q = qdisc_priv(sch); 353 struct drr_sched *q = qdisc_priv(sch);
354 struct drr_class *cl; 354 struct drr_class *cl;
355 int err; 355 int err = 0;
356 356
357 cl = drr_classify(skb, sch, &err); 357 cl = drr_classify(skb, sch, &err);
358 if (cl == NULL) { 358 if (cl == NULL) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 511323e89cec..aefc1504dc88 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -324,24 +324,6 @@ void netif_carrier_off(struct net_device *dev)
324} 324}
325EXPORT_SYMBOL(netif_carrier_off); 325EXPORT_SYMBOL(netif_carrier_off);
326 326
327/**
328 * netif_notify_peers - notify network peers about existence of @dev
329 * @dev: network device
330 *
331 * Generate traffic such that interested network peers are aware of
332 * @dev, such as by generating a gratuitous ARP. This may be used when
333 * a device wants to inform the rest of the network about some sort of
334 * reconfiguration such as a failover event or virtual machine
335 * migration.
336 */
337void netif_notify_peers(struct net_device *dev)
338{
339 rtnl_lock();
340 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
341 rtnl_unlock();
342}
343EXPORT_SYMBOL(netif_notify_peers);
344
345/* "NOOP" scheduler: the best scheduler, recommended for all interfaces 327/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
346 under all circumstances. It is difficult to invent anything faster or 328 under all circumstances. It is difficult to invent anything faster or
347 cheaper. 329 cheaper.
@@ -545,6 +527,8 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
545}; 527};
546EXPORT_SYMBOL(pfifo_fast_ops); 528EXPORT_SYMBOL(pfifo_fast_ops);
547 529
530static struct lock_class_key qdisc_tx_busylock;
531
548struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 532struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
549 struct Qdisc_ops *ops) 533 struct Qdisc_ops *ops)
550{ 534{
@@ -552,6 +536,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
552 struct Qdisc *sch; 536 struct Qdisc *sch;
553 unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size; 537 unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
554 int err = -ENOBUFS; 538 int err = -ENOBUFS;
539 struct net_device *dev = dev_queue->dev;
555 540
556 p = kzalloc_node(size, GFP_KERNEL, 541 p = kzalloc_node(size, GFP_KERNEL,
557 netdev_queue_numa_node_read(dev_queue)); 542 netdev_queue_numa_node_read(dev_queue));
@@ -571,12 +556,16 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
571 } 556 }
572 INIT_LIST_HEAD(&sch->list); 557 INIT_LIST_HEAD(&sch->list);
573 skb_queue_head_init(&sch->q); 558 skb_queue_head_init(&sch->q);
559
574 spin_lock_init(&sch->busylock); 560 spin_lock_init(&sch->busylock);
561 lockdep_set_class(&sch->busylock,
562 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
563
575 sch->ops = ops; 564 sch->ops = ops;
576 sch->enqueue = ops->enqueue; 565 sch->enqueue = ops->enqueue;
577 sch->dequeue = ops->dequeue; 566 sch->dequeue = ops->dequeue;
578 sch->dev_queue = dev_queue; 567 sch->dev_queue = dev_queue;
579 dev_hold(qdisc_dev(sch)); 568 dev_hold(dev);
580 atomic_set(&sch->refcnt, 1); 569 atomic_set(&sch->refcnt, 1);
581 570
582 return sch; 571 return sch;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 211a21217045..f0dd83cff906 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -881,7 +881,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
881{ 881{
882 struct qfq_sched *q = qdisc_priv(sch); 882 struct qfq_sched *q = qdisc_priv(sch);
883 struct qfq_class *cl; 883 struct qfq_class *cl;
884 int err; 884 int err = 0;
885 885
886 cl = qfq_classify(skb, sch, &err); 886 cl = qfq_classify(skb, sch, &err);
887 if (cl == NULL) { 887 if (cl == NULL) {
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ebaef3ed6065..b1ef3bc301a5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -82,6 +82,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
82 sctp_scope_t scope, 82 sctp_scope_t scope,
83 gfp_t gfp) 83 gfp_t gfp)
84{ 84{
85 struct net *net = sock_net(sk);
85 struct sctp_sock *sp; 86 struct sctp_sock *sp;
86 int i; 87 int i;
87 sctp_paramhdr_t *p; 88 sctp_paramhdr_t *p;
@@ -124,7 +125,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
124 * socket values. 125 * socket values.
125 */ 126 */
126 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; 127 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
127 asoc->pf_retrans = sctp_pf_retrans; 128 asoc->pf_retrans = net->sctp.pf_retrans;
128 129
129 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); 130 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
130 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); 131 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -175,7 +176,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
175 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 176 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
176 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; 177 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
177 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 178 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
178 min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ; 179 min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
179 180
180 /* Initializes the timers */ 181 /* Initializes the timers */
181 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) 182 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -281,7 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
281 * and will revert old behavior. 282 * and will revert old behavior.
282 */ 283 */
283 asoc->peer.asconf_capable = 0; 284 asoc->peer.asconf_capable = 0;
284 if (sctp_addip_noauth) 285 if (net->sctp.addip_noauth)
285 asoc->peer.asconf_capable = 1; 286 asoc->peer.asconf_capable = 1;
286 asoc->asconf_addr_del_pending = NULL; 287 asoc->asconf_addr_del_pending = NULL;
287 asoc->src_out_of_asoc_ok = 0; 288 asoc->src_out_of_asoc_ok = 0;
@@ -641,6 +642,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
641 const gfp_t gfp, 642 const gfp_t gfp,
642 const int peer_state) 643 const int peer_state)
643{ 644{
645 struct net *net = sock_net(asoc->base.sk);
644 struct sctp_transport *peer; 646 struct sctp_transport *peer;
645 struct sctp_sock *sp; 647 struct sctp_sock *sp;
646 unsigned short port; 648 unsigned short port;
@@ -674,7 +676,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
674 return peer; 676 return peer;
675 } 677 }
676 678
677 peer = sctp_transport_new(addr, gfp); 679 peer = sctp_transport_new(net, addr, gfp);
678 if (!peer) 680 if (!peer)
679 return NULL; 681 return NULL;
680 682
@@ -1089,13 +1091,15 @@ out:
1089 1091
1090/* Is this the association we are looking for? */ 1092/* Is this the association we are looking for? */
1091struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, 1093struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
1094 struct net *net,
1092 const union sctp_addr *laddr, 1095 const union sctp_addr *laddr,
1093 const union sctp_addr *paddr) 1096 const union sctp_addr *paddr)
1094{ 1097{
1095 struct sctp_transport *transport; 1098 struct sctp_transport *transport;
1096 1099
1097 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && 1100 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
1098 (htons(asoc->peer.port) == paddr->v4.sin_port)) { 1101 (htons(asoc->peer.port) == paddr->v4.sin_port) &&
1102 net_eq(sock_net(asoc->base.sk), net)) {
1099 transport = sctp_assoc_lookup_paddr(asoc, paddr); 1103 transport = sctp_assoc_lookup_paddr(asoc, paddr);
1100 if (!transport) 1104 if (!transport)
1101 goto out; 1105 goto out;
@@ -1116,6 +1120,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1116 struct sctp_association *asoc = 1120 struct sctp_association *asoc =
1117 container_of(work, struct sctp_association, 1121 container_of(work, struct sctp_association,
1118 base.inqueue.immediate); 1122 base.inqueue.immediate);
1123 struct net *net = sock_net(asoc->base.sk);
1119 struct sctp_endpoint *ep; 1124 struct sctp_endpoint *ep;
1120 struct sctp_chunk *chunk; 1125 struct sctp_chunk *chunk;
1121 struct sctp_inq *inqueue; 1126 struct sctp_inq *inqueue;
@@ -1148,13 +1153,13 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1148 if (sctp_chunk_is_data(chunk)) 1153 if (sctp_chunk_is_data(chunk))
1149 asoc->peer.last_data_from = chunk->transport; 1154 asoc->peer.last_data_from = chunk->transport;
1150 else 1155 else
1151 SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS); 1156 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1152 1157
1153 if (chunk->transport) 1158 if (chunk->transport)
1154 chunk->transport->last_time_heard = jiffies; 1159 chunk->transport->last_time_heard = jiffies;
1155 1160
1156 /* Run through the state machine. */ 1161 /* Run through the state machine. */
1157 error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, 1162 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1158 state, ep, asoc, chunk, GFP_ATOMIC); 1163 state, ep, asoc, chunk, GFP_ATOMIC);
1159 1164
1160 /* Check to see if the association is freed in response to 1165 /* Check to see if the association is freed in response to
@@ -1414,6 +1419,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1414/* Should we send a SACK to update our peer? */ 1419/* Should we send a SACK to update our peer? */
1415static inline int sctp_peer_needs_update(struct sctp_association *asoc) 1420static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1416{ 1421{
1422 struct net *net = sock_net(asoc->base.sk);
1417 switch (asoc->state) { 1423 switch (asoc->state) {
1418 case SCTP_STATE_ESTABLISHED: 1424 case SCTP_STATE_ESTABLISHED:
1419 case SCTP_STATE_SHUTDOWN_PENDING: 1425 case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1421,7 +1427,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1421 case SCTP_STATE_SHUTDOWN_SENT: 1427 case SCTP_STATE_SHUTDOWN_SENT:
1422 if ((asoc->rwnd > asoc->a_rwnd) && 1428 if ((asoc->rwnd > asoc->a_rwnd) &&
1423 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32, 1429 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1424 (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift), 1430 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1425 asoc->pathmtu))) 1431 asoc->pathmtu)))
1426 return 1; 1432 return 1;
1427 break; 1433 break;
@@ -1542,7 +1548,8 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1542 if (asoc->peer.ipv6_address) 1548 if (asoc->peer.ipv6_address)
1543 flags |= SCTP_ADDR6_PEERSUPP; 1549 flags |= SCTP_ADDR6_PEERSUPP;
1544 1550
1545 return sctp_bind_addr_copy(&asoc->base.bind_addr, 1551 return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1552 &asoc->base.bind_addr,
1546 &asoc->ep->base.bind_addr, 1553 &asoc->ep->base.bind_addr,
1547 scope, gfp, flags); 1554 scope, gfp, flags);
1548} 1555}
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index bf812048cf6f..159b9bc5d633 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -392,13 +392,14 @@ nomem:
392 */ 392 */
393int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) 393int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
394{ 394{
395 struct net *net = sock_net(asoc->base.sk);
395 struct sctp_auth_bytes *secret; 396 struct sctp_auth_bytes *secret;
396 struct sctp_shared_key *ep_key; 397 struct sctp_shared_key *ep_key;
397 398
398 /* If we don't support AUTH, or peer is not capable 399 /* If we don't support AUTH, or peer is not capable
399 * we don't need to do anything. 400 * we don't need to do anything.
400 */ 401 */
401 if (!sctp_auth_enable || !asoc->peer.auth_capable) 402 if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
402 return 0; 403 return 0;
403 404
404 /* If the key_id is non-zero and we couldn't find an 405 /* If the key_id is non-zero and we couldn't find an
@@ -445,11 +446,12 @@ struct sctp_shared_key *sctp_auth_get_shkey(
445 */ 446 */
446int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) 447int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
447{ 448{
449 struct net *net = sock_net(ep->base.sk);
448 struct crypto_hash *tfm = NULL; 450 struct crypto_hash *tfm = NULL;
449 __u16 id; 451 __u16 id;
450 452
451 /* if the transforms are already allocted, we are done */ 453 /* if the transforms are already allocted, we are done */
452 if (!sctp_auth_enable) { 454 if (!net->sctp.auth_enable) {
453 ep->auth_hmacs = NULL; 455 ep->auth_hmacs = NULL;
454 return 0; 456 return 0;
455 } 457 }
@@ -674,7 +676,12 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
674/* Check if peer requested that this chunk is authenticated */ 676/* Check if peer requested that this chunk is authenticated */
675int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) 677int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
676{ 678{
677 if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable) 679 struct net *net;
680 if (!asoc)
681 return 0;
682
683 net = sock_net(asoc->base.sk);
684 if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
678 return 0; 685 return 0;
679 686
680 return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); 687 return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
@@ -683,7 +690,12 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
683/* Check if we requested that peer authenticate this chunk. */ 690/* Check if we requested that peer authenticate this chunk. */
684int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) 691int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
685{ 692{
686 if (!sctp_auth_enable || !asoc) 693 struct net *net;
694 if (!asoc)
695 return 0;
696
697 net = sock_net(asoc->base.sk);
698 if (!net->sctp.auth_enable)
687 return 0; 699 return 0;
688 700
689 return __sctp_auth_cid(chunk, 701 return __sctp_auth_cid(chunk,
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 4ece451c8d27..d886b3bf84f5 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -52,8 +52,8 @@
52#include <net/sctp/sm.h> 52#include <net/sctp/sm.h>
53 53
54/* Forward declarations for internal helpers. */ 54/* Forward declarations for internal helpers. */
55static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, 55static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *,
56 sctp_scope_t scope, gfp_t gfp, 56 union sctp_addr *, sctp_scope_t scope, gfp_t gfp,
57 int flags); 57 int flags);
58static void sctp_bind_addr_clean(struct sctp_bind_addr *); 58static void sctp_bind_addr_clean(struct sctp_bind_addr *);
59 59
@@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *);
62/* Copy 'src' to 'dest' taking 'scope' into account. Omit addresses 62/* Copy 'src' to 'dest' taking 'scope' into account. Omit addresses
63 * in 'src' which have a broader scope than 'scope'. 63 * in 'src' which have a broader scope than 'scope'.
64 */ 64 */
65int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 65int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
66 const struct sctp_bind_addr *src, 66 const struct sctp_bind_addr *src,
67 sctp_scope_t scope, gfp_t gfp, 67 sctp_scope_t scope, gfp_t gfp,
68 int flags) 68 int flags)
@@ -75,7 +75,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
75 75
76 /* Extract the addresses which are relevant for this scope. */ 76 /* Extract the addresses which are relevant for this scope. */
77 list_for_each_entry(addr, &src->address_list, list) { 77 list_for_each_entry(addr, &src->address_list, list) {
78 error = sctp_copy_one_addr(dest, &addr->a, scope, 78 error = sctp_copy_one_addr(net, dest, &addr->a, scope,
79 gfp, flags); 79 gfp, flags);
80 if (error < 0) 80 if (error < 0)
81 goto out; 81 goto out;
@@ -87,7 +87,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
87 */ 87 */
88 if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) { 88 if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
89 list_for_each_entry(addr, &src->address_list, list) { 89 list_for_each_entry(addr, &src->address_list, list) {
90 error = sctp_copy_one_addr(dest, &addr->a, 90 error = sctp_copy_one_addr(net, dest, &addr->a,
91 SCTP_SCOPE_LINK, gfp, 91 SCTP_SCOPE_LINK, gfp,
92 flags); 92 flags);
93 if (error < 0) 93 if (error < 0)
@@ -448,7 +448,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
448} 448}
449 449
450/* Copy out addresses from the global local address list. */ 450/* Copy out addresses from the global local address list. */
451static int sctp_copy_one_addr(struct sctp_bind_addr *dest, 451static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
452 union sctp_addr *addr, 452 union sctp_addr *addr,
453 sctp_scope_t scope, gfp_t gfp, 453 sctp_scope_t scope, gfp_t gfp,
454 int flags) 454 int flags)
@@ -456,8 +456,8 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
456 int error = 0; 456 int error = 0;
457 457
458 if (sctp_is_any(NULL, addr)) { 458 if (sctp_is_any(NULL, addr)) {
459 error = sctp_copy_local_addr_list(dest, scope, gfp, flags); 459 error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
460 } else if (sctp_in_scope(addr, scope)) { 460 } else if (sctp_in_scope(net, addr, scope)) {
461 /* Now that the address is in scope, check to see if 461 /* Now that the address is in scope, check to see if
462 * the address type is supported by local sock as 462 * the address type is supported by local sock as
463 * well as the remote peer. 463 * well as the remote peer.
@@ -494,7 +494,7 @@ int sctp_is_any(struct sock *sk, const union sctp_addr *addr)
494} 494}
495 495
496/* Is 'addr' valid for 'scope'? */ 496/* Is 'addr' valid for 'scope'? */
497int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope) 497int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope)
498{ 498{
499 sctp_scope_t addr_scope = sctp_scope(addr); 499 sctp_scope_t addr_scope = sctp_scope(addr);
500 500
@@ -512,7 +512,7 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
512 * Address scoping can be selectively controlled via sysctl 512 * Address scoping can be selectively controlled via sysctl
513 * option 513 * option
514 */ 514 */
515 switch (sctp_scope_policy) { 515 switch (net->sctp.scope_policy) {
516 case SCTP_SCOPE_POLICY_DISABLE: 516 case SCTP_SCOPE_POLICY_DISABLE:
517 return 1; 517 return 1;
518 case SCTP_SCOPE_POLICY_ENABLE: 518 case SCTP_SCOPE_POLICY_ENABLE:
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 6c8556459a75..7c2df9c33df3 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -257,7 +257,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
257 offset = 0; 257 offset = 0;
258 258
259 if ((whole > 1) || (whole && over)) 259 if ((whole > 1) || (whole && over))
260 SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS); 260 SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
261 261
262 /* Create chunks for all the full sized DATA chunks. */ 262 /* Create chunks for all the full sized DATA chunks. */
263 for (i=0, len=first_len; i < whole; i++) { 263 for (i=0, len=first_len; i < whole; i++) {
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 68a385d7c3bd..1859e2bc83d1 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -65,6 +65,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
65 struct sock *sk, 65 struct sock *sk,
66 gfp_t gfp) 66 gfp_t gfp)
67{ 67{
68 struct net *net = sock_net(sk);
68 struct sctp_hmac_algo_param *auth_hmacs = NULL; 69 struct sctp_hmac_algo_param *auth_hmacs = NULL;
69 struct sctp_chunks_param *auth_chunks = NULL; 70 struct sctp_chunks_param *auth_chunks = NULL;
70 struct sctp_shared_key *null_key; 71 struct sctp_shared_key *null_key;
@@ -74,7 +75,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
74 if (!ep->digest) 75 if (!ep->digest)
75 return NULL; 76 return NULL;
76 77
77 if (sctp_auth_enable) { 78 if (net->sctp.auth_enable) {
78 /* Allocate space for HMACS and CHUNKS authentication 79 /* Allocate space for HMACS and CHUNKS authentication
79 * variables. There are arrays that we encode directly 80 * variables. There are arrays that we encode directly
80 * into parameters to make the rest of the operations easier. 81 * into parameters to make the rest of the operations easier.
@@ -106,7 +107,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
106 /* If the Add-IP functionality is enabled, we must 107 /* If the Add-IP functionality is enabled, we must
107 * authenticate, ASCONF and ASCONF-ACK chunks 108 * authenticate, ASCONF and ASCONF-ACK chunks
108 */ 109 */
109 if (sctp_addip_enable) { 110 if (net->sctp.addip_enable) {
110 auth_chunks->chunks[0] = SCTP_CID_ASCONF; 111 auth_chunks->chunks[0] = SCTP_CID_ASCONF;
111 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; 112 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
112 auth_chunks->param_hdr.length = 113 auth_chunks->param_hdr.length =
@@ -140,14 +141,14 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
140 INIT_LIST_HEAD(&ep->asocs); 141 INIT_LIST_HEAD(&ep->asocs);
141 142
142 /* Use SCTP specific send buffer space queues. */ 143 /* Use SCTP specific send buffer space queues. */
143 ep->sndbuf_policy = sctp_sndbuf_policy; 144 ep->sndbuf_policy = net->sctp.sndbuf_policy;
144 145
145 sk->sk_data_ready = sctp_data_ready; 146 sk->sk_data_ready = sctp_data_ready;
146 sk->sk_write_space = sctp_write_space; 147 sk->sk_write_space = sctp_write_space;
147 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 148 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
148 149
149 /* Get the receive buffer policy for this endpoint */ 150 /* Get the receive buffer policy for this endpoint */
150 ep->rcvbuf_policy = sctp_rcvbuf_policy; 151 ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
151 152
152 /* Initialize the secret key used with cookie. */ 153 /* Initialize the secret key used with cookie. */
153 get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); 154 get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -302,11 +303,13 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
302 303
303/* Is this the endpoint we are looking for? */ 304/* Is this the endpoint we are looking for? */
304struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, 305struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
306 struct net *net,
305 const union sctp_addr *laddr) 307 const union sctp_addr *laddr)
306{ 308{
307 struct sctp_endpoint *retval = NULL; 309 struct sctp_endpoint *retval = NULL;
308 310
309 if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) { 311 if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
312 net_eq(sock_net(ep->base.sk), net)) {
310 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, 313 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
311 sctp_sk(ep->base.sk))) 314 sctp_sk(ep->base.sk)))
312 retval = ep; 315 retval = ep;
@@ -343,7 +346,8 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
343 346
344 rport = ntohs(paddr->v4.sin_port); 347 rport = ntohs(paddr->v4.sin_port);
345 348
346 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport); 349 hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
350 rport);
347 head = &sctp_assoc_hashtable[hash]; 351 head = &sctp_assoc_hashtable[hash];
348 read_lock(&head->lock); 352 read_lock(&head->lock);
349 sctp_for_each_hentry(epb, node, &head->chain) { 353 sctp_for_each_hentry(epb, node, &head->chain) {
@@ -386,13 +390,14 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
386{ 390{
387 struct sctp_sockaddr_entry *addr; 391 struct sctp_sockaddr_entry *addr;
388 struct sctp_bind_addr *bp; 392 struct sctp_bind_addr *bp;
393 struct net *net = sock_net(ep->base.sk);
389 394
390 bp = &ep->base.bind_addr; 395 bp = &ep->base.bind_addr;
391 /* This function is called with the socket lock held, 396 /* This function is called with the socket lock held,
392 * so the address_list can not change. 397 * so the address_list can not change.
393 */ 398 */
394 list_for_each_entry(addr, &bp->address_list, list) { 399 list_for_each_entry(addr, &bp->address_list, list) {
395 if (sctp_has_association(&addr->a, paddr)) 400 if (sctp_has_association(net, &addr->a, paddr))
396 return 1; 401 return 1;
397 } 402 }
398 403
@@ -409,6 +414,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
409 base.inqueue.immediate); 414 base.inqueue.immediate);
410 struct sctp_association *asoc; 415 struct sctp_association *asoc;
411 struct sock *sk; 416 struct sock *sk;
417 struct net *net;
412 struct sctp_transport *transport; 418 struct sctp_transport *transport;
413 struct sctp_chunk *chunk; 419 struct sctp_chunk *chunk;
414 struct sctp_inq *inqueue; 420 struct sctp_inq *inqueue;
@@ -423,6 +429,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
423 asoc = NULL; 429 asoc = NULL;
424 inqueue = &ep->base.inqueue; 430 inqueue = &ep->base.inqueue;
425 sk = ep->base.sk; 431 sk = ep->base.sk;
432 net = sock_net(sk);
426 433
427 while (NULL != (chunk = sctp_inq_pop(inqueue))) { 434 while (NULL != (chunk = sctp_inq_pop(inqueue))) {
428 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); 435 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
@@ -474,12 +481,12 @@ normal:
474 if (asoc && sctp_chunk_is_data(chunk)) 481 if (asoc && sctp_chunk_is_data(chunk))
475 asoc->peer.last_data_from = chunk->transport; 482 asoc->peer.last_data_from = chunk->transport;
476 else 483 else
477 SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS); 484 SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
478 485
479 if (chunk->transport) 486 if (chunk->transport)
480 chunk->transport->last_time_heard = jiffies; 487 chunk->transport->last_time_heard = jiffies;
481 488
482 error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state, 489 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
483 ep, asoc, chunk, GFP_ATOMIC); 490 ep, asoc, chunk, GFP_ATOMIC);
484 491
485 if (error && chunk) 492 if (error && chunk)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index e64d5210ed13..25dfe7380479 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -66,12 +66,15 @@
66 66
67/* Forward declarations for internal helpers. */ 67/* Forward declarations for internal helpers. */
68static int sctp_rcv_ootb(struct sk_buff *); 68static int sctp_rcv_ootb(struct sk_buff *);
69static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, 69static struct sctp_association *__sctp_rcv_lookup(struct net *net,
70 struct sk_buff *skb,
70 const union sctp_addr *laddr, 71 const union sctp_addr *laddr,
71 const union sctp_addr *paddr, 72 const union sctp_addr *paddr,
72 struct sctp_transport **transportp); 73 struct sctp_transport **transportp);
73static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr); 74static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
75 const union sctp_addr *laddr);
74static struct sctp_association *__sctp_lookup_association( 76static struct sctp_association *__sctp_lookup_association(
77 struct net *net,
75 const union sctp_addr *local, 78 const union sctp_addr *local,
76 const union sctp_addr *peer, 79 const union sctp_addr *peer,
77 struct sctp_transport **pt); 80 struct sctp_transport **pt);
@@ -80,7 +83,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
80 83
81 84
82/* Calculate the SCTP checksum of an SCTP packet. */ 85/* Calculate the SCTP checksum of an SCTP packet. */
83static inline int sctp_rcv_checksum(struct sk_buff *skb) 86static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
84{ 87{
85 struct sctphdr *sh = sctp_hdr(skb); 88 struct sctphdr *sh = sctp_hdr(skb);
86 __le32 cmp = sh->checksum; 89 __le32 cmp = sh->checksum;
@@ -96,7 +99,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
96 99
97 if (val != cmp) { 100 if (val != cmp) {
98 /* CRC failure, dump it. */ 101 /* CRC failure, dump it. */
99 SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS); 102 SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
100 return -1; 103 return -1;
101 } 104 }
102 return 0; 105 return 0;
@@ -129,11 +132,12 @@ int sctp_rcv(struct sk_buff *skb)
129 union sctp_addr dest; 132 union sctp_addr dest;
130 int family; 133 int family;
131 struct sctp_af *af; 134 struct sctp_af *af;
135 struct net *net = dev_net(skb->dev);
132 136
133 if (skb->pkt_type!=PACKET_HOST) 137 if (skb->pkt_type!=PACKET_HOST)
134 goto discard_it; 138 goto discard_it;
135 139
136 SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS); 140 SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
137 141
138 if (skb_linearize(skb)) 142 if (skb_linearize(skb))
139 goto discard_it; 143 goto discard_it;
@@ -145,7 +149,7 @@ int sctp_rcv(struct sk_buff *skb)
145 if (skb->len < sizeof(struct sctphdr)) 149 if (skb->len < sizeof(struct sctphdr))
146 goto discard_it; 150 goto discard_it;
147 if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) && 151 if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
148 sctp_rcv_checksum(skb) < 0) 152 sctp_rcv_checksum(net, skb) < 0)
149 goto discard_it; 153 goto discard_it;
150 154
151 skb_pull(skb, sizeof(struct sctphdr)); 155 skb_pull(skb, sizeof(struct sctphdr));
@@ -178,10 +182,10 @@ int sctp_rcv(struct sk_buff *skb)
178 !af->addr_valid(&dest, NULL, skb)) 182 !af->addr_valid(&dest, NULL, skb))
179 goto discard_it; 183 goto discard_it;
180 184
181 asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport); 185 asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
182 186
183 if (!asoc) 187 if (!asoc)
184 ep = __sctp_rcv_lookup_endpoint(&dest); 188 ep = __sctp_rcv_lookup_endpoint(net, &dest);
185 189
186 /* Retrieve the common input handling substructure. */ 190 /* Retrieve the common input handling substructure. */
187 rcvr = asoc ? &asoc->base : &ep->base; 191 rcvr = asoc ? &asoc->base : &ep->base;
@@ -200,7 +204,7 @@ int sctp_rcv(struct sk_buff *skb)
200 sctp_endpoint_put(ep); 204 sctp_endpoint_put(ep);
201 ep = NULL; 205 ep = NULL;
202 } 206 }
203 sk = sctp_get_ctl_sock(); 207 sk = net->sctp.ctl_sock;
204 ep = sctp_sk(sk)->ep; 208 ep = sctp_sk(sk)->ep;
205 sctp_endpoint_hold(ep); 209 sctp_endpoint_hold(ep);
206 rcvr = &ep->base; 210 rcvr = &ep->base;
@@ -216,7 +220,7 @@ int sctp_rcv(struct sk_buff *skb)
216 */ 220 */
217 if (!asoc) { 221 if (!asoc) {
218 if (sctp_rcv_ootb(skb)) { 222 if (sctp_rcv_ootb(skb)) {
219 SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES); 223 SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
220 goto discard_release; 224 goto discard_release;
221 } 225 }
222 } 226 }
@@ -272,9 +276,9 @@ int sctp_rcv(struct sk_buff *skb)
272 skb = NULL; /* sctp_chunk_free already freed the skb */ 276 skb = NULL; /* sctp_chunk_free already freed the skb */
273 goto discard_release; 277 goto discard_release;
274 } 278 }
275 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); 279 SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
276 } else { 280 } else {
277 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); 281 SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
278 sctp_inq_push(&chunk->rcvr->inqueue, chunk); 282 sctp_inq_push(&chunk->rcvr->inqueue, chunk);
279 } 283 }
280 284
@@ -289,7 +293,7 @@ int sctp_rcv(struct sk_buff *skb)
289 return 0; 293 return 0;
290 294
291discard_it: 295discard_it:
292 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS); 296 SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
293 kfree_skb(skb); 297 kfree_skb(skb);
294 return 0; 298 return 0;
295 299
@@ -462,11 +466,13 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
462 } 466 }
463 467
464 } else { 468 } else {
469 struct net *net = sock_net(sk);
470
465 if (timer_pending(&t->proto_unreach_timer) && 471 if (timer_pending(&t->proto_unreach_timer) &&
466 del_timer(&t->proto_unreach_timer)) 472 del_timer(&t->proto_unreach_timer))
467 sctp_association_put(asoc); 473 sctp_association_put(asoc);
468 474
469 sctp_do_sm(SCTP_EVENT_T_OTHER, 475 sctp_do_sm(net, SCTP_EVENT_T_OTHER,
470 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), 476 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
471 asoc->state, asoc->ep, asoc, t, 477 asoc->state, asoc->ep, asoc, t,
472 GFP_ATOMIC); 478 GFP_ATOMIC);
@@ -474,7 +480,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
474} 480}
475 481
476/* Common lookup code for icmp/icmpv6 error handler. */ 482/* Common lookup code for icmp/icmpv6 error handler. */
477struct sock *sctp_err_lookup(int family, struct sk_buff *skb, 483struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
478 struct sctphdr *sctphdr, 484 struct sctphdr *sctphdr,
479 struct sctp_association **app, 485 struct sctp_association **app,
480 struct sctp_transport **tpp) 486 struct sctp_transport **tpp)
@@ -503,7 +509,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
503 /* Look for an association that matches the incoming ICMP error 509 /* Look for an association that matches the incoming ICMP error
504 * packet. 510 * packet.
505 */ 511 */
506 asoc = __sctp_lookup_association(&saddr, &daddr, &transport); 512 asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
507 if (!asoc) 513 if (!asoc)
508 return NULL; 514 return NULL;
509 515
@@ -539,7 +545,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
539 * servers this needs to be solved differently. 545 * servers this needs to be solved differently.
540 */ 546 */
541 if (sock_owned_by_user(sk)) 547 if (sock_owned_by_user(sk))
542 NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS); 548 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
543 549
544 *app = asoc; 550 *app = asoc;
545 *tpp = transport; 551 *tpp = transport;
@@ -586,9 +592,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
586 struct inet_sock *inet; 592 struct inet_sock *inet;
587 sk_buff_data_t saveip, savesctp; 593 sk_buff_data_t saveip, savesctp;
588 int err; 594 int err;
595 struct net *net = dev_net(skb->dev);
589 596
590 if (skb->len < ihlen + 8) { 597 if (skb->len < ihlen + 8) {
591 ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); 598 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
592 return; 599 return;
593 } 600 }
594 601
@@ -597,12 +604,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
597 savesctp = skb->transport_header; 604 savesctp = skb->transport_header;
598 skb_reset_network_header(skb); 605 skb_reset_network_header(skb);
599 skb_set_transport_header(skb, ihlen); 606 skb_set_transport_header(skb, ihlen);
600 sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); 607 sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
601 /* Put back, the original values. */ 608 /* Put back, the original values. */
602 skb->network_header = saveip; 609 skb->network_header = saveip;
603 skb->transport_header = savesctp; 610 skb->transport_header = savesctp;
604 if (!sk) { 611 if (!sk) {
605 ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS); 612 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
606 return; 613 return;
607 } 614 }
608 /* Warning: The sock lock is held. Remember to call 615 /* Warning: The sock lock is held. Remember to call
@@ -723,12 +730,13 @@ discard:
723/* Insert endpoint into the hash table. */ 730/* Insert endpoint into the hash table. */
724static void __sctp_hash_endpoint(struct sctp_endpoint *ep) 731static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
725{ 732{
733 struct net *net = sock_net(ep->base.sk);
726 struct sctp_ep_common *epb; 734 struct sctp_ep_common *epb;
727 struct sctp_hashbucket *head; 735 struct sctp_hashbucket *head;
728 736
729 epb = &ep->base; 737 epb = &ep->base;
730 738
731 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); 739 epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
732 head = &sctp_ep_hashtable[epb->hashent]; 740 head = &sctp_ep_hashtable[epb->hashent];
733 741
734 sctp_write_lock(&head->lock); 742 sctp_write_lock(&head->lock);
@@ -747,12 +755,13 @@ void sctp_hash_endpoint(struct sctp_endpoint *ep)
747/* Remove endpoint from the hash table. */ 755/* Remove endpoint from the hash table. */
748static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) 756static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
749{ 757{
758 struct net *net = sock_net(ep->base.sk);
750 struct sctp_hashbucket *head; 759 struct sctp_hashbucket *head;
751 struct sctp_ep_common *epb; 760 struct sctp_ep_common *epb;
752 761
753 epb = &ep->base; 762 epb = &ep->base;
754 763
755 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); 764 epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
756 765
757 head = &sctp_ep_hashtable[epb->hashent]; 766 head = &sctp_ep_hashtable[epb->hashent];
758 767
@@ -770,7 +779,8 @@ void sctp_unhash_endpoint(struct sctp_endpoint *ep)
770} 779}
771 780
772/* Look up an endpoint. */ 781/* Look up an endpoint. */
773static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr) 782static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
783 const union sctp_addr *laddr)
774{ 784{
775 struct sctp_hashbucket *head; 785 struct sctp_hashbucket *head;
776 struct sctp_ep_common *epb; 786 struct sctp_ep_common *epb;
@@ -778,16 +788,16 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
778 struct hlist_node *node; 788 struct hlist_node *node;
779 int hash; 789 int hash;
780 790
781 hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port)); 791 hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
782 head = &sctp_ep_hashtable[hash]; 792 head = &sctp_ep_hashtable[hash];
783 read_lock(&head->lock); 793 read_lock(&head->lock);
784 sctp_for_each_hentry(epb, node, &head->chain) { 794 sctp_for_each_hentry(epb, node, &head->chain) {
785 ep = sctp_ep(epb); 795 ep = sctp_ep(epb);
786 if (sctp_endpoint_is_match(ep, laddr)) 796 if (sctp_endpoint_is_match(ep, net, laddr))
787 goto hit; 797 goto hit;
788 } 798 }
789 799
790 ep = sctp_sk((sctp_get_ctl_sock()))->ep; 800 ep = sctp_sk(net->sctp.ctl_sock)->ep;
791 801
792hit: 802hit:
793 sctp_endpoint_hold(ep); 803 sctp_endpoint_hold(ep);
@@ -798,13 +808,15 @@ hit:
798/* Insert association into the hash table. */ 808/* Insert association into the hash table. */
799static void __sctp_hash_established(struct sctp_association *asoc) 809static void __sctp_hash_established(struct sctp_association *asoc)
800{ 810{
811 struct net *net = sock_net(asoc->base.sk);
801 struct sctp_ep_common *epb; 812 struct sctp_ep_common *epb;
802 struct sctp_hashbucket *head; 813 struct sctp_hashbucket *head;
803 814
804 epb = &asoc->base; 815 epb = &asoc->base;
805 816
806 /* Calculate which chain this entry will belong to. */ 817 /* Calculate which chain this entry will belong to. */
807 epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port); 818 epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
819 asoc->peer.port);
808 820
809 head = &sctp_assoc_hashtable[epb->hashent]; 821 head = &sctp_assoc_hashtable[epb->hashent];
810 822
@@ -827,12 +839,13 @@ void sctp_hash_established(struct sctp_association *asoc)
827/* Remove association from the hash table. */ 839/* Remove association from the hash table. */
828static void __sctp_unhash_established(struct sctp_association *asoc) 840static void __sctp_unhash_established(struct sctp_association *asoc)
829{ 841{
842 struct net *net = sock_net(asoc->base.sk);
830 struct sctp_hashbucket *head; 843 struct sctp_hashbucket *head;
831 struct sctp_ep_common *epb; 844 struct sctp_ep_common *epb;
832 845
833 epb = &asoc->base; 846 epb = &asoc->base;
834 847
835 epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, 848 epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
836 asoc->peer.port); 849 asoc->peer.port);
837 850
838 head = &sctp_assoc_hashtable[epb->hashent]; 851 head = &sctp_assoc_hashtable[epb->hashent];
@@ -855,6 +868,7 @@ void sctp_unhash_established(struct sctp_association *asoc)
855 868
856/* Look up an association. */ 869/* Look up an association. */
857static struct sctp_association *__sctp_lookup_association( 870static struct sctp_association *__sctp_lookup_association(
871 struct net *net,
858 const union sctp_addr *local, 872 const union sctp_addr *local,
859 const union sctp_addr *peer, 873 const union sctp_addr *peer,
860 struct sctp_transport **pt) 874 struct sctp_transport **pt)
@@ -869,12 +883,13 @@ static struct sctp_association *__sctp_lookup_association(
869 /* Optimize here for direct hit, only listening connections can 883 /* Optimize here for direct hit, only listening connections can
870 * have wildcards anyways. 884 * have wildcards anyways.
871 */ 885 */
872 hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port)); 886 hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
887 ntohs(peer->v4.sin_port));
873 head = &sctp_assoc_hashtable[hash]; 888 head = &sctp_assoc_hashtable[hash];
874 read_lock(&head->lock); 889 read_lock(&head->lock);
875 sctp_for_each_hentry(epb, node, &head->chain) { 890 sctp_for_each_hentry(epb, node, &head->chain) {
876 asoc = sctp_assoc(epb); 891 asoc = sctp_assoc(epb);
877 transport = sctp_assoc_is_match(asoc, local, peer); 892 transport = sctp_assoc_is_match(asoc, net, local, peer);
878 if (transport) 893 if (transport)
879 goto hit; 894 goto hit;
880 } 895 }
@@ -892,27 +907,29 @@ hit:
892 907
893/* Look up an association. BH-safe. */ 908/* Look up an association. BH-safe. */
894SCTP_STATIC 909SCTP_STATIC
895struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr, 910struct sctp_association *sctp_lookup_association(struct net *net,
911 const union sctp_addr *laddr,
896 const union sctp_addr *paddr, 912 const union sctp_addr *paddr,
897 struct sctp_transport **transportp) 913 struct sctp_transport **transportp)
898{ 914{
899 struct sctp_association *asoc; 915 struct sctp_association *asoc;
900 916
901 sctp_local_bh_disable(); 917 sctp_local_bh_disable();
902 asoc = __sctp_lookup_association(laddr, paddr, transportp); 918 asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
903 sctp_local_bh_enable(); 919 sctp_local_bh_enable();
904 920
905 return asoc; 921 return asoc;
906} 922}
907 923
908/* Is there an association matching the given local and peer addresses? */ 924/* Is there an association matching the given local and peer addresses? */
909int sctp_has_association(const union sctp_addr *laddr, 925int sctp_has_association(struct net *net,
926 const union sctp_addr *laddr,
910 const union sctp_addr *paddr) 927 const union sctp_addr *paddr)
911{ 928{
912 struct sctp_association *asoc; 929 struct sctp_association *asoc;
913 struct sctp_transport *transport; 930 struct sctp_transport *transport;
914 931
915 if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { 932 if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
916 sctp_association_put(asoc); 933 sctp_association_put(asoc);
917 return 1; 934 return 1;
918 } 935 }
@@ -938,7 +955,8 @@ int sctp_has_association(const union sctp_addr *laddr,
938 * in certain circumstances. 955 * in certain circumstances.
939 * 956 *
940 */ 957 */
941static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, 958static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
959 struct sk_buff *skb,
942 const union sctp_addr *laddr, struct sctp_transport **transportp) 960 const union sctp_addr *laddr, struct sctp_transport **transportp)
943{ 961{
944 struct sctp_association *asoc; 962 struct sctp_association *asoc;
@@ -978,7 +996,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
978 996
979 af->from_addr_param(paddr, params.addr, sh->source, 0); 997 af->from_addr_param(paddr, params.addr, sh->source, 0);
980 998
981 asoc = __sctp_lookup_association(laddr, paddr, &transport); 999 asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
982 if (asoc) 1000 if (asoc)
983 return asoc; 1001 return asoc;
984 } 1002 }
@@ -1001,6 +1019,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
1001 * subsequent ASCONF Chunks. If found, proceed to rule D4. 1019 * subsequent ASCONF Chunks. If found, proceed to rule D4.
1002 */ 1020 */
1003static struct sctp_association *__sctp_rcv_asconf_lookup( 1021static struct sctp_association *__sctp_rcv_asconf_lookup(
1022 struct net *net,
1004 sctp_chunkhdr_t *ch, 1023 sctp_chunkhdr_t *ch,
1005 const union sctp_addr *laddr, 1024 const union sctp_addr *laddr,
1006 __be16 peer_port, 1025 __be16 peer_port,
@@ -1020,7 +1039,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1020 1039
1021 af->from_addr_param(&paddr, param, peer_port, 0); 1040 af->from_addr_param(&paddr, param, peer_port, 0);
1022 1041
1023 return __sctp_lookup_association(laddr, &paddr, transportp); 1042 return __sctp_lookup_association(net, laddr, &paddr, transportp);
1024} 1043}
1025 1044
1026 1045
@@ -1033,7 +1052,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1033* This means that any chunks that can help us identify the association need 1052* This means that any chunks that can help us identify the association need
1034* to be looked at to find this association. 1053* to be looked at to find this association.
1035*/ 1054*/
1036static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb, 1055static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
1056 struct sk_buff *skb,
1037 const union sctp_addr *laddr, 1057 const union sctp_addr *laddr,
1038 struct sctp_transport **transportp) 1058 struct sctp_transport **transportp)
1039{ 1059{
@@ -1074,8 +1094,9 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
1074 break; 1094 break;
1075 1095
1076 case SCTP_CID_ASCONF: 1096 case SCTP_CID_ASCONF:
1077 if (have_auth || sctp_addip_noauth) 1097 if (have_auth || net->sctp.addip_noauth)
1078 asoc = __sctp_rcv_asconf_lookup(ch, laddr, 1098 asoc = __sctp_rcv_asconf_lookup(
1099 net, ch, laddr,
1079 sctp_hdr(skb)->source, 1100 sctp_hdr(skb)->source,
1080 transportp); 1101 transportp);
1081 default: 1102 default:
@@ -1098,7 +1119,8 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
1098 * include looking inside of INIT/INIT-ACK chunks or after the AUTH 1119 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
1099 * chunks. 1120 * chunks.
1100 */ 1121 */
1101static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb, 1122static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1123 struct sk_buff *skb,
1102 const union sctp_addr *laddr, 1124 const union sctp_addr *laddr,
1103 struct sctp_transport **transportp) 1125 struct sctp_transport **transportp)
1104{ 1126{
@@ -1118,11 +1140,11 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
1118 switch (ch->type) { 1140 switch (ch->type) {
1119 case SCTP_CID_INIT: 1141 case SCTP_CID_INIT:
1120 case SCTP_CID_INIT_ACK: 1142 case SCTP_CID_INIT_ACK:
1121 return __sctp_rcv_init_lookup(skb, laddr, transportp); 1143 return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
1122 break; 1144 break;
1123 1145
1124 default: 1146 default:
1125 return __sctp_rcv_walk_lookup(skb, laddr, transportp); 1147 return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
1126 break; 1148 break;
1127 } 1149 }
1128 1150
@@ -1131,21 +1153,22 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
1131} 1153}
1132 1154
1133/* Lookup an association for an inbound skb. */ 1155/* Lookup an association for an inbound skb. */
1134static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, 1156static struct sctp_association *__sctp_rcv_lookup(struct net *net,
1157 struct sk_buff *skb,
1135 const union sctp_addr *paddr, 1158 const union sctp_addr *paddr,
1136 const union sctp_addr *laddr, 1159 const union sctp_addr *laddr,
1137 struct sctp_transport **transportp) 1160 struct sctp_transport **transportp)
1138{ 1161{
1139 struct sctp_association *asoc; 1162 struct sctp_association *asoc;
1140 1163
1141 asoc = __sctp_lookup_association(laddr, paddr, transportp); 1164 asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1142 1165
1143 /* Further lookup for INIT/INIT-ACK packets. 1166 /* Further lookup for INIT/INIT-ACK packets.
1144 * SCTP Implementors Guide, 2.18 Handling of address 1167 * SCTP Implementors Guide, 2.18 Handling of address
1145 * parameters within the INIT or INIT-ACK. 1168 * parameters within the INIT or INIT-ACK.
1146 */ 1169 */
1147 if (!asoc) 1170 if (!asoc)
1148 asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp); 1171 asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
1149 1172
1150 return asoc; 1173 return asoc;
1151} 1174}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ed7139ea7978..ea14cb445295 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -99,6 +99,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
99 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 99 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
100 struct sctp_sockaddr_entry *addr = NULL; 100 struct sctp_sockaddr_entry *addr = NULL;
101 struct sctp_sockaddr_entry *temp; 101 struct sctp_sockaddr_entry *temp;
102 struct net *net = dev_net(ifa->idev->dev);
102 int found = 0; 103 int found = 0;
103 104
104 switch (ev) { 105 switch (ev) {
@@ -110,27 +111,27 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
110 addr->a.v6.sin6_addr = ifa->addr; 111 addr->a.v6.sin6_addr = ifa->addr;
111 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; 112 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
112 addr->valid = 1; 113 addr->valid = 1;
113 spin_lock_bh(&sctp_local_addr_lock); 114 spin_lock_bh(&net->sctp.local_addr_lock);
114 list_add_tail_rcu(&addr->list, &sctp_local_addr_list); 115 list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
115 sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW); 116 sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
116 spin_unlock_bh(&sctp_local_addr_lock); 117 spin_unlock_bh(&net->sctp.local_addr_lock);
117 } 118 }
118 break; 119 break;
119 case NETDEV_DOWN: 120 case NETDEV_DOWN:
120 spin_lock_bh(&sctp_local_addr_lock); 121 spin_lock_bh(&net->sctp.local_addr_lock);
121 list_for_each_entry_safe(addr, temp, 122 list_for_each_entry_safe(addr, temp,
122 &sctp_local_addr_list, list) { 123 &net->sctp.local_addr_list, list) {
123 if (addr->a.sa.sa_family == AF_INET6 && 124 if (addr->a.sa.sa_family == AF_INET6 &&
124 ipv6_addr_equal(&addr->a.v6.sin6_addr, 125 ipv6_addr_equal(&addr->a.v6.sin6_addr,
125 &ifa->addr)) { 126 &ifa->addr)) {
126 sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL); 127 sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
127 found = 1; 128 found = 1;
128 addr->valid = 0; 129 addr->valid = 0;
129 list_del_rcu(&addr->list); 130 list_del_rcu(&addr->list);
130 break; 131 break;
131 } 132 }
132 } 133 }
133 spin_unlock_bh(&sctp_local_addr_lock); 134 spin_unlock_bh(&net->sctp.local_addr_lock);
134 if (found) 135 if (found)
135 kfree_rcu(addr, rcu); 136 kfree_rcu(addr, rcu);
136 break; 137 break;
@@ -154,6 +155,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
154 struct ipv6_pinfo *np; 155 struct ipv6_pinfo *np;
155 sk_buff_data_t saveip, savesctp; 156 sk_buff_data_t saveip, savesctp;
156 int err; 157 int err;
158 struct net *net = dev_net(skb->dev);
157 159
158 idev = in6_dev_get(skb->dev); 160 idev = in6_dev_get(skb->dev);
159 161
@@ -162,12 +164,12 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
162 savesctp = skb->transport_header; 164 savesctp = skb->transport_header;
163 skb_reset_network_header(skb); 165 skb_reset_network_header(skb);
164 skb_set_transport_header(skb, offset); 166 skb_set_transport_header(skb, offset);
165 sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); 167 sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
166 /* Put back, the original pointers. */ 168 /* Put back, the original pointers. */
167 skb->network_header = saveip; 169 skb->network_header = saveip;
168 skb->transport_header = savesctp; 170 skb->transport_header = savesctp;
169 if (!sk) { 171 if (!sk) {
170 ICMP6_INC_STATS_BH(dev_net(skb->dev), idev, ICMP6_MIB_INERRORS); 172 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
171 goto out; 173 goto out;
172 } 174 }
173 175
@@ -241,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
241 __func__, skb, skb->len, 243 __func__, skb, skb->len,
242 &fl6.saddr, &fl6.daddr); 244 &fl6.saddr, &fl6.daddr);
243 245
244 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 246 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
245 247
246 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 248 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
247 skb->local_df = 1; 249 skb->local_df = 1;
@@ -580,7 +582,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
580 if (!(type & IPV6_ADDR_UNICAST)) 582 if (!(type & IPV6_ADDR_UNICAST))
581 return 0; 583 return 0;
582 584
583 return ipv6_chk_addr(&init_net, in6, NULL, 0); 585 return ipv6_chk_addr(sock_net(&sp->inet.sk), in6, NULL, 0);
584} 586}
585 587
586/* This function checks if the address is a valid address to be used for 588/* This function checks if the address is a valid address to be used for
@@ -857,14 +859,14 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
857 struct net_device *dev; 859 struct net_device *dev;
858 860
859 if (type & IPV6_ADDR_LINKLOCAL) { 861 if (type & IPV6_ADDR_LINKLOCAL) {
862 struct net *net;
860 if (!addr->v6.sin6_scope_id) 863 if (!addr->v6.sin6_scope_id)
861 return 0; 864 return 0;
865 net = sock_net(&opt->inet.sk);
862 rcu_read_lock(); 866 rcu_read_lock();
863 dev = dev_get_by_index_rcu(&init_net, 867 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
864 addr->v6.sin6_scope_id);
865 if (!dev || 868 if (!dev ||
866 !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr, 869 !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
867 dev, 0)) {
868 rcu_read_unlock(); 870 rcu_read_unlock();
869 return 0; 871 return 0;
870 } 872 }
@@ -897,7 +899,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
897 if (!addr->v6.sin6_scope_id) 899 if (!addr->v6.sin6_scope_id)
898 return 0; 900 return 0;
899 rcu_read_lock(); 901 rcu_read_lock();
900 dev = dev_get_by_index_rcu(&init_net, 902 dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
901 addr->v6.sin6_scope_id); 903 addr->v6.sin6_scope_id);
902 rcu_read_unlock(); 904 rcu_read_unlock();
903 if (!dev) 905 if (!dev)
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index 8ef8e7d9eb61..fe012c44f8df 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -129,20 +129,20 @@ static const struct file_operations sctp_objcnt_ops = {
129}; 129};
130 130
131/* Initialize the objcount in the proc filesystem. */ 131/* Initialize the objcount in the proc filesystem. */
132void sctp_dbg_objcnt_init(void) 132void sctp_dbg_objcnt_init(struct net *net)
133{ 133{
134 struct proc_dir_entry *ent; 134 struct proc_dir_entry *ent;
135 135
136 ent = proc_create("sctp_dbg_objcnt", 0, 136 ent = proc_create("sctp_dbg_objcnt", 0,
137 proc_net_sctp, &sctp_objcnt_ops); 137 net->sctp.proc_net_sctp, &sctp_objcnt_ops);
138 if (!ent) 138 if (!ent)
139 pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n"); 139 pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
140} 140}
141 141
142/* Cleanup the objcount entry in the proc filesystem. */ 142/* Cleanup the objcount entry in the proc filesystem. */
143void sctp_dbg_objcnt_exit(void) 143void sctp_dbg_objcnt_exit(struct net *net)
144{ 144{
145 remove_proc_entry("sctp_dbg_objcnt", proc_net_sctp); 145 remove_proc_entry("sctp_dbg_objcnt", net->sctp.proc_net_sctp);
146} 146}
147 147
148 148
diff --git a/net/sctp/output.c b/net/sctp/output.c
index be50aa234dcd..4e90188bf489 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -616,7 +616,7 @@ out:
616 return err; 616 return err;
617no_route: 617no_route:
618 kfree_skb(nskb); 618 kfree_skb(nskb);
619 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); 619 IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
620 620
621 /* FIXME: Returning the 'err' will effect all the associations 621 /* FIXME: Returning the 'err' will effect all the associations
622 * associated with a socket, although only one of the paths of the 622 * associated with a socket, although only one of the paths of the
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e7aa177c9522..d16632e1503a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -299,6 +299,7 @@ void sctp_outq_free(struct sctp_outq *q)
299/* Put a new chunk in an sctp_outq. */ 299/* Put a new chunk in an sctp_outq. */
300int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) 300int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
301{ 301{
302 struct net *net = sock_net(q->asoc->base.sk);
302 int error = 0; 303 int error = 0;
303 304
304 SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n", 305 SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
@@ -337,15 +338,15 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
337 338
338 sctp_outq_tail_data(q, chunk); 339 sctp_outq_tail_data(q, chunk);
339 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 340 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
340 SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS); 341 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
341 else 342 else
342 SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS); 343 SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
343 q->empty = 0; 344 q->empty = 0;
344 break; 345 break;
345 } 346 }
346 } else { 347 } else {
347 list_add_tail(&chunk->list, &q->control_chunk_list); 348 list_add_tail(&chunk->list, &q->control_chunk_list);
348 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 349 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
349 } 350 }
350 351
351 if (error < 0) 352 if (error < 0)
@@ -478,11 +479,12 @@ void sctp_retransmit_mark(struct sctp_outq *q,
478void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, 479void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
479 sctp_retransmit_reason_t reason) 480 sctp_retransmit_reason_t reason)
480{ 481{
482 struct net *net = sock_net(q->asoc->base.sk);
481 int error = 0; 483 int error = 0;
482 484
483 switch(reason) { 485 switch(reason) {
484 case SCTP_RTXR_T3_RTX: 486 case SCTP_RTXR_T3_RTX:
485 SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS); 487 SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
486 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); 488 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
487 /* Update the retran path if the T3-rtx timer has expired for 489 /* Update the retran path if the T3-rtx timer has expired for
488 * the current retran path. 490 * the current retran path.
@@ -493,15 +495,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
493 transport->asoc->unack_data; 495 transport->asoc->unack_data;
494 break; 496 break;
495 case SCTP_RTXR_FAST_RTX: 497 case SCTP_RTXR_FAST_RTX:
496 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 498 SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
497 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 499 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
498 q->fast_rtx = 1; 500 q->fast_rtx = 1;
499 break; 501 break;
500 case SCTP_RTXR_PMTUD: 502 case SCTP_RTXR_PMTUD:
501 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); 503 SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
502 break; 504 break;
503 case SCTP_RTXR_T1_RTX: 505 case SCTP_RTXR_T1_RTX:
504 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); 506 SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
505 transport->asoc->init_retries++; 507 transport->asoc->init_retries++;
506 break; 508 break;
507 default: 509 default:
@@ -589,9 +591,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
589 * next chunk. 591 * next chunk.
590 */ 592 */
591 if (chunk->tsn_gap_acked) { 593 if (chunk->tsn_gap_acked) {
592 list_del(&chunk->transmitted_list); 594 list_move_tail(&chunk->transmitted_list,
593 list_add_tail(&chunk->transmitted_list, 595 &transport->transmitted);
594 &transport->transmitted);
595 continue; 596 continue;
596 } 597 }
597 598
@@ -655,9 +656,8 @@ redo:
655 /* The append was successful, so add this chunk to 656 /* The append was successful, so add this chunk to
656 * the transmitted list. 657 * the transmitted list.
657 */ 658 */
658 list_del(&chunk->transmitted_list); 659 list_move_tail(&chunk->transmitted_list,
659 list_add_tail(&chunk->transmitted_list, 660 &transport->transmitted);
660 &transport->transmitted);
661 661
662 /* Mark the chunk as ineligible for fast retransmit 662 /* Mark the chunk as ineligible for fast retransmit
663 * after it is retransmitted. 663 * after it is retransmitted.
@@ -1914,6 +1914,6 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1914 1914
1915 if (ftsn_chunk) { 1915 if (ftsn_chunk) {
1916 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1916 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1917 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 1917 SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1918 } 1918 }
1919} 1919}
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 534c7eae9d15..794bb14decde 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -57,7 +57,7 @@
57 57
58#define DECLARE_PRIMITIVE(name) \ 58#define DECLARE_PRIMITIVE(name) \
59/* This is called in the code as sctp_primitive_ ## name. */ \ 59/* This is called in the code as sctp_primitive_ ## name. */ \
60int sctp_primitive_ ## name(struct sctp_association *asoc, \ 60int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
61 void *arg) { \ 61 void *arg) { \
62 int error = 0; \ 62 int error = 0; \
63 sctp_event_t event_type; sctp_subtype_t subtype; \ 63 sctp_event_t event_type; sctp_subtype_t subtype; \
@@ -69,7 +69,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
69 state = asoc ? asoc->state : SCTP_STATE_CLOSED; \ 69 state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
70 ep = asoc ? asoc->ep : NULL; \ 70 ep = asoc ? asoc->ep : NULL; \
71 \ 71 \
72 error = sctp_do_sm(event_type, subtype, state, ep, asoc, \ 72 error = sctp_do_sm(net, event_type, subtype, state, ep, asoc, \
73 arg, GFP_KERNEL); \ 73 arg, GFP_KERNEL); \
74 return error; \ 74 return error; \
75} 75}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index dc12febc977a..c3bea269faf4 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -80,11 +80,12 @@ static const struct snmp_mib sctp_snmp_list[] = {
80/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */ 80/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
81static int sctp_snmp_seq_show(struct seq_file *seq, void *v) 81static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
82{ 82{
83 struct net *net = seq->private;
83 int i; 84 int i;
84 85
85 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 86 for (i = 0; sctp_snmp_list[i].name != NULL; i++)
86 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 87 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
87 snmp_fold_field((void __percpu **)sctp_statistics, 88 snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
88 sctp_snmp_list[i].entry)); 89 sctp_snmp_list[i].entry));
89 90
90 return 0; 91 return 0;
@@ -93,7 +94,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
93/* Initialize the seq file operations for 'snmp' object. */ 94/* Initialize the seq file operations for 'snmp' object. */
94static int sctp_snmp_seq_open(struct inode *inode, struct file *file) 95static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
95{ 96{
96 return single_open(file, sctp_snmp_seq_show, NULL); 97 return single_open_net(inode, file, sctp_snmp_seq_show);
97} 98}
98 99
99static const struct file_operations sctp_snmp_seq_fops = { 100static const struct file_operations sctp_snmp_seq_fops = {
@@ -105,11 +106,12 @@ static const struct file_operations sctp_snmp_seq_fops = {
105}; 106};
106 107
107/* Set up the proc fs entry for 'snmp' object. */ 108/* Set up the proc fs entry for 'snmp' object. */
108int __init sctp_snmp_proc_init(void) 109int __net_init sctp_snmp_proc_init(struct net *net)
109{ 110{
110 struct proc_dir_entry *p; 111 struct proc_dir_entry *p;
111 112
112 p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops); 113 p = proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp,
114 &sctp_snmp_seq_fops);
113 if (!p) 115 if (!p)
114 return -ENOMEM; 116 return -ENOMEM;
115 117
@@ -117,9 +119,9 @@ int __init sctp_snmp_proc_init(void)
117} 119}
118 120
119/* Cleanup the proc fs entry for 'snmp' object. */ 121/* Cleanup the proc fs entry for 'snmp' object. */
120void sctp_snmp_proc_exit(void) 122void sctp_snmp_proc_exit(struct net *net)
121{ 123{
122 remove_proc_entry("snmp", proc_net_sctp); 124 remove_proc_entry("snmp", net->sctp.proc_net_sctp);
123} 125}
124 126
125/* Dump local addresses of an association/endpoint. */ 127/* Dump local addresses of an association/endpoint. */
@@ -213,6 +215,8 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
213 sctp_for_each_hentry(epb, node, &head->chain) { 215 sctp_for_each_hentry(epb, node, &head->chain) {
214 ep = sctp_ep(epb); 216 ep = sctp_ep(epb);
215 sk = epb->sk; 217 sk = epb->sk;
218 if (!net_eq(sock_net(sk), seq_file_net(seq)))
219 continue;
216 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 220 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
217 sctp_sk(sk)->type, sk->sk_state, hash, 221 sctp_sk(sk)->type, sk->sk_state, hash,
218 epb->bind_addr.port, 222 epb->bind_addr.port,
@@ -239,7 +243,8 @@ static const struct seq_operations sctp_eps_ops = {
239/* Initialize the seq file operations for 'eps' object. */ 243/* Initialize the seq file operations for 'eps' object. */
240static int sctp_eps_seq_open(struct inode *inode, struct file *file) 244static int sctp_eps_seq_open(struct inode *inode, struct file *file)
241{ 245{
242 return seq_open(file, &sctp_eps_ops); 246 return seq_open_net(inode, file, &sctp_eps_ops,
247 sizeof(struct seq_net_private));
243} 248}
244 249
245static const struct file_operations sctp_eps_seq_fops = { 250static const struct file_operations sctp_eps_seq_fops = {
@@ -250,11 +255,12 @@ static const struct file_operations sctp_eps_seq_fops = {
250}; 255};
251 256
252/* Set up the proc fs entry for 'eps' object. */ 257/* Set up the proc fs entry for 'eps' object. */
253int __init sctp_eps_proc_init(void) 258int __net_init sctp_eps_proc_init(struct net *net)
254{ 259{
255 struct proc_dir_entry *p; 260 struct proc_dir_entry *p;
256 261
257 p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops); 262 p = proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp,
263 &sctp_eps_seq_fops);
258 if (!p) 264 if (!p)
259 return -ENOMEM; 265 return -ENOMEM;
260 266
@@ -262,9 +268,9 @@ int __init sctp_eps_proc_init(void)
262} 268}
263 269
264/* Cleanup the proc fs entry for 'eps' object. */ 270/* Cleanup the proc fs entry for 'eps' object. */
265void sctp_eps_proc_exit(void) 271void sctp_eps_proc_exit(struct net *net)
266{ 272{
267 remove_proc_entry("eps", proc_net_sctp); 273 remove_proc_entry("eps", net->sctp.proc_net_sctp);
268} 274}
269 275
270 276
@@ -317,6 +323,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
317 sctp_for_each_hentry(epb, node, &head->chain) { 323 sctp_for_each_hentry(epb, node, &head->chain) {
318 assoc = sctp_assoc(epb); 324 assoc = sctp_assoc(epb);
319 sk = epb->sk; 325 sk = epb->sk;
326 if (!net_eq(sock_net(sk), seq_file_net(seq)))
327 continue;
320 seq_printf(seq, 328 seq_printf(seq,
321 "%8pK %8pK %-3d %-3d %-2d %-4d " 329 "%8pK %8pK %-3d %-3d %-2d %-4d "
322 "%4d %8d %8d %7d %5lu %-5d %5d ", 330 "%4d %8d %8d %7d %5lu %-5d %5d ",
@@ -356,7 +364,8 @@ static const struct seq_operations sctp_assoc_ops = {
356/* Initialize the seq file operations for 'assocs' object. */ 364/* Initialize the seq file operations for 'assocs' object. */
357static int sctp_assocs_seq_open(struct inode *inode, struct file *file) 365static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
358{ 366{
359 return seq_open(file, &sctp_assoc_ops); 367 return seq_open_net(inode, file, &sctp_assoc_ops,
368 sizeof(struct seq_net_private));
360} 369}
361 370
362static const struct file_operations sctp_assocs_seq_fops = { 371static const struct file_operations sctp_assocs_seq_fops = {
@@ -367,11 +376,11 @@ static const struct file_operations sctp_assocs_seq_fops = {
367}; 376};
368 377
369/* Set up the proc fs entry for 'assocs' object. */ 378/* Set up the proc fs entry for 'assocs' object. */
370int __init sctp_assocs_proc_init(void) 379int __net_init sctp_assocs_proc_init(struct net *net)
371{ 380{
372 struct proc_dir_entry *p; 381 struct proc_dir_entry *p;
373 382
374 p = proc_create("assocs", S_IRUGO, proc_net_sctp, 383 p = proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp,
375 &sctp_assocs_seq_fops); 384 &sctp_assocs_seq_fops);
376 if (!p) 385 if (!p)
377 return -ENOMEM; 386 return -ENOMEM;
@@ -380,9 +389,9 @@ int __init sctp_assocs_proc_init(void)
380} 389}
381 390
382/* Cleanup the proc fs entry for 'assocs' object. */ 391/* Cleanup the proc fs entry for 'assocs' object. */
383void sctp_assocs_proc_exit(void) 392void sctp_assocs_proc_exit(struct net *net)
384{ 393{
385 remove_proc_entry("assocs", proc_net_sctp); 394 remove_proc_entry("assocs", net->sctp.proc_net_sctp);
386} 395}
387 396
388static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) 397static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
@@ -428,6 +437,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
428 sctp_local_bh_disable(); 437 sctp_local_bh_disable();
429 read_lock(&head->lock); 438 read_lock(&head->lock);
430 sctp_for_each_hentry(epb, node, &head->chain) { 439 sctp_for_each_hentry(epb, node, &head->chain) {
440 if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
441 continue;
431 assoc = sctp_assoc(epb); 442 assoc = sctp_assoc(epb);
432 list_for_each_entry(tsp, &assoc->peer.transport_addr_list, 443 list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
433 transports) { 444 transports) {
@@ -491,14 +502,15 @@ static const struct seq_operations sctp_remaddr_ops = {
491}; 502};
492 503
493/* Cleanup the proc fs entry for 'remaddr' object. */ 504/* Cleanup the proc fs entry for 'remaddr' object. */
494void sctp_remaddr_proc_exit(void) 505void sctp_remaddr_proc_exit(struct net *net)
495{ 506{
496 remove_proc_entry("remaddr", proc_net_sctp); 507 remove_proc_entry("remaddr", net->sctp.proc_net_sctp);
497} 508}
498 509
499static int sctp_remaddr_seq_open(struct inode *inode, struct file *file) 510static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
500{ 511{
501 return seq_open(file, &sctp_remaddr_ops); 512 return seq_open_net(inode, file, &sctp_remaddr_ops,
513 sizeof(struct seq_net_private));
502} 514}
503 515
504static const struct file_operations sctp_remaddr_seq_fops = { 516static const struct file_operations sctp_remaddr_seq_fops = {
@@ -508,11 +520,12 @@ static const struct file_operations sctp_remaddr_seq_fops = {
508 .release = seq_release, 520 .release = seq_release,
509}; 521};
510 522
511int __init sctp_remaddr_proc_init(void) 523int __net_init sctp_remaddr_proc_init(struct net *net)
512{ 524{
513 struct proc_dir_entry *p; 525 struct proc_dir_entry *p;
514 526
515 p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops); 527 p = proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp,
528 &sctp_remaddr_seq_fops);
516 if (!p) 529 if (!p)
517 return -ENOMEM; 530 return -ENOMEM;
518 return 0; 531 return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1f89c4e69645..2d518425d598 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -69,21 +69,10 @@
69 69
70/* Global data structures. */ 70/* Global data structures. */
71struct sctp_globals sctp_globals __read_mostly; 71struct sctp_globals sctp_globals __read_mostly;
72DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
73
74#ifdef CONFIG_PROC_FS
75struct proc_dir_entry *proc_net_sctp;
76#endif
77 72
78struct idr sctp_assocs_id; 73struct idr sctp_assocs_id;
79DEFINE_SPINLOCK(sctp_assocs_id_lock); 74DEFINE_SPINLOCK(sctp_assocs_id_lock);
80 75
81/* This is the global socket data structure used for responding to
82 * the Out-of-the-blue (OOTB) packets. A control sock will be created
83 * for this socket at the initialization time.
84 */
85static struct sock *sctp_ctl_sock;
86
87static struct sctp_pf *sctp_pf_inet6_specific; 76static struct sctp_pf *sctp_pf_inet6_specific;
88static struct sctp_pf *sctp_pf_inet_specific; 77static struct sctp_pf *sctp_pf_inet_specific;
89static struct sctp_af *sctp_af_v4_specific; 78static struct sctp_af *sctp_af_v4_specific;
@@ -96,74 +85,54 @@ long sysctl_sctp_mem[3];
96int sysctl_sctp_rmem[3]; 85int sysctl_sctp_rmem[3];
97int sysctl_sctp_wmem[3]; 86int sysctl_sctp_wmem[3];
98 87
99/* Return the address of the control sock. */
100struct sock *sctp_get_ctl_sock(void)
101{
102 return sctp_ctl_sock;
103}
104
105/* Set up the proc fs entry for the SCTP protocol. */ 88/* Set up the proc fs entry for the SCTP protocol. */
106static __init int sctp_proc_init(void) 89static __net_init int sctp_proc_init(struct net *net)
107{ 90{
108 if (percpu_counter_init(&sctp_sockets_allocated, 0))
109 goto out_nomem;
110#ifdef CONFIG_PROC_FS 91#ifdef CONFIG_PROC_FS
111 if (!proc_net_sctp) { 92 net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
112 proc_net_sctp = proc_mkdir("sctp", init_net.proc_net); 93 if (!net->sctp.proc_net_sctp)
113 if (!proc_net_sctp) 94 goto out_proc_net_sctp;
114 goto out_free_percpu; 95 if (sctp_snmp_proc_init(net))
115 }
116
117 if (sctp_snmp_proc_init())
118 goto out_snmp_proc_init; 96 goto out_snmp_proc_init;
119 if (sctp_eps_proc_init()) 97 if (sctp_eps_proc_init(net))
120 goto out_eps_proc_init; 98 goto out_eps_proc_init;
121 if (sctp_assocs_proc_init()) 99 if (sctp_assocs_proc_init(net))
122 goto out_assocs_proc_init; 100 goto out_assocs_proc_init;
123 if (sctp_remaddr_proc_init()) 101 if (sctp_remaddr_proc_init(net))
124 goto out_remaddr_proc_init; 102 goto out_remaddr_proc_init;
125 103
126 return 0; 104 return 0;
127 105
128out_remaddr_proc_init: 106out_remaddr_proc_init:
129 sctp_assocs_proc_exit(); 107 sctp_assocs_proc_exit(net);
130out_assocs_proc_init: 108out_assocs_proc_init:
131 sctp_eps_proc_exit(); 109 sctp_eps_proc_exit(net);
132out_eps_proc_init: 110out_eps_proc_init:
133 sctp_snmp_proc_exit(); 111 sctp_snmp_proc_exit(net);
134out_snmp_proc_init: 112out_snmp_proc_init:
135 if (proc_net_sctp) { 113 remove_proc_entry("sctp", net->proc_net);
136 proc_net_sctp = NULL; 114 net->sctp.proc_net_sctp = NULL;
137 remove_proc_entry("sctp", init_net.proc_net); 115out_proc_net_sctp:
138 }
139out_free_percpu:
140 percpu_counter_destroy(&sctp_sockets_allocated);
141#else
142 return 0;
143#endif /* CONFIG_PROC_FS */
144
145out_nomem:
146 return -ENOMEM; 116 return -ENOMEM;
117#endif /* CONFIG_PROC_FS */
118 return 0;
147} 119}
148 120
149/* Clean up the proc fs entry for the SCTP protocol. 121/* Clean up the proc fs entry for the SCTP protocol.
150 * Note: Do not make this __exit as it is used in the init error 122 * Note: Do not make this __exit as it is used in the init error
151 * path. 123 * path.
152 */ 124 */
153static void sctp_proc_exit(void) 125static void sctp_proc_exit(struct net *net)
154{ 126{
155#ifdef CONFIG_PROC_FS 127#ifdef CONFIG_PROC_FS
156 sctp_snmp_proc_exit(); 128 sctp_snmp_proc_exit(net);
157 sctp_eps_proc_exit(); 129 sctp_eps_proc_exit(net);
158 sctp_assocs_proc_exit(); 130 sctp_assocs_proc_exit(net);
159 sctp_remaddr_proc_exit(); 131 sctp_remaddr_proc_exit(net);
160 132
161 if (proc_net_sctp) { 133 remove_proc_entry("sctp", net->proc_net);
162 proc_net_sctp = NULL; 134 net->sctp.proc_net_sctp = NULL;
163 remove_proc_entry("sctp", init_net.proc_net);
164 }
165#endif 135#endif
166 percpu_counter_destroy(&sctp_sockets_allocated);
167} 136}
168 137
169/* Private helper to extract ipv4 address and stash them in 138/* Private helper to extract ipv4 address and stash them in
@@ -201,29 +170,29 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
201/* Extract our IP addresses from the system and stash them in the 170/* Extract our IP addresses from the system and stash them in the
202 * protocol structure. 171 * protocol structure.
203 */ 172 */
204static void sctp_get_local_addr_list(void) 173static void sctp_get_local_addr_list(struct net *net)
205{ 174{
206 struct net_device *dev; 175 struct net_device *dev;
207 struct list_head *pos; 176 struct list_head *pos;
208 struct sctp_af *af; 177 struct sctp_af *af;
209 178
210 rcu_read_lock(); 179 rcu_read_lock();
211 for_each_netdev_rcu(&init_net, dev) { 180 for_each_netdev_rcu(net, dev) {
212 __list_for_each(pos, &sctp_address_families) { 181 __list_for_each(pos, &sctp_address_families) {
213 af = list_entry(pos, struct sctp_af, list); 182 af = list_entry(pos, struct sctp_af, list);
214 af->copy_addrlist(&sctp_local_addr_list, dev); 183 af->copy_addrlist(&net->sctp.local_addr_list, dev);
215 } 184 }
216 } 185 }
217 rcu_read_unlock(); 186 rcu_read_unlock();
218} 187}
219 188
220/* Free the existing local addresses. */ 189/* Free the existing local addresses. */
221static void sctp_free_local_addr_list(void) 190static void sctp_free_local_addr_list(struct net *net)
222{ 191{
223 struct sctp_sockaddr_entry *addr; 192 struct sctp_sockaddr_entry *addr;
224 struct list_head *pos, *temp; 193 struct list_head *pos, *temp;
225 194
226 list_for_each_safe(pos, temp, &sctp_local_addr_list) { 195 list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
227 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 196 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
228 list_del(pos); 197 list_del(pos);
229 kfree(addr); 198 kfree(addr);
@@ -231,17 +200,17 @@ static void sctp_free_local_addr_list(void)
231} 200}
232 201
233/* Copy the local addresses which are valid for 'scope' into 'bp'. */ 202/* Copy the local addresses which are valid for 'scope' into 'bp'. */
234int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, 203int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
235 gfp_t gfp, int copy_flags) 204 sctp_scope_t scope, gfp_t gfp, int copy_flags)
236{ 205{
237 struct sctp_sockaddr_entry *addr; 206 struct sctp_sockaddr_entry *addr;
238 int error = 0; 207 int error = 0;
239 208
240 rcu_read_lock(); 209 rcu_read_lock();
241 list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { 210 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
242 if (!addr->valid) 211 if (!addr->valid)
243 continue; 212 continue;
244 if (sctp_in_scope(&addr->a, scope)) { 213 if (sctp_in_scope(net, &addr->a, scope)) {
245 /* Now that the address is in scope, check to see if 214 /* Now that the address is in scope, check to see if
246 * the address type is really supported by the local 215 * the address type is really supported by the local
247 * sock as well as the remote peer. 216 * sock as well as the remote peer.
@@ -397,7 +366,8 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
397/* Should this be available for binding? */ 366/* Should this be available for binding? */
398static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) 367static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
399{ 368{
400 int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr); 369 struct net *net = sock_net(&sp->inet.sk);
370 int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
401 371
402 372
403 if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) && 373 if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
@@ -484,7 +454,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
484 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", 454 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
485 __func__, &fl4->daddr, &fl4->saddr); 455 __func__, &fl4->daddr, &fl4->saddr);
486 456
487 rt = ip_route_output_key(&init_net, fl4); 457 rt = ip_route_output_key(sock_net(sk), fl4);
488 if (!IS_ERR(rt)) 458 if (!IS_ERR(rt))
489 dst = &rt->dst; 459 dst = &rt->dst;
490 460
@@ -530,7 +500,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
530 (AF_INET == laddr->a.sa.sa_family)) { 500 (AF_INET == laddr->a.sa.sa_family)) {
531 fl4->saddr = laddr->a.v4.sin_addr.s_addr; 501 fl4->saddr = laddr->a.v4.sin_addr.s_addr;
532 fl4->fl4_sport = laddr->a.v4.sin_port; 502 fl4->fl4_sport = laddr->a.v4.sin_port;
533 rt = ip_route_output_key(&init_net, fl4); 503 rt = ip_route_output_key(sock_net(sk), fl4);
534 if (!IS_ERR(rt)) { 504 if (!IS_ERR(rt)) {
535 dst = &rt->dst; 505 dst = &rt->dst;
536 goto out_unlock; 506 goto out_unlock;
@@ -627,14 +597,15 @@ static void sctp_v4_ecn_capable(struct sock *sk)
627 597
628void sctp_addr_wq_timeout_handler(unsigned long arg) 598void sctp_addr_wq_timeout_handler(unsigned long arg)
629{ 599{
600 struct net *net = (struct net *)arg;
630 struct sctp_sockaddr_entry *addrw, *temp; 601 struct sctp_sockaddr_entry *addrw, *temp;
631 struct sctp_sock *sp; 602 struct sctp_sock *sp;
632 603
633 spin_lock_bh(&sctp_addr_wq_lock); 604 spin_lock_bh(&net->sctp.addr_wq_lock);
634 605
635 list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) { 606 list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
636 SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ", 607 SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
637 " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state, 608 " for cmd %d at entry %p\n", &net->sctp.addr_waitq, &addrw->a, addrw->state,
638 addrw); 609 addrw);
639 610
640#if IS_ENABLED(CONFIG_IPV6) 611#if IS_ENABLED(CONFIG_IPV6)
@@ -648,7 +619,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
648 goto free_next; 619 goto free_next;
649 620
650 in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr; 621 in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
651 if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 && 622 if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
652 addrw->state == SCTP_ADDR_NEW) { 623 addrw->state == SCTP_ADDR_NEW) {
653 unsigned long timeo_val; 624 unsigned long timeo_val;
654 625
@@ -656,12 +627,12 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
656 SCTP_ADDRESS_TICK_DELAY); 627 SCTP_ADDRESS_TICK_DELAY);
657 timeo_val = jiffies; 628 timeo_val = jiffies;
658 timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY); 629 timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
659 mod_timer(&sctp_addr_wq_timer, timeo_val); 630 mod_timer(&net->sctp.addr_wq_timer, timeo_val);
660 break; 631 break;
661 } 632 }
662 } 633 }
663#endif 634#endif
664 list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) { 635 list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
665 struct sock *sk; 636 struct sock *sk;
666 637
667 sk = sctp_opt2sk(sp); 638 sk = sctp_opt2sk(sp);
@@ -679,31 +650,32 @@ free_next:
679 list_del(&addrw->list); 650 list_del(&addrw->list);
680 kfree(addrw); 651 kfree(addrw);
681 } 652 }
682 spin_unlock_bh(&sctp_addr_wq_lock); 653 spin_unlock_bh(&net->sctp.addr_wq_lock);
683} 654}
684 655
685static void sctp_free_addr_wq(void) 656static void sctp_free_addr_wq(struct net *net)
686{ 657{
687 struct sctp_sockaddr_entry *addrw; 658 struct sctp_sockaddr_entry *addrw;
688 struct sctp_sockaddr_entry *temp; 659 struct sctp_sockaddr_entry *temp;
689 660
690 spin_lock_bh(&sctp_addr_wq_lock); 661 spin_lock_bh(&net->sctp.addr_wq_lock);
691 del_timer(&sctp_addr_wq_timer); 662 del_timer(&net->sctp.addr_wq_timer);
692 list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) { 663 list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
693 list_del(&addrw->list); 664 list_del(&addrw->list);
694 kfree(addrw); 665 kfree(addrw);
695 } 666 }
696 spin_unlock_bh(&sctp_addr_wq_lock); 667 spin_unlock_bh(&net->sctp.addr_wq_lock);
697} 668}
698 669
699/* lookup the entry for the same address in the addr_waitq 670/* lookup the entry for the same address in the addr_waitq
700 * sctp_addr_wq MUST be locked 671 * sctp_addr_wq MUST be locked
701 */ 672 */
702static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr) 673static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
674 struct sctp_sockaddr_entry *addr)
703{ 675{
704 struct sctp_sockaddr_entry *addrw; 676 struct sctp_sockaddr_entry *addrw;
705 677
706 list_for_each_entry(addrw, &sctp_addr_waitq, list) { 678 list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
707 if (addrw->a.sa.sa_family != addr->a.sa.sa_family) 679 if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
708 continue; 680 continue;
709 if (addrw->a.sa.sa_family == AF_INET) { 681 if (addrw->a.sa.sa_family == AF_INET) {
@@ -719,7 +691,7 @@ static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entr
719 return NULL; 691 return NULL;
720} 692}
721 693
722void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd) 694void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
723{ 695{
724 struct sctp_sockaddr_entry *addrw; 696 struct sctp_sockaddr_entry *addrw;
725 unsigned long timeo_val; 697 unsigned long timeo_val;
@@ -730,38 +702,38 @@ void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
730 * new address after a couple of addition and deletion of that address 702 * new address after a couple of addition and deletion of that address
731 */ 703 */
732 704
733 spin_lock_bh(&sctp_addr_wq_lock); 705 spin_lock_bh(&net->sctp.addr_wq_lock);
734 /* Offsets existing events in addr_wq */ 706 /* Offsets existing events in addr_wq */
735 addrw = sctp_addr_wq_lookup(addr); 707 addrw = sctp_addr_wq_lookup(net, addr);
736 if (addrw) { 708 if (addrw) {
737 if (addrw->state != cmd) { 709 if (addrw->state != cmd) {
738 SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ", 710 SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
739 " in wq %p\n", addrw->state, &addrw->a, 711 " in wq %p\n", addrw->state, &addrw->a,
740 &sctp_addr_waitq); 712 &net->sctp.addr_waitq);
741 list_del(&addrw->list); 713 list_del(&addrw->list);
742 kfree(addrw); 714 kfree(addrw);
743 } 715 }
744 spin_unlock_bh(&sctp_addr_wq_lock); 716 spin_unlock_bh(&net->sctp.addr_wq_lock);
745 return; 717 return;
746 } 718 }
747 719
748 /* OK, we have to add the new address to the wait queue */ 720 /* OK, we have to add the new address to the wait queue */
749 addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 721 addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
750 if (addrw == NULL) { 722 if (addrw == NULL) {
751 spin_unlock_bh(&sctp_addr_wq_lock); 723 spin_unlock_bh(&net->sctp.addr_wq_lock);
752 return; 724 return;
753 } 725 }
754 addrw->state = cmd; 726 addrw->state = cmd;
755 list_add_tail(&addrw->list, &sctp_addr_waitq); 727 list_add_tail(&addrw->list, &net->sctp.addr_waitq);
756 SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ", 728 SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
757 " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq); 729 " in wq %p\n", addrw->state, &addrw->a, &net->sctp.addr_waitq);
758 730
759 if (!timer_pending(&sctp_addr_wq_timer)) { 731 if (!timer_pending(&net->sctp.addr_wq_timer)) {
760 timeo_val = jiffies; 732 timeo_val = jiffies;
761 timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY); 733 timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
762 mod_timer(&sctp_addr_wq_timer, timeo_val); 734 mod_timer(&net->sctp.addr_wq_timer, timeo_val);
763 } 735 }
764 spin_unlock_bh(&sctp_addr_wq_lock); 736 spin_unlock_bh(&net->sctp.addr_wq_lock);
765} 737}
766 738
767/* Event handler for inet address addition/deletion events. 739/* Event handler for inet address addition/deletion events.
@@ -776,11 +748,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
776 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 748 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
777 struct sctp_sockaddr_entry *addr = NULL; 749 struct sctp_sockaddr_entry *addr = NULL;
778 struct sctp_sockaddr_entry *temp; 750 struct sctp_sockaddr_entry *temp;
751 struct net *net = dev_net(ifa->ifa_dev->dev);
779 int found = 0; 752 int found = 0;
780 753
781 if (!net_eq(dev_net(ifa->ifa_dev->dev), &init_net))
782 return NOTIFY_DONE;
783
784 switch (ev) { 754 switch (ev) {
785 case NETDEV_UP: 755 case NETDEV_UP:
786 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 756 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
@@ -789,27 +759,27 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
789 addr->a.v4.sin_port = 0; 759 addr->a.v4.sin_port = 0;
790 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 760 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
791 addr->valid = 1; 761 addr->valid = 1;
792 spin_lock_bh(&sctp_local_addr_lock); 762 spin_lock_bh(&net->sctp.local_addr_lock);
793 list_add_tail_rcu(&addr->list, &sctp_local_addr_list); 763 list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
794 sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW); 764 sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
795 spin_unlock_bh(&sctp_local_addr_lock); 765 spin_unlock_bh(&net->sctp.local_addr_lock);
796 } 766 }
797 break; 767 break;
798 case NETDEV_DOWN: 768 case NETDEV_DOWN:
799 spin_lock_bh(&sctp_local_addr_lock); 769 spin_lock_bh(&net->sctp.local_addr_lock);
800 list_for_each_entry_safe(addr, temp, 770 list_for_each_entry_safe(addr, temp,
801 &sctp_local_addr_list, list) { 771 &net->sctp.local_addr_list, list) {
802 if (addr->a.sa.sa_family == AF_INET && 772 if (addr->a.sa.sa_family == AF_INET &&
803 addr->a.v4.sin_addr.s_addr == 773 addr->a.v4.sin_addr.s_addr ==
804 ifa->ifa_local) { 774 ifa->ifa_local) {
805 sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL); 775 sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
806 found = 1; 776 found = 1;
807 addr->valid = 0; 777 addr->valid = 0;
808 list_del_rcu(&addr->list); 778 list_del_rcu(&addr->list);
809 break; 779 break;
810 } 780 }
811 } 781 }
812 spin_unlock_bh(&sctp_local_addr_lock); 782 spin_unlock_bh(&net->sctp.local_addr_lock);
813 if (found) 783 if (found)
814 kfree_rcu(addr, rcu); 784 kfree_rcu(addr, rcu);
815 break; 785 break;
@@ -822,7 +792,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
822 * Initialize the control inode/socket with a control endpoint data 792 * Initialize the control inode/socket with a control endpoint data
823 * structure. This endpoint is reserved exclusively for the OOTB processing. 793 * structure. This endpoint is reserved exclusively for the OOTB processing.
824 */ 794 */
825static int sctp_ctl_sock_init(void) 795static int sctp_ctl_sock_init(struct net *net)
826{ 796{
827 int err; 797 int err;
828 sa_family_t family = PF_INET; 798 sa_family_t family = PF_INET;
@@ -830,14 +800,14 @@ static int sctp_ctl_sock_init(void)
830 if (sctp_get_pf_specific(PF_INET6)) 800 if (sctp_get_pf_specific(PF_INET6))
831 family = PF_INET6; 801 family = PF_INET6;
832 802
833 err = inet_ctl_sock_create(&sctp_ctl_sock, family, 803 err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
834 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net); 804 SOCK_SEQPACKET, IPPROTO_SCTP, net);
835 805
836 /* If IPv6 socket could not be created, try the IPv4 socket */ 806 /* If IPv6 socket could not be created, try the IPv4 socket */
837 if (err < 0 && family == PF_INET6) 807 if (err < 0 && family == PF_INET6)
838 err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET, 808 err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
839 SOCK_SEQPACKET, IPPROTO_SCTP, 809 SOCK_SEQPACKET, IPPROTO_SCTP,
840 &init_net); 810 net);
841 811
842 if (err < 0) { 812 if (err < 0) {
843 pr_err("Failed to create the SCTP control socket\n"); 813 pr_err("Failed to create the SCTP control socket\n");
@@ -990,7 +960,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
990 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? 960 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
991 IP_PMTUDISC_DO : IP_PMTUDISC_DONT; 961 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
992 962
993 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 963 SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
994 return ip_queue_xmit(skb, &transport->fl); 964 return ip_queue_xmit(skb, &transport->fl);
995} 965}
996 966
@@ -1063,6 +1033,7 @@ static const struct net_protocol sctp_protocol = {
1063 .handler = sctp_rcv, 1033 .handler = sctp_rcv,
1064 .err_handler = sctp_v4_err, 1034 .err_handler = sctp_v4_err,
1065 .no_policy = 1, 1035 .no_policy = 1,
1036 .netns_ok = 1,
1066}; 1037};
1067 1038
1068/* IPv4 address related functions. */ 1039/* IPv4 address related functions. */
@@ -1130,16 +1101,16 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
1130 return 1; 1101 return 1;
1131} 1102}
1132 1103
1133static inline int init_sctp_mibs(void) 1104static inline int init_sctp_mibs(struct net *net)
1134{ 1105{
1135 return snmp_mib_init((void __percpu **)sctp_statistics, 1106 return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
1136 sizeof(struct sctp_mib), 1107 sizeof(struct sctp_mib),
1137 __alignof__(struct sctp_mib)); 1108 __alignof__(struct sctp_mib));
1138} 1109}
1139 1110
1140static inline void cleanup_sctp_mibs(void) 1111static inline void cleanup_sctp_mibs(struct net *net)
1141{ 1112{
1142 snmp_mib_free((void __percpu **)sctp_statistics); 1113 snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
1143} 1114}
1144 1115
1145static void sctp_v4_pf_init(void) 1116static void sctp_v4_pf_init(void)
@@ -1194,6 +1165,143 @@ static void sctp_v4_del_protocol(void)
1194 unregister_inetaddr_notifier(&sctp_inetaddr_notifier); 1165 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1195} 1166}
1196 1167
1168static int sctp_net_init(struct net *net)
1169{
1170 int status;
1171
1172 /*
1173 * 14. Suggested SCTP Protocol Parameter Values
1174 */
1175 /* The following protocol parameters are RECOMMENDED: */
1176 /* RTO.Initial - 3 seconds */
1177 net->sctp.rto_initial = SCTP_RTO_INITIAL;
1178 /* RTO.Min - 1 second */
1179 net->sctp.rto_min = SCTP_RTO_MIN;
1180 /* RTO.Max - 60 seconds */
1181 net->sctp.rto_max = SCTP_RTO_MAX;
1182 /* RTO.Alpha - 1/8 */
1183 net->sctp.rto_alpha = SCTP_RTO_ALPHA;
1184 /* RTO.Beta - 1/4 */
1185 net->sctp.rto_beta = SCTP_RTO_BETA;
1186
1187 /* Valid.Cookie.Life - 60 seconds */
1188 net->sctp.valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE;
1189
1190 /* Whether Cookie Preservative is enabled(1) or not(0) */
1191 net->sctp.cookie_preserve_enable = 1;
1192
1193 /* Max.Burst - 4 */
1194 net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;
1195
1196 /* Association.Max.Retrans - 10 attempts
1197 * Path.Max.Retrans - 5 attempts (per destination address)
1198 * Max.Init.Retransmits - 8 attempts
1199 */
1200 net->sctp.max_retrans_association = 10;
1201 net->sctp.max_retrans_path = 5;
1202 net->sctp.max_retrans_init = 8;
1203
1204 /* Sendbuffer growth - do per-socket accounting */
1205 net->sctp.sndbuf_policy = 0;
1206
1207 /* Rcvbuffer growth - do per-socket accounting */
1208 net->sctp.rcvbuf_policy = 0;
1209
1210 /* HB.interval - 30 seconds */
1211 net->sctp.hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
1212
1213 /* delayed SACK timeout */
1214 net->sctp.sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;
1215
1216 /* Disable ADDIP by default. */
1217 net->sctp.addip_enable = 0;
1218 net->sctp.addip_noauth = 0;
1219 net->sctp.default_auto_asconf = 0;
1220
1221 /* Enable PR-SCTP by default. */
1222 net->sctp.prsctp_enable = 1;
1223
1224 /* Disable AUTH by default. */
1225 net->sctp.auth_enable = 0;
1226
1227 /* Set SCOPE policy to enabled */
1228 net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
1229
1230 /* Set the default rwnd update threshold */
1231 net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
1232
1233 /* Initialize maximum autoclose timeout. */
1234 net->sctp.max_autoclose = INT_MAX / HZ;
1235
1236 status = sctp_sysctl_net_register(net);
1237 if (status)
1238 goto err_sysctl_register;
1239
1240 /* Allocate and initialise sctp mibs. */
1241 status = init_sctp_mibs(net);
1242 if (status)
1243 goto err_init_mibs;
1244
1245 /* Initialize proc fs directory. */
1246 status = sctp_proc_init(net);
1247 if (status)
1248 goto err_init_proc;
1249
1250 sctp_dbg_objcnt_init(net);
1251
1252 /* Initialize the control inode/socket for handling OOTB packets. */
1253 if ((status = sctp_ctl_sock_init(net))) {
1254 pr_err("Failed to initialize the SCTP control sock\n");
1255 goto err_ctl_sock_init;
1256 }
1257
1258 /* Initialize the local address list. */
1259 INIT_LIST_HEAD(&net->sctp.local_addr_list);
1260 spin_lock_init(&net->sctp.local_addr_lock);
1261 sctp_get_local_addr_list(net);
1262
1263 /* Initialize the address event list */
1264 INIT_LIST_HEAD(&net->sctp.addr_waitq);
1265 INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
1266 spin_lock_init(&net->sctp.addr_wq_lock);
1267 net->sctp.addr_wq_timer.expires = 0;
1268 setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler,
1269 (unsigned long)net);
1270
1271 return 0;
1272
1273err_ctl_sock_init:
1274 sctp_dbg_objcnt_exit(net);
1275 sctp_proc_exit(net);
1276err_init_proc:
1277 cleanup_sctp_mibs(net);
1278err_init_mibs:
1279 sctp_sysctl_net_unregister(net);
1280err_sysctl_register:
1281 return status;
1282}
1283
1284static void sctp_net_exit(struct net *net)
1285{
1286 /* Free the local address list */
1287 sctp_free_addr_wq(net);
1288 sctp_free_local_addr_list(net);
1289
1290 /* Free the control endpoint. */
1291 inet_ctl_sock_destroy(net->sctp.ctl_sock);
1292
1293 sctp_dbg_objcnt_exit(net);
1294
1295 sctp_proc_exit(net);
1296 cleanup_sctp_mibs(net);
1297 sctp_sysctl_net_unregister(net);
1298}
1299
1300static struct pernet_operations sctp_net_ops = {
1301 .init = sctp_net_init,
1302 .exit = sctp_net_exit,
1303};
1304
1197/* Initialize the universe into something sensible. */ 1305/* Initialize the universe into something sensible. */
1198SCTP_STATIC __init int sctp_init(void) 1306SCTP_STATIC __init int sctp_init(void)
1199{ 1307{
@@ -1224,62 +1332,9 @@ SCTP_STATIC __init int sctp_init(void)
1224 if (!sctp_chunk_cachep) 1332 if (!sctp_chunk_cachep)
1225 goto err_chunk_cachep; 1333 goto err_chunk_cachep;
1226 1334
1227 /* Allocate and initialise sctp mibs. */ 1335 status = percpu_counter_init(&sctp_sockets_allocated, 0);
1228 status = init_sctp_mibs();
1229 if (status) 1336 if (status)
1230 goto err_init_mibs; 1337 goto err_percpu_counter_init;
1231
1232 /* Initialize proc fs directory. */
1233 status = sctp_proc_init();
1234 if (status)
1235 goto err_init_proc;
1236
1237 /* Initialize object count debugging. */
1238 sctp_dbg_objcnt_init();
1239
1240 /*
1241 * 14. Suggested SCTP Protocol Parameter Values
1242 */
1243 /* The following protocol parameters are RECOMMENDED: */
1244 /* RTO.Initial - 3 seconds */
1245 sctp_rto_initial = SCTP_RTO_INITIAL;
1246 /* RTO.Min - 1 second */
1247 sctp_rto_min = SCTP_RTO_MIN;
1248 /* RTO.Max - 60 seconds */
1249 sctp_rto_max = SCTP_RTO_MAX;
1250 /* RTO.Alpha - 1/8 */
1251 sctp_rto_alpha = SCTP_RTO_ALPHA;
1252 /* RTO.Beta - 1/4 */
1253 sctp_rto_beta = SCTP_RTO_BETA;
1254
1255 /* Valid.Cookie.Life - 60 seconds */
1256 sctp_valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE;
1257
1258 /* Whether Cookie Preservative is enabled(1) or not(0) */
1259 sctp_cookie_preserve_enable = 1;
1260
1261 /* Max.Burst - 4 */
1262 sctp_max_burst = SCTP_DEFAULT_MAX_BURST;
1263
1264 /* Association.Max.Retrans - 10 attempts
1265 * Path.Max.Retrans - 5 attempts (per destination address)
1266 * Max.Init.Retransmits - 8 attempts
1267 */
1268 sctp_max_retrans_association = 10;
1269 sctp_max_retrans_path = 5;
1270 sctp_max_retrans_init = 8;
1271
1272 /* Sendbuffer growth - do per-socket accounting */
1273 sctp_sndbuf_policy = 0;
1274
1275 /* Rcvbuffer growth - do per-socket accounting */
1276 sctp_rcvbuf_policy = 0;
1277
1278 /* HB.interval - 30 seconds */
1279 sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
1280
1281 /* delayed SACK timeout */
1282 sctp_sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;
1283 1338
1284 /* Implementation specific variables. */ 1339 /* Implementation specific variables. */
1285 1340
@@ -1287,9 +1342,6 @@ SCTP_STATIC __init int sctp_init(void)
1287 sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; 1342 sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
1288 sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; 1343 sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
1289 1344
1290 /* Initialize maximum autoclose timeout. */
1291 sctp_max_autoclose = INT_MAX / HZ;
1292
1293 /* Initialize handle used for association ids. */ 1345 /* Initialize handle used for association ids. */
1294 idr_init(&sctp_assocs_id); 1346 idr_init(&sctp_assocs_id);
1295 1347
@@ -1376,41 +1428,12 @@ SCTP_STATIC __init int sctp_init(void)
1376 pr_info("Hash tables configured (established %d bind %d)\n", 1428 pr_info("Hash tables configured (established %d bind %d)\n",
1377 sctp_assoc_hashsize, sctp_port_hashsize); 1429 sctp_assoc_hashsize, sctp_port_hashsize);
1378 1430
1379 /* Disable ADDIP by default. */
1380 sctp_addip_enable = 0;
1381 sctp_addip_noauth = 0;
1382 sctp_default_auto_asconf = 0;
1383
1384 /* Enable PR-SCTP by default. */
1385 sctp_prsctp_enable = 1;
1386
1387 /* Disable AUTH by default. */
1388 sctp_auth_enable = 0;
1389
1390 /* Set SCOPE policy to enabled */
1391 sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
1392
1393 /* Set the default rwnd update threshold */
1394 sctp_rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
1395
1396 sctp_sysctl_register(); 1431 sctp_sysctl_register();
1397 1432
1398 INIT_LIST_HEAD(&sctp_address_families); 1433 INIT_LIST_HEAD(&sctp_address_families);
1399 sctp_v4_pf_init(); 1434 sctp_v4_pf_init();
1400 sctp_v6_pf_init(); 1435 sctp_v6_pf_init();
1401 1436
1402 /* Initialize the local address list. */
1403 INIT_LIST_HEAD(&sctp_local_addr_list);
1404 spin_lock_init(&sctp_local_addr_lock);
1405 sctp_get_local_addr_list();
1406
1407 /* Initialize the address event list */
1408 INIT_LIST_HEAD(&sctp_addr_waitq);
1409 INIT_LIST_HEAD(&sctp_auto_asconf_splist);
1410 spin_lock_init(&sctp_addr_wq_lock);
1411 sctp_addr_wq_timer.expires = 0;
1412 setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
1413
1414 status = sctp_v4_protosw_init(); 1437 status = sctp_v4_protosw_init();
1415 1438
1416 if (status) 1439 if (status)
@@ -1420,11 +1443,9 @@ SCTP_STATIC __init int sctp_init(void)
1420 if (status) 1443 if (status)
1421 goto err_v6_protosw_init; 1444 goto err_v6_protosw_init;
1422 1445
1423 /* Initialize the control inode/socket for handling OOTB packets. */ 1446 status = register_pernet_subsys(&sctp_net_ops);
1424 if ((status = sctp_ctl_sock_init())) { 1447 if (status)
1425 pr_err("Failed to initialize the SCTP control sock\n"); 1448 goto err_register_pernet_subsys;
1426 goto err_ctl_sock_init;
1427 }
1428 1449
1429 status = sctp_v4_add_protocol(); 1450 status = sctp_v4_add_protocol();
1430 if (status) 1451 if (status)
@@ -1441,13 +1462,12 @@ out:
1441err_v6_add_protocol: 1462err_v6_add_protocol:
1442 sctp_v4_del_protocol(); 1463 sctp_v4_del_protocol();
1443err_add_protocol: 1464err_add_protocol:
1444 inet_ctl_sock_destroy(sctp_ctl_sock); 1465 unregister_pernet_subsys(&sctp_net_ops);
1445err_ctl_sock_init: 1466err_register_pernet_subsys:
1446 sctp_v6_protosw_exit(); 1467 sctp_v6_protosw_exit();
1447err_v6_protosw_init: 1468err_v6_protosw_init:
1448 sctp_v4_protosw_exit(); 1469 sctp_v4_protosw_exit();
1449err_protosw_init: 1470err_protosw_init:
1450 sctp_free_local_addr_list();
1451 sctp_v4_pf_exit(); 1471 sctp_v4_pf_exit();
1452 sctp_v6_pf_exit(); 1472 sctp_v6_pf_exit();
1453 sctp_sysctl_unregister(); 1473 sctp_sysctl_unregister();
@@ -1461,11 +1481,8 @@ err_ehash_alloc:
1461 get_order(sctp_assoc_hashsize * 1481 get_order(sctp_assoc_hashsize *
1462 sizeof(struct sctp_hashbucket))); 1482 sizeof(struct sctp_hashbucket)));
1463err_ahash_alloc: 1483err_ahash_alloc:
1464 sctp_dbg_objcnt_exit(); 1484 percpu_counter_destroy(&sctp_sockets_allocated);
1465 sctp_proc_exit(); 1485err_percpu_counter_init:
1466err_init_proc:
1467 cleanup_sctp_mibs();
1468err_init_mibs:
1469 kmem_cache_destroy(sctp_chunk_cachep); 1486 kmem_cache_destroy(sctp_chunk_cachep);
1470err_chunk_cachep: 1487err_chunk_cachep:
1471 kmem_cache_destroy(sctp_bucket_cachep); 1488 kmem_cache_destroy(sctp_bucket_cachep);
@@ -1482,18 +1499,13 @@ SCTP_STATIC __exit void sctp_exit(void)
1482 /* Unregister with inet6/inet layers. */ 1499 /* Unregister with inet6/inet layers. */
1483 sctp_v6_del_protocol(); 1500 sctp_v6_del_protocol();
1484 sctp_v4_del_protocol(); 1501 sctp_v4_del_protocol();
1485 sctp_free_addr_wq();
1486 1502
1487 /* Free the control endpoint. */ 1503 unregister_pernet_subsys(&sctp_net_ops);
1488 inet_ctl_sock_destroy(sctp_ctl_sock);
1489 1504
1490 /* Free protosw registrations */ 1505 /* Free protosw registrations */
1491 sctp_v6_protosw_exit(); 1506 sctp_v6_protosw_exit();
1492 sctp_v4_protosw_exit(); 1507 sctp_v4_protosw_exit();
1493 1508
1494 /* Free the local address list. */
1495 sctp_free_local_addr_list();
1496
1497 /* Unregister with socket layer. */ 1509 /* Unregister with socket layer. */
1498 sctp_v6_pf_exit(); 1510 sctp_v6_pf_exit();
1499 sctp_v4_pf_exit(); 1511 sctp_v4_pf_exit();
@@ -1508,9 +1520,7 @@ SCTP_STATIC __exit void sctp_exit(void)
1508 get_order(sctp_port_hashsize * 1520 get_order(sctp_port_hashsize *
1509 sizeof(struct sctp_bind_hashbucket))); 1521 sizeof(struct sctp_bind_hashbucket)));
1510 1522
1511 sctp_dbg_objcnt_exit(); 1523 percpu_counter_destroy(&sctp_sockets_allocated);
1512 sctp_proc_exit();
1513 cleanup_sctp_mibs();
1514 1524
1515 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1525 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1516 1526
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 479a70ef6ff8..fbe1636309a7 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -198,6 +198,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
198 const struct sctp_bind_addr *bp, 198 const struct sctp_bind_addr *bp,
199 gfp_t gfp, int vparam_len) 199 gfp_t gfp, int vparam_len)
200{ 200{
201 struct net *net = sock_net(asoc->base.sk);
201 sctp_inithdr_t init; 202 sctp_inithdr_t init;
202 union sctp_params addrs; 203 union sctp_params addrs;
203 size_t chunksize; 204 size_t chunksize;
@@ -237,7 +238,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
237 chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); 238 chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
238 chunksize += sizeof(ecap_param); 239 chunksize += sizeof(ecap_param);
239 240
240 if (sctp_prsctp_enable) 241 if (net->sctp.prsctp_enable)
241 chunksize += sizeof(prsctp_param); 242 chunksize += sizeof(prsctp_param);
242 243
243 /* ADDIP: Section 4.2.7: 244 /* ADDIP: Section 4.2.7:
@@ -245,7 +246,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
245 * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and 246 * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and
246 * INIT-ACK parameters. 247 * INIT-ACK parameters.
247 */ 248 */
248 if (sctp_addip_enable) { 249 if (net->sctp.addip_enable) {
249 extensions[num_ext] = SCTP_CID_ASCONF; 250 extensions[num_ext] = SCTP_CID_ASCONF;
250 extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; 251 extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
251 num_ext += 2; 252 num_ext += 2;
@@ -257,7 +258,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
257 chunksize += vparam_len; 258 chunksize += vparam_len;
258 259
259 /* Account for AUTH related parameters */ 260 /* Account for AUTH related parameters */
260 if (sctp_auth_enable) { 261 if (net->sctp.auth_enable) {
261 /* Add random parameter length*/ 262 /* Add random parameter length*/
262 chunksize += sizeof(asoc->c.auth_random); 263 chunksize += sizeof(asoc->c.auth_random);
263 264
@@ -331,7 +332,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
331 sctp_addto_param(retval, num_ext, extensions); 332 sctp_addto_param(retval, num_ext, extensions);
332 } 333 }
333 334
334 if (sctp_prsctp_enable) 335 if (net->sctp.prsctp_enable)
335 sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); 336 sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
336 337
337 if (sp->adaptation_ind) { 338 if (sp->adaptation_ind) {
@@ -342,7 +343,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
342 } 343 }
343 344
344 /* Add SCTP-AUTH chunks to the parameter list */ 345 /* Add SCTP-AUTH chunks to the parameter list */
345 if (sctp_auth_enable) { 346 if (net->sctp.auth_enable) {
346 sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), 347 sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
347 asoc->c.auth_random); 348 asoc->c.auth_random);
348 if (auth_hmacs) 349 if (auth_hmacs)
@@ -1940,7 +1941,7 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
1940 return 0; 1941 return 0;
1941} 1942}
1942 1943
1943static int sctp_verify_ext_param(union sctp_params param) 1944static int sctp_verify_ext_param(struct net *net, union sctp_params param)
1944{ 1945{
1945 __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); 1946 __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
1946 int have_auth = 0; 1947 int have_auth = 0;
@@ -1964,10 +1965,10 @@ static int sctp_verify_ext_param(union sctp_params param)
1964 * only if ADD-IP is turned on and we are not backward-compatible 1965 * only if ADD-IP is turned on and we are not backward-compatible
1965 * mode. 1966 * mode.
1966 */ 1967 */
1967 if (sctp_addip_noauth) 1968 if (net->sctp.addip_noauth)
1968 return 1; 1969 return 1;
1969 1970
1970 if (sctp_addip_enable && !have_auth && have_asconf) 1971 if (net->sctp.addip_enable && !have_auth && have_asconf)
1971 return 0; 1972 return 0;
1972 1973
1973 return 1; 1974 return 1;
@@ -1976,13 +1977,14 @@ static int sctp_verify_ext_param(union sctp_params param)
1976static void sctp_process_ext_param(struct sctp_association *asoc, 1977static void sctp_process_ext_param(struct sctp_association *asoc,
1977 union sctp_params param) 1978 union sctp_params param)
1978{ 1979{
1980 struct net *net = sock_net(asoc->base.sk);
1979 __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); 1981 __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
1980 int i; 1982 int i;
1981 1983
1982 for (i = 0; i < num_ext; i++) { 1984 for (i = 0; i < num_ext; i++) {
1983 switch (param.ext->chunks[i]) { 1985 switch (param.ext->chunks[i]) {
1984 case SCTP_CID_FWD_TSN: 1986 case SCTP_CID_FWD_TSN:
1985 if (sctp_prsctp_enable && 1987 if (net->sctp.prsctp_enable &&
1986 !asoc->peer.prsctp_capable) 1988 !asoc->peer.prsctp_capable)
1987 asoc->peer.prsctp_capable = 1; 1989 asoc->peer.prsctp_capable = 1;
1988 break; 1990 break;
@@ -1990,12 +1992,12 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
1990 /* if the peer reports AUTH, assume that he 1992 /* if the peer reports AUTH, assume that he
1991 * supports AUTH. 1993 * supports AUTH.
1992 */ 1994 */
1993 if (sctp_auth_enable) 1995 if (net->sctp.auth_enable)
1994 asoc->peer.auth_capable = 1; 1996 asoc->peer.auth_capable = 1;
1995 break; 1997 break;
1996 case SCTP_CID_ASCONF: 1998 case SCTP_CID_ASCONF:
1997 case SCTP_CID_ASCONF_ACK: 1999 case SCTP_CID_ASCONF_ACK:
1998 if (sctp_addip_enable) 2000 if (net->sctp.addip_enable)
1999 asoc->peer.asconf_capable = 1; 2001 asoc->peer.asconf_capable = 1;
2000 break; 2002 break;
2001 default: 2003 default:
@@ -2081,7 +2083,8 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
2081 * SCTP_IERROR_ERROR - stop processing, trigger an ERROR 2083 * SCTP_IERROR_ERROR - stop processing, trigger an ERROR
2082 * SCTP_IERROR_NO_ERROR - continue with the chunk 2084 * SCTP_IERROR_NO_ERROR - continue with the chunk
2083 */ 2085 */
2084static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc, 2086static sctp_ierror_t sctp_verify_param(struct net *net,
2087 const struct sctp_association *asoc,
2085 union sctp_params param, 2088 union sctp_params param,
2086 sctp_cid_t cid, 2089 sctp_cid_t cid,
2087 struct sctp_chunk *chunk, 2090 struct sctp_chunk *chunk,
@@ -2110,12 +2113,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
2110 break; 2113 break;
2111 2114
2112 case SCTP_PARAM_SUPPORTED_EXT: 2115 case SCTP_PARAM_SUPPORTED_EXT:
2113 if (!sctp_verify_ext_param(param)) 2116 if (!sctp_verify_ext_param(net, param))
2114 return SCTP_IERROR_ABORT; 2117 return SCTP_IERROR_ABORT;
2115 break; 2118 break;
2116 2119
2117 case SCTP_PARAM_SET_PRIMARY: 2120 case SCTP_PARAM_SET_PRIMARY:
2118 if (sctp_addip_enable) 2121 if (net->sctp.addip_enable)
2119 break; 2122 break;
2120 goto fallthrough; 2123 goto fallthrough;
2121 2124
@@ -2126,12 +2129,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
2126 break; 2129 break;
2127 2130
2128 case SCTP_PARAM_FWD_TSN_SUPPORT: 2131 case SCTP_PARAM_FWD_TSN_SUPPORT:
2129 if (sctp_prsctp_enable) 2132 if (net->sctp.prsctp_enable)
2130 break; 2133 break;
2131 goto fallthrough; 2134 goto fallthrough;
2132 2135
2133 case SCTP_PARAM_RANDOM: 2136 case SCTP_PARAM_RANDOM:
2134 if (!sctp_auth_enable) 2137 if (!net->sctp.auth_enable)
2135 goto fallthrough; 2138 goto fallthrough;
2136 2139
2137 /* SCTP-AUTH: Secion 6.1 2140 /* SCTP-AUTH: Secion 6.1
@@ -2148,7 +2151,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
2148 break; 2151 break;
2149 2152
2150 case SCTP_PARAM_CHUNKS: 2153 case SCTP_PARAM_CHUNKS:
2151 if (!sctp_auth_enable) 2154 if (!net->sctp.auth_enable)
2152 goto fallthrough; 2155 goto fallthrough;
2153 2156
2154 /* SCTP-AUTH: Section 3.2 2157 /* SCTP-AUTH: Section 3.2
@@ -2164,7 +2167,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
2164 break; 2167 break;
2165 2168
2166 case SCTP_PARAM_HMAC_ALGO: 2169 case SCTP_PARAM_HMAC_ALGO:
2167 if (!sctp_auth_enable) 2170 if (!net->sctp.auth_enable)
2168 goto fallthrough; 2171 goto fallthrough;
2169 2172
2170 hmacs = (struct sctp_hmac_algo_param *)param.p; 2173 hmacs = (struct sctp_hmac_algo_param *)param.p;
@@ -2198,7 +2201,7 @@ fallthrough:
2198} 2201}
2199 2202
2200/* Verify the INIT packet before we process it. */ 2203/* Verify the INIT packet before we process it. */
2201int sctp_verify_init(const struct sctp_association *asoc, 2204int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
2202 sctp_cid_t cid, 2205 sctp_cid_t cid,
2203 sctp_init_chunk_t *peer_init, 2206 sctp_init_chunk_t *peer_init,
2204 struct sctp_chunk *chunk, 2207 struct sctp_chunk *chunk,
@@ -2245,7 +2248,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
2245 /* Verify all the variable length parameters */ 2248 /* Verify all the variable length parameters */
2246 sctp_walk_params(param, peer_init, init_hdr.params) { 2249 sctp_walk_params(param, peer_init, init_hdr.params) {
2247 2250
2248 result = sctp_verify_param(asoc, param, cid, chunk, errp); 2251 result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
2249 switch (result) { 2252 switch (result) {
2250 case SCTP_IERROR_ABORT: 2253 case SCTP_IERROR_ABORT:
2251 case SCTP_IERROR_NOMEM: 2254 case SCTP_IERROR_NOMEM:
@@ -2270,6 +2273,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2270 const union sctp_addr *peer_addr, 2273 const union sctp_addr *peer_addr,
2271 sctp_init_chunk_t *peer_init, gfp_t gfp) 2274 sctp_init_chunk_t *peer_init, gfp_t gfp)
2272{ 2275{
2276 struct net *net = sock_net(asoc->base.sk);
2273 union sctp_params param; 2277 union sctp_params param;
2274 struct sctp_transport *transport; 2278 struct sctp_transport *transport;
2275 struct list_head *pos, *temp; 2279 struct list_head *pos, *temp;
@@ -2326,7 +2330,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2326 * also give us an option to silently ignore the packet, which 2330 * also give us an option to silently ignore the packet, which
2327 * is what we'll do here. 2331 * is what we'll do here.
2328 */ 2332 */
2329 if (!sctp_addip_noauth && 2333 if (!net->sctp.addip_noauth &&
2330 (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { 2334 (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
2331 asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | 2335 asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
2332 SCTP_PARAM_DEL_IP | 2336 SCTP_PARAM_DEL_IP |
@@ -2466,6 +2470,7 @@ static int sctp_process_param(struct sctp_association *asoc,
2466 const union sctp_addr *peer_addr, 2470 const union sctp_addr *peer_addr,
2467 gfp_t gfp) 2471 gfp_t gfp)
2468{ 2472{
2473 struct net *net = sock_net(asoc->base.sk);
2469 union sctp_addr addr; 2474 union sctp_addr addr;
2470 int i; 2475 int i;
2471 __u16 sat; 2476 __u16 sat;
@@ -2494,13 +2499,13 @@ do_addr_param:
2494 af = sctp_get_af_specific(param_type2af(param.p->type)); 2499 af = sctp_get_af_specific(param_type2af(param.p->type));
2495 af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); 2500 af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
2496 scope = sctp_scope(peer_addr); 2501 scope = sctp_scope(peer_addr);
2497 if (sctp_in_scope(&addr, scope)) 2502 if (sctp_in_scope(net, &addr, scope))
2498 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) 2503 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
2499 return 0; 2504 return 0;
2500 break; 2505 break;
2501 2506
2502 case SCTP_PARAM_COOKIE_PRESERVATIVE: 2507 case SCTP_PARAM_COOKIE_PRESERVATIVE:
2503 if (!sctp_cookie_preserve_enable) 2508 if (!net->sctp.cookie_preserve_enable)
2504 break; 2509 break;
2505 2510
2506 stale = ntohl(param.life->lifespan_increment); 2511 stale = ntohl(param.life->lifespan_increment);
@@ -2580,7 +2585,7 @@ do_addr_param:
2580 break; 2585 break;
2581 2586
2582 case SCTP_PARAM_SET_PRIMARY: 2587 case SCTP_PARAM_SET_PRIMARY:
2583 if (!sctp_addip_enable) 2588 if (!net->sctp.addip_enable)
2584 goto fall_through; 2589 goto fall_through;
2585 2590
2586 addr_param = param.v + sizeof(sctp_addip_param_t); 2591 addr_param = param.v + sizeof(sctp_addip_param_t);
@@ -2607,7 +2612,7 @@ do_addr_param:
2607 break; 2612 break;
2608 2613
2609 case SCTP_PARAM_FWD_TSN_SUPPORT: 2614 case SCTP_PARAM_FWD_TSN_SUPPORT:
2610 if (sctp_prsctp_enable) { 2615 if (net->sctp.prsctp_enable) {
2611 asoc->peer.prsctp_capable = 1; 2616 asoc->peer.prsctp_capable = 1;
2612 break; 2617 break;
2613 } 2618 }
@@ -2615,7 +2620,7 @@ do_addr_param:
2615 goto fall_through; 2620 goto fall_through;
2616 2621
2617 case SCTP_PARAM_RANDOM: 2622 case SCTP_PARAM_RANDOM:
2618 if (!sctp_auth_enable) 2623 if (!net->sctp.auth_enable)
2619 goto fall_through; 2624 goto fall_through;
2620 2625
2621 /* Save peer's random parameter */ 2626 /* Save peer's random parameter */
@@ -2628,7 +2633,7 @@ do_addr_param:
2628 break; 2633 break;
2629 2634
2630 case SCTP_PARAM_HMAC_ALGO: 2635 case SCTP_PARAM_HMAC_ALGO:
2631 if (!sctp_auth_enable) 2636 if (!net->sctp.auth_enable)
2632 goto fall_through; 2637 goto fall_through;
2633 2638
2634 /* Save peer's HMAC list */ 2639 /* Save peer's HMAC list */
@@ -2644,7 +2649,7 @@ do_addr_param:
2644 break; 2649 break;
2645 2650
2646 case SCTP_PARAM_CHUNKS: 2651 case SCTP_PARAM_CHUNKS:
2647 if (!sctp_auth_enable) 2652 if (!net->sctp.auth_enable)
2648 goto fall_through; 2653 goto fall_through;
2649 2654
2650 asoc->peer.peer_chunks = kmemdup(param.p, 2655 asoc->peer.peer_chunks = kmemdup(param.p,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fe99628e1257..bcfebb91559d 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -251,6 +251,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
251 int error; 251 int error;
252 struct sctp_transport *transport = (struct sctp_transport *) peer; 252 struct sctp_transport *transport = (struct sctp_transport *) peer;
253 struct sctp_association *asoc = transport->asoc; 253 struct sctp_association *asoc = transport->asoc;
254 struct net *net = sock_net(asoc->base.sk);
254 255
255 /* Check whether a task is in the sock. */ 256 /* Check whether a task is in the sock. */
256 257
@@ -271,7 +272,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
271 goto out_unlock; 272 goto out_unlock;
272 273
273 /* Run through the state machine. */ 274 /* Run through the state machine. */
274 error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, 275 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
275 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), 276 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
276 asoc->state, 277 asoc->state,
277 asoc->ep, asoc, 278 asoc->ep, asoc,
@@ -291,6 +292,7 @@ out_unlock:
291static void sctp_generate_timeout_event(struct sctp_association *asoc, 292static void sctp_generate_timeout_event(struct sctp_association *asoc,
292 sctp_event_timeout_t timeout_type) 293 sctp_event_timeout_t timeout_type)
293{ 294{
295 struct net *net = sock_net(asoc->base.sk);
294 int error = 0; 296 int error = 0;
295 297
296 sctp_bh_lock_sock(asoc->base.sk); 298 sctp_bh_lock_sock(asoc->base.sk);
@@ -312,7 +314,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
312 goto out_unlock; 314 goto out_unlock;
313 315
314 /* Run through the state machine. */ 316 /* Run through the state machine. */
315 error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, 317 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
316 SCTP_ST_TIMEOUT(timeout_type), 318 SCTP_ST_TIMEOUT(timeout_type),
317 asoc->state, asoc->ep, asoc, 319 asoc->state, asoc->ep, asoc,
318 (void *)timeout_type, GFP_ATOMIC); 320 (void *)timeout_type, GFP_ATOMIC);
@@ -371,6 +373,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
371 int error = 0; 373 int error = 0;
372 struct sctp_transport *transport = (struct sctp_transport *) data; 374 struct sctp_transport *transport = (struct sctp_transport *) data;
373 struct sctp_association *asoc = transport->asoc; 375 struct sctp_association *asoc = transport->asoc;
376 struct net *net = sock_net(asoc->base.sk);
374 377
375 sctp_bh_lock_sock(asoc->base.sk); 378 sctp_bh_lock_sock(asoc->base.sk);
376 if (sock_owned_by_user(asoc->base.sk)) { 379 if (sock_owned_by_user(asoc->base.sk)) {
@@ -388,7 +391,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
388 if (transport->dead) 391 if (transport->dead)
389 goto out_unlock; 392 goto out_unlock;
390 393
391 error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, 394 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
392 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), 395 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
393 asoc->state, asoc->ep, asoc, 396 asoc->state, asoc->ep, asoc,
394 transport, GFP_ATOMIC); 397 transport, GFP_ATOMIC);
@@ -408,6 +411,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
408{ 411{
409 struct sctp_transport *transport = (struct sctp_transport *) data; 412 struct sctp_transport *transport = (struct sctp_transport *) data;
410 struct sctp_association *asoc = transport->asoc; 413 struct sctp_association *asoc = transport->asoc;
414 struct net *net = sock_net(asoc->base.sk);
411 415
412 sctp_bh_lock_sock(asoc->base.sk); 416 sctp_bh_lock_sock(asoc->base.sk);
413 if (sock_owned_by_user(asoc->base.sk)) { 417 if (sock_owned_by_user(asoc->base.sk)) {
@@ -426,7 +430,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
426 if (asoc->base.dead) 430 if (asoc->base.dead)
427 goto out_unlock; 431 goto out_unlock;
428 432
429 sctp_do_sm(SCTP_EVENT_T_OTHER, 433 sctp_do_sm(net, SCTP_EVENT_T_OTHER,
430 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), 434 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
431 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); 435 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
432 436
@@ -753,8 +757,10 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
753 int err = 0; 757 int err = 0;
754 758
755 if (sctp_outq_sack(&asoc->outqueue, sackh)) { 759 if (sctp_outq_sack(&asoc->outqueue, sackh)) {
760 struct net *net = sock_net(asoc->base.sk);
761
756 /* There are no more TSNs awaiting SACK. */ 762 /* There are no more TSNs awaiting SACK. */
757 err = sctp_do_sm(SCTP_EVENT_T_OTHER, 763 err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
758 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), 764 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
759 asoc->state, asoc->ep, asoc, NULL, 765 asoc->state, asoc->ep, asoc, NULL,
760 GFP_ATOMIC); 766 GFP_ATOMIC);
@@ -1042,6 +1048,8 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
1042 */ 1048 */
1043static void sctp_cmd_send_asconf(struct sctp_association *asoc) 1049static void sctp_cmd_send_asconf(struct sctp_association *asoc)
1044{ 1050{
1051 struct net *net = sock_net(asoc->base.sk);
1052
1045 /* Send the next asconf chunk from the addip chunk 1053 /* Send the next asconf chunk from the addip chunk
1046 * queue. 1054 * queue.
1047 */ 1055 */
@@ -1053,7 +1061,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
1053 1061
1054 /* Hold the chunk until an ASCONF_ACK is received. */ 1062 /* Hold the chunk until an ASCONF_ACK is received. */
1055 sctp_chunk_hold(asconf); 1063 sctp_chunk_hold(asconf);
1056 if (sctp_primitive_ASCONF(asoc, asconf)) 1064 if (sctp_primitive_ASCONF(net, asoc, asconf))
1057 sctp_chunk_free(asconf); 1065 sctp_chunk_free(asconf);
1058 else 1066 else
1059 asoc->addip_last_asconf = asconf; 1067 asoc->addip_last_asconf = asconf;
@@ -1089,7 +1097,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
1089 * If you want to understand all of lksctp, this is a 1097 * If you want to understand all of lksctp, this is a
1090 * good place to start. 1098 * good place to start.
1091 */ 1099 */
1092int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, 1100int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
1093 sctp_state_t state, 1101 sctp_state_t state,
1094 struct sctp_endpoint *ep, 1102 struct sctp_endpoint *ep,
1095 struct sctp_association *asoc, 1103 struct sctp_association *asoc,
@@ -1110,12 +1118,12 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
1110 /* Look up the state function, run it, and then process the 1118 /* Look up the state function, run it, and then process the
1111 * side effects. These three steps are the heart of lksctp. 1119 * side effects. These three steps are the heart of lksctp.
1112 */ 1120 */
1113 state_fn = sctp_sm_lookup_event(event_type, state, subtype); 1121 state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
1114 1122
1115 sctp_init_cmd_seq(&commands); 1123 sctp_init_cmd_seq(&commands);
1116 1124
1117 DEBUG_PRE; 1125 DEBUG_PRE;
1118 status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands); 1126 status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
1119 DEBUG_POST; 1127 DEBUG_POST;
1120 1128
1121 error = sctp_side_effects(event_type, subtype, state, 1129 error = sctp_side_effects(event_type, subtype, state,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9fca10357350..094813b6c3c3 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -66,7 +66,8 @@
66#include <net/sctp/sm.h> 66#include <net/sctp/sm.h>
67#include <net/sctp/structs.h> 67#include <net/sctp/structs.h>
68 68
69static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep, 69static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
70 const struct sctp_endpoint *ep,
70 const struct sctp_association *asoc, 71 const struct sctp_association *asoc,
71 struct sctp_chunk *chunk, 72 struct sctp_chunk *chunk,
72 const void *payload, 73 const void *payload,
@@ -74,36 +75,43 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
74static int sctp_eat_data(const struct sctp_association *asoc, 75static int sctp_eat_data(const struct sctp_association *asoc,
75 struct sctp_chunk *chunk, 76 struct sctp_chunk *chunk,
76 sctp_cmd_seq_t *commands); 77 sctp_cmd_seq_t *commands);
77static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc, 78static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
79 const struct sctp_association *asoc,
78 const struct sctp_chunk *chunk); 80 const struct sctp_chunk *chunk);
79static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep, 81static void sctp_send_stale_cookie_err(struct net *net,
82 const struct sctp_endpoint *ep,
80 const struct sctp_association *asoc, 83 const struct sctp_association *asoc,
81 const struct sctp_chunk *chunk, 84 const struct sctp_chunk *chunk,
82 sctp_cmd_seq_t *commands, 85 sctp_cmd_seq_t *commands,
83 struct sctp_chunk *err_chunk); 86 struct sctp_chunk *err_chunk);
84static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, 87static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
88 const struct sctp_endpoint *ep,
85 const struct sctp_association *asoc, 89 const struct sctp_association *asoc,
86 const sctp_subtype_t type, 90 const sctp_subtype_t type,
87 void *arg, 91 void *arg,
88 sctp_cmd_seq_t *commands); 92 sctp_cmd_seq_t *commands);
89static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, 93static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
94 const struct sctp_endpoint *ep,
90 const struct sctp_association *asoc, 95 const struct sctp_association *asoc,
91 const sctp_subtype_t type, 96 const sctp_subtype_t type,
92 void *arg, 97 void *arg,
93 sctp_cmd_seq_t *commands); 98 sctp_cmd_seq_t *commands);
94static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, 99static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
100 const struct sctp_endpoint *ep,
95 const struct sctp_association *asoc, 101 const struct sctp_association *asoc,
96 const sctp_subtype_t type, 102 const sctp_subtype_t type,
97 void *arg, 103 void *arg,
98 sctp_cmd_seq_t *commands); 104 sctp_cmd_seq_t *commands);
99static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 105static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
100 106
101static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 107static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
108 sctp_cmd_seq_t *commands,
102 __be16 error, int sk_err, 109 __be16 error, int sk_err,
103 const struct sctp_association *asoc, 110 const struct sctp_association *asoc,
104 struct sctp_transport *transport); 111 struct sctp_transport *transport);
105 112
106static sctp_disposition_t sctp_sf_abort_violation( 113static sctp_disposition_t sctp_sf_abort_violation(
114 struct net *net,
107 const struct sctp_endpoint *ep, 115 const struct sctp_endpoint *ep,
108 const struct sctp_association *asoc, 116 const struct sctp_association *asoc,
109 void *arg, 117 void *arg,
@@ -112,6 +120,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
112 const size_t paylen); 120 const size_t paylen);
113 121
114static sctp_disposition_t sctp_sf_violation_chunklen( 122static sctp_disposition_t sctp_sf_violation_chunklen(
123 struct net *net,
115 const struct sctp_endpoint *ep, 124 const struct sctp_endpoint *ep,
116 const struct sctp_association *asoc, 125 const struct sctp_association *asoc,
117 const sctp_subtype_t type, 126 const sctp_subtype_t type,
@@ -119,6 +128,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
119 sctp_cmd_seq_t *commands); 128 sctp_cmd_seq_t *commands);
120 129
121static sctp_disposition_t sctp_sf_violation_paramlen( 130static sctp_disposition_t sctp_sf_violation_paramlen(
131 struct net *net,
122 const struct sctp_endpoint *ep, 132 const struct sctp_endpoint *ep,
123 const struct sctp_association *asoc, 133 const struct sctp_association *asoc,
124 const sctp_subtype_t type, 134 const sctp_subtype_t type,
@@ -126,6 +136,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
126 sctp_cmd_seq_t *commands); 136 sctp_cmd_seq_t *commands);
127 137
128static sctp_disposition_t sctp_sf_violation_ctsn( 138static sctp_disposition_t sctp_sf_violation_ctsn(
139 struct net *net,
129 const struct sctp_endpoint *ep, 140 const struct sctp_endpoint *ep,
130 const struct sctp_association *asoc, 141 const struct sctp_association *asoc,
131 const sctp_subtype_t type, 142 const sctp_subtype_t type,
@@ -133,18 +144,21 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
133 sctp_cmd_seq_t *commands); 144 sctp_cmd_seq_t *commands);
134 145
135static sctp_disposition_t sctp_sf_violation_chunk( 146static sctp_disposition_t sctp_sf_violation_chunk(
147 struct net *net,
136 const struct sctp_endpoint *ep, 148 const struct sctp_endpoint *ep,
137 const struct sctp_association *asoc, 149 const struct sctp_association *asoc,
138 const sctp_subtype_t type, 150 const sctp_subtype_t type,
139 void *arg, 151 void *arg,
140 sctp_cmd_seq_t *commands); 152 sctp_cmd_seq_t *commands);
141 153
142static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep, 154static sctp_ierror_t sctp_sf_authenticate(struct net *net,
155 const struct sctp_endpoint *ep,
143 const struct sctp_association *asoc, 156 const struct sctp_association *asoc,
144 const sctp_subtype_t type, 157 const sctp_subtype_t type,
145 struct sctp_chunk *chunk); 158 struct sctp_chunk *chunk);
146 159
147static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, 160static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
161 const struct sctp_endpoint *ep,
148 const struct sctp_association *asoc, 162 const struct sctp_association *asoc,
149 const sctp_subtype_t type, 163 const sctp_subtype_t type,
150 void *arg, 164 void *arg,
@@ -204,7 +218,8 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
204 * 218 *
205 * The return value is the disposition of the chunk. 219 * The return value is the disposition of the chunk.
206 */ 220 */
207sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, 221sctp_disposition_t sctp_sf_do_4_C(struct net *net,
222 const struct sctp_endpoint *ep,
208 const struct sctp_association *asoc, 223 const struct sctp_association *asoc,
209 const sctp_subtype_t type, 224 const sctp_subtype_t type,
210 void *arg, 225 void *arg,
@@ -214,7 +229,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
214 struct sctp_ulpevent *ev; 229 struct sctp_ulpevent *ev;
215 230
216 if (!sctp_vtag_verify_either(chunk, asoc)) 231 if (!sctp_vtag_verify_either(chunk, asoc))
217 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 232 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
218 233
219 /* RFC 2960 6.10 Bundling 234 /* RFC 2960 6.10 Bundling
220 * 235 *
@@ -222,11 +237,11 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
222 * SHUTDOWN COMPLETE with any other chunks. 237 * SHUTDOWN COMPLETE with any other chunks.
223 */ 238 */
224 if (!chunk->singleton) 239 if (!chunk->singleton)
225 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands); 240 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
226 241
227 /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */ 242 /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
228 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 243 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
229 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 244 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
230 commands); 245 commands);
231 246
232 /* RFC 2960 10.2 SCTP-to-ULP 247 /* RFC 2960 10.2 SCTP-to-ULP
@@ -259,8 +274,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
259 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 274 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
260 SCTP_STATE(SCTP_STATE_CLOSED)); 275 SCTP_STATE(SCTP_STATE_CLOSED));
261 276
262 SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); 277 SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
263 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 278 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
264 279
265 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 280 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
266 281
@@ -289,7 +304,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
289 * 304 *
290 * The return value is the disposition of the chunk. 305 * The return value is the disposition of the chunk.
291 */ 306 */
292sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, 307sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
308 const struct sctp_endpoint *ep,
293 const struct sctp_association *asoc, 309 const struct sctp_association *asoc,
294 const sctp_subtype_t type, 310 const sctp_subtype_t type,
295 void *arg, 311 void *arg,
@@ -313,21 +329,21 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
313 * with an INIT chunk that is bundled with other chunks. 329 * with an INIT chunk that is bundled with other chunks.
314 */ 330 */
315 if (!chunk->singleton) 331 if (!chunk->singleton)
316 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 332 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
317 333
318 /* If the packet is an OOTB packet which is temporarily on the 334 /* If the packet is an OOTB packet which is temporarily on the
319 * control endpoint, respond with an ABORT. 335 * control endpoint, respond with an ABORT.
320 */ 336 */
321 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) { 337 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
322 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 338 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
323 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 339 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
324 } 340 }
325 341
326 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification 342 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
327 * Tag. 343 * Tag.
328 */ 344 */
329 if (chunk->sctp_hdr->vtag != 0) 345 if (chunk->sctp_hdr->vtag != 0)
330 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 346 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
331 347
332 /* Make sure that the INIT chunk has a valid length. 348 /* Make sure that the INIT chunk has a valid length.
333 * Normally, this would cause an ABORT with a Protocol Violation 349 * Normally, this would cause an ABORT with a Protocol Violation
@@ -335,7 +351,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
335 * just discard the packet. 351 * just discard the packet.
336 */ 352 */
337 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) 353 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
338 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 354 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
339 355
340 /* If the INIT is coming toward a closing socket, we'll send back 356 /* If the INIT is coming toward a closing socket, we'll send back
341 * and ABORT. Essentially, this catches the race of INIT being 357 * and ABORT. Essentially, this catches the race of INIT being
@@ -344,18 +360,18 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
344 * can treat this OOTB 360 * can treat this OOTB
345 */ 361 */
346 if (sctp_sstate(ep->base.sk, CLOSING)) 362 if (sctp_sstate(ep->base.sk, CLOSING))
347 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 363 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
348 364
349 /* Verify the INIT chunk before processing it. */ 365 /* Verify the INIT chunk before processing it. */
350 err_chunk = NULL; 366 err_chunk = NULL;
351 if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, 367 if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
352 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, 368 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
353 &err_chunk)) { 369 &err_chunk)) {
354 /* This chunk contains fatal error. It is to be discarded. 370 /* This chunk contains fatal error. It is to be discarded.
355 * Send an ABORT, with causes if there is any. 371 * Send an ABORT, with causes if there is any.
356 */ 372 */
357 if (err_chunk) { 373 if (err_chunk) {
358 packet = sctp_abort_pkt_new(ep, asoc, arg, 374 packet = sctp_abort_pkt_new(net, ep, asoc, arg,
359 (__u8 *)(err_chunk->chunk_hdr) + 375 (__u8 *)(err_chunk->chunk_hdr) +
360 sizeof(sctp_chunkhdr_t), 376 sizeof(sctp_chunkhdr_t),
361 ntohs(err_chunk->chunk_hdr->length) - 377 ntohs(err_chunk->chunk_hdr->length) -
@@ -366,13 +382,13 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
366 if (packet) { 382 if (packet) {
367 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 383 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
368 SCTP_PACKET(packet)); 384 SCTP_PACKET(packet));
369 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 385 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
370 return SCTP_DISPOSITION_CONSUME; 386 return SCTP_DISPOSITION_CONSUME;
371 } else { 387 } else {
372 return SCTP_DISPOSITION_NOMEM; 388 return SCTP_DISPOSITION_NOMEM;
373 } 389 }
374 } else { 390 } else {
375 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, 391 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
376 commands); 392 commands);
377 } 393 }
378 } 394 }
@@ -484,7 +500,8 @@ nomem:
484 * 500 *
485 * The return value is the disposition of the chunk. 501 * The return value is the disposition of the chunk.
486 */ 502 */
487sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, 503sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
504 const struct sctp_endpoint *ep,
488 const struct sctp_association *asoc, 505 const struct sctp_association *asoc,
489 const sctp_subtype_t type, 506 const sctp_subtype_t type,
490 void *arg, 507 void *arg,
@@ -496,25 +513,25 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
496 struct sctp_packet *packet; 513 struct sctp_packet *packet;
497 514
498 if (!sctp_vtag_verify(chunk, asoc)) 515 if (!sctp_vtag_verify(chunk, asoc))
499 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 516 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
500 517
501 /* 6.10 Bundling 518 /* 6.10 Bundling
502 * An endpoint MUST NOT bundle INIT, INIT ACK or 519 * An endpoint MUST NOT bundle INIT, INIT ACK or
503 * SHUTDOWN COMPLETE with any other chunks. 520 * SHUTDOWN COMPLETE with any other chunks.
504 */ 521 */
505 if (!chunk->singleton) 522 if (!chunk->singleton)
506 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands); 523 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
507 524
508 /* Make sure that the INIT-ACK chunk has a valid length */ 525 /* Make sure that the INIT-ACK chunk has a valid length */
509 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) 526 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
510 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 527 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
511 commands); 528 commands);
512 /* Grab the INIT header. */ 529 /* Grab the INIT header. */
513 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; 530 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
514 531
515 /* Verify the INIT chunk before processing it. */ 532 /* Verify the INIT chunk before processing it. */
516 err_chunk = NULL; 533 err_chunk = NULL;
517 if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, 534 if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
518 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, 535 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
519 &err_chunk)) { 536 &err_chunk)) {
520 537
@@ -526,7 +543,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
526 * the association. 543 * the association.
527 */ 544 */
528 if (err_chunk) { 545 if (err_chunk) {
529 packet = sctp_abort_pkt_new(ep, asoc, arg, 546 packet = sctp_abort_pkt_new(net, ep, asoc, arg,
530 (__u8 *)(err_chunk->chunk_hdr) + 547 (__u8 *)(err_chunk->chunk_hdr) +
531 sizeof(sctp_chunkhdr_t), 548 sizeof(sctp_chunkhdr_t),
532 ntohs(err_chunk->chunk_hdr->length) - 549 ntohs(err_chunk->chunk_hdr->length) -
@@ -537,7 +554,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
537 if (packet) { 554 if (packet) {
538 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 555 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
539 SCTP_PACKET(packet)); 556 SCTP_PACKET(packet));
540 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 557 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
541 error = SCTP_ERROR_INV_PARAM; 558 error = SCTP_ERROR_INV_PARAM;
542 } 559 }
543 } 560 }
@@ -554,10 +571,10 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
554 * was malformed. 571 * was malformed.
555 */ 572 */
556 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) 573 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
557 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 574 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
558 575
559 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 576 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
560 return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, 577 return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
561 asoc, chunk->transport); 578 asoc, chunk->transport);
562 } 579 }
563 580
@@ -633,7 +650,8 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
633 * 650 *
634 * The return value is the disposition of the chunk. 651 * The return value is the disposition of the chunk.
635 */ 652 */
636sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, 653sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
654 const struct sctp_endpoint *ep,
637 const struct sctp_association *asoc, 655 const struct sctp_association *asoc,
638 const sctp_subtype_t type, void *arg, 656 const sctp_subtype_t type, void *arg,
639 sctp_cmd_seq_t *commands) 657 sctp_cmd_seq_t *commands)
@@ -650,9 +668,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
650 /* If the packet is an OOTB packet which is temporarily on the 668 /* If the packet is an OOTB packet which is temporarily on the
651 * control endpoint, respond with an ABORT. 669 * control endpoint, respond with an ABORT.
652 */ 670 */
653 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) { 671 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
654 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 672 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
655 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 673 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
656 } 674 }
657 675
658 /* Make sure that the COOKIE_ECHO chunk has a valid length. 676 /* Make sure that the COOKIE_ECHO chunk has a valid length.
@@ -661,7 +679,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
661 * in sctp_unpack_cookie(). 679 * in sctp_unpack_cookie().
662 */ 680 */
663 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 681 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
664 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 682 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
665 683
666 /* If the endpoint is not listening or if the number of associations 684 /* If the endpoint is not listening or if the number of associations
667 * on the TCP-style socket exceed the max backlog, respond with an 685 * on the TCP-style socket exceed the max backlog, respond with an
@@ -670,7 +688,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
670 sk = ep->base.sk; 688 sk = ep->base.sk;
671 if (!sctp_sstate(sk, LISTENING) || 689 if (!sctp_sstate(sk, LISTENING) ||
672 (sctp_style(sk, TCP) && sk_acceptq_is_full(sk))) 690 (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
673 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 691 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
674 692
675 /* "Decode" the chunk. We have no optional parameters so we 693 /* "Decode" the chunk. We have no optional parameters so we
676 * are in good shape. 694 * are in good shape.
@@ -703,13 +721,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
703 goto nomem; 721 goto nomem;
704 722
705 case -SCTP_IERROR_STALE_COOKIE: 723 case -SCTP_IERROR_STALE_COOKIE:
706 sctp_send_stale_cookie_err(ep, asoc, chunk, commands, 724 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
707 err_chk_p); 725 err_chk_p);
708 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 726 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
709 727
710 case -SCTP_IERROR_BAD_SIG: 728 case -SCTP_IERROR_BAD_SIG:
711 default: 729 default:
712 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 730 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
713 } 731 }
714 } 732 }
715 733
@@ -756,14 +774,14 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
756 skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t)); 774 skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
757 auth.transport = chunk->transport; 775 auth.transport = chunk->transport;
758 776
759 ret = sctp_sf_authenticate(ep, new_asoc, type, &auth); 777 ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
760 778
761 /* We can now safely free the auth_chunk clone */ 779 /* We can now safely free the auth_chunk clone */
762 kfree_skb(chunk->auth_chunk); 780 kfree_skb(chunk->auth_chunk);
763 781
764 if (ret != SCTP_IERROR_NO_ERROR) { 782 if (ret != SCTP_IERROR_NO_ERROR) {
765 sctp_association_free(new_asoc); 783 sctp_association_free(new_asoc);
766 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 784 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
767 } 785 }
768 } 786 }
769 787
@@ -804,8 +822,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
804 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 822 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
805 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 823 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
806 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 824 SCTP_STATE(SCTP_STATE_ESTABLISHED));
807 SCTP_INC_STATS(SCTP_MIB_CURRESTAB); 825 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
808 SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS); 826 SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
809 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 827 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
810 828
811 if (new_asoc->autoclose) 829 if (new_asoc->autoclose)
@@ -856,7 +874,8 @@ nomem:
856 * 874 *
857 * The return value is the disposition of the chunk. 875 * The return value is the disposition of the chunk.
858 */ 876 */
859sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, 877sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
878 const struct sctp_endpoint *ep,
860 const struct sctp_association *asoc, 879 const struct sctp_association *asoc,
861 const sctp_subtype_t type, void *arg, 880 const sctp_subtype_t type, void *arg,
862 sctp_cmd_seq_t *commands) 881 sctp_cmd_seq_t *commands)
@@ -865,13 +884,13 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
865 struct sctp_ulpevent *ev; 884 struct sctp_ulpevent *ev;
866 885
867 if (!sctp_vtag_verify(chunk, asoc)) 886 if (!sctp_vtag_verify(chunk, asoc))
868 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 887 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
869 888
870 /* Verify that the chunk length for the COOKIE-ACK is OK. 889 /* Verify that the chunk length for the COOKIE-ACK is OK.
871 * If we don't do this, any bundled chunks may be junked. 890 * If we don't do this, any bundled chunks may be junked.
872 */ 891 */
873 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 892 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
874 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 893 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
875 commands); 894 commands);
876 895
877 /* Reset init error count upon receipt of COOKIE-ACK, 896 /* Reset init error count upon receipt of COOKIE-ACK,
@@ -892,8 +911,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
892 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); 911 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
893 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 912 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
894 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 913 SCTP_STATE(SCTP_STATE_ESTABLISHED));
895 SCTP_INC_STATS(SCTP_MIB_CURRESTAB); 914 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
896 SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS); 915 SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
897 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 916 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
898 if (asoc->autoclose) 917 if (asoc->autoclose)
899 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 918 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
@@ -958,7 +977,8 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
958} 977}
959 978
960/* Generate a HEARTBEAT packet on the given transport. */ 979/* Generate a HEARTBEAT packet on the given transport. */
961sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, 980sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
981 const struct sctp_endpoint *ep,
962 const struct sctp_association *asoc, 982 const struct sctp_association *asoc,
963 const sctp_subtype_t type, 983 const sctp_subtype_t type,
964 void *arg, 984 void *arg,
@@ -972,8 +992,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
972 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 992 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
973 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 993 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
974 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 994 SCTP_PERR(SCTP_ERROR_NO_ERROR));
975 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 995 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
976 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 996 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
977 return SCTP_DISPOSITION_DELETE_TCB; 997 return SCTP_DISPOSITION_DELETE_TCB;
978 } 998 }
979 999
@@ -1028,7 +1048,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
1028 * 1048 *
1029 * The return value is the disposition of the chunk. 1049 * The return value is the disposition of the chunk.
1030 */ 1050 */
1031sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep, 1051sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
1052 const struct sctp_endpoint *ep,
1032 const struct sctp_association *asoc, 1053 const struct sctp_association *asoc,
1033 const sctp_subtype_t type, 1054 const sctp_subtype_t type,
1034 void *arg, 1055 void *arg,
@@ -1039,11 +1060,11 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
1039 size_t paylen = 0; 1060 size_t paylen = 0;
1040 1061
1041 if (!sctp_vtag_verify(chunk, asoc)) 1062 if (!sctp_vtag_verify(chunk, asoc))
1042 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 1063 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1043 1064
1044 /* Make sure that the HEARTBEAT chunk has a valid length. */ 1065 /* Make sure that the HEARTBEAT chunk has a valid length. */
1045 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) 1066 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
1046 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 1067 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1047 commands); 1068 commands);
1048 1069
1049 /* 8.3 The receiver of the HEARTBEAT should immediately 1070 /* 8.3 The receiver of the HEARTBEAT should immediately
@@ -1095,7 +1116,8 @@ nomem:
1095 * 1116 *
1096 * The return value is the disposition of the chunk. 1117 * The return value is the disposition of the chunk.
1097 */ 1118 */
1098sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, 1119sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
1120 const struct sctp_endpoint *ep,
1099 const struct sctp_association *asoc, 1121 const struct sctp_association *asoc,
1100 const sctp_subtype_t type, 1122 const sctp_subtype_t type,
1101 void *arg, 1123 void *arg,
@@ -1108,12 +1130,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1108 unsigned long max_interval; 1130 unsigned long max_interval;
1109 1131
1110 if (!sctp_vtag_verify(chunk, asoc)) 1132 if (!sctp_vtag_verify(chunk, asoc))
1111 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 1133 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1112 1134
1113 /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ 1135 /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
1114 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) + 1136 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
1115 sizeof(sctp_sender_hb_info_t))) 1137 sizeof(sctp_sender_hb_info_t)))
1116 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 1138 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1117 commands); 1139 commands);
1118 1140
1119 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 1141 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
@@ -1171,7 +1193,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1171/* Helper function to send out an abort for the restart 1193/* Helper function to send out an abort for the restart
1172 * condition. 1194 * condition.
1173 */ 1195 */
1174static int sctp_sf_send_restart_abort(union sctp_addr *ssa, 1196static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
1175 struct sctp_chunk *init, 1197 struct sctp_chunk *init,
1176 sctp_cmd_seq_t *commands) 1198 sctp_cmd_seq_t *commands)
1177{ 1199{
@@ -1197,18 +1219,18 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
1197 errhdr->length = htons(len); 1219 errhdr->length = htons(len);
1198 1220
1199 /* Assign to the control socket. */ 1221 /* Assign to the control socket. */
1200 ep = sctp_sk((sctp_get_ctl_sock()))->ep; 1222 ep = sctp_sk(net->sctp.ctl_sock)->ep;
1201 1223
1202 /* Association is NULL since this may be a restart attack and we 1224 /* Association is NULL since this may be a restart attack and we
1203 * want to send back the attacker's vtag. 1225 * want to send back the attacker's vtag.
1204 */ 1226 */
1205 pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len); 1227 pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
1206 1228
1207 if (!pkt) 1229 if (!pkt)
1208 goto out; 1230 goto out;
1209 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt)); 1231 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
1210 1232
1211 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 1233 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
1212 1234
1213 /* Discard the rest of the inbound packet. */ 1235 /* Discard the rest of the inbound packet. */
1214 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); 1236 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
@@ -1240,6 +1262,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1240 struct sctp_chunk *init, 1262 struct sctp_chunk *init,
1241 sctp_cmd_seq_t *commands) 1263 sctp_cmd_seq_t *commands)
1242{ 1264{
1265 struct net *net = sock_net(new_asoc->base.sk);
1243 struct sctp_transport *new_addr; 1266 struct sctp_transport *new_addr;
1244 int ret = 1; 1267 int ret = 1;
1245 1268
@@ -1258,7 +1281,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1258 transports) { 1281 transports) {
1259 if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, 1282 if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
1260 &new_addr->ipaddr)) { 1283 &new_addr->ipaddr)) {
1261 sctp_sf_send_restart_abort(&new_addr->ipaddr, init, 1284 sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
1262 commands); 1285 commands);
1263 ret = 0; 1286 ret = 0;
1264 break; 1287 break;
@@ -1358,6 +1381,7 @@ static char sctp_tietags_compare(struct sctp_association *new_asoc,
1358 * chunk handling. 1381 * chunk handling.
1359 */ 1382 */
1360static sctp_disposition_t sctp_sf_do_unexpected_init( 1383static sctp_disposition_t sctp_sf_do_unexpected_init(
1384 struct net *net,
1361 const struct sctp_endpoint *ep, 1385 const struct sctp_endpoint *ep,
1362 const struct sctp_association *asoc, 1386 const struct sctp_association *asoc,
1363 const sctp_subtype_t type, 1387 const sctp_subtype_t type,
@@ -1382,20 +1406,20 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1382 * with an INIT chunk that is bundled with other chunks. 1406 * with an INIT chunk that is bundled with other chunks.
1383 */ 1407 */
1384 if (!chunk->singleton) 1408 if (!chunk->singleton)
1385 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 1409 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1386 1410
1387 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification 1411 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
1388 * Tag. 1412 * Tag.
1389 */ 1413 */
1390 if (chunk->sctp_hdr->vtag != 0) 1414 if (chunk->sctp_hdr->vtag != 0)
1391 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 1415 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
1392 1416
1393 /* Make sure that the INIT chunk has a valid length. 1417 /* Make sure that the INIT chunk has a valid length.
1394 * In this case, we generate a protocol violation since we have 1418 * In this case, we generate a protocol violation since we have
1395 * an association established. 1419 * an association established.
1396 */ 1420 */
1397 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) 1421 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
1398 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 1422 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1399 commands); 1423 commands);
1400 /* Grab the INIT header. */ 1424 /* Grab the INIT header. */
1401 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; 1425 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -1405,14 +1429,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1405 1429
1406 /* Verify the INIT chunk before processing it. */ 1430 /* Verify the INIT chunk before processing it. */
1407 err_chunk = NULL; 1431 err_chunk = NULL;
1408 if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, 1432 if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
1409 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, 1433 (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
1410 &err_chunk)) { 1434 &err_chunk)) {
1411 /* This chunk contains fatal error. It is to be discarded. 1435 /* This chunk contains fatal error. It is to be discarded.
1412 * Send an ABORT, with causes if there is any. 1436 * Send an ABORT, with causes if there is any.
1413 */ 1437 */
1414 if (err_chunk) { 1438 if (err_chunk) {
1415 packet = sctp_abort_pkt_new(ep, asoc, arg, 1439 packet = sctp_abort_pkt_new(net, ep, asoc, arg,
1416 (__u8 *)(err_chunk->chunk_hdr) + 1440 (__u8 *)(err_chunk->chunk_hdr) +
1417 sizeof(sctp_chunkhdr_t), 1441 sizeof(sctp_chunkhdr_t),
1418 ntohs(err_chunk->chunk_hdr->length) - 1442 ntohs(err_chunk->chunk_hdr->length) -
@@ -1421,14 +1445,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1421 if (packet) { 1445 if (packet) {
1422 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 1446 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
1423 SCTP_PACKET(packet)); 1447 SCTP_PACKET(packet));
1424 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 1448 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
1425 retval = SCTP_DISPOSITION_CONSUME; 1449 retval = SCTP_DISPOSITION_CONSUME;
1426 } else { 1450 } else {
1427 retval = SCTP_DISPOSITION_NOMEM; 1451 retval = SCTP_DISPOSITION_NOMEM;
1428 } 1452 }
1429 goto cleanup; 1453 goto cleanup;
1430 } else { 1454 } else {
1431 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, 1455 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
1432 commands); 1456 commands);
1433 } 1457 }
1434 } 1458 }
@@ -1570,7 +1594,8 @@ cleanup:
1570 * 1594 *
1571 * The return value is the disposition of the chunk. 1595 * The return value is the disposition of the chunk.
1572 */ 1596 */
1573sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep, 1597sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
1598 const struct sctp_endpoint *ep,
1574 const struct sctp_association *asoc, 1599 const struct sctp_association *asoc,
1575 const sctp_subtype_t type, 1600 const sctp_subtype_t type,
1576 void *arg, 1601 void *arg,
@@ -1579,7 +1604,7 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
1579 /* Call helper to do the real work for both simulataneous and 1604 /* Call helper to do the real work for both simulataneous and
1580 * duplicate INIT chunk handling. 1605 * duplicate INIT chunk handling.
1581 */ 1606 */
1582 return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands); 1607 return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
1583} 1608}
1584 1609
1585/* 1610/*
@@ -1623,7 +1648,8 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
1623 * 1648 *
1624 * The return value is the disposition of the chunk. 1649 * The return value is the disposition of the chunk.
1625 */ 1650 */
1626sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep, 1651sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
1652 const struct sctp_endpoint *ep,
1627 const struct sctp_association *asoc, 1653 const struct sctp_association *asoc,
1628 const sctp_subtype_t type, 1654 const sctp_subtype_t type,
1629 void *arg, 1655 void *arg,
@@ -1632,7 +1658,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
1632 /* Call helper to do the real work for both simulataneous and 1658 /* Call helper to do the real work for both simulataneous and
1633 * duplicate INIT chunk handling. 1659 * duplicate INIT chunk handling.
1634 */ 1660 */
1635 return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands); 1661 return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
1636} 1662}
1637 1663
1638 1664
@@ -1645,7 +1671,8 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
1645 * An unexpected INIT ACK usually indicates the processing of an old or 1671 * An unexpected INIT ACK usually indicates the processing of an old or
1646 * duplicated INIT chunk. 1672 * duplicated INIT chunk.
1647*/ 1673*/
1648sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep, 1674sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
1675 const struct sctp_endpoint *ep,
1649 const struct sctp_association *asoc, 1676 const struct sctp_association *asoc,
1650 const sctp_subtype_t type, 1677 const sctp_subtype_t type,
1651 void *arg, sctp_cmd_seq_t *commands) 1678 void *arg, sctp_cmd_seq_t *commands)
@@ -1653,10 +1680,10 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
1653 /* Per the above section, we'll discard the chunk if we have an 1680 /* Per the above section, we'll discard the chunk if we have an
1654 * endpoint. If this is an OOTB INIT-ACK, treat it as such. 1681 * endpoint. If this is an OOTB INIT-ACK, treat it as such.
1655 */ 1682 */
1656 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) 1683 if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
1657 return sctp_sf_ootb(ep, asoc, type, arg, commands); 1684 return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
1658 else 1685 else
1659 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); 1686 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
1660} 1687}
1661 1688
1662/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A') 1689/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
@@ -1664,7 +1691,8 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
1664 * Section 5.2.4 1691 * Section 5.2.4
1665 * A) In this case, the peer may have restarted. 1692 * A) In this case, the peer may have restarted.
1666 */ 1693 */
1667static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep, 1694static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
1695 const struct sctp_endpoint *ep,
1668 const struct sctp_association *asoc, 1696 const struct sctp_association *asoc,
1669 struct sctp_chunk *chunk, 1697 struct sctp_chunk *chunk,
1670 sctp_cmd_seq_t *commands, 1698 sctp_cmd_seq_t *commands,
@@ -1700,7 +1728,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1700 * its peer. 1728 * its peer.
1701 */ 1729 */
1702 if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) { 1730 if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
1703 disposition = sctp_sf_do_9_2_reshutack(ep, asoc, 1731 disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
1704 SCTP_ST_CHUNK(chunk->chunk_hdr->type), 1732 SCTP_ST_CHUNK(chunk->chunk_hdr->type),
1705 chunk, commands); 1733 chunk, commands);
1706 if (SCTP_DISPOSITION_NOMEM == disposition) 1734 if (SCTP_DISPOSITION_NOMEM == disposition)
@@ -1763,7 +1791,8 @@ nomem:
1763 * after responding to the local endpoint's INIT 1791 * after responding to the local endpoint's INIT
1764 */ 1792 */
1765/* This case represents an initialization collision. */ 1793/* This case represents an initialization collision. */
1766static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, 1794static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
1795 const struct sctp_endpoint *ep,
1767 const struct sctp_association *asoc, 1796 const struct sctp_association *asoc,
1768 struct sctp_chunk *chunk, 1797 struct sctp_chunk *chunk,
1769 sctp_cmd_seq_t *commands, 1798 sctp_cmd_seq_t *commands,
@@ -1784,7 +1813,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1784 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1813 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1785 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 1814 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1786 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 1815 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1787 SCTP_INC_STATS(SCTP_MIB_CURRESTAB); 1816 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
1788 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); 1817 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
1789 1818
1790 repl = sctp_make_cookie_ack(new_asoc, chunk); 1819 repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1833,7 +1862,8 @@ nomem:
1833 * but a new tag of its own. 1862 * but a new tag of its own.
1834 */ 1863 */
1835/* This case represents an initialization collision. */ 1864/* This case represents an initialization collision. */
1836static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep, 1865static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
1866 const struct sctp_endpoint *ep,
1837 const struct sctp_association *asoc, 1867 const struct sctp_association *asoc,
1838 struct sctp_chunk *chunk, 1868 struct sctp_chunk *chunk,
1839 sctp_cmd_seq_t *commands, 1869 sctp_cmd_seq_t *commands,
@@ -1854,7 +1884,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
1854 * enter the ESTABLISHED state, if it has not already done so. 1884 * enter the ESTABLISHED state, if it has not already done so.
1855 */ 1885 */
1856/* This case represents an initialization collision. */ 1886/* This case represents an initialization collision. */
1857static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, 1887static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
1888 const struct sctp_endpoint *ep,
1858 const struct sctp_association *asoc, 1889 const struct sctp_association *asoc,
1859 struct sctp_chunk *chunk, 1890 struct sctp_chunk *chunk,
1860 sctp_cmd_seq_t *commands, 1891 sctp_cmd_seq_t *commands,
@@ -1876,7 +1907,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1876 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); 1907 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
1877 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 1908 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1878 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 1909 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1879 SCTP_INC_STATS(SCTP_MIB_CURRESTAB); 1910 SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
1880 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, 1911 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
1881 SCTP_NULL()); 1912 SCTP_NULL());
1882 1913
@@ -1948,7 +1979,8 @@ nomem:
1948 * 1979 *
1949 * The return value is the disposition of the chunk. 1980 * The return value is the disposition of the chunk.
1950 */ 1981 */
1951sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep, 1982sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
1983 const struct sctp_endpoint *ep,
1952 const struct sctp_association *asoc, 1984 const struct sctp_association *asoc,
1953 const sctp_subtype_t type, 1985 const sctp_subtype_t type,
1954 void *arg, 1986 void *arg,
@@ -1967,7 +1999,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
1967 * done later. 1999 * done later.
1968 */ 2000 */
1969 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 2001 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
1970 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2002 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1971 commands); 2003 commands);
1972 2004
1973 /* "Decode" the chunk. We have no optional parameters so we 2005 /* "Decode" the chunk. We have no optional parameters so we
@@ -2001,12 +2033,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
2001 goto nomem; 2033 goto nomem;
2002 2034
2003 case -SCTP_IERROR_STALE_COOKIE: 2035 case -SCTP_IERROR_STALE_COOKIE:
2004 sctp_send_stale_cookie_err(ep, asoc, chunk, commands, 2036 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
2005 err_chk_p); 2037 err_chk_p);
2006 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2038 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2007 case -SCTP_IERROR_BAD_SIG: 2039 case -SCTP_IERROR_BAD_SIG:
2008 default: 2040 default:
2009 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2041 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2010 } 2042 }
2011 } 2043 }
2012 2044
@@ -2017,27 +2049,27 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
2017 2049
2018 switch (action) { 2050 switch (action) {
2019 case 'A': /* Association restart. */ 2051 case 'A': /* Association restart. */
2020 retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands, 2052 retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
2021 new_asoc); 2053 new_asoc);
2022 break; 2054 break;
2023 2055
2024 case 'B': /* Collision case B. */ 2056 case 'B': /* Collision case B. */
2025 retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands, 2057 retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
2026 new_asoc); 2058 new_asoc);
2027 break; 2059 break;
2028 2060
2029 case 'C': /* Collision case C. */ 2061 case 'C': /* Collision case C. */
2030 retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands, 2062 retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
2031 new_asoc); 2063 new_asoc);
2032 break; 2064 break;
2033 2065
2034 case 'D': /* Collision case D. */ 2066 case 'D': /* Collision case D. */
2035 retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands, 2067 retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
2036 new_asoc); 2068 new_asoc);
2037 break; 2069 break;
2038 2070
2039 default: /* Discard packet for all others. */ 2071 default: /* Discard packet for all others. */
2040 retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2072 retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2041 break; 2073 break;
2042 } 2074 }
2043 2075
@@ -2063,6 +2095,7 @@ nomem:
2063 * See sctp_sf_do_9_1_abort(). 2095 * See sctp_sf_do_9_1_abort().
2064 */ 2096 */
2065sctp_disposition_t sctp_sf_shutdown_pending_abort( 2097sctp_disposition_t sctp_sf_shutdown_pending_abort(
2098 struct net *net,
2066 const struct sctp_endpoint *ep, 2099 const struct sctp_endpoint *ep,
2067 const struct sctp_association *asoc, 2100 const struct sctp_association *asoc,
2068 const sctp_subtype_t type, 2101 const sctp_subtype_t type,
@@ -2072,7 +2105,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
2072 struct sctp_chunk *chunk = arg; 2105 struct sctp_chunk *chunk = arg;
2073 2106
2074 if (!sctp_vtag_verify_either(chunk, asoc)) 2107 if (!sctp_vtag_verify_either(chunk, asoc))
2075 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2108 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2076 2109
2077 /* Make sure that the ABORT chunk has a valid length. 2110 /* Make sure that the ABORT chunk has a valid length.
2078 * Since this is an ABORT chunk, we have to discard it 2111 * Since this is an ABORT chunk, we have to discard it
@@ -2085,7 +2118,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
2085 * packet. 2118 * packet.
2086 */ 2119 */
2087 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) 2120 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
2088 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2121 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2089 2122
2090 /* ADD-IP: Special case for ABORT chunks 2123 /* ADD-IP: Special case for ABORT chunks
2091 * F4) One special consideration is that ABORT Chunks arriving 2124 * F4) One special consideration is that ABORT Chunks arriving
@@ -2094,9 +2127,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
2094 */ 2127 */
2095 if (SCTP_ADDR_DEL == 2128 if (SCTP_ADDR_DEL ==
2096 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) 2129 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2097 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); 2130 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2098 2131
2099 return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands); 2132 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2100} 2133}
2101 2134
2102/* 2135/*
@@ -2104,7 +2137,8 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
2104 * 2137 *
2105 * See sctp_sf_do_9_1_abort(). 2138 * See sctp_sf_do_9_1_abort().
2106 */ 2139 */
2107sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep, 2140sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
2141 const struct sctp_endpoint *ep,
2108 const struct sctp_association *asoc, 2142 const struct sctp_association *asoc,
2109 const sctp_subtype_t type, 2143 const sctp_subtype_t type,
2110 void *arg, 2144 void *arg,
@@ -2113,7 +2147,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
2113 struct sctp_chunk *chunk = arg; 2147 struct sctp_chunk *chunk = arg;
2114 2148
2115 if (!sctp_vtag_verify_either(chunk, asoc)) 2149 if (!sctp_vtag_verify_either(chunk, asoc))
2116 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2150 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2117 2151
2118 /* Make sure that the ABORT chunk has a valid length. 2152 /* Make sure that the ABORT chunk has a valid length.
2119 * Since this is an ABORT chunk, we have to discard it 2153 * Since this is an ABORT chunk, we have to discard it
@@ -2126,7 +2160,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
2126 * packet. 2160 * packet.
2127 */ 2161 */
2128 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) 2162 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
2129 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2163 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2130 2164
2131 /* ADD-IP: Special case for ABORT chunks 2165 /* ADD-IP: Special case for ABORT chunks
2132 * F4) One special consideration is that ABORT Chunks arriving 2166 * F4) One special consideration is that ABORT Chunks arriving
@@ -2135,7 +2169,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
2135 */ 2169 */
2136 if (SCTP_ADDR_DEL == 2170 if (SCTP_ADDR_DEL ==
2137 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) 2171 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2138 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); 2172 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2139 2173
2140 /* Stop the T2-shutdown timer. */ 2174 /* Stop the T2-shutdown timer. */
2141 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 2175 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2145,7 +2179,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
2145 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 2179 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2146 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 2180 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
2147 2181
2148 return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands); 2182 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2149} 2183}
2150 2184
2151/* 2185/*
@@ -2154,6 +2188,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
2154 * See sctp_sf_do_9_1_abort(). 2188 * See sctp_sf_do_9_1_abort().
2155 */ 2189 */
2156sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( 2190sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
2191 struct net *net,
2157 const struct sctp_endpoint *ep, 2192 const struct sctp_endpoint *ep,
2158 const struct sctp_association *asoc, 2193 const struct sctp_association *asoc,
2159 const sctp_subtype_t type, 2194 const sctp_subtype_t type,
@@ -2163,7 +2198,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
2163 /* The same T2 timer, so we should be able to use 2198 /* The same T2 timer, so we should be able to use
2164 * common function with the SHUTDOWN-SENT state. 2199 * common function with the SHUTDOWN-SENT state.
2165 */ 2200 */
2166 return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands); 2201 return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
2167} 2202}
2168 2203
2169/* 2204/*
@@ -2180,7 +2215,8 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
2180 * 2215 *
2181 * The return value is the disposition of the chunk. 2216 * The return value is the disposition of the chunk.
2182 */ 2217 */
2183sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep, 2218sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
2219 const struct sctp_endpoint *ep,
2184 const struct sctp_association *asoc, 2220 const struct sctp_association *asoc,
2185 const sctp_subtype_t type, 2221 const sctp_subtype_t type,
2186 void *arg, 2222 void *arg,
@@ -2190,13 +2226,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
2190 sctp_errhdr_t *err; 2226 sctp_errhdr_t *err;
2191 2227
2192 if (!sctp_vtag_verify(chunk, asoc)) 2228 if (!sctp_vtag_verify(chunk, asoc))
2193 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2229 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2194 2230
2195 /* Make sure that the ERROR chunk has a valid length. 2231 /* Make sure that the ERROR chunk has a valid length.
2196 * The parameter walking depends on this as well. 2232 * The parameter walking depends on this as well.
2197 */ 2233 */
2198 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) 2234 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
2199 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2235 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2200 commands); 2236 commands);
2201 2237
2202 /* Process the error here */ 2238 /* Process the error here */
@@ -2206,7 +2242,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
2206 */ 2242 */
2207 sctp_walk_errors(err, chunk->chunk_hdr) { 2243 sctp_walk_errors(err, chunk->chunk_hdr) {
2208 if (SCTP_ERROR_STALE_COOKIE == err->cause) 2244 if (SCTP_ERROR_STALE_COOKIE == err->cause)
2209 return sctp_sf_do_5_2_6_stale(ep, asoc, type, 2245 return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
2210 arg, commands); 2246 arg, commands);
2211 } 2247 }
2212 2248
@@ -2215,7 +2251,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
2215 * we are discarding the packet, there should be no adverse 2251 * we are discarding the packet, there should be no adverse
2216 * affects. 2252 * affects.
2217 */ 2253 */
2218 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2254 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2219} 2255}
2220 2256
2221/* 2257/*
@@ -2243,7 +2279,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
2243 * 2279 *
2244 * The return value is the disposition of the chunk. 2280 * The return value is the disposition of the chunk.
2245 */ 2281 */
2246static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, 2282static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
2283 const struct sctp_endpoint *ep,
2247 const struct sctp_association *asoc, 2284 const struct sctp_association *asoc,
2248 const sctp_subtype_t type, 2285 const sctp_subtype_t type,
2249 void *arg, 2286 void *arg,
@@ -2365,7 +2402,8 @@ nomem:
2365 * 2402 *
2366 * The return value is the disposition of the chunk. 2403 * The return value is the disposition of the chunk.
2367 */ 2404 */
2368sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, 2405sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
2406 const struct sctp_endpoint *ep,
2369 const struct sctp_association *asoc, 2407 const struct sctp_association *asoc,
2370 const sctp_subtype_t type, 2408 const sctp_subtype_t type,
2371 void *arg, 2409 void *arg,
@@ -2374,7 +2412,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2374 struct sctp_chunk *chunk = arg; 2412 struct sctp_chunk *chunk = arg;
2375 2413
2376 if (!sctp_vtag_verify_either(chunk, asoc)) 2414 if (!sctp_vtag_verify_either(chunk, asoc))
2377 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2415 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2378 2416
2379 /* Make sure that the ABORT chunk has a valid length. 2417 /* Make sure that the ABORT chunk has a valid length.
2380 * Since this is an ABORT chunk, we have to discard it 2418 * Since this is an ABORT chunk, we have to discard it
@@ -2387,7 +2425,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2387 * packet. 2425 * packet.
2388 */ 2426 */
2389 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) 2427 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
2390 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2428 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2391 2429
2392 /* ADD-IP: Special case for ABORT chunks 2430 /* ADD-IP: Special case for ABORT chunks
2393 * F4) One special consideration is that ABORT Chunks arriving 2431 * F4) One special consideration is that ABORT Chunks arriving
@@ -2396,12 +2434,13 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2396 */ 2434 */
2397 if (SCTP_ADDR_DEL == 2435 if (SCTP_ADDR_DEL ==
2398 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) 2436 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2399 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); 2437 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2400 2438
2401 return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands); 2439 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2402} 2440}
2403 2441
2404static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, 2442static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
2443 const struct sctp_endpoint *ep,
2405 const struct sctp_association *asoc, 2444 const struct sctp_association *asoc,
2406 const sctp_subtype_t type, 2445 const sctp_subtype_t type,
2407 void *arg, 2446 void *arg,
@@ -2418,7 +2457,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2418 sctp_errhdr_t *err; 2457 sctp_errhdr_t *err;
2419 sctp_walk_errors(err, chunk->chunk_hdr); 2458 sctp_walk_errors(err, chunk->chunk_hdr);
2420 if ((void *)err != (void *)chunk->chunk_end) 2459 if ((void *)err != (void *)chunk->chunk_end)
2421 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2460 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2422 2461
2423 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2462 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2424 } 2463 }
@@ -2426,8 +2465,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2426 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); 2465 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2427 /* ASSOC_FAILED will DELETE_TCB. */ 2466 /* ASSOC_FAILED will DELETE_TCB. */
2428 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error)); 2467 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
2429 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 2468 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
2430 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 2469 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
2431 2470
2432 return SCTP_DISPOSITION_ABORT; 2471 return SCTP_DISPOSITION_ABORT;
2433} 2472}
@@ -2437,7 +2476,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2437 * 2476 *
2438 * See sctp_sf_do_9_1_abort() above. 2477 * See sctp_sf_do_9_1_abort() above.
2439 */ 2478 */
2440sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, 2479sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
2480 const struct sctp_endpoint *ep,
2441 const struct sctp_association *asoc, 2481 const struct sctp_association *asoc,
2442 const sctp_subtype_t type, 2482 const sctp_subtype_t type,
2443 void *arg, 2483 void *arg,
@@ -2448,7 +2488,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
2448 __be16 error = SCTP_ERROR_NO_ERROR; 2488 __be16 error = SCTP_ERROR_NO_ERROR;
2449 2489
2450 if (!sctp_vtag_verify_either(chunk, asoc)) 2490 if (!sctp_vtag_verify_either(chunk, asoc))
2451 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2491 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2452 2492
2453 /* Make sure that the ABORT chunk has a valid length. 2493 /* Make sure that the ABORT chunk has a valid length.
2454 * Since this is an ABORT chunk, we have to discard it 2494 * Since this is an ABORT chunk, we have to discard it
@@ -2461,27 +2501,28 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
2461 * packet. 2501 * packet.
2462 */ 2502 */
2463 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) 2503 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
2464 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2504 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2465 2505
2466 /* See if we have an error cause code in the chunk. */ 2506 /* See if we have an error cause code in the chunk. */
2467 len = ntohs(chunk->chunk_hdr->length); 2507 len = ntohs(chunk->chunk_hdr->length);
2468 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2508 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2469 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2509 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2470 2510
2471 return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc, 2511 return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
2472 chunk->transport); 2512 chunk->transport);
2473} 2513}
2474 2514
2475/* 2515/*
2476 * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state) 2516 * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state)
2477 */ 2517 */
2478sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep, 2518sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
2519 const struct sctp_endpoint *ep,
2479 const struct sctp_association *asoc, 2520 const struct sctp_association *asoc,
2480 const sctp_subtype_t type, 2521 const sctp_subtype_t type,
2481 void *arg, 2522 void *arg,
2482 sctp_cmd_seq_t *commands) 2523 sctp_cmd_seq_t *commands)
2483{ 2524{
2484 return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, 2525 return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
2485 ENOPROTOOPT, asoc, 2526 ENOPROTOOPT, asoc,
2486 (struct sctp_transport *)arg); 2527 (struct sctp_transport *)arg);
2487} 2528}
@@ -2489,7 +2530,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
2489/* 2530/*
2490 * Process an ABORT. (COOKIE-ECHOED state) 2531 * Process an ABORT. (COOKIE-ECHOED state)
2491 */ 2532 */
2492sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep, 2533sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
2534 const struct sctp_endpoint *ep,
2493 const struct sctp_association *asoc, 2535 const struct sctp_association *asoc,
2494 const sctp_subtype_t type, 2536 const sctp_subtype_t type,
2495 void *arg, 2537 void *arg,
@@ -2498,7 +2540,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
2498 /* There is a single T1 timer, so we should be able to use 2540 /* There is a single T1 timer, so we should be able to use
2499 * common function with the COOKIE-WAIT state. 2541 * common function with the COOKIE-WAIT state.
2500 */ 2542 */
2501 return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands); 2543 return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
2502} 2544}
2503 2545
2504/* 2546/*
@@ -2506,7 +2548,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
2506 * 2548 *
2507 * This is common code called by several sctp_sf_*_abort() functions above. 2549 * This is common code called by several sctp_sf_*_abort() functions above.
2508 */ 2550 */
2509static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 2551static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
2552 sctp_cmd_seq_t *commands,
2510 __be16 error, int sk_err, 2553 __be16 error, int sk_err,
2511 const struct sctp_association *asoc, 2554 const struct sctp_association *asoc,
2512 struct sctp_transport *transport) 2555 struct sctp_transport *transport)
@@ -2514,7 +2557,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
2514 SCTP_DEBUG_PRINTK("ABORT received (INIT).\n"); 2557 SCTP_DEBUG_PRINTK("ABORT received (INIT).\n");
2515 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 2558 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
2516 SCTP_STATE(SCTP_STATE_CLOSED)); 2559 SCTP_STATE(SCTP_STATE_CLOSED));
2517 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 2560 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
2518 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 2561 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2519 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 2562 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
2520 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err)); 2563 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
@@ -2557,7 +2600,8 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
2557 * 2600 *
2558 * The return value is the disposition of the chunk. 2601 * The return value is the disposition of the chunk.
2559 */ 2602 */
2560sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, 2603sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
2604 const struct sctp_endpoint *ep,
2561 const struct sctp_association *asoc, 2605 const struct sctp_association *asoc,
2562 const sctp_subtype_t type, 2606 const sctp_subtype_t type,
2563 void *arg, 2607 void *arg,
@@ -2570,12 +2614,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2570 __u32 ctsn; 2614 __u32 ctsn;
2571 2615
2572 if (!sctp_vtag_verify(chunk, asoc)) 2616 if (!sctp_vtag_verify(chunk, asoc))
2573 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2617 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2574 2618
2575 /* Make sure that the SHUTDOWN chunk has a valid length. */ 2619 /* Make sure that the SHUTDOWN chunk has a valid length. */
2576 if (!sctp_chunk_length_valid(chunk, 2620 if (!sctp_chunk_length_valid(chunk,
2577 sizeof(struct sctp_shutdown_chunk_t))) 2621 sizeof(struct sctp_shutdown_chunk_t)))
2578 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2622 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2579 commands); 2623 commands);
2580 2624
2581 /* Convert the elaborate header. */ 2625 /* Convert the elaborate header. */
@@ -2595,7 +2639,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2595 * sender with an ABORT. 2639 * sender with an ABORT.
2596 */ 2640 */
2597 if (!TSN_lt(ctsn, asoc->next_tsn)) 2641 if (!TSN_lt(ctsn, asoc->next_tsn))
2598 return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); 2642 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
2599 2643
2600 /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT 2644 /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
2601 * When a peer sends a SHUTDOWN, SCTP delivers this notification to 2645 * When a peer sends a SHUTDOWN, SCTP delivers this notification to
@@ -2619,7 +2663,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
2619 disposition = SCTP_DISPOSITION_CONSUME; 2663 disposition = SCTP_DISPOSITION_CONSUME;
2620 2664
2621 if (sctp_outq_is_empty(&asoc->outqueue)) { 2665 if (sctp_outq_is_empty(&asoc->outqueue)) {
2622 disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type, 2666 disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
2623 arg, commands); 2667 arg, commands);
2624 } 2668 }
2625 2669
@@ -2645,7 +2689,8 @@ out:
2645 * The Cumulative TSN Ack of the received SHUTDOWN chunk 2689 * The Cumulative TSN Ack of the received SHUTDOWN chunk
2646 * MUST be processed. 2690 * MUST be processed.
2647 */ 2691 */
2648sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep, 2692sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
2693 const struct sctp_endpoint *ep,
2649 const struct sctp_association *asoc, 2694 const struct sctp_association *asoc,
2650 const sctp_subtype_t type, 2695 const sctp_subtype_t type,
2651 void *arg, 2696 void *arg,
@@ -2656,12 +2701,12 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
2656 __u32 ctsn; 2701 __u32 ctsn;
2657 2702
2658 if (!sctp_vtag_verify(chunk, asoc)) 2703 if (!sctp_vtag_verify(chunk, asoc))
2659 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2704 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2660 2705
2661 /* Make sure that the SHUTDOWN chunk has a valid length. */ 2706 /* Make sure that the SHUTDOWN chunk has a valid length. */
2662 if (!sctp_chunk_length_valid(chunk, 2707 if (!sctp_chunk_length_valid(chunk,
2663 sizeof(struct sctp_shutdown_chunk_t))) 2708 sizeof(struct sctp_shutdown_chunk_t)))
2664 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2709 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2665 commands); 2710 commands);
2666 2711
2667 sdh = (sctp_shutdownhdr_t *)chunk->skb->data; 2712 sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
@@ -2678,7 +2723,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
2678 * sender with an ABORT. 2723 * sender with an ABORT.
2679 */ 2724 */
2680 if (!TSN_lt(ctsn, asoc->next_tsn)) 2725 if (!TSN_lt(ctsn, asoc->next_tsn))
2681 return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); 2726 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
2682 2727
2683 /* verify, by checking the Cumulative TSN Ack field of the 2728 /* verify, by checking the Cumulative TSN Ack field of the
2684 * chunk, that all its outstanding DATA chunks have been 2729 * chunk, that all its outstanding DATA chunks have been
@@ -2697,7 +2742,8 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
2697 * that belong to this association, it should discard the INIT chunk and 2742 * that belong to this association, it should discard the INIT chunk and
2698 * retransmit the SHUTDOWN ACK chunk. 2743 * retransmit the SHUTDOWN ACK chunk.
2699 */ 2744 */
2700sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep, 2745sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
2746 const struct sctp_endpoint *ep,
2701 const struct sctp_association *asoc, 2747 const struct sctp_association *asoc,
2702 const sctp_subtype_t type, 2748 const sctp_subtype_t type,
2703 void *arg, 2749 void *arg,
@@ -2708,7 +2754,7 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
2708 2754
2709 /* Make sure that the chunk has a valid length */ 2755 /* Make sure that the chunk has a valid length */
2710 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 2756 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
2711 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2757 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2712 commands); 2758 commands);
2713 2759
2714 /* Since we are not going to really process this INIT, there 2760 /* Since we are not going to really process this INIT, there
@@ -2760,7 +2806,8 @@ nomem:
2760 * 2806 *
2761 * The return value is the disposition of the chunk. 2807 * The return value is the disposition of the chunk.
2762 */ 2808 */
2763sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep, 2809sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
2810 const struct sctp_endpoint *ep,
2764 const struct sctp_association *asoc, 2811 const struct sctp_association *asoc,
2765 const sctp_subtype_t type, 2812 const sctp_subtype_t type,
2766 void *arg, 2813 void *arg,
@@ -2771,10 +2818,10 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
2771 u32 lowest_tsn; 2818 u32 lowest_tsn;
2772 2819
2773 if (!sctp_vtag_verify(chunk, asoc)) 2820 if (!sctp_vtag_verify(chunk, asoc))
2774 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2821 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2775 2822
2776 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) 2823 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
2777 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2824 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2778 commands); 2825 commands);
2779 2826
2780 cwr = (sctp_cwrhdr_t *) chunk->skb->data; 2827 cwr = (sctp_cwrhdr_t *) chunk->skb->data;
@@ -2815,7 +2862,8 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
2815 * 2862 *
2816 * The return value is the disposition of the chunk. 2863 * The return value is the disposition of the chunk.
2817 */ 2864 */
2818sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep, 2865sctp_disposition_t sctp_sf_do_ecne(struct net *net,
2866 const struct sctp_endpoint *ep,
2819 const struct sctp_association *asoc, 2867 const struct sctp_association *asoc,
2820 const sctp_subtype_t type, 2868 const sctp_subtype_t type,
2821 void *arg, 2869 void *arg,
@@ -2825,10 +2873,10 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
2825 struct sctp_chunk *chunk = arg; 2873 struct sctp_chunk *chunk = arg;
2826 2874
2827 if (!sctp_vtag_verify(chunk, asoc)) 2875 if (!sctp_vtag_verify(chunk, asoc))
2828 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2876 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2829 2877
2830 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) 2878 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
2831 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2879 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2832 commands); 2880 commands);
2833 2881
2834 ecne = (sctp_ecnehdr_t *) chunk->skb->data; 2882 ecne = (sctp_ecnehdr_t *) chunk->skb->data;
@@ -2871,7 +2919,8 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
2871 * 2919 *
2872 * The return value is the disposition of the chunk. 2920 * The return value is the disposition of the chunk.
2873 */ 2921 */
2874sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep, 2922sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
2923 const struct sctp_endpoint *ep,
2875 const struct sctp_association *asoc, 2924 const struct sctp_association *asoc,
2876 const sctp_subtype_t type, 2925 const sctp_subtype_t type,
2877 void *arg, 2926 void *arg,
@@ -2884,11 +2933,11 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2884 if (!sctp_vtag_verify(chunk, asoc)) { 2933 if (!sctp_vtag_verify(chunk, asoc)) {
2885 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 2934 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
2886 SCTP_NULL()); 2935 SCTP_NULL());
2887 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 2936 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2888 } 2937 }
2889 2938
2890 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) 2939 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
2891 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 2940 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
2892 commands); 2941 commands);
2893 2942
2894 error = sctp_eat_data(asoc, chunk, commands ); 2943 error = sctp_eat_data(asoc, chunk, commands );
@@ -2897,16 +2946,16 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
2897 break; 2946 break;
2898 case SCTP_IERROR_HIGH_TSN: 2947 case SCTP_IERROR_HIGH_TSN:
2899 case SCTP_IERROR_BAD_STREAM: 2948 case SCTP_IERROR_BAD_STREAM:
2900 SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS); 2949 SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
2901 goto discard_noforce; 2950 goto discard_noforce;
2902 case SCTP_IERROR_DUP_TSN: 2951 case SCTP_IERROR_DUP_TSN:
2903 case SCTP_IERROR_IGNORE_TSN: 2952 case SCTP_IERROR_IGNORE_TSN:
2904 SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS); 2953 SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
2905 goto discard_force; 2954 goto discard_force;
2906 case SCTP_IERROR_NO_DATA: 2955 case SCTP_IERROR_NO_DATA:
2907 goto consume; 2956 goto consume;
2908 case SCTP_IERROR_PROTO_VIOLATION: 2957 case SCTP_IERROR_PROTO_VIOLATION:
2909 return sctp_sf_abort_violation(ep, asoc, chunk, commands, 2958 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
2910 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); 2959 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
2911 default: 2960 default:
2912 BUG(); 2961 BUG();
@@ -2992,7 +3041,8 @@ consume:
2992 * 3041 *
2993 * The return value is the disposition of the chunk. 3042 * The return value is the disposition of the chunk.
2994 */ 3043 */
2995sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, 3044sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
3045 const struct sctp_endpoint *ep,
2996 const struct sctp_association *asoc, 3046 const struct sctp_association *asoc,
2997 const sctp_subtype_t type, 3047 const sctp_subtype_t type,
2998 void *arg, 3048 void *arg,
@@ -3004,11 +3054,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
3004 if (!sctp_vtag_verify(chunk, asoc)) { 3054 if (!sctp_vtag_verify(chunk, asoc)) {
3005 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3055 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3006 SCTP_NULL()); 3056 SCTP_NULL());
3007 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3057 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3008 } 3058 }
3009 3059
3010 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) 3060 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
3011 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3061 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3012 commands); 3062 commands);
3013 3063
3014 error = sctp_eat_data(asoc, chunk, commands ); 3064 error = sctp_eat_data(asoc, chunk, commands );
@@ -3022,7 +3072,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
3022 case SCTP_IERROR_NO_DATA: 3072 case SCTP_IERROR_NO_DATA:
3023 goto consume; 3073 goto consume;
3024 case SCTP_IERROR_PROTO_VIOLATION: 3074 case SCTP_IERROR_PROTO_VIOLATION:
3025 return sctp_sf_abort_violation(ep, asoc, chunk, commands, 3075 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
3026 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); 3076 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
3027 default: 3077 default:
3028 BUG(); 3078 BUG();
@@ -3082,7 +3132,8 @@ consume:
3082 * 3132 *
3083 * The return value is the disposition of the chunk. 3133 * The return value is the disposition of the chunk.
3084 */ 3134 */
3085sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep, 3135sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
3136 const struct sctp_endpoint *ep,
3086 const struct sctp_association *asoc, 3137 const struct sctp_association *asoc,
3087 const sctp_subtype_t type, 3138 const sctp_subtype_t type,
3088 void *arg, 3139 void *arg,
@@ -3093,18 +3144,18 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
3093 __u32 ctsn; 3144 __u32 ctsn;
3094 3145
3095 if (!sctp_vtag_verify(chunk, asoc)) 3146 if (!sctp_vtag_verify(chunk, asoc))
3096 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3147 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3097 3148
3098 /* Make sure that the SACK chunk has a valid length. */ 3149 /* Make sure that the SACK chunk has a valid length. */
3099 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t))) 3150 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
3100 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3151 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3101 commands); 3152 commands);
3102 3153
3103 /* Pull the SACK chunk from the data buffer */ 3154 /* Pull the SACK chunk from the data buffer */
3104 sackh = sctp_sm_pull_sack(chunk); 3155 sackh = sctp_sm_pull_sack(chunk);
3105 /* Was this a bogus SACK? */ 3156 /* Was this a bogus SACK? */
3106 if (!sackh) 3157 if (!sackh)
3107 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3158 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3108 chunk->subh.sack_hdr = sackh; 3159 chunk->subh.sack_hdr = sackh;
3109 ctsn = ntohl(sackh->cum_tsn_ack); 3160 ctsn = ntohl(sackh->cum_tsn_ack);
3110 3161
@@ -3125,7 +3176,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
3125 * sender with an ABORT. 3176 * sender with an ABORT.
3126 */ 3177 */
3127 if (!TSN_lt(ctsn, asoc->next_tsn)) 3178 if (!TSN_lt(ctsn, asoc->next_tsn))
3128 return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); 3179 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
3129 3180
3130 /* Return this SACK for further processing. */ 3181 /* Return this SACK for further processing. */
3131 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh)); 3182 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
@@ -3154,7 +3205,8 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
3154 * 3205 *
3155 * The return value is the disposition of the chunk. 3206 * The return value is the disposition of the chunk.
3156*/ 3207*/
3157static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, 3208static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
3209 const struct sctp_endpoint *ep,
3158 const struct sctp_association *asoc, 3210 const struct sctp_association *asoc,
3159 const sctp_subtype_t type, 3211 const sctp_subtype_t type,
3160 void *arg, 3212 void *arg,
@@ -3164,7 +3216,7 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
3164 struct sctp_chunk *chunk = arg; 3216 struct sctp_chunk *chunk = arg;
3165 struct sctp_chunk *abort; 3217 struct sctp_chunk *abort;
3166 3218
3167 packet = sctp_ootb_pkt_new(asoc, chunk); 3219 packet = sctp_ootb_pkt_new(net, asoc, chunk);
3168 3220
3169 if (packet) { 3221 if (packet) {
3170 /* Make an ABORT. The T bit will be set if the asoc 3222 /* Make an ABORT. The T bit will be set if the asoc
@@ -3188,9 +3240,9 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
3188 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 3240 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3189 SCTP_PACKET(packet)); 3241 SCTP_PACKET(packet));
3190 3242
3191 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 3243 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
3192 3244
3193 sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3245 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3194 return SCTP_DISPOSITION_CONSUME; 3246 return SCTP_DISPOSITION_CONSUME;
3195 } 3247 }
3196 3248
@@ -3205,7 +3257,8 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
3205 * 3257 *
3206 * The return value is the disposition of the chunk. 3258 * The return value is the disposition of the chunk.
3207*/ 3259*/
3208sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep, 3260sctp_disposition_t sctp_sf_operr_notify(struct net *net,
3261 const struct sctp_endpoint *ep,
3209 const struct sctp_association *asoc, 3262 const struct sctp_association *asoc,
3210 const sctp_subtype_t type, 3263 const sctp_subtype_t type,
3211 void *arg, 3264 void *arg,
@@ -3215,15 +3268,15 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3215 sctp_errhdr_t *err; 3268 sctp_errhdr_t *err;
3216 3269
3217 if (!sctp_vtag_verify(chunk, asoc)) 3270 if (!sctp_vtag_verify(chunk, asoc))
3218 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3271 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3219 3272
3220 /* Make sure that the ERROR chunk has a valid length. */ 3273 /* Make sure that the ERROR chunk has a valid length. */
3221 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) 3274 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
3222 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3275 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3223 commands); 3276 commands);
3224 sctp_walk_errors(err, chunk->chunk_hdr); 3277 sctp_walk_errors(err, chunk->chunk_hdr);
3225 if ((void *)err != (void *)chunk->chunk_end) 3278 if ((void *)err != (void *)chunk->chunk_end)
3226 return sctp_sf_violation_paramlen(ep, asoc, type, arg, 3279 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3227 (void *)err, commands); 3280 (void *)err, commands);
3228 3281
3229 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3282 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
@@ -3242,7 +3295,8 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3242 * 3295 *
3243 * The return value is the disposition. 3296 * The return value is the disposition.
3244 */ 3297 */
3245sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep, 3298sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
3299 const struct sctp_endpoint *ep,
3246 const struct sctp_association *asoc, 3300 const struct sctp_association *asoc,
3247 const sctp_subtype_t type, 3301 const sctp_subtype_t type,
3248 void *arg, 3302 void *arg,
@@ -3253,11 +3307,11 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
3253 struct sctp_ulpevent *ev; 3307 struct sctp_ulpevent *ev;
3254 3308
3255 if (!sctp_vtag_verify(chunk, asoc)) 3309 if (!sctp_vtag_verify(chunk, asoc))
3256 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3310 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3257 3311
3258 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ 3312 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3259 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 3313 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3260 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3314 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3261 commands); 3315 commands);
3262 /* 10.2 H) SHUTDOWN COMPLETE notification 3316 /* 10.2 H) SHUTDOWN COMPLETE notification
3263 * 3317 *
@@ -3290,8 +3344,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
3290 3344
3291 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 3345 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
3292 SCTP_STATE(SCTP_STATE_CLOSED)); 3346 SCTP_STATE(SCTP_STATE_CLOSED));
3293 SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); 3347 SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
3294 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3348 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
3295 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); 3349 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
3296 3350
3297 /* ...and remove all record of the association. */ 3351 /* ...and remove all record of the association. */
@@ -3324,7 +3378,8 @@ nomem:
3324 * receiver of the OOTB packet shall discard the OOTB packet and take 3378 * receiver of the OOTB packet shall discard the OOTB packet and take
3325 * no further action. 3379 * no further action.
3326 */ 3380 */
3327sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, 3381sctp_disposition_t sctp_sf_ootb(struct net *net,
3382 const struct sctp_endpoint *ep,
3328 const struct sctp_association *asoc, 3383 const struct sctp_association *asoc,
3329 const sctp_subtype_t type, 3384 const sctp_subtype_t type,
3330 void *arg, 3385 void *arg,
@@ -3338,13 +3393,13 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3338 int ootb_shut_ack = 0; 3393 int ootb_shut_ack = 0;
3339 int ootb_cookie_ack = 0; 3394 int ootb_cookie_ack = 0;
3340 3395
3341 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 3396 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
3342 3397
3343 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; 3398 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
3344 do { 3399 do {
3345 /* Report violation if the chunk is less then minimal */ 3400 /* Report violation if the chunk is less then minimal */
3346 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) 3401 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
3347 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3402 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3348 commands); 3403 commands);
3349 3404
3350 /* Now that we know we at least have a chunk header, 3405 /* Now that we know we at least have a chunk header,
@@ -3359,7 +3414,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3359 * sending an ABORT of its own. 3414 * sending an ABORT of its own.
3360 */ 3415 */
3361 if (SCTP_CID_ABORT == ch->type) 3416 if (SCTP_CID_ABORT == ch->type)
3362 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3417 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3363 3418
3364 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR 3419 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
3365 * or a COOKIE ACK the SCTP Packet should be silently 3420 * or a COOKIE ACK the SCTP Packet should be silently
@@ -3381,18 +3436,18 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3381 /* Report violation if chunk len overflows */ 3436 /* Report violation if chunk len overflows */
3382 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3437 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3383 if (ch_end > skb_tail_pointer(skb)) 3438 if (ch_end > skb_tail_pointer(skb))
3384 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3439 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3385 commands); 3440 commands);
3386 3441
3387 ch = (sctp_chunkhdr_t *) ch_end; 3442 ch = (sctp_chunkhdr_t *) ch_end;
3388 } while (ch_end < skb_tail_pointer(skb)); 3443 } while (ch_end < skb_tail_pointer(skb));
3389 3444
3390 if (ootb_shut_ack) 3445 if (ootb_shut_ack)
3391 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3446 return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
3392 else if (ootb_cookie_ack) 3447 else if (ootb_cookie_ack)
3393 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3448 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3394 else 3449 else
3395 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3450 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
3396} 3451}
3397 3452
3398/* 3453/*
@@ -3416,7 +3471,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3416 * 3471 *
3417 * The return value is the disposition of the chunk. 3472 * The return value is the disposition of the chunk.
3418 */ 3473 */
3419static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, 3474static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
3475 const struct sctp_endpoint *ep,
3420 const struct sctp_association *asoc, 3476 const struct sctp_association *asoc,
3421 const sctp_subtype_t type, 3477 const sctp_subtype_t type,
3422 void *arg, 3478 void *arg,
@@ -3426,7 +3482,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3426 struct sctp_chunk *chunk = arg; 3482 struct sctp_chunk *chunk = arg;
3427 struct sctp_chunk *shut; 3483 struct sctp_chunk *shut;
3428 3484
3429 packet = sctp_ootb_pkt_new(asoc, chunk); 3485 packet = sctp_ootb_pkt_new(net, asoc, chunk);
3430 3486
3431 if (packet) { 3487 if (packet) {
3432 /* Make an SHUTDOWN_COMPLETE. 3488 /* Make an SHUTDOWN_COMPLETE.
@@ -3450,19 +3506,19 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3450 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 3506 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3451 SCTP_PACKET(packet)); 3507 SCTP_PACKET(packet));
3452 3508
3453 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 3509 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
3454 3510
3455 /* If the chunk length is invalid, we don't want to process 3511 /* If the chunk length is invalid, we don't want to process
3456 * the reset of the packet. 3512 * the reset of the packet.
3457 */ 3513 */
3458 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 3514 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3459 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3515 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3460 3516
3461 /* We need to discard the rest of the packet to prevent 3517 /* We need to discard the rest of the packet to prevent
3462 * potential bomming attacks from additional bundled chunks. 3518 * potential bomming attacks from additional bundled chunks.
3463 * This is documented in SCTP Threats ID. 3519 * This is documented in SCTP Threats ID.
3464 */ 3520 */
3465 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3521 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3466 } 3522 }
3467 3523
3468 return SCTP_DISPOSITION_NOMEM; 3524 return SCTP_DISPOSITION_NOMEM;
@@ -3479,7 +3535,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3479 * chunks. --piggy ] 3535 * chunks. --piggy ]
3480 * 3536 *
3481 */ 3537 */
3482sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep, 3538sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
3539 const struct sctp_endpoint *ep,
3483 const struct sctp_association *asoc, 3540 const struct sctp_association *asoc,
3484 const sctp_subtype_t type, 3541 const sctp_subtype_t type,
3485 void *arg, 3542 void *arg,
@@ -3489,7 +3546,7 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
3489 3546
3490 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ 3547 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3491 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 3548 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3492 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3549 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3493 commands); 3550 commands);
3494 3551
3495 /* Although we do have an association in this case, it corresponds 3552 /* Although we do have an association in this case, it corresponds
@@ -3497,13 +3554,14 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
3497 * packet and the state function that handles OOTB SHUTDOWN_ACK is 3554 * packet and the state function that handles OOTB SHUTDOWN_ACK is
3498 * called with a NULL association. 3555 * called with a NULL association.
3499 */ 3556 */
3500 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 3557 SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
3501 3558
3502 return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands); 3559 return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
3503} 3560}
3504 3561
3505/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */ 3562/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
3506sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, 3563sctp_disposition_t sctp_sf_do_asconf(struct net *net,
3564 const struct sctp_endpoint *ep,
3507 const struct sctp_association *asoc, 3565 const struct sctp_association *asoc,
3508 const sctp_subtype_t type, void *arg, 3566 const sctp_subtype_t type, void *arg,
3509 sctp_cmd_seq_t *commands) 3567 sctp_cmd_seq_t *commands)
@@ -3519,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3519 if (!sctp_vtag_verify(chunk, asoc)) { 3577 if (!sctp_vtag_verify(chunk, asoc)) {
3520 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3578 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3521 SCTP_NULL()); 3579 SCTP_NULL());
3522 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3580 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3523 } 3581 }
3524 3582
3525 /* ADD-IP: Section 4.1.1 3583 /* ADD-IP: Section 4.1.1
@@ -3528,12 +3586,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3528 * is received unauthenticated it MUST be silently discarded as 3586 * is received unauthenticated it MUST be silently discarded as
3529 * described in [I-D.ietf-tsvwg-sctp-auth]. 3587 * described in [I-D.ietf-tsvwg-sctp-auth].
3530 */ 3588 */
3531 if (!sctp_addip_noauth && !chunk->auth) 3589 if (!net->sctp.addip_noauth && !chunk->auth)
3532 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); 3590 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
3533 3591
3534 /* Make sure that the ASCONF ADDIP chunk has a valid length. */ 3592 /* Make sure that the ASCONF ADDIP chunk has a valid length. */
3535 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) 3593 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
3536 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3594 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3537 commands); 3595 commands);
3538 3596
3539 hdr = (sctp_addiphdr_t *)chunk->skb->data; 3597 hdr = (sctp_addiphdr_t *)chunk->skb->data;
@@ -3542,7 +3600,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3542 addr_param = (union sctp_addr_param *)hdr->params; 3600 addr_param = (union sctp_addr_param *)hdr->params;
3543 length = ntohs(addr_param->p.length); 3601 length = ntohs(addr_param->p.length);
3544 if (length < sizeof(sctp_paramhdr_t)) 3602 if (length < sizeof(sctp_paramhdr_t))
3545 return sctp_sf_violation_paramlen(ep, asoc, type, arg, 3603 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3546 (void *)addr_param, commands); 3604 (void *)addr_param, commands);
3547 3605
3548 /* Verify the ASCONF chunk before processing it. */ 3606 /* Verify the ASCONF chunk before processing it. */
@@ -3550,7 +3608,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3550 (sctp_paramhdr_t *)((void *)addr_param + length), 3608 (sctp_paramhdr_t *)((void *)addr_param + length),
3551 (void *)chunk->chunk_end, 3609 (void *)chunk->chunk_end,
3552 &err_param)) 3610 &err_param))
3553 return sctp_sf_violation_paramlen(ep, asoc, type, arg, 3611 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3554 (void *)err_param, commands); 3612 (void *)err_param, commands);
3555 3613
3556 /* ADDIP 5.2 E1) Compare the value of the serial number to the value 3614 /* ADDIP 5.2 E1) Compare the value of the serial number to the value
@@ -3630,7 +3688,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3630 * When building TLV parameters for the ASCONF Chunk that will add or 3688 * When building TLV parameters for the ASCONF Chunk that will add or
3631 * delete IP addresses the D0 to D13 rules should be applied: 3689 * delete IP addresses the D0 to D13 rules should be applied:
3632 */ 3690 */
3633sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, 3691sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
3692 const struct sctp_endpoint *ep,
3634 const struct sctp_association *asoc, 3693 const struct sctp_association *asoc,
3635 const sctp_subtype_t type, void *arg, 3694 const sctp_subtype_t type, void *arg,
3636 sctp_cmd_seq_t *commands) 3695 sctp_cmd_seq_t *commands)
@@ -3645,7 +3704,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3645 if (!sctp_vtag_verify(asconf_ack, asoc)) { 3704 if (!sctp_vtag_verify(asconf_ack, asoc)) {
3646 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3705 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3647 SCTP_NULL()); 3706 SCTP_NULL());
3648 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3707 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3649 } 3708 }
3650 3709
3651 /* ADD-IP, Section 4.1.2: 3710 /* ADD-IP, Section 4.1.2:
@@ -3654,12 +3713,12 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3654 * is received unauthenticated it MUST be silently discarded as 3713 * is received unauthenticated it MUST be silently discarded as
3655 * described in [I-D.ietf-tsvwg-sctp-auth]. 3714 * described in [I-D.ietf-tsvwg-sctp-auth].
3656 */ 3715 */
3657 if (!sctp_addip_noauth && !asconf_ack->auth) 3716 if (!net->sctp.addip_noauth && !asconf_ack->auth)
3658 return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); 3717 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
3659 3718
3660 /* Make sure that the ADDIP chunk has a valid length. */ 3719 /* Make sure that the ADDIP chunk has a valid length. */
3661 if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t))) 3720 if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
3662 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3721 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3663 commands); 3722 commands);
3664 3723
3665 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; 3724 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
@@ -3670,7 +3729,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3670 (sctp_paramhdr_t *)addip_hdr->params, 3729 (sctp_paramhdr_t *)addip_hdr->params,
3671 (void *)asconf_ack->chunk_end, 3730 (void *)asconf_ack->chunk_end,
3672 &err_param)) 3731 &err_param))
3673 return sctp_sf_violation_paramlen(ep, asoc, type, arg, 3732 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
3674 (void *)err_param, commands); 3733 (void *)err_param, commands);
3675 3734
3676 if (last_asconf) { 3735 if (last_asconf) {
@@ -3705,8 +3764,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3705 SCTP_ERROR(ECONNABORTED)); 3764 SCTP_ERROR(ECONNABORTED));
3706 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3765 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3707 SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); 3766 SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
3708 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3767 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
3709 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3768 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
3710 return SCTP_DISPOSITION_ABORT; 3769 return SCTP_DISPOSITION_ABORT;
3711 } 3770 }
3712 3771
@@ -3739,8 +3798,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3739 SCTP_ERROR(ECONNABORTED)); 3798 SCTP_ERROR(ECONNABORTED));
3740 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3799 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3741 SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); 3800 SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
3742 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3801 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
3743 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3802 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
3744 return SCTP_DISPOSITION_ABORT; 3803 return SCTP_DISPOSITION_ABORT;
3745 } 3804 }
3746 3805
@@ -3761,7 +3820,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3761 * 3820 *
3762 * The return value is the disposition of the chunk. 3821 * The return value is the disposition of the chunk.
3763 */ 3822 */
3764sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep, 3823sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
3824 const struct sctp_endpoint *ep,
3765 const struct sctp_association *asoc, 3825 const struct sctp_association *asoc,
3766 const sctp_subtype_t type, 3826 const sctp_subtype_t type,
3767 void *arg, 3827 void *arg,
@@ -3776,12 +3836,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
3776 if (!sctp_vtag_verify(chunk, asoc)) { 3836 if (!sctp_vtag_verify(chunk, asoc)) {
3777 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3837 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3778 SCTP_NULL()); 3838 SCTP_NULL());
3779 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3839 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3780 } 3840 }
3781 3841
3782 /* Make sure that the FORWARD_TSN chunk has valid length. */ 3842 /* Make sure that the FORWARD_TSN chunk has valid length. */
3783 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) 3843 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
3784 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3844 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3785 commands); 3845 commands);
3786 3846
3787 fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; 3847 fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3828,6 +3888,7 @@ discard_noforce:
3828} 3888}
3829 3889
3830sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( 3890sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
3891 struct net *net,
3831 const struct sctp_endpoint *ep, 3892 const struct sctp_endpoint *ep,
3832 const struct sctp_association *asoc, 3893 const struct sctp_association *asoc,
3833 const sctp_subtype_t type, 3894 const sctp_subtype_t type,
@@ -3843,12 +3904,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
3843 if (!sctp_vtag_verify(chunk, asoc)) { 3904 if (!sctp_vtag_verify(chunk, asoc)) {
3844 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3905 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
3845 SCTP_NULL()); 3906 SCTP_NULL());
3846 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3907 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
3847 } 3908 }
3848 3909
3849 /* Make sure that the FORWARD_TSN chunk has a valid length. */ 3910 /* Make sure that the FORWARD_TSN chunk has a valid length. */
3850 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) 3911 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
3851 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3912 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
3852 commands); 3913 commands);
3853 3914
3854 fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; 3915 fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3915,7 +3976,8 @@ gen_shutdown:
3915 * 3976 *
3916 * The return value is the disposition of the chunk. 3977 * The return value is the disposition of the chunk.
3917 */ 3978 */
3918static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep, 3979static sctp_ierror_t sctp_sf_authenticate(struct net *net,
3980 const struct sctp_endpoint *ep,
3919 const struct sctp_association *asoc, 3981 const struct sctp_association *asoc,
3920 const sctp_subtype_t type, 3982 const sctp_subtype_t type,
3921 struct sctp_chunk *chunk) 3983 struct sctp_chunk *chunk)
@@ -3988,7 +4050,8 @@ nomem:
3988 return SCTP_IERROR_NOMEM; 4050 return SCTP_IERROR_NOMEM;
3989} 4051}
3990 4052
3991sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep, 4053sctp_disposition_t sctp_sf_eat_auth(struct net *net,
4054 const struct sctp_endpoint *ep,
3992 const struct sctp_association *asoc, 4055 const struct sctp_association *asoc,
3993 const sctp_subtype_t type, 4056 const sctp_subtype_t type,
3994 void *arg, 4057 void *arg,
@@ -4001,21 +4064,21 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
4001 4064
4002 /* Make sure that the peer has AUTH capable */ 4065 /* Make sure that the peer has AUTH capable */
4003 if (!asoc->peer.auth_capable) 4066 if (!asoc->peer.auth_capable)
4004 return sctp_sf_unk_chunk(ep, asoc, type, arg, commands); 4067 return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
4005 4068
4006 if (!sctp_vtag_verify(chunk, asoc)) { 4069 if (!sctp_vtag_verify(chunk, asoc)) {
4007 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 4070 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
4008 SCTP_NULL()); 4071 SCTP_NULL());
4009 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 4072 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4010 } 4073 }
4011 4074
4012 /* Make sure that the AUTH chunk has valid length. */ 4075 /* Make sure that the AUTH chunk has valid length. */
4013 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) 4076 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
4014 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 4077 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4015 commands); 4078 commands);
4016 4079
4017 auth_hdr = (struct sctp_authhdr *)chunk->skb->data; 4080 auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
4018 error = sctp_sf_authenticate(ep, asoc, type, chunk); 4081 error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
4019 switch (error) { 4082 switch (error) {
4020 case SCTP_IERROR_AUTH_BAD_HMAC: 4083 case SCTP_IERROR_AUTH_BAD_HMAC:
4021 /* Generate the ERROR chunk and discard the rest 4084 /* Generate the ERROR chunk and discard the rest
@@ -4032,10 +4095,10 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
4032 /* Fall Through */ 4095 /* Fall Through */
4033 case SCTP_IERROR_AUTH_BAD_KEYID: 4096 case SCTP_IERROR_AUTH_BAD_KEYID:
4034 case SCTP_IERROR_BAD_SIG: 4097 case SCTP_IERROR_BAD_SIG:
4035 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 4098 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4036 4099
4037 case SCTP_IERROR_PROTO_VIOLATION: 4100 case SCTP_IERROR_PROTO_VIOLATION:
4038 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 4101 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4039 commands); 4102 commands);
4040 4103
4041 case SCTP_IERROR_NOMEM: 4104 case SCTP_IERROR_NOMEM:
@@ -4084,7 +4147,8 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
4084 * 4147 *
4085 * The return value is the disposition of the chunk. 4148 * The return value is the disposition of the chunk.
4086 */ 4149 */
4087sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep, 4150sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
4151 const struct sctp_endpoint *ep,
4088 const struct sctp_association *asoc, 4152 const struct sctp_association *asoc,
4089 const sctp_subtype_t type, 4153 const sctp_subtype_t type,
4090 void *arg, 4154 void *arg,
@@ -4097,20 +4161,20 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
4097 SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk); 4161 SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
4098 4162
4099 if (!sctp_vtag_verify(unk_chunk, asoc)) 4163 if (!sctp_vtag_verify(unk_chunk, asoc))
4100 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 4164 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4101 4165
4102 /* Make sure that the chunk has a valid length. 4166 /* Make sure that the chunk has a valid length.
4103 * Since we don't know the chunk type, we use a general 4167 * Since we don't know the chunk type, we use a general
4104 * chunkhdr structure to make a comparison. 4168 * chunkhdr structure to make a comparison.
4105 */ 4169 */
4106 if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t))) 4170 if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
4107 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 4171 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4108 commands); 4172 commands);
4109 4173
4110 switch (type.chunk & SCTP_CID_ACTION_MASK) { 4174 switch (type.chunk & SCTP_CID_ACTION_MASK) {
4111 case SCTP_CID_ACTION_DISCARD: 4175 case SCTP_CID_ACTION_DISCARD:
4112 /* Discard the packet. */ 4176 /* Discard the packet. */
4113 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 4177 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4114 break; 4178 break;
4115 case SCTP_CID_ACTION_DISCARD_ERR: 4179 case SCTP_CID_ACTION_DISCARD_ERR:
4116 /* Generate an ERROR chunk as response. */ 4180 /* Generate an ERROR chunk as response. */
@@ -4125,7 +4189,7 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
4125 } 4189 }
4126 4190
4127 /* Discard the packet. */ 4191 /* Discard the packet. */
4128 sctp_sf_pdiscard(ep, asoc, type, arg, commands); 4192 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
4129 return SCTP_DISPOSITION_CONSUME; 4193 return SCTP_DISPOSITION_CONSUME;
4130 break; 4194 break;
4131 case SCTP_CID_ACTION_SKIP: 4195 case SCTP_CID_ACTION_SKIP:
@@ -4167,7 +4231,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
4167 * 4231 *
4168 * The return value is the disposition of the chunk. 4232 * The return value is the disposition of the chunk.
4169 */ 4233 */
4170sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep, 4234sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
4235 const struct sctp_endpoint *ep,
4171 const struct sctp_association *asoc, 4236 const struct sctp_association *asoc,
4172 const sctp_subtype_t type, 4237 const sctp_subtype_t type,
4173 void *arg, 4238 void *arg,
@@ -4180,7 +4245,7 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
4180 * chunkhdr structure to make a comparison. 4245 * chunkhdr structure to make a comparison.
4181 */ 4246 */
4182 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 4247 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
4183 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 4248 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4184 commands); 4249 commands);
4185 4250
4186 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); 4251 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
@@ -4205,13 +4270,14 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
4205 * 4270 *
4206 * The return value is the disposition of the chunk. 4271 * The return value is the disposition of the chunk.
4207 */ 4272 */
4208sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep, 4273sctp_disposition_t sctp_sf_pdiscard(struct net *net,
4274 const struct sctp_endpoint *ep,
4209 const struct sctp_association *asoc, 4275 const struct sctp_association *asoc,
4210 const sctp_subtype_t type, 4276 const sctp_subtype_t type,
4211 void *arg, 4277 void *arg,
4212 sctp_cmd_seq_t *commands) 4278 sctp_cmd_seq_t *commands)
4213{ 4279{
4214 SCTP_INC_STATS(SCTP_MIB_IN_PKT_DISCARDS); 4280 SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
4215 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); 4281 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
4216 4282
4217 return SCTP_DISPOSITION_CONSUME; 4283 return SCTP_DISPOSITION_CONSUME;
@@ -4232,7 +4298,8 @@ sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
4232 * We simply tag the chunk as a violation. The state machine will log 4298 * We simply tag the chunk as a violation. The state machine will log
4233 * the violation and continue. 4299 * the violation and continue.
4234 */ 4300 */
4235sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, 4301sctp_disposition_t sctp_sf_violation(struct net *net,
4302 const struct sctp_endpoint *ep,
4236 const struct sctp_association *asoc, 4303 const struct sctp_association *asoc,
4237 const sctp_subtype_t type, 4304 const sctp_subtype_t type,
4238 void *arg, 4305 void *arg,
@@ -4242,7 +4309,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
4242 4309
4243 /* Make sure that the chunk has a valid length. */ 4310 /* Make sure that the chunk has a valid length. */
4244 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 4311 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
4245 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 4312 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
4246 commands); 4313 commands);
4247 4314
4248 return SCTP_DISPOSITION_VIOLATION; 4315 return SCTP_DISPOSITION_VIOLATION;
@@ -4252,6 +4319,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
4252 * Common function to handle a protocol violation. 4319 * Common function to handle a protocol violation.
4253 */ 4320 */
4254static sctp_disposition_t sctp_sf_abort_violation( 4321static sctp_disposition_t sctp_sf_abort_violation(
4322 struct net *net,
4255 const struct sctp_endpoint *ep, 4323 const struct sctp_endpoint *ep,
4256 const struct sctp_association *asoc, 4324 const struct sctp_association *asoc,
4257 void *arg, 4325 void *arg,
@@ -4302,7 +4370,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
4302 } 4370 }
4303 4371
4304 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4372 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4305 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 4373 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
4306 4374
4307 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { 4375 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
4308 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 4376 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -4316,10 +4384,10 @@ static sctp_disposition_t sctp_sf_abort_violation(
4316 SCTP_ERROR(ECONNABORTED)); 4384 SCTP_ERROR(ECONNABORTED));
4317 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4385 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4318 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 4386 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
4319 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 4387 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4320 } 4388 }
4321 } else { 4389 } else {
4322 packet = sctp_ootb_pkt_new(asoc, chunk); 4390 packet = sctp_ootb_pkt_new(net, asoc, chunk);
4323 4391
4324 if (!packet) 4392 if (!packet)
4325 goto nomem_pkt; 4393 goto nomem_pkt;
@@ -4334,13 +4402,13 @@ static sctp_disposition_t sctp_sf_abort_violation(
4334 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 4402 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
4335 SCTP_PACKET(packet)); 4403 SCTP_PACKET(packet));
4336 4404
4337 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 4405 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
4338 } 4406 }
4339 4407
4340 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 4408 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4341 4409
4342discard: 4410discard:
4343 sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); 4411 sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
4344 return SCTP_DISPOSITION_ABORT; 4412 return SCTP_DISPOSITION_ABORT;
4345 4413
4346nomem_pkt: 4414nomem_pkt:
@@ -4369,6 +4437,7 @@ nomem:
4369 * Generate an ABORT chunk and terminate the association. 4437 * Generate an ABORT chunk and terminate the association.
4370 */ 4438 */
4371static sctp_disposition_t sctp_sf_violation_chunklen( 4439static sctp_disposition_t sctp_sf_violation_chunklen(
4440 struct net *net,
4372 const struct sctp_endpoint *ep, 4441 const struct sctp_endpoint *ep,
4373 const struct sctp_association *asoc, 4442 const struct sctp_association *asoc,
4374 const sctp_subtype_t type, 4443 const sctp_subtype_t type,
@@ -4377,7 +4446,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4377{ 4446{
4378 static const char err_str[]="The following chunk had invalid length:"; 4447 static const char err_str[]="The following chunk had invalid length:";
4379 4448
4380 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4449 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
4381 sizeof(err_str)); 4450 sizeof(err_str));
4382} 4451}
4383 4452
@@ -4388,6 +4457,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4388 * the length is considered as invalid. 4457 * the length is considered as invalid.
4389 */ 4458 */
4390static sctp_disposition_t sctp_sf_violation_paramlen( 4459static sctp_disposition_t sctp_sf_violation_paramlen(
4460 struct net *net,
4391 const struct sctp_endpoint *ep, 4461 const struct sctp_endpoint *ep,
4392 const struct sctp_association *asoc, 4462 const struct sctp_association *asoc,
4393 const sctp_subtype_t type, 4463 const sctp_subtype_t type,
@@ -4407,17 +4477,17 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
4407 goto nomem; 4477 goto nomem;
4408 4478
4409 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4479 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4410 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 4480 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
4411 4481
4412 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 4482 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4413 SCTP_ERROR(ECONNABORTED)); 4483 SCTP_ERROR(ECONNABORTED));
4414 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4484 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4415 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 4485 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
4416 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 4486 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4417 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 4487 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4418 4488
4419discard: 4489discard:
4420 sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); 4490 sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
4421 return SCTP_DISPOSITION_ABORT; 4491 return SCTP_DISPOSITION_ABORT;
4422nomem: 4492nomem:
4423 return SCTP_DISPOSITION_NOMEM; 4493 return SCTP_DISPOSITION_NOMEM;
@@ -4430,6 +4500,7 @@ nomem:
4430 * error code. 4500 * error code.
4431 */ 4501 */
4432static sctp_disposition_t sctp_sf_violation_ctsn( 4502static sctp_disposition_t sctp_sf_violation_ctsn(
4503 struct net *net,
4433 const struct sctp_endpoint *ep, 4504 const struct sctp_endpoint *ep,
4434 const struct sctp_association *asoc, 4505 const struct sctp_association *asoc,
4435 const sctp_subtype_t type, 4506 const sctp_subtype_t type,
@@ -4438,7 +4509,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
4438{ 4509{
4439 static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; 4510 static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
4440 4511
4441 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4512 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
4442 sizeof(err_str)); 4513 sizeof(err_str));
4443} 4514}
4444 4515
@@ -4449,6 +4520,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
4449 * on the path and we may not want to continue this communication. 4520 * on the path and we may not want to continue this communication.
4450 */ 4521 */
4451static sctp_disposition_t sctp_sf_violation_chunk( 4522static sctp_disposition_t sctp_sf_violation_chunk(
4523 struct net *net,
4452 const struct sctp_endpoint *ep, 4524 const struct sctp_endpoint *ep,
4453 const struct sctp_association *asoc, 4525 const struct sctp_association *asoc,
4454 const sctp_subtype_t type, 4526 const sctp_subtype_t type,
@@ -4458,9 +4530,9 @@ static sctp_disposition_t sctp_sf_violation_chunk(
4458 static const char err_str[]="The following chunk violates protocol:"; 4530 static const char err_str[]="The following chunk violates protocol:";
4459 4531
4460 if (!asoc) 4532 if (!asoc)
4461 return sctp_sf_violation(ep, asoc, type, arg, commands); 4533 return sctp_sf_violation(net, ep, asoc, type, arg, commands);
4462 4534
4463 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, 4535 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
4464 sizeof(err_str)); 4536 sizeof(err_str));
4465} 4537}
4466/*************************************************************************** 4538/***************************************************************************
@@ -4523,7 +4595,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
4523 * 4595 *
4524 * The return value is a disposition. 4596 * The return value is a disposition.
4525 */ 4597 */
4526sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep, 4598sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
4599 const struct sctp_endpoint *ep,
4527 const struct sctp_association *asoc, 4600 const struct sctp_association *asoc,
4528 const sctp_subtype_t type, 4601 const sctp_subtype_t type,
4529 void *arg, 4602 void *arg,
@@ -4634,7 +4707,8 @@ nomem:
4634 * 4707 *
4635 * The return value is the disposition. 4708 * The return value is the disposition.
4636 */ 4709 */
4637sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep, 4710sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
4711 const struct sctp_endpoint *ep,
4638 const struct sctp_association *asoc, 4712 const struct sctp_association *asoc,
4639 const sctp_subtype_t type, 4713 const sctp_subtype_t type,
4640 void *arg, 4714 void *arg,
@@ -4673,6 +4747,7 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
4673 * The return value is the disposition. 4747 * The return value is the disposition.
4674 */ 4748 */
4675sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( 4749sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
4750 struct net *net,
4676 const struct sctp_endpoint *ep, 4751 const struct sctp_endpoint *ep,
4677 const struct sctp_association *asoc, 4752 const struct sctp_association *asoc,
4678 const sctp_subtype_t type, 4753 const sctp_subtype_t type,
@@ -4694,7 +4769,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
4694 4769
4695 disposition = SCTP_DISPOSITION_CONSUME; 4770 disposition = SCTP_DISPOSITION_CONSUME;
4696 if (sctp_outq_is_empty(&asoc->outqueue)) { 4771 if (sctp_outq_is_empty(&asoc->outqueue)) {
4697 disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type, 4772 disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
4698 arg, commands); 4773 arg, commands);
4699 } 4774 }
4700 return disposition; 4775 return disposition;
@@ -4728,6 +4803,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
4728 * The return value is the disposition. 4803 * The return value is the disposition.
4729 */ 4804 */
4730sctp_disposition_t sctp_sf_do_9_1_prm_abort( 4805sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4806 struct net *net,
4731 const struct sctp_endpoint *ep, 4807 const struct sctp_endpoint *ep,
4732 const struct sctp_association *asoc, 4808 const struct sctp_association *asoc,
4733 const sctp_subtype_t type, 4809 const sctp_subtype_t type,
@@ -4759,14 +4835,15 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4759 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4835 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4760 SCTP_PERR(SCTP_ERROR_USER_ABORT)); 4836 SCTP_PERR(SCTP_ERROR_USER_ABORT));
4761 4837
4762 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 4838 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4763 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 4839 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4764 4840
4765 return retval; 4841 return retval;
4766} 4842}
4767 4843
4768/* We tried an illegal operation on an association which is closed. */ 4844/* We tried an illegal operation on an association which is closed. */
4769sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep, 4845sctp_disposition_t sctp_sf_error_closed(struct net *net,
4846 const struct sctp_endpoint *ep,
4770 const struct sctp_association *asoc, 4847 const struct sctp_association *asoc,
4771 const sctp_subtype_t type, 4848 const sctp_subtype_t type,
4772 void *arg, 4849 void *arg,
@@ -4779,7 +4856,8 @@ sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
4779/* We tried an illegal operation on an association which is shutting 4856/* We tried an illegal operation on an association which is shutting
4780 * down. 4857 * down.
4781 */ 4858 */
4782sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep, 4859sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
4860 const struct sctp_endpoint *ep,
4783 const struct sctp_association *asoc, 4861 const struct sctp_association *asoc,
4784 const sctp_subtype_t type, 4862 const sctp_subtype_t type,
4785 void *arg, 4863 void *arg,
@@ -4805,6 +4883,7 @@ sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
4805 * (timers) 4883 * (timers)
4806 */ 4884 */
4807sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( 4885sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
4886 struct net *net,
4808 const struct sctp_endpoint *ep, 4887 const struct sctp_endpoint *ep,
4809 const struct sctp_association *asoc, 4888 const struct sctp_association *asoc,
4810 const sctp_subtype_t type, 4889 const sctp_subtype_t type,
@@ -4817,7 +4896,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
4817 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4896 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4818 SCTP_STATE(SCTP_STATE_CLOSED)); 4897 SCTP_STATE(SCTP_STATE_CLOSED));
4819 4898
4820 SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); 4899 SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
4821 4900
4822 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 4901 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
4823 4902
@@ -4839,6 +4918,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
4839 * (timers) 4918 * (timers)
4840 */ 4919 */
4841sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( 4920sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
4921 struct net *net,
4842 const struct sctp_endpoint *ep, 4922 const struct sctp_endpoint *ep,
4843 const struct sctp_association *asoc, 4923 const struct sctp_association *asoc,
4844 const sctp_subtype_t type, 4924 const sctp_subtype_t type,
@@ -4847,7 +4927,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
4847 /* There is a single T1 timer, so we should be able to use 4927 /* There is a single T1 timer, so we should be able to use
4848 * common function with the COOKIE-WAIT state. 4928 * common function with the COOKIE-WAIT state.
4849 */ 4929 */
4850 return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands); 4930 return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
4851} 4931}
4852 4932
4853/* 4933/*
@@ -4865,6 +4945,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
4865 * (timers) 4945 * (timers)
4866 */ 4946 */
4867sctp_disposition_t sctp_sf_cookie_wait_prm_abort( 4947sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4948 struct net *net,
4868 const struct sctp_endpoint *ep, 4949 const struct sctp_endpoint *ep,
4869 const struct sctp_association *asoc, 4950 const struct sctp_association *asoc,
4870 const sctp_subtype_t type, 4951 const sctp_subtype_t type,
@@ -4884,7 +4965,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4884 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4965 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4885 SCTP_STATE(SCTP_STATE_CLOSED)); 4966 SCTP_STATE(SCTP_STATE_CLOSED));
4886 4967
4887 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 4968 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4888 4969
4889 /* Even if we can't send the ABORT due to low memory delete the 4970 /* Even if we can't send the ABORT due to low memory delete the
4890 * TCB. This is a departure from our typical NOMEM handling. 4971 * TCB. This is a departure from our typical NOMEM handling.
@@ -4914,6 +4995,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4914 * (timers) 4995 * (timers)
4915 */ 4996 */
4916sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( 4997sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
4998 struct net *net,
4917 const struct sctp_endpoint *ep, 4999 const struct sctp_endpoint *ep,
4918 const struct sctp_association *asoc, 5000 const struct sctp_association *asoc,
4919 const sctp_subtype_t type, 5001 const sctp_subtype_t type,
@@ -4923,7 +5005,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
4923 /* There is a single T1 timer, so we should be able to use 5005 /* There is a single T1 timer, so we should be able to use
4924 * common function with the COOKIE-WAIT state. 5006 * common function with the COOKIE-WAIT state.
4925 */ 5007 */
4926 return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands); 5008 return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
4927} 5009}
4928 5010
4929/* 5011/*
@@ -4939,6 +5021,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
4939 * (timers) 5021 * (timers)
4940 */ 5022 */
4941sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( 5023sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
5024 struct net *net,
4942 const struct sctp_endpoint *ep, 5025 const struct sctp_endpoint *ep,
4943 const struct sctp_association *asoc, 5026 const struct sctp_association *asoc,
4944 const sctp_subtype_t type, 5027 const sctp_subtype_t type,
@@ -4949,7 +5032,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
4949 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 5032 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4950 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 5033 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
4951 5034
4952 return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands); 5035 return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
4953} 5036}
4954 5037
4955/* 5038/*
@@ -4965,6 +5048,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
4965 * (timers) 5048 * (timers)
4966 */ 5049 */
4967sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( 5050sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
5051 struct net *net,
4968 const struct sctp_endpoint *ep, 5052 const struct sctp_endpoint *ep,
4969 const struct sctp_association *asoc, 5053 const struct sctp_association *asoc,
4970 const sctp_subtype_t type, 5054 const sctp_subtype_t type,
@@ -4979,7 +5063,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
4979 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 5063 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4980 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 5064 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
4981 5065
4982 return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands); 5066 return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
4983} 5067}
4984 5068
4985/* 5069/*
@@ -4995,6 +5079,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
4995 * (timers) 5079 * (timers)
4996 */ 5080 */
4997sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( 5081sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
5082 struct net *net,
4998 const struct sctp_endpoint *ep, 5083 const struct sctp_endpoint *ep,
4999 const struct sctp_association *asoc, 5084 const struct sctp_association *asoc,
5000 const sctp_subtype_t type, 5085 const sctp_subtype_t type,
@@ -5004,7 +5089,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
5004 /* The same T2 timer, so we should be able to use 5089 /* The same T2 timer, so we should be able to use
5005 * common function with the SHUTDOWN-SENT state. 5090 * common function with the SHUTDOWN-SENT state.
5006 */ 5091 */
5007 return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands); 5092 return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
5008} 5093}
5009 5094
5010/* 5095/*
@@ -5030,6 +5115,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
5030 * association on which a heartbeat should be issued. 5115 * association on which a heartbeat should be issued.
5031 */ 5116 */
5032sctp_disposition_t sctp_sf_do_prm_requestheartbeat( 5117sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
5118 struct net *net,
5033 const struct sctp_endpoint *ep, 5119 const struct sctp_endpoint *ep,
5034 const struct sctp_association *asoc, 5120 const struct sctp_association *asoc,
5035 const sctp_subtype_t type, 5121 const sctp_subtype_t type,
@@ -5061,7 +5147,8 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
5061 * When an endpoint has an ASCONF signaled change to be sent to the 5147 * When an endpoint has an ASCONF signaled change to be sent to the
5062 * remote endpoint it should do A1 to A9 5148 * remote endpoint it should do A1 to A9
5063 */ 5149 */
5064sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep, 5150sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
5151 const struct sctp_endpoint *ep,
5065 const struct sctp_association *asoc, 5152 const struct sctp_association *asoc,
5066 const sctp_subtype_t type, 5153 const sctp_subtype_t type,
5067 void *arg, 5154 void *arg,
@@ -5082,6 +5169,7 @@ sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
5082 * The return value is the disposition of the primitive. 5169 * The return value is the disposition of the primitive.
5083 */ 5170 */
5084sctp_disposition_t sctp_sf_ignore_primitive( 5171sctp_disposition_t sctp_sf_ignore_primitive(
5172 struct net *net,
5085 const struct sctp_endpoint *ep, 5173 const struct sctp_endpoint *ep,
5086 const struct sctp_association *asoc, 5174 const struct sctp_association *asoc,
5087 const sctp_subtype_t type, 5175 const sctp_subtype_t type,
@@ -5103,6 +5191,7 @@ sctp_disposition_t sctp_sf_ignore_primitive(
5103 * retransmit, the stack will immediately send up this notification. 5191 * retransmit, the stack will immediately send up this notification.
5104 */ 5192 */
5105sctp_disposition_t sctp_sf_do_no_pending_tsn( 5193sctp_disposition_t sctp_sf_do_no_pending_tsn(
5194 struct net *net,
5106 const struct sctp_endpoint *ep, 5195 const struct sctp_endpoint *ep,
5107 const struct sctp_association *asoc, 5196 const struct sctp_association *asoc,
5108 const sctp_subtype_t type, 5197 const sctp_subtype_t type,
@@ -5134,6 +5223,7 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn(
5134 * The return value is the disposition. 5223 * The return value is the disposition.
5135 */ 5224 */
5136sctp_disposition_t sctp_sf_do_9_2_start_shutdown( 5225sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
5226 struct net *net,
5137 const struct sctp_endpoint *ep, 5227 const struct sctp_endpoint *ep,
5138 const struct sctp_association *asoc, 5228 const struct sctp_association *asoc,
5139 const sctp_subtype_t type, 5229 const sctp_subtype_t type,
@@ -5203,6 +5293,7 @@ nomem:
5203 * The return value is the disposition. 5293 * The return value is the disposition.
5204 */ 5294 */
5205sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( 5295sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
5296 struct net *net,
5206 const struct sctp_endpoint *ep, 5297 const struct sctp_endpoint *ep,
5207 const struct sctp_association *asoc, 5298 const struct sctp_association *asoc,
5208 const sctp_subtype_t type, 5299 const sctp_subtype_t type,
@@ -5221,11 +5312,11 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
5221 */ 5312 */
5222 if (chunk) { 5313 if (chunk) {
5223 if (!sctp_vtag_verify(chunk, asoc)) 5314 if (!sctp_vtag_verify(chunk, asoc))
5224 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 5315 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
5225 5316
5226 /* Make sure that the SHUTDOWN chunk has a valid length. */ 5317 /* Make sure that the SHUTDOWN chunk has a valid length. */
5227 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) 5318 if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
5228 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 5319 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
5229 commands); 5320 commands);
5230 } 5321 }
5231 5322
@@ -5273,7 +5364,8 @@ nomem:
5273 * 5364 *
5274 * The return value is the disposition of the event. 5365 * The return value is the disposition of the event.
5275 */ 5366 */
5276sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep, 5367sctp_disposition_t sctp_sf_ignore_other(struct net *net,
5368 const struct sctp_endpoint *ep,
5277 const struct sctp_association *asoc, 5369 const struct sctp_association *asoc,
5278 const sctp_subtype_t type, 5370 const sctp_subtype_t type,
5279 void *arg, 5371 void *arg,
@@ -5298,7 +5390,8 @@ sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
5298 * 5390 *
5299 * The return value is the disposition of the chunk. 5391 * The return value is the disposition of the chunk.
5300 */ 5392 */
5301sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep, 5393sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
5394 const struct sctp_endpoint *ep,
5302 const struct sctp_association *asoc, 5395 const struct sctp_association *asoc,
5303 const sctp_subtype_t type, 5396 const sctp_subtype_t type,
5304 void *arg, 5397 void *arg,
@@ -5306,7 +5399,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
5306{ 5399{
5307 struct sctp_transport *transport = arg; 5400 struct sctp_transport *transport = arg;
5308 5401
5309 SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS); 5402 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
5310 5403
5311 if (asoc->overall_error_count >= asoc->max_retrans) { 5404 if (asoc->overall_error_count >= asoc->max_retrans) {
5312 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { 5405 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
@@ -5327,8 +5420,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
5327 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 5420 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
5328 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5421 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5329 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5422 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5330 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5423 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5331 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 5424 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
5332 return SCTP_DISPOSITION_DELETE_TCB; 5425 return SCTP_DISPOSITION_DELETE_TCB;
5333 } 5426 }
5334 } 5427 }
@@ -5384,13 +5477,14 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
5384 * allow. However, an SCTP transmitter MUST NOT be more aggressive than 5477 * allow. However, an SCTP transmitter MUST NOT be more aggressive than
5385 * the following algorithms allow. 5478 * the following algorithms allow.
5386 */ 5479 */
5387sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep, 5480sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
5481 const struct sctp_endpoint *ep,
5388 const struct sctp_association *asoc, 5482 const struct sctp_association *asoc,
5389 const sctp_subtype_t type, 5483 const sctp_subtype_t type,
5390 void *arg, 5484 void *arg,
5391 sctp_cmd_seq_t *commands) 5485 sctp_cmd_seq_t *commands)
5392{ 5486{
5393 SCTP_INC_STATS(SCTP_MIB_DELAY_SACK_EXPIREDS); 5487 SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
5394 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); 5488 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
5395 return SCTP_DISPOSITION_CONSUME; 5489 return SCTP_DISPOSITION_CONSUME;
5396} 5490}
@@ -5414,7 +5508,8 @@ sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
5414 * (timers, events) 5508 * (timers, events)
5415 * 5509 *
5416 */ 5510 */
5417sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep, 5511sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
5512 const struct sctp_endpoint *ep,
5418 const struct sctp_association *asoc, 5513 const struct sctp_association *asoc,
5419 const sctp_subtype_t type, 5514 const sctp_subtype_t type,
5420 void *arg, 5515 void *arg,
@@ -5425,7 +5520,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
5425 int attempts = asoc->init_err_counter + 1; 5520 int attempts = asoc->init_err_counter + 1;
5426 5521
5427 SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n"); 5522 SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
5428 SCTP_INC_STATS(SCTP_MIB_T1_INIT_EXPIREDS); 5523 SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
5429 5524
5430 if (attempts <= asoc->max_init_attempts) { 5525 if (attempts <= asoc->max_init_attempts) {
5431 bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; 5526 bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
@@ -5475,7 +5570,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
5475 * (timers, events) 5570 * (timers, events)
5476 * 5571 *
5477 */ 5572 */
5478sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep, 5573sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
5574 const struct sctp_endpoint *ep,
5479 const struct sctp_association *asoc, 5575 const struct sctp_association *asoc,
5480 const sctp_subtype_t type, 5576 const sctp_subtype_t type,
5481 void *arg, 5577 void *arg,
@@ -5485,7 +5581,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
5485 int attempts = asoc->init_err_counter + 1; 5581 int attempts = asoc->init_err_counter + 1;
5486 5582
5487 SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n"); 5583 SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
5488 SCTP_INC_STATS(SCTP_MIB_T1_COOKIE_EXPIREDS); 5584 SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
5489 5585
5490 if (attempts <= asoc->max_init_attempts) { 5586 if (attempts <= asoc->max_init_attempts) {
5491 repl = sctp_make_cookie_echo(asoc, NULL); 5587 repl = sctp_make_cookie_echo(asoc, NULL);
@@ -5523,7 +5619,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
5523 * the T2-Shutdown timer, giving its peer ample opportunity to transmit 5619 * the T2-Shutdown timer, giving its peer ample opportunity to transmit
5524 * all of its queued DATA chunks that have not yet been sent. 5620 * all of its queued DATA chunks that have not yet been sent.
5525 */ 5621 */
5526sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, 5622sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
5623 const struct sctp_endpoint *ep,
5527 const struct sctp_association *asoc, 5624 const struct sctp_association *asoc,
5528 const sctp_subtype_t type, 5625 const sctp_subtype_t type,
5529 void *arg, 5626 void *arg,
@@ -5532,7 +5629,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
5532 struct sctp_chunk *reply = NULL; 5629 struct sctp_chunk *reply = NULL;
5533 5630
5534 SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); 5631 SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
5535 SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS); 5632 SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
5536 5633
5537 ((struct sctp_association *)asoc)->shutdown_retries++; 5634 ((struct sctp_association *)asoc)->shutdown_retries++;
5538 5635
@@ -5542,8 +5639,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
5542 /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 5639 /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
5543 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5640 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5544 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5641 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5545 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5642 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5546 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 5643 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
5547 return SCTP_DISPOSITION_DELETE_TCB; 5644 return SCTP_DISPOSITION_DELETE_TCB;
5548 } 5645 }
5549 5646
@@ -5592,6 +5689,7 @@ nomem:
5592 * If the T4 RTO timer expires the endpoint should do B1 to B5 5689 * If the T4 RTO timer expires the endpoint should do B1 to B5
5593 */ 5690 */
5594sctp_disposition_t sctp_sf_t4_timer_expire( 5691sctp_disposition_t sctp_sf_t4_timer_expire(
5692 struct net *net,
5595 const struct sctp_endpoint *ep, 5693 const struct sctp_endpoint *ep,
5596 const struct sctp_association *asoc, 5694 const struct sctp_association *asoc,
5597 const sctp_subtype_t type, 5695 const sctp_subtype_t type,
@@ -5601,7 +5699,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
5601 struct sctp_chunk *chunk = asoc->addip_last_asconf; 5699 struct sctp_chunk *chunk = asoc->addip_last_asconf;
5602 struct sctp_transport *transport = chunk->transport; 5700 struct sctp_transport *transport = chunk->transport;
5603 5701
5604 SCTP_INC_STATS(SCTP_MIB_T4_RTO_EXPIREDS); 5702 SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
5605 5703
5606 /* ADDIP 4.1 B1) Increment the error counters and perform path failure 5704 /* ADDIP 4.1 B1) Increment the error counters and perform path failure
5607 * detection on the appropriate destination address as defined in 5705 * detection on the appropriate destination address as defined in
@@ -5626,8 +5724,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
5626 SCTP_ERROR(ETIMEDOUT)); 5724 SCTP_ERROR(ETIMEDOUT));
5627 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5725 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5628 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5726 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5629 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5727 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5630 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 5728 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
5631 return SCTP_DISPOSITION_ABORT; 5729 return SCTP_DISPOSITION_ABORT;
5632 } 5730 }
5633 5731
@@ -5662,7 +5760,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
5662 * At the expiration of this timer the sender SHOULD abort the association 5760 * At the expiration of this timer the sender SHOULD abort the association
5663 * by sending an ABORT chunk. 5761 * by sending an ABORT chunk.
5664 */ 5762 */
5665sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep, 5763sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
5764 const struct sctp_endpoint *ep,
5666 const struct sctp_association *asoc, 5765 const struct sctp_association *asoc,
5667 const sctp_subtype_t type, 5766 const sctp_subtype_t type,
5668 void *arg, 5767 void *arg,
@@ -5671,7 +5770,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
5671 struct sctp_chunk *reply = NULL; 5770 struct sctp_chunk *reply = NULL;
5672 5771
5673 SCTP_DEBUG_PRINTK("Timer T5 expired.\n"); 5772 SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
5674 SCTP_INC_STATS(SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS); 5773 SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
5675 5774
5676 reply = sctp_make_abort(asoc, NULL, 0); 5775 reply = sctp_make_abort(asoc, NULL, 0);
5677 if (!reply) 5776 if (!reply)
@@ -5683,8 +5782,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
5683 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5782 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5684 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5783 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5685 5784
5686 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5785 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
5687 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 5786 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
5688 5787
5689 return SCTP_DISPOSITION_DELETE_TCB; 5788 return SCTP_DISPOSITION_DELETE_TCB;
5690nomem: 5789nomem:
@@ -5697,6 +5796,7 @@ nomem:
5697 * the user. So this routine looks same as sctp_sf_do_9_2_prm_shutdown(). 5796 * the user. So this routine looks same as sctp_sf_do_9_2_prm_shutdown().
5698 */ 5797 */
5699sctp_disposition_t sctp_sf_autoclose_timer_expire( 5798sctp_disposition_t sctp_sf_autoclose_timer_expire(
5799 struct net *net,
5700 const struct sctp_endpoint *ep, 5800 const struct sctp_endpoint *ep,
5701 const struct sctp_association *asoc, 5801 const struct sctp_association *asoc,
5702 const sctp_subtype_t type, 5802 const sctp_subtype_t type,
@@ -5705,7 +5805,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
5705{ 5805{
5706 int disposition; 5806 int disposition;
5707 5807
5708 SCTP_INC_STATS(SCTP_MIB_AUTOCLOSE_EXPIREDS); 5808 SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
5709 5809
5710 /* From 9.2 Shutdown of an Association 5810 /* From 9.2 Shutdown of an Association
5711 * Upon receipt of the SHUTDOWN primitive from its upper 5811 * Upon receipt of the SHUTDOWN primitive from its upper
@@ -5720,7 +5820,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
5720 5820
5721 disposition = SCTP_DISPOSITION_CONSUME; 5821 disposition = SCTP_DISPOSITION_CONSUME;
5722 if (sctp_outq_is_empty(&asoc->outqueue)) { 5822 if (sctp_outq_is_empty(&asoc->outqueue)) {
5723 disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type, 5823 disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
5724 arg, commands); 5824 arg, commands);
5725 } 5825 }
5726 return disposition; 5826 return disposition;
@@ -5738,7 +5838,8 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
5738 * 5838 *
5739 * The return value is the disposition of the chunk. 5839 * The return value is the disposition of the chunk.
5740 */ 5840 */
5741sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep, 5841sctp_disposition_t sctp_sf_not_impl(struct net *net,
5842 const struct sctp_endpoint *ep,
5742 const struct sctp_association *asoc, 5843 const struct sctp_association *asoc,
5743 const sctp_subtype_t type, 5844 const sctp_subtype_t type,
5744 void *arg, 5845 void *arg,
@@ -5755,7 +5856,8 @@ sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
5755 * 5856 *
5756 * The return value is the disposition of the chunk. 5857 * The return value is the disposition of the chunk.
5757 */ 5858 */
5758sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep, 5859sctp_disposition_t sctp_sf_bug(struct net *net,
5860 const struct sctp_endpoint *ep,
5759 const struct sctp_association *asoc, 5861 const struct sctp_association *asoc,
5760 const sctp_subtype_t type, 5862 const sctp_subtype_t type,
5761 void *arg, 5863 void *arg,
@@ -5775,7 +5877,8 @@ sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
5775 * 5877 *
5776 * The return value is the disposition of the chunk. 5878 * The return value is the disposition of the chunk.
5777 */ 5879 */
5778sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep, 5880sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
5881 const struct sctp_endpoint *ep,
5779 const struct sctp_association *asoc, 5882 const struct sctp_association *asoc,
5780 const sctp_subtype_t type, 5883 const sctp_subtype_t type,
5781 void *arg, 5884 void *arg,
@@ -5817,7 +5920,8 @@ static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
5817/* Create an ABORT packet to be sent as a response, with the specified 5920/* Create an ABORT packet to be sent as a response, with the specified
5818 * error causes. 5921 * error causes.
5819 */ 5922 */
5820static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep, 5923static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
5924 const struct sctp_endpoint *ep,
5821 const struct sctp_association *asoc, 5925 const struct sctp_association *asoc,
5822 struct sctp_chunk *chunk, 5926 struct sctp_chunk *chunk,
5823 const void *payload, 5927 const void *payload,
@@ -5826,7 +5930,7 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
5826 struct sctp_packet *packet; 5930 struct sctp_packet *packet;
5827 struct sctp_chunk *abort; 5931 struct sctp_chunk *abort;
5828 5932
5829 packet = sctp_ootb_pkt_new(asoc, chunk); 5933 packet = sctp_ootb_pkt_new(net, asoc, chunk);
5830 5934
5831 if (packet) { 5935 if (packet) {
5832 /* Make an ABORT. 5936 /* Make an ABORT.
@@ -5858,7 +5962,8 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
5858} 5962}
5859 5963
5860/* Allocate a packet for responding in the OOTB conditions. */ 5964/* Allocate a packet for responding in the OOTB conditions. */
5861static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc, 5965static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
5966 const struct sctp_association *asoc,
5862 const struct sctp_chunk *chunk) 5967 const struct sctp_chunk *chunk)
5863{ 5968{
5864 struct sctp_packet *packet; 5969 struct sctp_packet *packet;
@@ -5911,7 +6016,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
5911 } 6016 }
5912 6017
5913 /* Make a transport for the bucket, Eliza... */ 6018 /* Make a transport for the bucket, Eliza... */
5914 transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC); 6019 transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
5915 if (!transport) 6020 if (!transport)
5916 goto nomem; 6021 goto nomem;
5917 6022
@@ -5919,7 +6024,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
5919 * the source address. 6024 * the source address.
5920 */ 6025 */
5921 sctp_transport_route(transport, (union sctp_addr *)&chunk->dest, 6026 sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
5922 sctp_sk(sctp_get_ctl_sock())); 6027 sctp_sk(net->sctp.ctl_sock));
5923 6028
5924 packet = sctp_packet_init(&transport->packet, transport, sport, dport); 6029 packet = sctp_packet_init(&transport->packet, transport, sport, dport);
5925 packet = sctp_packet_config(packet, vtag, 0); 6030 packet = sctp_packet_config(packet, vtag, 0);
@@ -5937,7 +6042,8 @@ void sctp_ootb_pkt_free(struct sctp_packet *packet)
5937} 6042}
5938 6043
5939/* Send a stale cookie error when a invalid COOKIE ECHO chunk is found */ 6044/* Send a stale cookie error when a invalid COOKIE ECHO chunk is found */
5940static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep, 6045static void sctp_send_stale_cookie_err(struct net *net,
6046 const struct sctp_endpoint *ep,
5941 const struct sctp_association *asoc, 6047 const struct sctp_association *asoc,
5942 const struct sctp_chunk *chunk, 6048 const struct sctp_chunk *chunk,
5943 sctp_cmd_seq_t *commands, 6049 sctp_cmd_seq_t *commands,
@@ -5946,7 +6052,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
5946 struct sctp_packet *packet; 6052 struct sctp_packet *packet;
5947 6053
5948 if (err_chunk) { 6054 if (err_chunk) {
5949 packet = sctp_ootb_pkt_new(asoc, chunk); 6055 packet = sctp_ootb_pkt_new(net, asoc, chunk);
5950 if (packet) { 6056 if (packet) {
5951 struct sctp_signed_cookie *cookie; 6057 struct sctp_signed_cookie *cookie;
5952 6058
@@ -5959,7 +6065,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
5959 sctp_packet_append_chunk(packet, err_chunk); 6065 sctp_packet_append_chunk(packet, err_chunk);
5960 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 6066 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
5961 SCTP_PACKET(packet)); 6067 SCTP_PACKET(packet));
5962 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 6068 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
5963 } else 6069 } else
5964 sctp_chunk_free (err_chunk); 6070 sctp_chunk_free (err_chunk);
5965 } 6071 }
@@ -5979,6 +6085,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5979 __u32 tsn; 6085 __u32 tsn;
5980 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; 6086 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
5981 struct sock *sk = asoc->base.sk; 6087 struct sock *sk = asoc->base.sk;
6088 struct net *net = sock_net(sk);
5982 u16 ssn; 6089 u16 ssn;
5983 u16 sid; 6090 u16 sid;
5984 u8 ordered = 0; 6091 u8 ordered = 0;
@@ -6109,8 +6216,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6109 SCTP_ERROR(ECONNABORTED)); 6216 SCTP_ERROR(ECONNABORTED));
6110 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 6217 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
6111 SCTP_PERR(SCTP_ERROR_NO_DATA)); 6218 SCTP_PERR(SCTP_ERROR_NO_DATA));
6112 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 6219 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
6113 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 6220 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
6114 return SCTP_IERROR_NO_DATA; 6221 return SCTP_IERROR_NO_DATA;
6115 } 6222 }
6116 6223
@@ -6120,9 +6227,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6120 * if we renege and the chunk arrives again. 6227 * if we renege and the chunk arrives again.
6121 */ 6228 */
6122 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 6229 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
6123 SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); 6230 SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
6124 else { 6231 else {
6125 SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); 6232 SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
6126 ordered = 1; 6233 ordered = 1;
6127 } 6234 }
6128 6235
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 7c211a7f90f4..84d98d8a5a74 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -59,7 +59,8 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES];
59static const sctp_sm_table_entry_t 59static const sctp_sm_table_entry_t
60timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES]; 60timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES];
61 61
62static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, 62static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
63 sctp_cid_t cid,
63 sctp_state_t state); 64 sctp_state_t state);
64 65
65 66
@@ -82,13 +83,14 @@ static const sctp_sm_table_entry_t bug = {
82 rtn; \ 83 rtn; \
83}) 84})
84 85
85const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, 86const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net,
87 sctp_event_t event_type,
86 sctp_state_t state, 88 sctp_state_t state,
87 sctp_subtype_t event_subtype) 89 sctp_subtype_t event_subtype)
88{ 90{
89 switch (event_type) { 91 switch (event_type) {
90 case SCTP_EVENT_T_CHUNK: 92 case SCTP_EVENT_T_CHUNK:
91 return sctp_chunk_event_lookup(event_subtype.chunk, state); 93 return sctp_chunk_event_lookup(net, event_subtype.chunk, state);
92 case SCTP_EVENT_T_TIMEOUT: 94 case SCTP_EVENT_T_TIMEOUT:
93 return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout, 95 return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
94 timeout_event_table); 96 timeout_event_table);
@@ -906,7 +908,8 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
906 TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, 908 TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
907}; 909};
908 910
909static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, 911static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
912 sctp_cid_t cid,
910 sctp_state_t state) 913 sctp_state_t state)
911{ 914{
912 if (state > SCTP_STATE_MAX) 915 if (state > SCTP_STATE_MAX)
@@ -915,12 +918,12 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
915 if (cid <= SCTP_CID_BASE_MAX) 918 if (cid <= SCTP_CID_BASE_MAX)
916 return &chunk_event_table[cid][state]; 919 return &chunk_event_table[cid][state];
917 920
918 if (sctp_prsctp_enable) { 921 if (net->sctp.prsctp_enable) {
919 if (cid == SCTP_CID_FWD_TSN) 922 if (cid == SCTP_CID_FWD_TSN)
920 return &prsctp_chunk_event_table[0][state]; 923 return &prsctp_chunk_event_table[0][state];
921 } 924 }
922 925
923 if (sctp_addip_enable) { 926 if (net->sctp.addip_enable) {
924 if (cid == SCTP_CID_ASCONF) 927 if (cid == SCTP_CID_ASCONF)
925 return &addip_chunk_event_table[0][state]; 928 return &addip_chunk_event_table[0][state];
926 929
@@ -928,7 +931,7 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
928 return &addip_chunk_event_table[1][state]; 931 return &addip_chunk_event_table[1][state];
929 } 932 }
930 933
931 if (sctp_auth_enable) { 934 if (net->sctp.auth_enable) {
932 if (cid == SCTP_CID_AUTH) 935 if (cid == SCTP_CID_AUTH)
933 return &auth_chunk_event_table[0][state]; 936 return &auth_chunk_event_table[0][state];
934 } 937 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5e259817a7f3..d37d24ff197f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -427,6 +427,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
427static int sctp_send_asconf(struct sctp_association *asoc, 427static int sctp_send_asconf(struct sctp_association *asoc,
428 struct sctp_chunk *chunk) 428 struct sctp_chunk *chunk)
429{ 429{
430 struct net *net = sock_net(asoc->base.sk);
430 int retval = 0; 431 int retval = 0;
431 432
432 /* If there is an outstanding ASCONF chunk, queue it for later 433 /* If there is an outstanding ASCONF chunk, queue it for later
@@ -439,7 +440,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
439 440
440 /* Hold the chunk until an ASCONF_ACK is received. */ 441 /* Hold the chunk until an ASCONF_ACK is received. */
441 sctp_chunk_hold(chunk); 442 sctp_chunk_hold(chunk);
442 retval = sctp_primitive_ASCONF(asoc, chunk); 443 retval = sctp_primitive_ASCONF(net, asoc, chunk);
443 if (retval) 444 if (retval)
444 sctp_chunk_free(chunk); 445 sctp_chunk_free(chunk);
445 else 446 else
@@ -515,6 +516,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
515 struct sockaddr *addrs, 516 struct sockaddr *addrs,
516 int addrcnt) 517 int addrcnt)
517{ 518{
519 struct net *net = sock_net(sk);
518 struct sctp_sock *sp; 520 struct sctp_sock *sp;
519 struct sctp_endpoint *ep; 521 struct sctp_endpoint *ep;
520 struct sctp_association *asoc; 522 struct sctp_association *asoc;
@@ -529,7 +531,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
529 int i; 531 int i;
530 int retval = 0; 532 int retval = 0;
531 533
532 if (!sctp_addip_enable) 534 if (!net->sctp.addip_enable)
533 return retval; 535 return retval;
534 536
535 sp = sctp_sk(sk); 537 sp = sctp_sk(sk);
@@ -717,6 +719,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
717 struct sockaddr *addrs, 719 struct sockaddr *addrs,
718 int addrcnt) 720 int addrcnt)
719{ 721{
722 struct net *net = sock_net(sk);
720 struct sctp_sock *sp; 723 struct sctp_sock *sp;
721 struct sctp_endpoint *ep; 724 struct sctp_endpoint *ep;
722 struct sctp_association *asoc; 725 struct sctp_association *asoc;
@@ -732,7 +735,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
732 int stored = 0; 735 int stored = 0;
733 736
734 chunk = NULL; 737 chunk = NULL;
735 if (!sctp_addip_enable) 738 if (!net->sctp.addip_enable)
736 return retval; 739 return retval;
737 740
738 sp = sctp_sk(sk); 741 sp = sctp_sk(sk);
@@ -1050,6 +1053,7 @@ static int __sctp_connect(struct sock* sk,
1050 int addrs_size, 1053 int addrs_size,
1051 sctp_assoc_t *assoc_id) 1054 sctp_assoc_t *assoc_id)
1052{ 1055{
1056 struct net *net = sock_net(sk);
1053 struct sctp_sock *sp; 1057 struct sctp_sock *sp;
1054 struct sctp_endpoint *ep; 1058 struct sctp_endpoint *ep;
1055 struct sctp_association *asoc = NULL; 1059 struct sctp_association *asoc = NULL;
@@ -1200,7 +1204,7 @@ static int __sctp_connect(struct sock* sk,
1200 goto out_free; 1204 goto out_free;
1201 } 1205 }
1202 1206
1203 err = sctp_primitive_ASSOCIATE(asoc, NULL); 1207 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1204 if (err < 0) { 1208 if (err < 0) {
1205 goto out_free; 1209 goto out_free;
1206 } 1210 }
@@ -1458,6 +1462,7 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
1458 */ 1462 */
1459SCTP_STATIC void sctp_close(struct sock *sk, long timeout) 1463SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1460{ 1464{
1465 struct net *net = sock_net(sk);
1461 struct sctp_endpoint *ep; 1466 struct sctp_endpoint *ep;
1462 struct sctp_association *asoc; 1467 struct sctp_association *asoc;
1463 struct list_head *pos, *temp; 1468 struct list_head *pos, *temp;
@@ -1499,9 +1504,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1499 1504
1500 chunk = sctp_make_abort_user(asoc, NULL, 0); 1505 chunk = sctp_make_abort_user(asoc, NULL, 0);
1501 if (chunk) 1506 if (chunk)
1502 sctp_primitive_ABORT(asoc, chunk); 1507 sctp_primitive_ABORT(net, asoc, chunk);
1503 } else 1508 } else
1504 sctp_primitive_SHUTDOWN(asoc, NULL); 1509 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1505 } 1510 }
1506 1511
1507 /* On a TCP-style socket, block for at most linger_time if set. */ 1512 /* On a TCP-style socket, block for at most linger_time if set. */
@@ -1569,6 +1574,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
1569SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, 1574SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1570 struct msghdr *msg, size_t msg_len) 1575 struct msghdr *msg, size_t msg_len)
1571{ 1576{
1577 struct net *net = sock_net(sk);
1572 struct sctp_sock *sp; 1578 struct sctp_sock *sp;
1573 struct sctp_endpoint *ep; 1579 struct sctp_endpoint *ep;
1574 struct sctp_association *new_asoc=NULL, *asoc=NULL; 1580 struct sctp_association *new_asoc=NULL, *asoc=NULL;
@@ -1714,7 +1720,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1714 if (sinfo_flags & SCTP_EOF) { 1720 if (sinfo_flags & SCTP_EOF) {
1715 SCTP_DEBUG_PRINTK("Shutting down association: %p\n", 1721 SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
1716 asoc); 1722 asoc);
1717 sctp_primitive_SHUTDOWN(asoc, NULL); 1723 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1718 err = 0; 1724 err = 0;
1719 goto out_unlock; 1725 goto out_unlock;
1720 } 1726 }
@@ -1727,7 +1733,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1727 } 1733 }
1728 1734
1729 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); 1735 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
1730 sctp_primitive_ABORT(asoc, chunk); 1736 sctp_primitive_ABORT(net, asoc, chunk);
1731 err = 0; 1737 err = 0;
1732 goto out_unlock; 1738 goto out_unlock;
1733 } 1739 }
@@ -1900,7 +1906,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1900 1906
1901 /* Auto-connect, if we aren't connected already. */ 1907 /* Auto-connect, if we aren't connected already. */
1902 if (sctp_state(asoc, CLOSED)) { 1908 if (sctp_state(asoc, CLOSED)) {
1903 err = sctp_primitive_ASSOCIATE(asoc, NULL); 1909 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1904 if (err < 0) 1910 if (err < 0)
1905 goto out_free; 1911 goto out_free;
1906 SCTP_DEBUG_PRINTK("We associated primitively.\n"); 1912 SCTP_DEBUG_PRINTK("We associated primitively.\n");
@@ -1928,7 +1934,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1928 * works that way today. Keep it that way or this 1934 * works that way today. Keep it that way or this
1929 * breaks. 1935 * breaks.
1930 */ 1936 */
1931 err = sctp_primitive_SEND(asoc, datamsg); 1937 err = sctp_primitive_SEND(net, asoc, datamsg);
1932 /* Did the lower layer accept the chunk? */ 1938 /* Did the lower layer accept the chunk? */
1933 if (err) 1939 if (err)
1934 sctp_datamsg_free(datamsg); 1940 sctp_datamsg_free(datamsg);
@@ -2320,7 +2326,9 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2320 int error; 2326 int error;
2321 2327
2322 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2328 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2323 error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans); 2329 struct net *net = sock_net(trans->asoc->base.sk);
2330
2331 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2324 if (error) 2332 if (error)
2325 return error; 2333 return error;
2326 } 2334 }
@@ -3033,6 +3041,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
3033static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 3041static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3034 unsigned int optlen) 3042 unsigned int optlen)
3035{ 3043{
3044 struct net *net = sock_net(sk);
3036 struct sctp_sock *sp; 3045 struct sctp_sock *sp;
3037 struct sctp_association *asoc = NULL; 3046 struct sctp_association *asoc = NULL;
3038 struct sctp_setpeerprim prim; 3047 struct sctp_setpeerprim prim;
@@ -3042,7 +3051,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
3042 3051
3043 sp = sctp_sk(sk); 3052 sp = sctp_sk(sk);
3044 3053
3045 if (!sctp_addip_enable) 3054 if (!net->sctp.addip_enable)
3046 return -EPERM; 3055 return -EPERM;
3047 3056
3048 if (optlen != sizeof(struct sctp_setpeerprim)) 3057 if (optlen != sizeof(struct sctp_setpeerprim))
@@ -3279,9 +3288,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
3279 char __user *optval, 3288 char __user *optval,
3280 unsigned int optlen) 3289 unsigned int optlen)
3281{ 3290{
3291 struct net *net = sock_net(sk);
3282 struct sctp_authchunk val; 3292 struct sctp_authchunk val;
3283 3293
3284 if (!sctp_auth_enable) 3294 if (!net->sctp.auth_enable)
3285 return -EACCES; 3295 return -EACCES;
3286 3296
3287 if (optlen != sizeof(struct sctp_authchunk)) 3297 if (optlen != sizeof(struct sctp_authchunk))
@@ -3311,11 +3321,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3311 char __user *optval, 3321 char __user *optval,
3312 unsigned int optlen) 3322 unsigned int optlen)
3313{ 3323{
3324 struct net *net = sock_net(sk);
3314 struct sctp_hmacalgo *hmacs; 3325 struct sctp_hmacalgo *hmacs;
3315 u32 idents; 3326 u32 idents;
3316 int err; 3327 int err;
3317 3328
3318 if (!sctp_auth_enable) 3329 if (!net->sctp.auth_enable)
3319 return -EACCES; 3330 return -EACCES;
3320 3331
3321 if (optlen < sizeof(struct sctp_hmacalgo)) 3332 if (optlen < sizeof(struct sctp_hmacalgo))
@@ -3348,11 +3359,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3348 char __user *optval, 3359 char __user *optval,
3349 unsigned int optlen) 3360 unsigned int optlen)
3350{ 3361{
3362 struct net *net = sock_net(sk);
3351 struct sctp_authkey *authkey; 3363 struct sctp_authkey *authkey;
3352 struct sctp_association *asoc; 3364 struct sctp_association *asoc;
3353 int ret; 3365 int ret;
3354 3366
3355 if (!sctp_auth_enable) 3367 if (!net->sctp.auth_enable)
3356 return -EACCES; 3368 return -EACCES;
3357 3369
3358 if (optlen <= sizeof(struct sctp_authkey)) 3370 if (optlen <= sizeof(struct sctp_authkey))
@@ -3389,10 +3401,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
3389 char __user *optval, 3401 char __user *optval,
3390 unsigned int optlen) 3402 unsigned int optlen)
3391{ 3403{
3404 struct net *net = sock_net(sk);
3392 struct sctp_authkeyid val; 3405 struct sctp_authkeyid val;
3393 struct sctp_association *asoc; 3406 struct sctp_association *asoc;
3394 3407
3395 if (!sctp_auth_enable) 3408 if (!net->sctp.auth_enable)
3396 return -EACCES; 3409 return -EACCES;
3397 3410
3398 if (optlen != sizeof(struct sctp_authkeyid)) 3411 if (optlen != sizeof(struct sctp_authkeyid))
@@ -3417,10 +3430,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3417 char __user *optval, 3430 char __user *optval,
3418 unsigned int optlen) 3431 unsigned int optlen)
3419{ 3432{
3433 struct net *net = sock_net(sk);
3420 struct sctp_authkeyid val; 3434 struct sctp_authkeyid val;
3421 struct sctp_association *asoc; 3435 struct sctp_association *asoc;
3422 3436
3423 if (!sctp_auth_enable) 3437 if (!net->sctp.auth_enable)
3424 return -EACCES; 3438 return -EACCES;
3425 3439
3426 if (optlen != sizeof(struct sctp_authkeyid)) 3440 if (optlen != sizeof(struct sctp_authkeyid))
@@ -3471,7 +3485,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3471 sp->do_auto_asconf = 0; 3485 sp->do_auto_asconf = 0;
3472 } else if (val && !sp->do_auto_asconf) { 3486 } else if (val && !sp->do_auto_asconf) {
3473 list_add_tail(&sp->auto_asconf_list, 3487 list_add_tail(&sp->auto_asconf_list,
3474 &sctp_auto_asconf_splist); 3488 &sock_net(sk)->sctp.auto_asconf_splist);
3475 sp->do_auto_asconf = 1; 3489 sp->do_auto_asconf = 1;
3476 } 3490 }
3477 return 0; 3491 return 0;
@@ -3843,6 +3857,7 @@ out:
3843 */ 3857 */
3844SCTP_STATIC int sctp_init_sock(struct sock *sk) 3858SCTP_STATIC int sctp_init_sock(struct sock *sk)
3845{ 3859{
3860 struct net *net = sock_net(sk);
3846 struct sctp_endpoint *ep; 3861 struct sctp_endpoint *ep;
3847 struct sctp_sock *sp; 3862 struct sctp_sock *sp;
3848 3863
@@ -3872,7 +3887,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3872 sp->default_timetolive = 0; 3887 sp->default_timetolive = 0;
3873 3888
3874 sp->default_rcv_context = 0; 3889 sp->default_rcv_context = 0;
3875 sp->max_burst = sctp_max_burst; 3890 sp->max_burst = net->sctp.max_burst;
3876 3891
3877 /* Initialize default setup parameters. These parameters 3892 /* Initialize default setup parameters. These parameters
3878 * can be modified with the SCTP_INITMSG socket option or 3893 * can be modified with the SCTP_INITMSG socket option or
@@ -3880,24 +3895,24 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3880 */ 3895 */
3881 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; 3896 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
3882 sp->initmsg.sinit_max_instreams = sctp_max_instreams; 3897 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
3883 sp->initmsg.sinit_max_attempts = sctp_max_retrans_init; 3898 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
3884 sp->initmsg.sinit_max_init_timeo = sctp_rto_max; 3899 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
3885 3900
3886 /* Initialize default RTO related parameters. These parameters can 3901 /* Initialize default RTO related parameters. These parameters can
3887 * be modified for with the SCTP_RTOINFO socket option. 3902 * be modified for with the SCTP_RTOINFO socket option.
3888 */ 3903 */
3889 sp->rtoinfo.srto_initial = sctp_rto_initial; 3904 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
3890 sp->rtoinfo.srto_max = sctp_rto_max; 3905 sp->rtoinfo.srto_max = net->sctp.rto_max;
3891 sp->rtoinfo.srto_min = sctp_rto_min; 3906 sp->rtoinfo.srto_min = net->sctp.rto_min;
3892 3907
3893 /* Initialize default association related parameters. These parameters 3908 /* Initialize default association related parameters. These parameters
3894 * can be modified with the SCTP_ASSOCINFO socket option. 3909 * can be modified with the SCTP_ASSOCINFO socket option.
3895 */ 3910 */
3896 sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association; 3911 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
3897 sp->assocparams.sasoc_number_peer_destinations = 0; 3912 sp->assocparams.sasoc_number_peer_destinations = 0;
3898 sp->assocparams.sasoc_peer_rwnd = 0; 3913 sp->assocparams.sasoc_peer_rwnd = 0;
3899 sp->assocparams.sasoc_local_rwnd = 0; 3914 sp->assocparams.sasoc_local_rwnd = 0;
3900 sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life; 3915 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
3901 3916
3902 /* Initialize default event subscriptions. By default, all the 3917 /* Initialize default event subscriptions. By default, all the
3903 * options are off. 3918 * options are off.
@@ -3907,10 +3922,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3907 /* Default Peer Address Parameters. These defaults can 3922 /* Default Peer Address Parameters. These defaults can
3908 * be modified via SCTP_PEER_ADDR_PARAMS 3923 * be modified via SCTP_PEER_ADDR_PARAMS
3909 */ 3924 */
3910 sp->hbinterval = sctp_hb_interval; 3925 sp->hbinterval = net->sctp.hb_interval;
3911 sp->pathmaxrxt = sctp_max_retrans_path; 3926 sp->pathmaxrxt = net->sctp.max_retrans_path;
3912 sp->pathmtu = 0; // allow default discovery 3927 sp->pathmtu = 0; // allow default discovery
3913 sp->sackdelay = sctp_sack_timeout; 3928 sp->sackdelay = net->sctp.sack_timeout;
3914 sp->sackfreq = 2; 3929 sp->sackfreq = 2;
3915 sp->param_flags = SPP_HB_ENABLE | 3930 sp->param_flags = SPP_HB_ENABLE |
3916 SPP_PMTUD_ENABLE | 3931 SPP_PMTUD_ENABLE |
@@ -3961,10 +3976,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3961 3976
3962 local_bh_disable(); 3977 local_bh_disable();
3963 percpu_counter_inc(&sctp_sockets_allocated); 3978 percpu_counter_inc(&sctp_sockets_allocated);
3964 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3979 sock_prot_inuse_add(net, sk->sk_prot, 1);
3965 if (sctp_default_auto_asconf) { 3980 if (net->sctp.default_auto_asconf) {
3966 list_add_tail(&sp->auto_asconf_list, 3981 list_add_tail(&sp->auto_asconf_list,
3967 &sctp_auto_asconf_splist); 3982 &net->sctp.auto_asconf_splist);
3968 sp->do_auto_asconf = 1; 3983 sp->do_auto_asconf = 1;
3969 } else 3984 } else
3970 sp->do_auto_asconf = 0; 3985 sp->do_auto_asconf = 0;
@@ -4011,6 +4026,7 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
4011 */ 4026 */
4012SCTP_STATIC void sctp_shutdown(struct sock *sk, int how) 4027SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
4013{ 4028{
4029 struct net *net = sock_net(sk);
4014 struct sctp_endpoint *ep; 4030 struct sctp_endpoint *ep;
4015 struct sctp_association *asoc; 4031 struct sctp_association *asoc;
4016 4032
@@ -4022,7 +4038,7 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
4022 if (!list_empty(&ep->asocs)) { 4038 if (!list_empty(&ep->asocs)) {
4023 asoc = list_entry(ep->asocs.next, 4039 asoc = list_entry(ep->asocs.next,
4024 struct sctp_association, asocs); 4040 struct sctp_association, asocs);
4025 sctp_primitive_SHUTDOWN(asoc, NULL); 4041 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4026 } 4042 }
4027 } 4043 }
4028} 4044}
@@ -4653,9 +4669,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4653 union sctp_addr temp; 4669 union sctp_addr temp;
4654 int cnt = 0; 4670 int cnt = 0;
4655 int addrlen; 4671 int addrlen;
4672 struct net *net = sock_net(sk);
4656 4673
4657 rcu_read_lock(); 4674 rcu_read_lock();
4658 list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { 4675 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
4659 if (!addr->valid) 4676 if (!addr->valid)
4660 continue; 4677 continue;
4661 4678
@@ -5299,12 +5316,13 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
5299static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, 5316static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5300 char __user *optval, int __user *optlen) 5317 char __user *optval, int __user *optlen)
5301{ 5318{
5319 struct net *net = sock_net(sk);
5302 struct sctp_hmacalgo __user *p = (void __user *)optval; 5320 struct sctp_hmacalgo __user *p = (void __user *)optval;
5303 struct sctp_hmac_algo_param *hmacs; 5321 struct sctp_hmac_algo_param *hmacs;
5304 __u16 data_len = 0; 5322 __u16 data_len = 0;
5305 u32 num_idents; 5323 u32 num_idents;
5306 5324
5307 if (!sctp_auth_enable) 5325 if (!net->sctp.auth_enable)
5308 return -EACCES; 5326 return -EACCES;
5309 5327
5310 hmacs = sctp_sk(sk)->ep->auth_hmacs_list; 5328 hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
@@ -5328,10 +5346,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5328static int sctp_getsockopt_active_key(struct sock *sk, int len, 5346static int sctp_getsockopt_active_key(struct sock *sk, int len,
5329 char __user *optval, int __user *optlen) 5347 char __user *optval, int __user *optlen)
5330{ 5348{
5349 struct net *net = sock_net(sk);
5331 struct sctp_authkeyid val; 5350 struct sctp_authkeyid val;
5332 struct sctp_association *asoc; 5351 struct sctp_association *asoc;
5333 5352
5334 if (!sctp_auth_enable) 5353 if (!net->sctp.auth_enable)
5335 return -EACCES; 5354 return -EACCES;
5336 5355
5337 if (len < sizeof(struct sctp_authkeyid)) 5356 if (len < sizeof(struct sctp_authkeyid))
@@ -5360,6 +5379,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
5360static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, 5379static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5361 char __user *optval, int __user *optlen) 5380 char __user *optval, int __user *optlen)
5362{ 5381{
5382 struct net *net = sock_net(sk);
5363 struct sctp_authchunks __user *p = (void __user *)optval; 5383 struct sctp_authchunks __user *p = (void __user *)optval;
5364 struct sctp_authchunks val; 5384 struct sctp_authchunks val;
5365 struct sctp_association *asoc; 5385 struct sctp_association *asoc;
@@ -5367,7 +5387,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5367 u32 num_chunks = 0; 5387 u32 num_chunks = 0;
5368 char __user *to; 5388 char __user *to;
5369 5389
5370 if (!sctp_auth_enable) 5390 if (!net->sctp.auth_enable)
5371 return -EACCES; 5391 return -EACCES;
5372 5392
5373 if (len < sizeof(struct sctp_authchunks)) 5393 if (len < sizeof(struct sctp_authchunks))
@@ -5403,6 +5423,7 @@ num:
5403static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, 5423static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5404 char __user *optval, int __user *optlen) 5424 char __user *optval, int __user *optlen)
5405{ 5425{
5426 struct net *net = sock_net(sk);
5406 struct sctp_authchunks __user *p = (void __user *)optval; 5427 struct sctp_authchunks __user *p = (void __user *)optval;
5407 struct sctp_authchunks val; 5428 struct sctp_authchunks val;
5408 struct sctp_association *asoc; 5429 struct sctp_association *asoc;
@@ -5410,7 +5431,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5410 u32 num_chunks = 0; 5431 u32 num_chunks = 0;
5411 char __user *to; 5432 char __user *to;
5412 5433
5413 if (!sctp_auth_enable) 5434 if (!net->sctp.auth_enable)
5414 return -EACCES; 5435 return -EACCES;
5415 5436
5416 if (len < sizeof(struct sctp_authchunks)) 5437 if (len < sizeof(struct sctp_authchunks))
@@ -5769,7 +5790,7 @@ static void sctp_unhash(struct sock *sk)
5769 * a fastreuse flag (FIXME: NPI ipg). 5790 * a fastreuse flag (FIXME: NPI ipg).
5770 */ 5791 */
5771static struct sctp_bind_bucket *sctp_bucket_create( 5792static struct sctp_bind_bucket *sctp_bucket_create(
5772 struct sctp_bind_hashbucket *head, unsigned short snum); 5793 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
5773 5794
5774static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 5795static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5775{ 5796{
@@ -5799,11 +5820,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5799 rover = low; 5820 rover = low;
5800 if (inet_is_reserved_local_port(rover)) 5821 if (inet_is_reserved_local_port(rover))
5801 continue; 5822 continue;
5802 index = sctp_phashfn(rover); 5823 index = sctp_phashfn(sock_net(sk), rover);
5803 head = &sctp_port_hashtable[index]; 5824 head = &sctp_port_hashtable[index];
5804 sctp_spin_lock(&head->lock); 5825 sctp_spin_lock(&head->lock);
5805 sctp_for_each_hentry(pp, node, &head->chain) 5826 sctp_for_each_hentry(pp, node, &head->chain)
5806 if (pp->port == rover) 5827 if ((pp->port == rover) &&
5828 net_eq(sock_net(sk), pp->net))
5807 goto next; 5829 goto next;
5808 break; 5830 break;
5809 next: 5831 next:
@@ -5827,10 +5849,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5827 * to the port number (snum) - we detect that with the 5849 * to the port number (snum) - we detect that with the
5828 * port iterator, pp being NULL. 5850 * port iterator, pp being NULL.
5829 */ 5851 */
5830 head = &sctp_port_hashtable[sctp_phashfn(snum)]; 5852 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
5831 sctp_spin_lock(&head->lock); 5853 sctp_spin_lock(&head->lock);
5832 sctp_for_each_hentry(pp, node, &head->chain) { 5854 sctp_for_each_hentry(pp, node, &head->chain) {
5833 if (pp->port == snum) 5855 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
5834 goto pp_found; 5856 goto pp_found;
5835 } 5857 }
5836 } 5858 }
@@ -5881,7 +5903,7 @@ pp_found:
5881pp_not_found: 5903pp_not_found:
5882 /* If there was a hash table miss, create a new port. */ 5904 /* If there was a hash table miss, create a new port. */
5883 ret = 1; 5905 ret = 1;
5884 if (!pp && !(pp = sctp_bucket_create(head, snum))) 5906 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
5885 goto fail_unlock; 5907 goto fail_unlock;
5886 5908
5887 /* In either case (hit or miss), make sure fastreuse is 1 only 5909 /* In either case (hit or miss), make sure fastreuse is 1 only
@@ -6113,7 +6135,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6113 ********************************************************************/ 6135 ********************************************************************/
6114 6136
6115static struct sctp_bind_bucket *sctp_bucket_create( 6137static struct sctp_bind_bucket *sctp_bucket_create(
6116 struct sctp_bind_hashbucket *head, unsigned short snum) 6138 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
6117{ 6139{
6118 struct sctp_bind_bucket *pp; 6140 struct sctp_bind_bucket *pp;
6119 6141
@@ -6123,6 +6145,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
6123 pp->port = snum; 6145 pp->port = snum;
6124 pp->fastreuse = 0; 6146 pp->fastreuse = 0;
6125 INIT_HLIST_HEAD(&pp->owner); 6147 INIT_HLIST_HEAD(&pp->owner);
6148 pp->net = net;
6126 hlist_add_head(&pp->node, &head->chain); 6149 hlist_add_head(&pp->node, &head->chain);
6127 } 6150 }
6128 return pp; 6151 return pp;
@@ -6142,7 +6165,8 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
6142static inline void __sctp_put_port(struct sock *sk) 6165static inline void __sctp_put_port(struct sock *sk)
6143{ 6166{
6144 struct sctp_bind_hashbucket *head = 6167 struct sctp_bind_hashbucket *head =
6145 &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)]; 6168 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
6169 inet_sk(sk)->inet_num)];
6146 struct sctp_bind_bucket *pp; 6170 struct sctp_bind_bucket *pp;
6147 6171
6148 sctp_spin_lock(&head->lock); 6172 sctp_spin_lock(&head->lock);
@@ -6809,7 +6833,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6809 newsp->hmac = NULL; 6833 newsp->hmac = NULL;
6810 6834
6811 /* Hook this new socket in to the bind_hash list. */ 6835 /* Hook this new socket in to the bind_hash list. */
6812 head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)]; 6836 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
6837 inet_sk(oldsk)->inet_num)];
6813 sctp_local_bh_disable(); 6838 sctp_local_bh_disable();
6814 sctp_spin_lock(&head->lock); 6839 sctp_spin_lock(&head->lock);
6815 pp = sctp_sk(oldsk)->bind_hash; 6840 pp = sctp_sk(oldsk)->bind_hash;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 2b2bfe933ff1..70e3ba5cb50b 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -64,8 +64,34 @@ extern int sysctl_sctp_wmem[3];
64 64
65static ctl_table sctp_table[] = { 65static ctl_table sctp_table[] = {
66 { 66 {
67 .procname = "sctp_mem",
68 .data = &sysctl_sctp_mem,
69 .maxlen = sizeof(sysctl_sctp_mem),
70 .mode = 0644,
71 .proc_handler = proc_doulongvec_minmax
72 },
73 {
74 .procname = "sctp_rmem",
75 .data = &sysctl_sctp_rmem,
76 .maxlen = sizeof(sysctl_sctp_rmem),
77 .mode = 0644,
78 .proc_handler = proc_dointvec,
79 },
80 {
81 .procname = "sctp_wmem",
82 .data = &sysctl_sctp_wmem,
83 .maxlen = sizeof(sysctl_sctp_wmem),
84 .mode = 0644,
85 .proc_handler = proc_dointvec,
86 },
87
88 { /* sentinel */ }
89};
90
91static ctl_table sctp_net_table[] = {
92 {
67 .procname = "rto_initial", 93 .procname = "rto_initial",
68 .data = &sctp_rto_initial, 94 .data = &init_net.sctp.rto_initial,
69 .maxlen = sizeof(unsigned int), 95 .maxlen = sizeof(unsigned int),
70 .mode = 0644, 96 .mode = 0644,
71 .proc_handler = proc_dointvec_minmax, 97 .proc_handler = proc_dointvec_minmax,
@@ -74,7 +100,7 @@ static ctl_table sctp_table[] = {
74 }, 100 },
75 { 101 {
76 .procname = "rto_min", 102 .procname = "rto_min",
77 .data = &sctp_rto_min, 103 .data = &init_net.sctp.rto_min,
78 .maxlen = sizeof(unsigned int), 104 .maxlen = sizeof(unsigned int),
79 .mode = 0644, 105 .mode = 0644,
80 .proc_handler = proc_dointvec_minmax, 106 .proc_handler = proc_dointvec_minmax,
@@ -83,7 +109,7 @@ static ctl_table sctp_table[] = {
83 }, 109 },
84 { 110 {
85 .procname = "rto_max", 111 .procname = "rto_max",
86 .data = &sctp_rto_max, 112 .data = &init_net.sctp.rto_max,
87 .maxlen = sizeof(unsigned int), 113 .maxlen = sizeof(unsigned int),
88 .mode = 0644, 114 .mode = 0644,
89 .proc_handler = proc_dointvec_minmax, 115 .proc_handler = proc_dointvec_minmax,
@@ -91,17 +117,22 @@ static ctl_table sctp_table[] = {
91 .extra2 = &timer_max 117 .extra2 = &timer_max
92 }, 118 },
93 { 119 {
94 .procname = "valid_cookie_life", 120 .procname = "rto_alpha_exp_divisor",
95 .data = &sctp_valid_cookie_life, 121 .data = &init_net.sctp.rto_alpha,
96 .maxlen = sizeof(unsigned int), 122 .maxlen = sizeof(int),
97 .mode = 0644, 123 .mode = 0444,
98 .proc_handler = proc_dointvec_minmax, 124 .proc_handler = proc_dointvec,
99 .extra1 = &one, 125 },
100 .extra2 = &timer_max 126 {
127 .procname = "rto_beta_exp_divisor",
128 .data = &init_net.sctp.rto_beta,
129 .maxlen = sizeof(int),
130 .mode = 0444,
131 .proc_handler = proc_dointvec,
101 }, 132 },
102 { 133 {
103 .procname = "max_burst", 134 .procname = "max_burst",
104 .data = &sctp_max_burst, 135 .data = &init_net.sctp.max_burst,
105 .maxlen = sizeof(int), 136 .maxlen = sizeof(int),
106 .mode = 0644, 137 .mode = 0644,
107 .proc_handler = proc_dointvec_minmax, 138 .proc_handler = proc_dointvec_minmax,
@@ -109,31 +140,42 @@ static ctl_table sctp_table[] = {
109 .extra2 = &int_max 140 .extra2 = &int_max
110 }, 141 },
111 { 142 {
112 .procname = "association_max_retrans", 143 .procname = "cookie_preserve_enable",
113 .data = &sctp_max_retrans_association, 144 .data = &init_net.sctp.cookie_preserve_enable,
114 .maxlen = sizeof(int), 145 .maxlen = sizeof(int),
115 .mode = 0644, 146 .mode = 0644,
147 .proc_handler = proc_dointvec,
148 },
149 {
150 .procname = "valid_cookie_life",
151 .data = &init_net.sctp.valid_cookie_life,
152 .maxlen = sizeof(unsigned int),
153 .mode = 0644,
116 .proc_handler = proc_dointvec_minmax, 154 .proc_handler = proc_dointvec_minmax,
117 .extra1 = &one, 155 .extra1 = &one,
118 .extra2 = &int_max 156 .extra2 = &timer_max
119 }, 157 },
120 { 158 {
121 .procname = "sndbuf_policy", 159 .procname = "sack_timeout",
122 .data = &sctp_sndbuf_policy, 160 .data = &init_net.sctp.sack_timeout,
123 .maxlen = sizeof(int), 161 .maxlen = sizeof(int),
124 .mode = 0644, 162 .mode = 0644,
125 .proc_handler = proc_dointvec, 163 .proc_handler = proc_dointvec_minmax,
164 .extra1 = &sack_timer_min,
165 .extra2 = &sack_timer_max,
126 }, 166 },
127 { 167 {
128 .procname = "rcvbuf_policy", 168 .procname = "hb_interval",
129 .data = &sctp_rcvbuf_policy, 169 .data = &init_net.sctp.hb_interval,
130 .maxlen = sizeof(int), 170 .maxlen = sizeof(unsigned int),
131 .mode = 0644, 171 .mode = 0644,
132 .proc_handler = proc_dointvec, 172 .proc_handler = proc_dointvec_minmax,
173 .extra1 = &one,
174 .extra2 = &timer_max
133 }, 175 },
134 { 176 {
135 .procname = "path_max_retrans", 177 .procname = "association_max_retrans",
136 .data = &sctp_max_retrans_path, 178 .data = &init_net.sctp.max_retrans_association,
137 .maxlen = sizeof(int), 179 .maxlen = sizeof(int),
138 .mode = 0644, 180 .mode = 0644,
139 .proc_handler = proc_dointvec_minmax, 181 .proc_handler = proc_dointvec_minmax,
@@ -141,17 +183,17 @@ static ctl_table sctp_table[] = {
141 .extra2 = &int_max 183 .extra2 = &int_max
142 }, 184 },
143 { 185 {
144 .procname = "pf_retrans", 186 .procname = "path_max_retrans",
145 .data = &sctp_pf_retrans, 187 .data = &init_net.sctp.max_retrans_path,
146 .maxlen = sizeof(int), 188 .maxlen = sizeof(int),
147 .mode = 0644, 189 .mode = 0644,
148 .proc_handler = proc_dointvec_minmax, 190 .proc_handler = proc_dointvec_minmax,
149 .extra1 = &zero, 191 .extra1 = &one,
150 .extra2 = &int_max 192 .extra2 = &int_max
151 }, 193 },
152 { 194 {
153 .procname = "max_init_retransmits", 195 .procname = "max_init_retransmits",
154 .data = &sctp_max_retrans_init, 196 .data = &init_net.sctp.max_retrans_init,
155 .maxlen = sizeof(int), 197 .maxlen = sizeof(int),
156 .mode = 0644, 198 .mode = 0644,
157 .proc_handler = proc_dointvec_minmax, 199 .proc_handler = proc_dointvec_minmax,
@@ -159,103 +201,66 @@ static ctl_table sctp_table[] = {
159 .extra2 = &int_max 201 .extra2 = &int_max
160 }, 202 },
161 { 203 {
162 .procname = "hb_interval", 204 .procname = "pf_retrans",
163 .data = &sctp_hb_interval, 205 .data = &init_net.sctp.pf_retrans,
164 .maxlen = sizeof(unsigned int), 206 .maxlen = sizeof(int),
165 .mode = 0644, 207 .mode = 0644,
166 .proc_handler = proc_dointvec_minmax, 208 .proc_handler = proc_dointvec_minmax,
167 .extra1 = &one, 209 .extra1 = &zero,
168 .extra2 = &timer_max 210 .extra2 = &int_max
169 }, 211 },
170 { 212 {
171 .procname = "cookie_preserve_enable", 213 .procname = "sndbuf_policy",
172 .data = &sctp_cookie_preserve_enable, 214 .data = &init_net.sctp.sndbuf_policy,
173 .maxlen = sizeof(int), 215 .maxlen = sizeof(int),
174 .mode = 0644, 216 .mode = 0644,
175 .proc_handler = proc_dointvec, 217 .proc_handler = proc_dointvec,
176 }, 218 },
177 { 219 {
178 .procname = "rto_alpha_exp_divisor", 220 .procname = "rcvbuf_policy",
179 .data = &sctp_rto_alpha, 221 .data = &init_net.sctp.rcvbuf_policy,
180 .maxlen = sizeof(int),
181 .mode = 0444,
182 .proc_handler = proc_dointvec,
183 },
184 {
185 .procname = "rto_beta_exp_divisor",
186 .data = &sctp_rto_beta,
187 .maxlen = sizeof(int),
188 .mode = 0444,
189 .proc_handler = proc_dointvec,
190 },
191 {
192 .procname = "addip_enable",
193 .data = &sctp_addip_enable,
194 .maxlen = sizeof(int), 222 .maxlen = sizeof(int),
195 .mode = 0644, 223 .mode = 0644,
196 .proc_handler = proc_dointvec, 224 .proc_handler = proc_dointvec,
197 }, 225 },
198 { 226 {
199 .procname = "default_auto_asconf", 227 .procname = "default_auto_asconf",
200 .data = &sctp_default_auto_asconf, 228 .data = &init_net.sctp.default_auto_asconf,
201 .maxlen = sizeof(int), 229 .maxlen = sizeof(int),
202 .mode = 0644, 230 .mode = 0644,
203 .proc_handler = proc_dointvec, 231 .proc_handler = proc_dointvec,
204 }, 232 },
205 { 233 {
206 .procname = "prsctp_enable", 234 .procname = "addip_enable",
207 .data = &sctp_prsctp_enable, 235 .data = &init_net.sctp.addip_enable,
208 .maxlen = sizeof(int), 236 .maxlen = sizeof(int),
209 .mode = 0644, 237 .mode = 0644,
210 .proc_handler = proc_dointvec, 238 .proc_handler = proc_dointvec,
211 }, 239 },
212 { 240 {
213 .procname = "sack_timeout", 241 .procname = "addip_noauth_enable",
214 .data = &sctp_sack_timeout, 242 .data = &init_net.sctp.addip_noauth,
215 .maxlen = sizeof(int), 243 .maxlen = sizeof(int),
216 .mode = 0644, 244 .mode = 0644,
217 .proc_handler = proc_dointvec_minmax,
218 .extra1 = &sack_timer_min,
219 .extra2 = &sack_timer_max,
220 },
221 {
222 .procname = "sctp_mem",
223 .data = &sysctl_sctp_mem,
224 .maxlen = sizeof(sysctl_sctp_mem),
225 .mode = 0644,
226 .proc_handler = proc_doulongvec_minmax
227 },
228 {
229 .procname = "sctp_rmem",
230 .data = &sysctl_sctp_rmem,
231 .maxlen = sizeof(sysctl_sctp_rmem),
232 .mode = 0644,
233 .proc_handler = proc_dointvec,
234 },
235 {
236 .procname = "sctp_wmem",
237 .data = &sysctl_sctp_wmem,
238 .maxlen = sizeof(sysctl_sctp_wmem),
239 .mode = 0644,
240 .proc_handler = proc_dointvec, 245 .proc_handler = proc_dointvec,
241 }, 246 },
242 { 247 {
243 .procname = "auth_enable", 248 .procname = "prsctp_enable",
244 .data = &sctp_auth_enable, 249 .data = &init_net.sctp.prsctp_enable,
245 .maxlen = sizeof(int), 250 .maxlen = sizeof(int),
246 .mode = 0644, 251 .mode = 0644,
247 .proc_handler = proc_dointvec, 252 .proc_handler = proc_dointvec,
248 }, 253 },
249 { 254 {
250 .procname = "addip_noauth_enable", 255 .procname = "auth_enable",
251 .data = &sctp_addip_noauth, 256 .data = &init_net.sctp.auth_enable,
252 .maxlen = sizeof(int), 257 .maxlen = sizeof(int),
253 .mode = 0644, 258 .mode = 0644,
254 .proc_handler = proc_dointvec, 259 .proc_handler = proc_dointvec,
255 }, 260 },
256 { 261 {
257 .procname = "addr_scope_policy", 262 .procname = "addr_scope_policy",
258 .data = &sctp_scope_policy, 263 .data = &init_net.sctp.scope_policy,
259 .maxlen = sizeof(int), 264 .maxlen = sizeof(int),
260 .mode = 0644, 265 .mode = 0644,
261 .proc_handler = proc_dointvec_minmax, 266 .proc_handler = proc_dointvec_minmax,
@@ -264,7 +269,7 @@ static ctl_table sctp_table[] = {
264 }, 269 },
265 { 270 {
266 .procname = "rwnd_update_shift", 271 .procname = "rwnd_update_shift",
267 .data = &sctp_rwnd_upd_shift, 272 .data = &init_net.sctp.rwnd_upd_shift,
268 .maxlen = sizeof(int), 273 .maxlen = sizeof(int),
269 .mode = 0644, 274 .mode = 0644,
270 .proc_handler = &proc_dointvec_minmax, 275 .proc_handler = &proc_dointvec_minmax,
@@ -273,7 +278,7 @@ static ctl_table sctp_table[] = {
273 }, 278 },
274 { 279 {
275 .procname = "max_autoclose", 280 .procname = "max_autoclose",
276 .data = &sctp_max_autoclose, 281 .data = &init_net.sctp.max_autoclose,
277 .maxlen = sizeof(unsigned long), 282 .maxlen = sizeof(unsigned long),
278 .mode = 0644, 283 .mode = 0644,
279 .proc_handler = &proc_doulongvec_minmax, 284 .proc_handler = &proc_doulongvec_minmax,
@@ -284,6 +289,27 @@ static ctl_table sctp_table[] = {
284 { /* sentinel */ } 289 { /* sentinel */ }
285}; 290};
286 291
292int sctp_sysctl_net_register(struct net *net)
293{
294 struct ctl_table *table;
295 int i;
296
297 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
298 if (!table)
299 return -ENOMEM;
300
301 for (i = 0; table[i].data; i++)
302 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
303
304 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
305 return 0;
306}
307
308void sctp_sysctl_net_unregister(struct net *net)
309{
310 unregister_net_sysctl_table(net->sctp.sysctl_header);
311}
312
287static struct ctl_table_header * sctp_sysctl_header; 313static struct ctl_table_header * sctp_sysctl_header;
288 314
289/* Sysctl registration. */ 315/* Sysctl registration. */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c97472b248a2..953c21e4af97 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -59,7 +59,8 @@
59/* 1st Level Abstractions. */ 59/* 1st Level Abstractions. */
60 60
61/* Initialize a new transport from provided memory. */ 61/* Initialize a new transport from provided memory. */
62static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, 62static struct sctp_transport *sctp_transport_init(struct net *net,
63 struct sctp_transport *peer,
63 const union sctp_addr *addr, 64 const union sctp_addr *addr,
64 gfp_t gfp) 65 gfp_t gfp)
65{ 66{
@@ -76,7 +77,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
76 * given destination transport address, set RTO to the protocol 77 * given destination transport address, set RTO to the protocol
77 * parameter 'RTO.Initial'. 78 * parameter 'RTO.Initial'.
78 */ 79 */
79 peer->rto = msecs_to_jiffies(sctp_rto_initial); 80 peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
80 81
81 peer->last_time_heard = jiffies; 82 peer->last_time_heard = jiffies;
82 peer->last_time_ecne_reduced = jiffies; 83 peer->last_time_ecne_reduced = jiffies;
@@ -86,8 +87,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
86 SPP_SACKDELAY_ENABLE; 87 SPP_SACKDELAY_ENABLE;
87 88
88 /* Initialize the default path max_retrans. */ 89 /* Initialize the default path max_retrans. */
89 peer->pathmaxrxt = sctp_max_retrans_path; 90 peer->pathmaxrxt = net->sctp.max_retrans_path;
90 peer->pf_retrans = sctp_pf_retrans; 91 peer->pf_retrans = net->sctp.pf_retrans;
91 92
92 INIT_LIST_HEAD(&peer->transmitted); 93 INIT_LIST_HEAD(&peer->transmitted);
93 INIT_LIST_HEAD(&peer->send_ready); 94 INIT_LIST_HEAD(&peer->send_ready);
@@ -109,7 +110,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
109} 110}
110 111
111/* Allocate and initialize a new transport. */ 112/* Allocate and initialize a new transport. */
112struct sctp_transport *sctp_transport_new(const union sctp_addr *addr, 113struct sctp_transport *sctp_transport_new(struct net *net,
114 const union sctp_addr *addr,
113 gfp_t gfp) 115 gfp_t gfp)
114{ 116{
115 struct sctp_transport *transport; 117 struct sctp_transport *transport;
@@ -118,7 +120,7 @@ struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
118 if (!transport) 120 if (!transport)
119 goto fail; 121 goto fail;
120 122
121 if (!sctp_transport_init(transport, addr, gfp)) 123 if (!sctp_transport_init(net, transport, addr, gfp))
122 goto fail_init; 124 goto fail_init;
123 125
124 transport->malloced = 1; 126 transport->malloced = 1;
@@ -316,6 +318,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
316 SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return); 318 SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);
317 319
318 if (tp->rttvar || tp->srtt) { 320 if (tp->rttvar || tp->srtt) {
321 struct net *net = sock_net(tp->asoc->base.sk);
319 /* 6.3.1 C3) When a new RTT measurement R' is made, set 322 /* 6.3.1 C3) When a new RTT measurement R' is made, set
320 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| 323 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
321 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' 324 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
@@ -327,10 +330,10 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
327 * For example, assuming the default value of RTO.Alpha of 330 * For example, assuming the default value of RTO.Alpha of
328 * 1/8, rto_alpha would be expressed as 3. 331 * 1/8, rto_alpha would be expressed as 3.
329 */ 332 */
330 tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta) 333 tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
331 + ((abs(tp->srtt - rtt)) >> sctp_rto_beta); 334 + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
332 tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha) 335 tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
333 + (rtt >> sctp_rto_alpha); 336 + (rtt >> net->sctp.rto_alpha);
334 } else { 337 } else {
335 /* 6.3.1 C2) When the first RTT measurement R is made, set 338 /* 6.3.1 C2) When the first RTT measurement R is made, set
336 * SRTT <- R, RTTVAR <- R/2. 339 * SRTT <- R, RTTVAR <- R/2.
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f5a6a4f4faf7..360d8697b95c 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -326,7 +326,9 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
326 * payload was fragmented on the way and ip had to reassemble them. 326 * payload was fragmented on the way and ip had to reassemble them.
327 * We add the rest of skb's to the first skb's fraglist. 327 * We add the rest of skb's to the first skb's fraglist.
328 */ 328 */
329static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag) 329static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
330 struct sk_buff_head *queue, struct sk_buff *f_frag,
331 struct sk_buff *l_frag)
330{ 332{
331 struct sk_buff *pos; 333 struct sk_buff *pos;
332 struct sk_buff *new = NULL; 334 struct sk_buff *new = NULL;
@@ -394,7 +396,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
394 } 396 }
395 397
396 event = sctp_skb2event(f_frag); 398 event = sctp_skb2event(f_frag);
397 SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS); 399 SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
398 400
399 return event; 401 return event;
400} 402}
@@ -493,7 +495,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
493 cevent = sctp_skb2event(pd_first); 495 cevent = sctp_skb2event(pd_first);
494 pd_point = sctp_sk(asoc->base.sk)->pd_point; 496 pd_point = sctp_sk(asoc->base.sk)->pd_point;
495 if (pd_point && pd_point <= pd_len) { 497 if (pd_point && pd_point <= pd_len) {
496 retval = sctp_make_reassembled_event(&ulpq->reasm, 498 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
499 &ulpq->reasm,
497 pd_first, 500 pd_first,
498 pd_last); 501 pd_last);
499 if (retval) 502 if (retval)
@@ -503,7 +506,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
503done: 506done:
504 return retval; 507 return retval;
505found: 508found:
506 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos); 509 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
510 &ulpq->reasm, first_frag, pos);
507 if (retval) 511 if (retval)
508 retval->msg_flags |= MSG_EOR; 512 retval->msg_flags |= MSG_EOR;
509 goto done; 513 goto done;
@@ -563,7 +567,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
563 * further. 567 * further.
564 */ 568 */
565done: 569done:
566 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag); 570 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
571 &ulpq->reasm, first_frag, last_frag);
567 if (retval && is_last) 572 if (retval && is_last)
568 retval->msg_flags |= MSG_EOR; 573 retval->msg_flags |= MSG_EOR;
569 574
@@ -655,7 +660,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
655 * further. 660 * further.
656 */ 661 */
657done: 662done:
658 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag); 663 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
664 &ulpq->reasm, first_frag, last_frag);
659 return retval; 665 return retval;
660} 666}
661 667
diff --git a/net/socket.c b/net/socket.c
index edc3c4af9085..80dc7e84b046 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -88,6 +88,7 @@
88#include <linux/nsproxy.h> 88#include <linux/nsproxy.h>
89#include <linux/magic.h> 89#include <linux/magic.h>
90#include <linux/slab.h> 90#include <linux/slab.h>
91#include <linux/xattr.h>
91 92
92#include <asm/uaccess.h> 93#include <asm/uaccess.h>
93#include <asm/unistd.h> 94#include <asm/unistd.h>
@@ -346,7 +347,8 @@ static struct file_system_type sock_fs_type = {
346 * but we take care of internal coherence yet. 347 * but we take care of internal coherence yet.
347 */ 348 */
348 349
349static int sock_alloc_file(struct socket *sock, struct file **f, int flags) 350static int sock_alloc_file(struct socket *sock, struct file **f, int flags,
351 const char *dname)
350{ 352{
351 struct qstr name = { .name = "" }; 353 struct qstr name = { .name = "" };
352 struct path path; 354 struct path path;
@@ -357,6 +359,13 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
357 if (unlikely(fd < 0)) 359 if (unlikely(fd < 0))
358 return fd; 360 return fd;
359 361
362 if (dname) {
363 name.name = dname;
364 name.len = strlen(name.name);
365 } else if (sock->sk) {
366 name.name = sock->sk->sk_prot_creator->name;
367 name.len = strlen(name.name);
368 }
360 path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); 369 path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
361 if (unlikely(!path.dentry)) { 370 if (unlikely(!path.dentry)) {
362 put_unused_fd(fd); 371 put_unused_fd(fd);
@@ -389,7 +398,7 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
389int sock_map_fd(struct socket *sock, int flags) 398int sock_map_fd(struct socket *sock, int flags)
390{ 399{
391 struct file *newfile; 400 struct file *newfile;
392 int fd = sock_alloc_file(sock, &newfile, flags); 401 int fd = sock_alloc_file(sock, &newfile, flags, NULL);
393 402
394 if (likely(fd >= 0)) 403 if (likely(fd >= 0))
395 fd_install(fd, newfile); 404 fd_install(fd, newfile);
@@ -455,6 +464,68 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
455 return NULL; 464 return NULL;
456} 465}
457 466
467#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
468#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
469#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
470static ssize_t sockfs_getxattr(struct dentry *dentry,
471 const char *name, void *value, size_t size)
472{
473 const char *proto_name;
474 size_t proto_size;
475 int error;
476
477 error = -ENODATA;
478 if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
479 proto_name = dentry->d_name.name;
480 proto_size = strlen(proto_name);
481
482 if (value) {
483 error = -ERANGE;
484 if (proto_size + 1 > size)
485 goto out;
486
487 strncpy(value, proto_name, proto_size + 1);
488 }
489 error = proto_size + 1;
490 }
491
492out:
493 return error;
494}
495
496static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
497 size_t size)
498{
499 ssize_t len;
500 ssize_t used = 0;
501
502 len = security_inode_listsecurity(dentry->d_inode, buffer, size);
503 if (len < 0)
504 return len;
505 used += len;
506 if (buffer) {
507 if (size < used)
508 return -ERANGE;
509 buffer += len;
510 }
511
512 len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
513 used += len;
514 if (buffer) {
515 if (size < used)
516 return -ERANGE;
517 memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
518 buffer += len;
519 }
520
521 return used;
522}
523
524static const struct inode_operations sockfs_inode_ops = {
525 .getxattr = sockfs_getxattr,
526 .listxattr = sockfs_listxattr,
527};
528
458/** 529/**
459 * sock_alloc - allocate a socket 530 * sock_alloc - allocate a socket
460 * 531 *
@@ -479,6 +550,7 @@ static struct socket *sock_alloc(void)
479 inode->i_mode = S_IFSOCK | S_IRWXUGO; 550 inode->i_mode = S_IFSOCK | S_IRWXUGO;
480 inode->i_uid = current_fsuid(); 551 inode->i_uid = current_fsuid();
481 inode->i_gid = current_fsgid(); 552 inode->i_gid = current_fsgid();
553 inode->i_op = &sockfs_inode_ops;
482 554
483 this_cpu_add(sockets_in_use, 1); 555 this_cpu_add(sockets_in_use, 1);
484 return sock; 556 return sock;
@@ -1394,13 +1466,13 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
1394 if (err < 0) 1466 if (err < 0)
1395 goto out_release_both; 1467 goto out_release_both;
1396 1468
1397 fd1 = sock_alloc_file(sock1, &newfile1, flags); 1469 fd1 = sock_alloc_file(sock1, &newfile1, flags, NULL);
1398 if (unlikely(fd1 < 0)) { 1470 if (unlikely(fd1 < 0)) {
1399 err = fd1; 1471 err = fd1;
1400 goto out_release_both; 1472 goto out_release_both;
1401 } 1473 }
1402 1474
1403 fd2 = sock_alloc_file(sock2, &newfile2, flags); 1475 fd2 = sock_alloc_file(sock2, &newfile2, flags, NULL);
1404 if (unlikely(fd2 < 0)) { 1476 if (unlikely(fd2 < 0)) {
1405 err = fd2; 1477 err = fd2;
1406 fput(newfile1); 1478 fput(newfile1);
@@ -1536,7 +1608,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1536 */ 1608 */
1537 __module_get(newsock->ops->owner); 1609 __module_get(newsock->ops->owner);
1538 1610
1539 newfd = sock_alloc_file(newsock, &newfile, flags); 1611 newfd = sock_alloc_file(newsock, &newfile, flags,
1612 sock->sk->sk_prot_creator->name);
1540 if (unlikely(newfd < 0)) { 1613 if (unlikely(newfd < 0)) {
1541 err = newfd; 1614 err = newfd;
1542 sock_release(newsock); 1615 sock_release(newsock);
@@ -2528,12 +2601,6 @@ static int __init sock_init(void)
2528 goto out; 2601 goto out;
2529 2602
2530 /* 2603 /*
2531 * Initialize sock SLAB cache.
2532 */
2533
2534 sk_init();
2535
2536 /*
2537 * Initialize skbuff SLAB cache 2604 * Initialize skbuff SLAB cache
2538 */ 2605 */
2539 skb_init(); 2606 skb_init();
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 09e71241265d..4ec5c80e8a7c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -49,21 +49,6 @@ struct tipc_bearer tipc_bearers[MAX_BEARERS];
49static void bearer_disable(struct tipc_bearer *b_ptr); 49static void bearer_disable(struct tipc_bearer *b_ptr);
50 50
51/** 51/**
52 * media_name_valid - validate media name
53 *
54 * Returns 1 if media name is valid, otherwise 0.
55 */
56static int media_name_valid(const char *name)
57{
58 u32 len;
59
60 len = strlen(name);
61 if ((len + 1) > TIPC_MAX_MEDIA_NAME)
62 return 0;
63 return strspn(name, tipc_alphabet) == len;
64}
65
66/**
67 * tipc_media_find - locates specified media object by name 52 * tipc_media_find - locates specified media object by name
68 */ 53 */
69struct tipc_media *tipc_media_find(const char *name) 54struct tipc_media *tipc_media_find(const char *name)
@@ -102,7 +87,7 @@ int tipc_register_media(struct tipc_media *m_ptr)
102 87
103 write_lock_bh(&tipc_net_lock); 88 write_lock_bh(&tipc_net_lock);
104 89
105 if (!media_name_valid(m_ptr->name)) 90 if ((strlen(m_ptr->name) + 1) > TIPC_MAX_MEDIA_NAME)
106 goto exit; 91 goto exit;
107 if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) || 92 if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
108 !m_ptr->bcast_addr.broadcast) 93 !m_ptr->bcast_addr.broadcast)
@@ -206,9 +191,7 @@ static int bearer_name_validate(const char *name,
206 191
207 /* validate component parts of bearer name */ 192 /* validate component parts of bearer name */
208 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 193 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
209 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 194 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
210 (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
211 (strspn(if_name, tipc_alphabet) != (if_len - 1)))
212 return 0; 195 return 0;
213 196
214 /* return bearer name components, if necessary */ 197 /* return bearer name components, if necessary */
diff --git a/net/tipc/config.c b/net/tipc/config.c
index a056a3852f71..f67866c765dd 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
2 * net/tipc/config.c: TIPC configuration management code 2 * net/tipc/config.c: TIPC configuration management code
3 * 3 *
4 * Copyright (c) 2002-2006, Ericsson AB 4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2007, 2010-2012, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -208,36 +208,6 @@ static struct sk_buff *cfg_set_remote_mng(void)
208 return tipc_cfg_reply_none(); 208 return tipc_cfg_reply_none();
209} 209}
210 210
211static struct sk_buff *cfg_set_max_publications(void)
212{
213 u32 value;
214
215 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
216 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
217
218 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
219 if (value < 1 || value > 65535)
220 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
221 " (max publications must be 1-65535)");
222 tipc_max_publications = value;
223 return tipc_cfg_reply_none();
224}
225
226static struct sk_buff *cfg_set_max_subscriptions(void)
227{
228 u32 value;
229
230 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
231 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
232
233 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
234 if (value < 1 || value > 65535)
235 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
236 " (max subscriptions must be 1-65535");
237 tipc_max_subscriptions = value;
238 return tipc_cfg_reply_none();
239}
240
241static struct sk_buff *cfg_set_max_ports(void) 211static struct sk_buff *cfg_set_max_ports(void)
242{ 212{
243 u32 value; 213 u32 value;
@@ -357,12 +327,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
357 case TIPC_CMD_SET_MAX_PORTS: 327 case TIPC_CMD_SET_MAX_PORTS:
358 rep_tlv_buf = cfg_set_max_ports(); 328 rep_tlv_buf = cfg_set_max_ports();
359 break; 329 break;
360 case TIPC_CMD_SET_MAX_PUBL:
361 rep_tlv_buf = cfg_set_max_publications();
362 break;
363 case TIPC_CMD_SET_MAX_SUBSCR:
364 rep_tlv_buf = cfg_set_max_subscriptions();
365 break;
366 case TIPC_CMD_SET_NETID: 330 case TIPC_CMD_SET_NETID:
367 rep_tlv_buf = cfg_set_netid(); 331 rep_tlv_buf = cfg_set_netid();
368 break; 332 break;
@@ -372,12 +336,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
372 case TIPC_CMD_GET_MAX_PORTS: 336 case TIPC_CMD_GET_MAX_PORTS:
373 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports); 337 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
374 break; 338 break;
375 case TIPC_CMD_GET_MAX_PUBL:
376 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
377 break;
378 case TIPC_CMD_GET_MAX_SUBSCR:
379 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
380 break;
381 case TIPC_CMD_GET_NETID: 339 case TIPC_CMD_GET_NETID:
382 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); 340 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
383 break; 341 break;
@@ -393,6 +351,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
393 case TIPC_CMD_GET_MAX_CLUSTERS: 351 case TIPC_CMD_GET_MAX_CLUSTERS:
394 case TIPC_CMD_SET_MAX_NODES: 352 case TIPC_CMD_SET_MAX_NODES:
395 case TIPC_CMD_GET_MAX_NODES: 353 case TIPC_CMD_GET_MAX_NODES:
354 case TIPC_CMD_SET_MAX_SUBSCR:
355 case TIPC_CMD_GET_MAX_SUBSCR:
356 case TIPC_CMD_SET_MAX_PUBL:
357 case TIPC_CMD_GET_MAX_PUBL:
396 case TIPC_CMD_SET_LOG_SIZE: 358 case TIPC_CMD_SET_LOG_SIZE:
397 case TIPC_CMD_DUMP_LOG: 359 case TIPC_CMD_DUMP_LOG:
398 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 360 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 6586eac6a50e..bfe8af88469a 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -48,18 +48,13 @@
48 48
49 49
50/* global variables used by multiple sub-systems within TIPC */ 50/* global variables used by multiple sub-systems within TIPC */
51int tipc_random; 51int tipc_random __read_mostly;
52
53const char tipc_alphabet[] =
54 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
55 52
56/* configurable TIPC parameters */ 53/* configurable TIPC parameters */
57u32 tipc_own_addr; 54u32 tipc_own_addr __read_mostly;
58int tipc_max_ports; 55int tipc_max_ports __read_mostly;
59int tipc_max_subscriptions; 56int tipc_net_id __read_mostly;
60int tipc_max_publications; 57int tipc_remote_management __read_mostly;
61int tipc_net_id;
62int tipc_remote_management;
63 58
64 59
65/** 60/**
@@ -101,9 +96,8 @@ int tipc_core_start_net(unsigned long addr)
101{ 96{
102 int res; 97 int res;
103 98
104 res = tipc_net_start(addr); 99 tipc_net_start(addr);
105 if (!res) 100 res = tipc_eth_media_start();
106 res = tipc_eth_media_start();
107 if (res) 101 if (res)
108 tipc_core_stop_net(); 102 tipc_core_stop_net();
109 return res; 103 return res;
@@ -160,8 +154,6 @@ static int __init tipc_init(void)
160 154
161 tipc_own_addr = 0; 155 tipc_own_addr = 0;
162 tipc_remote_management = 1; 156 tipc_remote_management = 1;
163 tipc_max_publications = 10000;
164 tipc_max_subscriptions = 2000;
165 tipc_max_ports = CONFIG_TIPC_PORTS; 157 tipc_max_ports = CONFIG_TIPC_PORTS;
166 tipc_net_id = 4711; 158 tipc_net_id = 4711;
167 159
diff --git a/net/tipc/core.h b/net/tipc/core.h
index fd42e106c185..0207db04179a 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -60,7 +60,9 @@
60 60
61#define TIPC_MOD_VER "2.0.0" 61#define TIPC_MOD_VER "2.0.0"
62 62
63#define ULTRA_STRING_MAX_LEN 32768 63#define ULTRA_STRING_MAX_LEN 32768
64#define TIPC_MAX_SUBSCRIPTIONS 65535
65#define TIPC_MAX_PUBLICATIONS 65535
64 66
65struct tipc_msg; /* msg.h */ 67struct tipc_msg; /* msg.h */
66 68
@@ -74,19 +76,15 @@ int tipc_snprintf(char *buf, int len, const char *fmt, ...);
74/* 76/*
75 * Global configuration variables 77 * Global configuration variables
76 */ 78 */
77extern u32 tipc_own_addr; 79extern u32 tipc_own_addr __read_mostly;
78extern int tipc_max_ports; 80extern int tipc_max_ports __read_mostly;
79extern int tipc_max_subscriptions; 81extern int tipc_net_id __read_mostly;
80extern int tipc_max_publications; 82extern int tipc_remote_management __read_mostly;
81extern int tipc_net_id;
82extern int tipc_remote_management;
83 83
84/* 84/*
85 * Other global variables 85 * Other global variables
86 */ 86 */
87extern int tipc_random; 87extern int tipc_random __read_mostly;
88extern const char tipc_alphabet[];
89
90 88
91/* 89/*
92 * Routines available to privileged subsystems 90 * Routines available to privileged subsystems
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 90ac9bfa7abb..2132c1ef2951 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -46,19 +46,30 @@
46 * @bearer: ptr to associated "generic" bearer structure 46 * @bearer: ptr to associated "generic" bearer structure
47 * @dev: ptr to associated Ethernet network device 47 * @dev: ptr to associated Ethernet network device
48 * @tipc_packet_type: used in binding TIPC to Ethernet driver 48 * @tipc_packet_type: used in binding TIPC to Ethernet driver
49 * @setup: work item used when enabling bearer
49 * @cleanup: work item used when disabling bearer 50 * @cleanup: work item used when disabling bearer
50 */ 51 */
51struct eth_bearer { 52struct eth_bearer {
52 struct tipc_bearer *bearer; 53 struct tipc_bearer *bearer;
53 struct net_device *dev; 54 struct net_device *dev;
54 struct packet_type tipc_packet_type; 55 struct packet_type tipc_packet_type;
56 struct work_struct setup;
55 struct work_struct cleanup; 57 struct work_struct cleanup;
56}; 58};
57 59
58static struct tipc_media eth_media_info; 60static struct tipc_media eth_media_info;
59static struct eth_bearer eth_bearers[MAX_ETH_BEARERS]; 61static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
60static int eth_started; 62static int eth_started;
61static struct notifier_block notifier; 63
64static int recv_notification(struct notifier_block *nb, unsigned long evt,
65 void *dv);
66/*
67 * Network device notifier info
68 */
69static struct notifier_block notifier = {
70 .notifier_call = recv_notification,
71 .priority = 0
72};
62 73
63/** 74/**
64 * eth_media_addr_set - initialize Ethernet media address structure 75 * eth_media_addr_set - initialize Ethernet media address structure
@@ -134,6 +145,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
134} 145}
135 146
136/** 147/**
148 * setup_bearer - setup association between Ethernet bearer and interface
149 */
150static void setup_bearer(struct work_struct *work)
151{
152 struct eth_bearer *eb_ptr =
153 container_of(work, struct eth_bearer, setup);
154
155 dev_add_pack(&eb_ptr->tipc_packet_type);
156}
157
158/**
137 * enable_bearer - attach TIPC bearer to an Ethernet interface 159 * enable_bearer - attach TIPC bearer to an Ethernet interface
138 */ 160 */
139static int enable_bearer(struct tipc_bearer *tb_ptr) 161static int enable_bearer(struct tipc_bearer *tb_ptr)
@@ -173,7 +195,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
173 eb_ptr->tipc_packet_type.func = recv_msg; 195 eb_ptr->tipc_packet_type.func = recv_msg;
174 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr; 196 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
175 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list)); 197 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
176 dev_add_pack(&eb_ptr->tipc_packet_type); 198 INIT_WORK(&eb_ptr->setup, setup_bearer);
199 schedule_work(&eb_ptr->setup);
177 200
178 /* Associate TIPC bearer with Ethernet bearer */ 201 /* Associate TIPC bearer with Ethernet bearer */
179 eb_ptr->bearer = tb_ptr; 202 eb_ptr->bearer = tb_ptr;
@@ -357,8 +380,6 @@ int tipc_eth_media_start(void)
357 if (res) 380 if (res)
358 return res; 381 return res;
359 382
360 notifier.notifier_call = &recv_notification;
361 notifier.priority = 0;
362 res = register_netdevice_notifier(&notifier); 383 res = register_netdevice_notifier(&notifier);
363 if (!res) 384 if (!res)
364 eth_started = 1; 385 eth_started = 1;
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 7a52d3922f3c..111ff8300ae5 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -45,7 +45,7 @@ struct queue_item {
45static struct kmem_cache *tipc_queue_item_cache; 45static struct kmem_cache *tipc_queue_item_cache;
46static struct list_head signal_queue_head; 46static struct list_head signal_queue_head;
47static DEFINE_SPINLOCK(qitem_lock); 47static DEFINE_SPINLOCK(qitem_lock);
48static int handler_enabled; 48static int handler_enabled __read_mostly;
49 49
50static void process_signal_queue(unsigned long dummy); 50static void process_signal_queue(unsigned long dummy);
51 51
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1c1e6151875e..a79c755cb417 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -210,9 +210,7 @@ static int link_name_validate(const char *name,
210 (z_local > 255) || (c_local > 4095) || (n_local > 4095) || 210 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
211 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) || 211 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
212 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 212 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
213 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) || 213 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME))
214 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
215 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
216 return 0; 214 return 0;
217 215
218 /* return link name components, if necessary */ 216 /* return link name components, if necessary */
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 360c478b0b53..46754779fd3d 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -41,7 +41,7 @@
41#include "subscr.h" 41#include "subscr.h"
42#include "port.h" 42#include "port.h"
43 43
44static int tipc_nametbl_size = 1024; /* must be a power of 2 */ 44#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
45 45
46/** 46/**
47 * struct name_info - name sequence publication info 47 * struct name_info - name sequence publication info
@@ -114,7 +114,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
114 114
115static int hash(int x) 115static int hash(int x)
116{ 116{
117 return x & (tipc_nametbl_size - 1); 117 return x & (TIPC_NAMETBL_SIZE - 1);
118} 118}
119 119
120/** 120/**
@@ -667,9 +667,9 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
667{ 667{
668 struct publication *publ; 668 struct publication *publ;
669 669
670 if (table.local_publ_count >= tipc_max_publications) { 670 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
671 pr_warn("Publication failed, local publication limit reached (%u)\n", 671 pr_warn("Publication failed, local publication limit reached (%u)\n",
672 tipc_max_publications); 672 TIPC_MAX_PUBLICATIONS);
673 return NULL; 673 return NULL;
674 } 674 }
675 675
@@ -783,7 +783,7 @@ static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
783 if (!list_is_last(&publ->zone_list, &info->zone_list)) 783 if (!list_is_last(&publ->zone_list, &info->zone_list))
784 ret += tipc_snprintf(buf + ret, len - ret, 784 ret += tipc_snprintf(buf + ret, len - ret,
785 "\n%33s", " "); 785 "\n%33s", " ");
786 }; 786 }
787 787
788 ret += tipc_snprintf(buf + ret, len - ret, "\n"); 788 ret += tipc_snprintf(buf + ret, len - ret, "\n");
789 return ret; 789 return ret;
@@ -871,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
871 ret += nametbl_header(buf, len, depth); 871 ret += nametbl_header(buf, len, depth);
872 lowbound = 0; 872 lowbound = 0;
873 upbound = ~0; 873 upbound = ~0;
874 for (i = 0; i < tipc_nametbl_size; i++) { 874 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
875 seq_head = &table.types[i]; 875 seq_head = &table.types[i];
876 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { 876 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
877 ret += nameseq_list(seq, buf + ret, len - ret, 877 ret += nameseq_list(seq, buf + ret, len - ret,
@@ -935,7 +935,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
935 935
936int tipc_nametbl_init(void) 936int tipc_nametbl_init(void)
937{ 937{
938 table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head), 938 table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
939 GFP_ATOMIC); 939 GFP_ATOMIC);
940 if (!table.types) 940 if (!table.types)
941 return -ENOMEM; 941 return -ENOMEM;
@@ -953,7 +953,7 @@ void tipc_nametbl_stop(void)
953 953
954 /* Verify name table is empty, then release it */ 954 /* Verify name table is empty, then release it */
955 write_lock_bh(&tipc_nametbl_lock); 955 write_lock_bh(&tipc_nametbl_lock);
956 for (i = 0; i < tipc_nametbl_size; i++) { 956 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
957 if (hlist_empty(&table.types[i])) 957 if (hlist_empty(&table.types[i]))
958 continue; 958 continue;
959 pr_err("nametbl_stop(): orphaned hash chain detected\n"); 959 pr_err("nametbl_stop(): orphaned hash chain detected\n");
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 5b5cea259caf..7d305ecc09c2 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -171,7 +171,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
171 tipc_link_send(buf, dnode, msg_link_selector(msg)); 171 tipc_link_send(buf, dnode, msg_link_selector(msg));
172} 172}
173 173
174int tipc_net_start(u32 addr) 174void tipc_net_start(u32 addr)
175{ 175{
176 char addr_string[16]; 176 char addr_string[16];
177 177
@@ -187,7 +187,6 @@ int tipc_net_start(u32 addr)
187 pr_info("Started in network mode\n"); 187 pr_info("Started in network mode\n");
188 pr_info("Own node address %s, network identity %u\n", 188 pr_info("Own node address %s, network identity %u\n",
189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
190 return 0;
191} 190}
192 191
193void tipc_net_stop(void) 192void tipc_net_stop(void)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 9eb4b9e220eb..079daadb3f72 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -41,7 +41,7 @@ extern rwlock_t tipc_net_lock;
41 41
42void tipc_net_route_msg(struct sk_buff *buf); 42void tipc_net_route_msg(struct sk_buff *buf);
43 43
44int tipc_net_start(u32 addr); 44void tipc_net_start(u32 addr);
45void tipc_net_stop(void); 45void tipc_net_stop(void);
46 46
47#endif 47#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 47a839df27dc..6675914dc592 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
62 rep_nlh = nlmsg_hdr(rep_buf); 62 rep_nlh = nlmsg_hdr(rep_buf);
63 memcpy(rep_nlh, req_nlh, hdr_space); 63 memcpy(rep_nlh, req_nlh, hdr_space);
64 rep_nlh->nlmsg_len = rep_buf->len; 64 rep_nlh->nlmsg_len = rep_buf->len;
65 genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid); 65 genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
66 } 66 }
67 67
68 return 0; 68 return 0;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 5ed5965eb0be..0f7d0d007e22 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -304,9 +304,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
304 } 304 }
305 305
306 /* Refuse subscription if global limit exceeded */ 306 /* Refuse subscription if global limit exceeded */
307 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 307 if (atomic_read(&topsrv.subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
308 pr_warn("Subscription rejected, limit reached (%u)\n", 308 pr_warn("Subscription rejected, limit reached (%u)\n",
309 tipc_max_subscriptions); 309 TIPC_MAX_SUBSCRIPTIONS);
310 subscr_terminate(subscriber); 310 subscr_terminate(subscriber);
311 return NULL; 311 return NULL;
312 } 312 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c5ee4ff61364..5b5c876c80e9 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -441,7 +441,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
441 /* ---- Socket is dead now and most probably destroyed ---- */ 441 /* ---- Socket is dead now and most probably destroyed ---- */
442 442
443 /* 443 /*
444 * Fixme: BSD difference: In BSD all sockets connected to use get 444 * Fixme: BSD difference: In BSD all sockets connected to us get
445 * ECONNRESET and we die on the spot. In Linux we behave 445 * ECONNRESET and we die on the spot. In Linux we behave
446 * like files and pipes do and wait for the last 446 * like files and pipes do and wait for the last
447 * dereference. 447 * dereference.
@@ -481,7 +481,6 @@ static int unix_listen(struct socket *sock, int backlog)
481 struct sock *sk = sock->sk; 481 struct sock *sk = sock->sk;
482 struct unix_sock *u = unix_sk(sk); 482 struct unix_sock *u = unix_sk(sk);
483 struct pid *old_pid = NULL; 483 struct pid *old_pid = NULL;
484 const struct cred *old_cred = NULL;
485 484
486 err = -EOPNOTSUPP; 485 err = -EOPNOTSUPP;
487 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) 486 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -503,8 +502,6 @@ static int unix_listen(struct socket *sock, int backlog)
503out_unlock: 502out_unlock:
504 unix_state_unlock(sk); 503 unix_state_unlock(sk);
505 put_pid(old_pid); 504 put_pid(old_pid);
506 if (old_cred)
507 put_cred(old_cred);
508out: 505out:
509 return err; 506 return err;
510} 507}
@@ -2060,10 +2057,14 @@ static int unix_shutdown(struct socket *sock, int mode)
2060 struct sock *sk = sock->sk; 2057 struct sock *sk = sock->sk;
2061 struct sock *other; 2058 struct sock *other;
2062 2059
2063 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN); 2060 if (mode < SHUT_RD || mode > SHUT_RDWR)
2064 2061 return -EINVAL;
2065 if (!mode) 2062 /* This maps:
2066 return 0; 2063 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2064 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2065 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2066 */
2067 ++mode;
2067 2068
2068 unix_state_lock(sk); 2069 unix_state_lock(sk);
2069 sk->sk_shutdown |= mode; 2070 sk->sk_shutdown |= mode;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 750b13408449..06748f108a57 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -110,12 +110,12 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
110} 110}
111 111
112static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, 112static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
113 u32 pid, u32 seq, u32 flags, int sk_ino) 113 u32 portid, u32 seq, u32 flags, int sk_ino)
114{ 114{
115 struct nlmsghdr *nlh; 115 struct nlmsghdr *nlh;
116 struct unix_diag_msg *rep; 116 struct unix_diag_msg *rep;
117 117
118 nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep), 118 nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
119 flags); 119 flags);
120 if (!nlh) 120 if (!nlh)
121 return -EMSGSIZE; 121 return -EMSGSIZE;
@@ -159,7 +159,7 @@ out_nlmsg_trim:
159} 159}
160 160
161static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, 161static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
162 u32 pid, u32 seq, u32 flags) 162 u32 portid, u32 seq, u32 flags)
163{ 163{
164 int sk_ino; 164 int sk_ino;
165 165
@@ -170,7 +170,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
170 if (!sk_ino) 170 if (!sk_ino)
171 return 0; 171 return 0;
172 172
173 return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino); 173 return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
174} 174}
175 175
176static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) 176static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -200,7 +200,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
200 if (!(req->udiag_states & (1 << sk->sk_state))) 200 if (!(req->udiag_states & (1 << sk->sk_state)))
201 goto next; 201 goto next;
202 if (sk_diag_dump(sk, skb, req, 202 if (sk_diag_dump(sk, skb, req,
203 NETLINK_CB(cb->skb).pid, 203 NETLINK_CB(cb->skb).portid,
204 cb->nlh->nlmsg_seq, 204 cb->nlh->nlmsg_seq,
205 NLM_F_MULTI) < 0) 205 NLM_F_MULTI) < 0)
206 goto done; 206 goto done;
@@ -267,7 +267,7 @@ again:
267 if (!rep) 267 if (!rep)
268 goto out; 268 goto out;
269 269
270 err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid, 270 err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
271 nlh->nlmsg_seq, 0, req->udiag_ino); 271 nlh->nlmsg_seq, 0, req->udiag_ino);
272 if (err < 0) { 272 if (err < 0) {
273 nlmsg_free(rep); 273 nlmsg_free(rep);
@@ -277,7 +277,7 @@ again:
277 277
278 goto again; 278 goto again;
279 } 279 }
280 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid, 280 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
281 MSG_DONTWAIT); 281 MSG_DONTWAIT);
282 if (err > 0) 282 if (err > 0)
283 err = 0; 283 err = 0;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d355f67d0cdd..2f876b9ee344 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -105,7 +105,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
105 105
106 ASSERT_WDEV_LOCK(wdev); 106 ASSERT_WDEV_LOCK(wdev);
107 107
108 if (!netif_running(wdev->netdev)) 108 if (wdev->netdev && !netif_running(wdev->netdev))
109 return; 109 return;
110 110
111 switch (wdev->iftype) { 111 switch (wdev->iftype) {
@@ -143,6 +143,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
143 case NL80211_IFTYPE_WDS: 143 case NL80211_IFTYPE_WDS:
144 /* these interface types don't really have a channel */ 144 /* these interface types don't really have a channel */
145 return; 145 return;
146 case NL80211_IFTYPE_P2P_DEVICE:
147 if (wdev->wiphy->features &
148 NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
149 *chanmode = CHAN_MODE_EXCLUSIVE;
150 return;
146 case NL80211_IFTYPE_UNSPECIFIED: 151 case NL80211_IFTYPE_UNSPECIFIED:
147 case NUM_NL80211_IFTYPES: 152 case NUM_NL80211_IFTYPES:
148 WARN_ON(1); 153 WARN_ON(1);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index dcd64d5b07aa..443d4d7deea2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -230,9 +230,24 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
230 rtnl_lock(); 230 rtnl_lock();
231 mutex_lock(&rdev->devlist_mtx); 231 mutex_lock(&rdev->devlist_mtx);
232 232
233 list_for_each_entry(wdev, &rdev->wdev_list, list) 233 list_for_each_entry(wdev, &rdev->wdev_list, list) {
234 if (wdev->netdev) 234 if (wdev->netdev) {
235 dev_close(wdev->netdev); 235 dev_close(wdev->netdev);
236 continue;
237 }
238 /* otherwise, check iftype */
239 switch (wdev->iftype) {
240 case NL80211_IFTYPE_P2P_DEVICE:
241 if (!wdev->p2p_started)
242 break;
243 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
244 wdev->p2p_started = false;
245 rdev->opencount--;
246 break;
247 default:
248 break;
249 }
250 }
236 251
237 mutex_unlock(&rdev->devlist_mtx); 252 mutex_unlock(&rdev->devlist_mtx);
238 rtnl_unlock(); 253 rtnl_unlock();
@@ -407,6 +422,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
407 if (WARN_ON(wiphy->software_iftypes & types)) 422 if (WARN_ON(wiphy->software_iftypes & types))
408 return -EINVAL; 423 return -EINVAL;
409 424
425 /* Only a single P2P_DEVICE can be allowed */
426 if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
427 c->limits[j].max > 1))
428 return -EINVAL;
429
410 cnt += c->limits[j].max; 430 cnt += c->limits[j].max;
411 /* 431 /*
412 * Don't advertise an unsupported type 432 * Don't advertise an unsupported type
@@ -734,6 +754,35 @@ static void wdev_cleanup_work(struct work_struct *work)
734 dev_put(wdev->netdev); 754 dev_put(wdev->netdev);
735} 755}
736 756
757void cfg80211_unregister_wdev(struct wireless_dev *wdev)
758{
759 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
760
761 ASSERT_RTNL();
762
763 if (WARN_ON(wdev->netdev))
764 return;
765
766 mutex_lock(&rdev->devlist_mtx);
767 list_del_rcu(&wdev->list);
768 rdev->devlist_generation++;
769
770 switch (wdev->iftype) {
771 case NL80211_IFTYPE_P2P_DEVICE:
772 if (!wdev->p2p_started)
773 break;
774 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
775 wdev->p2p_started = false;
776 rdev->opencount--;
777 break;
778 default:
779 WARN_ON_ONCE(1);
780 break;
781 }
782 mutex_unlock(&rdev->devlist_mtx);
783}
784EXPORT_SYMBOL(cfg80211_unregister_wdev);
785
737static struct device_type wiphy_type = { 786static struct device_type wiphy_type = {
738 .name = "wlan", 787 .name = "wlan",
739}; 788};
diff --git a/net/wireless/core.h b/net/wireless/core.h
index bc7430b54771..a343be4a52bd 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -55,7 +55,7 @@ struct cfg80211_registered_device {
55 int opencount; /* also protected by devlist_mtx */ 55 int opencount; /* also protected by devlist_mtx */
56 wait_queue_head_t dev_wait; 56 wait_queue_head_t dev_wait;
57 57
58 u32 ap_beacons_nlpid; 58 u32 ap_beacons_nlportid;
59 59
60 /* protected by RTNL only */ 60 /* protected by RTNL only */
61 int num_running_ifaces; 61 int num_running_ifaces;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1cdb1d5e6b0f..8016fee0752b 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -612,10 +612,21 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
612} 612}
613EXPORT_SYMBOL(cfg80211_del_sta); 613EXPORT_SYMBOL(cfg80211_del_sta);
614 614
615void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
616 enum nl80211_connect_failed_reason reason,
617 gfp_t gfp)
618{
619 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
620 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
621
622 nl80211_send_conn_failed_event(rdev, dev, mac_addr, reason, gfp);
623}
624EXPORT_SYMBOL(cfg80211_conn_failed);
625
615struct cfg80211_mgmt_registration { 626struct cfg80211_mgmt_registration {
616 struct list_head list; 627 struct list_head list;
617 628
618 u32 nlpid; 629 u32 nlportid;
619 630
620 int match_len; 631 int match_len;
621 632
@@ -624,7 +635,7 @@ struct cfg80211_mgmt_registration {
624 u8 match[]; 635 u8 match[];
625}; 636};
626 637
627int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, 638int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
628 u16 frame_type, const u8 *match_data, 639 u16 frame_type, const u8 *match_data,
629 int match_len) 640 int match_len)
630{ 641{
@@ -672,7 +683,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
672 683
673 memcpy(nreg->match, match_data, match_len); 684 memcpy(nreg->match, match_data, match_len);
674 nreg->match_len = match_len; 685 nreg->match_len = match_len;
675 nreg->nlpid = snd_pid; 686 nreg->nlportid = snd_portid;
676 nreg->frame_type = cpu_to_le16(frame_type); 687 nreg->frame_type = cpu_to_le16(frame_type);
677 list_add(&nreg->list, &wdev->mgmt_registrations); 688 list_add(&nreg->list, &wdev->mgmt_registrations);
678 689
@@ -685,7 +696,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
685 return err; 696 return err;
686} 697}
687 698
688void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid) 699void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
689{ 700{
690 struct wiphy *wiphy = wdev->wiphy; 701 struct wiphy *wiphy = wdev->wiphy;
691 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 702 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
@@ -694,7 +705,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
694 spin_lock_bh(&wdev->mgmt_registrations_lock); 705 spin_lock_bh(&wdev->mgmt_registrations_lock);
695 706
696 list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { 707 list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
697 if (reg->nlpid != nlpid) 708 if (reg->nlportid != nlportid)
698 continue; 709 continue;
699 710
700 if (rdev->ops->mgmt_frame_register) { 711 if (rdev->ops->mgmt_frame_register) {
@@ -710,8 +721,8 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
710 721
711 spin_unlock_bh(&wdev->mgmt_registrations_lock); 722 spin_unlock_bh(&wdev->mgmt_registrations_lock);
712 723
713 if (nlpid == wdev->ap_unexpected_nlpid) 724 if (nlportid == wdev->ap_unexpected_nlportid)
714 wdev->ap_unexpected_nlpid = 0; 725 wdev->ap_unexpected_nlportid = 0;
715} 726}
716 727
717void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) 728void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -736,7 +747,6 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
736 const u8 *buf, size_t len, bool no_cck, 747 const u8 *buf, size_t len, bool no_cck,
737 bool dont_wait_for_ack, u64 *cookie) 748 bool dont_wait_for_ack, u64 *cookie)
738{ 749{
739 struct net_device *dev = wdev->netdev;
740 const struct ieee80211_mgmt *mgmt; 750 const struct ieee80211_mgmt *mgmt;
741 u16 stype; 751 u16 stype;
742 752
@@ -796,7 +806,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
796 case NL80211_IFTYPE_AP: 806 case NL80211_IFTYPE_AP:
797 case NL80211_IFTYPE_P2P_GO: 807 case NL80211_IFTYPE_P2P_GO:
798 case NL80211_IFTYPE_AP_VLAN: 808 case NL80211_IFTYPE_AP_VLAN:
799 if (!ether_addr_equal(mgmt->bssid, dev->dev_addr)) 809 if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
800 err = -EINVAL; 810 err = -EINVAL;
801 break; 811 break;
802 case NL80211_IFTYPE_MESH_POINT: 812 case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +819,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
809 * cfg80211 doesn't track the stations 819 * cfg80211 doesn't track the stations
810 */ 820 */
811 break; 821 break;
822 case NL80211_IFTYPE_P2P_DEVICE:
823 /*
824 * fall through, P2P device only supports
825 * public action frames
826 */
812 default: 827 default:
813 err = -EOPNOTSUPP; 828 err = -EOPNOTSUPP;
814 break; 829 break;
@@ -819,7 +834,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
819 return err; 834 return err;
820 } 835 }
821 836
822 if (!ether_addr_equal(mgmt->sa, dev->dev_addr)) 837 if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
823 return -EINVAL; 838 return -EINVAL;
824 839
825 /* Transmit the Action frame as requested by user space */ 840 /* Transmit the Action frame as requested by user space */
@@ -868,7 +883,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
868 /* found match! */ 883 /* found match! */
869 884
870 /* Indicate the received Action frame to user space */ 885 /* Indicate the received Action frame to user space */
871 if (nl80211_send_mgmt(rdev, wdev, reg->nlpid, 886 if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
872 freq, sig_mbm, 887 freq, sig_mbm,
873 buf, len, gfp)) 888 buf, len, gfp))
874 continue; 889 continue;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1e37dbf00cb3..0418a6d5c1a6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -496,11 +496,11 @@ static bool is_valid_ie_attr(const struct nlattr *attr)
496} 496}
497 497
498/* message building helper */ 498/* message building helper */
499static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, 499static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
500 int flags, u8 cmd) 500 int flags, u8 cmd)
501{ 501{
502 /* since there is no private header just add the generic one */ 502 /* since there is no private header just add the generic one */
503 return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); 503 return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd);
504} 504}
505 505
506static int nl80211_msg_put_channel(struct sk_buff *msg, 506static int nl80211_msg_put_channel(struct sk_buff *msg,
@@ -851,7 +851,7 @@ nla_put_failure:
851 return -ENOBUFS; 851 return -ENOBUFS;
852} 852}
853 853
854static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, 854static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
855 struct cfg80211_registered_device *dev) 855 struct cfg80211_registered_device *dev)
856{ 856{
857 void *hdr; 857 void *hdr;
@@ -866,7 +866,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
866 const struct ieee80211_txrx_stypes *mgmt_stypes = 866 const struct ieee80211_txrx_stypes *mgmt_stypes =
867 dev->wiphy.mgmt_stypes; 867 dev->wiphy.mgmt_stypes;
868 868
869 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 869 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
870 if (!hdr) 870 if (!hdr)
871 return -1; 871 return -1;
872 872
@@ -1100,6 +1100,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1100 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) 1100 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
1101 goto nla_put_failure; 1101 goto nla_put_failure;
1102 } 1102 }
1103 CMD(start_p2p_device, START_P2P_DEVICE);
1103 1104
1104#ifdef CONFIG_NL80211_TESTMODE 1105#ifdef CONFIG_NL80211_TESTMODE
1105 CMD(testmode_cmd, TESTMODE); 1106 CMD(testmode_cmd, TESTMODE);
@@ -1266,7 +1267,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1266 continue; 1267 continue;
1267 if (++idx <= start) 1268 if (++idx <= start)
1268 continue; 1269 continue;
1269 if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, 1270 if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
1270 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1271 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1271 dev) < 0) { 1272 dev) < 0) {
1272 idx--; 1273 idx--;
@@ -1289,7 +1290,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
1289 if (!msg) 1290 if (!msg)
1290 return -ENOMEM; 1291 return -ENOMEM;
1291 1292
1292 if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) { 1293 if (nl80211_send_wiphy(msg, info->snd_portid, info->snd_seq, 0, dev) < 0) {
1293 nlmsg_free(msg); 1294 nlmsg_free(msg);
1294 return -ENOBUFS; 1295 return -ENOBUFS;
1295 } 1296 }
@@ -1735,26 +1736,26 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
1735 ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32); 1736 ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
1736} 1737}
1737 1738
1738static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, 1739static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
1739 struct cfg80211_registered_device *rdev, 1740 struct cfg80211_registered_device *rdev,
1740 struct wireless_dev *wdev) 1741 struct wireless_dev *wdev)
1741{ 1742{
1742 struct net_device *dev = wdev->netdev; 1743 struct net_device *dev = wdev->netdev;
1743 void *hdr; 1744 void *hdr;
1744 1745
1745 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE); 1746 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_INTERFACE);
1746 if (!hdr) 1747 if (!hdr)
1747 return -1; 1748 return -1;
1748 1749
1749 if (dev && 1750 if (dev &&
1750 (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || 1751 (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
1751 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) || 1752 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
1752 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
1753 goto nla_put_failure; 1753 goto nla_put_failure;
1754 1754
1755 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 1755 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
1756 nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) || 1756 nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
1757 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || 1757 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
1758 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
1758 nla_put_u32(msg, NL80211_ATTR_GENERATION, 1759 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1759 rdev->devlist_generation ^ 1760 rdev->devlist_generation ^
1760 (cfg80211_rdev_list_generation << 2))) 1761 (cfg80211_rdev_list_generation << 2)))
@@ -1806,7 +1807,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
1806 if_idx++; 1807 if_idx++;
1807 continue; 1808 continue;
1808 } 1809 }
1809 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, 1810 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
1810 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1811 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1811 rdev, wdev) < 0) { 1812 rdev, wdev) < 0) {
1812 mutex_unlock(&rdev->devlist_mtx); 1813 mutex_unlock(&rdev->devlist_mtx);
@@ -1837,7 +1838,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
1837 if (!msg) 1838 if (!msg)
1838 return -ENOMEM; 1839 return -ENOMEM;
1839 1840
1840 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, 1841 if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
1841 dev, wdev) < 0) { 1842 dev, wdev) < 0) {
1842 nlmsg_free(msg); 1843 nlmsg_free(msg);
1843 return -ENOBUFS; 1844 return -ENOBUFS;
@@ -2021,8 +2022,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2021 return PTR_ERR(wdev); 2022 return PTR_ERR(wdev);
2022 } 2023 }
2023 2024
2024 if (type == NL80211_IFTYPE_MESH_POINT && 2025 switch (type) {
2025 info->attrs[NL80211_ATTR_MESH_ID]) { 2026 case NL80211_IFTYPE_MESH_POINT:
2027 if (!info->attrs[NL80211_ATTR_MESH_ID])
2028 break;
2026 wdev_lock(wdev); 2029 wdev_lock(wdev);
2027 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != 2030 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
2028 IEEE80211_MAX_MESH_ID_LEN); 2031 IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,9 +2034,29 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2031 memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]), 2034 memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
2032 wdev->mesh_id_up_len); 2035 wdev->mesh_id_up_len);
2033 wdev_unlock(wdev); 2036 wdev_unlock(wdev);
2037 break;
2038 case NL80211_IFTYPE_P2P_DEVICE:
2039 /*
2040 * P2P Device doesn't have a netdev, so doesn't go
2041 * through the netdev notifier and must be added here
2042 */
2043 mutex_init(&wdev->mtx);
2044 INIT_LIST_HEAD(&wdev->event_list);
2045 spin_lock_init(&wdev->event_lock);
2046 INIT_LIST_HEAD(&wdev->mgmt_registrations);
2047 spin_lock_init(&wdev->mgmt_registrations_lock);
2048
2049 mutex_lock(&rdev->devlist_mtx);
2050 wdev->identifier = ++rdev->wdev_id;
2051 list_add_rcu(&wdev->list, &rdev->wdev_list);
2052 rdev->devlist_generation++;
2053 mutex_unlock(&rdev->devlist_mtx);
2054 break;
2055 default:
2056 break;
2034 } 2057 }
2035 2058
2036 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, 2059 if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
2037 rdev, wdev) < 0) { 2060 rdev, wdev) < 0) {
2038 nlmsg_free(msg); 2061 nlmsg_free(msg);
2039 return -ENOBUFS; 2062 return -ENOBUFS;
@@ -2168,7 +2191,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2168 if (!msg) 2191 if (!msg)
2169 return -ENOMEM; 2192 return -ENOMEM;
2170 2193
2171 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 2194 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2172 NL80211_CMD_NEW_KEY); 2195 NL80211_CMD_NEW_KEY);
2173 if (IS_ERR(hdr)) 2196 if (IS_ERR(hdr))
2174 return PTR_ERR(hdr); 2197 return PTR_ERR(hdr);
@@ -2746,7 +2769,7 @@ nla_put_failure:
2746 return false; 2769 return false;
2747} 2770}
2748 2771
2749static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 2772static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
2750 int flags, 2773 int flags,
2751 struct cfg80211_registered_device *rdev, 2774 struct cfg80211_registered_device *rdev,
2752 struct net_device *dev, 2775 struct net_device *dev,
@@ -2755,7 +2778,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2755 void *hdr; 2778 void *hdr;
2756 struct nlattr *sinfoattr, *bss_param; 2779 struct nlattr *sinfoattr, *bss_param;
2757 2780
2758 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); 2781 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
2759 if (!hdr) 2782 if (!hdr)
2760 return -1; 2783 return -1;
2761 2784
@@ -2908,7 +2931,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
2908 goto out_err; 2931 goto out_err;
2909 2932
2910 if (nl80211_send_station(skb, 2933 if (nl80211_send_station(skb,
2911 NETLINK_CB(cb->skb).pid, 2934 NETLINK_CB(cb->skb).portid,
2912 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2935 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2913 dev, netdev, mac_addr, 2936 dev, netdev, mac_addr,
2914 &sinfo) < 0) 2937 &sinfo) < 0)
@@ -2954,7 +2977,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
2954 if (!msg) 2977 if (!msg)
2955 return -ENOMEM; 2978 return -ENOMEM;
2956 2979
2957 if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, 2980 if (nl80211_send_station(msg, info->snd_portid, info->snd_seq, 0,
2958 rdev, dev, mac_addr, &sinfo) < 0) { 2981 rdev, dev, mac_addr, &sinfo) < 0) {
2959 nlmsg_free(msg); 2982 nlmsg_free(msg);
2960 return -ENOBUFS; 2983 return -ENOBUFS;
@@ -3280,7 +3303,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
3280 return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr); 3303 return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr);
3281} 3304}
3282 3305
3283static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, 3306static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
3284 int flags, struct net_device *dev, 3307 int flags, struct net_device *dev,
3285 u8 *dst, u8 *next_hop, 3308 u8 *dst, u8 *next_hop,
3286 struct mpath_info *pinfo) 3309 struct mpath_info *pinfo)
@@ -3288,7 +3311,7 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
3288 void *hdr; 3311 void *hdr;
3289 struct nlattr *pinfoattr; 3312 struct nlattr *pinfoattr;
3290 3313
3291 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); 3314 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
3292 if (!hdr) 3315 if (!hdr)
3293 return -1; 3316 return -1;
3294 3317
@@ -3366,7 +3389,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
3366 if (err) 3389 if (err)
3367 goto out_err; 3390 goto out_err;
3368 3391
3369 if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid, 3392 if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
3370 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3393 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3371 netdev, dst, next_hop, 3394 netdev, dst, next_hop,
3372 &pinfo) < 0) 3395 &pinfo) < 0)
@@ -3415,7 +3438,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
3415 if (!msg) 3438 if (!msg)
3416 return -ENOMEM; 3439 return -ENOMEM;
3417 3440
3418 if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0, 3441 if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0,
3419 dev, dst, next_hop, &pinfo) < 0) { 3442 dev, dst, next_hop, &pinfo) < 0) {
3420 nlmsg_free(msg); 3443 nlmsg_free(msg);
3421 return -ENOBUFS; 3444 return -ENOBUFS;
@@ -3656,7 +3679,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3656 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3679 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3657 if (!msg) 3680 if (!msg)
3658 return -ENOMEM; 3681 return -ENOMEM;
3659 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 3682 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
3660 NL80211_CMD_GET_MESH_CONFIG); 3683 NL80211_CMD_GET_MESH_CONFIG);
3661 if (!hdr) 3684 if (!hdr)
3662 goto out; 3685 goto out;
@@ -3975,7 +3998,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
3975 goto out; 3998 goto out;
3976 } 3999 }
3977 4000
3978 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 4001 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
3979 NL80211_CMD_GET_REG); 4002 NL80211_CMD_GET_REG);
3980 if (!hdr) 4003 if (!hdr)
3981 goto put_failure; 4004 goto put_failure;
@@ -4593,7 +4616,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4593 4616
4594 ASSERT_WDEV_LOCK(wdev); 4617 ASSERT_WDEV_LOCK(wdev);
4595 4618
4596 hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).pid, seq, flags, 4619 hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
4597 NL80211_CMD_NEW_SCAN_RESULTS); 4620 NL80211_CMD_NEW_SCAN_RESULTS);
4598 if (!hdr) 4621 if (!hdr)
4599 return -1; 4622 return -1;
@@ -4712,14 +4735,14 @@ static int nl80211_dump_scan(struct sk_buff *skb,
4712 return skb->len; 4735 return skb->len;
4713} 4736}
4714 4737
4715static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq, 4738static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
4716 int flags, struct net_device *dev, 4739 int flags, struct net_device *dev,
4717 struct survey_info *survey) 4740 struct survey_info *survey)
4718{ 4741{
4719 void *hdr; 4742 void *hdr;
4720 struct nlattr *infoattr; 4743 struct nlattr *infoattr;
4721 4744
4722 hdr = nl80211hdr_put(msg, pid, seq, flags, 4745 hdr = nl80211hdr_put(msg, portid, seq, flags,
4723 NL80211_CMD_NEW_SURVEY_RESULTS); 4746 NL80211_CMD_NEW_SURVEY_RESULTS);
4724 if (!hdr) 4747 if (!hdr)
4725 return -ENOMEM; 4748 return -ENOMEM;
@@ -4813,7 +4836,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
4813 } 4836 }
4814 4837
4815 if (nl80211_send_survey(skb, 4838 if (nl80211_send_survey(skb,
4816 NETLINK_CB(cb->skb).pid, 4839 NETLINK_CB(cb->skb).portid,
4817 cb->nlh->nlmsg_seq, NLM_F_MULTI, 4840 cb->nlh->nlmsg_seq, NLM_F_MULTI,
4818 netdev, 4841 netdev,
4819 &survey) < 0) 4842 &survey) < 0)
@@ -5428,7 +5451,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
5428 } 5451 }
5429 5452
5430 while (1) { 5453 while (1) {
5431 void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).pid, 5454 void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
5432 cb->nlh->nlmsg_seq, NLM_F_MULTI, 5455 cb->nlh->nlmsg_seq, NLM_F_MULTI,
5433 NL80211_CMD_TESTMODE); 5456 NL80211_CMD_TESTMODE);
5434 struct nlattr *tmdata; 5457 struct nlattr *tmdata;
@@ -5468,7 +5491,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
5468 5491
5469static struct sk_buff * 5492static struct sk_buff *
5470__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev, 5493__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
5471 int approxlen, u32 pid, u32 seq, gfp_t gfp) 5494 int approxlen, u32 portid, u32 seq, gfp_t gfp)
5472{ 5495{
5473 struct sk_buff *skb; 5496 struct sk_buff *skb;
5474 void *hdr; 5497 void *hdr;
@@ -5478,7 +5501,7 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
5478 if (!skb) 5501 if (!skb)
5479 return NULL; 5502 return NULL;
5480 5503
5481 hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE); 5504 hdr = nl80211hdr_put(skb, portid, seq, 0, NL80211_CMD_TESTMODE);
5482 if (!hdr) { 5505 if (!hdr) {
5483 kfree_skb(skb); 5506 kfree_skb(skb);
5484 return NULL; 5507 return NULL;
@@ -5508,7 +5531,7 @@ struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
5508 return NULL; 5531 return NULL;
5509 5532
5510 return __cfg80211_testmode_alloc_skb(rdev, approxlen, 5533 return __cfg80211_testmode_alloc_skb(rdev, approxlen,
5511 rdev->testmode_info->snd_pid, 5534 rdev->testmode_info->snd_portid,
5512 rdev->testmode_info->snd_seq, 5535 rdev->testmode_info->snd_seq,
5513 GFP_KERNEL); 5536 GFP_KERNEL);
5514} 5537}
@@ -5846,7 +5869,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5846 if (!msg) 5869 if (!msg)
5847 return -ENOMEM; 5870 return -ENOMEM;
5848 5871
5849 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 5872 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
5850 NL80211_CMD_REMAIN_ON_CHANNEL); 5873 NL80211_CMD_REMAIN_ON_CHANNEL);
5851 5874
5852 if (IS_ERR(hdr)) { 5875 if (IS_ERR(hdr)) {
@@ -6055,6 +6078,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
6055 case NL80211_IFTYPE_AP_VLAN: 6078 case NL80211_IFTYPE_AP_VLAN:
6056 case NL80211_IFTYPE_MESH_POINT: 6079 case NL80211_IFTYPE_MESH_POINT:
6057 case NL80211_IFTYPE_P2P_GO: 6080 case NL80211_IFTYPE_P2P_GO:
6081 case NL80211_IFTYPE_P2P_DEVICE:
6058 break; 6082 break;
6059 default: 6083 default:
6060 return -EOPNOTSUPP; 6084 return -EOPNOTSUPP;
@@ -6064,7 +6088,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
6064 if (!rdev->ops->mgmt_tx) 6088 if (!rdev->ops->mgmt_tx)
6065 return -EOPNOTSUPP; 6089 return -EOPNOTSUPP;
6066 6090
6067 return cfg80211_mlme_register_mgmt(wdev, info->snd_pid, frame_type, 6091 return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type,
6068 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), 6092 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
6069 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH])); 6093 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
6070} 6094}
@@ -6101,6 +6125,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6101 case NL80211_IFTYPE_AP_VLAN: 6125 case NL80211_IFTYPE_AP_VLAN:
6102 case NL80211_IFTYPE_MESH_POINT: 6126 case NL80211_IFTYPE_MESH_POINT:
6103 case NL80211_IFTYPE_P2P_GO: 6127 case NL80211_IFTYPE_P2P_GO:
6128 case NL80211_IFTYPE_P2P_DEVICE:
6104 break; 6129 break;
6105 default: 6130 default:
6106 return -EOPNOTSUPP; 6131 return -EOPNOTSUPP;
@@ -6144,7 +6169,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6144 if (!msg) 6169 if (!msg)
6145 return -ENOMEM; 6170 return -ENOMEM;
6146 6171
6147 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 6172 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6148 NL80211_CMD_FRAME); 6173 NL80211_CMD_FRAME);
6149 6174
6150 if (IS_ERR(hdr)) { 6175 if (IS_ERR(hdr)) {
@@ -6197,6 +6222,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
6197 case NL80211_IFTYPE_AP: 6222 case NL80211_IFTYPE_AP:
6198 case NL80211_IFTYPE_AP_VLAN: 6223 case NL80211_IFTYPE_AP_VLAN:
6199 case NL80211_IFTYPE_P2P_GO: 6224 case NL80211_IFTYPE_P2P_GO:
6225 case NL80211_IFTYPE_P2P_DEVICE:
6200 break; 6226 break;
6201 default: 6227 default:
6202 return -EOPNOTSUPP; 6228 return -EOPNOTSUPP;
@@ -6260,7 +6286,7 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
6260 if (!msg) 6286 if (!msg)
6261 return -ENOMEM; 6287 return -ENOMEM;
6262 6288
6263 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 6289 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6264 NL80211_CMD_GET_POWER_SAVE); 6290 NL80211_CMD_GET_POWER_SAVE);
6265 if (!hdr) { 6291 if (!hdr) {
6266 err = -ENOBUFS; 6292 err = -ENOBUFS;
@@ -6462,7 +6488,7 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
6462 if (!msg) 6488 if (!msg)
6463 return -ENOMEM; 6489 return -ENOMEM;
6464 6490
6465 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 6491 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6466 NL80211_CMD_GET_WOWLAN); 6492 NL80211_CMD_GET_WOWLAN);
6467 if (!hdr) 6493 if (!hdr)
6468 goto nla_put_failure; 6494 goto nla_put_failure;
@@ -6736,10 +6762,10 @@ static int nl80211_register_unexpected_frame(struct sk_buff *skb,
6736 wdev->iftype != NL80211_IFTYPE_P2P_GO) 6762 wdev->iftype != NL80211_IFTYPE_P2P_GO)
6737 return -EINVAL; 6763 return -EINVAL;
6738 6764
6739 if (wdev->ap_unexpected_nlpid) 6765 if (wdev->ap_unexpected_nlportid)
6740 return -EBUSY; 6766 return -EBUSY;
6741 6767
6742 wdev->ap_unexpected_nlpid = info->snd_pid; 6768 wdev->ap_unexpected_nlportid = info->snd_portid;
6743 return 0; 6769 return 0;
6744} 6770}
6745 6771
@@ -6769,7 +6795,7 @@ static int nl80211_probe_client(struct sk_buff *skb,
6769 if (!msg) 6795 if (!msg)
6770 return -ENOMEM; 6796 return -ENOMEM;
6771 6797
6772 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, 6798 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6773 NL80211_CMD_PROBE_CLIENT); 6799 NL80211_CMD_PROBE_CLIENT);
6774 6800
6775 if (IS_ERR(hdr)) { 6801 if (IS_ERR(hdr)) {
@@ -6804,10 +6830,72 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
6804 if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) 6830 if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
6805 return -EOPNOTSUPP; 6831 return -EOPNOTSUPP;
6806 6832
6807 if (rdev->ap_beacons_nlpid) 6833 if (rdev->ap_beacons_nlportid)
6808 return -EBUSY; 6834 return -EBUSY;
6809 6835
6810 rdev->ap_beacons_nlpid = info->snd_pid; 6836 rdev->ap_beacons_nlportid = info->snd_portid;
6837
6838 return 0;
6839}
6840
6841static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
6842{
6843 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6844 struct wireless_dev *wdev = info->user_ptr[1];
6845 int err;
6846
6847 if (!rdev->ops->start_p2p_device)
6848 return -EOPNOTSUPP;
6849
6850 if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
6851 return -EOPNOTSUPP;
6852
6853 if (wdev->p2p_started)
6854 return 0;
6855
6856 mutex_lock(&rdev->devlist_mtx);
6857 err = cfg80211_can_add_interface(rdev, wdev->iftype);
6858 mutex_unlock(&rdev->devlist_mtx);
6859 if (err)
6860 return err;
6861
6862 err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
6863 if (err)
6864 return err;
6865
6866 wdev->p2p_started = true;
6867 mutex_lock(&rdev->devlist_mtx);
6868 rdev->opencount++;
6869 mutex_unlock(&rdev->devlist_mtx);
6870
6871 return 0;
6872}
6873
6874static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
6875{
6876 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6877 struct wireless_dev *wdev = info->user_ptr[1];
6878
6879 if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
6880 return -EOPNOTSUPP;
6881
6882 if (!rdev->ops->stop_p2p_device)
6883 return -EOPNOTSUPP;
6884
6885 if (!wdev->p2p_started)
6886 return 0;
6887
6888 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
6889 wdev->p2p_started = false;
6890
6891 mutex_lock(&rdev->devlist_mtx);
6892 rdev->opencount--;
6893 mutex_unlock(&rdev->devlist_mtx);
6894
6895 if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
6896 rdev->scan_req->aborted = true;
6897 ___cfg80211_scan_done(rdev, true);
6898 }
6811 6899
6812 return 0; 6900 return 0;
6813} 6901}
@@ -6819,7 +6907,7 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
6819#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\ 6907#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\
6820 NL80211_FLAG_CHECK_NETDEV_UP) 6908 NL80211_FLAG_CHECK_NETDEV_UP)
6821#define NL80211_FLAG_NEED_WDEV 0x10 6909#define NL80211_FLAG_NEED_WDEV 0x10
6822/* If a netdev is associated, it must be UP */ 6910/* If a netdev is associated, it must be UP, P2P must be started */
6823#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\ 6911#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
6824 NL80211_FLAG_CHECK_NETDEV_UP) 6912 NL80211_FLAG_CHECK_NETDEV_UP)
6825 6913
@@ -6880,6 +6968,13 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
6880 } 6968 }
6881 6969
6882 dev_hold(dev); 6970 dev_hold(dev);
6971 } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
6972 if (!wdev->p2p_started) {
6973 mutex_unlock(&cfg80211_mutex);
6974 if (rtnl)
6975 rtnl_unlock();
6976 return -ENETDOWN;
6977 }
6883 } 6978 }
6884 6979
6885 cfg80211_lock_rdev(rdev); 6980 cfg80211_lock_rdev(rdev);
@@ -7441,7 +7536,22 @@ static struct genl_ops nl80211_ops[] = {
7441 .internal_flags = NL80211_FLAG_NEED_NETDEV | 7536 .internal_flags = NL80211_FLAG_NEED_NETDEV |
7442 NL80211_FLAG_NEED_RTNL, 7537 NL80211_FLAG_NEED_RTNL,
7443 }, 7538 },
7444 7539 {
7540 .cmd = NL80211_CMD_START_P2P_DEVICE,
7541 .doit = nl80211_start_p2p_device,
7542 .policy = nl80211_policy,
7543 .flags = GENL_ADMIN_PERM,
7544 .internal_flags = NL80211_FLAG_NEED_WDEV |
7545 NL80211_FLAG_NEED_RTNL,
7546 },
7547 {
7548 .cmd = NL80211_CMD_STOP_P2P_DEVICE,
7549 .doit = nl80211_stop_p2p_device,
7550 .policy = nl80211_policy,
7551 .flags = GENL_ADMIN_PERM,
7552 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
7553 NL80211_FLAG_NEED_RTNL,
7554 },
7445}; 7555};
7446 7556
7447static struct genl_multicast_group nl80211_mlme_mcgrp = { 7557static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7520,12 +7630,12 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
7520static int nl80211_send_scan_msg(struct sk_buff *msg, 7630static int nl80211_send_scan_msg(struct sk_buff *msg,
7521 struct cfg80211_registered_device *rdev, 7631 struct cfg80211_registered_device *rdev,
7522 struct wireless_dev *wdev, 7632 struct wireless_dev *wdev,
7523 u32 pid, u32 seq, int flags, 7633 u32 portid, u32 seq, int flags,
7524 u32 cmd) 7634 u32 cmd)
7525{ 7635{
7526 void *hdr; 7636 void *hdr;
7527 7637
7528 hdr = nl80211hdr_put(msg, pid, seq, flags, cmd); 7638 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
7529 if (!hdr) 7639 if (!hdr)
7530 return -1; 7640 return -1;
7531 7641
@@ -7549,11 +7659,11 @@ static int
7549nl80211_send_sched_scan_msg(struct sk_buff *msg, 7659nl80211_send_sched_scan_msg(struct sk_buff *msg,
7550 struct cfg80211_registered_device *rdev, 7660 struct cfg80211_registered_device *rdev,
7551 struct net_device *netdev, 7661 struct net_device *netdev,
7552 u32 pid, u32 seq, int flags, u32 cmd) 7662 u32 portid, u32 seq, int flags, u32 cmd)
7553{ 7663{
7554 void *hdr; 7664 void *hdr;
7555 7665
7556 hdr = nl80211hdr_put(msg, pid, seq, flags, cmd); 7666 hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
7557 if (!hdr) 7667 if (!hdr)
7558 return -1; 7668 return -1;
7559 7669
@@ -8254,6 +8364,40 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
8254 nlmsg_free(msg); 8364 nlmsg_free(msg);
8255} 8365}
8256 8366
8367void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
8368 struct net_device *dev, const u8 *mac_addr,
8369 enum nl80211_connect_failed_reason reason,
8370 gfp_t gfp)
8371{
8372 struct sk_buff *msg;
8373 void *hdr;
8374
8375 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
8376 if (!msg)
8377 return;
8378
8379 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONN_FAILED);
8380 if (!hdr) {
8381 nlmsg_free(msg);
8382 return;
8383 }
8384
8385 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
8386 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
8387 nla_put_u32(msg, NL80211_ATTR_CONN_FAILED_REASON, reason))
8388 goto nla_put_failure;
8389
8390 genlmsg_end(msg, hdr);
8391
8392 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
8393 nl80211_mlme_mcgrp.id, gfp);
8394 return;
8395
8396 nla_put_failure:
8397 genlmsg_cancel(msg, hdr);
8398 nlmsg_free(msg);
8399}
8400
8257static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, 8401static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
8258 const u8 *addr, gfp_t gfp) 8402 const u8 *addr, gfp_t gfp)
8259{ 8403{
@@ -8262,9 +8406,9 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
8262 struct sk_buff *msg; 8406 struct sk_buff *msg;
8263 void *hdr; 8407 void *hdr;
8264 int err; 8408 int err;
8265 u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid); 8409 u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
8266 8410
8267 if (!nlpid) 8411 if (!nlportid)
8268 return false; 8412 return false;
8269 8413
8270 msg = nlmsg_new(100, gfp); 8414 msg = nlmsg_new(100, gfp);
@@ -8288,7 +8432,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
8288 return true; 8432 return true;
8289 } 8433 }
8290 8434
8291 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); 8435 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
8292 return true; 8436 return true;
8293 8437
8294 nla_put_failure: 8438 nla_put_failure:
@@ -8312,7 +8456,7 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
8312} 8456}
8313 8457
8314int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 8458int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
8315 struct wireless_dev *wdev, u32 nlpid, 8459 struct wireless_dev *wdev, u32 nlportid,
8316 int freq, int sig_dbm, 8460 int freq, int sig_dbm,
8317 const u8 *buf, size_t len, gfp_t gfp) 8461 const u8 *buf, size_t len, gfp_t gfp)
8318{ 8462{
@@ -8341,7 +8485,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
8341 8485
8342 genlmsg_end(msg, hdr); 8486 genlmsg_end(msg, hdr);
8343 8487
8344 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); 8488 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
8345 8489
8346 nla_put_failure: 8490 nla_put_failure:
8347 genlmsg_cancel(msg, hdr); 8491 genlmsg_cancel(msg, hdr);
@@ -8696,9 +8840,9 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
8696 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 8840 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
8697 struct sk_buff *msg; 8841 struct sk_buff *msg;
8698 void *hdr; 8842 void *hdr;
8699 u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid); 8843 u32 nlportid = ACCESS_ONCE(rdev->ap_beacons_nlportid);
8700 8844
8701 if (!nlpid) 8845 if (!nlportid)
8702 return; 8846 return;
8703 8847
8704 msg = nlmsg_new(len + 100, gfp); 8848 msg = nlmsg_new(len + 100, gfp);
@@ -8721,7 +8865,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
8721 8865
8722 genlmsg_end(msg, hdr); 8866 genlmsg_end(msg, hdr);
8723 8867
8724 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); 8868 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
8725 return; 8869 return;
8726 8870
8727 nla_put_failure: 8871 nla_put_failure:
@@ -8745,9 +8889,9 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
8745 8889
8746 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { 8890 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
8747 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) 8891 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
8748 cfg80211_mlme_unregister_socket(wdev, notify->pid); 8892 cfg80211_mlme_unregister_socket(wdev, notify->portid);
8749 if (rdev->ap_beacons_nlpid == notify->pid) 8893 if (rdev->ap_beacons_nlportid == notify->portid)
8750 rdev->ap_beacons_nlpid = 0; 8894 rdev->ap_beacons_nlportid = 0;
8751 } 8895 }
8752 8896
8753 rcu_read_unlock(); 8897 rcu_read_unlock();
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 9f2616fffb40..f6153516068c 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -91,6 +91,11 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
91 struct net_device *dev, const u8 *mac_addr, 91 struct net_device *dev, const u8 *mac_addr,
92 gfp_t gfp); 92 gfp_t gfp);
93 93
94void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
95 struct net_device *dev, const u8 *mac_addr,
96 enum nl80211_connect_failed_reason reason,
97 gfp_t gfp);
98
94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 99int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
95 struct wireless_dev *wdev, u32 nlpid, 100 struct wireless_dev *wdev, u32 nlpid,
96 int freq, int sig_dbm, 101 int freq, int sig_dbm,
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index c4ad7958af52..7d604c06c3dc 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -41,6 +41,8 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
41 [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, }, 41 [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
42 [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, }, 42 [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
43 [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, }, 43 [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
44 [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
45 [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
44 /* 46 /*
45 * add more here as they are defined in radiotap.h 47 * add more here as they are defined in radiotap.h
46 */ 48 */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 72d170ca3406..3b8cbbc214db 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -510,9 +510,11 @@ static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
510 * 510 *
511 * This lets us know if a specific frequency rule is or is not relevant to 511 * This lets us know if a specific frequency rule is or is not relevant to
512 * a specific frequency's band. Bands are device specific and artificial 512 * a specific frequency's band. Bands are device specific and artificial
513 * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is 513 * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
514 * safe for now to assume that a frequency rule should not be part of a 514 * however it is safe for now to assume that a frequency rule should not be
515 * frequency's band if the start freq or end freq are off by more than 2 GHz. 515 * part of a frequency's band if the start freq or end freq are off by more
516 * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
517 * 60 GHz band.
516 * This resolution can be lowered and should be considered as we add 518 * This resolution can be lowered and should be considered as we add
517 * regulatory rule support for other "bands". 519 * regulatory rule support for other "bands".
518 **/ 520 **/
@@ -520,9 +522,16 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
520 u32 freq_khz) 522 u32 freq_khz)
521{ 523{
522#define ONE_GHZ_IN_KHZ 1000000 524#define ONE_GHZ_IN_KHZ 1000000
523 if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) 525 /*
526 * From 802.11ad: directional multi-gigabit (DMG):
527 * Pertaining to operation in a frequency band containing a channel
528 * with the Channel starting frequency above 45 GHz.
529 */
530 u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
531 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
532 if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
524 return true; 533 return true;
525 if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) 534 if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
526 return true; 535 return true;
527 return false; 536 return false;
528#undef ONE_GHZ_IN_KHZ 537#undef ONE_GHZ_IN_KHZ
@@ -1955,8 +1964,7 @@ static void restore_regulatory_settings(bool reset_user)
1955 if (reg_request->initiator != 1964 if (reg_request->initiator !=
1956 NL80211_REGDOM_SET_BY_USER) 1965 NL80211_REGDOM_SET_BY_USER)
1957 continue; 1966 continue;
1958 list_del(&reg_request->list); 1967 list_move_tail(&reg_request->list, &tmp_reg_req_list);
1959 list_add_tail(&reg_request->list, &tmp_reg_req_list);
1960 } 1968 }
1961 } 1969 }
1962 spin_unlock(&reg_requests_lock); 1970 spin_unlock(&reg_requests_lock);
@@ -2015,8 +2023,7 @@ static void restore_regulatory_settings(bool reset_user)
2015 "into the queue\n", 2023 "into the queue\n",
2016 reg_request->alpha2[0], 2024 reg_request->alpha2[0],
2017 reg_request->alpha2[1]); 2025 reg_request->alpha2[1]);
2018 list_del(&reg_request->list); 2026 list_move_tail(&reg_request->list, &reg_requests_list);
2019 list_add_tail(&reg_request->list, &reg_requests_list);
2020 } 2027 }
2021 spin_unlock(&reg_requests_lock); 2028 spin_unlock(&reg_requests_lock);
2022 2029
@@ -2201,7 +2208,6 @@ static void print_regdomain_info(const struct ieee80211_regdomain *rd)
2201static int __set_regdom(const struct ieee80211_regdomain *rd) 2208static int __set_regdom(const struct ieee80211_regdomain *rd)
2202{ 2209{
2203 const struct ieee80211_regdomain *intersected_rd = NULL; 2210 const struct ieee80211_regdomain *intersected_rd = NULL;
2204 struct cfg80211_registered_device *rdev = NULL;
2205 struct wiphy *request_wiphy; 2211 struct wiphy *request_wiphy;
2206 /* Some basic sanity checks first */ 2212 /* Some basic sanity checks first */
2207 2213
@@ -2313,24 +2319,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2313 return 0; 2319 return 0;
2314 } 2320 }
2315 2321
2316 if (!intersected_rd) 2322 return -EINVAL;
2317 return -EINVAL;
2318
2319 rdev = wiphy_to_dev(request_wiphy);
2320
2321 rdev->country_ie_alpha2[0] = rd->alpha2[0];
2322 rdev->country_ie_alpha2[1] = rd->alpha2[1];
2323 rdev->env = last_request->country_ie_env;
2324
2325 BUG_ON(intersected_rd == rd);
2326
2327 kfree(rd);
2328 rd = NULL;
2329
2330 reset_regdomains(false);
2331 cfg80211_regdomain = intersected_rd;
2332
2333 return 0;
2334} 2323}
2335 2324
2336 2325
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 848523a2b22f..9730c9862bdc 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -815,7 +815,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
815 return NULL; 815 return NULL;
816 816
817 if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && 817 if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
818 (signal < 0 || signal > 100))) 818 (signal < 0 || signal > 100)))
819 return NULL; 819 return NULL;
820 820
821 if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable))) 821 if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 994e2f0cc7a8..ef35f4ef2aa6 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -684,22 +684,10 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
684 684
685const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie) 685const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
686{ 686{
687 u8 *end, *pos; 687 if (bss->information_elements == NULL)
688
689 pos = bss->information_elements;
690 if (pos == NULL)
691 return NULL; 688 return NULL;
692 end = pos + bss->len_information_elements; 689 return cfg80211_find_ie(ie, bss->information_elements,
693 690 bss->len_information_elements);
694 while (pos + 1 < end) {
695 if (pos + 2 + pos[1] > end)
696 break;
697 if (pos[0] == ie)
698 return pos;
699 pos += 2 + pos[1];
700 }
701
702 return NULL;
703} 691}
704EXPORT_SYMBOL(ieee80211_bss_get_ie); 692EXPORT_SYMBOL(ieee80211_bss_get_ie);
705 693
@@ -812,6 +800,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
812 if (otype == NL80211_IFTYPE_AP_VLAN) 800 if (otype == NL80211_IFTYPE_AP_VLAN)
813 return -EOPNOTSUPP; 801 return -EOPNOTSUPP;
814 802
803 /* cannot change into P2P device type */
804 if (ntype == NL80211_IFTYPE_P2P_DEVICE)
805 return -EOPNOTSUPP;
806
815 if (!rdev->ops->change_virtual_intf || 807 if (!rdev->ops->change_virtual_intf ||
816 !(rdev->wiphy.interface_modes & (1 << ntype))) 808 !(rdev->wiphy.interface_modes & (1 << ntype)))
817 return -EOPNOTSUPP; 809 return -EOPNOTSUPP;
@@ -889,6 +881,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
889 case NUM_NL80211_IFTYPES: 881 case NUM_NL80211_IFTYPES:
890 /* not happening */ 882 /* not happening */
891 break; 883 break;
884 case NL80211_IFTYPE_P2P_DEVICE:
885 WARN_ON(1);
886 break;
892 } 887 }
893 } 888 }
894 889
@@ -1053,8 +1048,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1053 list_for_each_entry(wdev_iter, &rdev->wdev_list, list) { 1048 list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
1054 if (wdev_iter == wdev) 1049 if (wdev_iter == wdev)
1055 continue; 1050 continue;
1056 if (!netif_running(wdev_iter->netdev)) 1051 if (wdev_iter->netdev) {
1057 continue; 1052 if (!netif_running(wdev_iter->netdev))
1053 continue;
1054 } else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
1055 if (!wdev_iter->p2p_started)
1056 continue;
1057 } else {
1058 WARN_ON(1);
1059 }
1058 1060
1059 if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype)) 1061 if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
1060 continue; 1062 continue;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index b0eb7aa49b60..c8717c1d082e 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -478,13 +478,13 @@ void wireless_send_event(struct net_device * dev,
478 if (descr->header_type == IW_HEADER_TYPE_POINT) { 478 if (descr->header_type == IW_HEADER_TYPE_POINT) {
479 /* Check if number of token fits within bounds */ 479 /* Check if number of token fits within bounds */
480 if (wrqu->data.length > descr->max_tokens) { 480 if (wrqu->data.length > descr->max_tokens) {
481 netdev_err(dev, "(WE) : Wireless Event too big (%d)\n", 481 netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too big (%d)\n",
482 wrqu->data.length); 482 cmd, wrqu->data.length);
483 return; 483 return;
484 } 484 }
485 if (wrqu->data.length < descr->min_tokens) { 485 if (wrqu->data.length < descr->min_tokens) {
486 netdev_err(dev, "(WE) : Wireless Event too small (%d)\n", 486 netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too small (%d)\n",
487 wrqu->data.length); 487 cmd, wrqu->data.length);
488 return; 488 return;
489 } 489 }
490 /* Calculate extra_len - extra is NULL for restricted events */ 490 /* Calculate extra_len - extra is NULL for restricted events */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 46550997548c..41eabc46f110 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -42,13 +42,12 @@ static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
42static struct dst_entry *xfrm_policy_sk_bundles; 42static struct dst_entry *xfrm_policy_sk_bundles;
43static DEFINE_RWLOCK(xfrm_policy_lock); 43static DEFINE_RWLOCK(xfrm_policy_lock);
44 44
45static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 45static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
46static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; 46static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
47 __read_mostly;
47 48
48static struct kmem_cache *xfrm_dst_cache __read_mostly; 49static struct kmem_cache *xfrm_dst_cache __read_mostly;
49 50
50static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
51static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
52static void xfrm_init_pmtu(struct dst_entry *dst); 51static void xfrm_init_pmtu(struct dst_entry *dst);
53static int stale_bundle(struct dst_entry *dst); 52static int stale_bundle(struct dst_entry *dst);
54static int xfrm_bundle_ok(struct xfrm_dst *xdst); 53static int xfrm_bundle_ok(struct xfrm_dst *xdst);
@@ -95,6 +94,24 @@ bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl
95 return false; 94 return false;
96} 95}
97 96
97static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
98{
99 struct xfrm_policy_afinfo *afinfo;
100
101 if (unlikely(family >= NPROTO))
102 return NULL;
103 rcu_read_lock();
104 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
105 if (unlikely(!afinfo))
106 rcu_read_unlock();
107 return afinfo;
108}
109
110static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
111{
112 rcu_read_unlock();
113}
114
98static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, 115static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
99 const xfrm_address_t *saddr, 116 const xfrm_address_t *saddr,
100 const xfrm_address_t *daddr, 117 const xfrm_address_t *daddr,
@@ -2421,7 +2438,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2421 return -EINVAL; 2438 return -EINVAL;
2422 if (unlikely(afinfo->family >= NPROTO)) 2439 if (unlikely(afinfo->family >= NPROTO))
2423 return -EAFNOSUPPORT; 2440 return -EAFNOSUPPORT;
2424 write_lock_bh(&xfrm_policy_afinfo_lock); 2441 spin_lock(&xfrm_policy_afinfo_lock);
2425 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 2442 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2426 err = -ENOBUFS; 2443 err = -ENOBUFS;
2427 else { 2444 else {
@@ -2442,9 +2459,9 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2442 dst_ops->neigh_lookup = xfrm_neigh_lookup; 2459 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2443 if (likely(afinfo->garbage_collect == NULL)) 2460 if (likely(afinfo->garbage_collect == NULL))
2444 afinfo->garbage_collect = xfrm_garbage_collect_deferred; 2461 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2445 xfrm_policy_afinfo[afinfo->family] = afinfo; 2462 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2446 } 2463 }
2447 write_unlock_bh(&xfrm_policy_afinfo_lock); 2464 spin_unlock(&xfrm_policy_afinfo_lock);
2448 2465
2449 rtnl_lock(); 2466 rtnl_lock();
2450 for_each_net(net) { 2467 for_each_net(net) {
@@ -2477,21 +2494,26 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2477 return -EINVAL; 2494 return -EINVAL;
2478 if (unlikely(afinfo->family >= NPROTO)) 2495 if (unlikely(afinfo->family >= NPROTO))
2479 return -EAFNOSUPPORT; 2496 return -EAFNOSUPPORT;
2480 write_lock_bh(&xfrm_policy_afinfo_lock); 2497 spin_lock(&xfrm_policy_afinfo_lock);
2481 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 2498 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2482 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 2499 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2483 err = -EINVAL; 2500 err = -EINVAL;
2484 else { 2501 else
2485 struct dst_ops *dst_ops = afinfo->dst_ops; 2502 RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2486 xfrm_policy_afinfo[afinfo->family] = NULL; 2503 NULL);
2487 dst_ops->kmem_cachep = NULL; 2504 }
2488 dst_ops->check = NULL; 2505 spin_unlock(&xfrm_policy_afinfo_lock);
2489 dst_ops->negative_advice = NULL; 2506 if (!err) {
2490 dst_ops->link_failure = NULL; 2507 struct dst_ops *dst_ops = afinfo->dst_ops;
2491 afinfo->garbage_collect = NULL; 2508
2492 } 2509 synchronize_rcu();
2510
2511 dst_ops->kmem_cachep = NULL;
2512 dst_ops->check = NULL;
2513 dst_ops->negative_advice = NULL;
2514 dst_ops->link_failure = NULL;
2515 afinfo->garbage_collect = NULL;
2493 } 2516 }
2494 write_unlock_bh(&xfrm_policy_afinfo_lock);
2495 return err; 2517 return err;
2496} 2518}
2497EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2519EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
@@ -2500,33 +2522,16 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
2500{ 2522{
2501 struct xfrm_policy_afinfo *afinfo; 2523 struct xfrm_policy_afinfo *afinfo;
2502 2524
2503 read_lock_bh(&xfrm_policy_afinfo_lock); 2525 rcu_read_lock();
2504 afinfo = xfrm_policy_afinfo[AF_INET]; 2526 afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
2505 if (afinfo) 2527 if (afinfo)
2506 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2528 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2507#if IS_ENABLED(CONFIG_IPV6) 2529#if IS_ENABLED(CONFIG_IPV6)
2508 afinfo = xfrm_policy_afinfo[AF_INET6]; 2530 afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
2509 if (afinfo) 2531 if (afinfo)
2510 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2532 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2511#endif 2533#endif
2512 read_unlock_bh(&xfrm_policy_afinfo_lock); 2534 rcu_read_unlock();
2513}
2514
2515static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2516{
2517 struct xfrm_policy_afinfo *afinfo;
2518 if (unlikely(family >= NPROTO))
2519 return NULL;
2520 read_lock(&xfrm_policy_afinfo_lock);
2521 afinfo = xfrm_policy_afinfo[family];
2522 if (unlikely(!afinfo))
2523 read_unlock(&xfrm_policy_afinfo_lock);
2524 return afinfo;
2525}
2526
2527static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2528{
2529 read_unlock(&xfrm_policy_afinfo_lock);
2530} 2535}
2531 2536
2532static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2537static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bd2d9841ad59..3459692092ec 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -166,7 +166,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
166int __xfrm_state_delete(struct xfrm_state *x); 166int __xfrm_state_delete(struct xfrm_state *x);
167 167
168int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); 168int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
169void km_state_expired(struct xfrm_state *x, int hard, u32 pid); 169void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
170 170
171static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family) 171static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
172{ 172{
@@ -1674,13 +1674,13 @@ void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1674EXPORT_SYMBOL(km_policy_notify); 1674EXPORT_SYMBOL(km_policy_notify);
1675EXPORT_SYMBOL(km_state_notify); 1675EXPORT_SYMBOL(km_state_notify);
1676 1676
1677void km_state_expired(struct xfrm_state *x, int hard, u32 pid) 1677void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
1678{ 1678{
1679 struct net *net = xs_net(x); 1679 struct net *net = xs_net(x);
1680 struct km_event c; 1680 struct km_event c;
1681 1681
1682 c.data.hard = hard; 1682 c.data.hard = hard;
1683 c.pid = pid; 1683 c.portid = portid;
1684 c.event = XFRM_MSG_EXPIRE; 1684 c.event = XFRM_MSG_EXPIRE;
1685 km_state_notify(x, &c); 1685 km_state_notify(x, &c);
1686 1686
@@ -1700,7 +1700,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1700 1700
1701 read_lock(&xfrm_km_lock); 1701 read_lock(&xfrm_km_lock);
1702 list_for_each_entry(km, &xfrm_km_list, list) { 1702 list_for_each_entry(km, &xfrm_km_list, list) {
1703 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT); 1703 acqret = km->acquire(x, t, pol);
1704 if (!acqret) 1704 if (!acqret)
1705 err = acqret; 1705 err = acqret;
1706 } 1706 }
@@ -1726,13 +1726,13 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1726} 1726}
1727EXPORT_SYMBOL(km_new_mapping); 1727EXPORT_SYMBOL(km_new_mapping);
1728 1728
1729void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid) 1729void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
1730{ 1730{
1731 struct net *net = xp_net(pol); 1731 struct net *net = xp_net(pol);
1732 struct km_event c; 1732 struct km_event c;
1733 1733
1734 c.data.hard = hard; 1734 c.data.hard = hard;
1735 c.pid = pid; 1735 c.portid = portid;
1736 c.event = XFRM_MSG_POLEXPIRE; 1736 c.event = XFRM_MSG_POLEXPIRE;
1737 km_policy_notify(pol, dir, &c); 1737 km_policy_notify(pol, dir, &c);
1738 1738
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bc542448307a..421f98444335 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -623,7 +623,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
623 } 623 }
624 624
625 c.seq = nlh->nlmsg_seq; 625 c.seq = nlh->nlmsg_seq;
626 c.pid = nlh->nlmsg_pid; 626 c.portid = nlh->nlmsg_pid;
627 c.event = nlh->nlmsg_type; 627 c.event = nlh->nlmsg_type;
628 628
629 km_state_notify(x, &c); 629 km_state_notify(x, &c);
@@ -696,7 +696,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
696 goto out; 696 goto out;
697 697
698 c.seq = nlh->nlmsg_seq; 698 c.seq = nlh->nlmsg_seq;
699 c.pid = nlh->nlmsg_pid; 699 c.portid = nlh->nlmsg_pid;
700 c.event = nlh->nlmsg_type; 700 c.event = nlh->nlmsg_type;
701 km_state_notify(x, &c); 701 km_state_notify(x, &c);
702 702
@@ -847,7 +847,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
847 struct nlmsghdr *nlh; 847 struct nlmsghdr *nlh;
848 int err; 848 int err;
849 849
850 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 850 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
851 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 851 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
852 if (nlh == NULL) 852 if (nlh == NULL)
853 return -EMSGSIZE; 853 return -EMSGSIZE;
@@ -927,7 +927,7 @@ static inline size_t xfrm_spdinfo_msgsize(void)
927} 927}
928 928
929static int build_spdinfo(struct sk_buff *skb, struct net *net, 929static int build_spdinfo(struct sk_buff *skb, struct net *net,
930 u32 pid, u32 seq, u32 flags) 930 u32 portid, u32 seq, u32 flags)
931{ 931{
932 struct xfrmk_spdinfo si; 932 struct xfrmk_spdinfo si;
933 struct xfrmu_spdinfo spc; 933 struct xfrmu_spdinfo spc;
@@ -936,7 +936,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
936 int err; 936 int err;
937 u32 *f; 937 u32 *f;
938 938
939 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); 939 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
940 if (nlh == NULL) /* shouldn't really happen ... */ 940 if (nlh == NULL) /* shouldn't really happen ... */
941 return -EMSGSIZE; 941 return -EMSGSIZE;
942 942
@@ -969,17 +969,17 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
969 struct net *net = sock_net(skb->sk); 969 struct net *net = sock_net(skb->sk);
970 struct sk_buff *r_skb; 970 struct sk_buff *r_skb;
971 u32 *flags = nlmsg_data(nlh); 971 u32 *flags = nlmsg_data(nlh);
972 u32 spid = NETLINK_CB(skb).pid; 972 u32 sportid = NETLINK_CB(skb).portid;
973 u32 seq = nlh->nlmsg_seq; 973 u32 seq = nlh->nlmsg_seq;
974 974
975 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); 975 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
976 if (r_skb == NULL) 976 if (r_skb == NULL)
977 return -ENOMEM; 977 return -ENOMEM;
978 978
979 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) 979 if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
980 BUG(); 980 BUG();
981 981
982 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 982 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
983} 983}
984 984
985static inline size_t xfrm_sadinfo_msgsize(void) 985static inline size_t xfrm_sadinfo_msgsize(void)
@@ -990,7 +990,7 @@ static inline size_t xfrm_sadinfo_msgsize(void)
990} 990}
991 991
992static int build_sadinfo(struct sk_buff *skb, struct net *net, 992static int build_sadinfo(struct sk_buff *skb, struct net *net,
993 u32 pid, u32 seq, u32 flags) 993 u32 portid, u32 seq, u32 flags)
994{ 994{
995 struct xfrmk_sadinfo si; 995 struct xfrmk_sadinfo si;
996 struct xfrmu_sadhinfo sh; 996 struct xfrmu_sadhinfo sh;
@@ -998,7 +998,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
998 int err; 998 int err;
999 u32 *f; 999 u32 *f;
1000 1000
1001 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); 1001 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
1002 if (nlh == NULL) /* shouldn't really happen ... */ 1002 if (nlh == NULL) /* shouldn't really happen ... */
1003 return -EMSGSIZE; 1003 return -EMSGSIZE;
1004 1004
@@ -1026,17 +1026,17 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1026 struct net *net = sock_net(skb->sk); 1026 struct net *net = sock_net(skb->sk);
1027 struct sk_buff *r_skb; 1027 struct sk_buff *r_skb;
1028 u32 *flags = nlmsg_data(nlh); 1028 u32 *flags = nlmsg_data(nlh);
1029 u32 spid = NETLINK_CB(skb).pid; 1029 u32 sportid = NETLINK_CB(skb).portid;
1030 u32 seq = nlh->nlmsg_seq; 1030 u32 seq = nlh->nlmsg_seq;
1031 1031
1032 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); 1032 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1033 if (r_skb == NULL) 1033 if (r_skb == NULL)
1034 return -ENOMEM; 1034 return -ENOMEM;
1035 1035
1036 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) 1036 if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
1037 BUG(); 1037 BUG();
1038 1038
1039 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 1039 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1040} 1040}
1041 1041
1042static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, 1042static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1056,7 +1056,7 @@ static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1056 if (IS_ERR(resp_skb)) { 1056 if (IS_ERR(resp_skb)) {
1057 err = PTR_ERR(resp_skb); 1057 err = PTR_ERR(resp_skb);
1058 } else { 1058 } else {
1059 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); 1059 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1060 } 1060 }
1061 xfrm_state_put(x); 1061 xfrm_state_put(x);
1062out_noput: 1062out_noput:
@@ -1137,7 +1137,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1137 goto out; 1137 goto out;
1138 } 1138 }
1139 1139
1140 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); 1140 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1141 1141
1142out: 1142out:
1143 xfrm_state_put(x); 1143 xfrm_state_put(x);
@@ -1425,7 +1425,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1425 1425
1426 c.event = nlh->nlmsg_type; 1426 c.event = nlh->nlmsg_type;
1427 c.seq = nlh->nlmsg_seq; 1427 c.seq = nlh->nlmsg_seq;
1428 c.pid = nlh->nlmsg_pid; 1428 c.portid = nlh->nlmsg_pid;
1429 km_policy_notify(xp, p->dir, &c); 1429 km_policy_notify(xp, p->dir, &c);
1430 1430
1431 xfrm_pol_put(xp); 1431 xfrm_pol_put(xp);
@@ -1511,7 +1511,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1511 struct nlmsghdr *nlh; 1511 struct nlmsghdr *nlh;
1512 int err; 1512 int err;
1513 1513
1514 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1514 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1515 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1515 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1516 if (nlh == NULL) 1516 if (nlh == NULL)
1517 return -EMSGSIZE; 1517 return -EMSGSIZE;
@@ -1648,7 +1648,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1648 err = PTR_ERR(resp_skb); 1648 err = PTR_ERR(resp_skb);
1649 } else { 1649 } else {
1650 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, 1650 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1651 NETLINK_CB(skb).pid); 1651 NETLINK_CB(skb).portid);
1652 } 1652 }
1653 } else { 1653 } else {
1654 kuid_t loginuid = audit_get_loginuid(current); 1654 kuid_t loginuid = audit_get_loginuid(current);
@@ -1665,7 +1665,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1665 c.data.byid = p->index; 1665 c.data.byid = p->index;
1666 c.event = nlh->nlmsg_type; 1666 c.event = nlh->nlmsg_type;
1667 c.seq = nlh->nlmsg_seq; 1667 c.seq = nlh->nlmsg_seq;
1668 c.pid = nlh->nlmsg_pid; 1668 c.portid = nlh->nlmsg_pid;
1669 km_policy_notify(xp, p->dir, &c); 1669 km_policy_notify(xp, p->dir, &c);
1670 } 1670 }
1671 1671
@@ -1695,7 +1695,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1695 c.data.proto = p->proto; 1695 c.data.proto = p->proto;
1696 c.event = nlh->nlmsg_type; 1696 c.event = nlh->nlmsg_type;
1697 c.seq = nlh->nlmsg_seq; 1697 c.seq = nlh->nlmsg_seq;
1698 c.pid = nlh->nlmsg_pid; 1698 c.portid = nlh->nlmsg_pid;
1699 c.net = net; 1699 c.net = net;
1700 km_state_notify(NULL, &c); 1700 km_state_notify(NULL, &c);
1701 1701
@@ -1722,7 +1722,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1722 struct nlmsghdr *nlh; 1722 struct nlmsghdr *nlh;
1723 int err; 1723 int err;
1724 1724
1725 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); 1725 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1726 if (nlh == NULL) 1726 if (nlh == NULL)
1727 return -EMSGSIZE; 1727 return -EMSGSIZE;
1728 1728
@@ -1804,11 +1804,11 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1804 spin_lock_bh(&x->lock); 1804 spin_lock_bh(&x->lock);
1805 c.data.aevent = p->flags; 1805 c.data.aevent = p->flags;
1806 c.seq = nlh->nlmsg_seq; 1806 c.seq = nlh->nlmsg_seq;
1807 c.pid = nlh->nlmsg_pid; 1807 c.portid = nlh->nlmsg_pid;
1808 1808
1809 if (build_aevent(r_skb, x, &c) < 0) 1809 if (build_aevent(r_skb, x, &c) < 0)
1810 BUG(); 1810 BUG();
1811 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid); 1811 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
1812 spin_unlock_bh(&x->lock); 1812 spin_unlock_bh(&x->lock);
1813 xfrm_state_put(x); 1813 xfrm_state_put(x);
1814 return err; 1814 return err;
@@ -1854,7 +1854,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1854 1854
1855 c.event = nlh->nlmsg_type; 1855 c.event = nlh->nlmsg_type;
1856 c.seq = nlh->nlmsg_seq; 1856 c.seq = nlh->nlmsg_seq;
1857 c.pid = nlh->nlmsg_pid; 1857 c.portid = nlh->nlmsg_pid;
1858 c.data.aevent = XFRM_AE_CU; 1858 c.data.aevent = XFRM_AE_CU;
1859 km_state_notify(x, &c); 1859 km_state_notify(x, &c);
1860 err = 0; 1860 err = 0;
@@ -1889,7 +1889,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1889 c.data.type = type; 1889 c.data.type = type;
1890 c.event = nlh->nlmsg_type; 1890 c.event = nlh->nlmsg_type;
1891 c.seq = nlh->nlmsg_seq; 1891 c.seq = nlh->nlmsg_seq;
1892 c.pid = nlh->nlmsg_pid; 1892 c.portid = nlh->nlmsg_pid;
1893 c.net = net; 1893 c.net = net;
1894 km_policy_notify(NULL, 0, &c); 1894 km_policy_notify(NULL, 0, &c);
1895 return 0; 1895 return 0;
@@ -1957,7 +1957,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1957 // reset the timers here? 1957 // reset the timers here?
1958 WARN(1, "Dont know what to do with soft policy expire\n"); 1958 WARN(1, "Dont know what to do with soft policy expire\n");
1959 } 1959 }
1960 km_policy_expired(xp, p->dir, up->hard, current->pid); 1960 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
1961 1961
1962out: 1962out:
1963 xfrm_pol_put(xp); 1963 xfrm_pol_put(xp);
@@ -1985,7 +1985,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1985 err = -EINVAL; 1985 err = -EINVAL;
1986 if (x->km.state != XFRM_STATE_VALID) 1986 if (x->km.state != XFRM_STATE_VALID)
1987 goto out; 1987 goto out;
1988 km_state_expired(x, ue->hard, current->pid); 1988 km_state_expired(x, ue->hard, nlh->nlmsg_pid);
1989 1989
1990 if (ue->hard) { 1990 if (ue->hard) {
1991 kuid_t loginuid = audit_get_loginuid(current); 1991 kuid_t loginuid = audit_get_loginuid(current);
@@ -2397,7 +2397,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
2397 struct nlmsghdr *nlh; 2397 struct nlmsghdr *nlh;
2398 int err; 2398 int err;
2399 2399
2400 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); 2400 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2401 if (nlh == NULL) 2401 if (nlh == NULL)
2402 return -EMSGSIZE; 2402 return -EMSGSIZE;
2403 2403
@@ -2456,7 +2456,7 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
2456 if (skb == NULL) 2456 if (skb == NULL)
2457 return -ENOMEM; 2457 return -ENOMEM;
2458 2458
2459 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); 2459 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2460 if (nlh == NULL) { 2460 if (nlh == NULL) {
2461 kfree_skb(skb); 2461 kfree_skb(skb);
2462 return -EMSGSIZE; 2462 return -EMSGSIZE;
@@ -2524,7 +2524,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2524 if (skb == NULL) 2524 if (skb == NULL)
2525 return -ENOMEM; 2525 return -ENOMEM;
2526 2526
2527 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2527 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2528 err = -EMSGSIZE; 2528 err = -EMSGSIZE;
2529 if (nlh == NULL) 2529 if (nlh == NULL)
2530 goto out_free_skb; 2530 goto out_free_skb;
@@ -2594,8 +2594,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2594} 2594}
2595 2595
2596static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, 2596static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2597 struct xfrm_tmpl *xt, struct xfrm_policy *xp, 2597 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
2598 int dir)
2599{ 2598{
2600 __u32 seq = xfrm_get_acqseq(); 2599 __u32 seq = xfrm_get_acqseq();
2601 struct xfrm_user_acquire *ua; 2600 struct xfrm_user_acquire *ua;
@@ -2610,7 +2609,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2610 memcpy(&ua->id, &x->id, sizeof(ua->id)); 2609 memcpy(&ua->id, &x->id, sizeof(ua->id));
2611 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); 2610 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2612 memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); 2611 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2613 copy_to_user_policy(xp, &ua->policy, dir); 2612 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
2614 ua->aalgos = xt->aalgos; 2613 ua->aalgos = xt->aalgos;
2615 ua->ealgos = xt->ealgos; 2614 ua->ealgos = xt->ealgos;
2616 ua->calgos = xt->calgos; 2615 ua->calgos = xt->calgos;
@@ -2632,7 +2631,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2632} 2631}
2633 2632
2634static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, 2633static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2635 struct xfrm_policy *xp, int dir) 2634 struct xfrm_policy *xp)
2636{ 2635{
2637 struct net *net = xs_net(x); 2636 struct net *net = xs_net(x);
2638 struct sk_buff *skb; 2637 struct sk_buff *skb;
@@ -2641,7 +2640,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2641 if (skb == NULL) 2640 if (skb == NULL)
2642 return -ENOMEM; 2641 return -ENOMEM;
2643 2642
2644 if (build_acquire(skb, x, xt, xp, dir) < 0) 2643 if (build_acquire(skb, x, xt, xp) < 0)
2645 BUG(); 2644 BUG();
2646 2645
2647 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); 2646 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
@@ -2724,7 +2723,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2724 struct nlmsghdr *nlh; 2723 struct nlmsghdr *nlh;
2725 int err; 2724 int err;
2726 2725
2727 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); 2726 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2728 if (nlh == NULL) 2727 if (nlh == NULL)
2729 return -EMSGSIZE; 2728 return -EMSGSIZE;
2730 2729
@@ -2784,7 +2783,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
2784 if (skb == NULL) 2783 if (skb == NULL)
2785 return -ENOMEM; 2784 return -ENOMEM;
2786 2785
2787 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); 2786 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2788 err = -EMSGSIZE; 2787 err = -EMSGSIZE;
2789 if (nlh == NULL) 2788 if (nlh == NULL)
2790 goto out_free_skb; 2789 goto out_free_skb;
@@ -2838,7 +2837,7 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
2838 if (skb == NULL) 2837 if (skb == NULL)
2839 return -ENOMEM; 2838 return -ENOMEM;
2840 2839
2841 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); 2840 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2842 err = -EMSGSIZE; 2841 err = -EMSGSIZE;
2843 if (nlh == NULL) 2842 if (nlh == NULL)
2844 goto out_free_skb; 2843 goto out_free_skb;
@@ -2991,7 +2990,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
2991 .input = xfrm_netlink_rcv, 2990 .input = xfrm_netlink_rcv,
2992 }; 2991 };
2993 2992
2994 nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg); 2993 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
2995 if (nlsk == NULL) 2994 if (nlsk == NULL)
2996 return -ENOMEM; 2995 return -ENOMEM;
2997 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ 2996 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 8a77725423e0..14d810ead420 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -113,13 +113,12 @@ static int __init selnl_init(void)
113{ 113{
114 struct netlink_kernel_cfg cfg = { 114 struct netlink_kernel_cfg cfg = {
115 .groups = SELNLGRP_MAX, 115 .groups = SELNLGRP_MAX,
116 .flags = NL_CFG_F_NONROOT_RECV,
116 }; 117 };
117 118
118 selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, 119 selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
119 THIS_MODULE, &cfg);
120 if (selnl == NULL) 120 if (selnl == NULL)
121 panic("SELinux: Cannot create netlink socket."); 121 panic("SELinux: Cannot create netlink socket.");
122 netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
123 return 0; 122 return 0;
124} 123}
125 124