author    Pablo Neira Ayuso <pablo@netfilter.org>    2012-09-03 09:28:30 -0400
committer Pablo Neira Ayuso <pablo@netfilter.org>    2012-09-03 09:34:51 -0400
commit    ace1fe1231bdfffd60b5e703aa5b7283fbf98dbd (patch)
tree      06c7492a8f3cc65f916768616ca24c6bc7171761
parent    ce9f3f31efb88841e4df98794b13dbac8c4901da (diff)
parent    a2dc375e12334b3d8f787a48b2fb6172ccfb80ae (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
This merges (3f509c6 netfilter: nf_nat_sip: fix incorrect handling of EBUSY for RTCP expectation) into Patrick McHardy's IPv6 NAT changes.
-rw-r--r--  Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt | 75
-rw-r--r--  Documentation/networking/batman-adv.txt | 7
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 37
-rw-r--r--  Documentation/networking/stmmac.txt | 5
-rw-r--r--  Makefile | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 1
-rw-r--r--  drivers/bcma/Kconfig | 4
-rw-r--r--  drivers/bcma/bcma_private.h | 2
-rw-r--r--  drivers/bcma/driver_chipcommon_nflash.c | 28
-rw-r--r--  drivers/bcma/driver_chipcommon_sflash.c | 123
-rw-r--r--  drivers/bcma/main.c | 17
-rw-r--r--  drivers/char/agp/intel-agp.h | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 105
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 3
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 105
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 138
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-diolan-u2c.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 28
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 2
-rw-r--r--  drivers/net/Kconfig | 4
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 31
-rw-r--r--  drivers/net/can/sja1000/sja1000_platform.c | 4
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 24
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 549
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.h | 52
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 274
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 160
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 1
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 17
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 235
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 49
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 621
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 39
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h | 4
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 1
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 3
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 3
-rw-r--r--  drivers/net/ieee802154/Kconfig (renamed from drivers/ieee802154/Kconfig) | 0
-rw-r--r--  drivers/net/ieee802154/Makefile (renamed from drivers/ieee802154/Makefile) | 0
-rw-r--r--  drivers/net/ieee802154/at86rf230.c (renamed from drivers/ieee802154/at86rf230.c) | 12
-rw-r--r--  drivers/net/ieee802154/fakehard.c (renamed from drivers/ieee802154/fakehard.c) | 1
-rw-r--r--  drivers/net/ieee802154/fakelb.c (renamed from drivers/ieee802154/fakelb.c) | 0
-rw-r--r--  drivers/net/phy/Kconfig | 13
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 132
-rw-r--r--  drivers/net/phy/mdio-mux-mmioreg.c | 170
-rw-r--r--  drivers/net/team/team.c | 15
-rw-r--r--  drivers/net/tun.c | 46
-rw-r--r--  drivers/net/wimax/i2400m/driver.c | 3
-rw-r--r--  drivers/net/wireless/adm8211.c | 4
-rw-r--r--  drivers/net/wireless/airo.c | 48
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 35
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 9
-rw-r--r--  drivers/net/wireless/ath/carl9170/carl9170.h | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 15
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 16
-rw-r--r--  drivers/net/wireless/b43/main.c | 3
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 9
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 3
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 12
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 26
-rw-r--r--  drivers/net/wireless/iwlegacy/4965.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/debugfs.c | 59
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/sta.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 141
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-op-mode.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 58
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 26
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 4
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 8
-rw-r--r--  drivers/net/wireless/mwl8k.c | 17
-rw-r--r--  drivers/net/wireless/p54/lmac.h | 4
-rw-r--r--  drivers/net/wireless/p54/main.c | 2
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 15
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 20
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 16
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 15
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 13
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 6
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 61
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.h | 4
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 6
-rw-r--r--  drivers/net/xen-netfront.c | 39
-rw-r--r--  drivers/pwm/Kconfig | 31
-rw-r--r--  drivers/pwm/core.c | 12
-rw-r--r--  drivers/pwm/pwm-samsung.c | 1
-rw-r--r--  drivers/pwm/pwm-tegra.c | 4
-rw-r--r--  drivers/pwm/pwm-tiecap.c | 4
-rw-r--r--  drivers/pwm/pwm-tiehrpwm.c | 4
-rw-r--r--  drivers/pwm/pwm-vt8500.c | 2
-rw-r--r--  drivers/staging/winbond/wbusb.c | 4
-rw-r--r--  drivers/target/target_core_pscsi.c | 9
-rw-r--r--  drivers/target/target_core_transport.c | 15
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 1
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 4
-rw-r--r--  drivers/vfio/vfio.c | 19
-rw-r--r--  drivers/vhost/tcm_vhost.c | 203
-rw-r--r--  drivers/vhost/tcm_vhost.h | 12
-rw-r--r--  drivers/video/console/fbcon.c | 3
-rw-r--r--  fs/ceph/debugfs.c | 1
-rw-r--r--  fs/ceph/inode.c | 15
-rw-r--r--  fs/ceph/ioctl.c | 3
-rw-r--r--  fs/eventpoll.c | 2
-rw-r--r--  fs/namei.c | 8
-rw-r--r--  fs/nfs/Makefile | 18
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/idmap.c | 62
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 3
-rw-r--r--  fs/nfs/nfs4client.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 76
-rw-r--r--  fs/nfs/nfs4super.c | 15
-rw-r--r--  fs/nfs/nfs4xdr.c | 26
-rw-r--r--  fs/nfs/objlayout/objio_osd.c | 55
-rw-r--r--  fs/nfs/pagelist.c | 2
-rw-r--r--  fs/nfs/pnfs.c | 39
-rw-r--r--  fs/nfs/pnfs.h | 2
-rw-r--r--  fs/nfs/super.c | 39
-rw-r--r--  fs/nfs/write.c | 15
-rw-r--r--  fs/seq_file.c | 4
-rw-r--r--  include/drm/drm_crtc.h | 2
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h | 26
-rw-r--r--  include/linux/bcma/bcma_regs.h | 2
-rw-r--r--  include/linux/if_vlan.h | 9
-rw-r--r--  include/linux/inet_diag.h | 1
-rw-r--r--  include/linux/kref.h | 18
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/netlink.h | 1
-rw-r--r--  include/linux/nfs_page.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 1
-rw-r--r--  include/linux/nl80211.h | 30
-rw-r--r--  include/linux/of_mdio.h | 33
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/rfkill.h | 31
-rw-r--r--  include/linux/seq_file.h | 14
-rw-r--r--  include/linux/snmp.h | 4
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/tcp.h | 45
-rw-r--r--  include/net/ax25.h | 4
-rw-r--r--  include/net/cfg80211.h | 40
-rw-r--r--  include/net/ieee80211_radiotap.h | 11
-rw-r--r--  include/net/ipv6.h | 5
-rw-r--r--  include/net/mac80211.h | 87
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 1
-rw-r--r--  include/net/netns/ipv4.h | 3
-rw-r--r--  include/net/netns/packet.h | 2
-rw-r--r--  include/net/request_sock.h | 49
-rw-r--r--  include/net/sch_generic.h | 3
-rw-r--r--  include/net/sock.h | 11
-rw-r--r--  include/net/tcp.h | 73
-rw-r--r--  include/target/target_core_base.h | 2
-rw-r--r--  init/Kconfig | 19
-rw-r--r--  ipc/mqueue.c | 61
-rw-r--r--  kernel/pid.c | 1
-rw-r--r--  kernel/pid_namespace.c | 2
-rw-r--r--  lib/nlattr.c | 4
-rw-r--r--  net/8021q/vlan_core.c | 6
-rw-r--r--  net/appletalk/atalk_proc.c | 3
-rw-r--r--  net/atm/resources.c | 2
-rw-r--r--  net/ax25/ax25_uid.c | 21
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 96
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 214
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 11
-rw-r--r--  net/batman-adv/debugfs.c | 12
-rw-r--r--  net/batman-adv/gateway_client.c | 53
-rw-r--r--  net/batman-adv/hard-interface.c | 13
-rw-r--r--  net/batman-adv/main.c | 27
-rw-r--r--  net/batman-adv/main.h | 29
-rw-r--r--  net/batman-adv/packet.h | 35
-rw-r--r--  net/batman-adv/routing.c | 85
-rw-r--r--  net/batman-adv/send.c | 8
-rw-r--r--  net/batman-adv/soft-interface.c | 79
-rw-r--r--  net/batman-adv/soft-interface.h | 5
-rw-r--r--  net/batman-adv/translation-table.c | 416
-rw-r--r--  net/batman-adv/translation-table.h | 4
-rw-r--r--  net/batman-adv/types.h | 120
-rw-r--r--  net/batman-adv/unicast.c | 16
-rw-r--r--  net/batman-adv/vis.c | 144
-rw-r--r--  net/batman-adv/vis.h | 2
-rw-r--r--  net/ceph/ceph_common.c | 1
-rw-r--r--  net/ceph/debugfs.c | 4
-rw-r--r--  net/ceph/messenger.c | 11
-rw-r--r--  net/ceph/mon_client.c | 51
-rw-r--r--  net/core/dev.c | 24
-rw-r--r--  net/core/fib_rules.c | 3
-rw-r--r--  net/core/link_watch.c | 8
-rw-r--r--  net/core/netpoll.c | 13
-rw-r--r--  net/core/request_sock.c | 95
-rw-r--r--  net/core/scm.c | 31
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/ieee802154/6lowpan.c | 53
-rw-r--r--  net/ipv4/af_inet.c | 28
-rw-r--r--  net/ipv4/devinet.c | 6
-rw-r--r--  net/ipv4/fib_frontend.c | 7
-rw-r--r--  net/ipv4/inet_connection_sock.c | 57
-rw-r--r--  net/ipv4/inet_diag.c | 21
-rw-r--r--  net/ipv4/ipmr.c | 14
-rw-r--r--  net/ipv4/ping.c | 22
-rw-r--r--  net/ipv4/proc.c | 4
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 11
-rw-r--r--  net/ipv4/syncookies.c | 1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 87
-rw-r--r--  net/ipv4/tcp.c | 49
-rw-r--r--  net/ipv4/tcp_fastopen.c | 83
-rw-r--r--  net/ipv4/tcp_input.c | 90
-rw-r--r--  net/ipv4/tcp_ipv4.c | 275
-rw-r--r--  net/ipv4/tcp_minisocks.c | 61
-rw-r--r--  net/ipv4/tcp_output.c | 21
-rw-r--r--  net/ipv4/tcp_timer.c | 39
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv4/udp_diag.c | 5
-rw-r--r--  net/ipv6/addrconf.c | 35
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 47
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/ipv6/syncookies.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 11
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/ipx/ipx_proc.c | 3
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 3
-rw-r--r--  net/l2tp/l2tp_core.h | 1
-rw-r--r--  net/llc/llc_proc.c | 2
-rw-r--r--  net/mac80211/aes_cmac.c | 6
-rw-r--r--  net/mac80211/cfg.c | 66
-rw-r--r--  net/mac80211/debugfs.c | 32
-rw-r--r--  net/mac80211/driver-ops.h | 11
-rw-r--r--  net/mac80211/ibss.c | 15
-rw-r--r--  net/mac80211/ieee80211_i.h | 30
-rw-r--r--  net/mac80211/iface.c | 289
-rw-r--r--  net/mac80211/main.c | 21
-rw-r--r--  net/mac80211/mesh.c | 28
-rw-r--r--  net/mac80211/mesh.h | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 44
-rw-r--r--  net/mac80211/mesh_plink.c | 38
-rw-r--r--  net/mac80211/mlme.c | 240
-rw-r--r--  net/mac80211/offchannel.c | 6
-rw-r--r--  net/mac80211/rate.h | 2
-rw-r--r--  net/mac80211/rx.c | 58
-rw-r--r--  net/mac80211/scan.c | 12
-rw-r--r--  net/mac80211/status.c | 22
-rw-r--r--  net/mac80211/trace.h | 11
-rw-r--r--  net/mac80211/tx.c | 109
-rw-r--r--  net/mac80211/util.c | 57
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nf_nat_sip.c | 5
-rw-r--r--  net/netfilter/nfnetlink_log.c | 20
-rw-r--r--  net/netfilter/xt_LOG.c | 16
-rw-r--r--  net/netfilter/xt_owner.c | 30
-rw-r--r--  net/netfilter/xt_recent.c | 13
-rw-r--r--  net/netlink/af_netlink.c | 10
-rw-r--r--  net/openvswitch/flow.c | 10
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/phonet/socket.c | 6
-rw-r--r--  net/rfkill/core.c | 14
-rw-r--r--  net/sched/cls_api.c | 2
-rw-r--r--  net/sched/cls_basic.c | 3
-rw-r--r--  net/sched/cls_cgroup.c | 3
-rw-r--r--  net/sched/cls_flow.c | 19
-rw-r--r--  net/sched/cls_fw.c | 3
-rw-r--r--  net/sched/cls_route.c | 3
-rw-r--r--  net/sched/cls_rsvp.h | 3
-rw-r--r--  net/sched/cls_tcindex.c | 3
-rw-r--r--  net/sched/cls_u32.c | 3
-rw-r--r--  net/sctp/proc.c | 6
-rw-r--r--  net/unix/af_unix.c | 12
-rw-r--r--  net/wireless/chan.c | 7
-rw-r--r--  net/wireless/core.c | 53
-rw-r--r--  net/wireless/mlme.c | 10
-rw-r--r--  net/wireless/nl80211.c | 122
-rw-r--r--  net/wireless/radiotap.c | 2
-rw-r--r--  net/wireless/util.c | 36
368 files changed, 6763 insertions, 3347 deletions
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
new file mode 100644
index 000000000000..8516929c7251
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
@@ -0,0 +1,75 @@
+Properties for an MDIO bus multiplexer controlled by a memory-mapped device
+
+This is a special case of a MDIO bus multiplexer. A memory-mapped device,
+like an FPGA, is used to control which child bus is connected. The mdio-mux
+node must be a child of the memory-mapped device. The driver currently only
+supports devices with eight-bit registers.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : string, must contain "mdio-mux-mmioreg"
+
+- reg : integer, contains the offset of the register that controls the bus
+	multiplexer. The size field in the 'reg' property is the size of
+	register, and must therefore be 1.
+
+- mux-mask : integer, contains an eight-bit mask that specifies which
+	bits in the register control the actual bus multiplexer. The
+	'reg' property of each child mdio-mux node must be constrained by
+	this mask.
+
+Example:
+
+The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
+For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
+A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
+BRDCFG1 that control the actual mux.
+
+	/* The FPGA node */
+	fpga: board-control@3,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
+		reg = <3 0 0x30>;
+		ranges = <0 3 0 0x30>;
+
+		mdio-mux-emi2 {
+			compatible = "mdio-mux-mmioreg", "mdio-mux";
+			mdio-parent-bus = <&xmdio0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <9 1>; // BRDCFG1
+			mux-mask = <0x6>; // EMI2
+
+			emi2_slot1: mdio@0 {	// Slot 1 XAUI (FM2)
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				phy_xgmii_slot1: ethernet-phy@0 {
+					compatible = "ethernet-phy-ieee802.3-c45";
+					reg = <4>;
+				};
+			};
+
+			emi2_slot2: mdio@2 {	// Slot 2 XAUI (FM1)
+				reg = <2>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				phy_xgmii_slot2: ethernet-phy@4 {
+					compatible = "ethernet-phy-ieee802.3-c45";
+					reg = <0>;
+				};
+			};
+		};
+	};
+
+	/* The parent MDIO bus. */
+	xmdio0: mdio@f1000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-xmdio";
+		reg = <0xf1000 0x1000>;
+		interrupts = <100 1 0 0>;
+	};
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 8f3ae4a6147e..a173d2a879f5 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -75,9 +75,10 @@ folder:
 
 There is a special folder for debugging information:
 
 # ls /sys/kernel/debug/batman_adv/bat0/
-# bla_claim_table    log                socket             transtable_local
-# gateways           originators        transtable_global  vis_data
+# bla_backbone_table log                transtable_global
+# bla_claim_table    originators        transtable_local
+# gateways           socket             vis_data
 
 Some of the files contain all sort of status information regard-
 ing the mesh network. For example, you can view the table of
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ca447b35b833..c7fc10724948 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -439,7 +439,9 @@ tcp_stdurg - BOOLEAN
 tcp_synack_retries - INTEGER
 	Number of times SYNACKs for a passive TCP connection attempt will
 	be retransmitted. Should not be higher than 255. Default value
-	is 5, which corresponds to ~180seconds.
+	is 5, which corresponds to 31seconds till the last retransmission
+	with the current initial RTO of 1second. With this the final timeout
+	for a passive TCP connection will happen after 63seconds.
 
 tcp_syncookies - BOOLEAN
 	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
@@ -465,20 +467,37 @@ tcp_syncookies - BOOLEAN
 tcp_fastopen - INTEGER
 	Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
 	in the opening SYN packet. To use this feature, the client application
-	must not use connect(). Instead, it should use sendmsg() or sendto()
-	with MSG_FASTOPEN flag which performs a TCP handshake automatically.
+	must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than
+	connect() to perform a TCP handshake automatically.
 
-	The values (bitmap) are:
-	1: Enables sending data in the opening SYN on the client
-	5: Enables sending data in the opening SYN on the client regardless
-	of cookie availability.
+	The values (bitmap) are
+	1: Enables sending data in the opening SYN on the client.
+	2: Enables TCP Fast Open on the server side, i.e., allowing data in
+	  a SYN packet to be accepted and passed to the application before
+	  3-way hand shake finishes.
+	4: Send data in the opening SYN regardless of cookie availability and
+	  without a cookie option.
+	0x100: Accept SYN data w/o validating the cookie.
+	0x200: Accept data-in-SYN w/o any cookie option present.
+	0x400/0x800: Enable Fast Open on all listeners regardless of the
+	  TCP_FASTOPEN socket option. The two different flags designate two
+	  different ways of setting max_qlen without the TCP_FASTOPEN socket
+	  option.
 
 	Default: 0
 
+	Note that the client & server side Fast Open flags (1 and 2
+	respectively) must be also enabled before the rest of flags can take
+	effect.
+
+	See include/net/tcp.h and the code for more details.
+
 tcp_syn_retries - INTEGER
 	Number of times initial SYNs for an active TCP connection attempt
 	will be retransmitted. Should not be higher than 255. Default value
-	is 5, which corresponds to ~180seconds.
+	is 6, which corresponds to 63seconds till the last restransmission
+	with the current initial RTO of 1second. With this the final timeout
+	for an active TCP connection attempt will happen after 127seconds.
 
 tcp_timestamps - BOOLEAN
 	Enable timestamps as defined in RFC1323.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index c676b9cedbd0..ef9ee71b4d7f 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -173,7 +173,6 @@ Where:
 For MDIO bus The we have:
 
  struct stmmac_mdio_bus_data {
-	int bus_id;
 	int (*phy_reset)(void *priv);
 	unsigned int phy_mask;
 	int *irqs;
@@ -181,7 +180,6 @@ For MDIO bus The we have:
 };
 
 Where:
- o bus_id: bus identifier;
  o phy_reset: hook to reset the phy device attached to the bus.
  o phy_mask: phy mask passed when register the MDIO bus within the driver.
  o irqs: list of IRQs, one per PHY.
@@ -230,9 +228,6 @@ there are two MAC cores: one MAC is for MDIO Bus/PHY emulation
 with fixed_link support.
 
 static struct stmmac_mdio_bus_data stmmac1_mdio_bus = {
-	.bus_id = 1,
-		|
-		|-> phy device on the bus_id 1
 	.phy_reset = phy_reset;
 		|
 		|-> function to provide the phy_reset on this board
diff --git a/Makefile b/Makefile
index 9cc77acfc881..354026873b13 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/drivers/Makefile b/drivers/Makefile
index 5b421840c48d..1ecd1bfe5069 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -120,7 +120,6 @@ obj-$(CONFIG_VHOST_NET)	+= vhost/
 obj-$(CONFIG_VLYNQ)		+= vlynq/
 obj-$(CONFIG_STAGING)		+= staging/
 obj-y				+= platform/
-obj-y				+= ieee802154/
 #common clk code
 obj-y				+= clk/
 
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index ea4c6d52605a..29e51bc01383 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,
 
 	return (AE_NOT_FOUND);
 }
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
 
 acpi_status
 acpi_get_table(char *signature,
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 06b3207adebd..a533af218368 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
 
 config BCMA_SFLASH
 	bool
-	depends on BCMA_DRIVER_MIPS && BROKEN
+	depends on BCMA_DRIVER_MIPS
 	default y
 
 config BCMA_NFLASH
 	bool
-	depends on BCMA_DRIVER_MIPS && BROKEN
+	depends on BCMA_DRIVER_MIPS
 	default y
 
 config BCMA_DRIVER_GMAC_CMN
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 3cf9cc923cd2..169fc58427d3 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_SFLASH
 /* driver_chipcommon_sflash.c */
 int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
 #else
 static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 #ifdef CONFIG_BCMA_NFLASH
 /* driver_chipcommon_nflash.c */
 int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
 #else
 static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 574d62435bc2..9042781edec3 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -5,15 +5,37 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+struct platform_device bcma_nflash_dev = {
+	.name		= "bcma_nflash",
+	.num_resources	= 0,
+};
+
 /* Initialize NAND flash access */
 int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
-	bcma_err(cc->core->bus, "NAND flash support is broken\n");
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+	    cc->core->id.rev != 0x38) {
+		bcma_err(bus, "NAND flash on unsupported board!\n");
+		return -ENOTSUPP;
+	}
+
+	if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+		bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+		return -ENODEV;
+	}
+
+	cc->nflash.present = true;
+
+	/* Prepare platform device, but don't register it yet. It's too early,
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
 	return 0;
 }
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 6e157a58a1d7..2c4eec2ca5a0 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -5,15 +5,132 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+static struct resource bcma_sflash_resource = {
+	.name		= "bcma_sflash",
+	.start		= BCMA_SFLASH,
+	.end		= 0,
+	.flags		= IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+	.name		= "bcma_sflash",
+	.resource	= &bcma_sflash_resource,
+	.num_resources	= 1,
+};
+
+struct bcma_sflash_tbl_e {
+	char *name;
+	u32 id;
+	u32 blocksize;
+	u16 numblocks;
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+	{ "", 0x14, 0x10000, 32, },
+	{ 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+	{ 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+	{ 0 },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+	int i;
+	bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+			BCMA_CC_FLASHCTL_START | opcode);
+	for (i = 0; i < 1000; i++) {
+		if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+		      BCMA_CC_FLASHCTL_BUSY))
+			return;
+		cpu_relax();
+	}
+	bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
 /* Initialize serial flash access */
 int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
-	bcma_err(cc->core->bus, "Serial flash support is broken\n");
+	struct bcma_bus *bus = cc->core->bus;
+	struct bcma_sflash *sflash = &cc->sflash;
+	struct bcma_sflash_tbl_e *e;
+	u32 id, id2;
+
+	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+	case BCMA_CC_FLASHT_STSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		switch (id) {
+		case 0xbf:
+			for (e = bcma_sflash_sst_tbl; e->name; e++) {
+				if (e->id == id2)
+					break;
+			}
+			break;
+		default:
+			for (e = bcma_sflash_st_tbl; e->name; e++) {
+				if (e->id == id)
+					break;
+			}
+			break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+			return -ENOTSUPP;
+		}
+
+		break;
+	case BCMA_CC_FLASHT_ATSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+		for (e = bcma_sflash_at_tbl; e->name; e++) {
+			if (e->id == id)
+				break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+			return -ENOTSUPP;
+		}
+
+		break;
+	default:
+		bcma_err(bus, "Unsupported flash type\n");
+		return -ENOTSUPP;
+	}
+
+	sflash->window = BCMA_SFLASH;
+	sflash->blocksize = e->blocksize;
+	sflash->numblocks = e->numblocks;
+	sflash->size = sflash->blocksize * sflash->numblocks;
+	sflash->present = true;
+
+	bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+		  e->name, sflash->size / 1024, sflash->blocksize,
+		  sflash->numblocks);
+
+	/* Prepare platform device, but don't register it yet. It's too early,
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+					  sflash->size;
+	bcma_sflash_dev.dev.platform_data = sflash;
+
 	return 0;
 }
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 758af9ccdef0..a8f570d69075 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,6 +7,7 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
 		dev_id++;
 	}
 
+#ifdef CONFIG_BCMA_SFLASH
+	if (bus->drv_cc.sflash.present) {
+		err = platform_device_register(&bcma_sflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering serial flash\n");
+	}
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+	if (bus->drv_cc.nflash.present) {
+		err = platform_device_register(&bcma_nflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering NAND flash\n");
+	}
+#endif
+
 	return 0;
 }
 
141 158
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6f007b6c240d..6ec0fff79bc2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -64,6 +64,7 @@
 #define I830_PTE_SYSTEM_CACHED	0x00000006
 /* GT PTE cache control fields */
 #define GEN6_PTE_UNCACHED	0x00000002
+#define HSW_PTE_UNCACHED	0x00000000
 #define GEN6_PTE_LLC		0x00000004
 #define GEN6_PTE_LLC_MLC	0x00000006
 #define GEN6_PTE_GFDT		0x00000008
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 08fc5cbb13cd..58e32f7c3229 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
 	return true;
 }
 
+static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
+				unsigned int flags)
+{
+	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+	u32 pte_flags;
+
+	if (type_mask == AGP_USER_MEMORY)
+		pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
+	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	} else { /* set 'normal'/'cached' to LLC by default */
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	}
+
+	/* gen6 has bit11-4 for physical addr bit39-32 */
+	addr |= (addr >> 28) & 0xff0;
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 			     unsigned int flags)
 {
@@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
 	.check_flags = gen6_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
 };
+static const struct intel_gtt_driver haswell_gtt_driver = {
+	.gen = 6,
+	.setup = i9xx_setup,
+	.cleanup = gen6_cleanup,
+	.write_entry = haswell_write_entry,
+	.dma_mask_size = 40,
+	.check_flags = gen6_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
 static const struct intel_gtt_driver valleyview_gtt_driver = {
 	.gen = 7,
 	.setup = i9xx_setup,
@@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {
 	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
 	    "ValleyView", &valleyview_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b7adb4a967fd..28637c181b15 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
 	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
-	p->crtc_hadjusted = false;
-	p->crtc_vadjusted = false;
 }
 EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 371c695322d9..da457b18eaaf 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
  * Create a given set of proc files represented by an array of
  * gdm_proc_lists in the given root directory.
  */
-int drm_proc_create_files(struct drm_info_list *files, int count,
+static int drm_proc_create_files(struct drm_info_list *files, int count,
 			  struct proc_dir_entry *root, struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
@@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
 	return 0;
 }
 
-int drm_proc_remove_files(struct drm_info_list *files, int count,
+static int drm_proc_remove_files(struct drm_info_list *files, int count,
 			  struct drm_minor *minor)
 {
 	struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c4657a54f97..489e2b162b27 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
+		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+		if (ret)
+			return ret;
+
 		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
@@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev)
 		/* Is the device fubar? */
 		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
 			return -EBUSY;
-
-		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
-		if (ret)
-			return ret;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ee9b68f6bc36..d9a5372ec56f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		pte_flags |= GEN6_PTE_CACHE_LLC;
 		break;
 	case I915_CACHE_NONE:
-		pte_flags |= GEN6_PTE_UNCACHED;
+		if (IS_HASWELL(dev))
+			pte_flags |= HSW_PTE_UNCACHED;
+		else
+			pte_flags |= GEN6_PTE_UNCACHED;
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index acc99b21e0b6..28725ce5b82c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,6 +115,7 @@
 
 #define GEN6_PTE_VALID			(1 << 0)
 #define GEN6_PTE_UNCACHED		(1 << 1)
+#define HSW_PTE_UNCACHED		(0)
 #define GEN6_PTE_CACHE_LLC		(2 << 1)
 #define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
 #define GEN6_PTE_CACHE_BITS		(3 << 1)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 7ed4a41c3965..23bdc8cd1458 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	return ret;
 }
 
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+				struct i2c_adapter *i2c)
+{
+	struct edid *edid;
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
+
+	return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+				struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+
+	edid = intel_crt_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	return intel_connector_update_modes(connector, edid);
+}
+
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
 	struct intel_crt *crt = intel_attached_crt(connector);
@@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-	edid = drm_get_edid(connector, i2c);
+	edid = intel_crt_get_edid(connector, i2c);
 
 	if (edid) {
 		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 	struct i2c_adapter *i2c;
 
 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-	ret = intel_ddc_get_modes(connector, i2c);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
 	if (ret || !IS_G4X(dev))
 		return ret;
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
 	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-	return intel_ddc_get_modes(connector, i2c);
+	return intel_crt_ddc_get_modes(connector, i2c);
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 132ab511b90c..cd54cf88a28f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -342,6 +342,8 @@ struct intel_fbc_work {
 	int interval;
 };
 
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 
 extern void intel_attach_force_audio_property(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 45848b9b670b..29b72593fbb2 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -33,6 +33,25 @@
 #include "i915_drv.h"
 
 /**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int ret;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	drm_edid_to_eld(connector, edid);
+	connector->display_info.raw_edid = NULL;
+	kfree(edid);
+
+	return ret;
+}
+
+/**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
  * @adapter: i2c adapter
@@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 			struct i2c_adapter *adapter)
 {
 	struct edid *edid;
-	int ret = 0;
 
 	edid = drm_get_edid(connector, adapter);
-	if (edid) {
-		drm_mode_connector_update_edid_property(connector, edid);
-		ret = drm_add_edid_modes(connector, edid);
-		drm_edid_to_eld(connector, edid);
-		connector->display_info.raw_edid = NULL;
-		kfree(edid);
-	}
+	if (!edid)
+		return 0;
 
-	return ret;
+	return intel_connector_update_modes(connector, edid);
 }
 
 static const struct drm_prop_enum_list force_audio_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 58c07cdafb7e..1881c8c83f0e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 		   dev_priv->max_delay << 24 |
 		   dev_priv->min_delay << 16);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-		I915_WRITE(GEN6_RP_UP_EI, 66000);
-		I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-	} else {
-		I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
-		I915_WRITE(GEN6_RP_UP_EI, 100000);
-		I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
-	}
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d172e9873131..d81bb0bf2885 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1692,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
 	edid = intel_sdvo_get_edid(connector);
 	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
 		has_audio = drm_detect_monitor_audio(edid);
+	kfree(edid);
 
 	return has_audio;
 }
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c6fcb5b86a45..f4d4505fe831 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -444,11 +444,28 @@ union atom_enable_ss {
444static void atombios_crtc_program_ss(struct radeon_device *rdev, 444static void atombios_crtc_program_ss(struct radeon_device *rdev,
445 int enable, 445 int enable,
446 int pll_id, 446 int pll_id,
447 int crtc_id,
447 struct radeon_atom_ss *ss) 448 struct radeon_atom_ss *ss)
448{ 449{
450 unsigned i;
449 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); 451 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
450 union atom_enable_ss args; 452 union atom_enable_ss args;
451 453
454 if (!enable) {
455 for (i = 0; i < rdev->num_crtc; i++) {
456 if (rdev->mode_info.crtcs[i] &&
457 rdev->mode_info.crtcs[i]->enabled &&
458 i != crtc_id &&
459 pll_id == rdev->mode_info.crtcs[i]->pll_id) {
 460 /* one other crtc is using this pll, don't turn
 461 * off spread spectrum as it might turn off
 462 * the display on an active crtc
463 */
464 return;
465 }
466 }
467 }
468
452 memset(&args, 0, sizeof(args)); 469 memset(&args, 0, sizeof(args));
453 470
454 if (ASIC_IS_DCE5(rdev)) { 471 if (ASIC_IS_DCE5(rdev)) {
@@ -1028,7 +1045,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1028 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1045 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1029 &ref_div, &post_div); 1046 &ref_div, &post_div);
1030 1047
1031 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); 1048 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
1032 1049
1033 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1050 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1034 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1051 encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1051,7 +1068,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1051 ss.step = step_size; 1068 ss.step = step_size;
1052 } 1069 }
1053 1070
1054 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); 1071 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
1055 } 1072 }
1056} 1073}
1057 1074
@@ -1572,11 +1589,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
1572 ASIC_INTERNAL_SS_ON_DCPLL, 1589 ASIC_INTERNAL_SS_ON_DCPLL,
1573 rdev->clock.default_dispclk); 1590 rdev->clock.default_dispclk);
1574 if (ss_enabled) 1591 if (ss_enabled)
1575 atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); 1592 atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
1576 /* XXX: DCE5, make sure voltage, dispclk is high enough */ 1593 /* XXX: DCE5, make sure voltage, dispclk is high enough */
1577 atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); 1594 atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
1578 if (ss_enabled) 1595 if (ss_enabled)
1579 atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); 1596 atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
1580 } 1597 }
1581 1598
1582} 1599}
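
The new crtc_id argument lets the disable path check whether the PLL it is about to quiesce still feeds another active display: if any other enabled crtc uses the same PLL, the call returns without touching spread spectrum. Read as a standalone predicate (a sketch only; the helper name is hypothetical, the fields are the ones the hunk uses):

        static bool pll_shared_with_active_crtc(struct radeon_device *rdev,
                                                int pll_id, int crtc_id)
        {
                unsigned i;

                for (i = 0; i < rdev->num_crtc; i++) {
                        struct radeon_crtc *crtc = rdev->mode_info.crtcs[i];

                        /* another enabled crtc, not the caller, on this pll */
                        if (crtc && crtc->enabled && i != crtc_id &&
                            crtc->pll_id == pll_id)
                                return true;
                }
                return false;
        }

The ATOM_DCPLL call sites pass crtc_id = -1, which simply means no crtc is treated as the caller.
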
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 3dab49cb1d4a..ab74e6b149e7 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -47,13 +47,17 @@ struct r600_cs_track {
47 u32 npipes; 47 u32 npipes;
48 /* value we track */ 48 /* value we track */
49 u32 sq_config; 49 u32 sq_config;
50 u32 log_nsamples;
50 u32 nsamples; 51 u32 nsamples;
51 u32 cb_color_base_last[8]; 52 u32 cb_color_base_last[8];
52 struct radeon_bo *cb_color_bo[8]; 53 struct radeon_bo *cb_color_bo[8];
53 u64 cb_color_bo_mc[8]; 54 u64 cb_color_bo_mc[8];
54 u32 cb_color_bo_offset[8]; 55 u64 cb_color_bo_offset[8];
55 struct radeon_bo *cb_color_frag_bo[8]; /* unused */ 56 struct radeon_bo *cb_color_frag_bo[8];
56 struct radeon_bo *cb_color_tile_bo[8]; /* unused */ 57 u64 cb_color_frag_offset[8];
58 struct radeon_bo *cb_color_tile_bo[8];
59 u64 cb_color_tile_offset[8];
60 u32 cb_color_mask[8];
57 u32 cb_color_info[8]; 61 u32 cb_color_info[8];
58 u32 cb_color_view[8]; 62 u32 cb_color_view[8];
59 u32 cb_color_size_idx[8]; /* unused */ 63 u32 cb_color_size_idx[8]; /* unused */
@@ -349,10 +353,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
349 unsigned array_mode; 353 unsigned array_mode;
350 u32 format; 354 u32 format;
351 355
352 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
353 dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
354 return -EINVAL;
355 }
356 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 356 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
357 format = G_0280A0_FORMAT(track->cb_color_info[i]); 357 format = G_0280A0_FORMAT(track->cb_color_info[i]);
358 if (!r600_fmt_is_valid_color(format)) { 358 if (!r600_fmt_is_valid_color(format)) {
@@ -420,7 +420,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
420 } 420 }
421 421
422 /* check offset */ 422 /* check offset */
423 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format); 423 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
424 r600_fmt_get_blocksize(format) * track->nsamples;
424 switch (array_mode) { 425 switch (array_mode) {
425 default: 426 default:
426 case V_0280A0_ARRAY_LINEAR_GENERAL: 427 case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -441,7 +442,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
441 * broken userspace. 442 * broken userspace.
442 */ 443 */
443 } else { 444 } else {
444 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n", 445 dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
445 __func__, i, array_mode, 446 __func__, i, array_mode,
446 track->cb_color_bo_offset[i], tmp, 447 track->cb_color_bo_offset[i], tmp,
447 radeon_bo_size(track->cb_color_bo[i]), 448 radeon_bo_size(track->cb_color_bo[i]),
@@ -458,6 +459,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
458 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 459 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
459 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 460 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
460 ib[track->cb_color_size_idx[i]] = tmp; 461 ib[track->cb_color_size_idx[i]] = tmp;
462
463 /* FMASK/CMASK */
464 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
465 case V_0280A0_TILE_DISABLE:
466 break;
467 case V_0280A0_FRAG_ENABLE:
468 if (track->nsamples > 1) {
469 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
 470 /* the tile size is 8x8, but the size is in units of bits;
 471 * to get bytes, just multiply by 8. */
472 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
473
474 if (bytes + track->cb_color_frag_offset[i] >
475 radeon_bo_size(track->cb_color_frag_bo[i])) {
476 dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
477 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
478 __func__, tile_max, bytes,
479 track->cb_color_frag_offset[i],
480 radeon_bo_size(track->cb_color_frag_bo[i]));
481 return -EINVAL;
482 }
483 }
484 /* fall through */
485 case V_0280A0_CLEAR_ENABLE:
486 {
487 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
 488 /* One block = 128x128 pixels, one 8x8 tile has 4 bits:
 489 * (128*128) / (8*8) / 2 = 128 bytes per block. */
490 uint32_t bytes = (block_max + 1) * 128;
491
492 if (bytes + track->cb_color_tile_offset[i] >
493 radeon_bo_size(track->cb_color_tile_bo[i])) {
494 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
495 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
496 __func__, block_max, bytes,
497 track->cb_color_tile_offset[i],
498 radeon_bo_size(track->cb_color_tile_bo[i]));
499 return -EINVAL;
500 }
501 break;
502 }
503 default:
504 dev_warn(p->dev, "%s invalid tile mode\n", __func__);
505 return -EINVAL;
506 }
461 return 0; 507 return 0;
462} 508}
463 509
@@ -566,7 +612,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
566 612
567 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 613 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
568 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 614 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
569 tmp = ntiles * bpe * 64 * nviews; 615 tmp = ntiles * bpe * 64 * nviews * track->nsamples;
570 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 616 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
571 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 617 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
572 array_mode, 618 array_mode,
@@ -1231,6 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1231 break; 1277 break;
1232 case R_028C04_PA_SC_AA_CONFIG: 1278 case R_028C04_PA_SC_AA_CONFIG:
1233 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1279 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1280 track->log_nsamples = tmp;
1234 track->nsamples = 1 << tmp; 1281 track->nsamples = 1 << tmp;
1235 track->cb_dirty = true; 1282 track->cb_dirty = true;
1236 break; 1283 break;
@@ -1312,16 +1359,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1312 dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1359 dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1313 return -EINVAL; 1360 return -EINVAL;
1314 } 1361 }
1315 ib[idx] = track->cb_color_base_last[tmp];
1316 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1362 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1363 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1364 ib[idx] = track->cb_color_base_last[tmp];
1317 } else { 1365 } else {
1318 r = r600_cs_packet_next_reloc(p, &reloc); 1366 r = r600_cs_packet_next_reloc(p, &reloc);
1319 if (r) { 1367 if (r) {
1320 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1368 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1321 return -EINVAL; 1369 return -EINVAL;
1322 } 1370 }
1323 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1324 track->cb_color_frag_bo[tmp] = reloc->robj; 1371 track->cb_color_frag_bo[tmp] = reloc->robj;
1372 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1373 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1374 }
1375 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1376 track->cb_dirty = true;
1325 } 1377 }
1326 break; 1378 break;
1327 case R_0280C0_CB_COLOR0_TILE: 1379 case R_0280C0_CB_COLOR0_TILE:
@@ -1338,16 +1390,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1338 dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1390 dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1339 return -EINVAL; 1391 return -EINVAL;
1340 } 1392 }
1341 ib[idx] = track->cb_color_base_last[tmp];
1342 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1393 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1394 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1395 ib[idx] = track->cb_color_base_last[tmp];
1343 } else { 1396 } else {
1344 r = r600_cs_packet_next_reloc(p, &reloc); 1397 r = r600_cs_packet_next_reloc(p, &reloc);
1345 if (r) { 1398 if (r) {
1346 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1399 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1347 return -EINVAL; 1400 return -EINVAL;
1348 } 1401 }
1349 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1350 track->cb_color_tile_bo[tmp] = reloc->robj; 1402 track->cb_color_tile_bo[tmp] = reloc->robj;
1403 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1404 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1405 }
1406 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1407 track->cb_dirty = true;
1408 }
1409 break;
1410 case R_028100_CB_COLOR0_MASK:
1411 case R_028104_CB_COLOR1_MASK:
1412 case R_028108_CB_COLOR2_MASK:
1413 case R_02810C_CB_COLOR3_MASK:
1414 case R_028110_CB_COLOR4_MASK:
1415 case R_028114_CB_COLOR5_MASK:
1416 case R_028118_CB_COLOR6_MASK:
1417 case R_02811C_CB_COLOR7_MASK:
1418 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1419 track->cb_color_mask[tmp] = ib[idx];
1420 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1421 track->cb_dirty = true;
1351 } 1422 }
1352 break; 1423 break;
1353 case CB_COLOR0_BASE: 1424 case CB_COLOR0_BASE:
@@ -1492,7 +1563,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
1492} 1563}
1493 1564
1494static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1565static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1495 unsigned w0, unsigned h0, unsigned d0, unsigned format, 1566 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1496 unsigned block_align, unsigned height_align, unsigned base_align, 1567 unsigned block_align, unsigned height_align, unsigned base_align,
1497 unsigned *l0_size, unsigned *mipmap_size) 1568 unsigned *l0_size, unsigned *mipmap_size)
1498{ 1569{
@@ -1520,7 +1591,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1520 1591
1521 depth = r600_mip_minify(d0, i); 1592 depth = r600_mip_minify(d0, i);
1522 1593
1523 size = nbx * nby * blocksize; 1594 size = nbx * nby * blocksize * nsamples;
1524 if (nfaces) 1595 if (nfaces)
1525 size *= nfaces; 1596 size *= nfaces;
1526 else 1597 else
@@ -1672,7 +1743,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1672 1743
1673 nfaces = larray - barray + 1; 1744 nfaces = larray - barray + 1;
1674 } 1745 }
1675 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, 1746 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1676 pitch_align, height_align, base_align, 1747 pitch_align, height_align, base_align,
1677 &l0_size, &mipmap_size); 1748 &l0_size, &mipmap_size);
1678 /* using get ib will give us the offset into the texture bo */ 1749 /* using get ib will give us the offset into the texture bo */
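
To make the new size checks concrete: FMASK costs nsamples * log2(nsamples) * 8 bytes per 8x8 tile, CMASK a fixed 128 bytes per 128x128 block, and the same nsamples factor now also scales the color-buffer, depth-buffer and texture size estimates. A standalone worked example (user-space C sketch; the values are illustrative, not from the patch):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t log_nsamples = 2;              /* MSAA_NUM_SAMPLES field */
                uint32_t nsamples = 1u << log_nsamples; /* 4x MSAA */
                uint32_t fmask_tile_max = 1023, cmask_block_max = 255;

                /* one 8x8 FMASK tile: nsamples * log_nsamples * 8 bytes */
                uint32_t fmask_bytes = nsamples * log_nsamples * 8 * (fmask_tile_max + 1);
                /* one 128x128 CMASK block: (128*128) / (8*8) / 2 = 128 bytes */
                uint32_t cmask_bytes = (cmask_block_max + 1) * 128;

                /* prints: FMASK: 65536 bytes, CMASK: 32768 bytes */
                printf("FMASK: %u bytes, CMASK: %u bytes\n", fmask_bytes, cmask_bytes);
                return 0;
        }
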
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fd328f4c3ea8..bdb69a63062f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -92,6 +92,20 @@
92#define R_028094_CB_COLOR5_VIEW 0x028094 92#define R_028094_CB_COLOR5_VIEW 0x028094
93#define R_028098_CB_COLOR6_VIEW 0x028098 93#define R_028098_CB_COLOR6_VIEW 0x028098
94#define R_02809C_CB_COLOR7_VIEW 0x02809C 94#define R_02809C_CB_COLOR7_VIEW 0x02809C
95#define R_028100_CB_COLOR0_MASK 0x028100
96#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
97#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
98#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
99#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
100#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
101#define C_028100_FMASK_TILE_MAX 0x00000FFF
102#define R_028104_CB_COLOR1_MASK 0x028104
103#define R_028108_CB_COLOR2_MASK 0x028108
104#define R_02810C_CB_COLOR3_MASK 0x02810C
105#define R_028110_CB_COLOR4_MASK 0x028110
106#define R_028114_CB_COLOR5_MASK 0x028114
107#define R_028118_CB_COLOR6_MASK 0x028118
108#define R_02811C_CB_COLOR7_MASK 0x02811C
95#define CB_COLOR0_INFO 0x280a0 109#define CB_COLOR0_INFO 0x280a0
96# define CB_FORMAT(x) ((x) << 2) 110# define CB_FORMAT(x) ((x) << 2)
97# define CB_ARRAY_MODE(x) ((x) << 8) 111# define CB_ARRAY_MODE(x) ((x) << 8)
@@ -1400,6 +1414,9 @@
1400#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18) 1414#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
1401#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3) 1415#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
1402#define C_0280A0_TILE_MODE 0xFFF3FFFF 1416#define C_0280A0_TILE_MODE 0xFFF3FFFF
1417#define V_0280A0_TILE_DISABLE 0
1418#define V_0280A0_CLEAR_ENABLE 1
1419#define V_0280A0_FRAG_ENABLE 2
1403#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20) 1420#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
1404#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1) 1421#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
1405#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF 1422#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 99304194a65c..59a15315ae9f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -142,21 +142,6 @@ struct radeon_device;
142/* 142/*
143 * BIOS. 143 * BIOS.
144 */ 144 */
145#define ATRM_BIOS_PAGE 4096
146
147#if defined(CONFIG_VGA_SWITCHEROO)
148bool radeon_atrm_supported(struct pci_dev *pdev);
149int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
150#else
151static inline bool radeon_atrm_supported(struct pci_dev *pdev)
152{
153 return false;
154}
155
156static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
157 return -EINVAL;
158}
159#endif
160bool radeon_get_bios(struct radeon_device *rdev); 145bool radeon_get_bios(struct radeon_device *rdev);
161 146
162/* 147/*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f9c21f9d16bc..d67d4f3eb6f4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
452 } 452 }
453 453
454 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ 454 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
455 if ((dev->pdev->device == 0x9802) && 455 if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
456 (dev->pdev->subsystem_vendor == 0x1734) && 456 (dev->pdev->subsystem_vendor == 0x1734) &&
457 (dev->pdev->subsystem_device == 0x11bd)) { 457 (dev->pdev->subsystem_device == 0x11bd)) {
458 if (*connector_type == DRM_MODE_CONNECTOR_VGA) { 458 if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 98724fcb0088..2a2cf0b88a28 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
30 /* handle for device - and atpx */ 30 /* handle for device - and atpx */
31 acpi_handle dhandle; 31 acpi_handle dhandle;
32 acpi_handle atpx_handle; 32 acpi_handle atpx_handle;
33 acpi_handle atrm_handle;
34} radeon_atpx_priv; 33} radeon_atpx_priv;
35 34
36/* retrieve the ROM in 4k blocks */
37static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
38 int offset, int len)
39{
40 acpi_status status;
41 union acpi_object atrm_arg_elements[2], *obj;
42 struct acpi_object_list atrm_arg;
43 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
44
45 atrm_arg.count = 2;
46 atrm_arg.pointer = &atrm_arg_elements[0];
47
48 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
49 atrm_arg_elements[0].integer.value = offset;
50
51 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
52 atrm_arg_elements[1].integer.value = len;
53
54 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
55 if (ACPI_FAILURE(status)) {
 56 printk("failed to evaluate ATRM, got %s\n", acpi_format_exception(status));
57 return -ENODEV;
58 }
59
60 obj = (union acpi_object *)buffer.pointer;
61 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
62 len = obj->buffer.length;
63 kfree(buffer.pointer);
64 return len;
65}
66
67bool radeon_atrm_supported(struct pci_dev *pdev)
68{
69 /* get the discrete ROM only via ATRM */
70 if (!radeon_atpx_priv.atpx_detected)
71 return false;
72
73 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
74 return false;
75 return true;
76}
77
78
79int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
80{
81 return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
82}
83
84static int radeon_atpx_get_version(acpi_handle handle) 35static int radeon_atpx_get_version(acpi_handle handle)
85{ 36{
86 acpi_status status; 37 acpi_status status;
@@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
198 149
199static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 150static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
200{ 151{
201 acpi_handle dhandle, atpx_handle, atrm_handle; 152 acpi_handle dhandle, atpx_handle;
202 acpi_status status; 153 acpi_status status;
203 154
204 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 155 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
209 if (ACPI_FAILURE(status)) 160 if (ACPI_FAILURE(status))
210 return false; 161 return false;
211 162
212 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
213 if (ACPI_FAILURE(status))
214 return false;
215
216 radeon_atpx_priv.dhandle = dhandle; 163 radeon_atpx_priv.dhandle = dhandle;
217 radeon_atpx_priv.atpx_handle = atpx_handle; 164 radeon_atpx_priv.atpx_handle = atpx_handle;
218 radeon_atpx_priv.atrm_handle = atrm_handle;
219 return true; 165 return true;
220} 166}
221 167
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 501f4881e5aa..d306cc8fdeaa 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/vga_switcheroo.h> 33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/acpi.h>
35/* 36/*
36 * BIOS. 37 * BIOS.
37 */ 38 */
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
98 return true; 99 return true;
99} 100}
100 101
102#ifdef CONFIG_ACPI
101/* ATRM is used to get the BIOS on the discrete cards in 103/* ATRM is used to get the BIOS on the discrete cards in
102 * dual-gpu systems. 104 * dual-gpu systems.
103 */ 105 */
106/* retrieve the ROM in 4k blocks */
107#define ATRM_BIOS_PAGE 4096
108/**
109 * radeon_atrm_call - fetch a chunk of the vbios
110 *
111 * @atrm_handle: acpi ATRM handle
112 * @bios: vbios image pointer
113 * @offset: offset of vbios image data to fetch
114 * @len: length of vbios image data to fetch
115 *
116 * Executes ATRM to fetch a chunk of the discrete
117 * vbios image on PX systems (all asics).
118 * Returns the length of the buffer fetched.
119 */
120static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
121 int offset, int len)
122{
123 acpi_status status;
124 union acpi_object atrm_arg_elements[2], *obj;
125 struct acpi_object_list atrm_arg;
126 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
127
128 atrm_arg.count = 2;
129 atrm_arg.pointer = &atrm_arg_elements[0];
130
131 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
132 atrm_arg_elements[0].integer.value = offset;
133
134 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
135 atrm_arg_elements[1].integer.value = len;
136
137 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
138 if (ACPI_FAILURE(status)) {
 139 printk("failed to evaluate ATRM, got %s\n", acpi_format_exception(status));
140 return -ENODEV;
141 }
142
143 obj = (union acpi_object *)buffer.pointer;
144 memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
145 len = obj->buffer.length;
146 kfree(buffer.pointer);
147 return len;
148}
149
104static bool radeon_atrm_get_bios(struct radeon_device *rdev) 150static bool radeon_atrm_get_bios(struct radeon_device *rdev)
105{ 151{
106 int ret; 152 int ret;
107 int size = 256 * 1024; 153 int size = 256 * 1024;
108 int i; 154 int i;
155 struct pci_dev *pdev = NULL;
156 acpi_handle dhandle, atrm_handle;
157 acpi_status status;
158 bool found = false;
159
160 /* ATRM is for the discrete card only */
161 if (rdev->flags & RADEON_IS_IGP)
162 return false;
163
164 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
165 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
166 if (!dhandle)
167 continue;
168
169 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
170 if (!ACPI_FAILURE(status)) {
171 found = true;
172 break;
173 }
174 }
109 175
110 if (!radeon_atrm_supported(rdev->pdev)) 176 if (!found)
111 return false; 177 return false;
112 178
113 rdev->bios = kmalloc(size, GFP_KERNEL); 179 rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
117 } 183 }
118 184
119 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { 185 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
120 ret = radeon_atrm_get_bios_chunk(rdev->bios, 186 ret = radeon_atrm_call(atrm_handle,
121 (i * ATRM_BIOS_PAGE), 187 rdev->bios,
122 ATRM_BIOS_PAGE); 188 (i * ATRM_BIOS_PAGE),
189 ATRM_BIOS_PAGE);
123 if (ret < ATRM_BIOS_PAGE) 190 if (ret < ATRM_BIOS_PAGE)
124 break; 191 break;
125 } 192 }
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
130 } 197 }
131 return true; 198 return true;
132} 199}
200#else
201static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
202{
203 return false;
204}
205#endif
133 206
134static bool ni_read_disabled_bios(struct radeon_device *rdev) 207static bool ni_read_disabled_bios(struct radeon_device *rdev)
135{ 208{
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
476 return legacy_read_disabled_bios(rdev); 549 return legacy_read_disabled_bios(rdev);
477} 550}
478 551
552#ifdef CONFIG_ACPI
553static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
554{
555 bool ret = false;
556 struct acpi_table_header *hdr;
557 acpi_size tbl_size;
558 UEFI_ACPI_VFCT *vfct;
559 GOP_VBIOS_CONTENT *vbios;
560 VFCT_IMAGE_HEADER *vhdr;
561
562 if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
563 return false;
564 if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
565 DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
566 goto out_unmap;
567 }
568
569 vfct = (UEFI_ACPI_VFCT *)hdr;
570 if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
571 DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
572 goto out_unmap;
573 }
574
575 vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
576 vhdr = &vbios->VbiosHeader;
577 DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
578 vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
579 vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
580
581 if (vhdr->PCIBus != rdev->pdev->bus->number ||
582 vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
583 vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
584 vhdr->VendorID != rdev->pdev->vendor ||
585 vhdr->DeviceID != rdev->pdev->device) {
586 DRM_INFO("ACPI VFCT table is not for this card\n");
587 goto out_unmap;
 588 }
589
590 if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
591 DRM_ERROR("ACPI VFCT image truncated\n");
592 goto out_unmap;
593 }
594
595 rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
596 ret = !!rdev->bios;
597
598out_unmap:
599 return ret;
600}
601#else
602static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
603{
604 return false;
605}
606#endif
479 607
480bool radeon_get_bios(struct radeon_device *rdev) 608bool radeon_get_bios(struct radeon_device *rdev)
481{ 609{
@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
484 612
485 r = radeon_atrm_get_bios(rdev); 613 r = radeon_atrm_get_bios(rdev);
486 if (r == false) 614 if (r == false)
615 r = radeon_acpi_vfct_bios(rdev);
616 if (r == false)
487 r = igp_read_bios_from_vram(rdev); 617 r = igp_read_bios_from_vram(rdev);
488 if (r == false) 618 if (r == false)
489 r = radeon_read_bios(rdev); 619 r = radeon_read_bios(rdev);
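
Two things change here: radeon_atrm_get_bios() now finds the ATRM method itself by walking all VGA-class PCI devices, instead of depending on a handle the ATPX handler cached, and a new VFCT path can take the image straight from the ACPI-provided table. The resulting lookup order in radeon_get_bios(), abridged to the calls visible in this hunk (sketch; the wrapper name is hypothetical):

        static bool radeon_get_bios_sketch(struct radeon_device *rdev)
        {
                return radeon_atrm_get_bios(rdev) ||    /* ACPI ATRM, PX dual-GPU */
                       radeon_acpi_vfct_bios(rdev) ||   /* ACPI VFCT table image */
                       igp_read_bios_from_vram(rdev) || /* IGP: copy out of VRAM */
                       radeon_read_bios(rdev);          /* PCI expansion ROM */
        }
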
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d7269f48d37c..27d22d709c90 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -62,9 +62,10 @@
62 * 2.18.0 - r600-eg: allow "invalid" DB formats 62 * 2.18.0 - r600-eg: allow "invalid" DB formats
63 * 2.19.0 - r600-eg: MSAA textures 63 * 2.19.0 - r600-eg: MSAA textures
64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query 64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
65 * 2.21.0 - r600-r700: FMASK and CMASK
65 */ 66 */
66#define KMS_DRIVER_MAJOR 2 67#define KMS_DRIVER_MAJOR 2
67#define KMS_DRIVER_MINOR 20 68#define KMS_DRIVER_MINOR 21
68#define KMS_DRIVER_PATCHLEVEL 0 69#define KMS_DRIVER_PATCHLEVEL 0
69int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 70int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
70int radeon_driver_unload_kms(struct drm_device *dev); 71int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1cb014b571ab..9024e7222839 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -132,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev,
132 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, 132 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
133 sizeof(struct radeon_bo)); 133 sizeof(struct radeon_bo));
134 134
135retry:
135 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 136 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
136 if (bo == NULL) 137 if (bo == NULL)
137 return -ENOMEM; 138 return -ENOMEM;
@@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,
145 bo->surface_reg = -1; 146 bo->surface_reg = -1;
146 INIT_LIST_HEAD(&bo->list); 147 INIT_LIST_HEAD(&bo->list);
147 INIT_LIST_HEAD(&bo->va); 148 INIT_LIST_HEAD(&bo->va);
148
149retry:
150 radeon_ttm_placement_from_domain(bo, domain); 149 radeon_ttm_placement_from_domain(bo, domain);
151 /* Kernel allocation are uninterruptible */ 150 /* Kernel allocation are uninterruptible */
152 down_read(&rdev->pm.mclk_lock); 151 down_read(&rdev->pm.mclk_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ec79b3750430..43c431a2686d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
706 if (radeon_debugfs_ring_init(rdev, ring)) { 706 if (radeon_debugfs_ring_init(rdev, ring)) {
707 DRM_ERROR("Failed to register debugfs file for rings !\n"); 707 DRM_ERROR("Failed to register debugfs file for rings !\n");
708 } 708 }
709 radeon_ring_lockup_update(ring);
709 return 0; 710 return 0;
710} 711}
711 712
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 5e659b034d9a..f93e45d869f4 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -744,14 +744,6 @@ r600 0x9400
7440x00028C38 CB_CLRCMP_DST 7440x00028C38 CB_CLRCMP_DST
7450x00028C3C CB_CLRCMP_MSK 7450x00028C3C CB_CLRCMP_MSK
7460x00028C34 CB_CLRCMP_SRC 7460x00028C34 CB_CLRCMP_SRC
7470x00028100 CB_COLOR0_MASK
7480x00028104 CB_COLOR1_MASK
7490x00028108 CB_COLOR2_MASK
7500x0002810C CB_COLOR3_MASK
7510x00028110 CB_COLOR4_MASK
7520x00028114 CB_COLOR5_MASK
7530x00028118 CB_COLOR6_MASK
7540x0002811C CB_COLOR7_MASK
7550x00028808 CB_COLOR_CONTROL 7470x00028808 CB_COLOR_CONTROL
7560x0002842C CB_FOG_BLUE 7480x0002842C CB_FOG_BLUE
7570x00028428 CB_FOG_GREEN 7490x00028428 CB_FOG_GREEN
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index f5dd89e891de..9159d48d1dfd 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
354 354
355static void udl_crtc_disable(struct drm_crtc *crtc) 355static void udl_crtc_disable(struct drm_crtc *crtc)
356{ 356{
357 357 udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
358
359} 358}
360 359
361static void udl_crtc_destroy(struct drm_crtc *crtc) 360static void udl_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6b0078ffa763..c50724bd30f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
1688 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1688 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1689 struct drm_framebuffer *old_fb = crtc->fb; 1689 struct drm_framebuffer *old_fb = crtc->fb;
1690 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); 1690 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
1691 struct drm_file *file_priv = event->base.file_priv; 1691 struct drm_file *file_priv;
1692 struct vmw_fence_obj *fence = NULL; 1692 struct vmw_fence_obj *fence = NULL;
1693 struct drm_clip_rect clips; 1693 struct drm_clip_rect clips;
1694 int ret; 1694 int ret;
1695 1695
1696 if (event == NULL)
1697 return -EINVAL;
1698
1696 /* require ScreenObject support for page flipping */ 1699 /* require ScreenObject support for page flipping */
1697 if (!dev_priv->sou_priv) 1700 if (!dev_priv->sou_priv)
1698 return -ENOSYS; 1701 return -ENOSYS;
1699 1702
1703 file_priv = event->base.file_priv;
1700 if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) 1704 if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
1701 return -EINVAL; 1705 return -EINVAL;
1702 1706
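
The vmwgfx hunk fixes a use-before-NULL-check: file_priv was initialized from event->base.file_priv at its declaration, before event itself was validated. Reduced to a sketch of the pattern:

        /* broken: the dereference happens in the declaration */
        struct drm_file *file_priv = event->base.file_priv; /* oops if event == NULL */
        if (event == NULL)
                return -EINVAL;

        /* fixed: declare, validate, then dereference */
        struct drm_file *file_priv;
        if (event == NULL)
                return -EINVAL;
        file_priv = event->base.file_priv;
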
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index aedb94f34bf7..dae3ddfe7619 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
405 } 405 }
406 } 406 }
407 } 407 }
408 ret = num;
408abort: 409abort:
409 sret = diolan_i2c_stop(dev); 410 sret = diolan_i2c_stop(dev);
410 if (sret < 0 && ret >= 0) 411 if (sret < 0 && ret >= 0)
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5e6f1eed4f83..61b00edacb08 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
350 350
351 i2c_clk = clk_get_rate(dev->clk); 351 i2c_clk = clk_get_rate(dev->clk);
352 352
353 /* fallback to std. mode if machine has not provided it */
354 if (dev->cfg.clk_freq == 0)
355 dev->cfg.clk_freq = 100000;
356
357 /* 353 /*
358 * The spec says, in case of std. mode the divider is 354 * The spec says, in case of std. mode the divider is
359 * 2 whereas it is 3 for fast and fastplus mode of 355 * 2 whereas it is 3 for fast and fastplus mode of
@@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {
911 .functionality = nmk_i2c_functionality 907 .functionality = nmk_i2c_functionality
912}; 908};
913 909
910static struct nmk_i2c_controller u8500_i2c = {
911 /*
912 * Slave data setup time; 250ns, 100ns, and 10ns, which
913 * is 14, 6 and 2 respectively for a 48Mhz i2c clock.
914 */
915 .slsu = 0xe,
916 .tft = 1, /* Tx FIFO threshold */
917 .rft = 8, /* Rx FIFO threshold */
918 .clk_freq = 400000, /* fast mode operation */
919 .timeout = 200, /* Slave response timeout(ms) */
920 .sm = I2C_FREQ_MODE_FAST,
921};
922
914static atomic_t adapter_id = ATOMIC_INIT(0); 923static atomic_t adapter_id = ATOMIC_INIT(0);
915 924
916static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) 925static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
917{ 926{
918 int ret = 0; 927 int ret = 0;
919 struct nmk_i2c_controller *pdata = 928 struct nmk_i2c_controller *pdata = adev->dev.platform_data;
920 adev->dev.platform_data;
921 struct nmk_i2c_dev *dev; 929 struct nmk_i2c_dev *dev;
922 struct i2c_adapter *adap; 930 struct i2c_adapter *adap;
923 931
924 if (!pdata) { 932 if (!pdata)
925 dev_warn(&adev->dev, "no platform data\n"); 933 /* No i2c configuration found, using the default. */
926 return -ENODEV; 934 pdata = &u8500_i2c;
927 } 935
928 dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL); 936 dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
929 if (!dev) { 937 if (!dev) {
930 dev_err(&adev->dev, "cannot allocate memory\n"); 938 dev_err(&adev->dev, "cannot allocate memory\n");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6849635b268a..5d19a49803c1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
584 584
585 r = pm_runtime_get_sync(dev->dev); 585 r = pm_runtime_get_sync(dev->dev);
586 if (IS_ERR_VALUE(r)) 586 if (IS_ERR_VALUE(r))
587 return r; 587 goto out;
588 588
589 r = omap_i2c_wait_for_bb(dev); 589 r = omap_i2c_wait_for_bb(dev);
590 if (r < 0) 590 if (r < 0)
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 66eb53fac202..9a08c57bc936 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)
712 return 0; 712 return 0;
713} 713}
714 714
715#ifdef CONFIG_PM 715#ifdef CONFIG_PM_SLEEP
716static int tegra_i2c_suspend(struct device *dev) 716static int tegra_i2c_suspend(struct device *dev)
717{ 717{
718 struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); 718 struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c2bd806950e..707ab7bd4ea5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -107,8 +107,6 @@ config MII
107 or internal device. It is safe to say Y or M here even if your 107 or internal device. It is safe to say Y or M here even if your
108 ethernet card lacks MII. 108 ethernet card lacks MII.
109 109
110source "drivers/ieee802154/Kconfig"
111
112config IFB 110config IFB
113 tristate "Intermediate Functional Block support" 111 tristate "Intermediate Functional Block support"
114 depends on NET_CLS_ACT 112 depends on NET_CLS_ACT
@@ -290,6 +288,8 @@ source "drivers/net/wimax/Kconfig"
290 288
291source "drivers/net/wan/Kconfig" 289source "drivers/net/wan/Kconfig"
292 290
291source "drivers/net/ieee802154/Kconfig"
292
293config XEN_NETDEV_FRONTEND 293config XEN_NETDEV_FRONTEND
294 tristate "Xen network device frontend driver" 294 tristate "Xen network device frontend driver"
295 depends on XEN 295 depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3d375ca128a6..b682a1de7be8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
53obj-$(CONFIG_WAN) += wan/ 53obj-$(CONFIG_WAN) += wan/
54obj-$(CONFIG_WLAN) += wireless/ 54obj-$(CONFIG_WLAN) += wireless/
55obj-$(CONFIG_WIMAX) += wimax/ 55obj-$(CONFIG_WIMAX) += wimax/
56obj-$(CONFIG_IEEE802154) += ieee802154/
56 57
57obj-$(CONFIG_VMXNET3) += vmxnet3/ 58obj-$(CONFIG_VMXNET3) += vmxnet3/
58obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o 59obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b24ce257ac7b..7858c58df4a3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2811 arp_work.work); 2811 arp_work.work);
2812 struct slave *slave, *oldcurrent; 2812 struct slave *slave, *oldcurrent;
2813 int do_failover = 0; 2813 int do_failover = 0;
2814 int delta_in_ticks; 2814 int delta_in_ticks, extra_ticks;
2815 int i; 2815 int i;
2816 2816
2817 read_lock(&bond->lock); 2817 read_lock(&bond->lock);
2818 2818
2819 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2819 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2820 extra_ticks = delta_in_ticks / 2;
2820 2821
2821 if (bond->slave_cnt == 0) 2822 if (bond->slave_cnt == 0)
2822 goto re_arm; 2823 goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2839 if (slave->link != BOND_LINK_UP) { 2840 if (slave->link != BOND_LINK_UP) {
2840 if (time_in_range(jiffies, 2841 if (time_in_range(jiffies,
2841 trans_start - delta_in_ticks, 2842 trans_start - delta_in_ticks,
2842 trans_start + delta_in_ticks) && 2843 trans_start + delta_in_ticks + extra_ticks) &&
2843 time_in_range(jiffies, 2844 time_in_range(jiffies,
2844 slave->dev->last_rx - delta_in_ticks, 2845 slave->dev->last_rx - delta_in_ticks,
2845 slave->dev->last_rx + delta_in_ticks)) { 2846 slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
2846 2847
2847 slave->link = BOND_LINK_UP; 2848 slave->link = BOND_LINK_UP;
2848 bond_set_active_slave(slave); 2849 bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2872 */ 2873 */
2873 if (!time_in_range(jiffies, 2874 if (!time_in_range(jiffies,
2874 trans_start - delta_in_ticks, 2875 trans_start - delta_in_ticks,
2875 trans_start + 2 * delta_in_ticks) || 2876 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2876 !time_in_range(jiffies, 2877 !time_in_range(jiffies,
2877 slave->dev->last_rx - delta_in_ticks, 2878 slave->dev->last_rx - delta_in_ticks,
2878 slave->dev->last_rx + 2 * delta_in_ticks)) { 2879 slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
2879 2880
2880 slave->link = BOND_LINK_DOWN; 2881 slave->link = BOND_LINK_DOWN;
2881 bond_set_backup_slave(slave); 2882 bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2933 struct slave *slave; 2934 struct slave *slave;
2934 int i, commit = 0; 2935 int i, commit = 0;
2935 unsigned long trans_start; 2936 unsigned long trans_start;
2937 int extra_ticks;
2938
2939 /* All the time comparisons below need some extra time. Otherwise, on
2940 * fast networks the ARP probe/reply may arrive within the same jiffy
2941 * as it was sent. Then, the next time the ARP monitor is run, one
2942 * arp_interval will already have passed in the comparisons.
2943 */
2944 extra_ticks = delta_in_ticks / 2;
2936 2945
2937 bond_for_each_slave(bond, slave, i) { 2946 bond_for_each_slave(bond, slave, i) {
2938 slave->new_link = BOND_LINK_NOCHANGE; 2947 slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2940 if (slave->link != BOND_LINK_UP) { 2949 if (slave->link != BOND_LINK_UP) {
2941 if (time_in_range(jiffies, 2950 if (time_in_range(jiffies,
2942 slave_last_rx(bond, slave) - delta_in_ticks, 2951 slave_last_rx(bond, slave) - delta_in_ticks,
2943 slave_last_rx(bond, slave) + delta_in_ticks)) { 2952 slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
2944 2953
2945 slave->new_link = BOND_LINK_UP; 2954 slave->new_link = BOND_LINK_UP;
2946 commit++; 2955 commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2956 */ 2965 */
2957 if (time_in_range(jiffies, 2966 if (time_in_range(jiffies,
2958 slave->jiffies - delta_in_ticks, 2967 slave->jiffies - delta_in_ticks,
2959 slave->jiffies + 2 * delta_in_ticks)) 2968 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
2960 continue; 2969 continue;
2961 2970
2962 /* 2971 /*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2976 !bond->current_arp_slave && 2985 !bond->current_arp_slave &&
2977 !time_in_range(jiffies, 2986 !time_in_range(jiffies,
2978 slave_last_rx(bond, slave) - delta_in_ticks, 2987 slave_last_rx(bond, slave) - delta_in_ticks,
2979 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { 2988 slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
2980 2989
2981 slave->new_link = BOND_LINK_DOWN; 2990 slave->new_link = BOND_LINK_DOWN;
2982 commit++; 2991 commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2992 if (bond_is_active_slave(slave) && 3001 if (bond_is_active_slave(slave) &&
2993 (!time_in_range(jiffies, 3002 (!time_in_range(jiffies,
2994 trans_start - delta_in_ticks, 3003 trans_start - delta_in_ticks,
2995 trans_start + 2 * delta_in_ticks) || 3004 trans_start + 2 * delta_in_ticks + extra_ticks) ||
2996 !time_in_range(jiffies, 3005 !time_in_range(jiffies,
2997 slave_last_rx(bond, slave) - delta_in_ticks, 3006 slave_last_rx(bond, slave) - delta_in_ticks,
2998 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { 3007 slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
2999 3008
3000 slave->new_link = BOND_LINK_DOWN; 3009 slave->new_link = BOND_LINK_DOWN;
3001 commit++; 3010 commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
3027 if ((!bond->curr_active_slave && 3036 if ((!bond->curr_active_slave &&
3028 time_in_range(jiffies, 3037 time_in_range(jiffies,
3029 trans_start - delta_in_ticks, 3038 trans_start - delta_in_ticks,
3030 trans_start + delta_in_ticks)) || 3039 trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
3031 bond->curr_active_slave != slave) { 3040 bond->curr_active_slave != slave) {
3032 slave->link = BOND_LINK_UP; 3041 slave->link = BOND_LINK_UP;
3033 if (bond->current_arp_slave) { 3042 if (bond->current_arp_slave) {
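
A worked example of the slack these bonding hunks add (illustrative numbers, assuming HZ=1000 and arp_interval = 100 ms, so delta_in_ticks = 100 and extra_ticks = 50): if the ARP reply lands in the same jiffy the probe was sent, then by the next monitor pass at least one full interval has elapsed and jiffies sits at or just past the old window's upper edge, flapping the link. The widened bound absorbs that:

        /* old window: [last_rx - 100, last_rx + 100] */
        /* new window: [last_rx - 100, last_rx + 150] */
        ok = time_in_range(jiffies, last_rx - delta_in_ticks,
                           last_rx + delta_in_ticks + extra_ticks);
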
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 4f50145f6483..662c5f7eb0c5 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
109 priv = netdev_priv(dev); 109 priv = netdev_priv(dev);
110 110
111 dev->irq = res_irq->start; 111 dev->irq = res_irq->start;
112 priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); 112 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
113 if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
114 priv->irq_flags |= IRQF_SHARED;
113 priv->reg_base = addr; 115 priv->reg_base = addr;
114 /* The CAN clock frequency is half the oscillator clock frequency */ 116 /* The CAN clock frequency is half the oscillator clock frequency */
115 priv->can.clock.freq = pdata->osc_freq / 2; 117 priv->can.clock.freq = pdata->osc_freq / 2;
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 310596175676..b595d3422b9f 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
150 const uint8_t *mem, *end, *dat; 150 const uint8_t *mem, *end, *dat;
151 uint16_t type, len; 151 uint16_t type, len;
152 uint32_t addr; 152 uint32_t addr;
153 uint8_t *buf = NULL; 153 uint8_t *buf = NULL, *new_buf;
154 int buflen = 0; 154 int buflen = 0;
155 int8_t type_end = 0; 155 int8_t type_end = 0;
156 156
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
199 if (len > buflen) { 199 if (len > buflen) {
200 /* align buflen */ 200 /* align buflen */
201 buflen = (len + (1024-1)) & ~(1024-1); 201 buflen = (len + (1024-1)) & ~(1024-1);
202 buf = krealloc(buf, buflen, GFP_KERNEL); 202 new_buf = krealloc(buf, buflen, GFP_KERNEL);
203 if (!buf) { 203 if (!new_buf) {
204 ret = -ENOMEM; 204 ret = -ENOMEM;
205 goto failed; 205 goto failed;
206 } 206 }
207 buf = new_buf;
207 } 208 }
208 /* verify record data */ 209 /* verify record data */
209 memcpy_fromio(buf, &dpram[addr + offset], len); 210 memcpy_fromio(buf, &dpram[addr + offset], len);
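
This is the standard krealloc() pitfall: writing the result back to the same pointer drops the only reference to the old buffer when krealloc() fails, because on failure it returns NULL and leaves the original allocation intact. The safe shape, as adopted above:

        new_buf = krealloc(buf, buflen, GFP_KERNEL);
        if (!new_buf) {
                ret = -ENOMEM;
                goto failed;    /* buf is still valid and still owned here */
        }
        buf = new_buf;
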
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 463b9ec57d80..6d1a24acb77e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
1708 continue; \ 1708 continue; \
1709 else 1709 else
1710 1710
1711#define for_each_napi_rx_queue(bp, var) \
1712 for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
1713
1714/* Skip OOO FP */ 1711/* Skip OOO FP */
1715#define for_each_tx_queue(bp, var) \ 1712#define for_each_tx_queue(bp, var) \
1716 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1713 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e879e19eb0d6..af20c6ee2cd9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2046 */ 2046 */
2047 bnx2x_setup_tc(bp->dev, bp->max_cos); 2047 bnx2x_setup_tc(bp->dev, bp->max_cos);
2048 2048
2049 /* Add all NAPI objects */
2050 bnx2x_add_all_napi(bp);
2049 bnx2x_napi_enable(bp); 2051 bnx2x_napi_enable(bp);
2050 2052
2051 /* set pf load just before approaching the MCP */ 2053 /* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2408 2410
2409 /* Disable HW interrupts, NAPI */ 2411 /* Disable HW interrupts, NAPI */
2410 bnx2x_netif_stop(bp, 1); 2412 bnx2x_netif_stop(bp, 1);
2413 /* Delete all NAPI objects */
2414 bnx2x_del_all_napi(bp);
2411 2415
2412 /* Release IRQs */ 2416 /* Release IRQs */
2413 bnx2x_free_irq(bp); 2417 bnx2x_free_irq(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfa757e74296..21b553229ea4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
792 bp->num_napi_queues = bp->num_queues; 792 bp->num_napi_queues = bp->num_queues;
793 793
794 /* Add NAPI objects */ 794 /* Add NAPI objects */
795 for_each_napi_rx_queue(bp, i) 795 for_each_rx_queue(bp, i)
796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 796 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
797 bnx2x_poll, BNX2X_NAPI_WEIGHT); 797 bnx2x_poll, BNX2X_NAPI_WEIGHT);
798} 798}
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
801{ 801{
802 int i; 802 int i;
803 803
804 for_each_napi_rx_queue(bp, i) 804 for_each_rx_queue(bp, i)
805 netif_napi_del(&bnx2x_fp(bp, i, napi)); 805 netif_napi_del(&bnx2x_fp(bp, i, napi));
806} 806}
807 807
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc4e0e3885b0..c37a68d68090 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
2888 */ 2888 */
2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 2889static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2890{ 2890{
2891 bnx2x_del_all_napi(bp);
2892 bnx2x_disable_msi(bp); 2891 bnx2x_disable_msi(bp);
2893 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; 2892 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
2894 bnx2x_set_int_mode(bp); 2893 bnx2x_set_int_mode(bp);
2895 bnx2x_add_all_napi(bp);
2896} 2894}
2897 2895
2898/** 2896/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 02b5a343b195..21054987257a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8427,6 +8427,8 @@ unload_error:
8427 8427
8428 /* Disable HW interrupts, NAPI */ 8428 /* Disable HW interrupts, NAPI */
8429 bnx2x_netif_stop(bp, 1); 8429 bnx2x_netif_stop(bp, 1);
8430 /* Delete all NAPI objects */
8431 bnx2x_del_all_napi(bp);
8430 8432
8431 /* Release IRQs */ 8433 /* Release IRQs */
8432 bnx2x_free_irq(bp); 8434 bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11229static void poll_bnx2x(struct net_device *dev) 11231static void poll_bnx2x(struct net_device *dev)
11230{ 11232{
11231 struct bnx2x *bp = netdev_priv(dev); 11233 struct bnx2x *bp = netdev_priv(dev);
11234 int i;
11232 11235
11233 disable_irq(bp->pdev->irq); 11236 for_each_eth_queue(bp, i) {
11234 bnx2x_interrupt(bp->pdev->irq, dev); 11237 struct bnx2x_fastpath *fp = &bp->fp[i];
11235 enable_irq(bp->pdev->irq); 11238 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11239 }
11236} 11240}
11237#endif 11241#endif
11238 11242
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11899 */ 11903 */
11900 bnx2x_set_int_mode(bp); 11904 bnx2x_set_int_mode(bp);
11901 11905
11902 /* Add all NAPI objects */
11903 bnx2x_add_all_napi(bp);
11904
11905 rc = register_netdev(dev); 11906 rc = register_netdev(dev);
11906 if (rc) { 11907 if (rc) {
11907 dev_err(&pdev->dev, "Cannot register net device\n"); 11908 dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11976 11977
11977 unregister_netdev(dev); 11978 unregister_netdev(dev);
11978 11979
11979 /* Delete all NAPI objects */
11980 bnx2x_del_all_napi(bp);
11981
11982 /* Power on: we can't let PCI layer write to us while we are in D3 */ 11980 /* Power on: we can't let PCI layer write to us while we are in D3 */
11983 bnx2x_set_power_state(bp, PCI_D0); 11981 bnx2x_set_power_state(bp, PCI_D0);
11984 11982
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12025 bnx2x_tx_disable(bp); 12023 bnx2x_tx_disable(bp);
12026 12024
12027 bnx2x_netif_stop(bp, 0); 12025 bnx2x_netif_stop(bp, 0);
12026 /* Delete all NAPI objects */
12027 bnx2x_del_all_napi(bp);
12028 12028
12029 del_timer_sync(&bp->timer); 12029 del_timer_sync(&bp->timer);
12030 12030
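
Taken together, the bnx2x hunks move NAPI object lifetime from PCI probe/remove into interface load/unload: bnx2x_add_all_napi() now runs in bnx2x_nic_load() right before NAPI is enabled, and every teardown path shown (bnx2x_nic_unload(), the unload_error path, the EEH path) pairs bnx2x_netif_stop() with bnx2x_del_all_napi(). This also lets bnx2x_change_num_queues() drop its del/add dance, since no NAPI objects exist while the interface is down. Schematically (sketch):

        /* load   */ bnx2x_add_all_napi(bp); bnx2x_napi_enable(bp);
        /* unload */ bnx2x_netif_stop(bp, 1); bnx2x_del_all_napi(bp);
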
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d266c86a53f7..5b622993ff17 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
110#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 110#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
111#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 111#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
112 112
113#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
113#define FW_VER_LEN 32 114#define FW_VER_LEN 32
114 115
115struct be_dma_mem { 116struct be_dma_mem {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7fac97b4bb59..701b3e9a715b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
120 120
121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 121 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
122 dev_warn(&adapter->pdev->dev, 122 dev_warn(&adapter->pdev->dev,
123 "opcode %d-%d is not permitted\n", 123 "VF is not privileged to issue opcode %d-%d\n",
124 opcode, subsystem); 124 opcode, subsystem);
125 } else { 125 } else {
126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 126 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
259 int num = 0, status = 0; 259 int num = 0, status = 0;
260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 260 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
261 261
262 spin_lock_bh(&adapter->mcc_cq_lock); 262 spin_lock(&adapter->mcc_cq_lock);
263 while ((compl = be_mcc_compl_get(adapter))) { 263 while ((compl = be_mcc_compl_get(adapter))) {
264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 264 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
265 /* Interpret flags as an async trailer */ 265 /* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
280 if (num) 280 if (num)
281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 281 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
282 282
283 spin_unlock_bh(&adapter->mcc_cq_lock); 283 spin_unlock(&adapter->mcc_cq_lock);
284 return status; 284 return status;
285} 285}
286 286
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
295 if (be_error(adapter)) 295 if (be_error(adapter))
296 return -EIO; 296 return -EIO;
297 297
298 local_bh_disable();
298 status = be_process_mcc(adapter); 299 status = be_process_mcc(adapter);
300 local_bh_enable();
299 301
300 if (atomic_read(&mcc_obj->q.used) == 0) 302 if (atomic_read(&mcc_obj->q.used) == 0)
301 break; 303 break;
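
The be2net locking change moves bottom-half management out of be_process_mcc(): the completion-queue lock is now taken with plain spin_lock(), and process-context callers disable BHs around the call themselves, as be_mcc_wait_compl() does above and be_worker() does in be_main.c below. Schematically (sketch):

        /* process-context caller */
        local_bh_disable();
        status = be_process_mcc(adapter);
        local_bh_enable();
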
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 90a903d83d87..111dc8813f68 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2176,8 +2176,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 {
 	u32 num = 0;
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-	    !sriov_want(adapter) && be_physfn(adapter) &&
-	    !be_is_mc(adapter)) {
+	    !sriov_want(adapter) && be_physfn(adapter)) {
 		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
 		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
 	}
@@ -2646,8 +2645,8 @@ static int be_vf_setup(struct be_adapter *adapter)
 	}
 
 	for_all_vfs(adapter, vf_cfg, vf) {
-		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-						  NULL, vf + 1);
+		lnk_speed = 1000;
+		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
 		if (status)
 			goto err;
 		vf_cfg->tx_rate = lnk_speed * 10;
@@ -2724,6 +2723,8 @@ static int be_get_config(struct be_adapter *adapter)
 	if (pos) {
 		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
 				     &dev_num_vfs);
+		if (!lancer_chip(adapter))
+			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
 		adapter->dev_num_vfs = dev_num_vfs;
 	}
 	return 0;
@@ -3437,6 +3438,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
 	if (mem->va)
 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
 				  mem->dma);
+	kfree(adapter->pmac_id);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3475,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	}
 	memset(rx_filter->va, 0, rx_filter->size);
 
+	/* primary mac needs 1 pmac entry */
+	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+				   sizeof(*adapter->pmac_id), GFP_KERNEL);
+	if (!adapter->pmac_id)
+		return -ENOMEM;
+
 	mutex_init(&adapter->mbox_lock);
 	spin_lock_init(&adapter->mcc_lock);
 	spin_lock_init(&adapter->mcc_cq_lock);
@@ -3609,12 +3617,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
 	else
 		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
 
-	/* primary mac needs 1 pmac entry */
-	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-				   sizeof(u32), GFP_KERNEL);
-	if (!adapter->pmac_id)
-		return -ENOMEM;
-
 	status = be_cmd_get_cntl_attributes(adapter);
 	if (status)
 		return status;
@@ -3763,7 +3765,9 @@ static void be_worker(struct work_struct *work)
 	/* when interrupts are not yet enabled, just reap any pending
 	 * mcc completions */
 	if (!netif_running(adapter->netdev)) {
+		local_bh_disable();
 		be_process_mcc(adapter);
+		local_bh_enable();
 		goto reschedule;
 	}
 
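
Two of the be_main.c hunks relocate the pmac_id array: it is now allocated in be_ctrl_init() (using sizeof(*adapter->pmac_id) rather than a hard-coded sizeof(u32)) and freed in the matching be_ctrl_cleanup(), instead of being allocated in be_get_initial_config() with no symmetric free in this file. A minimal sketch of that init/cleanup pairing, with a hypothetical foo_adapter standing in for be_adapter:

#include <linux/slab.h>

struct foo_adapter {		/* hypothetical */
	u32 *pmac_id;
	u32 max_pmac_cnt;
};

static int foo_ctrl_init(struct foo_adapter *ad)
{
	/* one extra slot reserved for the primary MAC */
	ad->pmac_id = kcalloc(ad->max_pmac_cnt + 1,
			      sizeof(*ad->pmac_id), GFP_KERNEL);
	if (!ad->pmac_id)
		return -ENOMEM;
	return 0;
}

static void foo_ctrl_cleanup(struct foo_adapter *ad)
{
	kfree(ad->pmac_id);	/* kfree(NULL) is a safe no-op */
}

Using sizeof(*pointer) instead of a named type keeps the allocation correct even if the element type of pmac_id ever changes.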
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3574e1499dfc..feff51664dcf 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
 	---help---
 	  This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
+config FSL_XGMAC_MDIO
+	tristate "Freescale XGMAC MDIO"
+	depends on FSL_SOC
+	select PHYLIB
+	---help---
+	  This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
 config UCC_GETH
 	tristate "Freescale QE Gigabit Ethernet"
 	depends on QUICC_ENGINE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 1752488c9ee5..3d1839afff65 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 endif
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9527b28d70d1..c93a05654b46 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -19,54 +19,90 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/unistd.h>
 #include <linux/slab.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/crc32.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/ucc.h>
+#include <asm/ucc.h>	/* for ucc_set_qe_mux_mii_mng() */
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
+
+#define MIIMIND_BUSY		0x00000001
+#define MIIMIND_NOTVALID	0x00000004
+#define MIIMCFG_INIT_VALUE	0x00000007
+#define MIIMCFG_RESET		0x80000000
+
+#define MII_READ_COMMAND	0x00000001
+
+struct fsl_pq_mii {
+	u32 miimcfg;	/* MII management configuration reg */
+	u32 miimcom;	/* MII management command reg */
+	u32 miimadd;	/* MII management address reg */
+	u32 miimcon;	/* MII management control reg */
+	u32 miimstat;	/* MII management status reg */
+	u32 miimind;	/* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+	u8 res1[16];
+	u32 ieventm;	/* MDIO Interrupt event register (for etsec2) */
+	u32 imaskm;	/* MDIO Interrupt mask register (for etsec2) */
+	u8 res2[4];
+	u32 emapm;	/* MDIO Event mapping register (for etsec2) */
+	u8 res3[1280];
+	struct fsl_pq_mii mii;
+	u8 res4[28];
+	u32 utbipar;	/* TBI phy address reg (only on UCC) */
+	u8 res5[2728];
+} __packed;
 
 /* Number of microseconds to wait for an MII register to respond */
 #define MII_TIMEOUT	1000
 
 struct fsl_pq_mdio_priv {
 	void __iomem *map;
-	struct fsl_pq_mdio __iomem *regs;
+	struct fsl_pq_mii __iomem *regs;
+	int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data.  Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node.  Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+	unsigned int mii_offset;	/* offset of the MII registers */
+	uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+	void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
 };
 
 /*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus attached to the local interface, which may be different from the
- * generic mdio bus (tied to a single interface), waiting until the write is
- * done before returning. This is helpful in programming interfaces like
- * the TBI which control interfaces like onchip SERDES and are always tied to
- * the local mdio pins, which may not be the same as system mdio bus, used for
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
  * controlling the external PHYs, for example.
  */
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-		int regnum, u16 value)
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+		u16 value)
 {
+	struct fsl_pq_mdio_priv *priv = bus->priv;
+	struct fsl_pq_mii __iomem *regs = priv->regs;
 	u32 status;
 
 	/* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
 }
 
 /*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.  All PHY operation
- * done on the bus attached to the local interface,
- * which may be different from the generic mdio bus
- * This is helpful in programming interfaces like
- * the TBI which, in turn, control interfaces like onchip SERDES
- * and are always tied to the local mdio pins, which may not be the
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operation done on the bus attached to the local interface, which
+ * may be different from the generic mdio bus.  This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  */
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
-		int mii_id, int regnum)
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	u16 value;
+	struct fsl_pq_mdio_priv *priv = bus->priv;
+	struct fsl_pq_mii __iomem *regs = priv->regs;
 	u32 status;
+	u16 value;
 
 	/* Set the PHY address and the register address we want to read */
 	out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
 	/* Grab the value of the register from miimstat */
 	value = in_be32(&regs->miimstat);
 
+	dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
 	return value;
 }
 
-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
-	struct fsl_pq_mdio_priv *priv = bus->priv;
-
-	return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
-	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-	/* Write to the local MII regs */
-	return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-	/* Read the local MII regs */
-	return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
-	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+	struct fsl_pq_mdio_priv *priv = bus->priv;
+	struct fsl_pq_mii __iomem *regs = priv->regs;
 	u32 status;
 
 	mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 	mutex_unlock(&bus->mdio_lock);
 
 	if (!status) {
-		printk(KERN_ERR "%s: The MII Bus is stuck!\n",
-		       bus->name);
+		dev_err(&bus->dev, "timeout waiting for MII bus\n");
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
 {
-	const u32 *addr;
-	u64 taddr = OF_BAD_ADDR;
-
-	addr = of_get_address(np, 0, NULL, NULL);
-	if (addr)
-		taddr = of_translate_address(np, addr);
+	struct gfar __iomem *enet_regs = p;
 
-	snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
-		 (unsigned long long)taddr);
+	return &enet_regs->tbipa;
 }
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+	return p;
+}
+#endif
 
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-	struct gfar __iomem *enet_regs;
+	struct fsl_pq_mdio __iomem *mdio = p;
 
-	/*
-	 * This is mildly evil, but so is our hardware for doing this.
-	 * Also, we have to cast back to struct gfar because of
-	 * definition weirdness done in gianfar.h.
-	 */
-	if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-		of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-		of_device_is_compatible(np, "gianfar")) {
-		enet_regs = (struct gfar __iomem *)regs;
-		return &enet_regs->tbipa;
-	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
-		return of_iomap(np, 1);
-	}
-#endif
-	return NULL;
+	return &mdio->utbipar;
 }
 
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them.  Therefore, we need to scan all UCC nodes looking for
+ * the one that encompasses the given MDIO node.  We do this by comparing
+ * physical addresses.  The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
 {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+	static bool found_mii_master;
 	struct device_node *np = NULL;
-	int err = 0;
 
-	for_each_compatible_node(np, NULL, "ucc_geth") {
-		struct resource tempres;
+	if (found_mii_master)
+		return;
 
-		err = of_address_to_resource(np, 0, &tempres);
-		if (err)
+	for_each_compatible_node(np, NULL, "ucc_geth") {
+		struct resource res;
+		const uint32_t *iprop;
+		uint32_t id;
+		int ret;
+
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret < 0) {
+			pr_debug("fsl-pq-mdio: no address range in node %s\n",
+				 np->full_name);
 			continue;
+		}
 
 		/* if our mdio regs fall within this UCC regs range */
-		if ((start >= tempres.start) && (end <= tempres.end)) {
-			/* Find the id of the UCC */
-			const u32 *id;
-
-			id = of_get_property(np, "cell-index", NULL);
-			if (!id) {
-				id = of_get_property(np, "device-id", NULL);
-				if (!id)
-					continue;
+		if ((start < res.start) || (end > res.end))
+			continue;
+
+		iprop = of_get_property(np, "cell-index", NULL);
+		if (!iprop) {
+			iprop = of_get_property(np, "device-id", NULL);
+			if (!iprop) {
+				pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+					 np->full_name);
+				continue;
 			}
+		}
 
-			*ucc_id = *id;
+		id = be32_to_cpup(iprop);
 
-			return 0;
+		/*
+		 * cell-index and device-id for QE nodes are
+		 * numbered from 1, not 0.
+		 */
+		if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+			pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+				 np->full_name);
+			continue;
 		}
+
+		pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+		found_mii_master = true;
 	}
+}
 
-	if (err)
-		return err;
-	else
-		return -EINVAL;
-#else
-	return -ENODEV;
 #endif
-}
 
-static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+static struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+	{
+		.compatible = "fsl,gianfar-tbi",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_gfar_tbipa,
+		},
+	},
+	{
+		.compatible = "fsl,gianfar-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_gfar_tbipa,
+		},
+	},
+	{
+		.type = "mdio",
+		.compatible = "gianfar",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
+			.get_tbipa = get_gfar_tbipa,
+		},
+	},
+	{
+		.compatible = "fsl,etsec2-tbi",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
+			.get_tbipa = get_etsec_tbipa,
+		},
+	},
+	{
+		.compatible = "fsl,etsec2-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
+			.get_tbipa = get_etsec_tbipa,
+		},
+	},
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+	{
+		.compatible = "fsl,ucc-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_ucc_tbipa,
+			.ucc_configure = ucc_configure,
+		},
+	},
+	{
+		/* Legacy UCC MDIO node */
+		.type = "mdio",
+		.compatible = "ucc_geth_phy",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_ucc_tbipa,
+			.ucc_configure = ucc_configure,
+		},
+	},
+#endif
+	/* No Kconfig option for Fman support yet */
+	{
+		.compatible = "fsl,fman-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			/* Fman TBI operations are handled elsewhere */
+		},
+	},
+
+	{},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
-	struct device_node *np = ofdev->dev.of_node;
+	const struct of_device_id *id =
+		of_match_device(fsl_pq_mdio_match, &pdev->dev);
+	const struct fsl_pq_mdio_data *data = id->data;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource res;
 	struct device_node *tbi;
 	struct fsl_pq_mdio_priv *priv;
-	struct fsl_pq_mdio __iomem *regs = NULL;
-	void __iomem *map;
-	u32 __iomem *tbipa;
 	struct mii_bus *new_bus;
-	int tbiaddr = -1;
-	const u32 *addrp;
-	u64 addr = 0, size = 0;
 	int err;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
+	dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
-	new_bus = mdiobus_alloc();
-	if (!new_bus) {
-		err = -ENOMEM;
-		goto err_free_priv;
-	}
+	new_bus = mdiobus_alloc_size(sizeof(*priv));
+	if (!new_bus)
+		return -ENOMEM;
 
+	priv = new_bus->priv;
 	new_bus->name = "Freescale PowerQUICC MII Bus",
-	new_bus->read = &fsl_pq_mdio_read,
-	new_bus->write = &fsl_pq_mdio_write,
-	new_bus->reset = &fsl_pq_mdio_reset,
-	new_bus->priv = priv;
-	fsl_pq_mdio_bus_name(new_bus->id, np);
-
-	addrp = of_get_address(np, 0, &size, NULL);
-	if (!addrp) {
-		err = -EINVAL;
-		goto err_free_bus;
+	new_bus->read = &fsl_pq_mdio_read;
+	new_bus->write = &fsl_pq_mdio_write;
+	new_bus->reset = &fsl_pq_mdio_reset;
+	new_bus->irq = priv->irqs;
+
+	err = of_address_to_resource(np, 0, &res);
+	if (err < 0) {
+		dev_err(&pdev->dev, "could not obtain address information\n");
+		goto error;
 	}
 
-	/* Set the PHY base address */
-	addr = of_translate_address(np, addrp);
-	if (addr == OF_BAD_ADDR) {
-		err = -EINVAL;
-		goto err_free_bus;
-	}
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+		 (unsigned long long)res.start);
 
-	map = ioremap(addr, size);
-	if (!map) {
+	priv->map = of_iomap(np, 0);
+	if (!priv->map) {
 		err = -ENOMEM;
-		goto err_free_bus;
+		goto error;
 	}
-	priv->map = map;
-
-	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-			of_device_is_compatible(np, "fsl,ucc-mdio") ||
-			of_device_is_compatible(np, "ucc_geth_phy"))
-		map -= offsetof(struct fsl_pq_mdio, miimcfg);
-	regs = map;
-	priv->regs = regs;
-
-	new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 
-	if (NULL == new_bus->irq) {
-		err = -ENOMEM;
-		goto err_unmap_regs;
+	/*
+	 * Some device tree nodes represent only the MII registers, and
+	 * others represent the MAC and MII registers.  The 'mii_offset' field
+	 * contains the offset of the MII registers inside the mapped register
+	 * space.
+	 */
+	if (data->mii_offset > resource_size(&res)) {
+		dev_err(&pdev->dev, "invalid register map\n");
+		err = -EINVAL;
+		goto error;
 	}
+	priv->regs = priv->map + data->mii_offset;
 
-	new_bus->parent = &ofdev->dev;
-	dev_set_drvdata(&ofdev->dev, new_bus);
-
-	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
-			of_device_is_compatible(np, "gianfar")) {
-		tbipa = get_gfar_tbipa(regs, np);
-		if (!tbipa) {
-			err = -EINVAL;
-			goto err_free_irqs;
-		}
-	} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
-			of_device_is_compatible(np, "ucc_geth_phy")) {
-		u32 id;
-		static u32 mii_mng_master;
-
-		tbipa = &regs->utbipar;
-
-		if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
-			goto err_free_irqs;
+	new_bus->parent = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, new_bus);
 
-		if (!mii_mng_master) {
-			mii_mng_master = id;
-			ucc_set_qe_mux_mii_mng(id - 1);
+	if (data->get_tbipa) {
+		for_each_child_of_node(np, tbi) {
+			if (strcmp(tbi->type, "tbi-phy") == 0) {
+				dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+					strrchr(tbi->full_name, '/') + 1);
+				break;
+			}
 		}
-	} else {
-		err = -ENODEV;
-		goto err_free_irqs;
-	}
 
-	for_each_child_of_node(np, tbi) {
-		if (!strncmp(tbi->type, "tbi-phy", 8))
-			break;
-	}
+		if (tbi) {
+			const u32 *prop = of_get_property(tbi, "reg", NULL);
+			uint32_t __iomem *tbipa;
 
-	if (tbi) {
-		const u32 *prop = of_get_property(tbi, "reg", NULL);
+			if (!prop) {
+				dev_err(&pdev->dev,
+					"missing 'reg' property in node %s\n",
+					tbi->full_name);
+				err = -EBUSY;
+				goto error;
+			}
 
-		if (prop)
-			tbiaddr = *prop;
+			tbipa = data->get_tbipa(priv->map);
 
-		if (tbiaddr == -1) {
-			err = -EBUSY;
-			goto err_free_irqs;
-		} else {
-			out_be32(tbipa, tbiaddr);
+			out_be32(tbipa, be32_to_cpup(prop));
 		}
 	}
 
+	if (data->ucc_configure)
+		data->ucc_configure(res.start, res.end);
+
 	err = of_mdiobus_register(new_bus, np);
 	if (err) {
-		printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+		dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
 			new_bus->name);
-		goto err_free_irqs;
+		goto error;
 	}
 
 	return 0;
 
-err_free_irqs:
-	kfree(new_bus->irq);
-err_unmap_regs:
-	iounmap(priv->map);
-err_free_bus:
+error:
+	if (priv->map)
+		iounmap(priv->map);
+
 	kfree(new_bus);
-err_free_priv:
-	kfree(priv);
+
 	return err;
 }
 
 
-static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
 {
-	struct device *device = &ofdev->dev;
+	struct device *device = &pdev->dev;
 	struct mii_bus *bus = dev_get_drvdata(device);
 	struct fsl_pq_mdio_priv *priv = bus->priv;
 
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
 	dev_set_drvdata(device, NULL);
 
 	iounmap(priv->map);
-	bus->priv = NULL;
 	mdiobus_free(bus);
-	kfree(priv);
 
 	return 0;
 }
 
-static struct of_device_id fsl_pq_mdio_match[] = {
-	{
-		.type = "mdio",
-		.compatible = "ucc_geth_phy",
-	},
-	{
-		.type = "mdio",
-		.compatible = "gianfar",
-	},
-	{
-		.compatible = "fsl,ucc-mdio",
-	},
-	{
-		.compatible = "fsl,gianfar-tbi",
-	},
-	{
-		.compatible = "fsl,gianfar-mdio",
-	},
-	{
-		.compatible = "fsl,etsec2-tbi",
-	},
-	{
-		.compatible = "fsl,etsec2-mdio",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-
 static struct platform_driver fsl_pq_mdio_driver = {
 	.driver = {
 		.name = "fsl-pq_mdio",
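
The fsl_pq_mdio.c rewrite replaces chains of of_device_is_compatible() tests in probe() with per-compatible driver data: each of_device_id entry carries a struct fsl_pq_mdio_data describing the MII register offset and TBIPA lookup for that node type, and probe() retrieves it through of_match_device(). A condensed sketch of the same pattern under hypothetical names (foo_quirks, "vendor,foo-v1"):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_quirks {			/* hypothetical per-variant data */
	unsigned int reg_offset;
};

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-v1",	/* hypothetical strings */
	  .data = &(struct foo_quirks){ .reg_offset = 0 } },
	{ .compatible = "vendor,foo-v2",
	  .data = &(struct foo_quirks){ .reg_offset = 0x520 } },
	{},
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *id =
		of_match_device(foo_match, &pdev->dev);
	const struct foo_quirks *q = id ? id->data : NULL;

	if (!q)
		return -EINVAL;
	/* ... map registers at q->reg_offset and register the device ... */
	return 0;
}

This keeps every variant-specific decision in the match table, so adding a new compatible string never touches the probe logic itself.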
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644
index bd17a2a0139b..000000000000
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller on Freescale PowerQUICC processors
- *
- * Author: Andy Fleming
- * Modifier: Sandeep Gopalpet
- *
- * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#ifndef __FSL_PQ_MDIO_H
-#define __FSL_PQ_MDIO_H
-
-#define MIIMIND_BUSY		0x00000001
-#define MIIMIND_NOTVALID	0x00000004
-#define MIIMCFG_INIT_VALUE	0x00000007
-#define MIIMCFG_RESET		0x80000000
-
-#define MII_READ_COMMAND	0x00000001
-
-struct fsl_pq_mdio {
-	u8 res1[16];
-	u32 ieventm;	/* MDIO Interrupt event register (for etsec2)*/
-	u32 imaskm;	/* MDIO Interrupt mask register (for etsec2)*/
-	u8 res2[4];
-	u32 emapm;	/* MDIO Event mapping register (for etsec2)*/
-	u8 res3[1280];
-	u32 miimcfg;	/* MII management configuration reg */
-	u32 miimcom;	/* MII management command reg */
-	u32 miimadd;	/* MII management address reg */
-	u32 miimcon;	/* MII management control reg */
-	u32 miimstat;	/* MII management status reg */
-	u32 miimind;	/* MII management indication reg */
-	u8 reserved[28];	/* Space holder */
-	u32 utbipar;	/* TBI phy address reg (only on UCC) */
-	u8 res4[2728];
-} __packed;
-
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-		int regnum, u16 value);
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
-int __init fsl_pq_mdio_init(void);
-void fsl_pq_mdio_exit(void);
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
-#endif /* FSL_PQ_MDIO_H */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4605f7246687..4d5b58ce1298 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -100,7 +100,6 @@
 #include <linux/of_net.h>
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
 
 #define TX_TIMEOUT      (1*HZ)
 
@@ -1041,7 +1040,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
 		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->features |= NETIF_F_HW_VLAN_RX;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 21c6574c5f15..164288439220 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
-#include "fsl_pq_mdio.h"
 
 #undef DEBUG
 
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 000000000000..1afb5ea2a984
--- /dev/null
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ *          Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT	1000
+
+struct tgec_mdio_controller {
+	__be32	reserved[12];
+	__be32	mdio_stat;	/* MDIO configuration and status */
+	__be32	mdio_ctl;	/* MDIO control */
+	__be32	mdio_data;	/* MDIO data */
+	__be32	mdio_addr;	/* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x)	(((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY		(1 << 0)
+#define MDIO_STAT_RD_ER		(1 << 1)
+#define MDIO_CTL_DEV_ADDR(x)	(x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)	((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS	(1 << 10)
+#define MDIO_CTL_SCAN_EN	(1 << 11)
+#define MDIO_CTL_POST_INC	(1 << 14)
+#define MDIO_CTL_READ		(1 << 15)
+
+#define MDIO_DATA(x)		(x & 0xffff)
+#define MDIO_DATA_BSY		(1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+				 struct tgec_mdio_controller __iomem *regs)
+{
+	uint32_t status;
+
+	/* Wait till the bus is free */
+	status = spin_event_timeout(
+		!((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+	if (!status) {
+		dev_err(dev, "timeout waiting for bus to be free\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+				 struct tgec_mdio_controller __iomem *regs)
+{
+	uint32_t status;
+
+	/* Wait till the MDIO write is complete */
+	status = spin_event_timeout(
+		!((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+	if (!status) {
+		dev_err(dev, "timeout waiting for operation to complete\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns.  All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	uint16_t dev_addr = regnum >> 16;
+	int ret;
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Set the port and dev addr */
+	out_be32(&regs->mdio_ctl,
+		 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+	/* Set the register address */
+	out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Write the value to the register */
+	out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+	ret = xgmac_wait_until_done(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first.  All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	uint16_t dev_addr = regnum >> 16;
+	uint32_t mdio_ctl;
+	uint16_t value;
+	int ret;
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Set the Port and Device Addrs */
+	mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+	out_be32(&regs->mdio_ctl, mdio_ctl);
+
+	/* Set the register address */
+	out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Initiate the read */
+	out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+	ret = xgmac_wait_until_done(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Return all Fs if nothing was there */
+	if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+		dev_err(&bus->dev, "MDIO read error\n");
+		return 0xffff;
+	}
+
+	value = in_be32(&regs->mdio_data) & 0xffff;
+	dev_dbg(&bus->dev, "read %04x\n", value);
+
+	return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	int ret;
+
+	mutex_lock(&bus->mdio_lock);
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+
+	mutex_unlock(&bus->mdio_lock);
+
+	return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mii_bus *bus;
+	struct resource res;
+	int ret;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret) {
+		dev_err(&pdev->dev, "could not obtain address\n");
+		return ret;
+	}
+
+	bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = "Freescale XGMAC MDIO Bus";
+	bus->read = xgmac_mdio_read;
+	bus->write = xgmac_mdio_write;
+	bus->reset = xgmac_mdio_reset;
+	bus->irq = bus->priv;
+	bus->parent = &pdev->dev;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+	/* Set the PHY base address */
+	bus->priv = of_iomap(np, 0);
+	if (!bus->priv) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	ret = of_mdiobus_register(bus, np);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot register MDIO bus\n");
+		goto err_registration;
+	}
+
+	dev_set_drvdata(&pdev->dev, bus);
+
+	return 0;
+
+err_registration:
+	iounmap(bus->priv);
+
+err_ioremap:
+	mdiobus_free(bus);
+
+	return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+	struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+	mdiobus_unregister(bus);
+	iounmap(bus->priv);
+	mdiobus_free(bus);
+
+	return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+	{
+		.compatible = "fsl,fman-xmdio",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+	.driver = {
+		.name = "fsl-fman_xmdio",
+		.of_match_table = xgmac_mdio_match,
+	},
+	.probe = xgmac_mdio_probe,
+	.remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
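
xgmac_mdio_read() and xgmac_mdio_write() above take the MMD device address from the upper bits of regnum (dev_addr = regnum >> 16). This lines up with phylib's clause-45 encoding, where a C45 access sets the MII_ADDR_C45 flag bit and packs the device address into bits 20:16 of the register argument, with the low 16 bits selecting the register within that MMD (see <linux/mdio.h>). A hypothetical helper pair making that split explicit:

#include <linux/mdio.h>

/* Extract the MMD device address from a phylib C45 regnum. */
static inline u16 c45_devad(int regnum)
{
	return (regnum >> 16) & 0x1f;
}

/* Extract the 16-bit register index within the MMD. */
static inline u16 c45_reg(int regnum)
{
	return regnum & 0xffff;
}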
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 080c89093feb..c98586408005 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
 **/
 static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-	u16 data = er32(POEMB);
+	u32 data = er32(POEMB);
 
 	if (active)
 		data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 **/
 static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-	u16 data = er32(POEMB);
+	u32 data = er32(POEMB);
 
 	if (!active) {
 		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cd153326c3cf..cb3356c9af80 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -310,6 +310,7 @@ struct e1000_adapter {
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
+	u32 tx_fifo_limit;
 
 	struct napi_struct napi;
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 2e76f06720fd..c11ac2756667 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1942,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 
 	if (ec->rx_coalesce_usecs == 4) {
-		adapter->itr = adapter->itr_setting = 4;
+		adapter->itr_setting = 4;
+		adapter->itr = adapter->itr_setting;
 	} else if (ec->rx_coalesce_usecs <= 3) {
 		adapter->itr = 20000;
 		adapter->itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 46c3b1f9ff89..121990cab144 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
 	/*
 	 * if short on Rx space, Rx wins and must trump Tx
-	 * adjustment or use Early Receive if available
+	 * adjustment
 	 */
 	if (pba < min_rx_space)
 		pba = min_rx_space;
@@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	}
 
 	/*
+	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	 * maximum size per Tx descriptor limited only to the transmit
+	 * allocation of the packet buffer minus 96 bytes with an upper
+	 * limit of 24KB due to receive synchronization limitations.
+	 */
+	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+				       24 << 10);
+
+	/*
 	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 	 * fit in receive buffer.
 	 */
@@ -3746,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
 	e_dbg("icr is %08X\n", icr);
 	if (icr & E1000_ICR_RXSEQ) {
 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+		/*
+		 * Force memory writes to complete before acknowledging the
+		 * interrupt is handled.
+		 */
 		wmb();
 	}
 
@@ -3787,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 		goto msi_test_failed;
 	}
 
+	/*
+	 * Force memory writes to complete before enabling and firing an
+	 * interrupt.
+	 */
 	wmb();
 
 	e1000_irq_enable(adapter);
@@ -3798,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 
 	e1000_irq_disable(adapter);
 
-	rmb();
+	rmb();			/* read flags after interrupt has been fired */
 
 	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4661,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	struct e1000_buffer *buffer_info;
 	unsigned int i;
 	u32 cmd_length = 0;
-	u16 ipcse = 0, tucse, mss;
+	u16 ipcse = 0, mss;
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
 
 	if (!skb_is_gso(skb))
@@ -4695,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
 	tucss = skb_transport_offset(skb);
 	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-	tucse = 0;
 
 	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
 		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4709,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
 	context_desc->upper_setup.tcp_fields.tucss = tucss;
 	context_desc->upper_setup.tcp_fields.tucso = tucso;
-	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+	context_desc->upper_setup.tcp_fields.tucse = 0;
 	context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
 	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
 	context_desc->cmd_and_length = cpu_to_le32(cmd_length);
@@ -4785,12 +4801,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	return 1;
 }
 
-#define E1000_MAX_PER_TXD	8192
-#define E1000_MAX_TXD_PWR	12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 			unsigned int first, unsigned int max_per_txd,
-			unsigned int nr_frags, unsigned int mss)
+			unsigned int nr_frags)
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5036,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+	BUG_ON(size > tx_ring->count);
+
 	if (e1000_desc_unused(tx_ring) >= size)
 		return 0;
 	return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int first;
-	unsigned int max_per_txd = E1000_MAX_PER_TXD;
-	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
@@ -5056,18 +5068,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/*
-	 * The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer.  The calc is:
-	 * 4 = ceil(buffer len/mss).  To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops.
-	 */
 	if (mss) {
 		u8 hdr_len;
-		max_per_txd = min(mss << 2, max_per_txd);
-		max_txd_pwr = fls(max_per_txd) - 1;
 
 		/*
 		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5099,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		count++;
 	count++;
 
-	count += TXD_USE_COUNT(len, max_txd_pwr);
+	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-				       max_txd_pwr);
+		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+				      adapter->tx_fifo_limit);
 
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5146,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
 	/* if count is 0 then mapping error has occurred */
-	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+			     nr_frags);
 	if (count) {
 		skb_tx_timestamp(skb);
 
 		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+		e1000_maybe_stop_tx(tx_ring,
+				    (MAX_SKB_FRAGS *
+				     DIV_ROUND_UP(PAGE_SIZE,
+						  adapter->tx_fifo_limit) + 2));
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6332,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->hw.phy.autoneg_advertised = 0x2f;
 
 	/* ring size defaults */
-	adapter->rx_ring->count = 256;
-	adapter->tx_ring->count = 256;
+	adapter->rx_ring->count = E1000_DEFAULT_RXD;
+	adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
 	/*
 	 * Initial Wake on LAN setting - If APM wake is enabled in
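
The e1000_xmit_frame() hunks above replace the old power-of-two descriptor estimate with an exact ceiling division against the tx_fifo_limit computed in e1000e_reset(). TXD_USE_COUNT(S, X) expanded to ((S >> X) + 1), which always reserves one extra descriptor even when the length is an exact multiple; DIV_ROUND_UP() from <linux/kernel.h> does not. A small worked sketch, assuming a hypothetical 4096-byte limit:

#include <linux/kernel.h>

/* ceil(len / fifo_limit) descriptors for a buffer of 'len' bytes */
static unsigned int txd_count(unsigned int len, unsigned int fifo_limit)
{
	return DIV_ROUND_UP(len, fifo_limit);
}

/*
 * len = 4096: old (4096 >> 12) + 1 = 2 descriptors,
 *             new DIV_ROUND_UP(4096, 4096) = 1 descriptor.
 * len = 9000: new DIV_ROUND_UP(9000, 4096) = 3 descriptors.
 */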
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 98cadb0c4dab..eb26fda63c99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
 
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_RXBUFFER_3K    3072
+#define IXGBEVF_RXBUFFER_7K    7168
+#define IXGBEVF_RXBUFFER_15K   15360
 #define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 
 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
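
Each of the new buffer sizes is 1 KiB short of a power of two (4K/8K/16K), leaving room for skb overhead inside a power-of-two allocation; the bucket is chosen against the maximum frame size in ixgbevf_set_rx_buffer_len() further down in this diff. A hypothetical standalone version of that selection (omitting the X540-specific small-frame case the driver also handles):

static u16 pick_rx_buf_len(int max_frame)
{
	if (max_frame <= 3072)
		return 3072;	/* fits a 4K allocation */
	else if (max_frame <= 7168)
		return 7168;	/* fits an 8K allocation */
	else if (max_frame <= 15360)
		return 15360;	/* fits a 16K allocation */
	return 16384;		/* IXGBEVF_MAX_RXBUFFER */
}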
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 60ef64587412..a5d9cc5bb257 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1057,15 +1057,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1057 1057
1058 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1058 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1059 1059
1060 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1060 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1061 srrctl |= IXGBEVF_RXBUFFER_2048 >> 1061 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1062 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1062
1063 else
1064 srrctl |= rx_ring->rx_buf_len >>
1065 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1066 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1063 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1067} 1064}
1068 1065
1066static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1067{
1068 struct ixgbe_hw *hw = &adapter->hw;
1069 struct net_device *netdev = adapter->netdev;
1070 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1071 int i;
1072 u16 rx_buf_len;
1073
1074 /* notify the PF of our intent to use this size of frame */
1075 ixgbevf_rlpml_set_vf(hw, max_frame);
1076
1077 /* PF will allow an extra 4 bytes past for vlan tagged frames */
1078 max_frame += VLAN_HLEN;
1079
1080 /*
1081 * Make best use of allocation by using all but 1K of a
1082 * power of 2 allocation that will be used for skb->head.
1083 */
1084 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1085 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1086 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1087 else if (max_frame <= IXGBEVF_RXBUFFER_3K)
1088 rx_buf_len = IXGBEVF_RXBUFFER_3K;
1089 else if (max_frame <= IXGBEVF_RXBUFFER_7K)
1090 rx_buf_len = IXGBEVF_RXBUFFER_7K;
1091 else if (max_frame <= IXGBEVF_RXBUFFER_15K)
1092 rx_buf_len = IXGBEVF_RXBUFFER_15K;
1093 else
1094 rx_buf_len = IXGBEVF_MAX_RXBUFFER;
1095
1096 for (i = 0; i < adapter->num_rx_queues; i++)
1097 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1098}
1099
1069/** 1100/**
1070 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1101 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1071 * @adapter: board private structure 1102 * @adapter: board private structure
@@ -1076,18 +1107,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1076{ 1107{
1077 u64 rdba; 1108 u64 rdba;
1078 struct ixgbe_hw *hw = &adapter->hw; 1109 struct ixgbe_hw *hw = &adapter->hw;
1079 struct net_device *netdev = adapter->netdev;
1080 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1081 int i, j; 1110 int i, j;
1082 u32 rdlen; 1111 u32 rdlen;
1083 int rx_buf_len;
1084 1112
1085 /* PSRTYPE must be initialized in 82599 */ 1113 /* PSRTYPE must be initialized in 82599 */
1086 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1114 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1087 if (netdev->mtu <= ETH_DATA_LEN) 1115
1088 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1116 /* set_rx_buffer_len must be called before ring initialization */
1089 else 1117 ixgbevf_set_rx_buffer_len(adapter);
1090 rx_buf_len = ALIGN(max_frame, 1024);
1091 1118
1092 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1119 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1093 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1120 /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1130,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1103 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1130 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1104 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1131 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1105 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1132 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1106 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1107 1133
1108 ixgbevf_configure_srrctl(adapter, j); 1134 ixgbevf_configure_srrctl(adapter, j);
1109 } 1135 }
@@ -1315,7 +1341,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1315 int i, j = 0; 1341 int i, j = 0;
1316 int num_rx_rings = adapter->num_rx_queues; 1342 int num_rx_rings = adapter->num_rx_queues;
1317 u32 txdctl, rxdctl; 1343 u32 txdctl, rxdctl;
1318 u32 msg[2];
1319 1344
1320 for (i = 0; i < adapter->num_tx_queues; i++) { 1345 for (i = 0; i < adapter->num_tx_queues; i++) {
1321 j = adapter->tx_ring[i].reg_idx; 1346 j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1381,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1356 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1381 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1357 } 1382 }
1358 1383
1359 msg[0] = IXGBE_VF_SET_LPE;
1360 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1361 hw->mbx.ops.write_posted(hw, msg, 2);
1362
1363 spin_unlock(&adapter->mbx_lock); 1384 spin_unlock(&adapter->mbx_lock);
1364 1385
1365 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1386 clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1867,6 +1888,22 @@ err_set_interrupt:
1867} 1888}
1868 1889
1869/** 1890/**
1891 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1892 * @adapter: board private structure to clear interrupt scheme on
1893 *
1894 * Clear all interrupt-specific resources and reset the structure
1895 * to its pre-load state
1896 **/
1897static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1898{
1899 adapter->num_tx_queues = 0;
1900 adapter->num_rx_queues = 0;
1901
1902 ixgbevf_free_q_vectors(adapter);
1903 ixgbevf_reset_interrupt_capability(adapter);
1904}
1905
1906/**
1870 * ixgbevf_sw_init - Initialize general software structures 1907 * ixgbevf_sw_init - Initialize general software structures
1871 * (struct ixgbevf_adapter) 1908 * (struct ixgbevf_adapter)
1872 * @adapter: board private structure to initialize 1909 * @adapter: board private structure to initialize
@@ -2860,10 +2897,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2860static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 2897static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2861{ 2898{
2862 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2899 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2863 struct ixgbe_hw *hw = &adapter->hw;
2864 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2900 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2865 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 2901 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2866 u32 msg[2];
2867 2902
2868 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 2903 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2869 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 2904 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2912,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2877 /* must set new MTU before calling down or up */ 2912 /* must set new MTU before calling down or up */
2878 netdev->mtu = new_mtu; 2913 netdev->mtu = new_mtu;
2879 2914
2880 if (!netif_running(netdev)) {
2881 msg[0] = IXGBE_VF_SET_LPE;
2882 msg[1] = max_frame;
2883 hw->mbx.ops.write_posted(hw, msg, 2);
2884 }
2885
2886 if (netif_running(netdev)) 2915 if (netif_running(netdev))
2887 ixgbevf_reinit_locked(adapter); 2916 ixgbevf_reinit_locked(adapter);
2888 2917
2889 return 0; 2918 return 0;
2890} 2919}
2891 2920
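For reference, the size check in ixgbevf_change_mtu() works out as follows for a standard MTU; the 68-byte floor and the 9728-byte value for IXGBE_MAX_JUMBO_FRAME_SIZE are assumptions not shown in this patch:

    static int frame_size_ok(int new_mtu, int is_x540_vf)
    {
            int max_frame = new_mtu + 14 + 4;  /* ETH_HLEN + ETH_FCS_LEN */
            int max_possible = is_x540_vf ? 9728 : 1522;

            /* a 1500-byte MTU gives 1518 <= 1522, so it passes on any VF */
            return new_mtu >= 68 && max_frame <= max_possible;
    }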
2892static void ixgbevf_shutdown(struct pci_dev *pdev) 2921static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
2893{ 2922{
2894 struct net_device *netdev = pci_get_drvdata(pdev); 2923 struct net_device *netdev = pci_get_drvdata(pdev);
2895 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2924 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2925#ifdef CONFIG_PM
2926 int retval = 0;
2927#endif
2896 2928
2897 netif_device_detach(netdev); 2929 netif_device_detach(netdev);
2898 2930
2899 if (netif_running(netdev)) { 2931 if (netif_running(netdev)) {
2932 rtnl_lock();
2900 ixgbevf_down(adapter); 2933 ixgbevf_down(adapter);
2901 ixgbevf_free_irq(adapter); 2934 ixgbevf_free_irq(adapter);
2902 ixgbevf_free_all_tx_resources(adapter); 2935 ixgbevf_free_all_tx_resources(adapter);
2903 ixgbevf_free_all_rx_resources(adapter); 2936 ixgbevf_free_all_rx_resources(adapter);
2937 rtnl_unlock();
2904 } 2938 }
2905 2939
2906 pci_save_state(pdev); 2940 ixgbevf_clear_interrupt_scheme(adapter);
2941
2942#ifdef CONFIG_PM
2943 retval = pci_save_state(pdev);
2944 if (retval)
2945 return retval;
2907 2946
2947#endif
2908 pci_disable_device(pdev); 2948 pci_disable_device(pdev);
2949
2950 return 0;
2951}
2952
2953#ifdef CONFIG_PM
2954static int ixgbevf_resume(struct pci_dev *pdev)
2955{
2956 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
2957 struct net_device *netdev = adapter->netdev;
2958	int err;
2959
2960 pci_set_power_state(pdev, PCI_D0);
2961 pci_restore_state(pdev);
2962 /*
2963 * pci_restore_state clears dev->state_saved so call
2964 * pci_save_state to restore it.
2965 */
2966 pci_save_state(pdev);
2967
2968 err = pci_enable_device_mem(pdev);
2969 if (err) {
2970 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2971 return err;
2972 }
2973 pci_set_master(pdev);
2974
2975 rtnl_lock();
2976 err = ixgbevf_init_interrupt_scheme(adapter);
2977 rtnl_unlock();
2978 if (err) {
2979 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
2980 return err;
2981 }
2982
2983 ixgbevf_reset(adapter);
2984
2985 if (netif_running(netdev)) {
2986 err = ixgbevf_open(netdev);
2987 if (err)
2988 return err;
2989 }
2990
2991 netif_device_attach(netdev);
2992
2993 return err;
2994}
2995
2996#endif /* CONFIG_PM */
2997static void ixgbevf_shutdown(struct pci_dev *pdev)
2998{
2999 ixgbevf_suspend(pdev, PMSG_SUSPEND);
2909} 3000}
2910 3001
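The resume path above follows the usual legacy PCI power-management ordering; the extra pci_save_state() call looks redundant but is needed because pci_restore_state() clears dev->state_saved. A reduced sketch of that ordering, with the driver-specific reinitialisation elided:

    #include <linux/pci.h>

    static int legacy_resume_skeleton(struct pci_dev *pdev)
    {
            int err;

            pci_set_power_state(pdev, PCI_D0);
            pci_restore_state(pdev);
            pci_save_state(pdev);   /* restore_state cleared state_saved */

            err = pci_enable_device_mem(pdev);
            if (err)
                    return err;
            pci_set_master(pdev);

            /* then: reinit interrupts under rtnl_lock(), reset the HW,
             * reopen the interface if it was running, and finally
             * netif_device_attach() */
            return 0;
    }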
2911static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3002static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3037,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
2946 return stats; 3037 return stats;
2947} 3038}
2948 3039
2949static const struct net_device_ops ixgbe_netdev_ops = { 3040static const struct net_device_ops ixgbevf_netdev_ops = {
2950 .ndo_open = ixgbevf_open, 3041 .ndo_open = ixgbevf_open,
2951 .ndo_stop = ixgbevf_close, 3042 .ndo_stop = ixgbevf_close,
2952 .ndo_start_xmit = ixgbevf_xmit_frame, 3043 .ndo_start_xmit = ixgbevf_xmit_frame,
@@ -2962,7 +3053,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
2962 3053
2963static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3054static void ixgbevf_assign_netdev_ops(struct net_device *dev)
2964{ 3055{
2965 dev->netdev_ops = &ixgbe_netdev_ops; 3056 dev->netdev_ops = &ixgbevf_netdev_ops;
2966 ixgbevf_set_ethtool_ops(dev); 3057 ixgbevf_set_ethtool_ops(dev);
2967 dev->watchdog_timeo = 5 * HZ; 3058 dev->watchdog_timeo = 5 * HZ;
2968} 3059}
@@ -3131,6 +3222,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3131 return 0; 3222 return 0;
3132 3223
3133err_register: 3224err_register:
3225 ixgbevf_clear_interrupt_scheme(adapter);
3134err_sw_init: 3226err_sw_init:
3135 ixgbevf_reset_interrupt_capability(adapter); 3227 ixgbevf_reset_interrupt_capability(adapter);
3136 iounmap(hw->hw_addr); 3228 iounmap(hw->hw_addr);
@@ -3168,6 +3260,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3168 if (netdev->reg_state == NETREG_REGISTERED) 3260 if (netdev->reg_state == NETREG_REGISTERED)
3169 unregister_netdev(netdev); 3261 unregister_netdev(netdev);
3170 3262
3263 ixgbevf_clear_interrupt_scheme(adapter);
3171 ixgbevf_reset_interrupt_capability(adapter); 3264 ixgbevf_reset_interrupt_capability(adapter);
3172 3265
3173 iounmap(adapter->hw.hw_addr); 3266 iounmap(adapter->hw.hw_addr);
@@ -3267,6 +3360,11 @@ static struct pci_driver ixgbevf_driver = {
3267 .id_table = ixgbevf_pci_tbl, 3360 .id_table = ixgbevf_pci_tbl,
3268 .probe = ixgbevf_probe, 3361 .probe = ixgbevf_probe,
3269 .remove = __devexit_p(ixgbevf_remove), 3362 .remove = __devexit_p(ixgbevf_remove),
3363#ifdef CONFIG_PM
3364 /* Power Management Hooks */
3365 .suspend = ixgbevf_suspend,
3366 .resume = ixgbevf_resume,
3367#endif
3270 .shutdown = ixgbevf_shutdown, 3368 .shutdown = ixgbevf_shutdown,
3271 .err_handler = &ixgbevf_err_handler 3369 .err_handler = &ixgbevf_err_handler
3272}; 3370};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index ec89b86f7ca4..3d555a10f592 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -419,6 +419,20 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
419 return 0; 419 return 0;
420} 420}
421 421
422/**
423 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
424 * @hw: pointer to the HW structure
425 * @max_size: value to assign to max frame size
426 **/
427void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
428{
429 u32 msgbuf[2];
430
431 msgbuf[0] = IXGBE_VF_SET_LPE;
432 msgbuf[1] = max_size;
433 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
434}
435
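The VF cannot program the receive-length register itself, so the helper above packs a two-word mailbox request to the PF; note it uses write_msg_read_ack rather than a posted write, so the caller only proceeds once the PF has acknowledged the new size. A sketch of the message layout, with the numeric IXGBE_VF_SET_LPE opcode assumed:

    #include <linux/types.h>

    #define VF_SET_LPE 0x5          /* assumed opcode value */

    static void build_set_lpe_msg(u32 msg[2], u16 max_size)
    {
            msg[0] = VF_SET_LPE;    /* command: set large packet size */
            msg[1] = max_size;      /* max receive packet length, bytes */
    }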
422static const struct ixgbe_mac_operations ixgbevf_mac_ops = { 436static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
423 .init_hw = ixgbevf_init_hw_vf, 437 .init_hw = ixgbevf_init_hw_vf,
424 .reset_hw = ixgbevf_reset_hw_vf, 438 .reset_hw = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 25c951daee5d..07fd87688e35 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -170,5 +170,6 @@ struct ixgbevf_info {
170 const struct ixgbe_mac_operations *mac_ops; 170 const struct ixgbe_mac_operations *mac_ops;
171}; 171};
172 172
173void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
173#endif /* __IXGBE_VF_H__ */ 174#endif /* __IXGBE_VF_H__ */
174 175
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f45def01a98e..876beceaf2d7 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3409,7 +3409,7 @@ set_speed:
3409 3409
3410 pause_flags = 0; 3410 pause_flags = 0;
3411 /* setup pause frame */ 3411 /* setup pause frame */
3412 if (np->duplex != 0) { 3412 if (netif_running(dev) && (np->duplex != 0)) {
3413 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3413 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3414 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 3414 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3415 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 3415 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4435 4435
4436 regs->version = FORCEDETH_REGS_VER; 4436 regs->version = FORCEDETH_REGS_VER;
4437 spin_lock_irq(&np->lock); 4437 spin_lock_irq(&np->lock);
4438 for (i = 0; i <= np->register_size/sizeof(u32); i++) 4438 for (i = 0; i < np->register_size/sizeof(u32); i++)
4439 rbuf[i] = readl(base + i*sizeof(u32)); 4439 rbuf[i] = readl(base + i*sizeof(u32));
4440 spin_unlock_irq(&np->lock); 4440 spin_unlock_irq(&np->lock);
4441} 4441}
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
5455 5455
5456 netif_stop_queue(dev); 5456 netif_stop_queue(dev);
5457 spin_lock_irq(&np->lock); 5457 spin_lock_irq(&np->lock);
5458 nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5458 nv_stop_rxtx(dev); 5459 nv_stop_rxtx(dev);
5459 nv_txrx_reset(dev); 5460 nv_txrx_reset(dev);
5460 5461
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5904 goto out_error; 5905 goto out_error;
5905 } 5906 }
5906 5907
5908 netif_carrier_off(dev);
5909
5910 /* Some NICs freeze when TX pause is enabled while NIC is
5911 * down, and this stays across warm reboots. The sequence
5912 * below should be enough to recover from that state.
5913 */
5914 nv_update_pause(dev, 0);
5915 nv_start_tx(dev);
5916 nv_stop_tx(dev);
5917
5907 if (id->driver_data & DEV_HAS_VLAN) 5918 if (id->driver_data & DEV_HAS_VLAN)
5908 nv_vlan_mode(dev, dev->features); 5919 nv_vlan_mode(dev, dev->features);
5909 5920
5910 netif_carrier_off(dev);
5911
5912 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5921 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5913 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 5922 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5914 5923
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b47d5b35024e..0c96604e6246 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_DLINK, 0x4300,
291 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
290 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 292 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
291 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, 293 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
292 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, 294 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
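The new D-Link entry matches on subsystem IDs as well, and it must sit before the wildcard PCI_DEVICE() line for the same 0x1186:0x4300 pair: the PCI core walks the table in order, and PCI_DEVICE() leaves subvendor/subdevice as PCI_ANY_ID, so the specific entry would never be reached the other way round. A sketch of the ordering, with RTL_CFG_* reduced to plain numbers:

    #include <linux/pci.h>

    static const struct pci_device_id example_tbl[] = {
            /* specific subsystem match first: vendor, device, subvendor,
             * subdevice, class, class_mask, driver_data */
            { 0x1186, 0x4300, 0x1186, 0x4b10, 0, 0, 1 },
            /* wildcard (PCI_ANY_ID subsystem) afterwards */
            { PCI_DEVICE(0x1186, 0x4300), 0, 0, 0 },
            { }
    };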
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65a8d49106a4..a606db43c5ba 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
202 202
203#define EFX_ASSERT_RESET_SERIALISED(efx) \ 203#define EFX_ASSERT_RESET_SERIALISED(efx) \
204 do { \ 204 do { \
205 if ((efx->state == STATE_RUNNING) || \ 205 if ((efx->state == STATE_READY) || \
206 (efx->state == STATE_DISABLED)) \ 206 (efx->state == STATE_DISABLED)) \
207 ASSERT_RTNL(); \ 207 ASSERT_RTNL(); \
208 } while (0) 208 } while (0)
209 209
210static int efx_check_disabled(struct efx_nic *efx)
211{
212 if (efx->state == STATE_DISABLED) {
213 netif_err(efx, drv, efx->net_dev,
214 "device is disabled due to earlier errors\n");
215 return -EIO;
216 }
217 return 0;
218}
219
210/************************************************************************** 220/**************************************************************************
211 * 221 *
212 * Event queue processing 222 * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
630 efx->rx_buffer_order = get_order(efx->rx_buffer_len + 640 efx->rx_buffer_order = get_order(efx->rx_buffer_len +
631 sizeof(struct efx_rx_page_state)); 641 sizeof(struct efx_rx_page_state));
632 642
643 /* We must keep at least one descriptor in a TX ring empty.
644 * We could avoid this when the queue size does not exactly
645 * match the hardware ring size, but it's not that important.
646 * Therefore we stop the queue when one more skb might fill
647 * the ring completely. We wake it when half way back to
648 * empty.
649 */
650 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
651 efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
652
633 /* Initialise the channels */ 653 /* Initialise the channels */
634 efx_for_each_channel(channel, efx) { 654 efx_for_each_channel(channel, efx) {
635 efx_for_each_channel_tx_queue(tx_queue, channel) 655 efx_for_each_channel_tx_queue(tx_queue, channel)
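With assumed numbers, the thresholds above work out as follows for a 1024-entry ring where a worst-case skb needs 18 descriptors:

    static void txq_threshold_example(void)
    {
            unsigned int txq_entries   = 1024;
            unsigned int max_skb_descs = 18;  /* assumed worst case per skb */
            unsigned int stop = txq_entries - max_skb_descs;  /* 1006 */
            unsigned int wake = stop / 2;                     /* 503  */
            /* stop at fill >= 1006 (one more skb could overfill the
             * ring), wake once completions bring the fill to <= 503 */
            (void)stop; (void)wake;
    }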
@@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
730 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 750 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
731 u32 old_rxq_entries, old_txq_entries; 751 u32 old_rxq_entries, old_txq_entries;
732 unsigned i, next_buffer_table = 0; 752 unsigned i, next_buffer_table = 0;
733 int rc = 0; 753 int rc;
754
755 rc = efx_check_disabled(efx);
756 if (rc)
757 return rc;
734 758
735 /* Not all channels should be reallocated. We must avoid 759 /* Not all channels should be reallocated. We must avoid
736 * reallocating their buffer table entries. 760 * reallocating their buffer table entries.
@@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1365{ 1389{
1366 struct efx_channel *channel; 1390 struct efx_channel *channel;
1367 1391
1392 BUG_ON(efx->state == STATE_DISABLED);
1393
1368 if (efx->legacy_irq) 1394 if (efx->legacy_irq)
1369 efx->legacy_irq_enabled = true; 1395 efx->legacy_irq_enabled = true;
1370 efx_nic_enable_interrupts(efx); 1396 efx_nic_enable_interrupts(efx);
@@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
1382{ 1408{
1383 struct efx_channel *channel; 1409 struct efx_channel *channel;
1384 1410
1411 if (efx->state == STATE_DISABLED)
1412 return;
1413
1385 efx_mcdi_mode_poll(efx); 1414 efx_mcdi_mode_poll(efx);
1386 1415
1387 efx_nic_disable_interrupts(efx); 1416 efx_nic_disable_interrupts(efx);
@@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx)
1533 return rc; 1562 return rc;
1534} 1563}
1535 1564
1536/* Called after previous invocation(s) of efx_stop_all, restarts the port, 1565/* If the interface is supposed to be running but is not, start
1537 * kernel transmit queues and NAPI processing, and ensures that the port is 1566 * the hardware and software data path, regular activity for the port
1538 * scheduled to be reconfigured. This function is safe to call multiple 1567 * (MAC statistics, link polling, etc.) and schedule the port to be
1539 * times when the NIC is in any state. 1568 * reconfigured. Interrupts must already be enabled. This function
1569 * is safe to call multiple times, so long as the NIC is not disabled.
1570 * Requires the RTNL lock.
1540 */ 1571 */
1541static void efx_start_all(struct efx_nic *efx) 1572static void efx_start_all(struct efx_nic *efx)
1542{ 1573{
1543 EFX_ASSERT_RESET_SERIALISED(efx); 1574 EFX_ASSERT_RESET_SERIALISED(efx);
1575 BUG_ON(efx->state == STATE_DISABLED);
1544 1576
1545 /* Check that it is appropriate to restart the interface. All 1577 /* Check that it is appropriate to restart the interface. All
1546 * of these flags are safe to read under just the rtnl lock */ 1578 * of these flags are safe to read under just the rtnl lock */
1547 if (efx->port_enabled) 1579 if (efx->port_enabled || !netif_running(efx->net_dev))
1548 return;
1549 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1550 return;
1551 if (!netif_running(efx->net_dev))
1552 return; 1580 return;
1553 1581
1554 efx_start_port(efx); 1582 efx_start_port(efx);
@@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx)
1582 cancel_work_sync(&efx->mac_work); 1610 cancel_work_sync(&efx->mac_work);
1583} 1611}
1584 1612
1585/* Quiesce hardware and software without bringing the link down. 1613/* Quiesce the hardware and software data path, and regular activity
1586 * Safe to call multiple times, when the nic and interface is in any 1614 * for the port without bringing the link down. Safe to call multiple
1587 * state. The caller is guaranteed to subsequently be in a position 1615 * times with the NIC in almost any state, but interrupts should be
1588 * to modify any hardware and software state they see fit without 1616 * enabled. Requires the RTNL lock.
1589 * taking locks. */ 1617 */
1590static void efx_stop_all(struct efx_nic *efx) 1618static void efx_stop_all(struct efx_nic *efx)
1591{ 1619{
1592 EFX_ASSERT_RESET_SERIALISED(efx); 1620 EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1739 struct efx_nic *efx = netdev_priv(net_dev); 1767 struct efx_nic *efx = netdev_priv(net_dev);
1740 struct mii_ioctl_data *data = if_mii(ifr); 1768 struct mii_ioctl_data *data = if_mii(ifr);
1741 1769
1742 EFX_ASSERT_RESET_SERIALISED(efx);
1743
1744 /* Convert phy_id from older PRTAD/DEVAD format */ 1770 /* Convert phy_id from older PRTAD/DEVAD format */
1745 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 1771 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
1746 (data->phy_id & 0xfc00) == 0x0400) 1772 (data->phy_id & 0xfc00) == 0x0400)
@@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev)
1820static int efx_net_open(struct net_device *net_dev) 1846static int efx_net_open(struct net_device *net_dev)
1821{ 1847{
1822 struct efx_nic *efx = netdev_priv(net_dev); 1848 struct efx_nic *efx = netdev_priv(net_dev);
1823 EFX_ASSERT_RESET_SERIALISED(efx); 1849 int rc;
1824 1850
1825 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 1851 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
1826 raw_smp_processor_id()); 1852 raw_smp_processor_id());
1827 1853
1828 if (efx->state == STATE_DISABLED) 1854 rc = efx_check_disabled(efx);
1829 return -EIO; 1855 if (rc)
1856 return rc;
1830 if (efx->phy_mode & PHY_MODE_SPECIAL) 1857 if (efx->phy_mode & PHY_MODE_SPECIAL)
1831 return -EBUSY; 1858 return -EBUSY;
1832 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 1859 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev)
1852 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 1879 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
1853 raw_smp_processor_id()); 1880 raw_smp_processor_id());
1854 1881
1855 if (efx->state != STATE_DISABLED) { 1882 /* Stop the device and flush all the channels */
1856 /* Stop the device and flush all the channels */ 1883 efx_stop_all(efx);
1857 efx_stop_all(efx);
1858 }
1859 1884
1860 return 0; 1885 return 0;
1861} 1886}
@@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev)
1915static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1940static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1916{ 1941{
1917 struct efx_nic *efx = netdev_priv(net_dev); 1942 struct efx_nic *efx = netdev_priv(net_dev);
1943 int rc;
1918 1944
1919 EFX_ASSERT_RESET_SERIALISED(efx); 1945 rc = efx_check_disabled(efx);
1920 1946 if (rc)
1947 return rc;
1921 if (new_mtu > EFX_MAX_MTU) 1948 if (new_mtu > EFX_MAX_MTU)
1922 return -EINVAL; 1949 return -EINVAL;
1923 1950
@@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1926 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 1953 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1927 1954
1928 mutex_lock(&efx->mac_lock); 1955 mutex_lock(&efx->mac_lock);
1929 /* Reconfigure the MAC before enabling the dma queues so that
1930 * the RX buffers don't overflow */
1931 net_dev->mtu = new_mtu; 1956 net_dev->mtu = new_mtu;
1932 efx->type->reconfigure_mac(efx); 1957 efx->type->reconfigure_mac(efx);
1933 mutex_unlock(&efx->mac_lock); 1958 mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1942 struct sockaddr *addr = data; 1967 struct sockaddr *addr = data;
1943 char *new_addr = addr->sa_data; 1968 char *new_addr = addr->sa_data;
1944 1969
1945 EFX_ASSERT_RESET_SERIALISED(efx);
1946
1947 if (!is_valid_ether_addr(new_addr)) { 1970 if (!is_valid_ether_addr(new_addr)) {
1948 netif_err(efx, drv, efx->net_dev, 1971 netif_err(efx, drv, efx->net_dev,
1949 "invalid ethernet MAC address requested: %pM\n", 1972 "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx)
2079 2102
2080 rtnl_lock(); 2103 rtnl_lock();
2081 2104
2105 /* Enable resets to be scheduled and check whether any were
2106 * already requested. If so, the NIC is probably hosed so we
2107 * abort.
2108 */
2109 efx->state = STATE_READY;
2110 smp_mb(); /* ensure we change state before checking reset_pending */
2111 if (efx->reset_pending) {
2112 netif_err(efx, probe, efx->net_dev,
2113 "aborting probe due to scheduled reset\n");
2114 rc = -EIO;
2115 goto fail_locked;
2116 }
2117
2082 rc = dev_alloc_name(net_dev, net_dev->name); 2118 rc = dev_alloc_name(net_dev, net_dev->name);
2083 if (rc < 0) 2119 if (rc < 0)
2084 goto fail_locked; 2120 goto fail_locked;
2085 efx_update_name(efx); 2121 efx_update_name(efx);
2086 2122
2123 /* Always start with carrier off; PHY events will detect the link */
2124 netif_carrier_off(net_dev);
2125
2087 rc = register_netdevice(net_dev); 2126 rc = register_netdevice(net_dev);
2088 if (rc) 2127 if (rc)
2089 goto fail_locked; 2128 goto fail_locked;
@@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx)
2094 efx_init_tx_queue_core_txq(tx_queue); 2133 efx_init_tx_queue_core_txq(tx_queue);
2095 } 2134 }
2096 2135
2097 /* Always start with carrier off; PHY events will detect the link */
2098 netif_carrier_off(net_dev);
2099
2100 rtnl_unlock(); 2136 rtnl_unlock();
2101 2137
2102 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2138 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx)
2108 2144
2109 return 0; 2145 return 0;
2110 2146
2147fail_registered:
2148 rtnl_lock();
2149 unregister_netdevice(net_dev);
2111fail_locked: 2150fail_locked:
2151 efx->state = STATE_UNINIT;
2112 rtnl_unlock(); 2152 rtnl_unlock();
2113 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2153 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2114 return rc; 2154 return rc;
2115
2116fail_registered:
2117 unregister_netdev(net_dev);
2118 return rc;
2119} 2155}
2120 2156
2121static void efx_unregister_netdev(struct efx_nic *efx) 2157static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
2138 2174
2139 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2175 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2140 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2176 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2141 unregister_netdev(efx->net_dev); 2177
2178 rtnl_lock();
2179 unregister_netdevice(efx->net_dev);
2180 efx->state = STATE_UNINIT;
2181 rtnl_unlock();
2142} 2182}
2143 2183
2144/************************************************************************** 2184/**************************************************************************
@@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2154 EFX_ASSERT_RESET_SERIALISED(efx); 2194 EFX_ASSERT_RESET_SERIALISED(efx);
2155 2195
2156 efx_stop_all(efx); 2196 efx_stop_all(efx);
2157 mutex_lock(&efx->mac_lock);
2158
2159 efx_stop_interrupts(efx, false); 2197 efx_stop_interrupts(efx, false);
2198
2199 mutex_lock(&efx->mac_lock);
2160 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2200 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
2161 efx->phy_op->fini(efx); 2201 efx->phy_op->fini(efx);
2162 efx->type->fini(efx); 2202 efx->type->fini(efx);
@@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data)
2276 if (!pending) 2316 if (!pending)
2277 return; 2317 return;
2278 2318
2279 /* If we're not RUNNING then don't reset. Leave the reset_pending
2280 * flags set so that efx_pci_probe_main will be retried */
2281 if (efx->state != STATE_RUNNING) {
2282 netif_info(efx, drv, efx->net_dev,
2283 "scheduled reset quenched. NIC not RUNNING\n");
2284 return;
2285 }
2286
2287 rtnl_lock(); 2319 rtnl_lock();
2288 (void)efx_reset(efx, fls(pending) - 1); 2320
2321 /* We checked the state in efx_schedule_reset() but it may
2322 * have changed by now. Now that we have the RTNL lock,
2323 * it cannot change again.
2324 */
2325 if (efx->state == STATE_READY)
2326 (void)efx_reset(efx, fls(pending) - 1);
2327
2289 rtnl_unlock(); 2328 rtnl_unlock();
2290} 2329}
2291 2330
@@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2311 } 2350 }
2312 2351
2313 set_bit(method, &efx->reset_pending); 2352 set_bit(method, &efx->reset_pending);
2353 smp_mb(); /* ensure we change reset_pending before checking state */
2354
2355 /* If we're not READY then just leave the flags set as the cue
2356 * to abort probing or reschedule the reset later.
2357 */
2358 if (ACCESS_ONCE(efx->state) != STATE_READY)
2359 return;
2314 2360
2315 /* efx_process_channel() will no longer read events once a 2361 /* efx_process_channel() will no longer read events once a
2316 * reset is scheduled. So switch back to poll'd MCDI completions. */ 2362 * reset is scheduled. So switch back to poll'd MCDI completions. */
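The set_bit()/smp_mb()/state check above pairs with the state store, smp_mb() and reset_pending check in efx_register_netdev(): each side publishes its own flag, issues a full barrier, then reads the other's, so at least one side is guaranteed to observe the other and a reset can neither be lost nor run against a half-probed device. A user-space sketch of the handshake using C11 atomics in place of set_bit()/smp_mb()/ACCESS_ONCE():

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int state;            /* 0 = UNINIT, 1 = READY */
    static atomic_ulong reset_pending;

    static void schedule_reset(int method)
    {
            atomic_fetch_or(&reset_pending, 1ul << method);
            atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() */
            if (atomic_load(&state) != 1)
                    return;  /* probe/registration will pick it up */
            /* ...queue the reset work here... */
    }

    static bool register_ready(void)
    {
            atomic_store(&state, 1);
            atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() */
            return atomic_load(&reset_pending) == 0;     /* else abort */
    }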
@@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
2376/* This zeroes out and then fills in the invariants in a struct 2422/* This zeroes out and then fills in the invariants in a struct
2377 * efx_nic (including all sub-structures). 2423 * efx_nic (including all sub-structures).
2378 */ 2424 */
2379static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, 2425static int efx_init_struct(struct efx_nic *efx,
2380 struct pci_dev *pci_dev, struct net_device *net_dev) 2426 struct pci_dev *pci_dev, struct net_device *net_dev)
2381{ 2427{
2382 int i; 2428 int i;
2383 2429
2384 /* Initialise common structures */ 2430 /* Initialise common structures */
2385 memset(efx, 0, sizeof(*efx));
2386 spin_lock_init(&efx->biu_lock); 2431 spin_lock_init(&efx->biu_lock);
2387#ifdef CONFIG_SFC_MTD 2432#ifdef CONFIG_SFC_MTD
2388 INIT_LIST_HEAD(&efx->mtd_list); 2433 INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2392 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); 2437 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
2393 efx->pci_dev = pci_dev; 2438 efx->pci_dev = pci_dev;
2394 efx->msg_enable = debug; 2439 efx->msg_enable = debug;
2395 efx->state = STATE_INIT; 2440 efx->state = STATE_UNINIT;
2396 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2441 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2397 2442
2398 efx->net_dev = net_dev; 2443 efx->net_dev = net_dev;
@@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2409 goto fail; 2454 goto fail;
2410 } 2455 }
2411 2456
2412 efx->type = type;
2413
2414 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2457 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2415 2458
2416 /* Higher numbered interrupt modes are less capable! */ 2459 /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx)
2455 */ 2498 */
2456static void efx_pci_remove_main(struct efx_nic *efx) 2499static void efx_pci_remove_main(struct efx_nic *efx)
2457{ 2500{
2501 /* Flush reset_work. It can no longer be scheduled since we
2502 * are not READY.
2503 */
2504 BUG_ON(efx->state == STATE_READY);
2505 cancel_work_sync(&efx->reset_work);
2506
2458#ifdef CONFIG_RFS_ACCEL 2507#ifdef CONFIG_RFS_ACCEL
2459 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); 2508 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2460 efx->net_dev->rx_cpu_rmap = NULL; 2509 efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2480 2529
2481 /* Mark the NIC as fini, then stop the interface */ 2530 /* Mark the NIC as fini, then stop the interface */
2482 rtnl_lock(); 2531 rtnl_lock();
2483 efx->state = STATE_FINI;
2484 dev_close(efx->net_dev); 2532 dev_close(efx->net_dev);
2485 2533 efx_stop_interrupts(efx, false);
2486 /* Allow any queued efx_resets() to complete */
2487 rtnl_unlock(); 2534 rtnl_unlock();
2488 2535
2489 efx_stop_interrupts(efx, false);
2490 efx_sriov_fini(efx); 2536 efx_sriov_fini(efx);
2491 efx_unregister_netdev(efx); 2537 efx_unregister_netdev(efx);
2492 2538
2493 efx_mtd_remove(efx); 2539 efx_mtd_remove(efx);
2494 2540
2495 /* Wait for any scheduled resets to complete. No more will be
2496 * scheduled from this point because efx_stop_all() has been
2497 * called, we are no longer registered with driverlink, and
2498 * the net_device's have been removed. */
2499 cancel_work_sync(&efx->reset_work);
2500
2501 efx_pci_remove_main(efx); 2541 efx_pci_remove_main(efx);
2502 2542
2503 efx_fini_io(efx); 2543 efx_fini_io(efx);
@@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2617static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2657static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2618 const struct pci_device_id *entry) 2658 const struct pci_device_id *entry)
2619{ 2659{
2620 const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
2621 struct net_device *net_dev; 2660 struct net_device *net_dev;
2622 struct efx_nic *efx; 2661 struct efx_nic *efx;
2623 int rc; 2662 int rc;
@@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2627 EFX_MAX_RX_QUEUES); 2666 EFX_MAX_RX_QUEUES);
2628 if (!net_dev) 2667 if (!net_dev)
2629 return -ENOMEM; 2668 return -ENOMEM;
2630 net_dev->features |= (type->offload_features | NETIF_F_SG | 2669 efx = netdev_priv(net_dev);
2670 efx->type = (const struct efx_nic_type *) entry->driver_data;
2671 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2631 NETIF_F_HIGHDMA | NETIF_F_TSO | 2672 NETIF_F_HIGHDMA | NETIF_F_TSO |
2632 NETIF_F_RXCSUM); 2673 NETIF_F_RXCSUM);
2633 if (type->offload_features & NETIF_F_V6_CSUM) 2674 if (efx->type->offload_features & NETIF_F_V6_CSUM)
2634 net_dev->features |= NETIF_F_TSO6; 2675 net_dev->features |= NETIF_F_TSO6;
2635 /* Mask for features that also apply to VLAN devices */ 2676 /* Mask for features that also apply to VLAN devices */
2636 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2677 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2638 NETIF_F_RXCSUM); 2679 NETIF_F_RXCSUM);
2639 /* All offloads can be toggled */ 2680 /* All offloads can be toggled */
2640 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; 2681 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
2641 efx = netdev_priv(net_dev);
2642 pci_set_drvdata(pci_dev, efx); 2682 pci_set_drvdata(pci_dev, efx);
2643 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2683 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2644 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2684 rc = efx_init_struct(efx, pci_dev, net_dev);
2645 if (rc) 2685 if (rc)
2646 goto fail1; 2686 goto fail1;
2647 2687
@@ -2656,28 +2696,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2656 goto fail2; 2696 goto fail2;
2657 2697
2658 rc = efx_pci_probe_main(efx); 2698 rc = efx_pci_probe_main(efx);
2659
2660 /* Serialise against efx_reset(). No more resets will be
2661 * scheduled since efx_stop_all() has been called, and we have
2662 * not and never have been registered.
2663 */
2664 cancel_work_sync(&efx->reset_work);
2665
2666 if (rc) 2699 if (rc)
2667 goto fail3; 2700 goto fail3;
2668 2701
2669 /* If there was a scheduled reset during probe, the NIC is
2670 * probably hosed anyway.
2671 */
2672 if (efx->reset_pending) {
2673 rc = -EIO;
2674 goto fail4;
2675 }
2676
2677 /* Switch to the running state before we expose the device to the OS,
2678 * so that dev_open()|efx_start_all() will actually start the device */
2679 efx->state = STATE_RUNNING;
2680
2681 rc = efx_register_netdev(efx); 2702 rc = efx_register_netdev(efx);
2682 if (rc) 2703 if (rc)
2683 goto fail4; 2704 goto fail4;
@@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev)
2717{ 2738{
2718 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2739 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2719 2740
2720 efx->state = STATE_FINI; 2741 rtnl_lock();
2721 2742
2722 netif_device_detach(efx->net_dev); 2743 if (efx->state != STATE_DISABLED) {
2744 efx->state = STATE_UNINIT;
2723 2745
2724 efx_stop_all(efx); 2746 netif_device_detach(efx->net_dev);
2725 efx_stop_interrupts(efx, false); 2747
2748 efx_stop_all(efx);
2749 efx_stop_interrupts(efx, false);
2750 }
2751
2752 rtnl_unlock();
2726 2753
2727 return 0; 2754 return 0;
2728} 2755}
@@ -2731,21 +2758,25 @@ static int efx_pm_thaw(struct device *dev)
2731{ 2758{
2732 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2759 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2733 2760
2734 efx->state = STATE_INIT; 2761 rtnl_lock();
2735 2762
2736 efx_start_interrupts(efx, false); 2763 if (efx->state != STATE_DISABLED) {
2764 efx_start_interrupts(efx, false);
2737 2765
2738 mutex_lock(&efx->mac_lock); 2766 mutex_lock(&efx->mac_lock);
2739 efx->phy_op->reconfigure(efx); 2767 efx->phy_op->reconfigure(efx);
2740 mutex_unlock(&efx->mac_lock); 2768 mutex_unlock(&efx->mac_lock);
2741 2769
2742 efx_start_all(efx); 2770 efx_start_all(efx);
2743 2771
2744 netif_device_attach(efx->net_dev); 2772 netif_device_attach(efx->net_dev);
2745 2773
2746 efx->state = STATE_RUNNING; 2774 efx->state = STATE_READY;
2747 2775
2748 efx->type->resume_wol(efx); 2776 efx->type->resume_wol(efx);
2777 }
2778
2779 rtnl_unlock();
2749 2780
2750 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 2781 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
2751 queue_work(reset_workqueue, &efx->reset_work); 2782 queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8cba2df82b18..f8e7e204981f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
529 if (!efx_tests) 529 if (!efx_tests)
530 goto fail; 530 goto fail;
531 531
532 532 if (efx->state != STATE_READY) {
533 ASSERT_RTNL();
534 if (efx->state != STATE_RUNNING) {
535 rc = -EIO; 533 rc = -EIO;
536 goto fail1; 534 goto fail1;
537 } 535 }
@@ -863,8 +861,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
863 &ip_entry->ip4dst, &ip_entry->pdst); 861 &ip_entry->ip4dst, &ip_entry->pdst);
864 if (rc != 0) { 862 if (rc != 0) {
865 rc = efx_filter_get_ipv4_full( 863 rc = efx_filter_get_ipv4_full(
866 &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, 864 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
867 &ip_entry->ip4dst, &ip_entry->pdst); 865 &ip_entry->ip4src, &ip_entry->psrc);
868 EFX_WARN_ON_PARANOID(rc); 866 EFX_WARN_ON_PARANOID(rc);
869 ip_mask->ip4src = ~0; 867 ip_mask->ip4src = ~0;
870 ip_mask->psrc = ~0; 868 ip_mask->psrc = ~0;
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 8687a6c3db0d..ec1e99d0dcad 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
380 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
382 err = 0; 382 err = 0;
383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
384 err = -EBUSY; 384 err = -EBUSY;
385 } else { 385 } else {
386 /* Reset the PHY, reconfigure the MAC and enable/disable 386 /* Reset the PHY, reconfigure the MAC and enable/disable
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cd9c0a989692..7ab1232494ef 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,29 +91,31 @@ struct efx_special_buffer {
91}; 91};
92 92
93/** 93/**
94 * struct efx_tx_buffer - An Efx TX buffer 94 * struct efx_tx_buffer - buffer state for a TX descriptor
95 * @skb: The associated socket buffer. 95 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
96 * Set only on the final fragment of a packet; %NULL for all other 96 * freed when descriptor completes
97 * fragments. When this fragment completes, then we can free this 97 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
98 * skb. 98 * freed when descriptor completes.
99 * @tsoh: The associated TSO header structure, or %NULL if this
100 * buffer is not a TSO header.
101 * @dma_addr: DMA address of the fragment. 99 * @dma_addr: DMA address of the fragment.
100 * @flags: Flags for allocation and DMA mapping type
102 * @len: Length of this fragment. 101 * @len: Length of this fragment.
103 * This field is zero when the queue slot is empty. 102 * This field is zero when the queue slot is empty.
104 * @continuation: True if this fragment is not the end of a packet.
105 * @unmap_single: True if dma_unmap_single should be used.
106 * @unmap_len: Length of this fragment to unmap 103 * @unmap_len: Length of this fragment to unmap
107 */ 104 */
108struct efx_tx_buffer { 105struct efx_tx_buffer {
109 const struct sk_buff *skb; 106 union {
110 struct efx_tso_header *tsoh; 107 const struct sk_buff *skb;
108 void *heap_buf;
109 };
111 dma_addr_t dma_addr; 110 dma_addr_t dma_addr;
111 unsigned short flags;
112 unsigned short len; 112 unsigned short len;
113 bool continuation;
114 bool unmap_single;
115 unsigned short unmap_len; 113 unsigned short unmap_len;
116}; 114};
115#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
116#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
117#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
118#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
117 119
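The union plus flags word above replaces four separate fields (skb, tsoh, continuation, unmap_single); the flags act as the discriminant that tells the completion path which union member, if any, it must free. A reduced sketch of the pattern:

    #include <stdlib.h>

    #define BUF_CONT  1
    #define BUF_SKB   2
    #define BUF_HEAP  4

    struct tx_buf {
            union {                      /* C11 anonymous union */
                    const void *skb;     /* valid when flags & BUF_SKB */
                    void *heap_buf;      /* valid when flags & BUF_HEAP */
            };
            unsigned short flags;
    };

    static void complete_buf(struct tx_buf *b)
    {
            if (b->flags & BUF_HEAP)
                    free(b->heap_buf);   /* only free the active member */
            b->flags = 0;
    }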
118/** 120/**
119 * struct efx_tx_queue - An Efx TX queue 121 * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +135,7 @@ struct efx_tx_buffer {
133 * @channel: The associated channel 135 * @channel: The associated channel
134 * @core_txq: The networking core TX queue structure 136 * @core_txq: The networking core TX queue structure
135 * @buffer: The software buffer ring 137 * @buffer: The software buffer ring
138 * @tsoh_page: Array of pages of TSO header buffers
136 * @txd: The hardware descriptor ring 139 * @txd: The hardware descriptor ring
137 * @ptr_mask: The size of the ring minus 1. 140 * @ptr_mask: The size of the ring minus 1.
138 * @initialised: Has hardware queue been initialised? 141 * @initialised: Has hardware queue been initialised?
@@ -156,9 +159,6 @@ struct efx_tx_buffer {
156 * variable indicates that the queue is full. This is to 159 * variable indicates that the queue is full. This is to
157 * avoid cache-line ping-pong between the xmit path and the 160 * avoid cache-line ping-pong between the xmit path and the
158 * completion path. 161 * completion path.
159 * @tso_headers_free: A list of TSO headers allocated for this TX queue
160 * that are not in use, and so available for new TSO sends. The list
161 * is protected by the TX queue lock.
162 * @tso_bursts: Number of times TSO xmit invoked by kernel 162 * @tso_bursts: Number of times TSO xmit invoked by kernel
163 * @tso_long_headers: Number of packets with headers too long for standard 163 * @tso_long_headers: Number of packets with headers too long for standard
164 * blocks 164 * blocks
@@ -175,6 +175,7 @@ struct efx_tx_queue {
175 struct efx_channel *channel; 175 struct efx_channel *channel;
176 struct netdev_queue *core_txq; 176 struct netdev_queue *core_txq;
177 struct efx_tx_buffer *buffer; 177 struct efx_tx_buffer *buffer;
178 struct efx_buffer *tsoh_page;
178 struct efx_special_buffer txd; 179 struct efx_special_buffer txd;
179 unsigned int ptr_mask; 180 unsigned int ptr_mask;
180 bool initialised; 181 bool initialised;
@@ -187,7 +188,6 @@ struct efx_tx_queue {
187 unsigned int insert_count ____cacheline_aligned_in_smp; 188 unsigned int insert_count ____cacheline_aligned_in_smp;
188 unsigned int write_count; 189 unsigned int write_count;
189 unsigned int old_read_count; 190 unsigned int old_read_count;
190 struct efx_tso_header *tso_headers_free;
191 unsigned int tso_bursts; 191 unsigned int tso_bursts;
192 unsigned int tso_long_headers; 192 unsigned int tso_long_headers;
193 unsigned int tso_packets; 193 unsigned int tso_packets;
@@ -430,11 +430,9 @@ enum efx_int_mode {
430#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 430#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
431 431
432enum nic_state { 432enum nic_state {
433 STATE_INIT = 0, 433 STATE_UNINIT = 0, /* device being probed/removed or is frozen */
434 STATE_RUNNING = 1, 434 STATE_READY = 1, /* hardware ready and netdev registered */
435 STATE_FINI = 2, 435 STATE_DISABLED = 2, /* device disabled due to hardware errors */
436 STATE_DISABLED = 3,
437 STATE_MAX,
438}; 436};
439 437
440/* 438/*
@@ -654,7 +652,7 @@ struct vfdi_status;
654 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 652 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
655 * @irq_rx_moderation: IRQ moderation time for RX event queues 653 * @irq_rx_moderation: IRQ moderation time for RX event queues
656 * @msg_enable: Log message enable flags 654 * @msg_enable: Log message enable flags
657 * @state: Device state flag. Serialised by the rtnl_lock. 655 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
658 * @reset_pending: Bitmask for pending resets 656 * @reset_pending: Bitmask for pending resets
659 * @tx_queue: TX DMA queues 657 * @tx_queue: TX DMA queues
660 * @rx_queue: RX DMA queues 658 * @rx_queue: RX DMA queues
@@ -664,6 +662,8 @@ struct vfdi_status;
664 * should be allocated for this NIC 662 * should be allocated for this NIC
665 * @rxq_entries: Size of receive queues requested by user. 663 * @rxq_entries: Size of receive queues requested by user.
666 * @txq_entries: Size of transmit queues requested by user. 664 * @txq_entries: Size of transmit queues requested by user.
665 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
666 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
667 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches 667 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
668 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches 668 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
669 * @sram_lim_qw: Qword address limit of SRAM 669 * @sram_lim_qw: Qword address limit of SRAM
@@ -774,6 +774,9 @@ struct efx_nic {
774 774
775 unsigned rxq_entries; 775 unsigned rxq_entries;
776 unsigned txq_entries; 776 unsigned txq_entries;
777 unsigned int txq_stop_thresh;
778 unsigned int txq_wake_thresh;
779
777 unsigned tx_dc_base; 780 unsigned tx_dc_base;
778 unsigned rx_dc_base; 781 unsigned rx_dc_base;
779 unsigned sram_lim_qw; 782 unsigned sram_lim_qw;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 326d799762d6..cdff40b65729 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
298/************************************************************************** 298/**************************************************************************
299 * 299 *
300 * Generic buffer handling 300 * Generic buffer handling
301 * These buffers are used for interrupt status and MAC stats 301 * These buffers are used for interrupt status, MAC stats, etc.
302 * 302 *
303 **************************************************************************/ 303 **************************************************************************/
304 304
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
401 ++tx_queue->write_count; 401 ++tx_queue->write_count;
402 402
403 /* Create TX descriptor ring entry */ 403 /* Create TX descriptor ring entry */
404 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
404 EFX_POPULATE_QWORD_4(*txd, 405 EFX_POPULATE_QWORD_4(*txd,
405 FSF_AZ_TX_KER_CONT, buffer->continuation, 406 FSF_AZ_TX_KER_CONT,
407 buffer->flags & EFX_TX_BUF_CONT,
406 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 408 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
407 FSF_AZ_TX_KER_BUF_REGION, 0, 409 FSF_AZ_TX_KER_BUF_REGION, 0,
408 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 410 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 18713436b443..ebca75ed78dc 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,14 +22,6 @@
22#include "nic.h" 22#include "nic.h"
23#include "workarounds.h" 23#include "workarounds.h"
24 24
25/*
26 * TX descriptor ring full threshold
27 *
28 * The tx_queue descriptor ring fill-level must fall below this value
29 * before we restart the netif queue
30 */
31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32
33static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 25static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
34 struct efx_tx_buffer *buffer, 26 struct efx_tx_buffer *buffer,
35 unsigned int *pkts_compl, 27 unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
39 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 31 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
40 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - 32 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
41 buffer->unmap_len); 33 buffer->unmap_len);
42 if (buffer->unmap_single) 34 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
43 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, 35 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
44 DMA_TO_DEVICE); 36 DMA_TO_DEVICE);
45 else 37 else
46 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, 38 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
47 DMA_TO_DEVICE); 39 DMA_TO_DEVICE);
48 buffer->unmap_len = 0; 40 buffer->unmap_len = 0;
49 buffer->unmap_single = false;
50 } 41 }
51 42
52 if (buffer->skb) { 43 if (buffer->flags & EFX_TX_BUF_SKB) {
53 (*pkts_compl)++; 44 (*pkts_compl)++;
54 (*bytes_compl) += buffer->skb->len; 45 (*bytes_compl) += buffer->skb->len;
55 dev_kfree_skb_any((struct sk_buff *) buffer->skb); 46 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
56 buffer->skb = NULL;
57 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, 47 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
58 "TX queue %d transmission id %x complete\n", 48 "TX queue %d transmission id %x complete\n",
59 tx_queue->queue, tx_queue->read_count); 49 tx_queue->queue, tx_queue->read_count);
50 } else if (buffer->flags & EFX_TX_BUF_HEAP) {
51 kfree(buffer->heap_buf);
60 } 52 }
61}
62 53
63/** 54 buffer->len = 0;
64 * struct efx_tso_header - a DMA mapped buffer for packet headers 55 buffer->flags = 0;
65 * @next: Linked list of free ones. 56}
66 * The list is protected by the TX queue lock.
67 * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
68 * @dma_addr: The DMA address of the header below.
69 *
70 * This controls the memory used for a TSO header. Use TSOH_DATA()
71 * to find the packet header data. Use TSOH_SIZE() to calculate the
72 * total size required for a given packet header length. TSO headers
73 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
74 */
75struct efx_tso_header {
76 union {
77 struct efx_tso_header *next;
78 size_t unmap_len;
79 };
80 dma_addr_t dma_addr;
81};
82 57
83static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 58static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
84 struct sk_buff *skb); 59 struct sk_buff *skb);
85static void efx_fini_tso(struct efx_tx_queue *tx_queue);
86static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
87 struct efx_tso_header *tsoh);
88
89static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
90 struct efx_tx_buffer *buffer)
91{
92 if (buffer->tsoh) {
93 if (likely(!buffer->tsoh->unmap_len)) {
94 buffer->tsoh->next = tx_queue->tso_headers_free;
95 tx_queue->tso_headers_free = buffer->tsoh;
96 } else {
97 efx_tsoh_heap_free(tx_queue, buffer->tsoh);
98 }
99 buffer->tsoh = NULL;
100 }
101}
102
103 60
104static inline unsigned 61static inline unsigned
105efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) 62efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
138 return max_descs; 95 return max_descs;
139} 96}
140 97
98/* Get partner of a TX queue, seen as part of the same net core queue */
99static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
100{
101 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
102 return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
103 else
104 return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
105}
106
107static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
108{
109 /* We need to consider both queues that the net core sees as one */
110 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
111 struct efx_nic *efx = txq1->efx;
112 unsigned int fill_level;
113
114 fill_level = max(txq1->insert_count - txq1->old_read_count,
115 txq2->insert_count - txq2->old_read_count);
116 if (likely(fill_level < efx->txq_stop_thresh))
117 return;
118
119 /* We used the stale old_read_count above, which gives us a
120 * pessimistic estimate of the fill level (which may even
121 * validly be >= efx->txq_entries). Now try again using
122 * read_count (more likely to be a cache miss).
123 *
124 * If we read read_count and then conditionally stop the
125 * queue, it is possible for the completion path to race with
126 * us and complete all outstanding descriptors in the middle,
127 * after which there will be no more completions to wake it.
128 * Therefore we stop the queue first, then read read_count
129 * (with a memory barrier to ensure the ordering), then
130 * restart the queue if the fill level turns out to be low
131 * enough.
132 */
133 netif_tx_stop_queue(txq1->core_txq);
134 smp_mb();
135 txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
136 txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
137
138 fill_level = max(txq1->insert_count - txq1->old_read_count,
139 txq2->insert_count - txq2->old_read_count);
140 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
141 if (likely(fill_level < efx->txq_stop_thresh)) {
142 smp_mb();
143 if (likely(!efx->loopback_selftest))
144 netif_tx_start_queue(txq1->core_txq);
145 }
146}
147
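The stop-first-then-recheck dance above avoids a lost wakeup: once the queue is stopped, a racing completion will either see it stopped and wake it, or this path re-reads read_count after the barrier and restarts the queue itself. A user-space sketch of the same pattern, with C11 atomics standing in for smp_mb()/ACCESS_ONCE():

    #include <stdatomic.h>

    struct txq {
            unsigned int insert_count;
            unsigned int old_read_count;
            unsigned int stop_thresh;
            _Atomic unsigned int read_count;
            _Atomic int stopped;
    };

    static void maybe_stop_queue(struct txq *q)
    {
            if (q->insert_count - q->old_read_count < q->stop_thresh)
                    return;
            atomic_store(&q->stopped, 1);              /* stop first... */
            atomic_thread_fence(memory_order_seq_cst); /* ...then re-read */
            q->old_read_count = atomic_load(&q->read_count);
            if (q->insert_count - q->old_read_count < q->stop_thresh)
                    atomic_store(&q->stopped, 0);      /* raced: undo stop */
    }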
141/* 148/*
142 * Add a socket buffer to a TX queue 149 * Add a socket buffer to a TX queue
143 * 150 *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
151 * This function is split out from efx_hard_start_xmit to allow the 158 * This function is split out from efx_hard_start_xmit to allow the
152 * loopback test to direct packets via specific TX queues. 159 * loopback test to direct packets via specific TX queues.
153 * 160 *
154 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 161 * Returns NETDEV_TX_OK.
155 * You must hold netif_tx_lock() to call this function. 162 * You must hold netif_tx_lock() to call this function.
156 */ 163 */
157netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 164netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
160 struct device *dma_dev = &efx->pci_dev->dev; 167 struct device *dma_dev = &efx->pci_dev->dev;
161 struct efx_tx_buffer *buffer; 168 struct efx_tx_buffer *buffer;
162 skb_frag_t *fragment; 169 skb_frag_t *fragment;
163 unsigned int len, unmap_len = 0, fill_level, insert_ptr; 170 unsigned int len, unmap_len = 0, insert_ptr;
164 dma_addr_t dma_addr, unmap_addr = 0; 171 dma_addr_t dma_addr, unmap_addr = 0;
165 unsigned int dma_len; 172 unsigned int dma_len;
166 bool unmap_single; 173 unsigned short dma_flags;
167 int q_space, i = 0; 174 int i = 0;
168 netdev_tx_t rc = NETDEV_TX_OK;
169 175
170 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 176 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
171 177
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
183 return NETDEV_TX_OK; 189 return NETDEV_TX_OK;
184 } 190 }
185 191
186 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
187 q_space = efx->txq_entries - 1 - fill_level;
188
189 /* Map for DMA. Use dma_map_single rather than dma_map_page 192 /* Map for DMA. Use dma_map_single rather than dma_map_page
190 * since this is more efficient on machines with sparse 193 * since this is more efficient on machines with sparse
191 * memory. 194 * memory.
192 */ 195 */
193 unmap_single = true; 196 dma_flags = EFX_TX_BUF_MAP_SINGLE;
194 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); 197 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
195 198
196 /* Process all fragments */ 199 /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
205 208
206 /* Add to TX queue, splitting across DMA boundaries */ 209 /* Add to TX queue, splitting across DMA boundaries */
207 do { 210 do {
208 if (unlikely(q_space-- <= 0)) {
209 /* It might be that completions have
210 * happened since the xmit path last
211 * checked. Update the xmit path's
212 * copy of read_count.
213 */
214 netif_tx_stop_queue(tx_queue->core_txq);
215 /* This memory barrier protects the
216 * change of queue state from the access
217 * of read_count. */
218 smp_mb();
219 tx_queue->old_read_count =
220 ACCESS_ONCE(tx_queue->read_count);
221 fill_level = (tx_queue->insert_count
222 - tx_queue->old_read_count);
223 q_space = efx->txq_entries - 1 - fill_level;
224 if (unlikely(q_space-- <= 0)) {
225 rc = NETDEV_TX_BUSY;
226 goto unwind;
227 }
228 smp_mb();
229 if (likely(!efx->loopback_selftest))
230 netif_tx_start_queue(
231 tx_queue->core_txq);
232 }
233
234 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
235 buffer = &tx_queue->buffer[insert_ptr]; 212 buffer = &tx_queue->buffer[insert_ptr];
236 efx_tsoh_free(tx_queue, buffer); 213 EFX_BUG_ON_PARANOID(buffer->flags);
237 EFX_BUG_ON_PARANOID(buffer->tsoh);
238 EFX_BUG_ON_PARANOID(buffer->skb);
239 EFX_BUG_ON_PARANOID(buffer->len); 214 EFX_BUG_ON_PARANOID(buffer->len);
240 EFX_BUG_ON_PARANOID(!buffer->continuation);
241 EFX_BUG_ON_PARANOID(buffer->unmap_len); 215 EFX_BUG_ON_PARANOID(buffer->unmap_len);
242 216
243 dma_len = efx_max_tx_len(efx, dma_addr); 217 dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
247 /* Fill out per descriptor fields */ 221 /* Fill out per descriptor fields */
248 buffer->len = dma_len; 222 buffer->len = dma_len;
249 buffer->dma_addr = dma_addr; 223 buffer->dma_addr = dma_addr;
224 buffer->flags = EFX_TX_BUF_CONT;
250 len -= dma_len; 225 len -= dma_len;
251 dma_addr += dma_len; 226 dma_addr += dma_len;
252 ++tx_queue->insert_count; 227 ++tx_queue->insert_count;
253 } while (len); 228 } while (len);
254 229
255 /* Transfer ownership of the unmapping to the final buffer */ 230 /* Transfer ownership of the unmapping to the final buffer */
256 buffer->unmap_single = unmap_single; 231 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
257 buffer->unmap_len = unmap_len; 232 buffer->unmap_len = unmap_len;
258 unmap_len = 0; 233 unmap_len = 0;
259 234
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
264 len = skb_frag_size(fragment); 239 len = skb_frag_size(fragment);
265 i++; 240 i++;
266 /* Map for DMA */ 241 /* Map for DMA */
267 unmap_single = false; 242 dma_flags = 0;
268 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, 243 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
269 DMA_TO_DEVICE); 244 DMA_TO_DEVICE);
270 } 245 }
271 246
272 /* Transfer ownership of the skb to the final buffer */ 247 /* Transfer ownership of the skb to the final buffer */
273 buffer->skb = skb; 248 buffer->skb = skb;
274 buffer->continuation = false; 249 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
275 250
276 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 251 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
277 252
278 /* Pass off to hardware */ 253 /* Pass off to hardware */
279 efx_nic_push_buffers(tx_queue); 254 efx_nic_push_buffers(tx_queue);
280 255
256 efx_tx_maybe_stop_queue(tx_queue);
257
281 return NETDEV_TX_OK; 258 return NETDEV_TX_OK;
282 259
283 dma_err: 260 dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
289 /* Mark the packet as transmitted, and free the SKB ourselves */ 266 /* Mark the packet as transmitted, and free the SKB ourselves */
290 dev_kfree_skb_any(skb); 267 dev_kfree_skb_any(skb);
291 268
292 unwind:
293 /* Work backwards until we hit the original insert pointer value */ 269 /* Work backwards until we hit the original insert pointer value */
294 while (tx_queue->insert_count != tx_queue->write_count) { 270 while (tx_queue->insert_count != tx_queue->write_count) {
295 unsigned int pkts_compl = 0, bytes_compl = 0; 271 unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
297 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 273 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
298 buffer = &tx_queue->buffer[insert_ptr]; 274 buffer = &tx_queue->buffer[insert_ptr];
299 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 275 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
300 buffer->len = 0;
301 } 276 }
302 277
303 /* Free the fragment we were mid-way through pushing */ 278 /* Free the fragment we were mid-way through pushing */
304 if (unmap_len) { 279 if (unmap_len) {
305 if (unmap_single) 280 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
306 dma_unmap_single(dma_dev, unmap_addr, unmap_len, 281 dma_unmap_single(dma_dev, unmap_addr, unmap_len,
307 DMA_TO_DEVICE); 282 DMA_TO_DEVICE);
308 else 283 else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
310 DMA_TO_DEVICE); 285 DMA_TO_DEVICE);
311 } 286 }
312 287
313 return rc; 288 return NETDEV_TX_OK;
314} 289}
315 290
316/* Remove packets from the TX queue 291/* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
340 } 315 }
341 316
342 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); 317 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
343 buffer->continuation = true;
344 buffer->len = 0;
345 318
346 ++tx_queue->read_count; 319 ++tx_queue->read_count;
347 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 320 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -450,6 +423,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
450{ 423{
451 unsigned fill_level; 424 unsigned fill_level;
452 struct efx_nic *efx = tx_queue->efx; 425 struct efx_nic *efx = tx_queue->efx;
426 struct efx_tx_queue *txq2;
453 unsigned int pkts_compl = 0, bytes_compl = 0; 427 unsigned int pkts_compl = 0, bytes_compl = 0;
454 428
455 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 429 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +431,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
457 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 431 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
458 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 432 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
459 433
460 /* See if we need to restart the netif queue. This barrier 434 /* See if we need to restart the netif queue. This memory
461 * separates the update of read_count from the test of the 435 * barrier ensures that we write read_count (inside
462 * queue state. */ 436 * efx_dequeue_buffers()) before reading the queue status.
437 */
463 smp_mb(); 438 smp_mb();
464 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 439 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
465 likely(efx->port_enabled) && 440 likely(efx->port_enabled) &&
466 likely(netif_device_present(efx->net_dev))) { 441 likely(netif_device_present(efx->net_dev))) {
467 fill_level = tx_queue->insert_count - tx_queue->read_count; 442 txq2 = efx_tx_queue_partner(tx_queue);
468 if (fill_level < EFX_TXQ_THRESHOLD(efx)) 443 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
444 txq2->insert_count - txq2->read_count);
445 if (fill_level <= efx->txq_wake_thresh)
469 netif_tx_wake_queue(tx_queue->core_txq); 446 netif_tx_wake_queue(tx_queue->core_txq);
470 } 447 }
471 448
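(The matching consumer side of the sketch above: publish the new read_count,
issue a full fence, then test the stopped flag, the mirror image of the
smp_mb() here. model_ring and STOP_THRESH come from the earlier sketch; the
wake threshold of STOP_THRESH / 2 is illustrative.)

static void model_complete(struct model_ring *r, unsigned int ndesc)
{
	unsigned int rc =
		atomic_load_explicit(&r->read_count, memory_order_relaxed) +
		ndesc;

	atomic_store_explicit(&r->read_count, rc, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* plays smp_mb() */

	if (atomic_load(&r->stopped) &&
	    atomic_load(&r->insert_count) - rc <= STOP_THRESH / 2)
		atomic_store(&r->stopped, false);       /* wake the queue */
}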
@@ -480,11 +457,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
480 } 457 }
481} 458}
482 459
460/* Size of page-based TSO header buffers. Larger blocks must be
461 * allocated from the heap.
462 */
463#define TSOH_STD_SIZE 128
464#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
465
466/* At most half the descriptors in the queue at any time will refer to
467 * a TSO header buffer, since they must always be followed by a
468 * payload descriptor referring to an skb.
469 */
470static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
471{
472 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
473}
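(Worked example for the sizing above, assuming 4 KiB pages: TSOH_PER_PAGE =
4096 / 128 = 32 header slots per page, so a 1024-entry ring needs
DIV_ROUND_UP(1024, 2 * 32) = 16 pages, i.e. one standard-size header slot for
every second descriptor.)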
474
483int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 475int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
484{ 476{
485 struct efx_nic *efx = tx_queue->efx; 477 struct efx_nic *efx = tx_queue->efx;
486 unsigned int entries; 478 unsigned int entries;
487 int i, rc; 479 int rc;
488 480
489 /* Create the smallest power-of-two aligned ring */ 481 /* Create the smallest power-of-two aligned ring */
490 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); 482 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +492,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
500 GFP_KERNEL); 492 GFP_KERNEL);
501 if (!tx_queue->buffer) 493 if (!tx_queue->buffer)
502 return -ENOMEM; 494 return -ENOMEM;
503 for (i = 0; i <= tx_queue->ptr_mask; ++i) 495
504 tx_queue->buffer[i].continuation = true; 496 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
497 tx_queue->tsoh_page =
498 kcalloc(efx_tsoh_page_count(tx_queue),
499 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
500 if (!tx_queue->tsoh_page) {
501 rc = -ENOMEM;
502 goto fail1;
503 }
504 }
505 505
506 /* Allocate hardware ring */ 506 /* Allocate hardware ring */
507 rc = efx_nic_probe_tx(tx_queue); 507 rc = efx_nic_probe_tx(tx_queue);
508 if (rc) 508 if (rc)
509 goto fail; 509 goto fail2;
510 510
511 return 0; 511 return 0;
512 512
513 fail: 513fail2:
514 kfree(tx_queue->tsoh_page);
515 tx_queue->tsoh_page = NULL;
516fail1:
514 kfree(tx_queue->buffer); 517 kfree(tx_queue->buffer);
515 tx_queue->buffer = NULL; 518 tx_queue->buffer = NULL;
516 return rc; 519 return rc;
@@ -546,8 +549,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
546 unsigned int pkts_compl = 0, bytes_compl = 0; 549 unsigned int pkts_compl = 0, bytes_compl = 0;
547 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; 550 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
548 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 551 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
549 buffer->continuation = true;
550 buffer->len = 0;
551 552
552 ++tx_queue->read_count; 553 ++tx_queue->read_count;
553 } 554 }
@@ -568,13 +569,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
568 efx_nic_fini_tx(tx_queue); 569 efx_nic_fini_tx(tx_queue);
569 570
570 efx_release_tx_buffers(tx_queue); 571 efx_release_tx_buffers(tx_queue);
571
572 /* Free up TSO header cache */
573 efx_fini_tso(tx_queue);
574} 572}
575 573
576void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 574void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
577{ 575{
576 int i;
577
578 if (!tx_queue->buffer) 578 if (!tx_queue->buffer)
579 return; 579 return;
580 580
@@ -582,6 +582,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
582 "destroying TX queue %d\n", tx_queue->queue); 582 "destroying TX queue %d\n", tx_queue->queue);
583 efx_nic_remove_tx(tx_queue); 583 efx_nic_remove_tx(tx_queue);
584 584
585 if (tx_queue->tsoh_page) {
586 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
587 efx_nic_free_buffer(tx_queue->efx,
588 &tx_queue->tsoh_page[i]);
589 kfree(tx_queue->tsoh_page);
590 tx_queue->tsoh_page = NULL;
591 }
592
585 kfree(tx_queue->buffer); 593 kfree(tx_queue->buffer);
586 tx_queue->buffer = NULL; 594 tx_queue->buffer = NULL;
587} 595}
@@ -604,22 +612,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
604#define TSOH_OFFSET NET_IP_ALIGN 612#define TSOH_OFFSET NET_IP_ALIGN
605#endif 613#endif
606 614
607#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
608
609/* Total size of struct efx_tso_header, buffer and padding */
610#define TSOH_SIZE(hdr_len) \
611 (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
612
613/* Size of blocks on free list. Larger blocks must be allocated from
614 * the heap.
615 */
616#define TSOH_STD_SIZE 128
617
618#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 615#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
619#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
620#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
621#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
622#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
623 616
624/** 617/**
625 * struct tso_state - TSO state for an SKB 618 * struct tso_state - TSO state for an SKB
@@ -631,10 +624,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
631 * @in_len: Remaining length in current SKB fragment 624 * @in_len: Remaining length in current SKB fragment
632 * @unmap_len: Length of SKB fragment 625 * @unmap_len: Length of SKB fragment
633 * @unmap_addr: DMA address of SKB fragment 626 * @unmap_addr: DMA address of SKB fragment
634 * @unmap_single: DMA single vs page mapping flag 627 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
635 * @protocol: Network protocol (after any VLAN header) 628 * @protocol: Network protocol (after any VLAN header)
629 * @ip_off: Offset of IP header
630 * @tcp_off: Offset of TCP header
636 * @header_len: Number of bytes of header 631 * @header_len: Number of bytes of header
637 * @full_packet_size: Number of bytes to put in each outgoing segment 632 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
638 * 633 *
639 * The state used during segmentation. It is put into this data structure 634 * The state used during segmentation. It is put into this data structure
640 * just to make it easy to pass into inline functions. 635 * just to make it easy to pass into inline functions.
@@ -651,11 +646,13 @@ struct tso_state {
651 unsigned in_len; 646 unsigned in_len;
652 unsigned unmap_len; 647 unsigned unmap_len;
653 dma_addr_t unmap_addr; 648 dma_addr_t unmap_addr;
654 bool unmap_single; 649 unsigned short dma_flags;
655 650
656 __be16 protocol; 651 __be16 protocol;
652 unsigned int ip_off;
653 unsigned int tcp_off;
657 unsigned header_len; 654 unsigned header_len;
658 int full_packet_size; 655 unsigned int ip_base_len;
659}; 656};
660 657
661 658
@@ -687,91 +684,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
687 return protocol; 684 return protocol;
688} 685}
689 686
690 687static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
691/* 688 struct efx_tx_buffer *buffer, unsigned int len)
692 * Allocate a page worth of efx_tso_header structures, and string them
693 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
694 */
695static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
696{ 689{
697 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 690 u8 *result;
698 struct efx_tso_header *tsoh;
699 dma_addr_t dma_addr;
700 u8 *base_kva, *kva;
701
702 base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
703 if (base_kva == NULL) {
704 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
705 "Unable to allocate page for TSO headers\n");
706 return -ENOMEM;
707 }
708
709 /* dma_alloc_coherent() allocates pages. */
710 EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
711
712 for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
713 tsoh = (struct efx_tso_header *)kva;
714 tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
715 tsoh->next = tx_queue->tso_headers_free;
716 tx_queue->tso_headers_free = tsoh;
717 }
718
719 return 0;
720}
721 691
692 EFX_BUG_ON_PARANOID(buffer->len);
693 EFX_BUG_ON_PARANOID(buffer->flags);
694 EFX_BUG_ON_PARANOID(buffer->unmap_len);
722 695
723/* Free up a TSO header, and all others in the same page. */ 696 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
724static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, 697 unsigned index =
725 struct efx_tso_header *tsoh, 698 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
726 struct device *dma_dev) 699 struct efx_buffer *page_buf =
727{ 700 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
728 struct efx_tso_header **p; 701 unsigned offset =
729 unsigned long base_kva; 702 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
730 dma_addr_t base_dma; 703
731 704 if (unlikely(!page_buf->addr) &&
732 base_kva = (unsigned long)tsoh & PAGE_MASK; 705 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
733 base_dma = tsoh->dma_addr & PAGE_MASK; 706 return NULL;
734 707
735 p = &tx_queue->tso_headers_free; 708 result = (u8 *)page_buf->addr + offset;
736 while (*p != NULL) { 709 buffer->dma_addr = page_buf->dma_addr + offset;
737 if (((unsigned long)*p & PAGE_MASK) == base_kva) 710 buffer->flags = EFX_TX_BUF_CONT;
738 *p = (*p)->next; 711 } else {
739 else 712 tx_queue->tso_long_headers++;
740 p = &(*p)->next;
741 }
742
743 dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
744}
745 713
746static struct efx_tso_header * 714 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
747efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) 715 if (unlikely(!buffer->heap_buf))
748{ 716 return NULL;
749 struct efx_tso_header *tsoh; 717 result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
750 718 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
751 tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
752 if (unlikely(!tsoh))
753 return NULL;
754
755 tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
756 TSOH_BUFFER(tsoh), header_len,
757 DMA_TO_DEVICE);
758 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
759 tsoh->dma_addr))) {
760 kfree(tsoh);
761 return NULL;
762 } 719 }
763 720
764 tsoh->unmap_len = header_len; 721 buffer->len = len;
765 return tsoh;
766}
767 722
768static void 723 return result;
769efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
770{
771 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
772 tsoh->dma_addr, tsoh->unmap_len,
773 DMA_TO_DEVICE);
774 kfree(tsoh);
775} 724}
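(Worked example of the slot arithmetic above, assuming 4 KiB pages and
NET_IP_ALIGN == 2: with insert_count & ptr_mask == 70, index = 70 / 2 = 35, so
the header lands in tsoh_page[35 / 32] = tsoh_page[1] at byte offset
128 * (35 % 32) + 2 = 386. Headers longer than TSOH_STD_SIZE - TSOH_OFFSET =
126 bytes take the kmalloc() path instead.)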
776 725
777/** 726/**
@@ -781,47 +730,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
781 * @len: Length of fragment 730 * @len: Length of fragment
782 * @final_buffer: The final buffer inserted into the queue 731 * @final_buffer: The final buffer inserted into the queue
783 * 732 *
784 * Push descriptors onto the TX queue. Return 0 on success or 1 if 733 * Push descriptors onto the TX queue.
785 * @tx_queue full.
786 */ 734 */
787static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 735static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
788 dma_addr_t dma_addr, unsigned len, 736 dma_addr_t dma_addr, unsigned len,
789 struct efx_tx_buffer **final_buffer) 737 struct efx_tx_buffer **final_buffer)
790{ 738{
791 struct efx_tx_buffer *buffer; 739 struct efx_tx_buffer *buffer;
792 struct efx_nic *efx = tx_queue->efx; 740 struct efx_nic *efx = tx_queue->efx;
793 unsigned dma_len, fill_level, insert_ptr; 741 unsigned dma_len, insert_ptr;
794 int q_space;
795 742
796 EFX_BUG_ON_PARANOID(len <= 0); 743 EFX_BUG_ON_PARANOID(len <= 0);
797 744
798 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
799 /* -1 as there is no way to represent all descriptors used */
800 q_space = efx->txq_entries - 1 - fill_level;
801
802 while (1) { 745 while (1) {
803 if (unlikely(q_space-- <= 0)) {
804 /* It might be that completions have happened
805 * since the xmit path last checked. Update
806 * the xmit path's copy of read_count.
807 */
808 netif_tx_stop_queue(tx_queue->core_txq);
809 /* This memory barrier protects the change of
810 * queue state from the access of read_count. */
811 smp_mb();
812 tx_queue->old_read_count =
813 ACCESS_ONCE(tx_queue->read_count);
814 fill_level = (tx_queue->insert_count
815 - tx_queue->old_read_count);
816 q_space = efx->txq_entries - 1 - fill_level;
817 if (unlikely(q_space-- <= 0)) {
818 *final_buffer = NULL;
819 return 1;
820 }
821 smp_mb();
822 netif_tx_start_queue(tx_queue->core_txq);
823 }
824
825 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 746 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
826 buffer = &tx_queue->buffer[insert_ptr]; 747 buffer = &tx_queue->buffer[insert_ptr];
827 ++tx_queue->insert_count; 748 ++tx_queue->insert_count;
@@ -830,12 +751,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
830 tx_queue->read_count >= 751 tx_queue->read_count >=
831 efx->txq_entries); 752 efx->txq_entries);
832 753
833 efx_tsoh_free(tx_queue, buffer);
834 EFX_BUG_ON_PARANOID(buffer->len); 754 EFX_BUG_ON_PARANOID(buffer->len);
835 EFX_BUG_ON_PARANOID(buffer->unmap_len); 755 EFX_BUG_ON_PARANOID(buffer->unmap_len);
836 EFX_BUG_ON_PARANOID(buffer->skb); 756 EFX_BUG_ON_PARANOID(buffer->flags);
837 EFX_BUG_ON_PARANOID(!buffer->continuation);
838 EFX_BUG_ON_PARANOID(buffer->tsoh);
839 757
840 buffer->dma_addr = dma_addr; 758 buffer->dma_addr = dma_addr;
841 759
@@ -845,7 +763,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
845 if (dma_len >= len) 763 if (dma_len >= len)
846 break; 764 break;
847 765
848 buffer->len = dma_len; /* Don't set the other members */ 766 buffer->len = dma_len;
767 buffer->flags = EFX_TX_BUF_CONT;
849 dma_addr += dma_len; 768 dma_addr += dma_len;
850 len -= dma_len; 769 len -= dma_len;
851 } 770 }
@@ -853,7 +772,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
853 EFX_BUG_ON_PARANOID(!len); 772 EFX_BUG_ON_PARANOID(!len);
854 buffer->len = len; 773 buffer->len = len;
855 *final_buffer = buffer; 774 *final_buffer = buffer;
856 return 0;
857} 775}
858 776
859 777
@@ -864,54 +782,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
864 * a single fragment, and we know it doesn't cross a page boundary. It 782 * a single fragment, and we know it doesn't cross a page boundary. It
865 * also allows us to not worry about end-of-packet etc. 783 * also allows us to not worry about end-of-packet etc.
866 */ 784 */
867static void efx_tso_put_header(struct efx_tx_queue *tx_queue, 785static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
868 struct efx_tso_header *tsoh, unsigned len) 786 struct efx_tx_buffer *buffer, u8 *header)
869{ 787{
870 struct efx_tx_buffer *buffer; 788 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
871 789 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
872 buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; 790 header, buffer->len,
873 efx_tsoh_free(tx_queue, buffer); 791 DMA_TO_DEVICE);
874 EFX_BUG_ON_PARANOID(buffer->len); 792 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
875 EFX_BUG_ON_PARANOID(buffer->unmap_len); 793 buffer->dma_addr))) {
876 EFX_BUG_ON_PARANOID(buffer->skb); 794 kfree(buffer->heap_buf);
877 EFX_BUG_ON_PARANOID(!buffer->continuation); 795 buffer->len = 0;
878 EFX_BUG_ON_PARANOID(buffer->tsoh); 796 buffer->flags = 0;
879 buffer->len = len; 797 return -ENOMEM;
880 buffer->dma_addr = tsoh->dma_addr; 798 }
881 buffer->tsoh = tsoh; 799 buffer->unmap_len = buffer->len;
800 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
801 }
882 802
883 ++tx_queue->insert_count; 803 ++tx_queue->insert_count;
804 return 0;
884} 805}
885 806
886 807
 887/* Remove descriptors put into a tx_queue. */ 808/* Remove buffers put into a tx_queue. The buffers must not have
 809 * an skb attached.
 810 */
888static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 811static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
889{ 812{
890 struct efx_tx_buffer *buffer; 813 struct efx_tx_buffer *buffer;
891 dma_addr_t unmap_addr;
892 814
893 /* Work backwards until we hit the original insert pointer value */ 815 /* Work backwards until we hit the original insert pointer value */
894 while (tx_queue->insert_count != tx_queue->write_count) { 816 while (tx_queue->insert_count != tx_queue->write_count) {
895 --tx_queue->insert_count; 817 --tx_queue->insert_count;
896 buffer = &tx_queue->buffer[tx_queue->insert_count & 818 buffer = &tx_queue->buffer[tx_queue->insert_count &
897 tx_queue->ptr_mask]; 819 tx_queue->ptr_mask];
898 efx_tsoh_free(tx_queue, buffer); 820 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
899 EFX_BUG_ON_PARANOID(buffer->skb);
900 if (buffer->unmap_len) {
901 unmap_addr = (buffer->dma_addr + buffer->len -
902 buffer->unmap_len);
903 if (buffer->unmap_single)
904 dma_unmap_single(&tx_queue->efx->pci_dev->dev,
905 unmap_addr, buffer->unmap_len,
906 DMA_TO_DEVICE);
907 else
908 dma_unmap_page(&tx_queue->efx->pci_dev->dev,
909 unmap_addr, buffer->unmap_len,
910 DMA_TO_DEVICE);
911 buffer->unmap_len = 0;
912 }
913 buffer->len = 0;
914 buffer->continuation = true;
915 } 821 }
916} 822}
917 823
@@ -919,17 +825,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
919/* Parse the SKB header and initialise state. */ 825/* Parse the SKB header and initialise state. */
920static void tso_start(struct tso_state *st, const struct sk_buff *skb) 826static void tso_start(struct tso_state *st, const struct sk_buff *skb)
921{ 827{
922 /* All ethernet/IP/TCP headers combined size is TCP header size 828 st->ip_off = skb_network_header(skb) - skb->data;
923 * plus offset of TCP header relative to start of packet. 829 st->tcp_off = skb_transport_header(skb) - skb->data;
924 */ 830 st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
925 st->header_len = ((tcp_hdr(skb)->doff << 2u) 831 if (st->protocol == htons(ETH_P_IP)) {
926 + PTR_DIFF(tcp_hdr(skb), skb->data)); 832 st->ip_base_len = st->header_len - st->ip_off;
927 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
928
929 if (st->protocol == htons(ETH_P_IP))
930 st->ipv4_id = ntohs(ip_hdr(skb)->id); 833 st->ipv4_id = ntohs(ip_hdr(skb)->id);
931 else 834 } else {
835 st->ip_base_len = st->header_len - st->tcp_off;
932 st->ipv4_id = 0; 836 st->ipv4_id = 0;
837 }
933 st->seqnum = ntohl(tcp_hdr(skb)->seq); 838 st->seqnum = ntohl(tcp_hdr(skb)->seq);
934 839
935 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 840 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +843,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
938 843
939 st->out_len = skb->len - st->header_len; 844 st->out_len = skb->len - st->header_len;
940 st->unmap_len = 0; 845 st->unmap_len = 0;
941 st->unmap_single = false; 846 st->dma_flags = 0;
942} 847}
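(Worked example for the offsets above: an untagged IPv4/TCP frame with 20-byte
IP and TCP headers gives ip_off = 14, tcp_off = 34 and header_len = 54, so
ip_base_len = 54 - 14 = 40 and a full 1460-byte segment later yields
tot_len = 40 + 1460 = 1500. For the same layout over IPv6, ip_base_len =
header_len - tcp_off = 20, matching payload_len, which excludes the fixed
IPv6 header.)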
943 848
944static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 849static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +852,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
947 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, 852 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
948 skb_frag_size(frag), DMA_TO_DEVICE); 853 skb_frag_size(frag), DMA_TO_DEVICE);
949 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 854 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
950 st->unmap_single = false; 855 st->dma_flags = 0;
951 st->unmap_len = skb_frag_size(frag); 856 st->unmap_len = skb_frag_size(frag);
952 st->in_len = skb_frag_size(frag); 857 st->in_len = skb_frag_size(frag);
953 st->dma_addr = st->unmap_addr; 858 st->dma_addr = st->unmap_addr;
@@ -965,7 +870,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
965 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, 870 st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
966 len, DMA_TO_DEVICE); 871 len, DMA_TO_DEVICE);
967 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 872 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
968 st->unmap_single = true; 873 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
969 st->unmap_len = len; 874 st->unmap_len = len;
970 st->in_len = len; 875 st->in_len = len;
971 st->dma_addr = st->unmap_addr; 876 st->dma_addr = st->unmap_addr;
@@ -982,20 +887,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
982 * @st: TSO state 887 * @st: TSO state
983 * 888 *
984 * Form descriptors for the current fragment, until we reach the end 889 * Form descriptors for the current fragment, until we reach the end
985 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 890 * of fragment or end-of-packet.
986 * space in @tx_queue.
987 */ 891 */
988static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 892static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
989 const struct sk_buff *skb, 893 const struct sk_buff *skb,
990 struct tso_state *st) 894 struct tso_state *st)
991{ 895{
992 struct efx_tx_buffer *buffer; 896 struct efx_tx_buffer *buffer;
993 int n, end_of_packet, rc; 897 int n;
994 898
995 if (st->in_len == 0) 899 if (st->in_len == 0)
996 return 0; 900 return;
997 if (st->packet_space == 0) 901 if (st->packet_space == 0)
998 return 0; 902 return;
999 903
1000 EFX_BUG_ON_PARANOID(st->in_len <= 0); 904 EFX_BUG_ON_PARANOID(st->in_len <= 0);
1001 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 905 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +910,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1006 st->out_len -= n; 910 st->out_len -= n;
1007 st->in_len -= n; 911 st->in_len -= n;
1008 912
1009 rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); 913 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1010 if (likely(rc == 0)) {
1011 if (st->out_len == 0)
1012 /* Transfer ownership of the skb */
1013 buffer->skb = skb;
1014 914
1015 end_of_packet = st->out_len == 0 || st->packet_space == 0; 915 if (st->out_len == 0) {
1016 buffer->continuation = !end_of_packet; 916 /* Transfer ownership of the skb */
917 buffer->skb = skb;
918 buffer->flags = EFX_TX_BUF_SKB;
919 } else if (st->packet_space != 0) {
920 buffer->flags = EFX_TX_BUF_CONT;
921 }
1017 922
1018 if (st->in_len == 0) { 923 if (st->in_len == 0) {
1019 /* Transfer ownership of the DMA mapping */ 924 /* Transfer ownership of the DMA mapping */
1020 buffer->unmap_len = st->unmap_len; 925 buffer->unmap_len = st->unmap_len;
1021 buffer->unmap_single = st->unmap_single; 926 buffer->flags |= st->dma_flags;
1022 st->unmap_len = 0; 927 st->unmap_len = 0;
1023 }
1024 } 928 }
1025 929
1026 st->dma_addr += n; 930 st->dma_addr += n;
1027 return rc;
1028} 931}
1029 932
1030 933
@@ -1035,36 +938,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1035 * @st: TSO state 938 * @st: TSO state
1036 * 939 *
1037 * Generate a new header and prepare for the new packet. Return 0 on 940 * Generate a new header and prepare for the new packet. Return 0 on
1038 * success, or -1 if failed to alloc header. 941 * success, or -%ENOMEM if failed to alloc header.
1039 */ 942 */
1040static int tso_start_new_packet(struct efx_tx_queue *tx_queue, 943static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1041 const struct sk_buff *skb, 944 const struct sk_buff *skb,
1042 struct tso_state *st) 945 struct tso_state *st)
1043{ 946{
1044 struct efx_tso_header *tsoh; 947 struct efx_tx_buffer *buffer =
948 &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
1045 struct tcphdr *tsoh_th; 949 struct tcphdr *tsoh_th;
1046 unsigned ip_length; 950 unsigned ip_length;
1047 u8 *header; 951 u8 *header;
952 int rc;
1048 953
1049 /* Allocate a DMA-mapped header buffer. */ 954 /* Allocate and insert a DMA-mapped header buffer. */
1050 if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { 955 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1051 if (tx_queue->tso_headers_free == NULL) { 956 if (!header)
1052 if (efx_tsoh_block_alloc(tx_queue)) 957 return -ENOMEM;
1053 return -1;
1054 }
1055 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1056 tsoh = tx_queue->tso_headers_free;
1057 tx_queue->tso_headers_free = tsoh->next;
1058 tsoh->unmap_len = 0;
1059 } else {
1060 tx_queue->tso_long_headers++;
1061 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1062 if (unlikely(!tsoh))
1063 return -1;
1064 }
1065 958
1066 header = TSOH_BUFFER(tsoh); 959 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1067 tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
1068 960
1069 /* Copy and update the headers. */ 961 /* Copy and update the headers. */
1070 memcpy(header, skb->data, st->header_len); 962 memcpy(header, skb->data, st->header_len);
@@ -1073,19 +965,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1073 st->seqnum += skb_shinfo(skb)->gso_size; 965 st->seqnum += skb_shinfo(skb)->gso_size;
1074 if (st->out_len > skb_shinfo(skb)->gso_size) { 966 if (st->out_len > skb_shinfo(skb)->gso_size) {
1075 /* This packet will not finish the TSO burst. */ 967 /* This packet will not finish the TSO burst. */
1076 ip_length = st->full_packet_size - ETH_HDR_LEN(skb); 968 st->packet_space = skb_shinfo(skb)->gso_size;
1077 tsoh_th->fin = 0; 969 tsoh_th->fin = 0;
1078 tsoh_th->psh = 0; 970 tsoh_th->psh = 0;
1079 } else { 971 } else {
1080 /* This packet will be the last in the TSO burst. */ 972 /* This packet will be the last in the TSO burst. */
1081 ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; 973 st->packet_space = st->out_len;
1082 tsoh_th->fin = tcp_hdr(skb)->fin; 974 tsoh_th->fin = tcp_hdr(skb)->fin;
1083 tsoh_th->psh = tcp_hdr(skb)->psh; 975 tsoh_th->psh = tcp_hdr(skb)->psh;
1084 } 976 }
977 ip_length = st->ip_base_len + st->packet_space;
1085 978
1086 if (st->protocol == htons(ETH_P_IP)) { 979 if (st->protocol == htons(ETH_P_IP)) {
1087 struct iphdr *tsoh_iph = 980 struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
1088 (struct iphdr *)(header + SKB_IPV4_OFF(skb));
1089 981
1090 tsoh_iph->tot_len = htons(ip_length); 982 tsoh_iph->tot_len = htons(ip_length);
1091 983
@@ -1094,16 +986,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1094 st->ipv4_id++; 986 st->ipv4_id++;
1095 } else { 987 } else {
1096 struct ipv6hdr *tsoh_iph = 988 struct ipv6hdr *tsoh_iph =
1097 (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); 989 (struct ipv6hdr *)(header + st->ip_off);
1098 990
1099 tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); 991 tsoh_iph->payload_len = htons(ip_length);
1100 } 992 }
1101 993
1102 st->packet_space = skb_shinfo(skb)->gso_size; 994 rc = efx_tso_put_header(tx_queue, buffer, header);
1103 ++tx_queue->tso_packets; 995 if (unlikely(rc))
996 return rc;
1104 997
1105 /* Form a descriptor for this header. */ 998 ++tx_queue->tso_packets;
1106 efx_tso_put_header(tx_queue, tsoh, st->header_len);
1107 999
1108 return 0; 1000 return 0;
1109} 1001}
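(Worked example of the per-segment bookkeeping above: with gso_size = 1460 and
out_len = 4000 after the headers, three headers are generated with
packet_space of 1460, 1460 and 1080; seq advances by gso_size between them,
ipv4_id increments once per IPv4 segment, and only the final header copies
fin and psh from the original skb.)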
@@ -1118,13 +1010,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1118 * 1010 *
1119 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 1011 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
1120 * @skb was not enqueued. In all cases @skb is consumed. Return 1012 * @skb was not enqueued. In all cases @skb is consumed. Return
1121 * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 1013 * %NETDEV_TX_OK.
1122 */ 1014 */
1123static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1015static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1124 struct sk_buff *skb) 1016 struct sk_buff *skb)
1125{ 1017{
1126 struct efx_nic *efx = tx_queue->efx; 1018 struct efx_nic *efx = tx_queue->efx;
1127 int frag_i, rc, rc2 = NETDEV_TX_OK; 1019 int frag_i, rc;
1128 struct tso_state state; 1020 struct tso_state state;
1129 1021
1130 /* Find the packet protocol and sanity-check it */ 1022 /* Find the packet protocol and sanity-check it */
@@ -1156,11 +1048,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1156 goto mem_err; 1048 goto mem_err;
1157 1049
1158 while (1) { 1050 while (1) {
1159 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1051 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1160 if (unlikely(rc)) {
1161 rc2 = NETDEV_TX_BUSY;
1162 goto unwind;
1163 }
1164 1052
1165 /* Move onto the next fragment? */ 1053 /* Move onto the next fragment? */
1166 if (state.in_len == 0) { 1054 if (state.in_len == 0) {
@@ -1184,6 +1072,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1184 /* Pass off to hardware */ 1072 /* Pass off to hardware */
1185 efx_nic_push_buffers(tx_queue); 1073 efx_nic_push_buffers(tx_queue);
1186 1074
1075 efx_tx_maybe_stop_queue(tx_queue);
1076
1187 tx_queue->tso_bursts++; 1077 tx_queue->tso_bursts++;
1188 return NETDEV_TX_OK; 1078 return NETDEV_TX_OK;
1189 1079
@@ -1192,10 +1082,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1192 "Out of memory for TSO headers, or DMA mapping error\n"); 1082 "Out of memory for TSO headers, or DMA mapping error\n");
1193 dev_kfree_skb_any(skb); 1083 dev_kfree_skb_any(skb);
1194 1084
1195 unwind:
1196 /* Free the DMA mapping we were in the process of writing out */ 1085 /* Free the DMA mapping we were in the process of writing out */
1197 if (state.unmap_len) { 1086 if (state.unmap_len) {
1198 if (state.unmap_single) 1087 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
1199 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, 1088 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1200 state.unmap_len, DMA_TO_DEVICE); 1089 state.unmap_len, DMA_TO_DEVICE);
1201 else 1090 else
@@ -1204,25 +1093,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1204 } 1093 }
1205 1094
1206 efx_enqueue_unwind(tx_queue); 1095 efx_enqueue_unwind(tx_queue);
1207 return rc2; 1096 return NETDEV_TX_OK;
1208}
1209
1210
1211/*
1212 * Free up all TSO datastructures associated with tx_queue. This
1213 * routine should be called only once the tx_queue is both empty and
1214 * will no longer be used.
1215 */
1216static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1217{
1218 unsigned i;
1219
1220 if (tx_queue->buffer) {
1221 for (i = 0; i <= tx_queue->ptr_mask; ++i)
1222 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1223 }
1224
1225 while (tx_queue->tso_headers_free != NULL)
1226 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1227 &tx_queue->efx->pci_dev->dev);
1228} 1097}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e2d083228f3a..719be3912aa9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __COMMON_H__
26#define __COMMON_H__
27
25#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
26#include <linux/netdevice.h> 29#include <linux/netdevice.h>
27#include <linux/phy.h> 30#include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
366 369
367extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 370extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
368extern const struct stmmac_ring_mode_ops ring_mode_ops; 371extern const struct stmmac_ring_mode_ops ring_mode_ops;
372
373#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9820ec842cc0..223adf95fd03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -20,6 +20,10 @@
20 20
21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
22*******************************************************************************/ 22*******************************************************************************/
23
24#ifndef __DESCS_H__
25#define __DESCS_H__
26
23struct dma_desc { 27struct dma_desc {
24 /* Receive descriptor */ 28 /* Receive descriptor */
25 union { 29 union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
166 * is not calculated */ 170 * is not calculated */
167 cic_full = 3, /* IP header and pseudoheader */ 171 cic_full = 3, /* IP header and pseudoheader */
168}; 172};
173
174#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index dd8d6e19dff6..7ee9499a6e38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -27,6 +27,9 @@
27 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 27 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
28*******************************************************************************/ 28*******************************************************************************/
29 29
30#ifndef __DESC_COM_H__
31#define __DESC_COM_H__
32
30#if defined(CONFIG_STMMAC_RING) 33#if defined(CONFIG_STMMAC_RING)
31static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 34static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
32{ 35{
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
124 p->des01.tx.buffer1_size = len; 127 p->des01.tx.buffer1_size = len;
125} 128}
126#endif 129#endif
130
131#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7c6d857a9cc7..2ec6aeae349e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __DWMAC100_H__
26#define __DWMAC100_H__
27
25#include <linux/phy.h> 28#include <linux/phy.h>
26#include "common.h" 29#include "common.h"
27 30
@@ -119,3 +122,5 @@ enum ttc_control {
 119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */ 122
120 123
121extern const struct stmmac_dma_ops dwmac100_dma_ops; 124extern const struct stmmac_dma_ops dwmac100_dma_ops;
125
126#endif /* __DWMAC100_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index f90fcb5f9573..0e4cacedc1f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -19,6 +19,8 @@
19 19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22#ifndef __DWMAC1000_H__
23#define __DWMAC1000_H__
22 24
23#include <linux/phy.h> 25#include <linux/phy.h>
24#include "common.h" 26#include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
229#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 231#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
230 232
231/* Synopsys Core versions */ 233/* Synopsys Core versions */
232#define DWMAC_CORE_3_40 34 234#define DWMAC_CORE_3_40 0x34
233 235
234extern const struct stmmac_dma_ops dwmac1000_dma_ops; 236extern const struct stmmac_dma_ops dwmac1000_dma_ops;
237#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e678ce39d014..e49c9a0fd6ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __DWMAC_DMA_H__
26#define __DWMAC_DMA_H__
27
25/* DMA CRS Control and Status Register Mapping */ 28/* DMA CRS Control and Status Register Mapping */
26#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ 29#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
27#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ 30#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
109extern void dwmac_dma_stop_rx(void __iomem *ioaddr); 112extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
110extern int dwmac_dma_interrupt(void __iomem *ioaddr, 113extern int dwmac_dma_interrupt(void __iomem *ioaddr,
111 struct stmmac_extra_stats *x); 114 struct stmmac_extra_stats *x);
115
116#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a38352024cb8..67995ef25251 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#ifndef __MMC_H__
26#define __MMC_H__
27
25/* MMC control register */ 28/* MMC control register */
26/* When set, all counter are reset */ 29/* When set, all counter are reset */
27#define MMC_CNTRL_COUNTER_RESET 0x1 30#define MMC_CNTRL_COUNTER_RESET 0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
129extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); 132extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
130extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); 133extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
131extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); 134extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
135
136#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index c07cfe989f6e..0c74a702d461 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -33,7 +33,7 @@
33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ 33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ 34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ 35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
36#define MMC_DEFAUL_MASK 0xffffffff 36#define MMC_DEFAULT_MASK 0xffffffff
37 37
38/* MMC TX counter registers */ 38/* MMC TX counter registers */
39 39
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 147/* To mask all interrupts. */ 147
148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) 148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
149{ 149{
150 writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); 150 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
151 writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); 151 writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
152} 152}
153 153
 154/* This reads the MAC core counters (if actually supported). 154
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f2d3665430ad..e872e1da3137 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,6 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#ifndef __STMMAC_H__
24#define __STMMAC_H__
25
23#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "March_2012" 27#define DRV_MODULE_VERSION "March_2012"
25 28
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
166{ 169{
167} 170}
168#endif /* CONFIG_STMMAC_PCI */ 171#endif /* CONFIG_STMMAC_PCI */
172
173#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ade108232048..0376a5e6b2bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
177 new_bus->write = &stmmac_mdio_write; 177 new_bus->write = &stmmac_mdio_write;
178 new_bus->reset = &stmmac_mdio_reset; 178 new_bus->reset = &stmmac_mdio_reset;
179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", 179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
180 new_bus->name, mdio_bus_data->bus_id); 180 new_bus->name, priv->plat->bus_id);
181 new_bus->priv = ndev; 181 new_bus->priv = ndev;
182 new_bus->irq = irqlist; 182 new_bus->irq = irqlist;
183 new_bus->phy_mask = mdio_bus_data->phy_mask; 183 new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
213 * and no PHY number was provided to the MAC, 213 * and no PHY number was provided to the MAC,
214 * use the one probed here. 214 * use the one probed here.
215 */ 215 */
216 if ((priv->plat->bus_id == mdio_bus_data->bus_id) && 216 if (priv->plat->phy_addr == -1)
217 (priv->plat->phy_addr == -1))
218 priv->plat->phy_addr = addr; 217 priv->plat->phy_addr = addr;
219 218
220 act = (priv->plat->bus_id == mdio_bus_data->bus_id) && 219 act = (priv->plat->phy_addr == addr);
221 (priv->plat->phy_addr == addr);
222 switch (phydev->irq) { 220 switch (phydev->irq) {
223 case PHY_POLL: 221 case PHY_POLL:
224 irq_str = "POLL"; 222 irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
258{ 256{
259 struct stmmac_priv *priv = netdev_priv(ndev); 257 struct stmmac_priv *priv = netdev_priv(ndev);
260 258
259 if (!priv->mii)
260 return 0;
261
261 mdiobus_unregister(priv->mii); 262 mdiobus_unregister(priv->mii);
262 priv->mii->priv = NULL; 263 priv->mii->priv = NULL;
263 mdiobus_free(priv->mii); 264 mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 13afb8edfadc..1f069b0f6af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
40 plat_dat.has_gmac = 1; 40 plat_dat.has_gmac = 1;
41 plat_dat.force_sf_dma_mode = 1; 41 plat_dat.force_sf_dma_mode = 1;
42 42
43 mdio_data.bus_id = 1;
44 mdio_data.phy_reset = NULL; 43 mdio_data.phy_reset = NULL;
45 mdio_data.phy_mask = 0; 44 mdio_data.phy_mask = 0;
46 plat_dat.mdio_bus_data = &mdio_data; 45 plat_dat.mdio_bus_data = &mdio_data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b93245c11995..ed112b55ae7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
78{ 78{
79 int ret = 0; 79 int ret = 0;
80 struct resource *res; 80 struct resource *res;
81 struct device *dev = &pdev->dev;
81 void __iomem *addr = NULL; 82 void __iomem *addr = NULL;
82 struct stmmac_priv *priv = NULL; 83 struct stmmac_priv *priv = NULL;
83 struct plat_stmmacenet_data *plat_dat = NULL; 84 struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
87 if (!res) 88 if (!res)
88 return -ENODEV; 89 return -ENODEV;
89 90
90 if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 91 addr = devm_request_and_ioremap(dev, res);
91 pr_err("%s: ERROR: memory allocation failed"
92 "cannot get the I/O addr 0x%x\n",
93 __func__, (unsigned int)res->start);
94 return -EBUSY;
95 }
96
97 addr = ioremap(res->start, resource_size(res));
98 if (!addr) { 92 if (!addr) {
99 pr_err("%s: ERROR: memory mapping failed", __func__); 93 pr_err("%s: ERROR: memory mapping failed", __func__);
100 ret = -ENOMEM; 94 return -ENOMEM;
101 goto out_release_region;
102 } 95 }
103 96
104 if (pdev->dev.of_node) { 97 if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
107 GFP_KERNEL); 100 GFP_KERNEL);
108 if (!plat_dat) { 101 if (!plat_dat) {
109 pr_err("%s: ERROR: no memory", __func__); 102 pr_err("%s: ERROR: no memory", __func__);
110 ret = -ENOMEM; 103 return -ENOMEM;
111 goto out_unmap;
112 } 104 }
113 105
114 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); 106 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
115 if (ret) { 107 if (ret) {
116 pr_err("%s: main dt probe failed", __func__); 108 pr_err("%s: main dt probe failed", __func__);
117 goto out_unmap; 109 return ret;
118 } 110 }
119 } else { 111 } else {
120 plat_dat = pdev->dev.platform_data; 112 plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
124 if (plat_dat->init) { 116 if (plat_dat->init) {
125 ret = plat_dat->init(pdev); 117 ret = plat_dat->init(pdev);
126 if (unlikely(ret)) 118 if (unlikely(ret))
127 goto out_unmap; 119 return ret;
128 } 120 }
129 121
130 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); 122 priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
131 if (!priv) { 123 if (!priv) {
132 pr_err("%s: main driver probe failed", __func__); 124 pr_err("%s: main driver probe failed", __func__);
133 goto out_unmap; 125 return -ENODEV;
134 } 126 }
135 127
136 /* Get MAC address if available (DT) */ 128 /* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
142 if (priv->dev->irq == -ENXIO) { 134 if (priv->dev->irq == -ENXIO) {
143 pr_err("%s: ERROR: MAC IRQ configuration " 135 pr_err("%s: ERROR: MAC IRQ configuration "
144 "information not found\n", __func__); 136 "information not found\n", __func__);
145 ret = -ENXIO; 137 return -ENXIO;
146 goto out_unmap;
147 } 138 }
148 139
149 /* 140 /*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
165 pr_debug("STMMAC platform driver registration completed"); 156 pr_debug("STMMAC platform driver registration completed");
166 157
167 return 0; 158 return 0;
168
169out_unmap:
170 iounmap(addr);
171 platform_set_drvdata(pdev, NULL);
172
173out_release_region:
174 release_mem_region(res->start, resource_size(res));
175
176 return ret;
177} 159}
178 160
179/** 161/**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
186{ 168{
187 struct net_device *ndev = platform_get_drvdata(pdev); 169 struct net_device *ndev = platform_get_drvdata(pdev);
188 struct stmmac_priv *priv = netdev_priv(ndev); 170 struct stmmac_priv *priv = netdev_priv(ndev);
189 struct resource *res;
190 int ret = stmmac_dvr_remove(ndev); 171 int ret = stmmac_dvr_remove(ndev);
191 172
192 if (priv->plat->exit) 173 if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
194 175
195 platform_set_drvdata(pdev, NULL); 176 platform_set_drvdata(pdev, NULL);
196 177
197 iounmap((void __force __iomem *)priv->ioaddr);
198 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
199 release_mem_region(res->start, resource_size(res));
200
201 return ret; 178 return ret;
202} 179}
203 180
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
index 6863590d184b..aea9b14cdfbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
@@ -21,6 +21,8 @@
21 21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24#ifndef __STMMAC_TIMER_H__
25#define __STMMAC_TIMER_H__
24 26
25struct stmmac_timer { 27struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq); 28 void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
40extern int tmu2_register_user(void *fnt, void *data); 42extern int tmu2_register_user(void *fnt, void *data);
41extern void tmu2_unregister_user(void); 43extern void tmu2_unregister_user(void);
42#endif 44#endif
45
46#endif /* __STMMAC_TIMER_H__ */
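The stmmac_timer.h hunks add the standard include guard; without one, a file that pulls the header in twice (directly or through another header) redefines struct stmmac_timer and breaks the build. The canonical shape, as the patch applies it:

#ifndef __STMMAC_TIMER_H__
#define __STMMAC_TIMER_H__

/* declarations ... */

#endif /* __STMMAC_TIMER_H__ */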
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 277c93e9ff4d..8fa947a2d929 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev)
1359 } 1359 }
1360 1360
1361 data->rxskbs[i] = skb; 1361 data->rxskbs[i] = skb;
1362 data->rxskbs[i] = skb;
1363 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); 1362 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1364 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; 1363 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1365 } 1364 }
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a5826a3111a6..2c08bf6e7bf3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
637 if (data && is_valid_ether_addr(data->mac_addr)) { 637 if (data && is_valid_ether_addr(data->mac_addr)) {
638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 638 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
639 } else { 639 } else {
640 eth_random_addr(ndev->dev_addr); 640 eth_hw_addr_random(ndev);
641 ndev->addr_assign_type |= NET_ADDR_RANDOM;
642 } 641 }
643 642
644 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 643 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index bdd8891c215a..88943d90c765 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
557 if (data && is_valid_ether_addr(data->mac_addr)) { 557 if (data && is_valid_ether_addr(data->mac_addr)) {
558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); 558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
559 } else { 559 } else {
560 eth_random_addr(ndev->dev_addr); 560 eth_hw_addr_random(ndev);
561 ndev->addr_assign_type |= NET_ADDR_RANDOM;
562 } 561 }
563 562
564 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 563 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
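Both wiznet hunks fold the two-step fallback (eth_random_addr() plus a manual NET_ADDR_RANDOM update) into eth_hw_addr_random(), which assigns a random MAC and records the random assignment type in one call. A sketch of the resulting idiom (foo_set_mac() is illustrative):

#include <linux/etherdevice.h>

static void foo_set_mac(struct net_device *ndev, const u8 *mac)
{
	if (mac && is_valid_ether_addr(mac))
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(ndev); /* also marks NET_ADDR_RANDOM */
}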
diff --git a/drivers/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1fc4eefc20ed..1fc4eefc20ed 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
diff --git a/drivers/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 4f4371d3aa7d..4f4371d3aa7d 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 5d309408395d..ba753d87a32f 100644
--- a/drivers/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
952 .resume = at86rf230_resume, 952 .resume = at86rf230_resume,
953}; 953};
954 954
955static int __init at86rf230_init(void) 955module_spi_driver(at86rf230_driver);
956{
957 return spi_register_driver(&at86rf230_driver);
958}
959module_init(at86rf230_init);
960
961static void __exit at86rf230_exit(void)
962{
963 spi_unregister_driver(&at86rf230_driver);
964}
965module_exit(at86rf230_exit);
966 956
967MODULE_DESCRIPTION("AT86RF230 Transceiver Driver"); 957MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
968MODULE_LICENSE("GPL v2"); 958MODULE_LICENSE("GPL v2");
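The at86rf230 hunk is pure boilerplate removal: module_spi_driver() emits exactly the init/exit pair it deletes. Expanded by hand, the macro is roughly:

/* Approximate expansion of module_spi_driver(at86rf230_driver). */
static int __init at86rf230_driver_init(void)
{
	return spi_register_driver(&at86rf230_driver);
}
module_init(at86rf230_driver_init);

static void __exit at86rf230_driver_exit(void)
{
	spi_unregister_driver(&at86rf230_driver);
}
module_exit(at86rf230_driver_exit);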
diff --git a/drivers/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 73d453159408..7d39add7d467 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -446,4 +446,3 @@ static __exit void fake_exit(void)
446module_init(fake_init); 446module_init(fake_init);
447module_exit(fake_exit); 447module_exit(fake_exit);
448MODULE_LICENSE("GPL"); 448MODULE_LICENSE("GPL");
449
diff --git a/drivers/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index e7456fcd0913..e7456fcd0913 100644
--- a/drivers/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3090dc65a6f1..983bbf4d5ef6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
159 several child MDIO busses to a parent bus. Child bus 159 several child MDIO busses to a parent bus. Child bus
160 selection is under the control of GPIO lines. 160 selection is under the control of GPIO lines.
161 161
162config MDIO_BUS_MUX_MMIOREG
163 tristate "Support for MMIO device-controlled MDIO bus multiplexers"
164 depends on OF_MDIO
165 select MDIO_BUS_MUX
166 help
167 This module provides a driver for MDIO bus multiplexers that
168 are controlled via a simple memory-mapped device, like an FPGA.
169 The multiplexer connects one of several child MDIO busses to a
170 parent bus. Child bus selection is under the control of one of
171 the FPGA's registers.
172
173 Currently, only 8-bit registers are supported.
174
162endif # PHYLIB 175endif # PHYLIB
163 176
164config MICREL_KS8995MA 177config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6d2dc6c94f2e..426674debae4 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
28obj-$(CONFIG_AMD_PHY) += amd.o 28obj-$(CONFIG_AMD_PHY) += amd.o
29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o 29obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o 30obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
31obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7189adf54bd1..899274f2f9b1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -28,17 +28,38 @@
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/mdio-gpio.h> 29#include <linux/mdio-gpio.h>
30 30
31#ifdef CONFIG_OF_GPIO
32#include <linux/of_gpio.h> 31#include <linux/of_gpio.h>
33#include <linux/of_mdio.h> 32#include <linux/of_mdio.h>
34#include <linux/of_platform.h>
35#endif
36 33
37struct mdio_gpio_info { 34struct mdio_gpio_info {
38 struct mdiobb_ctrl ctrl; 35 struct mdiobb_ctrl ctrl;
39 int mdc, mdio; 36 int mdc, mdio;
40}; 37};
41 38
39static void *mdio_gpio_of_get_data(struct platform_device *pdev)
40{
41 struct device_node *np = pdev->dev.of_node;
42 struct mdio_gpio_platform_data *pdata;
43 int ret;
44
45 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
46 if (!pdata)
47 return NULL;
48
49 ret = of_get_gpio(np, 0);
50 if (ret < 0)
51 return NULL;
52
53 pdata->mdc = ret;
54
55 ret = of_get_gpio(np, 1);
56 if (ret < 0)
57 return NULL;
58 pdata->mdio = ret;
59
60 return pdata;
61}
62
42static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) 63static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
43{ 64{
44 struct mdio_gpio_info *bitbang = 65 struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
162 183
163static int __devinit mdio_gpio_probe(struct platform_device *pdev) 184static int __devinit mdio_gpio_probe(struct platform_device *pdev)
164{ 185{
165 struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; 186 struct mdio_gpio_platform_data *pdata;
166 struct mii_bus *new_bus; 187 struct mii_bus *new_bus;
167 int ret; 188 int ret;
168 189
190 if (pdev->dev.of_node)
191 pdata = mdio_gpio_of_get_data(pdev);
192 else
193 pdata = pdev->dev.platform_data;
194
169 if (!pdata) 195 if (!pdata)
170 return -ENODEV; 196 return -ENODEV;
171 197
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
173 if (!new_bus) 199 if (!new_bus)
174 return -ENODEV; 200 return -ENODEV;
175 201
176 ret = mdiobus_register(new_bus); 202 if (pdev->dev.of_node)
203 ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
204 else
205 ret = mdiobus_register(new_bus);
206
177 if (ret) 207 if (ret)
178 mdio_gpio_bus_deinit(&pdev->dev); 208 mdio_gpio_bus_deinit(&pdev->dev);
179 209
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
187 return 0; 217 return 0;
188} 218}
189 219
190#ifdef CONFIG_OF_GPIO 220static struct of_device_id mdio_gpio_of_match[] = {
191 221 { .compatible = "virtual,mdio-gpio", },
192static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev) 222 { /* sentinel */ }
193{
194 struct mdio_gpio_platform_data *pdata;
195 struct mii_bus *new_bus;
196 int ret;
197
198 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
199 if (!pdata)
200 return -ENOMEM;
201
202 ret = of_get_gpio(ofdev->dev.of_node, 0);
203 if (ret < 0)
204 goto out_free;
205 pdata->mdc = ret;
206
207 ret = of_get_gpio(ofdev->dev.of_node, 1);
208 if (ret < 0)
209 goto out_free;
210 pdata->mdio = ret;
211
212 new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
213 if (!new_bus)
214 goto out_free;
215
216 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
217 if (ret)
218 mdio_gpio_bus_deinit(&ofdev->dev);
219
220 return ret;
221
222out_free:
223 kfree(pdata);
224 return -ENODEV;
225}
226
227static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
228{
229 mdio_gpio_bus_destroy(&ofdev->dev);
230 kfree(ofdev->dev.platform_data);
231
232 return 0;
233}
234
235static struct of_device_id mdio_ofgpio_match[] = {
236 {
237 .compatible = "virtual,mdio-gpio",
238 },
239 {},
240};
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
242
243static struct platform_driver mdio_ofgpio_driver = {
244 .driver = {
245 .name = "mdio-ofgpio",
246 .owner = THIS_MODULE,
247 .of_match_table = mdio_ofgpio_match,
248 },
249 .probe = mdio_ofgpio_probe,
250 .remove = __devexit_p(mdio_ofgpio_remove),
251}; 223};
252 224
253static inline int __init mdio_ofgpio_init(void)
254{
255 return platform_driver_register(&mdio_ofgpio_driver);
256}
257
258static inline void mdio_ofgpio_exit(void)
259{
260 platform_driver_unregister(&mdio_ofgpio_driver);
261}
262#else
263static inline int __init mdio_ofgpio_init(void) { return 0; }
264static inline void mdio_ofgpio_exit(void) { }
265#endif /* CONFIG_OF_GPIO */
266
267static struct platform_driver mdio_gpio_driver = { 225static struct platform_driver mdio_gpio_driver = {
268 .probe = mdio_gpio_probe, 226 .probe = mdio_gpio_probe,
269 .remove = __devexit_p(mdio_gpio_remove), 227 .remove = __devexit_p(mdio_gpio_remove),
270 .driver = { 228 .driver = {
271 .name = "mdio-gpio", 229 .name = "mdio-gpio",
272 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
231 .of_match_table = mdio_gpio_of_match,
273 }, 232 },
274}; 233};
275 234
276static int __init mdio_gpio_init(void) 235static int __init mdio_gpio_init(void)
277{ 236{
278 int ret; 237 return platform_driver_register(&mdio_gpio_driver);
279
280 ret = mdio_ofgpio_init();
281 if (ret)
282 return ret;
283
284 ret = platform_driver_register(&mdio_gpio_driver);
285 if (ret)
286 mdio_ofgpio_exit();
287
288 return ret;
289} 238}
290module_init(mdio_gpio_init); 239module_init(mdio_gpio_init);
291 240
292static void __exit mdio_gpio_exit(void) 241static void __exit mdio_gpio_exit(void)
293{ 242{
294 platform_driver_unregister(&mdio_gpio_driver); 243 platform_driver_unregister(&mdio_gpio_driver);
295 mdio_ofgpio_exit();
296} 244}
297module_exit(mdio_gpio_exit); 245module_exit(mdio_gpio_exit);
298 246
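The mdio-gpio rework above merges two near-duplicate platform drivers (board-file and device-tree flavors) into one: probe builds platform data from the DT node when one is present, and registration goes through of_mdiobus_register() so the OF core can instantiate the bus node's child PHYs. Because the DT path now allocates with devm_kzalloc(), the old kfree()-based error unwinding disappears as well. The dispatch, reduced to a sketch (foo_* names are illustrative):

static int foo_probe(struct platform_device *pdev)
{
	struct foo_platform_data *pdata;
	struct mii_bus *bus;

	if (pdev->dev.of_node)
		pdata = foo_of_get_data(pdev);	 /* GPIOs parsed from DT */
	else
		pdata = pdev->dev.platform_data; /* legacy board files */
	if (!pdata)
		return -ENODEV;

	bus = foo_bus_init(&pdev->dev, pdata);
	if (!bus)
		return -ENODEV;

	/* of_mdiobus_register() additionally creates PHYs from the
	 * bus node's children. */
	return pdev->dev.of_node ?
		of_mdiobus_register(bus, pdev->dev.of_node) :
		mdiobus_register(bus);
}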
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644
index 000000000000..098239a98b19
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -0,0 +1,170 @@
1/*
2 * Simple memory-mapped device MDIO MUX driver
3 *
4 * Author: Timur Tabi <timur@freescale.com>
5 *
6 * Copyright 2012 Freescale Semiconductor, Inc.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13#include <linux/platform_device.h>
14#include <linux/device.h>
15#include <linux/of_mdio.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/phy.h>
19#include <linux/mdio-mux.h>
20
21struct mdio_mux_mmioreg_state {
22 void *mux_handle;
23 phys_addr_t phys;
24 uint8_t mask;
25};
26
27/*
28 * MDIO multiplexing switch function
29 *
30 * This function is called by the mdio-mux layer when it thinks the mdio bus
31 * multiplexer needs to switch.
32 *
33 * 'current_child' is the current value of the mux register (masked via
34 * s->mask).
35 *
36 * 'desired_child' is the value of the 'reg' property of the target child MDIO
37 * node.
38 *
39 * The first time this function is called, current_child == -1.
40 *
41 * If current_child == desired_child, then the mux is already set to the
42 * correct bus.
43 */
44static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
45 void *data)
46{
47 struct mdio_mux_mmioreg_state *s = data;
48
49 if (current_child ^ desired_child) {
50 void *p = ioremap(s->phys, 1);
51 uint8_t x, y;
52
53 if (!p)
54 return -ENOMEM;
55
56 x = ioread8(p);
57 y = (x & ~s->mask) | desired_child;
58 if (x != y) {
59 iowrite8((x & ~s->mask) | desired_child, p);
60 pr_debug("%s: %02x -> %02x\n", __func__, x, y);
61 }
62
63 iounmap(p);
64 }
65
66 return 0;
67}
68
69static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
70{
71 struct device_node *np2, *np = pdev->dev.of_node;
72 struct mdio_mux_mmioreg_state *s;
73 struct resource res;
74 const __be32 *iprop;
75 int len, ret;
76
77 dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
78
79 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
80 if (!s)
81 return -ENOMEM;
82
83 ret = of_address_to_resource(np, 0, &res);
84 if (ret) {
85 dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
86 np->full_name);
87 return ret;
88 }
89 s->phys = res.start;
90
91 if (resource_size(&res) != sizeof(uint8_t)) {
92 dev_err(&pdev->dev, "only 8-bit registers are supported\n");
93 return -EINVAL;
94 }
95
96 iprop = of_get_property(np, "mux-mask", &len);
97 if (!iprop || len != sizeof(uint32_t)) {
98 dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
99 return -ENODEV;
100 }
101 if (be32_to_cpup(iprop) > 255) {
102 dev_err(&pdev->dev, "only 8-bit registers are supported\n");
103 return -EINVAL;
104 }
105 s->mask = be32_to_cpup(iprop);
106
107 /*
108 * Verify that the 'reg' property of each child MDIO bus does not
109 * set any bits outside of the 'mask'.
110 */
111 for_each_available_child_of_node(np, np2) {
112 iprop = of_get_property(np2, "reg", &len);
113 if (!iprop || len != sizeof(uint32_t)) {
114 dev_err(&pdev->dev, "mdio-mux child node %s is "
115 "missing a 'reg' property\n", np2->full_name);
116 return -ENODEV;
117 }
118 if (be32_to_cpup(iprop) & ~s->mask) {
119 dev_err(&pdev->dev, "mdio-mux child node %s has "
120 "a 'reg' value with unmasked bits\n",
121 np2->full_name);
122 return -ENODEV;
123 }
124 }
125
126 ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
127 &s->mux_handle, s);
128 if (ret) {
129 dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
130 np->full_name);
131 return ret;
132 }
133
134 pdev->dev.platform_data = s;
135
136 return 0;
137}
138
139static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
140{
141 struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
142
143 mdio_mux_uninit(s->mux_handle);
144
145 return 0;
146}
147
148static struct of_device_id mdio_mux_mmioreg_match[] = {
149 {
150 .compatible = "mdio-mux-mmioreg",
151 },
152 {},
153};
154MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
155
156static struct platform_driver mdio_mux_mmioreg_driver = {
157 .driver = {
158 .name = "mdio-mux-mmioreg",
159 .owner = THIS_MODULE,
160 .of_match_table = mdio_mux_mmioreg_match,
161 },
162 .probe = mdio_mux_mmioreg_probe,
163 .remove = __devexit_p(mdio_mux_mmioreg_remove),
164};
165
166module_platform_driver(mdio_mux_mmioreg_driver);
167
168MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
169MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
170MODULE_LICENSE("GPL v2");
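The new mdio-mux-mmioreg driver leaves all bus bookkeeping to the mdio-mux core and supplies only the switch callback above. The contract: the core passes the currently selected child bus (-1 on first use) and the 'reg' value of the desired child, and the callback must flip only the masked bits of the register. Its heart is a masked 8-bit read-modify-write, sketched standalone (foo_set_mux() is illustrative, not the driver's API):

static void foo_set_mux(void __iomem *reg, u8 mask, u8 desired)
{
	u8 cur = ioread8(reg);
	u8 next = (cur & ~mask) | (desired & mask);

	if (cur != next)
		iowrite8(next, reg);
}

Mapping with ioremap() on each switch, as the driver does, keeps the device unmapped nearly all the time at the cost of a little switching latency.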
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c8a3f108dc94..b4f67b55ef79 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -989,6 +989,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
989 return -EBUSY; 989 return -EBUSY;
990 } 990 }
991 991
992 if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
993 vlan_uses_dev(dev)) {
994 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
995 portname);
996 return -EPERM;
997 }
998
992 err = team_dev_type_check_change(dev, port_dev); 999 err = team_dev_type_check_change(dev, port_dev);
993 if (err) 1000 if (err)
994 return err; 1001 return err;
@@ -2486,7 +2493,7 @@ static void __team_options_change_check(struct team *team)
2486 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); 2493 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2487 } 2494 }
2488 err = team_nl_send_event_options_get(team, &sel_opt_inst_list); 2495 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2489 if (err) 2496 if (err && err != -ESRCH)
2490 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", 2497 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2491 err); 2498 err);
2492} 2499}
@@ -2517,9 +2524,9 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
2517 2524
2518send_event: 2525send_event:
2519 err = team_nl_send_event_port_list_get(port->team); 2526 err = team_nl_send_event_port_list_get(port->team);
2520 if (err) 2527 if (err && err != -ESRCH)
2521 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", 2528 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2522 port->dev->name); 2529 port->dev->name, err);
2523 2530
2524} 2531}
2525 2532
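The first team.c hunk closes a configuration hole: a NETIF_F_VLAN_CHALLENGED port cannot carry tagged frames, so it must not be enslaved while the team device has VLANs configured on top (vlan_uses_dev() checks exactly that), and the add is refused with -EPERM. The other two hunks stop warning on -ESRCH from the event sends: a netlink multicast to a group with no subscribed listeners fails with -ESRCH, an expected idle-system condition rather than an error. The filter pattern, isolated:

/* Sketch: ignore the no-listeners outcome of a netlink multicast. */
err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
if (err && err != -ESRCH)	/* -ESRCH: nobody joined the group */
	netdev_warn(team->dev,
		    "Failed to send options change via netlink (err %d)\n",
		    err);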
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3a16d4fdaa05..498dc0d4ba5e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -120,8 +120,8 @@ struct tun_sock;
120struct tun_struct { 120struct tun_struct {
121 struct tun_file *tfile; 121 struct tun_file *tfile;
122 unsigned int flags; 122 unsigned int flags;
123 uid_t owner; 123 kuid_t owner;
124 gid_t group; 124 kgid_t group;
125 125
126 struct net_device *dev; 126 struct net_device *dev;
127 netdev_features_t set_features; 127 netdev_features_t set_features;
@@ -1031,8 +1031,8 @@ static void tun_setup(struct net_device *dev)
1031{ 1031{
1032 struct tun_struct *tun = netdev_priv(dev); 1032 struct tun_struct *tun = netdev_priv(dev);
1033 1033
1034 tun->owner = -1; 1034 tun->owner = INVALID_UID;
1035 tun->group = -1; 1035 tun->group = INVALID_GID;
1036 1036
1037 dev->ethtool_ops = &tun_ethtool_ops; 1037 dev->ethtool_ops = &tun_ethtool_ops;
1038 dev->destructor = tun_free_netdev; 1038 dev->destructor = tun_free_netdev;
@@ -1155,14 +1155,20 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1155 char *buf) 1155 char *buf)
1156{ 1156{
1157 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 1157 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1158 return sprintf(buf, "%d\n", tun->owner); 1158 return uid_valid(tun->owner)?
1159 sprintf(buf, "%u\n",
1160 from_kuid_munged(current_user_ns(), tun->owner)):
1161 sprintf(buf, "-1\n");
1159} 1162}
1160 1163
1161static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 1164static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1162 char *buf) 1165 char *buf)
1163{ 1166{
1164 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 1167 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1165 return sprintf(buf, "%d\n", tun->group); 1168 return gid_valid(tun->group) ?
1169 sprintf(buf, "%u\n",
1170 from_kgid_munged(current_user_ns(), tun->group)):
1171 sprintf(buf, "-1\n");
1166} 1172}
1167 1173
1168static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 1174static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
@@ -1189,8 +1195,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1189 else 1195 else
1190 return -EINVAL; 1196 return -EINVAL;
1191 1197
1192 if (((tun->owner != -1 && cred->euid != tun->owner) || 1198 if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
1193 (tun->group != -1 && !in_egroup_p(tun->group))) && 1199 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
1194 !capable(CAP_NET_ADMIN)) 1200 !capable(CAP_NET_ADMIN))
1195 return -EPERM; 1201 return -EPERM;
1196 err = security_tun_dev_attach(tun->socket.sk); 1202 err = security_tun_dev_attach(tun->socket.sk);
@@ -1374,6 +1380,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1374 void __user* argp = (void __user*)arg; 1380 void __user* argp = (void __user*)arg;
1375 struct sock_fprog fprog; 1381 struct sock_fprog fprog;
1376 struct ifreq ifr; 1382 struct ifreq ifr;
1383 kuid_t owner;
1384 kgid_t group;
1377 int sndbuf; 1385 int sndbuf;
1378 int vnet_hdr_sz; 1386 int vnet_hdr_sz;
1379 int ret; 1387 int ret;
@@ -1447,16 +1455,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1447 1455
1448 case TUNSETOWNER: 1456 case TUNSETOWNER:
1449 /* Set owner of the device */ 1457 /* Set owner of the device */
1450 tun->owner = (uid_t) arg; 1458 owner = make_kuid(current_user_ns(), arg);
1451 1459 if (!uid_valid(owner)) {
1452 tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner); 1460 ret = -EINVAL;
1461 break;
1462 }
1463 tun->owner = owner;
1464 tun_debug(KERN_INFO, tun, "owner set to %d\n",
1465 from_kuid(&init_user_ns, tun->owner));
1453 break; 1466 break;
1454 1467
1455 case TUNSETGROUP: 1468 case TUNSETGROUP:
1456 /* Set group of the device */ 1469 /* Set group of the device */
1457 tun->group= (gid_t) arg; 1470 group = make_kgid(current_user_ns(), arg);
1458 1471 if (!gid_valid(group)) {
1459 tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group); 1472 ret = -EINVAL;
1473 break;
1474 }
1475 tun->group = group;
1476 tun_debug(KERN_INFO, tun, "group set to %d\n",
1477 from_kgid(&init_user_ns, tun->group));
1460 break; 1478 break;
1461 1479
1462 case TUNSETLINK: 1480 case TUNSETLINK:
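The tun changes are the user-namespace conversion in miniature: raw uid_t/gid_t values are turned into kernel-internal kuid_t/kgid_t at the userspace boundary with make_kuid()/make_kgid() against the caller's namespace, rejected if unmapped, compared only through uid_eq()/gid_eq(), and translated back with from_kuid_munged() when displayed. The old -1 sentinel becomes INVALID_UID/INVALID_GID probed via uid_valid()/gid_valid(). The accept-side idiom, isolated (foo_set_owner() is illustrative):

#include <linux/uidgid.h>
#include <linux/cred.h>

static int foo_set_owner(kuid_t *owner, unsigned long arg)
{
	kuid_t uid = make_kuid(current_user_ns(), (uid_t)arg);

	if (!uid_valid(uid))	/* no mapping in caller's namespace */
		return -EINVAL;
	*owner = uid;
	return 0;
}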
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 025426132754..9c34d2fccfac 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
222 struct sk_buff *skb; 222 struct sk_buff *skb;
223 const struct i2400m_tlv_detailed_device_info *ddi; 223 const struct i2400m_tlv_detailed_device_info *ddi;
224 struct net_device *net_dev = i2400m->wimax_dev.net_dev; 224 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
225 const unsigned char zeromac[ETH_ALEN] = { 0 };
226 225
227 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 226 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
228 skb = i2400m_get_device_info(i2400m); 227 skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
244 "to that of boot mode's\n"); 243 "to that of boot mode's\n");
245 dev_warn(dev, "device reports %pM\n", ddi->mac_address); 244 dev_warn(dev, "device reports %pM\n", ddi->mac_address);
246 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr); 245 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
247 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) 246 if (is_zero_ether_addr(ddi->mac_address))
248 dev_err(dev, "device reports an invalid MAC address, " 247 dev_err(dev, "device reports an invalid MAC address, "
249 "not updating\n"); 248 "not updating\n");
250 else { 249 else {
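The i2400m hunk drops a stack buffer: is_zero_ether_addr() from <linux/etherdevice.h> tests the six bytes directly, replacing the local zeromac[] array and the memcmp() against it. Equivalently:

/* Sketch of the substitution; foo_mac_unset() is illustrative. */
#include <linux/etherdevice.h>

static bool foo_mac_unset(const u8 *mac)
{
	return is_zero_ether_addr(mac); /* true for 00:00:00:00:00:00 */
}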
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 689a71c1af71..154a4965be4f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1661} 1661}
1662 1662
1663/* Put adm8211_tx_hdr on skb and transmit */ 1663/* Put adm8211_tx_hdr on skb and transmit */
1664static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1664static void adm8211_tx(struct ieee80211_hw *dev,
1665 struct ieee80211_tx_control *control,
1666 struct sk_buff *skb)
1665{ 1667{
1666 struct adm8211_tx_hdr *txhdr; 1668 struct adm8211_tx_hdr *txhdr;
1667 size_t payload_len, hdrlen; 1669 size_t payload_len, hdrlen;
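This adm8211 hunk is the first of a long series in this merge: mac80211's tx operation grew a struct ieee80211_tx_control parameter, and the destination station moves from the skb's info->control.sta (removed from that union) to control->sta. Every wireless driver below (at76c50x, ath5k, ath9k, carl9170, b43, b43legacy, brcmsmac, iwlegacy, and so on) repeats the same mechanical change:

/* Sketch: the post-change op shape; foo_op_tx() is illustrative. */
static void foo_op_tx(struct ieee80211_hw *hw,
		      struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct ieee80211_sta *sta = control->sta; /* may be NULL */

	/* hand skb to the hardware queue for sta ... */
}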
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index f9f15bb3f03a..c586f78c307f 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -232,8 +232,10 @@ static int adhoc;
232 232
233static int probe = 1; 233static int probe = 1;
234 234
235static kuid_t proc_kuid;
235static int proc_uid /* = 0 */; 236static int proc_uid /* = 0 */;
236 237
238static kgid_t proc_kgid;
237static int proc_gid /* = 0 */; 239static int proc_gid /* = 0 */;
238 240
239static int airo_perm = 0555; 241static int airo_perm = 0555;
@@ -4499,78 +4501,79 @@ struct proc_data {
4499static int setup_proc_entry( struct net_device *dev, 4501static int setup_proc_entry( struct net_device *dev,
4500 struct airo_info *apriv ) { 4502 struct airo_info *apriv ) {
4501 struct proc_dir_entry *entry; 4503 struct proc_dir_entry *entry;
4504
4502 /* First setup the device directory */ 4505 /* First setup the device directory */
4503 strcpy(apriv->proc_name,dev->name); 4506 strcpy(apriv->proc_name,dev->name);
4504 apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm, 4507 apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
4505 airo_entry); 4508 airo_entry);
4506 if (!apriv->proc_entry) 4509 if (!apriv->proc_entry)
4507 goto fail; 4510 goto fail;
4508 apriv->proc_entry->uid = proc_uid; 4511 apriv->proc_entry->uid = proc_kuid;
4509 apriv->proc_entry->gid = proc_gid; 4512 apriv->proc_entry->gid = proc_kgid;
4510 4513
4511 /* Setup the StatsDelta */ 4514 /* Setup the StatsDelta */
4512 entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm, 4515 entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
4513 apriv->proc_entry, &proc_statsdelta_ops, dev); 4516 apriv->proc_entry, &proc_statsdelta_ops, dev);
4514 if (!entry) 4517 if (!entry)
4515 goto fail_stats_delta; 4518 goto fail_stats_delta;
4516 entry->uid = proc_uid; 4519 entry->uid = proc_kuid;
4517 entry->gid = proc_gid; 4520 entry->gid = proc_kgid;
4518 4521
4519 /* Setup the Stats */ 4522 /* Setup the Stats */
4520 entry = proc_create_data("Stats", S_IRUGO & proc_perm, 4523 entry = proc_create_data("Stats", S_IRUGO & proc_perm,
4521 apriv->proc_entry, &proc_stats_ops, dev); 4524 apriv->proc_entry, &proc_stats_ops, dev);
4522 if (!entry) 4525 if (!entry)
4523 goto fail_stats; 4526 goto fail_stats;
4524 entry->uid = proc_uid; 4527 entry->uid = proc_kuid;
4525 entry->gid = proc_gid; 4528 entry->gid = proc_kgid;
4526 4529
4527 /* Setup the Status */ 4530 /* Setup the Status */
4528 entry = proc_create_data("Status", S_IRUGO & proc_perm, 4531 entry = proc_create_data("Status", S_IRUGO & proc_perm,
4529 apriv->proc_entry, &proc_status_ops, dev); 4532 apriv->proc_entry, &proc_status_ops, dev);
4530 if (!entry) 4533 if (!entry)
4531 goto fail_status; 4534 goto fail_status;
4532 entry->uid = proc_uid; 4535 entry->uid = proc_kuid;
4533 entry->gid = proc_gid; 4536 entry->gid = proc_kgid;
4534 4537
4535 /* Setup the Config */ 4538 /* Setup the Config */
4536 entry = proc_create_data("Config", proc_perm, 4539 entry = proc_create_data("Config", proc_perm,
4537 apriv->proc_entry, &proc_config_ops, dev); 4540 apriv->proc_entry, &proc_config_ops, dev);
4538 if (!entry) 4541 if (!entry)
4539 goto fail_config; 4542 goto fail_config;
4540 entry->uid = proc_uid; 4543 entry->uid = proc_kuid;
4541 entry->gid = proc_gid; 4544 entry->gid = proc_kgid;
4542 4545
4543 /* Setup the SSID */ 4546 /* Setup the SSID */
4544 entry = proc_create_data("SSID", proc_perm, 4547 entry = proc_create_data("SSID", proc_perm,
4545 apriv->proc_entry, &proc_SSID_ops, dev); 4548 apriv->proc_entry, &proc_SSID_ops, dev);
4546 if (!entry) 4549 if (!entry)
4547 goto fail_ssid; 4550 goto fail_ssid;
4548 entry->uid = proc_uid; 4551 entry->uid = proc_kuid;
4549 entry->gid = proc_gid; 4552 entry->gid = proc_kgid;
4550 4553
4551 /* Setup the APList */ 4554 /* Setup the APList */
4552 entry = proc_create_data("APList", proc_perm, 4555 entry = proc_create_data("APList", proc_perm,
4553 apriv->proc_entry, &proc_APList_ops, dev); 4556 apriv->proc_entry, &proc_APList_ops, dev);
4554 if (!entry) 4557 if (!entry)
4555 goto fail_aplist; 4558 goto fail_aplist;
4556 entry->uid = proc_uid; 4559 entry->uid = proc_kuid;
4557 entry->gid = proc_gid; 4560 entry->gid = proc_kgid;
4558 4561
4559 /* Setup the BSSList */ 4562 /* Setup the BSSList */
4560 entry = proc_create_data("BSSList", proc_perm, 4563 entry = proc_create_data("BSSList", proc_perm,
4561 apriv->proc_entry, &proc_BSSList_ops, dev); 4564 apriv->proc_entry, &proc_BSSList_ops, dev);
4562 if (!entry) 4565 if (!entry)
4563 goto fail_bsslist; 4566 goto fail_bsslist;
4564 entry->uid = proc_uid; 4567 entry->uid = proc_kuid;
4565 entry->gid = proc_gid; 4568 entry->gid = proc_kgid;
4566 4569
4567 /* Setup the WepKey */ 4570 /* Setup the WepKey */
4568 entry = proc_create_data("WepKey", proc_perm, 4571 entry = proc_create_data("WepKey", proc_perm,
4569 apriv->proc_entry, &proc_wepkey_ops, dev); 4572 apriv->proc_entry, &proc_wepkey_ops, dev);
4570 if (!entry) 4573 if (!entry)
4571 goto fail_wepkey; 4574 goto fail_wepkey;
4572 entry->uid = proc_uid; 4575 entry->uid = proc_kuid;
4573 entry->gid = proc_gid; 4576 entry->gid = proc_kgid;
4574 4577
4575 return 0; 4578 return 0;
4576 4579
@@ -5697,11 +5700,16 @@ static int __init airo_init_module( void )
5697{ 5700{
5698 int i; 5701 int i;
5699 5702
5703 proc_kuid = make_kuid(&init_user_ns, proc_uid);
5704 proc_kgid = make_kgid(&init_user_ns, proc_gid);
5705 if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
5706 return -EINVAL;
5707
5700 airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL); 5708 airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
5701 5709
5702 if (airo_entry) { 5710 if (airo_entry) {
5703 airo_entry->uid = proc_uid; 5711 airo_entry->uid = proc_kuid;
5704 airo_entry->gid = proc_gid; 5712 airo_entry->gid = proc_kgid;
5705 } 5713 }
5706 5714
5707 for (i = 0; i < 4 && io[i] && irq[i]; i++) { 5715 for (i = 0; i < 4 && io[i] && irq[i]; i++) {
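airo gets the same user-namespace treatment as tun above, but for procfs ownership: proc_dir_entry's uid/gid fields are now kuid_t/kgid_t, so the integer module parameters are converted once at module init against &init_user_ns, and the module refuses to load if the mapping is invalid. Isolated:

/* Sketch: one-time conversion of integer module parameters. */
static kuid_t proc_kuid;
static kgid_t proc_kgid;

static int __init foo_init(void)
{
	proc_kuid = make_kuid(&init_user_ns, proc_uid);
	proc_kgid = make_kgid(&init_user_ns, proc_gid);
	if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
		return -EINVAL;
	return 0;
}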
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 88b8d64c90f1..e361afed99ff 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1726,7 +1726,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
1726 ieee80211_wake_queues(priv->hw); 1726 ieee80211_wake_queues(priv->hw);
1727} 1727}
1728 1728
1729static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1729static void at76_mac80211_tx(struct ieee80211_hw *hw,
1730 struct ieee80211_tx_control *control,
1731 struct sk_buff *skb)
1730{ 1732{
1731 struct at76_priv *priv = hw->priv; 1733 struct at76_priv *priv = hw->priv;
1732 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; 1734 struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 4026c906cc7b..b7e0258887e7 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1482 case AR5K_EEPROM_MODE_11A: 1482 case AR5K_EEPROM_MODE_11A:
1483 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); 1483 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
1484 rate_pcal_info = ee->ee_rate_tpwr_a; 1484 rate_pcal_info = ee->ee_rate_tpwr_a;
1485 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; 1485 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
1486 break; 1486 break;
1487 case AR5K_EEPROM_MODE_11B: 1487 case AR5K_EEPROM_MODE_11B:
1488 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); 1488 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index dc2bcfeadeb4..94a9bbea6874 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -182,6 +182,7 @@
182#define AR5K_EEPROM_EEP_DELTA 10 182#define AR5K_EEPROM_EEP_DELTA 10
183#define AR5K_EEPROM_N_MODES 3 183#define AR5K_EEPROM_N_MODES 3
184#define AR5K_EEPROM_N_5GHZ_CHAN 10 184#define AR5K_EEPROM_N_5GHZ_CHAN 10
185#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
185#define AR5K_EEPROM_N_2GHZ_CHAN 3 186#define AR5K_EEPROM_N_2GHZ_CHAN 3
186#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 187#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
187#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 188#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
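The ath5k pair reads as a bounds fix: the 5 GHz rate target-power table evidently holds 8 entries, not the 10 of the general 5 GHz channel count, so announcing AR5K_EEPROM_N_5GHZ_CHAN entries let later code walk past the end of the array. The new AR5K_EEPROM_N_5GHZ_RATE_CHAN constant pins the real size. The defensive habit it illustrates, sketched with hypothetical names:

/* Sketch: bound a loop by the array's own size, not a related count. */
#define N_RATE_CHAN 8
static struct foo_pcal_info tpwr_a[N_RATE_CHAN];

static void foo_read_targets(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tpwr_a); i++)	/* 8, never 10 */
		foo_read_one(&tpwr_a[i]);
}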
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 384e67af73bc..df61a09adb6d 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -55,7 +55,8 @@
55\********************/ 55\********************/
56 56
57static void 57static void
58ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 58ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
59 struct sk_buff *skb)
59{ 60{
60 struct ath5k_hw *ah = hw->priv; 61 struct ath5k_hw *ah = hw->priv;
61 u16 qnum = skb_get_queue_mapping(skb); 62 u16 qnum = skb_get_queue_mapping(skb);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b09285c36c4a..7373e4b92c92 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -280,6 +280,7 @@ struct ath_tx_control {
280 struct ath_txq *txq; 280 struct ath_txq *txq;
281 struct ath_node *an; 281 struct ath_node *an;
282 u8 paprd; 282 u8 paprd;
283 struct ieee80211_sta *sta;
283}; 284};
284 285
285#define ATH_TX_ERROR 0x01 286#define ATH_TX_ERROR 0x01
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 936e920fb88e..b30596fcf73a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
542 542
543int ath9k_tx_init(struct ath9k_htc_priv *priv); 543int ath9k_tx_init(struct ath9k_htc_priv *priv);
544int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, 544int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
545 struct ieee80211_sta *sta,
545 struct sk_buff *skb, u8 slot, bool is_cab); 546 struct sk_buff *skb, u8 slot, bool is_cab);
546void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); 547void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
547bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype); 548bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 77d541feb910..f42d2eb6af99 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
326 goto next; 326 goto next;
327 } 327 }
328 328
329 ret = ath9k_htc_tx_start(priv, skb, tx_slot, true); 329 ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
330 if (ret != 0) { 330 if (ret != 0) {
331 ath9k_htc_tx_clear_slot(priv, tx_slot); 331 ath9k_htc_tx_clear_slot(priv, tx_slot);
332 dev_kfree_skb_any(skb); 332 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c785129692ff..c32f6e3ffb18 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -856,7 +856,9 @@ set_timer:
856/* mac80211 Callbacks */ 856/* mac80211 Callbacks */
857/**********************/ 857/**********************/
858 858
859static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 859static void ath9k_htc_tx(struct ieee80211_hw *hw,
860 struct ieee80211_tx_control *control,
861 struct sk_buff *skb)
860{ 862{
861 struct ieee80211_hdr *hdr; 863 struct ieee80211_hdr *hdr;
862 struct ath9k_htc_priv *priv = hw->priv; 864 struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +885,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
883 goto fail_tx; 885 goto fail_tx;
884 } 886 }
885 887
886 ret = ath9k_htc_tx_start(priv, skb, slot, false); 888 ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
887 if (ret != 0) { 889 if (ret != 0) {
888 ath_dbg(common, XMIT, "Tx failed\n"); 890 ath_dbg(common, XMIT, "Tx failed\n");
889 goto clear_slot; 891 goto clear_slot;
@@ -1331,6 +1333,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1331 return ret; 1333 return ret;
1332} 1334}
1333 1335
1336static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
1337 struct ieee80211_vif *vif,
1338 struct ieee80211_sta *sta, u32 changed)
1339{
1340 struct ath9k_htc_priv *priv = hw->priv;
1341 struct ath_common *common = ath9k_hw_common(priv->ah);
1342 struct ath9k_htc_target_rate trate;
1343
1344 mutex_lock(&priv->mutex);
1345 ath9k_htc_ps_wakeup(priv);
1346
1347 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
1348 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
1349 ath9k_htc_setup_rate(priv, sta, &trate);
1350 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1351 ath_dbg(common, CONFIG,
1352 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1353 sta->addr, be32_to_cpu(trate.capflags));
1354 else
1355 ath_dbg(common, CONFIG,
1356 "Unable to update supported rates for sta: %pM\n",
1357 sta->addr);
1358 }
1359
1360 ath9k_htc_ps_restore(priv);
1361 mutex_unlock(&priv->mutex);
1362}
1363
1334static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, 1364static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
1335 struct ieee80211_vif *vif, u16 queue, 1365 struct ieee80211_vif *vif, u16 queue,
1336 const struct ieee80211_tx_queue_params *params) 1366 const struct ieee80211_tx_queue_params *params)
@@ -1758,6 +1788,7 @@ struct ieee80211_ops ath9k_htc_ops = {
1758 .sta_add = ath9k_htc_sta_add, 1788 .sta_add = ath9k_htc_sta_add,
1759 .sta_remove = ath9k_htc_sta_remove, 1789 .sta_remove = ath9k_htc_sta_remove,
1760 .conf_tx = ath9k_htc_conf_tx, 1790 .conf_tx = ath9k_htc_conf_tx,
1791 .sta_rc_update = ath9k_htc_sta_rc_update,
1761 .bss_info_changed = ath9k_htc_bss_info_changed, 1792 .bss_info_changed = ath9k_htc_bss_info_changed,
1762 .set_key = ath9k_htc_set_key, 1793 .set_key = ath9k_htc_set_key,
1763 .get_tsf = ath9k_htc_get_tsf, 1794 .get_tsf = ath9k_htc_get_tsf,
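ath9k_htc's new sta_rc_update callback hooks mac80211's rate-change notification: on IEEE80211_RC_SUPP_RATES_CHANGED it rebuilds the firmware's per-station rate table under the driver mutex with the chip held awake. The general shape of such an op (foo_sta_rc_update() is illustrative):

static void foo_sta_rc_update(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta, u32 changed)
{
	if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
		return;

	/* re-program the device's rate table for sta here */
}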
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 47e61d0da33b..06cdcb772d78 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
333} 333}
334 334
335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, 335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
336 struct ieee80211_sta *sta,
336 struct sk_buff *skb, 337 struct sk_buff *skb,
337 u8 slot, bool is_cab) 338 u8 slot, bool is_cab)
338{ 339{
339 struct ieee80211_hdr *hdr; 340 struct ieee80211_hdr *hdr;
340 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 341 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
341 struct ieee80211_sta *sta = tx_info->control.sta;
342 struct ieee80211_vif *vif = tx_info->control.vif; 342 struct ieee80211_vif *vif = tx_info->control.vif;
343 struct ath9k_htc_sta *ista; 343 struct ath9k_htc_sta *ista;
344 struct ath9k_htc_vif *avp = NULL; 344 struct ath9k_htc_vif *avp = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a22df749b8db..8a2b04d5922f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -696,7 +696,9 @@ mutex_unlock:
696 return r; 696 return r;
697} 697}
698 698
699static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 699static void ath9k_tx(struct ieee80211_hw *hw,
700 struct ieee80211_tx_control *control,
701 struct sk_buff *skb)
700{ 702{
701 struct ath_softc *sc = hw->priv; 703 struct ath_softc *sc = hw->priv;
702 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 704 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
756 758
757 memset(&txctl, 0, sizeof(struct ath_tx_control)); 759 memset(&txctl, 0, sizeof(struct ath_tx_control));
758 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; 760 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
761 txctl.sta = control->sta;
759 762
760 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); 763 ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
761 764
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2c9da6b2ecb1..ef91f6cc2d79 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1773 TX_STAT_INC(txq->axq_qnum, queued); 1773 TX_STAT_INC(txq->axq_qnum, queued);
1774} 1774}
1775 1775
1776static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1776static void setup_frame_info(struct ieee80211_hw *hw,
1777 struct ieee80211_sta *sta,
1778 struct sk_buff *skb,
1777 int framelen) 1779 int framelen)
1778{ 1780{
1779 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1781 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1780 struct ieee80211_sta *sta = tx_info->control.sta;
1781 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1782 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1782 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1783 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1783 const struct ieee80211_rate *rate; 1784 const struct ieee80211_rate *rate;
@@ -1935,7 +1936,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1935{ 1936{
1936 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1937 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1938 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1938 struct ieee80211_sta *sta = info->control.sta; 1939 struct ieee80211_sta *sta = txctl->sta;
1939 struct ieee80211_vif *vif = info->control.vif; 1940 struct ieee80211_vif *vif = info->control.vif;
1940 struct ath_softc *sc = hw->priv; 1941 struct ath_softc *sc = hw->priv;
1941 struct ath_txq *txq = txctl->txq; 1942 struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1980,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1979 !ieee80211_is_data(hdr->frame_control)) 1980 !ieee80211_is_data(hdr->frame_control))
1980 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1981 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1981 1982
1982 setup_frame_info(hw, skb, frmlen); 1983 setup_frame_info(hw, sta, skb, frmlen);
1983 1984
1984 /* 1985 /*
1985 * At this point, the vif, hw_key and sta pointers in the tx control 1986 * At this point, the vif, hw_key and sta pointers in the tx control
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 376be11161c0..2aa4a59c72c8 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -425,6 +425,7 @@ struct ar9170 {
425 bool rx_has_plcp; 425 bool rx_has_plcp;
426 struct sk_buff *rx_failover; 426 struct sk_buff *rx_failover;
427 int rx_failover_missing; 427 int rx_failover_missing;
428 u32 ampdu_ref;
428 429
429 /* FIFO for collecting outstanding BlockAckRequest */ 430 /* FIFO for collecting outstanding BlockAckRequest */
430 struct list_head bar_list[__AR9170_NUM_TXQ]; 431 struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
577void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); 578void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
578 579
579/* TX */ 580/* TX */
580void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 581void carl9170_op_tx(struct ieee80211_hw *hw,
582 struct ieee80211_tx_control *control,
583 struct sk_buff *skb);
581void carl9170_tx_janitor(struct work_struct *work); 584void carl9170_tx_janitor(struct work_struct *work);
582void carl9170_tx_process_status(struct ar9170 *ar, 585void carl9170_tx_process_status(struct ar9170 *ar,
583 const struct carl9170_rsp *cmd); 586 const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index b813f43061f5..a0b723078547 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -624,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
624#undef TID_CHECK 624#undef TID_CHECK
625} 625}
626 626
627static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms) 627static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
628 struct ieee80211_rx_status *rx_status)
628{ 629{
629 __le16 fc; 630 __le16 fc;
630 631
@@ -637,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
637 return true; 638 return true;
638 } 639 }
639 640
641 rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
642 rx_status->ampdu_reference = ar->ampdu_ref;
643
640 /* 644 /*
641 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts 645 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
642 * certain frame types can be part of an aMPDU. 646 * certain frame types can be part of an aMPDU.
@@ -685,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
685 if (unlikely(len < sizeof(*mac))) 689 if (unlikely(len < sizeof(*mac)))
686 goto drop; 690 goto drop;
687 691
692 memset(&status, 0, sizeof(status));
693
688 mpdu_len = len - sizeof(*mac); 694 mpdu_len = len - sizeof(*mac);
689 695
690 mac = (void *)(buf + mpdu_len); 696 mac = (void *)(buf + mpdu_len);
691 mac_status = mac->status; 697 mac_status = mac->status;
692 switch (mac_status & AR9170_RX_STATUS_MPDU) { 698 switch (mac_status & AR9170_RX_STATUS_MPDU) {
693 case AR9170_RX_STATUS_MPDU_FIRST: 699 case AR9170_RX_STATUS_MPDU_FIRST:
700 ar->ampdu_ref++;
694 /* Aggregated MPDUs start with a PLCP header */ 701 /* Aggregated MPDUs start with a PLCP header */
695 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) { 702 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
696 head = (void *) buf; 703 head = (void *) buf;
@@ -721,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
721 break; 728 break;
722 729
723 case AR9170_RX_STATUS_MPDU_LAST: 730 case AR9170_RX_STATUS_MPDU_LAST:
731 status.flag |= RX_FLAG_AMPDU_IS_LAST;
732
724 /* 733 /*
725 * The last frame of an A-MPDU has an extra tail 734 * The last frame of an A-MPDU has an extra tail
726 * which does contain the phy status of the whole 735 * which does contain the phy status of the whole
727 * aggregate. 736 * aggregate.
728 */ 737 */
729
730 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) { 738 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
731 mpdu_len -= sizeof(struct ar9170_rx_phystatus); 739 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
732 phy = (void *)(buf + mpdu_len); 740 phy = (void *)(buf + mpdu_len);
@@ -774,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
774 if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN))) 782 if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
775 goto drop; 783 goto drop;
776 784
777 memset(&status, 0, sizeof(status));
778 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) 785 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
779 goto drop; 786 goto drop;
780 787
781 if (!carl9170_ampdu_check(ar, buf, mac_status)) 788 if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
782 goto drop; 789 goto drop;
783 790
784 if (phy) 791 if (phy)
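The carl9170 RX changes feed mac80211's A-MPDU reporting: a per-device ampdu_ref counter is bumped on every AR9170_RX_STATUS_MPDU_FIRST subframe, each subframe's rx status carries RX_FLAG_AMPDU_DETAILS plus that reference, and the closing subframe adds RX_FLAG_AMPDU_IS_LAST, letting the stack regroup subframes into one aggregate. Hoisting the memset of status to the top of the handler makes those early flag writes safe. Condensed (first_subframe/last_subframe stand in for the MPDU status tests):

/* Sketch of the tagging; the two booleans are illustrative. */
if (first_subframe)
	ar->ampdu_ref++;

status.flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
status.ampdu_reference = ar->ampdu_ref;
if (last_subframe)
	status.flag |= RX_FLAG_AMPDU_IS_LAST;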
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6a8681407a1d..84377cf580e0 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
867 return false; 867 return false;
868} 868}
869 869
870static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) 870static int carl9170_tx_prepare(struct ar9170 *ar,
871 struct ieee80211_sta *sta,
872 struct sk_buff *skb)
871{ 873{
872 struct ieee80211_hdr *hdr; 874 struct ieee80211_hdr *hdr;
873 struct _carl9170_tx_superframe *txc; 875 struct _carl9170_tx_superframe *txc;
874 struct carl9170_vif_info *cvif; 876 struct carl9170_vif_info *cvif;
875 struct ieee80211_tx_info *info; 877 struct ieee80211_tx_info *info;
876 struct ieee80211_tx_rate *txrate; 878 struct ieee80211_tx_rate *txrate;
877 struct ieee80211_sta *sta;
878 struct carl9170_tx_info *arinfo; 879 struct carl9170_tx_info *arinfo;
879 unsigned int hw_queue; 880 unsigned int hw_queue;
880 int i; 881 int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
910 else 911 else
911 cvif = NULL; 912 cvif = NULL;
912 913
913 sta = info->control.sta;
914
915 txc = (void *)skb_push(skb, sizeof(*txc)); 914 txc = (void *)skb_push(skb, sizeof(*txc));
916 memset(txc, 0, sizeof(*txc)); 915 memset(txc, 0, sizeof(*txc));
917 916
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
1457 return false; 1456 return false;
1458} 1457}
1459 1458
1460void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1459void carl9170_op_tx(struct ieee80211_hw *hw,
1460 struct ieee80211_tx_control *control,
1461 struct sk_buff *skb)
1461{ 1462{
1462 struct ar9170 *ar = hw->priv; 1463 struct ar9170 *ar = hw->priv;
1463 struct ieee80211_tx_info *info; 1464 struct ieee80211_tx_info *info;
1464 struct ieee80211_sta *sta; 1465 struct ieee80211_sta *sta = control->sta;
1465 bool run; 1466 bool run;
1466 1467
1467 if (unlikely(!IS_STARTED(ar))) 1468 if (unlikely(!IS_STARTED(ar)))
1468 goto err_free; 1469 goto err_free;
1469 1470
1470 info = IEEE80211_SKB_CB(skb); 1471 info = IEEE80211_SKB_CB(skb);
1471 sta = info->control.sta;
1472 1472
1473 if (unlikely(carl9170_tx_prepare(ar, skb))) 1473 if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
1474 goto err_free; 1474 goto err_free;
1475 1475
1476 carl9170_tx_accounting(ar, skb); 1476 carl9170_tx_accounting(ar, skb);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index d97a95b1addb..73730e94e0ac 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
3412} 3412}
3413 3413
3414static void b43_op_tx(struct ieee80211_hw *hw, 3414static void b43_op_tx(struct ieee80211_hw *hw,
3415 struct sk_buff *skb) 3415 struct ieee80211_tx_control *control,
3416 struct sk_buff *skb)
3416{ 3417{
3417 struct b43_wl *wl = hw_to_b43_wl(hw); 3418 struct b43_wl *wl = hw_to_b43_wl(hw);
3418 3419
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 3ea1a85d38d1..291cdf654088 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
2492} 2492}
2493 2493
2494static void b43legacy_op_tx(struct ieee80211_hw *hw, 2494static void b43legacy_op_tx(struct ieee80211_hw *hw,
2495 struct ieee80211_tx_control *control,
2495 struct sk_buff *skb) 2496 struct sk_buff *skb)
2496{ 2497{
2497 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2498 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 1c70defba6c3..718da8d6d658 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -267,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
267 } 267 }
268} 268}
269 269
270static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 270static void brcms_ops_tx(struct ieee80211_hw *hw,
271 struct ieee80211_tx_control *control,
272 struct sk_buff *skb)
271{ 273{
272 struct brcms_info *wl = hw->priv; 274 struct brcms_info *wl = hw->priv;
273 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 275 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -279,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
279 goto done; 281 goto done;
280 } 282 }
281 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw); 283 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
282 tx_info->rate_driver_data[0] = tx_info->control.sta; 284 tx_info->rate_driver_data[0] = control->sta;
283 done: 285 done:
284 spin_unlock_bh(&wl->lock); 286 spin_unlock_bh(&wl->lock);
285} 287}
@@ -1235,6 +1237,9 @@ uint brcms_reset(struct brcms_info *wl)
1235 /* dpc will not be rescheduled */ 1237 /* dpc will not be rescheduled */
1236 wl->resched = false; 1238 wl->resched = false;
1237 1239
1240 /* inform publicly that interface is down */
1241 wl->pub->up = false;
1242
1238 return 0; 1243 return 0;
1239} 1244}
1240 1245
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 95aa8e1683ec..83324b321652 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
2042 return; 2042 return;
2043 } 2043 }
2044 len = ETH_ALEN; 2044 len = ETH_ALEN;
2045 ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); 2045 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
2046 &len);
2046 if (ret) { 2047 if (ret) {
2047 IPW_DEBUG_INFO("failed querying ordinals at line %d\n", 2048 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
2048 __LINE__); 2049 __LINE__);
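The ipw2100 hunk fixes two defects in one statement: bssid is already a u8 array, so passing &bssid handed the ordinal query a pointer-to-array instead of the expected byte pointer, and the call's result was discarded even though the very next line tests ret. Corrected shape, sketched (local declarations assumed):

/* Sketch: pass the array (it decays to u8 *) and capture the result. */
u8 bssid[ETH_ALEN];
u32 len = ETH_ALEN;
int ret;

ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, &len);
if (ret)
	IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__);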
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index faec40467208..e252acb9c862 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
460 * start C_TX command process 460 * start C_TX command process
461 */ 461 */
462static int 462static int
463il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) 463il3945_tx_skb(struct il_priv *il,
464 struct ieee80211_sta *sta,
465 struct sk_buff *skb)
464{ 466{
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find idx into station table for destination station */
-	sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+	sta_id = il_sta_id_or_broadcast(il, sta);
 	if (sta_id == IL_INVALID_STATION) {
 		D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
 		goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
 }
 
 static void
-il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il3945_mac_tx(struct ieee80211_hw *hw,
+	      struct ieee80211_tx_control *control,
+	      struct sk_buff *skb)
 {
 	struct il_priv *il = hw->priv;
 
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (il3945_tx_skb(il, skb))
+	if (il3945_tx_skb(il, control->sta, skb))
 		dev_kfree_skb_any(skb);
 
 	D_MAC80211("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 34f61a0581a2..eac4dc8bc879 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
 }
 
 static void
-il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
-			 struct ieee80211_tx_info *info, __le16 fc)
+il4965_tx_cmd_build_rate(struct il_priv *il,
+			 struct il_tx_cmd *tx_cmd,
+			 struct ieee80211_tx_info *info,
+			 struct ieee80211_sta *sta,
+			 __le16 fc)
 {
 	const u8 rts_retry_limit = 60;
 	u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
 	rate_idx = info->control.rates[0].idx;
 	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
 	    || rate_idx > RATE_COUNT_LEGACY)
-		rate_idx =
-		    rate_lowest_index(&il->bands[info->band],
-				      info->control.sta);
+		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
 	if (info->band == IEEE80211_BAND_5GHZ)
 		rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
  * start C_TX command process
  */
 int
-il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il4965_tx_skb(struct il_priv *il,
+	      struct ieee80211_sta *sta,
+	      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
 	struct il_station_priv *sta_priv = NULL;
 	struct il_tx_queue *txq;
 	struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
 		sta_id = il->hw_params.bcast_id;
 	else {
 		/* Find idx into station table for destination station */
-		sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+		sta_id = il_sta_id_or_broadcast(il, sta);
 
 		if (sta_id == IL_INVALID_STATION) {
 			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
 	/* TODO need this for burst mode later on */
 	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
 
-	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+	il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
 
 	il_update_stats(il, true, fc, len);
 	/*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
 }
 
 void
-il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il4965_mac_tx(struct ieee80211_hw *hw,
+	      struct ieee80211_tx_control *control,
+	      struct sk_buff *skb)
 {
 	struct il_priv *il = hw->priv;
 
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (il4965_tx_skb(il, skb))
+	if (il4965_tx_skb(il, control->sta, skb))
 		dev_kfree_skb_any(skb);
 
 	D_MACDUMP("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1db677689cfe..2d092f328547 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
 void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
 				 struct ieee80211_tx_info *info);
-int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_skb(struct il_priv *il,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb);
 int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
 int il4965_eeprom_check_version(struct il_priv *il);
 
 /* mac80211 handlers (for 4965) */
-void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void il4965_mac_tx(struct ieee80211_hw *hw,
+		   struct ieee80211_tx_control *control,
+		   struct sk_buff *skb);
 int il4965_mac_start(struct ieee80211_hw *hw);
 void il4965_mac_stop(struct ieee80211_hw *hw);
 void il4965_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 9bb16bdf6d26..75e12f29d9eb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
 
 /* tx */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_skb(struct iwl_priv *priv,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb);
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
 #else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+				     struct dentry *dbgfs_dir)
 {
 	return 0;
 }
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 #ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 46782f1102ac..1a98fa3ab06d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 	const struct fw_img *img;
 	size_t bufsz;
 
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
 	/* default is to dump the entire data segment */
 	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
 		priv->dbgfs_sram_offset = 0x800000;
@@ -2349,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
  * Create the debugfs files and directories
  *
  */
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
 {
-	struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
-	dir_drv = debugfs_create_dir(name, phyd);
-	if (!dir_drv)
-		return -ENOMEM;
+	struct dentry *dir_data, *dir_rf, *dir_debug;
 
-	priv->debugfs_dir = dir_drv;
+	priv->debugfs_dir = dbgfs_dir;
 
-	dir_data = debugfs_create_dir("data", dir_drv);
+	dir_data = debugfs_create_dir("data", dbgfs_dir);
 	if (!dir_data)
 		goto err;
-	dir_rf = debugfs_create_dir("rf", dir_drv);
+	dir_rf = debugfs_create_dir("rf", dbgfs_dir);
 	if (!dir_rf)
 		goto err;
-	dir_debug = debugfs_create_dir("debug", dir_drv);
+	dir_debug = debugfs_create_dir("debug", dbgfs_dir);
 	if (!dir_debug)
 		goto err;
 
@@ -2412,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
 	/* Calibrations disabled/enabled status*/
 	DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
 
-	if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
-		goto err;
+	/*
+	 * Create a symlink with mac80211. This is not very robust, as it does
+	 * not remove the symlink created. The implicit assumption is that
+	 * when the opmode exits, mac80211 will also exit, and will remove
+	 * this symlink as part of its cleanup.
+	 */
+	if (priv->mac80211_registered) {
+		char buf[100];
+		struct dentry *mac80211_dir, *dev_dir, *root_dir;
+
+		dev_dir = dbgfs_dir->d_parent;
+		root_dir = dev_dir->d_parent;
+		mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+		snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
+			 dev_dir->d_name.name);
+
+		if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+			goto err;
+	}
+
 	return 0;
 
 err:
-	IWL_ERR(priv, "Can't create the debugfs directory\n");
-	iwl_dbgfs_unregister(priv);
+	IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
 	return -ENOMEM;
 }
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-	if (!priv->debugfs_dir)
-		return;
-
-	debugfs_remove_recursive(priv->debugfs_dir);
-	priv->debugfs_dir = NULL;
-}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index a5f7bce96325..ff8162d4c454 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
 	}
 
-	hw->wiphy->max_remain_on_channel_duration = 1000;
+	hw->wiphy->max_remain_on_channel_duration = 500;
 
 	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
 			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 }
 #endif
 
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+			  struct ieee80211_tx_control *control,
+			  struct sk_buff *skb)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
 	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (iwlagn_tx_skb(priv, skb))
+	if (iwlagn_tx_skb(priv, control->sta, skb))
 		dev_kfree_skb_any(skb);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 84d3db5aa506..7ff3f1430678 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
 	 * No race since we hold the mutex here and a new one
 	 * can't come in at this time.
 	 */
-	ieee80211_remain_on_channel_expired(priv->hw);
+	if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
+		ieee80211_remain_on_channel_expired(priv->hw);
 
 	exit_pending =
 		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
 		iwlagn_prepare_restart(priv);
 		mutex_unlock(&priv->mutex);
 		iwl_cancel_deferred_work(priv);
-		ieee80211_restart_hw(priv->hw);
+		if (priv->mac80211_registered)
+			ieee80211_restart_hw(priv->hw);
+		else
+			IWL_ERR(priv,
+				"Cannot request restart before registering with mac80211");
 	} else {
 		WARN_ON(1);
 	}
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 						 const struct iwl_cfg *cfg,
-						 const struct iwl_fw *fw)
+						 const struct iwl_fw *fw,
+						 struct dentry *dbgfs_dir)
 {
 	struct iwl_priv *priv;
 	struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
 		goto out_destroy_workqueue;
 
-	if (iwl_dbgfs_register(priv, DRV_NAME))
-		IWL_ERR(priv,
-			"failed to create debugfs files. Ignoring error\n");
+	if (iwl_dbgfs_register(priv, dbgfs_dir))
+		goto out_mac80211_unregister;
 
 	return op_mode;
 
+out_mac80211_unregister:
+	iwlagn_mac_unregister(priv);
 out_destroy_workqueue:
+	iwl_tt_exit(priv);
+	iwl_testmode_free(priv);
+	iwl_cancel_deferred_work(priv);
 	destroy_workqueue(priv->workqueue);
 	priv->workqueue = NULL;
 	iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 
 	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
 
-	iwl_dbgfs_unregister(priv);
-
 	iwl_testmode_free(priv);
 	iwlagn_mac_unregister(priv);
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b29b798f7550..fe36a38f3505 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -150,7 +150,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 		       sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
 
 	if (!(flags & CMD_ASYNC)) {
-		cmd.flags |= CMD_WANT_SKB;
+		cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
 		might_sleep();
 	}
 
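The one-line change above pairs CMD_WANT_SKB with the new CMD_WANT_HCMD flag (defined in iwl-trans.h further down), asking the transport to keep a copy of the host command so the response handler can inspect what was actually sent. A sketch of such a caller follows; the function name and field values are illustrative, not verbatim driver code.

	/* Sketch only: a synchronous command whose handler wants both the
	 * response buffer and the original command. */
	static int sketch_send_add_sta(struct iwl_priv *priv,
				       struct iwl_addsta_cmd *sta_cmd)
	{
		struct iwl_host_cmd cmd = {
			.id = REPLY_ADD_STA,
			.flags = CMD_SYNC | CMD_WANT_SKB | CMD_WANT_HCMD,
			.data = { sta_cmd, },
			.len = { sizeof(*sta_cmd), },
		};

		/* The command copy later handed to the response handler is
		 * transport-owned: don't free it, and don't use it after the
		 * handler returns (see the iwl-trans.h comment below). */
		return iwl_dvm_send_cmd(priv, &cmd);
	}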
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5971a23aa47d..f5ca73a89870 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 				     struct iwl_tx_cmd *tx_cmd,
 				     struct ieee80211_tx_info *info,
+				     struct ieee80211_sta *sta,
 				     __le16 fc)
 {
 	u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
 	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
 		rate_idx = rate_lowest_index(
-				&priv->eeprom_data->bands[info->band],
-				info->control.sta);
+				&priv->eeprom_data->bands[info->band], sta);
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
 	if (info->band == IEEE80211_BAND_5GHZ)
 		rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
 /*
  * start REPLY_TX command process
  */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+int iwlagn_tx_skb(struct iwl_priv *priv,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		sta_id = ctx->bcast_sta_id;
 	else {
 		/* Find index into station table for destination station */
-		sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
+		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
 		if (sta_id == IWL_INVALID_STATION) {
 			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 				       hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-	if (info->control.sta)
-		sta_priv = (void *)info->control.sta->drv_priv;
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
 
 	if (sta_priv && sta_priv->asleep &&
 	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* TODO need this for burst mode later on */
 	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 
-	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
 
 	memset(&info->status, 0, sizeof(info->status));
 
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	 * only. Check this here.
 	 */
-	if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
-	    tid_data->agg.state != IWL_AGG_OFF,
-	    "Tx while agg.state = %d", tid_data->agg.state))
+	if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
+		      tid_data->agg.state != IWL_AGG_OFF,
+		      "Tx while agg.state = %d", tid_data->agg.state))
 		goto drop_unlock_sta;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index cc41cfaedfbd..48d6d44c16d0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -101,6 +101,10 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
 /**
  * struct iwl_drv - drv common data
  * @list: list of drv structures using this opmode
@@ -126,6 +130,12 @@ struct iwl_drv {
 	char firmware_name[25];		/* name of firmware file to load */
 
 	struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct dentry *dbgfs_drv;
+	struct dentry *dbgfs_trans;
+	struct dentry *dbgfs_op_mode;
+#endif
 };
 
 #define DVM_OP_MODE 0
@@ -194,7 +204,8 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
 	return 0;
 }
 
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+				void *context);
 
 #define UCODE_EXPERIMENTAL_INDEX	100
 #define UCODE_EXPERIMENTAL_TAG		"exp"
@@ -231,7 +242,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 
 	return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
 				       drv->trans->dev,
-				       GFP_KERNEL, drv, iwl_ucode_callback);
+				       GFP_KERNEL, drv, iwl_req_fw_callback);
 }
 
 struct fw_img_parsing {
@@ -759,13 +770,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
 	return 0;
 }
 
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+	const struct iwl_op_mode_ops *ops = op->ops;
+	struct dentry *dbgfs_dir = NULL;
+	struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+						drv->dbgfs_drv);
+	if (!drv->dbgfs_op_mode) {
+		IWL_ERR(drv,
+			"failed to create opmode debugfs directory\n");
+		return op_mode;
+	}
+	dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+	op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (!op_mode) {
+		debugfs_remove_recursive(drv->dbgfs_op_mode);
+		drv->dbgfs_op_mode = NULL;
+	}
+#endif
+
+	return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+	/* op_mode can be NULL if its start failed */
+	if (drv->op_mode) {
+		iwl_op_mode_stop(drv->op_mode);
+		drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		debugfs_remove_recursive(drv->dbgfs_op_mode);
+		drv->dbgfs_op_mode = NULL;
+#endif
+	}
+}
+
 /**
- * iwl_ucode_callback - callback when firmware was loaded
+ * iwl_req_fw_callback - callback when firmware was loaded
  *
  * If loaded successfully, copies the firmware into buffers
  * for the card to fetch (via DMA).
  */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 {
 	struct iwl_drv *drv = context;
 	struct iwl_fw *fw = &drv->fw;
@@ -908,8 +963,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
 	list_add_tail(&drv->list, &op->drv);
 
 	if (op->ops) {
-		const struct iwl_op_mode_ops *ops = op->ops;
-		drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+		drv->op_mode = _iwl_op_mode_start(drv, op);
 
 		if (!drv->op_mode) {
 			mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,24 +1023,51 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
 	init_completion(&drv->request_firmware_complete);
 	INIT_LIST_HEAD(&drv->list);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* Create the device debugfs entries. */
+	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+					    iwl_dbgfs_root);
+
+	if (!drv->dbgfs_drv) {
+		IWL_ERR(drv, "failed to create debugfs directory\n");
+		goto err_free_drv;
+	}
+
+	/* Create transport layer debugfs dir */
+	drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+	if (!drv->trans->dbgfs_dir) {
+		IWL_ERR(drv, "failed to create transport debugfs directory\n");
+		goto err_free_dbgfs;
+	}
+#endif
+
 	ret = iwl_request_firmware(drv, true);
 
 	if (ret) {
 		IWL_ERR(trans, "Couldn't request the fw\n");
-		kfree(drv);
-		drv = NULL;
+		goto err_fw;
 	}
 
 	return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+	debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+	kfree(drv);
+	drv = NULL;
+
+	return drv;
 }
 
 void iwl_drv_stop(struct iwl_drv *drv)
 {
 	wait_for_completion(&drv->request_firmware_complete);
 
-	/* op_mode can be NULL if its start failed */
-	if (drv->op_mode)
-		iwl_op_mode_stop(drv->op_mode);
+	_iwl_op_mode_stop(drv);
 
 	iwl_dealloc_ucode(drv);
 
@@ -1000,6 +1081,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
 	list_del(&drv->list);
 	mutex_unlock(&iwlwifi_opmode_table_mtx);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
 	kfree(drv);
 }
 
@@ -1022,15 +1107,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
 {
 	int i;
 	struct iwl_drv *drv;
+	struct iwlwifi_opmode_table *op;
 
 	mutex_lock(&iwlwifi_opmode_table_mtx);
 	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
-		if (strcmp(iwlwifi_opmode_table[i].name, name))
+		op = &iwlwifi_opmode_table[i];
+		if (strcmp(op->name, name))
 			continue;
-		iwlwifi_opmode_table[i].ops = ops;
-		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
-			drv->op_mode = ops->start(drv->trans, drv->cfg,
-						  &drv->fw);
+		op->ops = ops;
+		/* TODO: need to handle exceptional case */
+		list_for_each_entry(drv, &op->drv, list)
+			drv->op_mode = _iwl_op_mode_start(drv, op);
+
 		mutex_unlock(&iwlwifi_opmode_table_mtx);
 		return 0;
 	}
@@ -1051,12 +1139,9 @@ void iwl_opmode_deregister(const char *name)
 		iwlwifi_opmode_table[i].ops = NULL;
 
 		/* call the stop routine for all devices */
-		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
-			if (drv->op_mode) {
-				iwl_op_mode_stop(drv->op_mode);
-				drv->op_mode = NULL;
-			}
-		}
+		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+			_iwl_op_mode_stop(drv);
+
 		mutex_unlock(&iwlwifi_opmode_table_mtx);
 		return;
 	}
@@ -1076,6 +1161,14 @@ static int __init iwl_drv_init(void)
 	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
 	pr_info(DRV_COPYRIGHT "\n");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* Create the root of iwlwifi debugfs subsystem. */
+	iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+	if (!iwl_dbgfs_root)
+		return -EFAULT;
+#endif
+
 	return iwl_pci_register_driver();
 }
 module_init(iwl_drv_init);
@@ -1083,6 +1176,10 @@ module_init(iwl_drv_init);
 static void __exit iwl_drv_exit(void)
 {
 	iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
 }
 module_exit(iwl_drv_exit);
 
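Taken together, the iwl-drv.c hunks move debugfs ownership into the shared driver core and fix its layout: a module-wide root created in iwl_drv_init(), one directory per device created in iwl_drv_start(), and per-layer subdirectories ("trans" for the transport, one named after the opmode) handed to the layers that fill them. A condensed sketch of the hierarchy, assuming debugfs is mounted at /sys/kernel/debug and eliding the error handling shown above; the function name is illustrative.

	#include <linux/debugfs.h>

	static struct dentry *iwl_dbgfs_root;	/* /sys/kernel/debug/iwlwifi */

	/* Illustrative only; mirrors the calls added above. */
	static void sketch_dbgfs_layout(struct iwl_drv *drv,
					struct iwlwifi_opmode_table *op)
	{
		/* per device: iwlwifi/<dev-name> */
		drv->dbgfs_drv = debugfs_create_dir(dev_name(drv->trans->dev),
						    iwl_dbgfs_root);
		/* the transport fills iwlwifi/<dev-name>/trans */
		drv->trans->dbgfs_dir =
			debugfs_create_dir("trans", drv->dbgfs_drv);
		/* the opmode fills iwlwifi/<dev-name>/<opmode>, received
		 * through the extended ops->start() hook */
		drv->dbgfs_op_mode = debugfs_create_dir(op->name,
							drv->dbgfs_drv);
		drv->op_mode = op->ops->start(drv->trans, drv->cfg, &drv->fw,
					      drv->dbgfs_op_mode);
	}

The dvm symlink created in debugfs.c earlier then points mac80211's per-phy directory back at iwlwifi/<dev-name>/<opmode> through the relative "../../" path.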
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 2cbf137b25bf..285de5f68c05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -90,9 +90,9 @@
  * 4) The bus specific component configures the bus
  * 5) The bus specific component calls to the drv bus agnostic part
  *    (iwl_drv_start)
- * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback
- * 7) iwl_ucode_callback parses the fw file
- * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
+ * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation to match the fw
  */
 
 struct iwl_drv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 64886f95664f..c8d9b9517468 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -134,7 +134,8 @@ struct iwl_cfg;
 struct iwl_op_mode_ops {
 	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
 				     const struct iwl_cfg *cfg,
-				     const struct iwl_fw *fw);
+				     const struct iwl_fw *fw,
+				     struct dentry *dbgfs_dir);
 	void (*stop)(struct iwl_op_mode *op_mode);
 	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
 		  struct iwl_device_cmd *cmd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 92576a3e84ef..ff1154232885 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
  * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't wait for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *	response.
+ *	response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
+ *	response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
+ *	copied. The pointer passed to the response handler is in the transport
+ *	ownership and doesn't need to be freed by the op_mode. This also means
+ *	that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
 	CMD_SYNC = 0,
 	CMD_ASYNC = BIT(0),
 	CMD_WANT_SKB = BIT(1),
-	CMD_ON_DEMAND = BIT(2),
+	CMD_WANT_HCMD = BIT(2),
+	CMD_ON_DEMAND = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@ struct iwl_trans {
 	size_t dev_cmd_headroom;
 	char dev_cmd_pool_name[50];
 
+	struct dentry *dbgfs_dir;
+
 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
 	char trans_specific[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f4c3500b68c6..89bfb43f4946 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -282,8 +282,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!trans_pcie->drv)
 		goto out_free_trans;
 
+	/* register transport layer debugfs here */
+	if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+		goto out_free_drv;
+
 	return 0;
 
+out_free_drv:
+	iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
 	iwl_trans_pcie_free(iwl_trans);
 	pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d9694c58208c..71c79943e633 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -184,6 +184,7 @@ struct iwl_queue {
 
 struct iwl_pcie_tx_queue_entry {
 	struct iwl_device_cmd *cmd;
+	struct iwl_device_cmd *copy_cmd;
 	struct sk_buff *skb;
 	struct iwl_cmd_meta meta;
 };
@@ -350,7 +351,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 39a6ca1f009c..498372008810 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -421,13 +421,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	index = SEQ_TO_INDEX(sequence);
 	cmd_index = get_cmd_index(&txq->q, index);
 
-	if (reclaim)
-		cmd = txq->entries[cmd_index].cmd;
-	else
+	if (reclaim) {
+		struct iwl_pcie_tx_queue_entry *ent;
+		ent = &txq->entries[cmd_index];
+		cmd = ent->copy_cmd;
+		WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
+	} else {
 		cmd = NULL;
+	}
 
 	err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
+	if (reclaim) {
+		/* The original command isn't needed any more */
+		kfree(txq->entries[cmd_index].copy_cmd);
+		txq->entries[cmd_index].copy_cmd = NULL;
+	}
+
 	/*
 	 * After here, we should always check rxcb._page_stolen,
 	 * if it is true then one of the handlers took the page.
@@ -555,7 +565,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 	}
 
 	iwl_dump_csr(trans);
-	iwl_dump_fh(trans, NULL, false);
+	iwl_dump_fh(trans, NULL);
 
 	iwl_op_mode_nic_error(trans->op_mode);
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 939c2f78df58..848851177e7e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 	iwl_tx_queue_unmap(trans, txq_id);
 
 	/* De-alloc array of command/tx buffers */
-
 	if (txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < txq->q.n_window; i++)
+		for (i = 0; i < txq->q.n_window; i++) {
 			kfree(txq->entries[i].cmd);
+			kfree(txq->entries[i].copy_cmd);
+		}
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd) {
@@ -896,6 +897,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
 static int iwl_prepare_card_hw(struct iwl_trans *trans)
 {
 	int ret;
+	int t = 0;
 
 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -908,17 +910,15 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
 	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 		    CSR_HW_IF_CONFIG_REG_PREPARE);
 
-	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+	do {
+		ret = iwl_set_hw_ready(trans);
+		if (ret >= 0)
+			return 0;
 
-	if (ret < 0)
-		return ret;
+		usleep_range(200, 1000);
+		t += 200;
+	} while (t < 150000);
 
-	/* HW should be ready by now, check again. */
-	ret = iwl_set_hw_ready(trans);
-	if (ret >= 0)
-		return 0;
 	return ret;
 }
 
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
 	int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-	int pos = 0;
-	size_t bufsz = 0;
-#endif
 	static const u32 fh_tbl[] = {
 		FH_RSCSR_CHNL0_STTS_WPTR_REG,
 		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
 		FH_TSSR_TX_STATUS_REG,
 		FH_TSSR_TX_ERROR_REG
 	};
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (display) {
-		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (buf) {
+		int pos = 0;
+		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
 		*buf = kmalloc(bufsz, GFP_KERNEL);
 		if (!*buf)
 			return -ENOMEM;
+
 		pos += scnprintf(*buf + pos, bufsz - pos,
 				 "FH register values:\n");
-		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
 			pos += scnprintf(*buf + pos, bufsz - pos,
 					 " %34s: 0X%08x\n",
 					 get_fh_string(fh_tbl[i]),
 					 iwl_read_direct32(trans, fh_tbl[i]));
-		}
+
 		return pos;
 	}
 #endif
+
 	IWL_ERR(trans, "FH register values:\n");
-	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
 		IWL_ERR(trans, " %34s: 0X%08x\n",
 			get_fh_string(fh_tbl[i]),
 			iwl_read_direct32(trans, fh_tbl[i]));
-	}
+
 	return 0;
 }
 
@@ -1769,7 +1771,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
 #define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
 	if (!debugfs_create_file(#name, mode, parent, trans,		\
 				 &iwl_dbgfs_##name##_ops))		\
-		return -ENOMEM;						\
+		goto err;						\
 } while (0)
 
 /* file operation */
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 				  size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
-	char *buf;
+	char *buf = NULL;
 	int pos = 0;
 	ssize_t ret = -EFAULT;
 
-	ret = pos = iwl_dump_fh(trans, &buf, true);
+	ret = pos = iwl_dump_fh(trans, &buf);
 	if (buf) {
 		ret = simple_read_from_buffer(user_buf,
 					      count, ppos, buf, pos);
@@ -2033,6 +2035,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
 	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
 	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
 	return 0;
+
+err:
+	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+	return -ENOMEM;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
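With the bool display parameter gone, iwl_dump_fh() now picks its output mode from the buf pointer alone: NULL logs the FH registers through IWL_ERR (the interrupt error path earlier in this file), while a non-NULL buf receives a kmalloc'd dump whose length is returned and which the caller must kfree (the fh_reg debugfs read above). A short usage sketch under those assumptions; sketch_dump_fh() is illustrative, not driver code.

	#include <linux/slab.h>

	static void sketch_dump_fh(struct iwl_trans *trans)
	{
		char *buf = NULL;
		int pos;

		/* debugfs style: dump into an allocated buffer */
		pos = iwl_dump_fh(trans, &buf);
		if (buf) {
			/* ... hand pos bytes to userspace ... */
			kfree(buf);
		}

		/* error-path style: no buffer, registers go to the log */
		iwl_dump_fh(trans, NULL);
	}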
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6baf8deef519..392d2bc5e357 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -521,7 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	u16 copy_size, cmd_size;
 	bool had_nocopy = false;
 	int i;
-	u8 *cmd_dest;
+	u32 cmd_pos;
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
 	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
 	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
@@ -584,15 +584,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 			 INDEX_TO_SEQ(q->write_ptr));
 
 	/* and copy the data that needs to be copied */
-
-	cmd_dest = out_cmd->payload;
+	cmd_pos = offsetof(struct iwl_device_cmd, payload);
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 		if (!cmd->len[i])
 			continue;
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
 			break;
-		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
-		cmd_dest += cmd->len[i];
+		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
+		cmd_pos += cmd->len[i];
+	}
+
+	WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+
+	/*
+	 * since out_cmd will be the source address of the FH, it will write
+	 * the retry count there. So when the user needs to receive the HCMD
+	 * that corresponds to the response in the response handler, it needs
+	 * to set CMD_WANT_HCMD.
+	 */
+	if (cmd->flags & CMD_WANT_HCMD) {
+		txq->entries[idx].copy_cmd =
+			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
+		if (unlikely(!txq->entries[idx].copy_cmd)) {
+			idx = -ENOMEM;
+			goto out;
+		}
 	}
 
 	IWL_DEBUG_HC(trans,
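This hunk is the transport half of CMD_WANT_HCMD: the DMA engine writes back into out_cmd (the retry count), so a pristine copy of the first cmd_pos bytes is kmemdup()'d at enqueue time and stashed in the queue entry; the rx path in pcie/rx.c above hands that copy to the op_mode and frees it as soon as the handler has run. A stripped-down sketch of the two ends, paraphrasing rather than quoting the driver:

	/* enqueue side (simplified): preserve the command before the
	 * hardware can modify out_cmd in place */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (!txq->entries[idx].copy_cmd)
			return -ENOMEM;
	}

	/* completion side (simplified): the copy is handed to the op_mode
	 * exactly once, then released; the op_mode must not keep it */
	cmd = txq->entries[cmd_index].copy_cmd;
	iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
	kfree(txq->entries[cmd_index].copy_cmd);
	txq->entries[cmd_index].copy_cmd = NULL;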
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index a03457292c88..7001856241e6 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
 	lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw,
+		       struct ieee80211_tx_control *control,
+		       struct sk_buff *skb)
 {
 	struct lbtf_private *priv = hw->priv;
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 00838395778c..72b0456e41bf 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -709,7 +709,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
 	return ack;
 }
 
-static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+			      struct ieee80211_tx_control *control,
+			      struct sk_buff *skb)
 {
 	bool ack;
 	struct ieee80211_tx_info *txi;
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
 #endif
 				 BIT(NL80211_IFTYPE_AP) |
 				 BIT(NL80211_IFTYPE_P2P_GO) },
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
 		BIT(NL80211_IFTYPE_P2P_GO) |
 		BIT(NL80211_IFTYPE_ADHOC) |
-		BIT(NL80211_IFTYPE_MESH_POINT);
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+		BIT(NL80211_IFTYPE_P2P_DEVICE);
 
 	hw->flags = IEEE80211_HW_MFP_CAPABLE |
 		    IEEE80211_HW_SIGNAL_DBM |
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 224e03ade145..5099e5375cb3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
 }
 
 static void
-mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
+mwl8k_txq_xmit(struct ieee80211_hw *hw,
+	       int index,
+	       struct ieee80211_sta *sta,
+	       struct sk_buff *skb)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	struct ieee80211_tx_info *tx_info;
 	struct mwl8k_vif *mwl8k_vif;
-	struct ieee80211_sta *sta;
 	struct ieee80211_hdr *wh;
 	struct mwl8k_tx_queue *txq;
 	struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
 	tx_info = IEEE80211_SKB_CB(skb);
-	sta = tx_info->control.sta;
 	mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
 
 	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
 	tx->pkt_phys_addr = cpu_to_le32(dma);
 	tx->pkt_len = cpu_to_le16(skb->len);
 	tx->rate_info = 0;
-	if (!priv->ap_fw && tx_info->control.sta != NULL)
-		tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+	if (!priv->ap_fw && sta != NULL)
+		tx->peer_id = MWL8K_STA(sta)->peer_id;
 	else
 		tx->peer_id = 0;
 
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
 /*
  * Core driver operations.
  */
-static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw,
+		     struct ieee80211_tx_control *control,
+		     struct sk_buff *skb)
 {
 	struct mwl8k_priv *priv = hw->priv;
 	int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		return;
 	}
 
-	mwl8k_txq_xmit(hw, index, skb);
+	mwl8k_txq_xmit(hw, index, control->sta, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 3d8d622bec55..de1d46bf97df 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev,
+		  struct ieee80211_tx_control *control,
+		  struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 7cffea795ad2..5e91ad06dd5d 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -158,7 +158,7 @@ static int p54_beacon_update(struct p54_common *priv,
 	 * to cancel the old beacon template by hand, instead the firmware
 	 * will release the previous one through the feedback mechanism.
 	 */
-	p54_tx_80211(priv->hw, beacon);
+	p54_tx_80211(priv->hw, NULL, beacon);
 	priv->tsf_high32 = 0;
 	priv->tsf_low32 = 0;
 
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f38786e02623..5861e13a6fd8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(p54_rx);
 
 static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
-				struct ieee80211_tx_info *info, u8 *queue,
-				u32 *extra_len, u16 *flags, u16 *aid,
+				struct ieee80211_tx_info *info,
+				struct ieee80211_sta *sta,
+				u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
 				bool *burst_possible)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
 		}
 	}
 
-	if (info->control.sta)
-		*aid = info->control.sta->aid;
+	if (sta)
+		*aid = sta->aid;
 		break;
 	}
 }
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
 	}
 }
 
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev,
+		  struct ieee80211_tx_control *control,
+		  struct sk_buff *skb)
 {
 	struct p54_common *priv = dev->priv;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
784 u8 nrates = 0, nremaining = 8; 787 u8 nrates = 0, nremaining = 8;
785 bool burst_allowed = false; 788 bool burst_allowed = false;
786 789
787 p54_tx_80211_header(priv, skb, info, &queue, &extra_len, 790 p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
788 &hdr_flags, &aid, &burst_allowed); 791 &hdr_flags, &aid, &burst_allowed);
789 792
790 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 793 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
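Note: p54 threads the station pointer down to its header builder explicitly; the beacon-update path passes NULL because no ieee80211_tx_control exists outside the normal tx path, and the AID lookup degrades gracefully. A sketch of that rule (helper name hypothetical):

    static u16 p54_frame_aid(struct ieee80211_sta *sta)
    {
            /* Internal frames such as beacon templates carry no station,
             * so they go out with association ID 0. */
            return sta ? sta->aid : 0;
    }
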
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8afb546c2b2d..f991e8bedc70 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1287,7 +1287,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
1287/* 1287/*
1288 * mac80211 handlers. 1288 * mac80211 handlers.
1289 */ 1289 */
1290void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 1290void rt2x00mac_tx(struct ieee80211_hw *hw,
1291 struct ieee80211_tx_control *control,
1292 struct sk_buff *skb);
1291int rt2x00mac_start(struct ieee80211_hw *hw); 1293int rt2x00mac_start(struct ieee80211_hw *hw);
1292void rt2x00mac_stop(struct ieee80211_hw *hw); 1294void rt2x00mac_stop(struct ieee80211_hw *hw);
1293int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1295int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index a6b88bd4a1a5..a59048ffa092 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
194 */ 194 */
195 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 195 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
196 while (skb) { 196 while (skb) {
197 rt2x00mac_tx(rt2x00dev->hw, skb); 197 rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
198 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); 198 skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
199 } 199 }
200} 200}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 4ff26c2159bf..c3d0f2f87b69 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
99 return retval; 99 return retval;
100} 100}
101 101
102void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 102void rt2x00mac_tx(struct ieee80211_hw *hw,
103 struct ieee80211_tx_control *control,
104 struct sk_buff *skb)
103{ 105{
104 struct rt2x00_dev *rt2x00dev = hw->priv; 106 struct rt2x00_dev *rt2x00dev = hw->priv;
105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 107 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index f7e74a0a7759..e488b944a034 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
315static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, 315static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
316 struct sk_buff *skb, 316 struct sk_buff *skb,
317 struct txentry_desc *txdesc, 317 struct txentry_desc *txdesc,
318 struct ieee80211_sta *sta,
318 const struct rt2x00_rate *hwrate) 319 const struct rt2x00_rate *hwrate)
319{ 320{
320 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 321 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
322 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 323 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
323 struct rt2x00_sta *sta_priv = NULL; 324 struct rt2x00_sta *sta_priv = NULL;
324 325
325 if (tx_info->control.sta) { 326 if (sta) {
326 txdesc->u.ht.mpdu_density = 327 txdesc->u.ht.mpdu_density =
327 tx_info->control.sta->ht_cap.ampdu_density; 328 sta->ht_cap.ampdu_density;
328 329
329 sta_priv = sta_to_rt2x00_sta(tx_info->control.sta); 330 sta_priv = sta_to_rt2x00_sta(sta);
330 txdesc->u.ht.wcid = sta_priv->wcid; 331 txdesc->u.ht.wcid = sta_priv->wcid;
331 } 332 }
332 333
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
341 * MIMO PS should be set to 1 for STA's using dynamic SM PS 342 * MIMO PS should be set to 1 for STA's using dynamic SM PS
342 * when using more then one tx stream (>MCS7). 343 * when using more then one tx stream (>MCS7).
343 */ 344 */
344 if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && 345 if (sta && txdesc->u.ht.mcs > 7 &&
345 ((tx_info->control.sta->ht_cap.cap & 346 ((sta->ht_cap.cap &
346 IEEE80211_HT_CAP_SM_PS) >> 347 IEEE80211_HT_CAP_SM_PS) >>
347 IEEE80211_HT_CAP_SM_PS_SHIFT) == 348 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
348 WLAN_HT_CAP_SM_PS_DYNAMIC) 349 WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
409 410
410static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, 411static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
411 struct sk_buff *skb, 412 struct sk_buff *skb,
412 struct txentry_desc *txdesc) 413 struct txentry_desc *txdesc,
414 struct ieee80211_sta *sta)
413{ 415{
414 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 416 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
415 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
503 505
504 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) 506 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
505 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, 507 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
506 hwrate); 508 sta, hwrate);
507 else 509 else
508 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, 510 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
509 hwrate); 511 hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
595 * after that we are free to use the skb->cb array 597 * after that we are free to use the skb->cb array
596 * for our information. 598 * for our information.
597 */ 599 */
598 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc); 600 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
599 601
600 /* 602 /*
601 * All information is retrieved from the skb->cb array, 603 * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
740 * after that we are free to use the skb->cb array 742 * after that we are free to use the skb->cb array
741 * for our information. 743 * for our information.
742 */ 744 */
743 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc); 745 rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
744 746
745 /* 747 /*
746 * Fill in skb descriptor 748 * Fill in skb descriptor
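Note: in rt2x00 the station now travels as a parameter into the tx-descriptor builders, while driver-internal frames (the beacon update and the current rt2x00queue_write_tx_frame callers) pass NULL and fall back to defaults. The dynamic SM power-save test in the HT hunk above is dense; factored out as a sketch (helper name hypothetical, constants are the real mac80211 ones):

    static bool needs_mimo_ps(struct ieee80211_sta *sta, u8 mcs)
    {
            u8 smps;

            if (!sta || mcs <= 7)   /* single spatial stream: no SM PS issue */
                    return false;

            smps = (sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
                   IEEE80211_HT_CAP_SM_PS_SHIFT;
            return smps == WLAN_HT_CAP_SM_PS_DYNAMIC;
    }
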
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index aceaf689f737..021d83e1b1d3 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
244 return IRQ_HANDLED; 244 return IRQ_HANDLED;
245} 245}
246 246
247static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 247static void rtl8180_tx(struct ieee80211_hw *dev,
248 struct ieee80211_tx_control *control,
249 struct sk_buff *skb)
248{ 250{
249 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
250 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 252 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
710 /* TODO: use actual beacon queue */ 712 /* TODO: use actual beacon queue */
711 skb_set_queue_mapping(skb, 0); 713 skb_set_queue_mapping(skb, 0);
712 714
713 rtl8180_tx(dev, skb); 715 rtl8180_tx(dev, NULL, skb);
714 716
715resched: 717resched:
716 /* 718 /*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 533024095c43..7811b6315973 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
228 } 228 }
229} 229}
230 230
231static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 231static void rtl8187_tx(struct ieee80211_hw *dev,
232 struct ieee80211_tx_control *control,
233 struct sk_buff *skb)
232{ 234{
233 struct rtl8187_priv *priv = dev->priv; 235 struct rtl8187_priv *priv = dev->priv;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 236 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
1076 /* TODO: use actual beacon queue */ 1078 /* TODO: use actual beacon queue */
1077 skb_set_queue_mapping(skb, 0); 1079 skb_set_queue_mapping(skb, 0);
1078 1080
1079 rtl8187_tx(dev, skb); 1081 rtl8187_tx(dev, NULL, skb);
1080 1082
1081resched: 1083resched:
1082 /* 1084 /*
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 942e56b77b60..59381fe8ed06 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
1341 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); 1341 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
1342 1342
1343 info->control.rates[0].idx = 0; 1343 info->control.rates[0].idx = 0;
1344 info->control.sta = sta;
1345 info->band = hw->conf.channel->band; 1344 info->band = hw->conf.channel->band;
1346 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 1345 rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
1347 } 1346 }
1348err_free: 1347err_free:
1349 return 0; 1348 return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a18ad2a98938..a7c0e52869ba 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
124 mutex_unlock(&rtlpriv->locks.conf_mutex); 124 mutex_unlock(&rtlpriv->locks.conf_mutex);
125} 125}
126 126
127static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 127static void rtl_op_tx(struct ieee80211_hw *hw,
128 struct ieee80211_tx_control *control,
129 struct sk_buff *skb)
128{ 130{
129 struct rtl_priv *rtlpriv = rtl_priv(hw); 131 struct rtl_priv *rtlpriv = rtl_priv(hw);
130 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 132 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
138 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) 140 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
139 goto err_free; 141 goto err_free;
140 142
141 if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) 143 if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
142 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 144 rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
143 145
144 return; 146 return;
145 147
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 80f75d3ba84a..aad9d44c0a51 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -504,7 +504,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
504 _rtl_update_earlymode_info(hw, skb, 504 _rtl_update_earlymode_info(hw, skb,
505 &tcb_desc, tid); 505 &tcb_desc, tid);
506 506
507 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); 507 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
508 } 508 }
509 } 509 }
510} 510}
@@ -929,7 +929,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
929 info = IEEE80211_SKB_CB(pskb); 929 info = IEEE80211_SKB_CB(pskb);
930 pdesc = &ring->desc[0]; 930 pdesc = &ring->desc[0];
931 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 931 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
932 info, pskb, BEACON_QUEUE, &tcb_desc); 932 info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
933 933
934 __skb_queue_tail(&ring->queue, pskb); 934 __skb_queue_tail(&ring->queue, pskb);
935 935
@@ -1305,11 +1305,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1305} 1305}
1306 1306
1307static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, 1307static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1308 struct ieee80211_sta *sta,
1308 struct sk_buff *skb) 1309 struct sk_buff *skb)
1309{ 1310{
1310 struct rtl_priv *rtlpriv = rtl_priv(hw); 1311 struct rtl_priv *rtlpriv = rtl_priv(hw);
1311 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1312 struct ieee80211_sta *sta = info->control.sta;
1313 struct rtl_sta_info *sta_entry = NULL; 1312 struct rtl_sta_info *sta_entry = NULL;
1314 u8 tid = rtl_get_tid(skb); 1313 u8 tid = rtl_get_tid(skb);
1315 1314
@@ -1337,13 +1336,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1337 return true; 1336 return true;
1338} 1337}
1339 1338
1340static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 1339static int rtl_pci_tx(struct ieee80211_hw *hw,
1341 struct rtl_tcb_desc *ptcb_desc) 1340 struct ieee80211_sta *sta,
1341 struct sk_buff *skb,
1342 struct rtl_tcb_desc *ptcb_desc)
1342{ 1343{
1343 struct rtl_priv *rtlpriv = rtl_priv(hw); 1344 struct rtl_priv *rtlpriv = rtl_priv(hw);
1344 struct rtl_sta_info *sta_entry = NULL; 1345 struct rtl_sta_info *sta_entry = NULL;
1345 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1346 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1346 struct ieee80211_sta *sta = info->control.sta;
1347 struct rtl8192_tx_ring *ring; 1347 struct rtl8192_tx_ring *ring;
1348 struct rtl_tx_desc *pdesc; 1348 struct rtl_tx_desc *pdesc;
1349 u8 idx; 1349 u8 idx;
@@ -1418,7 +1418,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1418 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 1418 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1419 1419
1420 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, 1420 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1421 info, skb, hw_queue, ptcb_desc); 1421 info, sta, skb, hw_queue, ptcb_desc);
1422 1422
1423 __skb_queue_tail(&ring->queue, skb); 1423 __skb_queue_tail(&ring->queue, skb);
1424 1424
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52166640f167..390d6d4fcaa0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
596 596
597void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 597void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
598 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 598 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
599 struct ieee80211_tx_info *info, struct sk_buff *skb, 599 struct ieee80211_tx_info *info,
600 struct ieee80211_sta *sta,
601 struct sk_buff *skb,
600 u8 hw_queue, struct rtl_tcb_desc *tcb_desc) 602 u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
601{ 603{
602 struct rtl_priv *rtlpriv = rtl_priv(hw); 604 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
604 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 606 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
605 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 607 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
606 bool defaultadapter = true; 608 bool defaultadapter = true;
607 struct ieee80211_sta *sta;
608 u8 *pdesc = pdesc_tx; 609 u8 *pdesc = pdesc_tx;
609 u16 seq_number; 610 u16 seq_number;
610 __le16 fc = hdr->frame_control; 611 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index c4adb9777365..a7cdd514cb2e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -713,6 +713,7 @@ struct rx_desc_92c {
713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 713void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
714 struct ieee80211_hdr *hdr, 714 struct ieee80211_hdr *hdr,
715 u8 *pdesc, struct ieee80211_tx_info *info, 715 u8 *pdesc, struct ieee80211_tx_info *info,
716 struct ieee80211_sta *sta,
716 struct sk_buff *skb, u8 hw_queue, 717 struct sk_buff *skb, u8 hw_queue,
717 struct rtl_tcb_desc *ptcb_desc); 718 struct rtl_tcb_desc *ptcb_desc);
718bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, 719bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 2e6eb356a93e..27863d773790 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -496,7 +496,9 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
496 496
497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
498 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 498 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
499 struct ieee80211_tx_info *info, struct sk_buff *skb, 499 struct ieee80211_tx_info *info,
500 struct ieee80211_sta *sta,
501 struct sk_buff *skb,
500 u8 queue_index, 502 u8 queue_index,
501 struct rtl_tcb_desc *tcb_desc) 503 struct rtl_tcb_desc *tcb_desc)
502{ 504{
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 506 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 507 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
506 bool defaultadapter = true; 508 bool defaultadapter = true;
507 struct ieee80211_sta *sta = info->control.sta = info->control.sta;
508 u8 *qc = ieee80211_get_qos_ctl(hdr); 509 u8 *qc = ieee80211_get_qos_ctl(hdr);
509 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
510 u16 seq_number; 511 u16 seq_number;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 332b06e78b00..725c53accc58 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
420 struct sk_buff_head *); 420 struct sk_buff_head *);
421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
422 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 422 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
423 struct ieee80211_tx_info *info, struct sk_buff *skb, 423 struct ieee80211_tx_info *info,
424 struct ieee80211_sta *sta,
425 struct sk_buff *skb,
424 u8 queue_index, 426 u8 queue_index,
425 struct rtl_tcb_desc *tcb_desc); 427 struct rtl_tcb_desc *tcb_desc);
426void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, 428void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index f80690d82c11..4686f340b9d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
551 551
552void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 552void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
553 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 553 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
554 struct ieee80211_tx_info *info, struct sk_buff *skb, 554 struct ieee80211_tx_info *info,
555 struct ieee80211_sta *sta,
556 struct sk_buff *skb,
555 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 557 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
556{ 558{
557 struct rtl_priv *rtlpriv = rtl_priv(hw); 559 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
559 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 561 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
560 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 562 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
561 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 563 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
562 struct ieee80211_sta *sta = info->control.sta;
563 u8 *pdesc = pdesc_tx; 564 u8 *pdesc = pdesc_tx;
564 u16 seq_number; 565 u16 seq_number;
565 __le16 fc = hdr->frame_control; 566 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 057a52431b00..c1b5dfb79d53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -730,6 +730,7 @@ struct rx_desc_92d {
730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, 730void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
731 struct ieee80211_hdr *hdr, 731 struct ieee80211_hdr *hdr,
732 u8 *pdesc, struct ieee80211_tx_info *info, 732 u8 *pdesc, struct ieee80211_tx_info *info,
733 struct ieee80211_sta *sta,
733 struct sk_buff *skb, u8 hw_queue, 734 struct sk_buff *skb, u8 hw_queue,
734 struct rtl_tcb_desc *ptcb_desc); 735 struct rtl_tcb_desc *ptcb_desc);
735bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, 736bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36d1cb3aef8a..28c53fb12aeb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
591 591
592void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, 592void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
593 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 593 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
594 struct ieee80211_tx_info *info, struct sk_buff *skb, 594 struct ieee80211_tx_info *info,
595 struct ieee80211_sta *sta,
596 struct sk_buff *skb,
595 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) 597 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
596{ 598{
597 struct rtl_priv *rtlpriv = rtl_priv(hw); 599 struct rtl_priv *rtlpriv = rtl_priv(hw);
598 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 600 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
599 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 601 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 602 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
601 struct ieee80211_sta *sta = info->control.sta;
602 u8 *pdesc = pdesc_tx; 603 u8 *pdesc = pdesc_tx;
603 u16 seq_number; 604 u16 seq_number;
604 __le16 fc = hdr->frame_control; 605 __le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 011e7b0695f2..64dd66f287c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -31,6 +31,7 @@
31 31
32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, 32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
33 u8 *pdesc, struct ieee80211_tx_info *info, 33 u8 *pdesc, struct ieee80211_tx_info *info,
34 struct ieee80211_sta *sta,
34 struct sk_buff *skb, u8 hw_queue, 35 struct sk_buff *skb, u8 hw_queue,
35 struct rtl_tcb_desc *ptcb_desc); 36 struct rtl_tcb_desc *ptcb_desc);
36void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, 37void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index aa970fc18a21..914046903cfd 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
848 _rtl_submit_tx_urb(hw, _urb); 848 _rtl_submit_tx_urb(hw, _urb);
849} 849}
850 850
851static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, 851static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
852 u16 hw_queue) 852 struct ieee80211_sta *sta,
853 struct sk_buff *skb,
854 u16 hw_queue)
853{ 855{
854 struct rtl_priv *rtlpriv = rtl_priv(hw); 856 struct rtl_priv *rtlpriv = rtl_priv(hw);
855 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 857 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
891 seq_number += 1; 893 seq_number += 1;
892 seq_number <<= 4; 894 seq_number <<= 4;
893 } 895 }
894 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb, 896 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
895 hw_queue, &tcb_desc); 897 hw_queue, &tcb_desc);
896 if (!ieee80211_has_morefrags(hdr->frame_control)) { 898 if (!ieee80211_has_morefrags(hdr->frame_control)) {
897 if (qc) 899 if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
901 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 903 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
902} 904}
903 905
904static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 906static int rtl_usb_tx(struct ieee80211_hw *hw,
907 struct ieee80211_sta *sta,
908 struct sk_buff *skb,
905 struct rtl_tcb_desc *dummy) 909 struct rtl_tcb_desc *dummy)
906{ 910{
907 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); 911 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
913 if (unlikely(is_hal_stop(rtlhal))) 917 if (unlikely(is_hal_stop(rtlhal)))
914 goto err_free; 918 goto err_free;
915 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); 919 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
916 _rtl_usb_tx_preprocess(hw, skb, hw_queue); 920 _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
917 _rtl_usb_transmit(hw, skb, hw_queue); 921 _rtl_usb_transmit(hw, skb, hw_queue);
918 return NETDEV_TX_OK; 922 return NETDEV_TX_OK;
919 923
@@ -923,6 +927,7 @@ err_free:
923} 927}
924 928
925static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, 929static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
930 struct ieee80211_sta *sta,
926 struct sk_buff *skb) 931 struct sk_buff *skb)
927{ 932{
928 return false; 933 return false;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cdaa21f29710..40153e7bf702 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
122 EEPROM_BOOT_EFUSE, 122 EEPROM_BOOT_EFUSE,
123}; 123};
124 124
125enum rtl_status { 125enum ttl_status {
126 RTL_STATUS_INTERFACE_START = 0, 126 RTL_STATUS_INTERFACE_START = 0,
127}; 127};
128 128
@@ -1418,6 +1418,7 @@ struct rtl_hal_ops {
1418 void (*fill_tx_desc) (struct ieee80211_hw *hw, 1418 void (*fill_tx_desc) (struct ieee80211_hw *hw,
1419 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1419 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1420 struct ieee80211_tx_info *info, 1420 struct ieee80211_tx_info *info,
1421 struct ieee80211_sta *sta,
1421 struct sk_buff *skb, u8 hw_queue, 1422 struct sk_buff *skb, u8 hw_queue,
1422 struct rtl_tcb_desc *ptcb_desc); 1423 struct rtl_tcb_desc *ptcb_desc);
1423 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc, 1424 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1475,11 +1476,15 @@ struct rtl_intf_ops {
1475 int (*adapter_start) (struct ieee80211_hw *hw); 1476 int (*adapter_start) (struct ieee80211_hw *hw);
1476 void (*adapter_stop) (struct ieee80211_hw *hw); 1477 void (*adapter_stop) (struct ieee80211_hw *hw);
1477 1478
1478 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb, 1479 int (*adapter_tx) (struct ieee80211_hw *hw,
1479 struct rtl_tcb_desc *ptcb_desc); 1480 struct ieee80211_sta *sta,
1481 struct sk_buff *skb,
1482 struct rtl_tcb_desc *ptcb_desc);
1480 void (*flush)(struct ieee80211_hw *hw, bool drop); 1483 void (*flush)(struct ieee80211_hw *hw, bool drop);
1481 int (*reset_trx_ring) (struct ieee80211_hw *hw); 1484 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1482 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); 1485 bool (*waitq_insert) (struct ieee80211_hw *hw,
1486 struct ieee80211_sta *sta,
1487 struct sk_buff *skb);
1483 1488
1484 /*pci */ 1489 /*pci */
1485 void (*disable_aspm) (struct ieee80211_hw *hw); 1490 void (*disable_aspm) (struct ieee80211_hw *hw);
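Note: rtlwifi plumbs the station through its whole internal stack, widening waitq_insert, adapter_tx and fill_tx_desc in struct rtl_intf_ops / struct rtl_hal_ops so neither the PCI nor the USB back end reads skb->cb for it; the beacon tasklet and the waitq drain pass NULL. Two details worth flagging: the wifi.h hunk also renames enum rtl_status to ttl_status, which looks like an accidental typo carried by the upstream patch (apparently harmless, since only the RTL_STATUS_* enumerator is referenced elsewhere), and the rtl8192cu hunk removes an odd chained self-assignment (sta = info->control.sta = info->control.sta) along the way. A sketch of the widened op table (initializer shape only; remaining callbacks and bodies elided, const-ness assumed):

    static const struct rtl_intf_ops sketch_pci_ops = {
            .adapter_tx   = rtl_pci_tx,     /* now takes ieee80211_sta * */
            .waitq_insert = rtl_pci_tx_chk_waitq_insert,
    };
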
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3118c425bcf1..441cbccbd381 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -354,7 +354,9 @@ out:
354 return ret; 354 return ret;
355} 355}
356 356
357static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 357static void wl1251_op_tx(struct ieee80211_hw *hw,
358 struct ieee80211_tx_control *control,
359 struct sk_buff *skb)
358{ 360{
359 struct wl1251 *wl = hw->priv; 361 struct wl1251 *wl = hw->priv;
360 unsigned long flags; 362 unsigned long flags;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 72548609f711..ff830cf50c70 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1181,7 +1181,9 @@ out:
1181 return ret; 1181 return ret;
1182} 1182}
1183 1183
1184static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1184static void wl1271_op_tx(struct ieee80211_hw *hw,
1185 struct ieee80211_tx_control *control,
1186 struct sk_buff *skb)
1185{ 1187{
1186 struct wl1271 *wl = hw->priv; 1188 struct wl1271 *wl = hw->priv;
1187 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1189 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1199,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1197 mapping = skb_get_queue_mapping(skb); 1199 mapping = skb_get_queue_mapping(skb);
1198 q = wl1271_tx_get_queue(mapping); 1200 q = wl1271_tx_get_queue(mapping);
1199 1201
1200 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); 1202 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1201 1203
1202 spin_lock_irqsave(&wl->wl_lock, flags); 1204 spin_lock_irqsave(&wl->wl_lock, flags);
1203 1205
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0081f746482..1a2f31c289c5 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
130} 130}
131EXPORT_SYMBOL(wl12xx_is_dummy_packet); 131EXPORT_SYMBOL(wl12xx_is_dummy_packet);
132 132
133u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, 133static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
134 struct sk_buff *skb) 134 struct sk_buff *skb, struct ieee80211_sta *sta)
135{ 135{
136 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); 136 if (sta) {
137
138 if (control->control.sta) {
139 struct wl1271_station *wl_sta; 137 struct wl1271_station *wl_sta;
140 138
141 wl_sta = (struct wl1271_station *) 139 wl_sta = (struct wl1271_station *)sta->drv_priv;
142 control->control.sta->drv_priv;
143 return wl_sta->hlid; 140 return wl_sta->hlid;
144 } else { 141 } else {
145 struct ieee80211_hdr *hdr; 142 struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
156} 153}
157 154
158u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 155u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
159 struct sk_buff *skb) 156 struct sk_buff *skb, struct ieee80211_sta *sta)
160{ 157{
161 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
162 159
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
164 return wl->system_hlid; 161 return wl->system_hlid;
165 162
166 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 163 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
167 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); 164 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
168 165
169 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || 166 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
170 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && 167 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -344,13 +341,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
344 341
345/* caller must hold wl->mutex */ 342/* caller must hold wl->mutex */
346static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, 343static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
347 struct sk_buff *skb, u32 buf_offset) 344 struct sk_buff *skb, u32 buf_offset, u8 hlid)
348{ 345{
349 struct ieee80211_tx_info *info; 346 struct ieee80211_tx_info *info;
350 u32 extra = 0; 347 u32 extra = 0;
351 int ret = 0; 348 int ret = 0;
352 u32 total_len; 349 u32 total_len;
353 u8 hlid;
354 bool is_dummy; 350 bool is_dummy;
355 bool is_gem = false; 351 bool is_gem = false;
356 352
@@ -359,9 +355,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
359 return -EINVAL; 355 return -EINVAL;
360 } 356 }
361 357
358 if (hlid == WL12XX_INVALID_LINK_ID) {
359 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
360 return -EINVAL;
361 }
362
362 info = IEEE80211_SKB_CB(skb); 363 info = IEEE80211_SKB_CB(skb);
363 364
364 /* TODO: handle dummy packets on multi-vifs */
365 is_dummy = wl12xx_is_dummy_packet(wl, skb); 365 is_dummy = wl12xx_is_dummy_packet(wl, skb);
366 366
367 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && 367 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +386,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
386 386
387 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM); 387 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
388 } 388 }
389 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
390 if (hlid == WL12XX_INVALID_LINK_ID) {
391 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
392 return -EINVAL;
393 }
394 389
395 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, 390 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
396 is_gem); 391 is_gem);
@@ -517,7 +512,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
517} 512}
518 513
519static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, 514static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
520 struct wl12xx_vif *wlvif) 515 struct wl12xx_vif *wlvif,
516 u8 *hlid)
521{ 517{
522 struct sk_buff *skb = NULL; 518 struct sk_buff *skb = NULL;
523 int i, h, start_hlid; 519 int i, h, start_hlid;
@@ -544,10 +540,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
544 if (!skb) 540 if (!skb)
545 wlvif->last_tx_hlid = 0; 541 wlvif->last_tx_hlid = 0;
546 542
543 *hlid = wlvif->last_tx_hlid;
547 return skb; 544 return skb;
548} 545}
549 546
550static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 547static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
551{ 548{
552 unsigned long flags; 549 unsigned long flags;
553 struct wl12xx_vif *wlvif = wl->last_wlvif; 550 struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +553,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
556 /* continue from last wlvif (round robin) */ 553 /* continue from last wlvif (round robin) */
557 if (wlvif) { 554 if (wlvif) {
558 wl12xx_for_each_wlvif_continue(wl, wlvif) { 555 wl12xx_for_each_wlvif_continue(wl, wlvif) {
559 skb = wl12xx_vif_skb_dequeue(wl, wlvif); 556 skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
560 if (skb) { 557 if (skb) {
561 wl->last_wlvif = wlvif; 558 wl->last_wlvif = wlvif;
562 break; 559 break;
@@ -565,13 +562,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
565 } 562 }
566 563
567 /* dequeue from the system HLID before the restarting wlvif list */ 564 /* dequeue from the system HLID before the restarting wlvif list */
568 if (!skb) 565 if (!skb) {
569 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); 566 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
567 *hlid = wl->system_hlid;
568 }
570 569
571 /* do a new pass over the wlvif list */ 570 /* do a new pass over the wlvif list */
572 if (!skb) { 571 if (!skb) {
573 wl12xx_for_each_wlvif(wl, wlvif) { 572 wl12xx_for_each_wlvif(wl, wlvif) {
574 skb = wl12xx_vif_skb_dequeue(wl, wlvif); 573 skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
575 if (skb) { 574 if (skb) {
576 wl->last_wlvif = wlvif; 575 wl->last_wlvif = wlvif;
577 break; 576 break;
@@ -591,6 +590,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
591 int q; 590 int q;
592 591
593 skb = wl->dummy_packet; 592 skb = wl->dummy_packet;
593 *hlid = wl->system_hlid;
594 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 594 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
595 spin_lock_irqsave(&wl->wl_lock, flags); 595 spin_lock_irqsave(&wl->wl_lock, flags);
596 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); 596 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +602,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
602} 602}
603 603
604static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, 604static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
605 struct sk_buff *skb) 605 struct sk_buff *skb, u8 hlid)
606{ 606{
607 unsigned long flags; 607 unsigned long flags;
608 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 608 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +610,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
610 if (wl12xx_is_dummy_packet(wl, skb)) { 610 if (wl12xx_is_dummy_packet(wl, skb)) {
611 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 611 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
612 } else { 612 } else {
613 u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
614 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 613 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
615 614
616 /* make sure we dequeue the same packet next time */ 615 /* make sure we dequeue the same packet next time */
@@ -686,26 +685,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
686 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 685 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
687 int ret = 0; 686 int ret = 0;
688 int bus_ret = 0; 687 int bus_ret = 0;
688 u8 hlid;
689 689
690 if (unlikely(wl->state == WL1271_STATE_OFF)) 690 if (unlikely(wl->state == WL1271_STATE_OFF))
691 return 0; 691 return 0;
692 692
693 while ((skb = wl1271_skb_dequeue(wl))) { 693 while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
694 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 694 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
695 bool has_data = false; 695 bool has_data = false;
696 696
697 wlvif = NULL; 697 wlvif = NULL;
698 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) 698 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
699 wlvif = wl12xx_vif_to_data(info->control.vif); 699 wlvif = wl12xx_vif_to_data(info->control.vif);
700 else
701 hlid = wl->system_hlid;
700 702
701 has_data = wlvif && wl1271_tx_is_data_present(skb); 703 has_data = wlvif && wl1271_tx_is_data_present(skb);
702 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); 704 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
705 hlid);
703 if (ret == -EAGAIN) { 706 if (ret == -EAGAIN) {
704 /* 707 /*
705 * Aggregation buffer is full. 708 * Aggregation buffer is full.
706 * Flush buffer and try again. 709 * Flush buffer and try again.
707 */ 710 */
708 wl1271_skb_queue_head(wl, wlvif, skb); 711 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
709 712
710 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, 713 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
711 last_len); 714 last_len);
@@ -722,7 +725,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
722 * Firmware buffer is full. 725 * Firmware buffer is full.
723 * Queue back last skb, and stop aggregating. 726 * Queue back last skb, and stop aggregating.
724 */ 727 */
725 wl1271_skb_queue_head(wl, wlvif, skb); 728 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
726 /* No work left, avoid scheduling redundant tx work */ 729 /* No work left, avoid scheduling redundant tx work */
727 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 730 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
728 goto out_ack; 731 goto out_ack;
@@ -732,7 +735,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
732 * fw still expects dummy packet, 735 * fw still expects dummy packet,
733 * so re-enqueue it 736 * so re-enqueue it
734 */ 737 */
735 wl1271_skb_queue_head(wl, wlvif, skb); 738 wl1271_skb_queue_head(wl, wlvif, skb, hlid);
736 else 739 else
737 ieee80211_free_txskb(wl->hw, skb); 740 ieee80211_free_txskb(wl->hw, skb);
738 goto out_ack; 741 goto out_ack;
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 1e939b016155..349520d8b724 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 243u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
244 enum ieee80211_band rate_band); 244 enum ieee80211_band rate_band);
245u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); 245u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
246u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
247 struct sk_buff *skb);
248u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 246u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
249 struct sk_buff *skb); 247 struct sk_buff *skb, struct ieee80211_sta *sta);
250void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); 248void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
251void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 249void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
252bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); 250bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
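Note: wlcore is the most invasive conversion because it queues frames and transmits them later from a worker, by which time info->control.sta is no longer valid. The patch therefore resolves the firmware link ID (hlid) once in wl1271_op_tx(), while control->sta is still live, and makes every later stage -- dequeue, prepare, requeue-on-EAGAIN -- carry the hlid explicitly instead of recomputing it. A minimal sketch of the out-parameter dequeue idiom (field names from the hunks above, body simplified to a single queue):

    static struct sk_buff *dequeue_one(struct wl1271 *wl, u8 *hlid)
    {
            struct sk_buff *skb;

            /* Whichever per-link queue yields the skb also tells the
             * caller which link to bill it to. */
            skb = skb_dequeue(&wl->links[wl->system_hlid].tx_queue[0]);
            if (skb)
                    *hlid = wl->system_hlid;
            return skb;
    }
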
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c9e2660e1263..459880104758 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
937 * control block of the skbuff will be initialized. If necessary the incoming 937 * control block of the skbuff will be initialized. If necessary the incoming
938 * mac80211 queues will be stopped. 938 * mac80211 queues will be stopped.
939 */ 939 */
940static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 940static void zd_op_tx(struct ieee80211_hw *hw,
941 struct ieee80211_tx_control *control,
942 struct sk_buff *skb)
941{ 943{
942 struct zd_mac *mac = zd_hw_mac(hw); 944 struct zd_mac *mac = zd_hw_mac(hw);
943 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 945 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
1176 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif); 1178 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
1177 if (!skb) 1179 if (!skb)
1178 break; 1180 break;
1179 zd_op_tx(mac->hw, skb); 1181 zd_op_tx(mac->hw, NULL, skb);
1180 } 1182 }
1181 1183
1182 /* 1184 /*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 39afd37e62b3..c934fe8583f5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,8 +57,7 @@
57static const struct ethtool_ops xennet_ethtool_ops; 57static const struct ethtool_ops xennet_ethtool_ops;
58 58
59struct netfront_cb { 59struct netfront_cb {
60 struct page *page; 60 int pull_to;
61 unsigned offset;
62}; 61};
63 62
64#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 63#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
867 struct sk_buff *skb; 866 struct sk_buff *skb;
868 867
869 while ((skb = __skb_dequeue(rxq)) != NULL) { 868 while ((skb = __skb_dequeue(rxq)) != NULL) {
870 struct page *page = NETFRONT_SKB_CB(skb)->page; 869 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
871 void *vaddr = page_address(page);
872 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
873
874 memcpy(skb->data, vaddr + offset,
875 skb_headlen(skb));
876 870
877 if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) 871 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
878 __free_page(page);
879 872
880 /* Ethernet work: Delayed to here as it peeks the header. */ 873 /* Ethernet work: Delayed to here as it peeks the header. */
881 skb->protocol = eth_type_trans(skb, dev); 874 skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
913 struct sk_buff_head errq; 906 struct sk_buff_head errq;
914 struct sk_buff_head tmpq; 907 struct sk_buff_head tmpq;
915 unsigned long flags; 908 unsigned long flags;
916 unsigned int len;
917 int err; 909 int err;
918 910
919 spin_lock(&np->rx_lock); 911 spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
955 } 947 }
956 } 948 }
957 949
958 NETFRONT_SKB_CB(skb)->page = 950 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
959 skb_frag_page(&skb_shinfo(skb)->frags[0]); 951 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
960 NETFRONT_SKB_CB(skb)->offset = rx->offset; 952 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
961
962 len = rx->status;
963 if (len > RX_COPY_THRESHOLD)
964 len = RX_COPY_THRESHOLD;
965 skb_put(skb, len);
966 953
967 if (rx->status > len) { 954 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
968 skb_shinfo(skb)->frags[0].page_offset = 955 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
969 rx->offset + len; 956 skb->data_len = rx->status;
970 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
971 skb->data_len = rx->status - len;
972 } else {
973 __skb_fill_page_desc(skb, 0, NULL, 0, 0);
974 skb_shinfo(skb)->nr_frags = 0;
975 }
976 957
977 i = xennet_fill_frags(np, skb, &tmpq); 958 i = xennet_fill_frags(np, skb, &tmpq);
978 959
@@ -999,7 +980,7 @@ err:
999 * receive throughout using the standard receive 980 * receive throughout using the standard receive
1000 * buffer size was cut by 25%(!!!). 981 * buffer size was cut by 25%(!!!).
1001 */ 982 */
1002 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); 983 skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
1003 skb->len += skb->data_len; 984 skb->len += skb->data_len;
1004 985
1005 if (rx->flags & XEN_NETRXF_csum_blank) 986 if (rx->flags & XEN_NETRXF_csum_blank)
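Note: the netfront RX path stops carrying a page/offset pair in skb->cb and memcpy'ing headers by hand. All received data now stays in frag 0; the driver records only how many bytes belong in the linear area (pull_to, capped at RX_COPY_THRESHOLD) and lets __pskb_pull_tail() move them at delivery time, which keeps skb->len, data_len and truesize consistent by construction. The resulting fixup, as a sketch (helper name hypothetical):

    static void netfront_pull_headers(struct sk_buff *skb)
    {
            int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

            /* Copies at most RX_COPY_THRESHOLD bytes from frag 0 into
             * skb->data so protocol headers are linear. */
            __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
    }
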
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 8fc3808d7a3e..90c5c7357a50 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -1,12 +1,31 @@
1menuconfig PWM 1menuconfig PWM
2 bool "PWM Support" 2 bool "Pulse-Width Modulation (PWM) Support"
3 depends on !MACH_JZ4740 && !PUV3_PWM 3 depends on !MACH_JZ4740 && !PUV3_PWM
4 help 4 help
5 This enables PWM support through the generic PWM framework. 5 Generic Pulse-Width Modulation (PWM) support.
6 You only need to enable this, if you also want to enable 6
7 one or more of the PWM drivers below. 7 In Pulse-Width Modulation, a variation of the width of pulses
8 8 in a rectangular pulse signal is used as a means to alter the
9 If unsure, say N. 9 average power of the signal. Applications include efficient
10 power delivery and voltage regulation. In computer systems,
11 PWMs are commonly used to control fans or the brightness of
12 display backlights.
13
14 This framework provides a generic interface to PWM devices
15 within the Linux kernel. On the driver side it provides an API
16 to register and unregister a PWM chip, an abstraction of a PWM
17 controller, that supports one or more PWM devices. Client
18 drivers can request PWM devices and use the generic framework
19 to configure as well as enable and disable them.
20
21 This generic framework replaces the legacy PWM framework which
22 allows only a single driver implementing the required API. Not
23 all legacy implementations have been ported to the framework
24 yet. The framework provides an API that is backward compatible
25 with the legacy framework so that existing client drivers
26 continue to work as expected.
27
28 If unsure, say no.
10 29
11if PWM 30if PWM
12 31
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ecb76909e946..c6e05078d3ad 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
129 return 0; 129 return 0;
130} 130}
131 131
132static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, 132static struct pwm_device *
133 const struct of_phandle_args *args) 133of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
134{ 134{
135 struct pwm_device *pwm; 135 struct pwm_device *pwm;
136 136
@@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc,
149 return pwm; 149 return pwm;
150} 150}
151 151
152void of_pwmchip_add(struct pwm_chip *chip) 152static void of_pwmchip_add(struct pwm_chip *chip)
153{ 153{
154 if (!chip->dev || !chip->dev->of_node) 154 if (!chip->dev || !chip->dev->of_node)
155 return; 155 return;
@@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip)
162 of_node_get(chip->dev->of_node); 162 of_node_get(chip->dev->of_node);
163} 163}
164 164
165void of_pwmchip_remove(struct pwm_chip *chip) 165static void of_pwmchip_remove(struct pwm_chip *chip)
166{ 166{
167 if (chip->dev && chip->dev->of_node) 167 if (chip->dev && chip->dev->of_node)
168 of_node_put(chip->dev->of_node); 168 of_node_put(chip->dev->of_node);
@@ -527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num)
527struct pwm_device *pwm_get(struct device *dev, const char *con_id) 527struct pwm_device *pwm_get(struct device *dev, const char *con_id)
528{ 528{
529 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); 529 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
530 const char *dev_id = dev ? dev_name(dev): NULL; 530 const char *dev_id = dev ? dev_name(dev) : NULL;
531 struct pwm_chip *chip = NULL; 531 struct pwm_chip *chip = NULL;
532 unsigned int index = 0; 532 unsigned int index = 0;
533 unsigned int best = 0; 533 unsigned int best = 0;
@@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm)
609 mutex_lock(&pwm_lock); 609 mutex_lock(&pwm_lock);
610 610
611 if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { 611 if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
612 pr_warning("PWM device already freed\n"); 612 pr_warn("PWM device already freed\n");
613 goto out; 613 goto out;
614 } 614 }
615 615
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index d10386528c9c..e5187c0ade9f 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)
225 225
226 /* calculate base of control bits in TCON */ 226 /* calculate base of control bits in TCON */
227 s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4; 227 s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
228 s3c->chip.dev = &pdev->dev;
228 s3c->chip.ops = &s3c_pwm_ops; 229 s3c->chip.ops = &s3c_pwm_ops;
229 s3c->chip.base = -1; 230 s3c->chip.base = -1;
230 s3c->chip.npwm = 1; 231 s3c->chip.npwm = 1;
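Note: the one-line pwm-samsung change is a bug fix, not churn: pwmchip_add() rejects a chip whose dev (or ops) pointer is NULL, so without this assignment the driver could never register. A registration sketch with hypothetical foo_* names:

    struct foo_pwm {
            struct pwm_chip chip;
    };

    static int foo_pwm_probe(struct platform_device *pdev)
    {
            struct foo_pwm *fp;

            fp = devm_kzalloc(&pdev->dev, sizeof(*fp), GFP_KERNEL);
            if (!fp)
                    return -ENOMEM;

            fp->chip.dev = &pdev->dev;      /* required by pwmchip_add() */
            fp->chip.ops = &foo_pwm_ops;    /* hypothetical ops table */
            fp->chip.base = -1;             /* let the core choose a base */
            fp->chip.npwm = 1;

            return pwmchip_add(&fp->chip);
    }
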
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 02ce18d5e49a..057465e0553c 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev)
187 } 187 }
188 188
189 pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); 189 pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
190 if (!pwm->mmio_base) { 190 if (!pwm->mmio_base)
191 dev_err(&pdev->dev, "failed to ioremap() region\n");
192 return -EADDRNOTAVAIL; 191 return -EADDRNOTAVAIL;
193 }
194 192
195 platform_set_drvdata(pdev, pwm); 193 platform_set_drvdata(pdev, pwm);
196 194
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 3c2ad284ee3e..0b66d0f25922 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -192,10 +192,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev)
192 } 192 }
193 193
194 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); 194 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
195 if (!pc->mmio_base) { 195 if (!pc->mmio_base)
196 dev_err(&pdev->dev, "failed to ioremap() registers\n");
197 return -EADDRNOTAVAIL; 196 return -EADDRNOTAVAIL;
198 }
199 197
200 ret = pwmchip_add(&pc->chip); 198 ret = pwmchip_add(&pc->chip);
201 if (ret < 0) { 199 if (ret < 0) {
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 010d232cb0c8..c3756d1be194 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -371,10 +371,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev)
371 } 371 }
372 372
373 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); 373 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
374 if (!pc->mmio_base) { 374 if (!pc->mmio_base)
375 dev_err(&pdev->dev, "failed to ioremap() registers\n");
376 return -EADDRNOTAVAIL; 375 return -EADDRNOTAVAIL;
377 }
378 376
379 ret = pwmchip_add(&pc->chip); 377 ret = pwmchip_add(&pc->chip);
380 if (ret < 0) { 378 if (ret < 0) {
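Note: the dev_err() deletions in the pwm-tegra, pwm-tiecap and pwm-tiehrpwm hunks are all the same cleanup: devm_request_and_ioremap() already prints a diagnostic on each of its failure paths, so callers only need to turn NULL into an error code. The resulting idiom:

    pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
    if (!pc->mmio_base)
            return -EADDRNOTAVAIL;  /* the helper has already logged why */
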
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 548021439f0c..ad14389b7144 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
41 cpu_relax(); 41 cpu_relax();
42 42
43 if (unlikely(!loops)) 43 if (unlikely(!loops))
44 pr_warning("Waiting for status bits 0x%x to clear timed out\n", 44 pr_warn("Waiting for status bits 0x%x to clear timed out\n",
45 bitmask); 45 bitmask);
46} 46}
47 47
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 0ca857ac473e..48aa1361903e 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -119,7 +119,9 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
119 *total_flags = new_flags; 119 *total_flags = new_flags;
120} 120}
121 121
122static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 122static void wbsoft_tx(struct ieee80211_hw *dev,
123 struct ieee80211_tx_control *control,
124 struct sk_buff *skb)
123{ 125{
124 struct wbsoft_priv *priv = dev->priv; 126 struct wbsoft_priv *priv = dev->priv;
125 127
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 6e32ff6f2fa0..5552fa7426bc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
673 struct scsi_device *sd = pdv->pdv_sd; 673 struct scsi_device *sd = pdv->pdv_sd;
674 int result; 674 int result;
675 struct pscsi_plugin_task *pt = cmd->priv; 675 struct pscsi_plugin_task *pt = cmd->priv;
676 unsigned char *cdb = &pt->pscsi_cdb[0]; 676 unsigned char *cdb;
677 /*
678 * Special case for REPORT_LUNs handling where pscsi_plugin_task has
679 * not been allocated because TCM is handling the emulation directly.
680 */
681 if (!pt)
682 return 0;
677 683
684 cdb = &pt->pscsi_cdb[0];
678 result = pt->pscsi_result; 685 result = pt->pscsi_result;
679 /* 686 /*
680 * Hack to make sure that Write-Protect modepage is set if R/O mode is 687 * Hack to make sure that Write-Protect modepage is set if R/O mode is
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0eaae23d12b5..4de3186dc44e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1166 cmd->data_length, size, cmd->t_task_cdb[0]); 1166 cmd->data_length, size, cmd->t_task_cdb[0]);
1167 1167
1168 cmd->cmd_spdtl = size;
1169
1170 if (cmd->data_direction == DMA_TO_DEVICE) { 1168 if (cmd->data_direction == DMA_TO_DEVICE) {
1171 pr_err("Rejecting underflow/overflow" 1169 pr_err("Rejecting underflow/overflow"
1172 " WRITE data\n"); 1170 " WRITE data\n");
@@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
2294 return 0; 2292 return 0;
2295 2293
2296out: 2294out:
2297 while (i >= 0) { 2295 while (i > 0) {
2298 __free_page(sg_page(&cmd->t_data_sg[i]));
2299 i--; 2296 i--;
2297 __free_page(sg_page(&cmd->t_data_sg[i]));
2300 } 2298 }
2301 kfree(cmd->t_data_sg); 2299 kfree(cmd->t_data_sg);
2302 cmd->t_data_sg = NULL; 2300 cmd->t_data_sg = NULL;
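
The reordered cleanup loop above fixes a real bug: the old `while (i >= 0)` form freed slot i first, which is exactly the entry whose page allocation had just failed and so holds no page. Decrementing before freeing confines the loop to the i pages that were actually allocated:

	/* on failure at index i, only entries 0 .. i-1 hold valid pages */
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
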
@@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
2323 if (ret < 0) 2321 if (ret < 0)
2324 goto out_fail; 2322 goto out_fail;
2325 } 2323 }
2326 2324 /*
2327 /* Workaround for handling zero-length control CDBs */ 2325 * If this command doesn't have any payload and we don't have to call
2328 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { 2326 * into the fabric for data transfers, go ahead and complete it right
2327 * away.
2328 */
2329 if (!cmd->data_length) {
2329 spin_lock_irq(&cmd->t_state_lock); 2330 spin_lock_irq(&cmd->t_state_lock);
2330 cmd->t_state = TRANSPORT_COMPLETE; 2331 cmd->t_state = TRANSPORT_COMPLETE;
2331 cmd->transport_state |= CMD_T_ACTIVE; 2332 cmd->transport_state |= CMD_T_ACTIVE;
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c5eb3c33c3db..eea69358ced3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;
131extern struct mutex ft_lport_lock; 131extern struct mutex ft_lport_lock;
132extern struct fc4_prov ft_prov; 132extern struct fc4_prov ft_prov;
133extern struct target_fabric_configfs *ft_configfs; 133extern struct target_fabric_configfs *ft_configfs;
134extern unsigned int ft_debug_logging;
134 135
135/* 136/*
136 * Fabric methods. 137 * Fabric methods.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index b9cb5006177e..823e6922249d 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -48,7 +48,7 @@
48/* 48/*
49 * Dump cmd state for debugging. 49 * Dump cmd state for debugging.
50 */ 50 */
51void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 51static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
52{ 52{
53 struct fc_exch *ep; 53 struct fc_exch *ep;
54 struct fc_seq *sp; 54 struct fc_seq *sp;
@@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
80 } 80 }
81} 81}
82 82
83void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
84{
85 if (unlikely(ft_debug_logging))
86 _ft_dump_cmd(cmd, caller);
87}
88
83static void ft_free_cmd(struct ft_cmd *cmd) 89static void ft_free_cmd(struct ft_cmd *cmd)
84{ 90{
85 struct fc_frame *fp; 91 struct fc_frame *fp;
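
The extern declaration added to tcm_fc.h above exists so this wrapper can test the module's debug flag on the fast path: call sites keep invoking ft_dump_cmd(), but now pay only a single predicted branch unless logging is enabled, while the cold dump body stays out of line in _ft_dump_cmd():

	/* module-wide flag, made visible via the new extern in tcm_fc.h */
	if (unlikely(ft_debug_logging))
		_ft_dump_cmd(cmd, caller);
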
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 87901fa74dd7..3c9e5b57caab 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
456 struct ft_tport *tport; 456 struct ft_tport *tport;
457 457
458 mutex_lock(&ft_lport_lock); 458 mutex_lock(&ft_lport_lock);
459 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); 459 tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
460 lockdep_is_held(&ft_lport_lock));
461
460 if (!tport) { 462 if (!tport) {
461 mutex_unlock(&ft_lport_lock); 463 mutex_unlock(&ft_lport_lock);
462 return; 464 return;
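
rcu_dereference_protected() is the right accessor here because ft_prlo() runs on the update side of this RCU-protected pointer with ft_lport_lock held: it skips the read-side overhead of rcu_dereference() and lets lockdep verify the stated lock condition. The idiom, sketched:

	mutex_lock(&ft_lport_lock);
	tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
					  lockdep_is_held(&ft_lport_lock));
	/* safe to both read and update the pointer here */
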
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 9591e2b509d7..17830c9c7cc6 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
264 return group; 264 return group;
265} 265}
266 266
267/* called with vfio.group_lock held */
267static void vfio_group_release(struct kref *kref) 268static void vfio_group_release(struct kref *kref)
268{ 269{
269 struct vfio_group *group = container_of(kref, struct vfio_group, kref); 270 struct vfio_group *group = container_of(kref, struct vfio_group, kref);
@@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref)
287 288
288static void vfio_group_put(struct vfio_group *group) 289static void vfio_group_put(struct vfio_group *group)
289{ 290{
290 mutex_lock(&vfio.group_lock); 291 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
291 /*
292 * Release needs to unlock to unregister the notifier, so only
293 * unlock if not released.
294 */
295 if (!kref_put(&group->kref, vfio_group_release))
296 mutex_unlock(&vfio.group_lock);
297} 292}
298 293
299/* Assume group_lock or group reference is held */ 294/* Assume group_lock or group reference is held */
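
kref_put_mutex() collapses the old lock/put/conditional-unlock dance: the mutex is taken only when the refcount is about to hit zero, and the release callback then runs with the lock held and is responsible for dropping it. A minimal sketch with a hypothetical object:

/* hypothetical object on a list guarded by obj_lock */
struct obj {
	struct kref kref;
	struct list_head node;
};
static DEFINE_MUTEX(obj_lock);

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	list_del(&o->node);		/* obj_lock is held here */
	mutex_unlock(&obj_lock);	/* release() must drop the lock */
	kfree(o);
}

static void obj_put(struct obj *o)
{
	kref_put_mutex(&o->kref, obj_release, &obj_lock);
}
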
@@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref)
401 struct vfio_device, kref); 396 struct vfio_device, kref);
402 struct vfio_group *group = device->group; 397 struct vfio_group *group = device->group;
403 398
404 mutex_lock(&group->device_lock);
405 list_del(&device->group_next); 399 list_del(&device->group_next);
406 mutex_unlock(&group->device_lock); 400 mutex_unlock(&group->device_lock);
407 401
@@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref)
416/* Device reference always implies a group reference */ 410/* Device reference always implies a group reference */
417static void vfio_device_put(struct vfio_device *device) 411static void vfio_device_put(struct vfio_device *device)
418{ 412{
419 kref_put(&device->kref, vfio_device_release); 413 struct vfio_group *group = device->group;
420 vfio_group_put(device->group); 414 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
415 vfio_group_put(group);
421} 416}
422 417
423static void vfio_device_get(struct vfio_device *device) 418static void vfio_device_get(struct vfio_device *device)
@@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1116 */ 1111 */
1117 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); 1112 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1118 1113
1119 fd_install(ret, filep);
1120
1121 vfio_device_get(device); 1114 vfio_device_get(device);
1122 atomic_inc(&group->container_users); 1115 atomic_inc(&group->container_users);
1116
1117 fd_install(ret, filep);
1123 break; 1118 break;
1124 } 1119 }
1125 mutex_unlock(&group->device_lock); 1120 mutex_unlock(&group->device_lock);
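
Moving fd_install() after the reference bumps matters because fd_install() publishes the file in the process's descriptor table: from that instant another thread can use or even close the fd, so every reference and counter the fd relies on must already be in place. The safe ordering:

	/* all state the new fd depends on is set up first ... */
	vfio_device_get(device);
	atomic_inc(&group->container_users);

	/* ... because fd_install() makes the fd live immediately;
	 * a sibling thread could use or close it from here on */
	fd_install(ret, filep);
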
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index fb366540ed54..ed8e2e6c8df2 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -53,9 +53,14 @@
53#include "vhost.h" 53#include "vhost.h"
54#include "tcm_vhost.h" 54#include "tcm_vhost.h"
55 55
56enum {
57 VHOST_SCSI_VQ_CTL = 0,
58 VHOST_SCSI_VQ_EVT = 1,
59 VHOST_SCSI_VQ_IO = 2,
60};
61
56struct vhost_scsi { 62struct vhost_scsi {
57 atomic_t vhost_ref_cnt; 63 struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
58 struct tcm_vhost_tpg *vs_tpg;
59 struct vhost_dev dev; 64 struct vhost_dev dev;
60 struct vhost_virtqueue vqs[3]; 65 struct vhost_virtqueue vqs[3];
61 66
@@ -131,8 +136,7 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
131 return 1; 136 return 1;
132} 137}
133 138
134static u32 tcm_vhost_get_pr_transport_id( 139static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
135 struct se_portal_group *se_tpg,
136 struct se_node_acl *se_nacl, 140 struct se_node_acl *se_nacl,
137 struct t10_pr_registration *pr_reg, 141 struct t10_pr_registration *pr_reg,
138 int *format_code, 142 int *format_code,
@@ -162,8 +166,7 @@ static u32 tcm_vhost_get_pr_transport_id(
162 format_code, buf); 166 format_code, buf);
163} 167}
164 168
165static u32 tcm_vhost_get_pr_transport_id_len( 169static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
166 struct se_portal_group *se_tpg,
167 struct se_node_acl *se_nacl, 170 struct se_node_acl *se_nacl,
168 struct t10_pr_registration *pr_reg, 171 struct t10_pr_registration *pr_reg,
169 int *format_code) 172 int *format_code)
@@ -192,8 +195,7 @@ static u32 tcm_vhost_get_pr_transport_id_len(
192 format_code); 195 format_code);
193} 196}
194 197
195static char *tcm_vhost_parse_pr_out_transport_id( 198static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
196 struct se_portal_group *se_tpg,
197 const char *buf, 199 const char *buf,
198 u32 *out_tid_len, 200 u32 *out_tid_len,
199 char **port_nexus_ptr) 201 char **port_nexus_ptr)
@@ -236,8 +238,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
236 return &nacl->se_node_acl; 238 return &nacl->se_node_acl;
237} 239}
238 240
239static void tcm_vhost_release_fabric_acl( 241static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
240 struct se_portal_group *se_tpg,
241 struct se_node_acl *se_nacl) 242 struct se_node_acl *se_nacl)
242{ 243{
243 struct tcm_vhost_nacl *nacl = container_of(se_nacl, 244 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
@@ -297,7 +298,16 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
297 return 0; 298 return 0;
298} 299}
299 300
300static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *); 301static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
302{
303 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
304
305 spin_lock_bh(&vs->vs_completion_lock);
306 list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
307 spin_unlock_bh(&vs->vs_completion_lock);
308
309 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
310}
301 311
302static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) 312static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
303{ 313{
@@ -381,7 +391,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
381 vs_completion_work); 391 vs_completion_work);
382 struct tcm_vhost_cmd *tv_cmd; 392 struct tcm_vhost_cmd *tv_cmd;
383 393
384 while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) { 394 while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
385 struct virtio_scsi_cmd_resp v_rsp; 395 struct virtio_scsi_cmd_resp v_rsp;
386 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; 396 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
387 int ret; 397 int ret;
@@ -408,19 +418,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
408 vhost_signal(&vs->dev, &vs->vqs[2]); 418 vhost_signal(&vs->dev, &vs->vqs[2]);
409} 419}
410 420
411static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
412{
413 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
414
415 pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
416
417 spin_lock_bh(&vs->vs_completion_lock);
418 list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
419 spin_unlock_bh(&vs->vs_completion_lock);
420
421 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
422}
423
424static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( 421static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
425 struct tcm_vhost_tpg *tv_tpg, 422 struct tcm_vhost_tpg *tv_tpg,
426 struct virtio_scsi_cmd_req *v_req, 423 struct virtio_scsi_cmd_req *v_req,
@@ -533,8 +530,8 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
533 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); 530 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
534 if (!sg) 531 if (!sg)
535 return -ENOMEM; 532 return -ENOMEM;
536 pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__, 533 pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
537 sg, sgl_count, IS_ERR(sg)); 534 sg, sgl_count, !sg);
538 sg_init_table(sg, sgl_count); 535 sg_init_table(sg, sgl_count);
539 536
540 tv_cmd->tvc_sgl = sg; 537 tv_cmd->tvc_sgl = sg;
@@ -787,12 +784,12 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
787 784
788static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 785static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
789{ 786{
790 pr_err("%s: The handling func for control queue.\n", __func__); 787 pr_debug("%s: The handling func for control queue.\n", __func__);
791} 788}
792 789
793static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 790static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
794{ 791{
795 pr_err("%s: The handling func for event queue.\n", __func__); 792 pr_debug("%s: The handling func for event queue.\n", __func__);
796} 793}
797 794
798static void vhost_scsi_handle_kick(struct vhost_work *work) 795static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -825,11 +822,6 @@ static int vhost_scsi_set_endpoint(
825 return -EFAULT; 822 return -EFAULT;
826 } 823 }
827 } 824 }
828
829 if (vs->vs_tpg) {
830 mutex_unlock(&vs->dev.mutex);
831 return -EEXIST;
832 }
833 mutex_unlock(&vs->dev.mutex); 825 mutex_unlock(&vs->dev.mutex);
834 826
835 mutex_lock(&tcm_vhost_mutex); 827 mutex_lock(&tcm_vhost_mutex);
@@ -839,7 +831,7 @@ static int vhost_scsi_set_endpoint(
839 mutex_unlock(&tv_tpg->tv_tpg_mutex); 831 mutex_unlock(&tv_tpg->tv_tpg_mutex);
840 continue; 832 continue;
841 } 833 }
842 if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) { 834 if (tv_tpg->tv_tpg_vhost_count != 0) {
843 mutex_unlock(&tv_tpg->tv_tpg_mutex); 835 mutex_unlock(&tv_tpg->tv_tpg_mutex);
844 continue; 836 continue;
845 } 837 }
@@ -847,14 +839,20 @@ static int vhost_scsi_set_endpoint(
847 839
848 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && 840 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
849 (tv_tpg->tport_tpgt == t->vhost_tpgt)) { 841 (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
850 atomic_inc(&tv_tpg->tv_tpg_vhost_count); 842 tv_tpg->tv_tpg_vhost_count++;
851 smp_mb__after_atomic_inc();
852 mutex_unlock(&tv_tpg->tv_tpg_mutex); 843 mutex_unlock(&tv_tpg->tv_tpg_mutex);
853 mutex_unlock(&tcm_vhost_mutex); 844 mutex_unlock(&tcm_vhost_mutex);
854 845
855 mutex_lock(&vs->dev.mutex); 846 mutex_lock(&vs->dev.mutex);
847 if (vs->vs_tpg) {
848 mutex_unlock(&vs->dev.mutex);
849 mutex_lock(&tv_tpg->tv_tpg_mutex);
850 tv_tpg->tv_tpg_vhost_count--;
851 mutex_unlock(&tv_tpg->tv_tpg_mutex);
852 return -EEXIST;
853 }
854
856 vs->vs_tpg = tv_tpg; 855 vs->vs_tpg = tv_tpg;
857 atomic_inc(&vs->vhost_ref_cnt);
858 smp_mb__after_atomic_inc(); 856 smp_mb__after_atomic_inc();
859 mutex_unlock(&vs->dev.mutex); 857 mutex_unlock(&vs->dev.mutex);
860 return 0; 858 return 0;
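
Note the reordering in this hunk: the old code tested vs->vs_tpg before scanning for a matching tpg, leaving a window where two callers could both pass the check. Now the tv_tpg_vhost_count reference is taken first and the -EEXIST test is redone under vs->dev.mutex, with an explicit decrement to roll the reference back when the endpoint turns out to be already set:

	mutex_lock(&vs->dev.mutex);
	if (vs->vs_tpg) {			/* lost the race: undo */
		mutex_unlock(&vs->dev.mutex);
		mutex_lock(&tv_tpg->tv_tpg_mutex);
		tv_tpg->tv_tpg_vhost_count--;
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		return -EEXIST;
	}
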
@@ -871,38 +869,42 @@ static int vhost_scsi_clear_endpoint(
871{ 869{
872 struct tcm_vhost_tport *tv_tport; 870 struct tcm_vhost_tport *tv_tport;
873 struct tcm_vhost_tpg *tv_tpg; 871 struct tcm_vhost_tpg *tv_tpg;
874 int index; 872 int index, ret;
875 873
876 mutex_lock(&vs->dev.mutex); 874 mutex_lock(&vs->dev.mutex);
877 /* Verify that ring has been setup correctly. */ 875 /* Verify that ring has been setup correctly. */
878 for (index = 0; index < vs->dev.nvqs; ++index) { 876 for (index = 0; index < vs->dev.nvqs; ++index) {
879 if (!vhost_vq_access_ok(&vs->vqs[index])) { 877 if (!vhost_vq_access_ok(&vs->vqs[index])) {
880 mutex_unlock(&vs->dev.mutex); 878 ret = -EFAULT;
881 return -EFAULT; 879 goto err;
882 } 880 }
883 } 881 }
884 882
885 if (!vs->vs_tpg) { 883 if (!vs->vs_tpg) {
886 mutex_unlock(&vs->dev.mutex); 884 ret = -ENODEV;
887 return -ENODEV; 885 goto err;
888 } 886 }
889 tv_tpg = vs->vs_tpg; 887 tv_tpg = vs->vs_tpg;
890 tv_tport = tv_tpg->tport; 888 tv_tport = tv_tpg->tport;
891 889
892 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || 890 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
893 (tv_tpg->tport_tpgt != t->vhost_tpgt)) { 891 (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
894 mutex_unlock(&vs->dev.mutex);
895 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" 892 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
896 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", 893 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
897 tv_tport->tport_name, tv_tpg->tport_tpgt, 894 tv_tport->tport_name, tv_tpg->tport_tpgt,
898 t->vhost_wwpn, t->vhost_tpgt); 895 t->vhost_wwpn, t->vhost_tpgt);
899 return -EINVAL; 896 ret = -EINVAL;
897 goto err;
900 } 898 }
901 atomic_dec(&tv_tpg->tv_tpg_vhost_count); 899 tv_tpg->tv_tpg_vhost_count--;
902 vs->vs_tpg = NULL; 900 vs->vs_tpg = NULL;
903 mutex_unlock(&vs->dev.mutex); 901 mutex_unlock(&vs->dev.mutex);
904 902
905 return 0; 903 return 0;
904
905err:
906 mutex_unlock(&vs->dev.mutex);
907 return ret;
906} 908}
907 909
908static int vhost_scsi_open(struct inode *inode, struct file *f) 910static int vhost_scsi_open(struct inode *inode, struct file *f)
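
The clear-endpoint rewrite above funnels every failure through one err label so vs->dev.mutex is dropped in exactly one place, where the earlier version had three separate unlock-and-return sites. The shape of the pattern, reduced to one check:

	mutex_lock(&vs->dev.mutex);
	if (!vs->vs_tpg) {
		ret = -ENODEV;
		goto err;		/* single unlock site below */
	}
	vs->vs_tpg = NULL;
	mutex_unlock(&vs->dev.mutex);
	return 0;
err:
	mutex_unlock(&vs->dev.mutex);
	return ret;
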
@@ -918,9 +920,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
918 INIT_LIST_HEAD(&s->vs_completion_list); 920 INIT_LIST_HEAD(&s->vs_completion_list);
919 spin_lock_init(&s->vs_completion_lock); 921 spin_lock_init(&s->vs_completion_lock);
920 922
921 s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick; 923 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
922 s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick; 924 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
923 s->vqs[2].handle_kick = vhost_scsi_handle_kick; 925 s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
924 r = vhost_dev_init(&s->dev, s->vqs, 3); 926 r = vhost_dev_init(&s->dev, s->vqs, 3);
925 if (r < 0) { 927 if (r < 0) {
926 kfree(s); 928 kfree(s);
@@ -949,6 +951,18 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
949 return 0; 951 return 0;
950} 952}
951 953
954static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
955{
956 vhost_poll_flush(&vs->dev.vqs[index].poll);
957}
958
959static void vhost_scsi_flush(struct vhost_scsi *vs)
960{
961 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
962 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
963 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
964}
965
952static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) 966static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
953{ 967{
954 if (features & ~VHOST_FEATURES) 968 if (features & ~VHOST_FEATURES)
@@ -961,7 +975,8 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
961 return -EFAULT; 975 return -EFAULT;
962 } 976 }
963 vs->dev.acked_features = features; 977 vs->dev.acked_features = features;
964 /* TODO possibly smp_wmb() and flush vqs */ 978 smp_wmb();
979 vhost_scsi_flush(vs);
965 mutex_unlock(&vs->dev.mutex); 980 mutex_unlock(&vs->dev.mutex);
966 return 0; 981 return 0;
967} 982}
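
This resolves the old TODO: the smp_wmb() orders the acked_features store before the flush, and vhost_scsi_flush() then waits, via vhost_poll_flush() on each virtqueue, for any handler that may still be running against the old feature set. Once VHOST_SET_FEATURES returns, no worker observes stale features. The publish sequence:

	vs->dev.acked_features = features;
	smp_wmb();			/* publish before waiting */
	vhost_scsi_flush(vs);		/* drain handlers using old features */
	mutex_unlock(&vs->dev.mutex);
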
@@ -974,26 +989,25 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
974 void __user *argp = (void __user *)arg; 989 void __user *argp = (void __user *)arg;
975 u64 __user *featurep = argp; 990 u64 __user *featurep = argp;
976 u64 features; 991 u64 features;
977 int r; 992 int r, abi_version = VHOST_SCSI_ABI_VERSION;
978 993
979 switch (ioctl) { 994 switch (ioctl) {
980 case VHOST_SCSI_SET_ENDPOINT: 995 case VHOST_SCSI_SET_ENDPOINT:
981 if (copy_from_user(&backend, argp, sizeof backend)) 996 if (copy_from_user(&backend, argp, sizeof backend))
982 return -EFAULT; 997 return -EFAULT;
998 if (backend.reserved != 0)
999 return -EOPNOTSUPP;
983 1000
984 return vhost_scsi_set_endpoint(vs, &backend); 1001 return vhost_scsi_set_endpoint(vs, &backend);
985 case VHOST_SCSI_CLEAR_ENDPOINT: 1002 case VHOST_SCSI_CLEAR_ENDPOINT:
986 if (copy_from_user(&backend, argp, sizeof backend)) 1003 if (copy_from_user(&backend, argp, sizeof backend))
987 return -EFAULT; 1004 return -EFAULT;
1005 if (backend.reserved != 0)
1006 return -EOPNOTSUPP;
988 1007
989 return vhost_scsi_clear_endpoint(vs, &backend); 1008 return vhost_scsi_clear_endpoint(vs, &backend);
990 case VHOST_SCSI_GET_ABI_VERSION: 1009 case VHOST_SCSI_GET_ABI_VERSION:
991 if (copy_from_user(&backend, argp, sizeof backend)) 1010 if (copy_to_user(argp, &abi_version, sizeof abi_version))
992 return -EFAULT;
993
994 backend.abi_version = VHOST_SCSI_ABI_VERSION;
995
996 if (copy_to_user(argp, &backend, sizeof backend))
997 return -EFAULT; 1011 return -EFAULT;
998 return 0; 1012 return 0;
999 case VHOST_GET_FEATURES: 1013 case VHOST_GET_FEATURES:
@@ -1013,11 +1027,21 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1013 } 1027 }
1014} 1028}
1015 1029
1030#ifdef CONFIG_COMPAT
1031static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1032 unsigned long arg)
1033{
1034 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1035}
1036#endif
1037
1016static const struct file_operations vhost_scsi_fops = { 1038static const struct file_operations vhost_scsi_fops = {
1017 .owner = THIS_MODULE, 1039 .owner = THIS_MODULE,
1018 .release = vhost_scsi_release, 1040 .release = vhost_scsi_release,
1019 .unlocked_ioctl = vhost_scsi_ioctl, 1041 .unlocked_ioctl = vhost_scsi_ioctl,
1020 /* TODO compat ioctl? */ 1042#ifdef CONFIG_COMPAT
1043 .compat_ioctl = vhost_scsi_compat_ioctl,
1044#endif
1021 .open = vhost_scsi_open, 1045 .open = vhost_scsi_open,
1022 .llseek = noop_llseek, 1046 .llseek = noop_llseek,
1023}; 1047};
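
struct vhost_scsi_target contains only an int, a char array, and two shorts, so its size and layout are identical for 32- and 64-bit userspace; the compat path therefore only has to widen the pointer argument. Rejecting a nonzero reserved field now is also what lets that field carry flags later without an ABI break. Generic shape of the thin wrapper, with foo_ioctl standing in for the native handler:

#ifdef CONFIG_COMPAT
/* hypothetical driver: reuse the native handler after widening arg */
static long foo_compat_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	return foo_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
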
@@ -1054,28 +1078,28 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1054 return "Unknown"; 1078 return "Unknown";
1055} 1079}
1056 1080
1057static int tcm_vhost_port_link( 1081static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1058 struct se_portal_group *se_tpg,
1059 struct se_lun *lun) 1082 struct se_lun *lun)
1060{ 1083{
1061 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1084 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1062 struct tcm_vhost_tpg, se_tpg); 1085 struct tcm_vhost_tpg, se_tpg);
1063 1086
1064 atomic_inc(&tv_tpg->tv_tpg_port_count); 1087 mutex_lock(&tv_tpg->tv_tpg_mutex);
1065 smp_mb__after_atomic_inc(); 1088 tv_tpg->tv_tpg_port_count++;
1089 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1066 1090
1067 return 0; 1091 return 0;
1068} 1092}
1069 1093
1070static void tcm_vhost_port_unlink( 1094static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1071 struct se_portal_group *se_tpg,
1072 struct se_lun *se_lun) 1095 struct se_lun *se_lun)
1073{ 1096{
1074 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1097 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1075 struct tcm_vhost_tpg, se_tpg); 1098 struct tcm_vhost_tpg, se_tpg);
1076 1099
1077 atomic_dec(&tv_tpg->tv_tpg_port_count); 1100 mutex_lock(&tv_tpg->tv_tpg_mutex);
1078 smp_mb__after_atomic_dec(); 1101 tv_tpg->tv_tpg_port_count--;
1102 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1079} 1103}
1080 1104
1081static struct se_node_acl *tcm_vhost_make_nodeacl( 1105static struct se_node_acl *tcm_vhost_make_nodeacl(
@@ -1122,8 +1146,7 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1122 kfree(nacl); 1146 kfree(nacl);
1123} 1147}
1124 1148
1125static int tcm_vhost_make_nexus( 1149static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1126 struct tcm_vhost_tpg *tv_tpg,
1127 const char *name) 1150 const char *name)
1128{ 1151{
1129 struct se_portal_group *se_tpg; 1152 struct se_portal_group *se_tpg;
@@ -1168,7 +1191,7 @@ static int tcm_vhost_make_nexus(
1168 return -ENOMEM; 1191 return -ENOMEM;
1169 } 1192 }
1170 /* 1193 /*
1171 * Now register the TCM vHost virtual I_T Nexus as active with the 1194 * Now register the TCM vhost virtual I_T Nexus as active with the
1172 * call to __transport_register_session() 1195 * call to __transport_register_session()
1173 */ 1196 */
1174 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1197 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
@@ -1179,8 +1202,7 @@ static int tcm_vhost_make_nexus(
1179 return 0; 1202 return 0;
1180} 1203}
1181 1204
1182static int tcm_vhost_drop_nexus( 1205static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1183 struct tcm_vhost_tpg *tpg)
1184{ 1206{
1185 struct se_session *se_sess; 1207 struct se_session *se_sess;
1186 struct tcm_vhost_nexus *tv_nexus; 1208 struct tcm_vhost_nexus *tv_nexus;
@@ -1198,27 +1220,27 @@ static int tcm_vhost_drop_nexus(
1198 return -ENODEV; 1220 return -ENODEV;
1199 } 1221 }
1200 1222
1201 if (atomic_read(&tpg->tv_tpg_port_count)) { 1223 if (tpg->tv_tpg_port_count != 0) {
1202 mutex_unlock(&tpg->tv_tpg_mutex); 1224 mutex_unlock(&tpg->tv_tpg_mutex);
1203 pr_err("Unable to remove TCM_vHost I_T Nexus with" 1225 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1204 " active TPG port count: %d\n", 1226 " active TPG port count: %d\n",
1205 atomic_read(&tpg->tv_tpg_port_count)); 1227 tpg->tv_tpg_port_count);
1206 return -EPERM; 1228 return -EBUSY;
1207 } 1229 }
1208 1230
1209 if (atomic_read(&tpg->tv_tpg_vhost_count)) { 1231 if (tpg->tv_tpg_vhost_count != 0) {
1210 mutex_unlock(&tpg->tv_tpg_mutex); 1232 mutex_unlock(&tpg->tv_tpg_mutex);
1211 pr_err("Unable to remove TCM_vHost I_T Nexus with" 1233 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1212 " active TPG vhost count: %d\n", 1234 " active TPG vhost count: %d\n",
1213 atomic_read(&tpg->tv_tpg_vhost_count)); 1235 tpg->tv_tpg_vhost_count);
1214 return -EPERM; 1236 return -EBUSY;
1215 } 1237 }
1216 1238
1217 pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated" 1239 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1218 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), 1240 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1219 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1241 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1220 /* 1242 /*
1221 * Release the SCSI I_T Nexus to the emulated vHost Target Port 1243 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1222 */ 1244 */
1223 transport_deregister_session(tv_nexus->tvn_se_sess); 1245 transport_deregister_session(tv_nexus->tvn_se_sess);
1224 tpg->tpg_nexus = NULL; 1246 tpg->tpg_nexus = NULL;
@@ -1228,8 +1250,7 @@ static int tcm_vhost_drop_nexus(
1228 return 0; 1250 return 0;
1229} 1251}
1230 1252
1231static ssize_t tcm_vhost_tpg_show_nexus( 1253static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1232 struct se_portal_group *se_tpg,
1233 char *page) 1254 char *page)
1234{ 1255{
1235 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, 1256 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
@@ -1250,8 +1271,7 @@ static ssize_t tcm_vhost_tpg_show_nexus(
1250 return ret; 1271 return ret;
1251} 1272}
1252 1273
1253static ssize_t tcm_vhost_tpg_store_nexus( 1274static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1254 struct se_portal_group *se_tpg,
1255 const char *page, 1275 const char *page,
1256 size_t count) 1276 size_t count)
1257{ 1277{
@@ -1336,8 +1356,7 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1336 NULL, 1356 NULL,
1337}; 1357};
1338 1358
1339static struct se_portal_group *tcm_vhost_make_tpg( 1359static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1340 struct se_wwn *wwn,
1341 struct config_group *group, 1360 struct config_group *group,
1342 const char *name) 1361 const char *name)
1343{ 1362{
@@ -1385,7 +1404,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1385 list_del(&tpg->tv_tpg_list); 1404 list_del(&tpg->tv_tpg_list);
1386 mutex_unlock(&tcm_vhost_mutex); 1405 mutex_unlock(&tcm_vhost_mutex);
1387 /* 1406 /*
1388 * Release the virtual I_T Nexus for this vHost TPG 1407 * Release the virtual I_T Nexus for this vhost TPG
1389 */ 1408 */
1390 tcm_vhost_drop_nexus(tpg); 1409 tcm_vhost_drop_nexus(tpg);
1391 /* 1410 /*
@@ -1395,8 +1414,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1395 kfree(tpg); 1414 kfree(tpg);
1396} 1415}
1397 1416
1398static struct se_wwn *tcm_vhost_make_tport( 1417static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1399 struct target_fabric_configfs *tf,
1400 struct config_group *group, 1418 struct config_group *group,
1401 const char *name) 1419 const char *name)
1402{ 1420{
@@ -1592,7 +1610,10 @@ static void tcm_vhost_deregister_configfs(void)
1592static int __init tcm_vhost_init(void) 1610static int __init tcm_vhost_init(void)
1593{ 1611{
1594 int ret = -ENOMEM; 1612 int ret = -ENOMEM;
1595 1613 /*
1614 * Use our own dedicated workqueue for submitting I/O into
1615 * target core to avoid contention within system_wq.
1616 */
1596 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); 1617 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
1597 if (!tcm_vhost_workqueue) 1618 if (!tcm_vhost_workqueue)
1598 goto out; 1619 goto out;
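
In the alloc_workqueue() call above, the two zeros mean no special flags (an ordinary bound workqueue) and a max_active of 0, which selects the default concurrency limit; the new comment explains the motivation, keeping target-core submissions off the shared system_wq:

	/* alloc_workqueue(name, flags, max_active): 0, 0 take defaults */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
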
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index c983ed21e413..d9e93557d669 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -47,9 +47,9 @@ struct tcm_vhost_tpg {
47 /* Vhost port target portal group tag for TCM */ 47 /* Vhost port target portal group tag for TCM */
48 u16 tport_tpgt; 48 u16 tport_tpgt;
 49 /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */ 49 /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
50 atomic_t tv_tpg_port_count; 50 int tv_tpg_port_count;
51 /* Used for vhost_scsi device reference to tpg_nexus */ 51 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
52 atomic_t tv_tpg_vhost_count; 52 int tv_tpg_vhost_count;
53 /* list for tcm_vhost_list */ 53 /* list for tcm_vhost_list */
54 struct list_head tv_tpg_list; 54 struct list_head tv_tpg_list;
55 /* Used to protect access for tpg_nexus */ 55 /* Used to protect access for tpg_nexus */
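
The atomic_t to plain int conversion running through this series is safe because, after the hunks above, every access to tv_tpg_vhost_count and tv_tpg_port_count already happens under tv_tpg_mutex; once the mutex provides ordering, the atomics and their smp_mb__after_atomic_*() pairings are redundant. The access pattern:

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_vhost_count++;	/* plain int; mutex provides ordering */
	mutex_unlock(&tpg->tv_tpg_mutex);
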
@@ -91,11 +91,13 @@ struct tcm_vhost_tport {
91 91
92struct vhost_scsi_target { 92struct vhost_scsi_target {
93 int abi_version; 93 int abi_version;
94 unsigned char vhost_wwpn[TRANSPORT_IQN_LEN]; 94 char vhost_wwpn[TRANSPORT_IQN_LEN];
95 unsigned short vhost_tpgt; 95 unsigned short vhost_tpgt;
96 unsigned short reserved;
96}; 97};
97 98
98/* VHOST_SCSI specific defines */ 99/* VHOST_SCSI specific defines */
99#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) 100#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
100#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 101#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
101#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target) 102/* Changing this breaks userspace. */
103#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index f8a79fca4a22..88e92041d8f0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -374,6 +374,9 @@ static void fb_flashcursor(struct work_struct *work)
374 int mode; 374 int mode;
375 int ret; 375 int ret;
376 376
377 /* FIXME: we should sort out the unbind locking instead */
 378 /* for now we just skip flashing the cursor if we can't get
 379 * the lock, rather than blocking fbcon deinit */
377 ret = console_trylock(); 380 ret = console_trylock();
378 if (ret == 0) 381 if (ret == 0)
379 return; 382 return;
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index fb962efdacee..6d59006bfa27 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
201 int err = -ENOMEM; 201 int err = -ENOMEM;
202 202
203 dout("ceph_fs_debugfs_init\n"); 203 dout("ceph_fs_debugfs_init\n");
204 BUG_ON(!fsc->client->debugfs_dir);
204 fsc->debugfs_congestion_kb = 205 fsc->debugfs_congestion_kb =
205 debugfs_create_file("writeback_congestion_kb", 206 debugfs_create_file("writeback_congestion_kb",
206 0600, 207 0600,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9fff9f3b17e4..4b5762ef7c2b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
992 if (rinfo->head->is_dentry) { 992 if (rinfo->head->is_dentry) {
993 struct inode *dir = req->r_locked_dir; 993 struct inode *dir = req->r_locked_dir;
994 994
995 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, 995 if (dir) {
996 session, req->r_request_started, -1, 996 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
997 &req->r_caps_reservation); 997 session, req->r_request_started, -1,
998 if (err < 0) 998 &req->r_caps_reservation);
999 return err; 999 if (err < 0)
1000 return err;
1001 } else {
1002 WARN_ON_ONCE(1);
1003 }
1000 } 1004 }
1001 1005
1002 /* 1006 /*
@@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1004 * will have trouble splicing in the virtual snapdir later 1008 * will have trouble splicing in the virtual snapdir later
1005 */ 1009 */
1006 if (rinfo->head->is_dentry && !req->r_aborted && 1010 if (rinfo->head->is_dentry && !req->r_aborted &&
1011 req->r_locked_dir &&
1007 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 1012 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1008 fsc->mount_options->snapdir_name, 1013 fsc->mount_options->snapdir_name,
1009 req->r_dentry->d_name.len))) { 1014 req->r_dentry->d_name.len))) {
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8e3fb69fbe62..1396ceb46797 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
42 /* validate striping parameters */ 42 /* validate striping parameters */
43 if ((l->object_size & ~PAGE_MASK) || 43 if ((l->object_size & ~PAGE_MASK) ||
44 (l->stripe_unit & ~PAGE_MASK) || 44 (l->stripe_unit & ~PAGE_MASK) ||
45 ((unsigned)l->object_size % (unsigned)l->stripe_unit)) 45 (l->stripe_unit != 0 &&
46 ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
46 return -EINVAL; 47 return -EINVAL;
47 48
48 /* make sure it's a valid data pool */ 49 /* make sure it's a valid data pool */
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1c8b55670804..eedec84c1809 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
1654 error = PTR_ERR(file); 1654 error = PTR_ERR(file);
1655 goto out_free_fd; 1655 goto out_free_fd;
1656 } 1656 }
1657 fd_install(fd, file);
1658 ep->file = file; 1657 ep->file = file;
1658 fd_install(fd, file);
1659 return fd; 1659 return fd;
1660 1660
1661out_free_fd: 1661out_free_fd:
diff --git a/fs/namei.c b/fs/namei.c
index db76b866a097..a856e7f7b6e3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)
352/** 352/**
353 * sb_permission - Check superblock-level permissions 353 * sb_permission - Check superblock-level permissions
354 * @sb: Superblock of inode to check permission on 354 * @sb: Superblock of inode to check permission on
355 * @inode: Inode to check permission on
355 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 356 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
356 * 357 *
357 * Separate out file-system wide checks from inode-specific permission checks. 358 * Separate out file-system wide checks from inode-specific permission checks.
@@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1;
656/** 657/**
657 * may_follow_link - Check symlink following for unsafe situations 658 * may_follow_link - Check symlink following for unsafe situations
658 * @link: The path of the symlink 659 * @link: The path of the symlink
660 * @nd: nameidata pathwalk data
659 * 661 *
660 * In the case of the sysctl_protected_symlinks sysctl being enabled, 662 * In the case of the sysctl_protected_symlinks sysctl being enabled,
661 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is 663 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
@@ -678,7 +680,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
678 680
679 /* Allowed if owner and follower match. */ 681 /* Allowed if owner and follower match. */
680 inode = link->dentry->d_inode; 682 inode = link->dentry->d_inode;
681 if (current_cred()->fsuid == inode->i_uid) 683 if (uid_eq(current_cred()->fsuid, inode->i_uid))
682 return 0; 684 return 0;
683 685
684 /* Allowed if parent directory not sticky and world-writable. */ 686 /* Allowed if parent directory not sticky and world-writable. */
@@ -687,7 +689,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
687 return 0; 689 return 0;
688 690
689 /* Allowed if parent directory and link owner match. */ 691 /* Allowed if parent directory and link owner match. */
690 if (parent->i_uid == inode->i_uid) 692 if (uid_eq(parent->i_uid, inode->i_uid))
691 return 0; 693 return 0;
692 694
693 path_put_conditional(link, nd); 695 path_put_conditional(link, nd);
@@ -757,7 +759,7 @@ static int may_linkat(struct path *link)
757 /* Source inode owner (or CAP_FOWNER) can hardlink all they like, 759 /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
758 * otherwise, it must be a safe source. 760 * otherwise, it must be a safe source.
759 */ 761 */
760 if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) || 762 if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
761 capable(CAP_FOWNER)) 763 capable(CAP_FOWNER))
762 return 0; 764 return 0;
763 765
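
These namei.c hunks are part of the user-namespace conversion: i_uid and fsuid are now typed kuid_t values, which cannot safely be compared with ==, so uid_eq() from <linux/uidgid.h> does the comparison. Sketch:

	kuid_t owner = inode->i_uid;

	/* typed comparison; a raw == would break under kuid_t */
	if (uid_eq(current_cred()->fsuid, owner))
		return 0;	/* allowed: owner and follower match */
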
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8bf3a3f6925a..b7db60897f91 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
12nfs-$(CONFIG_SYSCTL) += sysctl.o 12nfs-$(CONFIG_SYSCTL) += sysctl.o
13nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o 13nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
14 14
15obj-$(CONFIG_NFS_V2) += nfs2.o 15obj-$(CONFIG_NFS_V2) += nfsv2.o
16nfs2-y := nfs2super.o proc.o nfs2xdr.o 16nfsv2-y := nfs2super.o proc.o nfs2xdr.o
17 17
18obj-$(CONFIG_NFS_V3) += nfs3.o 18obj-$(CONFIG_NFS_V3) += nfsv3.o
19nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o 19nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
20nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 20nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
21 21
22obj-$(CONFIG_NFS_V4) += nfs4.o 22obj-$(CONFIG_NFS_V4) += nfsv4.o
23nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \ 23nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
24 delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \ 24 delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
25 nfs4namespace.o nfs4getroot.o nfs4client.o 25 nfs4namespace.o nfs4getroot.o nfs4client.o
26nfs4-$(CONFIG_SYSCTL) += nfs4sysctl.o 26nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
27nfs4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o 27nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
28 28
29obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o 29obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
30nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o 30nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9fc0d9dfc91b..99694442b93f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version)
105 105
106 if (IS_ERR(nfs)) { 106 if (IS_ERR(nfs)) {
107 mutex_lock(&nfs_version_mutex); 107 mutex_lock(&nfs_version_mutex);
108 request_module("nfs%d", version); 108 request_module("nfsv%d", version);
109 nfs = find_nfs_version(version); 109 nfs = find_nfs_version(version);
110 mutex_unlock(&nfs_version_mutex); 110 mutex_unlock(&nfs_version_mutex);
111 } 111 }
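
This format string is the runtime half of the Makefile rename above: request_module() loads modules by name, so the string built from "nfsv%d" must match the nfsv2/nfsv3/nfsv4 objects kbuild now produces. The pairing, annotated:

	/* must stay in sync with obj-$(CONFIG_NFS_Vn) += nfsvN.o
	 * in fs/nfs/Makefile */
	request_module("nfsv%d", version);
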
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b701358c39c3..a850079467d8 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -61,6 +61,12 @@ struct idmap {
61 struct mutex idmap_mutex; 61 struct mutex idmap_mutex;
62}; 62};
63 63
64struct idmap_legacy_upcalldata {
65 struct rpc_pipe_msg pipe_msg;
66 struct idmap_msg idmap_msg;
67 struct idmap *idmap;
68};
69
64/** 70/**
65 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields 71 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
66 * @fattr: fully initialised struct nfs_fattr 72 * @fattr: fully initialised struct nfs_fattr
@@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
324 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, 330 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
325 name, namelen, type, data, 331 name, namelen, type, data,
326 data_size, idmap); 332 data_size, idmap);
333 idmap->idmap_key_cons = NULL;
327 mutex_unlock(&idmap->idmap_mutex); 334 mutex_unlock(&idmap->idmap_mutex);
328 } 335 }
329 return ret; 336 return ret;
@@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = {
380static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); 387static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
381static ssize_t idmap_pipe_downcall(struct file *, const char __user *, 388static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
382 size_t); 389 size_t);
390static void idmap_release_pipe(struct inode *);
383static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *); 391static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
384 392
385static const struct rpc_pipe_ops idmap_upcall_ops = { 393static const struct rpc_pipe_ops idmap_upcall_ops = {
386 .upcall = rpc_pipe_generic_upcall, 394 .upcall = rpc_pipe_generic_upcall,
387 .downcall = idmap_pipe_downcall, 395 .downcall = idmap_pipe_downcall,
396 .release_pipe = idmap_release_pipe,
388 .destroy_msg = idmap_pipe_destroy_msg, 397 .destroy_msg = idmap_pipe_destroy_msg,
389}; 398};
390 399
@@ -616,7 +625,8 @@ void nfs_idmap_quit(void)
616 nfs_idmap_quit_keyring(); 625 nfs_idmap_quit_keyring();
617} 626}
618 627
619static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im, 628static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
629 struct idmap_msg *im,
620 struct rpc_pipe_msg *msg) 630 struct rpc_pipe_msg *msg)
621{ 631{
622 substring_t substr; 632 substring_t substr;
@@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
659 const char *op, 669 const char *op,
660 void *aux) 670 void *aux)
661{ 671{
672 struct idmap_legacy_upcalldata *data;
662 struct rpc_pipe_msg *msg; 673 struct rpc_pipe_msg *msg;
663 struct idmap_msg *im; 674 struct idmap_msg *im;
664 struct idmap *idmap = (struct idmap *)aux; 675 struct idmap *idmap = (struct idmap *)aux;
@@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
666 int ret = -ENOMEM; 677 int ret = -ENOMEM;
667 678
668 /* msg and im are freed in idmap_pipe_destroy_msg */ 679 /* msg and im are freed in idmap_pipe_destroy_msg */
669 msg = kmalloc(sizeof(*msg), GFP_KERNEL); 680 data = kmalloc(sizeof(*data), GFP_KERNEL);
670 if (!msg) 681 if (!data)
671 goto out0;
672
673 im = kmalloc(sizeof(*im), GFP_KERNEL);
674 if (!im)
675 goto out1; 682 goto out1;
676 683
677 ret = nfs_idmap_prepare_message(key->description, im, msg); 684 msg = &data->pipe_msg;
685 im = &data->idmap_msg;
686 data->idmap = idmap;
687
688 ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
678 if (ret < 0) 689 if (ret < 0)
679 goto out2; 690 goto out2;
680 691
@@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
683 694
684 ret = rpc_queue_upcall(idmap->idmap_pipe, msg); 695 ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
685 if (ret < 0) 696 if (ret < 0)
686 goto out2; 697 goto out3;
687 698
688 return ret; 699 return ret;
689 700
701out3:
702 idmap->idmap_key_cons = NULL;
690out2: 703out2:
691 kfree(im); 704 kfree(data);
692out1: 705out1:
693 kfree(msg);
694out0:
695 complete_request_key(cons, ret); 706 complete_request_key(cons, ret);
696 return ret; 707 return ret;
697} 708}
@@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
749 } 760 }
750 761
751 if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { 762 if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
752 ret = mlen; 763 ret = -ENOKEY;
753 complete_request_key(cons, -ENOKEY); 764 goto out;
754 goto out_incomplete;
755 } 765 }
756 766
757 namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); 767 namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
@@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
768 778
769out: 779out:
770 complete_request_key(cons, ret); 780 complete_request_key(cons, ret);
771out_incomplete:
772 return ret; 781 return ret;
773} 782}
774 783
775static void 784static void
776idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg) 785idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
777{ 786{
787 struct idmap_legacy_upcalldata *data = container_of(msg,
788 struct idmap_legacy_upcalldata,
789 pipe_msg);
790 struct idmap *idmap = data->idmap;
791 struct key_construction *cons;
792 if (msg->errno) {
793 cons = ACCESS_ONCE(idmap->idmap_key_cons);
794 idmap->idmap_key_cons = NULL;
795 complete_request_key(cons, msg->errno);
796 }
778 /* Free memory allocated in nfs_idmap_legacy_upcall() */ 797 /* Free memory allocated in nfs_idmap_legacy_upcall() */
779 kfree(msg->data); 798 kfree(data);
780 kfree(msg); 799}
800
801static void
802idmap_release_pipe(struct inode *inode)
803{
804 struct rpc_inode *rpci = RPC_I(inode);
805 struct idmap *idmap = (struct idmap *)rpci->private;
806 idmap->idmap_key_cons = NULL;
781} 807}
782 808
783int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) 809int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
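
Folding the rpc_pipe_msg and idmap_msg into one idmap_legacy_upcalldata allocation does more than halve the kmallocs: it lets idmap_pipe_destroy_msg() recover the whole object from the embedded message with container_of() and free it with a single kfree(), which is what collapsed the old out0/out1 error ladder. The recovery step:

	struct idmap_legacy_upcalldata *data =
		container_of(msg, struct idmap_legacy_upcalldata, pipe_msg);

	kfree(data);	/* frees pipe_msg and idmap_msg together */
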
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 0952c791df36..d6b3b5f2d779 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
69 nfs_fattr_init(info->fattr); 69 nfs_fattr_init(info->fattr);
70 status = rpc_call_sync(client, &msg, 0); 70 status = rpc_call_sync(client, &msg, 0);
71 dprintk("%s: reply fsinfo: %d\n", __func__, status); 71 dprintk("%s: reply fsinfo: %d\n", __func__, status);
72 if (!(info->fattr->valid & NFS_ATTR_FATTR)) { 72 if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
73 msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR]; 73 msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
74 msg.rpc_resp = info->fattr; 74 msg.rpc_resp = info->fattr;
75 status = rpc_call_sync(client, &msg, 0); 75 status = rpc_call_sync(client, &msg, 0);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3b950dd81e81..da0618aeeadb 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations;
205int nfs_atomic_open(struct inode *, struct dentry *, struct file *, 205int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
206 unsigned, umode_t, int *); 206 unsigned, umode_t, int *);
207 207
208/* super.c */
209extern struct file_system_type nfs4_fs_type;
210
208/* nfs4namespace.c */ 211/* nfs4namespace.c */
209rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *); 212rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
210struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); 213struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index cbcdfaf32505..24eb663f8ed5 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
74 return clp; 74 return clp;
75 75
76error: 76error:
77 kfree(clp); 77 nfs_free_client(clp);
78 return ERR_PTR(err); 78 return ERR_PTR(err);
79} 79}
80 80
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a99a8d948721..635274140b18 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3737,9 +3737,10 @@ out:
3737static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3737static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3738{ 3738{
3739 struct nfs4_cached_acl *acl; 3739 struct nfs4_cached_acl *acl;
3740 size_t buflen = sizeof(*acl) + acl_len;
3740 3741
3741 if (pages && acl_len <= PAGE_SIZE) { 3742 if (pages && buflen <= PAGE_SIZE) {
3742 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); 3743 acl = kmalloc(buflen, GFP_KERNEL);
3743 if (acl == NULL) 3744 if (acl == NULL)
3744 goto out; 3745 goto out;
3745 acl->cached = 1; 3746 acl->cached = 1;
@@ -3819,7 +3820,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3819 if (ret) 3820 if (ret)
3820 goto out_free; 3821 goto out_free;
3821 3822
3822 acl_len = res.acl_len - res.acl_data_offset; 3823 acl_len = res.acl_len;
3823 if (acl_len > args.acl_len) 3824 if (acl_len > args.acl_len)
3824 nfs4_write_cached_acl(inode, NULL, 0, acl_len); 3825 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3825 else 3826 else
@@ -6223,11 +6224,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6223 dprintk("<-- %s\n", __func__); 6224 dprintk("<-- %s\n", __func__);
6224} 6225}
6225 6226
6227static size_t max_response_pages(struct nfs_server *server)
6228{
6229 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6230 return nfs_page_array_len(0, max_resp_sz);
6231}
6232
6233static void nfs4_free_pages(struct page **pages, size_t size)
6234{
6235 int i;
6236
6237 if (!pages)
6238 return;
6239
6240 for (i = 0; i < size; i++) {
6241 if (!pages[i])
6242 break;
6243 __free_page(pages[i]);
6244 }
6245 kfree(pages);
6246}
6247
6248static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6249{
6250 struct page **pages;
6251 int i;
6252
6253 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6254 if (!pages) {
6255 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6256 return NULL;
6257 }
6258
6259 for (i = 0; i < size; i++) {
6260 pages[i] = alloc_page(gfp_flags);
6261 if (!pages[i]) {
6262 dprintk("%s: failed to allocate page\n", __func__);
6263 nfs4_free_pages(pages, size);
6264 return NULL;
6265 }
6266 }
6267
6268 return pages;
6269}
6270
6226static void nfs4_layoutget_release(void *calldata) 6271static void nfs4_layoutget_release(void *calldata)
6227{ 6272{
6228 struct nfs4_layoutget *lgp = calldata; 6273 struct nfs4_layoutget *lgp = calldata;
6274 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6275 size_t max_pages = max_response_pages(server);
6229 6276
6230 dprintk("--> %s\n", __func__); 6277 dprintk("--> %s\n", __func__);
6278 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6231 put_nfs_open_context(lgp->args.ctx); 6279 put_nfs_open_context(lgp->args.ctx);
6232 kfree(calldata); 6280 kfree(calldata);
6233 dprintk("<-- %s\n", __func__); 6281 dprintk("<-- %s\n", __func__);
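
nfs4_alloc_pages() and nfs4_free_pages() above use the standard partial-allocation rollback: kcalloc() zero-fills the pointer array, so when alloc_page() fails mid-loop the free side can simply stop at the first NULL slot, since every later entry is guaranteed untouched. Distilled:

	/* zeroed array: unfilled slots stay NULL */
	pages = kcalloc(n, sizeof(*pages), GFP_KERNEL);

	/* rollback: stop at the first hole */
	for (i = 0; i < n && pages[i]; i++)
		__free_page(pages[i]);
	kfree(pages);
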
@@ -6239,9 +6287,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6239 .rpc_release = nfs4_layoutget_release, 6287 .rpc_release = nfs4_layoutget_release,
6240}; 6288};
6241 6289
6242int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) 6290void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6243{ 6291{
6244 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6292 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6293 size_t max_pages = max_response_pages(server);
6245 struct rpc_task *task; 6294 struct rpc_task *task;
6246 struct rpc_message msg = { 6295 struct rpc_message msg = {
6247 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 6296 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
@@ -6259,12 +6308,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
6259 6308
6260 dprintk("--> %s\n", __func__); 6309 dprintk("--> %s\n", __func__);
6261 6310
6311 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6312 if (!lgp->args.layout.pages) {
6313 nfs4_layoutget_release(lgp);
6314 return;
6315 }
6316 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6317
6262 lgp->res.layoutp = &lgp->args.layout; 6318 lgp->res.layoutp = &lgp->args.layout;
6263 lgp->res.seq_res.sr_slot = NULL; 6319 lgp->res.seq_res.sr_slot = NULL;
6264 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 6320 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6265 task = rpc_run_task(&task_setup_data); 6321 task = rpc_run_task(&task_setup_data);
6266 if (IS_ERR(task)) 6322 if (IS_ERR(task))
6267 return PTR_ERR(task); 6323 return;
6268 status = nfs4_wait_for_completion_rpc_task(task); 6324 status = nfs4_wait_for_completion_rpc_task(task);
6269 if (status == 0) 6325 if (status == 0)
6270 status = task->tk_status; 6326 status = task->tk_status;
@@ -6272,7 +6328,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
6272 status = pnfs_layout_process(lgp); 6328 status = pnfs_layout_process(lgp);
6273 rpc_put_task(task); 6329 rpc_put_task(task);
6274 dprintk("<-- %s status=%d\n", __func__, status); 6330 dprintk("<-- %s status=%d\n", __func__, status);
6275 return status; 6331 return;
6276} 6332}
6277 6333
6278static void 6334static void
@@ -6304,12 +6360,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6304 return; 6360 return;
6305 } 6361 }
6306 spin_lock(&lo->plh_inode->i_lock); 6362 spin_lock(&lo->plh_inode->i_lock);
6307 if (task->tk_status == 0) { 6363 if (task->tk_status == 0 && lrp->res.lrs_present)
6308 if (lrp->res.lrs_present) { 6364 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6309 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6310 } else
6311 BUG_ON(!list_empty(&lo->plh_segs));
6312 }
6313 lo->plh_block_lgets--; 6365 lo->plh_block_lgets--;
6314 spin_unlock(&lo->plh_inode->i_lock); 6366 spin_unlock(&lo->plh_inode->i_lock);
6315 dprintk("<-- %s\n", __func__); 6367 dprintk("<-- %s\n", __func__);
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 12a31a9dbcdd..bd61221ad2c5 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
23static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type, 23static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
24 int flags, const char *dev_name, void *raw_data); 24 int flags, const char *dev_name, void *raw_data);
25 25
26static struct file_system_type nfs4_fs_type = {
27 .owner = THIS_MODULE,
28 .name = "nfs4",
29 .mount = nfs_fs_mount,
30 .kill_sb = nfs_kill_super,
31 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
32};
33
34static struct file_system_type nfs4_remote_fs_type = { 26static struct file_system_type nfs4_remote_fs_type = {
35 .owner = THIS_MODULE, 27 .owner = THIS_MODULE,
36 .name = "nfs4", 28 .name = "nfs4",
@@ -344,14 +336,8 @@ static int __init init_nfs_v4(void)
344 if (err) 336 if (err)
345 goto out1; 337 goto out1;
346 338
347 err = register_filesystem(&nfs4_fs_type);
348 if (err < 0)
349 goto out2;
350
351 register_nfs_version(&nfs_v4); 339 register_nfs_version(&nfs_v4);
352 return 0; 340 return 0;
353out2:
354 nfs4_unregister_sysctl();
355out1: 341out1:
356 nfs_idmap_quit(); 342 nfs_idmap_quit();
357out: 343out:
@@ -361,7 +347,6 @@ out:
361static void __exit exit_nfs_v4(void) 347static void __exit exit_nfs_v4(void)
362{ 348{
363 unregister_nfs_version(&nfs_v4); 349 unregister_nfs_version(&nfs_v4);
364 unregister_filesystem(&nfs4_fs_type);
365 nfs4_unregister_sysctl(); 350 nfs4_unregister_sysctl();
366 nfs_idmap_quit(); 351 nfs_idmap_quit();
367} 352}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index ca13483edd60..1bfbd67c556d 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
5045 struct nfs_getaclres *res) 5045 struct nfs_getaclres *res)
5046{ 5046{
5047 unsigned int savep; 5047 unsigned int savep;
5048 __be32 *bm_p;
5049 uint32_t attrlen, 5048 uint32_t attrlen,
5050 bitmap[3] = {0}; 5049 bitmap[3] = {0};
5051 int status; 5050 int status;
5052 size_t page_len = xdr->buf->page_len; 5051 unsigned int pg_offset;
5053 5052
5054 res->acl_len = 0; 5053 res->acl_len = 0;
5055 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) 5054 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
5056 goto out; 5055 goto out;
5057 5056
5058 bm_p = xdr->p; 5057 xdr_enter_page(xdr, xdr->buf->page_len);
5059 res->acl_data_offset = be32_to_cpup(bm_p) + 2; 5058
5060 res->acl_data_offset <<= 2; 5059 /* Calculate the offset of the page data */
5061 /* Check if the acl data starts beyond the allocated buffer */ 5060 pg_offset = xdr->buf->head[0].iov_len;
5062 if (res->acl_data_offset > page_len)
5063 return -ERANGE;
5064 5061
5065 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) 5062 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
5066 goto out; 5063 goto out;
@@ -5074,23 +5071,20 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
5074 /* The bitmap (xdr len + bitmaps) and the attr xdr len words 5071 /* The bitmap (xdr len + bitmaps) and the attr xdr len words
5075 * are stored with the acl data to handle the problem of 5072 * are stored with the acl data to handle the problem of
5076 * variable length bitmaps.*/ 5073 * variable length bitmaps.*/
5077 xdr->p = bm_p; 5074 res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
5078 5075
5079 /* We ignore &savep and don't do consistency checks on 5076 /* We ignore &savep and don't do consistency checks on
5080 * the attr length. Let userspace figure it out.... */ 5077 * the attr length. Let userspace figure it out.... */
5081 attrlen += res->acl_data_offset; 5078 res->acl_len = attrlen;
5082 if (attrlen > page_len) { 5079 if (attrlen > (xdr->nwords << 2)) {
5083 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { 5080 if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
5084 /* getxattr interface called with a NULL buf */ 5081 /* getxattr interface called with a NULL buf */
5085 res->acl_len = attrlen;
5086 goto out; 5082 goto out;
5087 } 5083 }
5088 dprintk("NFS: acl reply: attrlen %u > page_len %zu\n", 5084 dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
5089 attrlen, page_len); 5085 attrlen, xdr->nwords << 2);
5090 return -EINVAL; 5086 return -EINVAL;
5091 } 5087 }
5092 xdr_read_pages(xdr, attrlen);
5093 res->acl_len = attrlen;
5094 } else 5088 } else
5095 status = -EOPNOTSUPP; 5089 status = -EOPNOTSUPP;
5096 5090
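
The rewritten decode_getacl() leans on two stream helpers that the old code open-coded: xdr_enter_page() moves decoding into the reply's page data, and xdr_stream_pos() reports the absolute offset into the reply buffer. A minimal sketch of the offset calculation the hunk performs (the helper name is illustrative, not from the patch):

#include <linux/sunrpc/xdr.h>

/* Where the ACL data starts inside the reply pages: xdr_stream_pos()
 * counts from the start of the whole reply buffer, and head[0].iov_len
 * bytes of that precede the page data. Illustrative helper only. */
static unsigned int acl_offset_in_pages(struct xdr_stream *xdr)
{
        unsigned int pg_offset = xdr->buf->head[0].iov_len;

        return xdr_stream_pos(xdr) - pg_offset;
}
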
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index f50d3e8d6f22..ea6d111b03e9 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
570 return false; 570 return false;
571 571
572 return pgio->pg_count + req->wb_bytes <= 572 return pgio->pg_count + req->wb_bytes <=
573 OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; 573 (unsigned long)pgio->pg_layout_private;
574}
575
576void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
577{
578 pnfs_generic_pg_init_read(pgio, req);
579 if (unlikely(pgio->pg_lseg == NULL))
580 return; /* Not pNFS */
581
582 pgio->pg_layout_private = (void *)
583 OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
584}
585
586static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
587 unsigned long *stripe_end)
588{
589 u32 stripe_off;
590 unsigned stripe_size;
591
592 if (layout->raid_algorithm == PNFS_OSD_RAID_0)
593 return true;
594
595 stripe_size = layout->stripe_unit *
596 (layout->group_width - layout->parity);
597
598 div_u64_rem(offset, stripe_size, &stripe_off);
599 if (!stripe_off)
600 return true;
601
602 *stripe_end = stripe_size - stripe_off;
603 return false;
604}
605
606void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
607{
608 unsigned long stripe_end = 0;
609
610 pnfs_generic_pg_init_write(pgio, req);
611 if (unlikely(pgio->pg_lseg == NULL))
612 return; /* Not pNFS */
613
614 if (req->wb_offset ||
615 !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE,
616 &OBJIO_LSEG(pgio->pg_lseg)->layout,
617 &stripe_end)) {
618 pgio->pg_layout_private = (void *)stripe_end;
619 } else {
620 pgio->pg_layout_private = (void *)
621 OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
622 }
574} 623}
575 624
576static const struct nfs_pageio_ops objio_pg_read_ops = { 625static const struct nfs_pageio_ops objio_pg_read_ops = {
577 .pg_init = pnfs_generic_pg_init_read, 626 .pg_init = objio_init_read,
578 .pg_test = objio_pg_test, 627 .pg_test = objio_pg_test,
579 .pg_doio = pnfs_generic_pg_readpages, 628 .pg_doio = pnfs_generic_pg_readpages,
580}; 629};
581 630
582static const struct nfs_pageio_ops objio_pg_write_ops = { 631static const struct nfs_pageio_ops objio_pg_write_ops = {
583 .pg_init = pnfs_generic_pg_init_write, 632 .pg_init = objio_init_write,
584 .pg_test = objio_pg_test, 633 .pg_test = objio_pg_test,
585 .pg_doio = pnfs_generic_pg_writepages, 634 .pg_doio = pnfs_generic_pg_writepages,
586}; 635};
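
The stripe math in aligned_on_raid_stripe() is easier to follow with concrete numbers. A standalone sketch with hypothetical layout values (stripe_unit = 64 KiB, group_width = 5, one parity unit, so a full data stripe spans 4 units = 256 KiB):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t offset = 300 * 1024;           /* I/O offset in bytes */
        uint32_t stripe_unit = 64 * 1024;
        uint32_t group_width = 5, parity = 1;
        uint32_t stripe_size = stripe_unit * (group_width - parity); /* 256 KiB */
        uint32_t stripe_off = offset % stripe_size;  /* div_u64_rem() in-kernel */

        if (stripe_off)
                printf("unaligned: %u bytes left to the stripe end\n",
                       (unsigned)(stripe_size - stripe_off)); /* 217088 = 212 KiB */
        else
                printf("aligned on a stripe boundary\n");
        return 0;
}

An unaligned write is thus capped at stripe_end, which objio_init_write() stashes in pg_layout_private for objio_pg_test() to enforce.
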
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1a6732ed04a4..311a79681e2b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
49 hdr->io_start = req_offset(hdr->req); 49 hdr->io_start = req_offset(hdr->req);
50 hdr->good_bytes = desc->pg_count; 50 hdr->good_bytes = desc->pg_count;
51 hdr->dreq = desc->pg_dreq; 51 hdr->dreq = desc->pg_dreq;
52 hdr->layout_private = desc->pg_layout_private;
52 hdr->release = release; 53 hdr->release = release;
53 hdr->completion_ops = desc->pg_completion_ops; 54 hdr->completion_ops = desc->pg_completion_ops;
54 if (hdr->completion_ops->init_hdr) 55 if (hdr->completion_ops->init_hdr)
@@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
268 desc->pg_error = 0; 269 desc->pg_error = 0;
269 desc->pg_lseg = NULL; 270 desc->pg_lseg = NULL;
270 desc->pg_dreq = NULL; 271 desc->pg_dreq = NULL;
272 desc->pg_layout_private = NULL;
271} 273}
272EXPORT_SYMBOL_GPL(nfs_pageio_init); 274EXPORT_SYMBOL_GPL(nfs_pageio_init);
273 275
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 76875bfcf19c..2e00feacd4be 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
583 struct nfs_server *server = NFS_SERVER(ino); 583 struct nfs_server *server = NFS_SERVER(ino);
584 struct nfs4_layoutget *lgp; 584 struct nfs4_layoutget *lgp;
585 struct pnfs_layout_segment *lseg = NULL; 585 struct pnfs_layout_segment *lseg = NULL;
586 struct page **pages = NULL;
587 int i;
588 u32 max_resp_sz, max_pages;
589 586
590 dprintk("--> %s\n", __func__); 587 dprintk("--> %s\n", __func__);
591 588
@@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
594 if (lgp == NULL) 591 if (lgp == NULL)
595 return NULL; 592 return NULL;
596 593
597 /* allocate pages for xdr post processing */
598 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
599 max_pages = nfs_page_array_len(0, max_resp_sz);
600
601 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
602 if (!pages)
603 goto out_err_free;
604
605 for (i = 0; i < max_pages; i++) {
606 pages[i] = alloc_page(gfp_flags);
607 if (!pages[i])
608 goto out_err_free;
609 }
610
611 lgp->args.minlength = PAGE_CACHE_SIZE; 594 lgp->args.minlength = PAGE_CACHE_SIZE;
612 if (lgp->args.minlength > range->length) 595 if (lgp->args.minlength > range->length)
613 lgp->args.minlength = range->length; 596 lgp->args.minlength = range->length;
@@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
616 lgp->args.type = server->pnfs_curr_ld->id; 599 lgp->args.type = server->pnfs_curr_ld->id;
617 lgp->args.inode = ino; 600 lgp->args.inode = ino;
618 lgp->args.ctx = get_nfs_open_context(ctx); 601 lgp->args.ctx = get_nfs_open_context(ctx);
619 lgp->args.layout.pages = pages;
620 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
621 lgp->lsegpp = &lseg; 602 lgp->lsegpp = &lseg;
622 lgp->gfp_flags = gfp_flags; 603 lgp->gfp_flags = gfp_flags;
623 604
624 /* Synchronously retrieve layout information from server and 605 /* Synchronously retrieve layout information from server and
625 * store in lseg. 606 * store in lseg.
626 */ 607 */
627 nfs4_proc_layoutget(lgp); 608 nfs4_proc_layoutget(lgp, gfp_flags);
628 if (!lseg) { 609 if (!lseg) {
629 /* remember that LAYOUTGET failed and suspend trying */ 610 /* remember that LAYOUTGET failed and suspend trying */
630 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags); 611 set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
631 } 612 }
632 613
633 /* free xdr pages */
634 for (i = 0; i < max_pages; i++)
635 __free_page(pages[i]);
636 kfree(pages);
637
638 return lseg; 614 return lseg;
639
640out_err_free:
641 /* free any allocated xdr pages, lgp as it's not used */
642 if (pages) {
643 for (i = 0; i < max_pages; i++) {
644 if (!pages[i])
645 break;
646 __free_page(pages[i]);
647 }
648 kfree(pages);
649 }
650 kfree(lgp);
651 return NULL;
652} 615}
653 616
654/* 617/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2c6c80503ba4..745aa1b39e7c 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
172 struct pnfs_devicelist *devlist); 172 struct pnfs_devicelist *devlist);
173extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, 173extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
174 struct pnfs_device *dev); 174 struct pnfs_device *dev);
175extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); 175extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
176extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); 176extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
177 177
178/* pnfs.c */ 178/* pnfs.c */
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ac6a3c55dce4..239aff7338eb 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops);
319static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *); 319static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
320static int nfs4_validate_mount_data(void *options, 320static int nfs4_validate_mount_data(void *options,
321 struct nfs_parsed_mount_data *args, const char *dev_name); 321 struct nfs_parsed_mount_data *args, const char *dev_name);
322
323struct file_system_type nfs4_fs_type = {
324 .owner = THIS_MODULE,
325 .name = "nfs4",
326 .mount = nfs_fs_mount,
327 .kill_sb = nfs_kill_super,
328 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
329};
330EXPORT_SYMBOL_GPL(nfs4_fs_type);
331
332static int __init register_nfs4_fs(void)
333{
334 return register_filesystem(&nfs4_fs_type);
335}
336
337static void unregister_nfs4_fs(void)
338{
339 unregister_filesystem(&nfs4_fs_type);
340}
341#else
342static int __init register_nfs4_fs(void)
343{
344 return 0;
345}
346
347static void unregister_nfs4_fs(void)
348{
349}
322#endif 350#endif
323 351
324static struct shrinker acl_shrinker = { 352static struct shrinker acl_shrinker = {
@@ -337,12 +365,18 @@ int __init register_nfs_fs(void)
337 if (ret < 0) 365 if (ret < 0)
338 goto error_0; 366 goto error_0;
339 367
340 ret = nfs_register_sysctl(); 368 ret = register_nfs4_fs();
341 if (ret < 0) 369 if (ret < 0)
342 goto error_1; 370 goto error_1;
371
372 ret = nfs_register_sysctl();
373 if (ret < 0)
374 goto error_2;
343 register_shrinker(&acl_shrinker); 375 register_shrinker(&acl_shrinker);
344 return 0; 376 return 0;
345 377
378error_2:
379 unregister_nfs4_fs();
346error_1: 380error_1:
347 unregister_filesystem(&nfs_fs_type); 381 unregister_filesystem(&nfs_fs_type);
348error_0: 382error_0:
@@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void)
356{ 390{
357 unregister_shrinker(&acl_shrinker); 391 unregister_shrinker(&acl_shrinker);
358 nfs_unregister_sysctl(); 392 nfs_unregister_sysctl();
393 unregister_nfs4_fs();
359 unregister_filesystem(&nfs_fs_type); 394 unregister_filesystem(&nfs_fs_type);
360} 395}
361 396
@@ -2645,4 +2680,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
2645module_param(send_implementation_id, ushort, 0644); 2680module_param(send_implementation_id, ushort, 0644);
2646MODULE_PARM_DESC(send_implementation_id, 2681MODULE_PARM_DESC(send_implementation_id,
2647 "Send implementation ID with NFSv4.1 exchange_id"); 2682 "Send implementation ID with NFSv4.1 exchange_id");
2683MODULE_ALIAS("nfs4");
2684
2648#endif /* CONFIG_NFS_V4 */ 2685#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5829d0ce7cfb..e3b55372726c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1814,19 +1814,19 @@ int __init nfs_init_writepagecache(void)
1814 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, 1814 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1815 nfs_wdata_cachep); 1815 nfs_wdata_cachep);
1816 if (nfs_wdata_mempool == NULL) 1816 if (nfs_wdata_mempool == NULL)
1817 return -ENOMEM; 1817 goto out_destroy_write_cache;
1818 1818
1819 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", 1819 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
1820 sizeof(struct nfs_commit_data), 1820 sizeof(struct nfs_commit_data),
1821 0, SLAB_HWCACHE_ALIGN, 1821 0, SLAB_HWCACHE_ALIGN,
1822 NULL); 1822 NULL);
1823 if (nfs_cdata_cachep == NULL) 1823 if (nfs_cdata_cachep == NULL)
1824 return -ENOMEM; 1824 goto out_destroy_write_mempool;
1825 1825
1826 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, 1826 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1827 nfs_wdata_cachep); 1827 nfs_wdata_cachep);
1828 if (nfs_commit_mempool == NULL) 1828 if (nfs_commit_mempool == NULL)
1829 return -ENOMEM; 1829 goto out_destroy_commit_cache;
1830 1830
1831 /* 1831 /*
1832 * NFS congestion size, scale with available memory. 1832 * NFS congestion size, scale with available memory.
@@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void)
1849 nfs_congestion_kb = 256*1024; 1849 nfs_congestion_kb = 256*1024;
1850 1850
1851 return 0; 1851 return 0;
1852
1853out_destroy_commit_cache:
1854 kmem_cache_destroy(nfs_cdata_cachep);
1855out_destroy_write_mempool:
1856 mempool_destroy(nfs_wdata_mempool);
1857out_destroy_write_cache:
1858 kmem_cache_destroy(nfs_wdata_cachep);
1859 return -ENOMEM;
1852} 1860}
1853 1861
1854void nfs_destroy_writepagecache(void) 1862void nfs_destroy_writepagecache(void)
1855{ 1863{
1856 mempool_destroy(nfs_commit_mempool); 1864 mempool_destroy(nfs_commit_mempool);
1865 kmem_cache_destroy(nfs_cdata_cachep);
1857 mempool_destroy(nfs_wdata_mempool); 1866 mempool_destroy(nfs_wdata_mempool);
1858 kmem_cache_destroy(nfs_wdata_cachep); 1867 kmem_cache_destroy(nfs_wdata_cachep);
1859} 1868}
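
The error paths above follow the kernel's usual goto ladder: each label undoes exactly the allocations that succeeded before the failing step, in reverse order, so nfs_init_writepagecache() no longer leaks the write cache and mempool when a later allocation fails. A generic, self-contained sketch of the same pattern (the names are illustrative):

#include <stdlib.h>

/* Generic unwind ladder: each label releases everything that was
 * successfully set up before the failing step, in reverse order. */
static int setup_three(void **a, void **b, void **c)
{
        *a = malloc(64);
        if (!*a)
                goto out;
        *b = malloc(64);
        if (!*b)
                goto out_free_a;
        *c = malloc(64);
        if (!*c)
                goto out_free_b;
        return 0;

out_free_b:
        free(*b);
out_free_a:
        free(*a);
out:
        return -1;      /* -ENOMEM in the kernel version */
}
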
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 14cf9de1dbe1..99dffab4c4e4 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -9,6 +9,7 @@
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/seq_file.h> 10#include <linux/seq_file.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/cred.h>
12 13
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14#include <asm/page.h> 15#include <asm/page.h>
@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56 memset(p, 0, sizeof(*p)); 57 memset(p, 0, sizeof(*p));
57 mutex_init(&p->lock); 58 mutex_init(&p->lock);
58 p->op = op; 59 p->op = op;
60#ifdef CONFIG_USER_NS
61 p->user_ns = file->f_cred->user_ns;
62#endif
59 63
60 /* 64 /*
61 * Wrappers around seq_open(e.g. swaps_open) need to be 65 * Wrappers around seq_open(e.g. swaps_open) need to be
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a1a0386e0160..ced362533e3c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -166,8 +166,6 @@ struct drm_display_mode {
166 int crtc_vsync_start; 166 int crtc_vsync_start;
167 int crtc_vsync_end; 167 int crtc_vsync_end;
168 int crtc_vtotal; 168 int crtc_vtotal;
169 int crtc_hadjusted;
170 int crtc_vadjusted;
171 169
172 /* Driver private mode info */ 170 /* Driver private mode info */
173 int private_size; 171 int private_size;
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 3fb8bbafe5e7..6ba45d2b99db 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -515,6 +515,26 @@ struct bcma_pflash {
515 u32 window_size; 515 u32 window_size;
516}; 516};
517 517
518#ifdef CONFIG_BCMA_SFLASH
519struct bcma_sflash {
520 bool present;
521 u32 window;
522 u32 blocksize;
523 u16 numblocks;
524 u32 size;
525};
526#endif
527
528#ifdef CONFIG_BCMA_NFLASH
529struct mtd_info;
530
531struct bcma_nflash {
532 bool present;
533
534 struct mtd_info *mtd;
535};
536#endif
537
518struct bcma_serial_port { 538struct bcma_serial_port {
519 void *regs; 539 void *regs;
520 unsigned long clockspeed; 540 unsigned long clockspeed;
@@ -535,6 +555,12 @@ struct bcma_drv_cc {
535 struct bcma_chipcommon_pmu pmu; 555 struct bcma_chipcommon_pmu pmu;
536#ifdef CONFIG_BCMA_DRIVER_MIPS 556#ifdef CONFIG_BCMA_DRIVER_MIPS
537 struct bcma_pflash pflash; 557 struct bcma_pflash pflash;
558#ifdef CONFIG_BCMA_SFLASH
559 struct bcma_sflash sflash;
560#endif
561#ifdef CONFIG_BCMA_NFLASH
562 struct bcma_nflash nflash;
563#endif
538 564
539 int nr_serial_ports; 565 int nr_serial_ports;
540 struct bcma_serial_port serial_ports[4]; 566 struct bcma_serial_port serial_ports[4];
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index a393e82bf7bf..6c9cb93ae3de 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -85,4 +85,6 @@
85 * (2 ZettaBytes), high 32 bits 85 * (2 ZettaBytes), high 32 bits
86 */ 86 */
87 87
88#define BCMA_SFLASH 0x1c000000
89
88#endif /* LINUX_BCMA_REGS_H_ */ 90#endif /* LINUX_BCMA_REGS_H_ */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a810987cb80e..e6ff12dd717b 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -74,8 +74,6 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
74/* found in socket.c */ 74/* found in socket.c */
75extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); 75extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
76 76
77struct vlan_info;
78
79static inline int is_vlan_dev(struct net_device *dev) 77static inline int is_vlan_dev(struct net_device *dev)
80{ 78{
81 return dev->priv_flags & IFF_802_1Q_VLAN; 79 return dev->priv_flags & IFF_802_1Q_VLAN;
@@ -101,6 +99,8 @@ extern int vlan_vids_add_by_dev(struct net_device *dev,
101 const struct net_device *by_dev); 99 const struct net_device *by_dev);
102extern void vlan_vids_del_by_dev(struct net_device *dev, 100extern void vlan_vids_del_by_dev(struct net_device *dev,
103 const struct net_device *by_dev); 101 const struct net_device *by_dev);
102
103extern bool vlan_uses_dev(const struct net_device *dev);
104#else 104#else
105static inline struct net_device * 105static inline struct net_device *
106__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) 106__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -151,6 +151,11 @@ static inline void vlan_vids_del_by_dev(struct net_device *dev,
151 const struct net_device *by_dev) 151 const struct net_device *by_dev)
152{ 152{
153} 153}
154
155static inline bool vlan_uses_dev(const struct net_device *dev)
156{
157 return false;
158}
154#endif 159#endif
155 160
156/** 161/**
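
vlan_uses_dev() gives code outside the 8021q module a way to ask whether any VLAN device is stacked on top of a real device, with the stub returning false when VLAN support is compiled out. A hedged sketch of a caller (the policy around the check is illustrative):

#include <linux/if_vlan.h>

/* Refuse an operation that would break stacked VLAN devices.
 * Illustrative only; the check itself is the new helper. */
static int may_release_slave(struct net_device *dev)
{
        if (vlan_uses_dev(dev))
                return -EBUSY;  /* VLANs still ride on this device */
        return 0;
}
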
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index f1362b5447fc..e788c186ed3a 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -159,6 +159,7 @@ struct inet_diag_handler {
159struct inet_connection_sock; 159struct inet_connection_sock;
160int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 160int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
161 struct sk_buff *skb, struct inet_diag_req_v2 *req, 161 struct sk_buff *skb, struct inet_diag_req_v2 *req,
162 struct user_namespace *user_ns,
162 u32 pid, u32 seq, u16 nlmsg_flags, 163 u32 pid, u32 seq, u16 nlmsg_flags,
163 const struct nlmsghdr *unlh); 164 const struct nlmsghdr *unlh);
164void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, 165void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 9c07dcebded7..65af6887872f 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -18,6 +18,7 @@
18#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/atomic.h> 19#include <linux/atomic.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/mutex.h>
21 22
22struct kref { 23struct kref {
23 atomic_t refcount; 24 atomic_t refcount;
@@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
93{ 94{
94 return kref_sub(kref, 1, release); 95 return kref_sub(kref, 1, release);
95} 96}
97
98static inline int kref_put_mutex(struct kref *kref,
99 void (*release)(struct kref *kref),
100 struct mutex *lock)
101{
102 WARN_ON(release == NULL);
103 if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
104 mutex_lock(lock);
105 if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
106 mutex_unlock(lock);
107 return 0;
108 }
109 release(kref);
110 return 1;
111 }
112 return 0;
113}
96#endif /* _KREF_H_ */ 114#endif /* _KREF_H_ */
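
kref_put_mutex() targets objects whose release must run under a lock that also guards lookups: the common path drops the refcount without touching the mutex, and only the final put acquires it before invoking release(), which is then responsible for unlocking. A minimal sketch of the intended usage (the object and its lookup list are hypothetical):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
        struct kref ref;
        struct list_head node;
};

static DEFINE_MUTEX(obj_lock);          /* guards the lookup list */

static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, ref);

        /* kref_put_mutex() calls this with obj_lock held. */
        list_del(&o->node);
        mutex_unlock(&obj_lock);
        kfree(o);
}

static void obj_put(struct obj *o)
{
        /* The fast path never takes obj_lock; only the last put does. */
        kref_put_mutex(&o->ref, obj_release, &obj_lock);
}
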
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9ad7fa8c10e0..ccac82e61604 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2227,6 +2227,7 @@ static inline void dev_hold(struct net_device *dev)
2227 * kind of lower layer not just hardware media. 2227 * kind of lower layer not just hardware media.
2228 */ 2228 */
2229 2229
2230extern void linkwatch_init_dev(struct net_device *dev);
2230extern void linkwatch_fire_event(struct net_device *dev); 2231extern void linkwatch_fire_event(struct net_device *dev);
2231extern void linkwatch_forget_dev(struct net_device *dev); 2232extern void linkwatch_forget_dev(struct net_device *dev);
2232 2233
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index f74dd133788f..c9fdde2bc73f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -165,6 +165,7 @@ struct netlink_skb_parms {
165 struct ucred creds; /* Skb credentials */ 165 struct ucred creds; /* Skb credentials */
166 __u32 pid; 166 __u32 pid;
167 __u32 dst_group; 167 __u32 dst_group;
168 struct sock *ssk;
168}; 169};
169 170
170#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) 171#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 880805774f9f..92ce5783b707 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -69,6 +69,7 @@ struct nfs_pageio_descriptor {
69 const struct nfs_pgio_completion_ops *pg_completion_ops; 69 const struct nfs_pgio_completion_ops *pg_completion_ops;
70 struct pnfs_layout_segment *pg_lseg; 70 struct pnfs_layout_segment *pg_lseg;
71 struct nfs_direct_req *pg_dreq; 71 struct nfs_direct_req *pg_dreq;
72 void *pg_layout_private;
72}; 73};
73 74
74#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) 75#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 00485e084394..ac7c8ae254f2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1248,6 +1248,7 @@ struct nfs_pgio_header {
1248 void (*release) (struct nfs_pgio_header *hdr); 1248 void (*release) (struct nfs_pgio_header *hdr);
1249 const struct nfs_pgio_completion_ops *completion_ops; 1249 const struct nfs_pgio_completion_ops *completion_ops;
1250 struct nfs_direct_req *dreq; 1250 struct nfs_direct_req *dreq;
1251 void *layout_private;
1251 spinlock_t lock; 1252 spinlock_t lock;
1252 /* fields protected by lock */ 1253 /* fields protected by lock */
1253 int pnfs_error; 1254 int pnfs_error;
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2f3878806403..458416279347 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -565,6 +565,14 @@
565 * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with 565 * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
566 * %NL80211_ATTR_WIPHY_CHANNEL_TYPE. 566 * %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
567 * 567 *
568 * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
569 * its %NL80211_ATTR_WDEV identifier. It must have been created with
570 * %NL80211_CMD_NEW_INTERFACE previously. After it has been started, the
571 * P2P Device can be used for P2P operations, e.g. remain-on-channel and
572 * public action frame TX.
573 * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by
574 * its %NL80211_ATTR_WDEV identifier.
575 *
568 * @NL80211_CMD_MAX: highest used command number 576 * @NL80211_CMD_MAX: highest used command number
569 * @__NL80211_CMD_AFTER_LAST: internal use 577 * @__NL80211_CMD_AFTER_LAST: internal use
570 */ 578 */
@@ -708,6 +716,9 @@ enum nl80211_commands {
708 716
709 NL80211_CMD_CH_SWITCH_NOTIFY, 717 NL80211_CMD_CH_SWITCH_NOTIFY,
710 718
719 NL80211_CMD_START_P2P_DEVICE,
720 NL80211_CMD_STOP_P2P_DEVICE,
721
711 /* add new commands above here */ 722 /* add new commands above here */
712 723
713 /* used to define NL80211_CMD_MAX below */ 724 /* used to define NL80211_CMD_MAX below */
@@ -1575,6 +1586,10 @@ enum nl80211_attrs {
1575 * @NL80211_IFTYPE_MESH_POINT: mesh point 1586 * @NL80211_IFTYPE_MESH_POINT: mesh point
1576 * @NL80211_IFTYPE_P2P_CLIENT: P2P client 1587 * @NL80211_IFTYPE_P2P_CLIENT: P2P client
1577 * @NL80211_IFTYPE_P2P_GO: P2P group owner 1588 * @NL80211_IFTYPE_P2P_GO: P2P group owner
1589 * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev
1590 * and therefore can't be created in the normal ways, use the
1591 * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
1592 * commands to create and destroy one
1578 * @NL80211_IFTYPE_MAX: highest interface type number currently defined 1593 * @NL80211_IFTYPE_MAX: highest interface type number currently defined
1579 * @NUM_NL80211_IFTYPES: number of defined interface types 1594 * @NUM_NL80211_IFTYPES: number of defined interface types
1580 * 1595 *
@@ -1593,6 +1608,7 @@ enum nl80211_iftype {
1593 NL80211_IFTYPE_MESH_POINT, 1608 NL80211_IFTYPE_MESH_POINT,
1594 NL80211_IFTYPE_P2P_CLIENT, 1609 NL80211_IFTYPE_P2P_CLIENT,
1595 NL80211_IFTYPE_P2P_GO, 1610 NL80211_IFTYPE_P2P_GO,
1611 NL80211_IFTYPE_P2P_DEVICE,
1596 1612
1597 /* keep last */ 1613 /* keep last */
1598 NUM_NL80211_IFTYPES, 1614 NUM_NL80211_IFTYPES,
@@ -2994,12 +3010,18 @@ enum nl80211_ap_sme_features {
2994 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested 3010 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
2995 * to work properly to support receiving regulatory hints from 3011 * to work properly to support receiving regulatory hints from
2996 * cellular base stations. 3012 * cellular base stations.
3013 * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
3014 * P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
3015 * in the interface combinations, even when it's only used for scan
3016 * and remain-on-channel. This could be due to, for example, the
3017 * remain-on-channel implementation requiring a channel context.
2997 */ 3018 */
2998enum nl80211_feature_flags { 3019enum nl80211_feature_flags {
2999 NL80211_FEATURE_SK_TX_STATUS = 1 << 0, 3020 NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
3000 NL80211_FEATURE_HT_IBSS = 1 << 1, 3021 NL80211_FEATURE_HT_IBSS = 1 << 1,
3001 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, 3022 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
3002 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, 3023 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
3024 NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4,
3003}; 3025};
3004 3026
3005/** 3027/**
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 912c27a0f7ee..6ef49b803efb 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -12,6 +12,7 @@
12#include <linux/phy.h> 12#include <linux/phy.h>
13#include <linux/of.h> 13#include <linux/of.h>
14 14
15#ifdef CONFIG_OF
15extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); 16extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
16extern struct phy_device *of_phy_find_device(struct device_node *phy_np); 17extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
17extern struct phy_device *of_phy_connect(struct net_device *dev, 18extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -24,4 +25,36 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
24 25
25extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
26 27
28#else /* CONFIG_OF */
29static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
30{
31 return -ENOSYS;
32}
33
34static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
35{
36 return NULL;
37}
38
39static inline struct phy_device *of_phy_connect(struct net_device *dev,
40 struct device_node *phy_np,
41 void (*hndlr)(struct net_device *),
42 u32 flags, phy_interface_t iface)
43{
44 return NULL;
45}
46
47static inline struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
48 void (*hndlr)(struct net_device *),
49 phy_interface_t iface)
50{
51 return NULL;
52}
53
54static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
55{
56 return NULL;
57}
58#endif /* CONFIG_OF */
59
27#endif /* __LINUX_OF_MDIO_H */ 60#endif /* __LINUX_OF_MDIO_H */
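
With these stubs (made static inline so the header can be included from multiple translation units), MDIO drivers can call the OF helpers unconditionally and let !CONFIG_OF configurations degrade at runtime instead of failing to build. A sketch of a probe path using them (the driver shape is illustrative):

#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

static int example_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus = mdiobus_alloc();
        int ret;

        if (!bus)
                return -ENOMEM;
        /* ... fill in bus->name, bus->read, bus->write, bus->priv ... */

        /* Registers child PHYs from the device tree; the stub above
         * returns -ENOSYS on kernels built without CONFIG_OF. */
        ret = of_mdiobus_register(bus, pdev->dev.of_node);
        if (ret)
                mdiobus_free(bus);
        return ret;
}
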
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fc3526077348..6b4565c440c8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2149,7 +2149,7 @@
2149#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 2149#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
2150#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 2150#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
2151#define PCI_DEVICE_ID_NX2_5706S 0x16aa 2151#define PCI_DEVICE_ID_NX2_5706S 0x16aa
2152#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab 2152#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
2153#define PCI_DEVICE_ID_NX2_5708S 0x16ac 2153#define PCI_DEVICE_ID_NX2_5708S 0x16ac
2154#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad 2154#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
2155#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae 2155#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 6fdf02737e9d..0ec590bb3611 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -354,6 +354,37 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
354} 354}
355#endif /* RFKILL || RFKILL_MODULE */ 355#endif /* RFKILL || RFKILL_MODULE */
356 356
357
358#ifdef CONFIG_RFKILL_LEDS
359/**
360 * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
361 * This function might return a NULL pointer if registering of the
362 * LED trigger failed. Use this as "default_trigger" for the LED.
363 */
364const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
365
366/**
367 * rfkill_set_led_trigger_name -- set the LED trigger name
368 * @rfkill: rfkill struct
369 * @name: LED trigger name
370 *
371 * This function sets the LED trigger name of the radio LED
372 * trigger that rfkill creates. It is optional, but if called
373 * must be called before rfkill_register() to be effective.
374 */
375void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
376#else
377static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
378{
379 return NULL;
380}
381
382static inline void
383rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
384{
385}
386#endif
387
357#endif /* __KERNEL__ */ 388#endif /* __KERNEL__ */
358 389
359#endif /* RFKILL_H */ 390#endif /* RFKILL_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 83c44eefe698..68a04a343cad 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -13,6 +13,7 @@ struct file;
13struct path; 13struct path;
14struct inode; 14struct inode;
15struct dentry; 15struct dentry;
16struct user_namespace;
16 17
17struct seq_file { 18struct seq_file {
18 char *buf; 19 char *buf;
@@ -25,6 +26,9 @@ struct seq_file {
25 struct mutex lock; 26 struct mutex lock;
26 const struct seq_operations *op; 27 const struct seq_operations *op;
27 int poll_event; 28 int poll_event;
29#ifdef CONFIG_USER_NS
30 struct user_namespace *user_ns;
31#endif
28 void *private; 32 void *private;
29}; 33};
30 34
@@ -128,6 +132,16 @@ int seq_put_decimal_ull(struct seq_file *m, char delimiter,
128int seq_put_decimal_ll(struct seq_file *m, char delimiter, 132int seq_put_decimal_ll(struct seq_file *m, char delimiter,
129 long long num); 133 long long num);
130 134
135static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
136{
137#ifdef CONFIG_USER_NS
138 return seq->user_ns;
139#else
140 extern struct user_namespace init_user_ns;
141 return &init_user_ns;
142#endif
143}
144
131#define SEQ_START_TOKEN ((void *)1) 145#define SEQ_START_TOKEN ((void *)1)
132/* 146/*
133 * Helpers for iteration over list_head-s in seq_files 147 * Helpers for iteration over list_head-s in seq_files
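
seq_user_ns() lets seq_file show routines map kernel-internal kuid_t/kgid_t values into IDs meaningful to the opener's user namespace, falling back to init_user_ns when CONFIG_USER_NS is off. A sketch of a show function using it (the record being printed is hypothetical):

#include <linux/seq_file.h>
#include <linux/uidgid.h>

static int example_show(struct seq_file *seq, void *v)
{
        kuid_t owner = GLOBAL_ROOT_UID; /* stands in for the record's uid */

        /* Translate into the namespace of whoever opened the file. */
        seq_printf(seq, "uid=%u\n",
                   from_kuid_munged(seq_user_ns(seq), owner));
        return 0;
}
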
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index ad6e3a6bf9fb..fdfba235f9f1 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -241,6 +241,10 @@ enum
241 LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */ 241 LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */
242 LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */ 242 LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */
243 LINUX_MIB_TCPFASTOPENACTIVE, /* TCPFastOpenActive */ 243 LINUX_MIB_TCPFASTOPENACTIVE, /* TCPFastOpenActive */
244 LINUX_MIB_TCPFASTOPENPASSIVE, /* TCPFastOpenPassive*/
245 LINUX_MIB_TCPFASTOPENPASSIVEFAIL, /* TCPFastOpenPassiveFail */
246 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
247 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
244 __LINUX_MIB_MAX 248 __LINUX_MIB_MAX
245}; 249};
246 250
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index b69bdb1e08b6..a1547ea3920d 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -76,7 +76,6 @@
76/* Platform data for platform device structure's platform_data field */ 76/* Platform data for platform device structure's platform_data field */
77 77
78struct stmmac_mdio_bus_data { 78struct stmmac_mdio_bus_data {
79 int bus_id;
80 int (*phy_reset)(void *priv); 79 int (*phy_reset)(void *priv);
81 unsigned int phy_mask; 80 unsigned int phy_mask;
82 int *irqs; 81 int *irqs;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index eb125a4c30b3..ae46df590629 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -110,6 +110,7 @@ enum {
110#define TCP_REPAIR_QUEUE 20 110#define TCP_REPAIR_QUEUE 20
111#define TCP_QUEUE_SEQ 21 111#define TCP_QUEUE_SEQ 21
112#define TCP_REPAIR_OPTIONS 22 112#define TCP_REPAIR_OPTIONS 22
113#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
113 114
114struct tcp_repair_opt { 115struct tcp_repair_opt {
115 __u32 opt_code; 116 __u32 opt_code;
@@ -246,6 +247,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
246/* TCP Fast Open */ 247/* TCP Fast Open */
247#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */ 248#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */
248#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */ 249#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */
250#define TCP_FASTOPEN_COOKIE_SIZE 8 /* the size employed by this impl. */
249 251
250/* TCP Fast Open Cookie as stored in memory */ 252/* TCP Fast Open Cookie as stored in memory */
251struct tcp_fastopen_cookie { 253struct tcp_fastopen_cookie {
@@ -312,9 +314,14 @@ struct tcp_request_sock {
312 /* Only used by TCP MD5 Signature so far. */ 314 /* Only used by TCP MD5 Signature so far. */
313 const struct tcp_request_sock_ops *af_specific; 315 const struct tcp_request_sock_ops *af_specific;
314#endif 316#endif
317 struct sock *listener; /* needed for TFO */
315 u32 rcv_isn; 318 u32 rcv_isn;
316 u32 snt_isn; 319 u32 snt_isn;
317 u32 snt_synack; /* synack sent time */ 320 u32 snt_synack; /* synack sent time */
321 u32 rcv_nxt; /* the ack # by SYNACK. For
322 * FastOpen it's the seq#
323 * after data-in-SYN.
324 */
318}; 325};
319 326
320static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) 327static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -505,14 +512,18 @@ struct tcp_sock {
505 struct tcp_md5sig_info __rcu *md5sig_info; 512 struct tcp_md5sig_info __rcu *md5sig_info;
506#endif 513#endif
507 514
508/* TCP fastopen related information */
509 struct tcp_fastopen_request *fastopen_req;
510
511 /* When the cookie options are generated and exchanged, then this 515 /* When the cookie options are generated and exchanged, then this
512 * object holds a reference to them (cookie_values->kref). Also 516 * object holds a reference to them (cookie_values->kref). Also
513 * contains related tcp_cookie_transactions fields. 517 * contains related tcp_cookie_transactions fields.
514 */ 518 */
515 struct tcp_cookie_values *cookie_values; 519 struct tcp_cookie_values *cookie_values;
520
521/* TCP fastopen related information */
522 struct tcp_fastopen_request *fastopen_req;
523 /* fastopen_rsk points to request_sock that resulted in this big
524 * socket. Used to retransmit SYNACKs etc.
525 */
526 struct request_sock *fastopen_rsk;
516}; 527};
517 528
518enum tsq_flags { 529enum tsq_flags {
@@ -552,6 +563,34 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
552 return (struct tcp_timewait_sock *)sk; 563 return (struct tcp_timewait_sock *)sk;
553} 564}
554 565
566static inline bool tcp_passive_fastopen(const struct sock *sk)
567{
568 return (sk->sk_state == TCP_SYN_RECV &&
569 tcp_sk(sk)->fastopen_rsk != NULL);
570}
571
572static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
573{
574 return foc->len != -1;
575}
576
577static inline int fastopen_init_queue(struct sock *sk, int backlog)
578{
579 struct request_sock_queue *queue =
580 &inet_csk(sk)->icsk_accept_queue;
581
582 if (queue->fastopenq == NULL) {
583 queue->fastopenq = kzalloc(
584 sizeof(struct fastopen_queue),
585 sk->sk_allocation);
586 if (queue->fastopenq == NULL)
587 return -ENOMEM;
588 spin_lock_init(&queue->fastopenq->lock);
589 }
590 queue->fastopenq->max_qlen = backlog;
591 return 0;
592}
593
555#endif /* __KERNEL__ */ 594#endif /* __KERNEL__ */
556 595
557#endif /* _LINUX_TCP_H */ 596#endif /* _LINUX_TCP_H */
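
On the socket API side, a server opts a listener into Fast Open with the new TCP_FASTOPEN option; the integer value is the maximum number of pending TFO requests and feeds fastopen_init_queue() above. A minimal userspace sketch:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23         /* matches the definition added above */
#endif

int make_tfo_listener(int port)
{
        struct sockaddr_in addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int qlen = 16;          /* max queued Fast Open requests */

        if (fd < 0)
                return -1;
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(port);
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return -1;
        /* Enable TFO on the listener before listen(). */
        setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
        return listen(fd, 128) == 0 ? fd : -1;
}
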
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 5d2352154cf6..53539acbd81a 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -157,7 +157,7 @@ enum {
157typedef struct ax25_uid_assoc { 157typedef struct ax25_uid_assoc {
158 struct hlist_node uid_node; 158 struct hlist_node uid_node;
159 atomic_t refcount; 159 atomic_t refcount;
160 uid_t uid; 160 kuid_t uid;
161 ax25_address call; 161 ax25_address call;
162} ax25_uid_assoc; 162} ax25_uid_assoc;
163 163
@@ -434,7 +434,7 @@ extern unsigned long ax25_display_timer(struct timer_list *);
434 434
435/* ax25_uid.c */ 435/* ax25_uid.c */
436extern int ax25_uid_policy; 436extern int ax25_uid_policy;
437extern ax25_uid_assoc *ax25_findbyuid(uid_t); 437extern ax25_uid_assoc *ax25_findbyuid(kuid_t);
438extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); 438extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
439extern const struct file_operations ax25_uid_fops; 439extern const struct file_operations ax25_uid_fops;
440extern void ax25_uid_free(void); 440extern void ax25_uid_free(void);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3d254e10ff30..ba2e6160fad1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1439,7 +1439,8 @@ struct cfg80211_gtk_rekey_data {
1439 * @add_virtual_intf: create a new virtual interface with the given name, 1439 * @add_virtual_intf: create a new virtual interface with the given name,
1440 * must set the struct wireless_dev's iftype. Beware: You must create 1440 * must set the struct wireless_dev's iftype. Beware: You must create
1441 * the new netdev in the wiphy's network namespace! Returns the struct 1441 * the new netdev in the wiphy's network namespace! Returns the struct
1442 * wireless_dev, or an ERR_PTR. 1442 * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
1443 * also set the address member in the wdev.
1443 * 1444 *
1444 * @del_virtual_intf: remove the virtual interface 1445 * @del_virtual_intf: remove the virtual interface
1445 * 1446 *
@@ -1618,6 +1619,9 @@ struct cfg80211_gtk_rekey_data {
1618 * @get_channel: Get the current operating channel for the virtual interface. 1619 * @get_channel: Get the current operating channel for the virtual interface.
1619 * For monitor interfaces, it should return %NULL unless there's a single 1620 * For monitor interfaces, it should return %NULL unless there's a single
1620 * current monitoring channel. 1621 * current monitoring channel.
1622 *
1623 * @start_p2p_device: Start the given P2P device.
1624 * @stop_p2p_device: Stop the given P2P device.
1621 */ 1625 */
1622struct cfg80211_ops { 1626struct cfg80211_ops {
1623 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 1627 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1834,6 +1838,11 @@ struct cfg80211_ops {
1834 (*get_channel)(struct wiphy *wiphy, 1838 (*get_channel)(struct wiphy *wiphy,
1835 struct wireless_dev *wdev, 1839 struct wireless_dev *wdev,
1836 enum nl80211_channel_type *type); 1840 enum nl80211_channel_type *type);
1841
1842 int (*start_p2p_device)(struct wiphy *wiphy,
1843 struct wireless_dev *wdev);
1844 void (*stop_p2p_device)(struct wiphy *wiphy,
1845 struct wireless_dev *wdev);
1837}; 1846};
1838 1847
1839/* 1848/*
@@ -2397,6 +2406,8 @@ struct cfg80211_cached_keys;
2397 * @cleanup_work: work struct used for cleanup that can't be done directly 2406 * @cleanup_work: work struct used for cleanup that can't be done directly
2398 * @beacon_interval: beacon interval used on this device for transmitting 2407 * @beacon_interval: beacon interval used on this device for transmitting
2399 * beacons, 0 when not valid 2408 * beacons, 0 when not valid
2409 * @address: The address for this device, valid only if @netdev is %NULL
2410 * @p2p_started: true if this is a P2P Device that has been started
2400 */ 2411 */
2401struct wireless_dev { 2412struct wireless_dev {
2402 struct wiphy *wiphy; 2413 struct wiphy *wiphy;
@@ -2415,7 +2426,9 @@ struct wireless_dev {
2415 2426
2416 struct work_struct cleanup_work; 2427 struct work_struct cleanup_work;
2417 2428
2418 bool use_4addr; 2429 bool use_4addr, p2p_started;
2430
2431 u8 address[ETH_ALEN] __aligned(sizeof(u16));
2419 2432
2420 /* currently used for IBSS and SME - might be rearranged later */ 2433 /* currently used for IBSS and SME - might be rearranged later */
2421 u8 ssid[IEEE80211_MAX_SSID_LEN]; 2434 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -2463,6 +2476,13 @@ struct wireless_dev {
2463#endif 2476#endif
2464}; 2477};
2465 2478
2479static inline u8 *wdev_address(struct wireless_dev *wdev)
2480{
2481 if (wdev->netdev)
2482 return wdev->netdev->dev_addr;
2483 return wdev->address;
2484}
2485
2466/** 2486/**
2467 * wdev_priv - return wiphy priv from wireless_dev 2487 * wdev_priv - return wiphy priv from wireless_dev
2468 * 2488 *
@@ -3530,6 +3550,22 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
3530 */ 3550 */
3531u32 cfg80211_calculate_bitrate(struct rate_info *rate); 3551u32 cfg80211_calculate_bitrate(struct rate_info *rate);
3532 3552
3553/**
3554 * cfg80211_unregister_wdev - remove the given wdev
3555 * @wdev: struct wireless_dev to remove
3556 *
3557 * Call this function only for wdevs that have no netdev assigned,
3558 * e.g. P2P Devices. It removes the device from the list so that
3559 * it can no longer be used. It is necessary to call this function
3560 * even when cfg80211 requests the removal of the interface by
3561 * calling the del_virtual_intf() callback. The function must also
3562 * be called when the driver wishes to unregister the wdev, e.g.
3563 * when the device is unbound from the driver.
3564 *
3565 * Requires the RTNL to be held.
3566 */
3567void cfg80211_unregister_wdev(struct wireless_dev *wdev);
3568
3533/* Logging, debugging and troubleshooting/diagnostic helpers. */ 3569/* Logging, debugging and troubleshooting/diagnostic helpers. */
3534 3570
3535/* wiphy_printk helpers, similar to dev_printk */ 3571/* wiphy_printk helpers, similar to dev_printk */
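
Since a P2P Device wdev has no netdev, its lifetime is entirely the driver's responsibility; cfg80211_unregister_wdev() only unlinks it. A hedged sketch of a driver unbind path (the function is hypothetical):

#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/cfg80211.h>

/* Hypothetical teardown for a netdev-less P2P Device wdev. */
static void example_remove_p2p_dev(struct wireless_dev *wdev)
{
        ASSERT_RTNL();          /* cfg80211_unregister_wdev() needs the RTNL */

        /* A real driver would first stop the device if wdev->p2p_started,
         * mirroring its ->stop_p2p_device() callback. */
        cfg80211_unregister_wdev(wdev);
        kfree(wdev);
}
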
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 71392545d0a1..7f0df133d119 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -183,6 +183,9 @@ struct ieee80211_radiotap_header {
183 * Contains a bitmap of known fields/flags, the flags, and 183 * Contains a bitmap of known fields/flags, the flags, and
184 * the MCS index. 184 * the MCS index.
185 * 185 *
186 * IEEE80211_RADIOTAP_AMPDU_STATUS u32, u16, u8, u8 unitless
187 *
188 * Contains the AMPDU information for the subframe.
186 */ 189 */
187enum ieee80211_radiotap_type { 190enum ieee80211_radiotap_type {
188 IEEE80211_RADIOTAP_TSFT = 0, 191 IEEE80211_RADIOTAP_TSFT = 0,
@@ -205,6 +208,7 @@ enum ieee80211_radiotap_type {
205 IEEE80211_RADIOTAP_DATA_RETRIES = 17, 208 IEEE80211_RADIOTAP_DATA_RETRIES = 17,
206 209
207 IEEE80211_RADIOTAP_MCS = 19, 210 IEEE80211_RADIOTAP_MCS = 19,
211 IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
208 212
209 /* valid in every it_present bitmap, even vendor namespaces */ 213 /* valid in every it_present bitmap, even vendor namespaces */
210 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, 214 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -270,6 +274,13 @@ enum ieee80211_radiotap_type {
270#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 274#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08
271#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 275#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
272 276
277/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
278#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001
279#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN 0x0002
280#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN 0x0004
281#define IEEE80211_RADIOTAP_AMPDU_IS_LAST 0x0008
282#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR 0x0010
283#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN 0x0020
273 284
274/* helpers */ 285/* helpers */
275static inline int ieee80211_get_radiotap_len(unsigned char *data) 286static inline int ieee80211_get_radiotap_len(unsigned char *data)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6d01fb00ff2b..9bed5d483405 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -223,7 +223,10 @@ struct ip6_flowlabel {
223 struct ipv6_txoptions *opt; 223 struct ipv6_txoptions *opt;
224 unsigned long linger; 224 unsigned long linger;
225 u8 share; 225 u8 share;
226 u32 owner; 226 union {
227 struct pid *pid;
228 kuid_t uid;
229 } owner;
227 unsigned long lastuse; 230 unsigned long lastuse;
228 unsigned long expires; 231 unsigned long expires;
229 struct net *fl_net; 232 struct net *fl_net;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index bb86aa6f98dd..71f8262fc1df 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -171,6 +171,7 @@ struct ieee80211_low_level_stats {
171 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. 171 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
172 * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode) 172 * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
173 * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode) 173 * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
174 * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
174 */ 175 */
175enum ieee80211_bss_change { 176enum ieee80211_bss_change {
176 BSS_CHANGED_ASSOC = 1<<0, 177 BSS_CHANGED_ASSOC = 1<<0,
@@ -190,6 +191,7 @@ enum ieee80211_bss_change {
190 BSS_CHANGED_IDLE = 1<<14, 191 BSS_CHANGED_IDLE = 1<<14,
191 BSS_CHANGED_SSID = 1<<15, 192 BSS_CHANGED_SSID = 1<<15,
192 BSS_CHANGED_AP_PROBE_RESP = 1<<16, 193 BSS_CHANGED_AP_PROBE_RESP = 1<<16,
194 BSS_CHANGED_PS = 1<<17,
193 195
194 /* when adding here, make sure to change ieee80211_reconfig */ 196 /* when adding here, make sure to change ieee80211_reconfig */
195}; 197};
@@ -266,6 +268,8 @@ enum ieee80211_rssi_event {
266 * @idle: This interface is idle. There's also a global idle flag in the 268 * @idle: This interface is idle. There's also a global idle flag in the
267 * hardware config which may be more appropriate depending on what 269 * hardware config which may be more appropriate depending on what
268 * your driver/device needs to do. 270 * your driver/device needs to do.
271 * @ps: power-save mode (STA only). This flag is NOT affected by
272 * offchannel/dynamic_ps operations.
269 * @ssid: The SSID of the current vif. Only valid in AP-mode. 273 * @ssid: The SSID of the current vif. Only valid in AP-mode.
270 * @ssid_len: Length of SSID given in @ssid. 274 * @ssid_len: Length of SSID given in @ssid.
271 * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode. 275 * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
@@ -296,6 +300,7 @@ struct ieee80211_bss_conf {
296 bool arp_filter_enabled; 300 bool arp_filter_enabled;
297 bool qos; 301 bool qos;
298 bool idle; 302 bool idle;
303 bool ps;
299 u8 ssid[IEEE80211_MAX_SSID_LEN]; 304 u8 ssid[IEEE80211_MAX_SSID_LEN];
300 size_t ssid_len; 305 size_t ssid_len;
301 bool hidden_ssid; 306 bool hidden_ssid;
@@ -522,9 +527,6 @@ struct ieee80211_tx_rate {
522 * (2) driver internal use (if applicable) 527 * (2) driver internal use (if applicable)
523 * (3) TX status information - driver tells mac80211 what happened 528 * (3) TX status information - driver tells mac80211 what happened
524 * 529 *
525 * The TX control's sta pointer is only valid during the ->tx call,
526 * it may be NULL.
527 *
528 * @flags: transmit info flags, defined above 530 * @flags: transmit info flags, defined above
529 * @band: the band to transmit on (use for checking for races) 531 * @band: the band to transmit on (use for checking for races)
530 * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC 532 * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
@@ -555,6 +557,7 @@ struct ieee80211_tx_info {
555 struct ieee80211_tx_rate rates[ 557 struct ieee80211_tx_rate rates[
556 IEEE80211_TX_MAX_RATES]; 558 IEEE80211_TX_MAX_RATES];
557 s8 rts_cts_rate_idx; 559 s8 rts_cts_rate_idx;
560 /* 3 bytes free */
558 }; 561 };
559 /* only needed before rate control */ 562 /* only needed before rate control */
560 unsigned long jiffies; 563 unsigned long jiffies;
@@ -562,7 +565,7 @@ struct ieee80211_tx_info {
562 /* NB: vif can be NULL for injected frames */ 565 /* NB: vif can be NULL for injected frames */
563 struct ieee80211_vif *vif; 566 struct ieee80211_vif *vif;
564 struct ieee80211_key_conf *hw_key; 567 struct ieee80211_key_conf *hw_key;
565 struct ieee80211_sta *sta; 568 /* 8 bytes free */
566 } control; 569 } control;
567 struct { 570 struct {
568 struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; 571 struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -673,21 +676,41 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
673 * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if 676 * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
674 * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT 677 * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
675 * to hw.radiotap_mcs_details to advertise that fact 678 * to hw.radiotap_mcs_details to advertise that fact
679 * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
680 * number (@ampdu_reference) must be populated and be a distinct number for
681 * each A-MPDU
682 * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
683 * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
684 * monitoring purposes only
685 * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
686 * subframes of a single A-MPDU
687 * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
688 * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
689 * on this subframe
690 * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
691 * is stored in the @ampdu_delimiter_crc field)
676 */ 692 */
677enum mac80211_rx_flags { 693enum mac80211_rx_flags {
678 RX_FLAG_MMIC_ERROR = 1<<0, 694 RX_FLAG_MMIC_ERROR = BIT(0),
679 RX_FLAG_DECRYPTED = 1<<1, 695 RX_FLAG_DECRYPTED = BIT(1),
680 RX_FLAG_MMIC_STRIPPED = 1<<3, 696 RX_FLAG_MMIC_STRIPPED = BIT(3),
681 RX_FLAG_IV_STRIPPED = 1<<4, 697 RX_FLAG_IV_STRIPPED = BIT(4),
682 RX_FLAG_FAILED_FCS_CRC = 1<<5, 698 RX_FLAG_FAILED_FCS_CRC = BIT(5),
683 RX_FLAG_FAILED_PLCP_CRC = 1<<6, 699 RX_FLAG_FAILED_PLCP_CRC = BIT(6),
684 RX_FLAG_MACTIME_MPDU = 1<<7, 700 RX_FLAG_MACTIME_MPDU = BIT(7),
685 RX_FLAG_SHORTPRE = 1<<8, 701 RX_FLAG_SHORTPRE = BIT(8),
686 RX_FLAG_HT = 1<<9, 702 RX_FLAG_HT = BIT(9),
687 RX_FLAG_40MHZ = 1<<10, 703 RX_FLAG_40MHZ = BIT(10),
688 RX_FLAG_SHORT_GI = 1<<11, 704 RX_FLAG_SHORT_GI = BIT(11),
689 RX_FLAG_NO_SIGNAL_VAL = 1<<12, 705 RX_FLAG_NO_SIGNAL_VAL = BIT(12),
690 RX_FLAG_HT_GF = 1<<13, 706 RX_FLAG_HT_GF = BIT(13),
707 RX_FLAG_AMPDU_DETAILS = BIT(14),
708 RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15),
709 RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16),
710 RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
711 RX_FLAG_AMPDU_IS_LAST = BIT(18),
712 RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
713 RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20),
691}; 714};
692 715
693/** 716/**
@@ -711,17 +734,22 @@ enum mac80211_rx_flags {
711 * HT rates are in use (RX_FLAG_HT) 734 * HT rates are in use (RX_FLAG_HT)
712 * @flag: %RX_FLAG_* 735 * @flag: %RX_FLAG_*
713 * @rx_flags: internal RX flags for mac80211 736 * @rx_flags: internal RX flags for mac80211
737 * @ampdu_reference: A-MPDU reference number, must be a different value for
738 * each A-MPDU but the same for each subframe within one A-MPDU
739 * @ampdu_delimiter_crc: A-MPDU delimiter CRC
714 */ 740 */
715struct ieee80211_rx_status { 741struct ieee80211_rx_status {
716 u64 mactime; 742 u64 mactime;
717 u32 device_timestamp; 743 u32 device_timestamp;
718 u16 flag; 744 u32 ampdu_reference;
745 u32 flag;
719 u16 freq; 746 u16 freq;
720 u8 rate_idx; 747 u8 rate_idx;
721 u8 rx_flags; 748 u8 rx_flags;
722 u8 band; 749 u8 band;
723 u8 antenna; 750 u8 antenna;
724 s8 signal; 751 s8 signal;
752 u8 ampdu_delimiter_crc;
725}; 753};
726 754
727/** 755/**
@@ -1074,6 +1102,16 @@ enum sta_notify_cmd {
1074}; 1102};
1075 1103
1076/** 1104/**
1105 * struct ieee80211_tx_control - TX control data
1106 *
1107 * @sta: station table entry, this sta pointer may be NULL and
1108 * it is not allowed to copy the pointer, due to RCU.
1109 */
1110struct ieee80211_tx_control {
1111 struct ieee80211_sta *sta;
1112};
1113
1114/**
1077 * enum ieee80211_hw_flags - hardware flags 1115 * enum ieee80211_hw_flags - hardware flags
1078 * 1116 *
1079 * These flags are used to indicate hardware capabilities to 1117 * These flags are used to indicate hardware capabilities to
@@ -1203,6 +1241,10 @@ enum sta_notify_cmd {
1203 * queue mapping in order to use different queues (not just one per AC) 1241 * queue mapping in order to use different queues (not just one per AC)
1204 * for different virtual interfaces. See the doc section on HW queue 1242 * for different virtual interfaces. See the doc section on HW queue
1205 * control for more details. 1243 * control for more details.
1244 *
1245 * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
1246 * P2P Interface. This will be honoured even if more than one interface
1247 * is supported.
1206 */ 1248 */
1207enum ieee80211_hw_flags { 1249enum ieee80211_hw_flags {
1208 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 1250 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1230,6 +1272,7 @@ enum ieee80211_hw_flags {
1230 IEEE80211_HW_AP_LINK_PS = 1<<22, 1272 IEEE80211_HW_AP_LINK_PS = 1<<22,
1231 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, 1273 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
1232 IEEE80211_HW_SCAN_WHILE_IDLE = 1<<24, 1274 IEEE80211_HW_SCAN_WHILE_IDLE = 1<<24,
1275 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
1233}; 1276};
1234 1277
1235/** 1278/**
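A driver honouring the new behaviour would simply advertise the bit alongside its other capabilities; an illustrative line only:

        hw->flags |= IEEE80211_HW_SCAN_WHILE_IDLE |
                     IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF;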
@@ -1884,10 +1927,14 @@ enum ieee80211_frame_release_type {
1884 * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit 1927 * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
1885 * to this station changed. 1928 * to this station changed.
1886 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed. 1929 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
1930 * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
1931 * changed (in IBSS mode) due to discovering more information about
1932 * the peer.
1887 */ 1933 */
1888enum ieee80211_rate_control_changed { 1934enum ieee80211_rate_control_changed {
1889 IEEE80211_RC_BW_CHANGED = BIT(0), 1935 IEEE80211_RC_BW_CHANGED = BIT(0),
1890 IEEE80211_RC_SMPS_CHANGED = BIT(1), 1936 IEEE80211_RC_SMPS_CHANGED = BIT(1),
1937 IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
1891}; 1938};
1892 1939
1893/** 1940/**
@@ -2264,7 +2311,9 @@ enum ieee80211_rate_control_changed {
2264 * The callback is optional and can (should!) sleep. 2311 * The callback is optional and can (should!) sleep.
2265 */ 2312 */
2266struct ieee80211_ops { 2313struct ieee80211_ops {
2267 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 2314 void (*tx)(struct ieee80211_hw *hw,
2315 struct ieee80211_tx_control *control,
2316 struct sk_buff *skb);
2268 int (*start)(struct ieee80211_hw *hw); 2317 int (*start)(struct ieee80211_hw *hw);
2269 void (*stop)(struct ieee80211_hw *hw); 2318 void (*stop)(struct ieee80211_hw *hw);
2270#ifdef CONFIG_PM 2319#ifdef CONFIG_PM
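With the changed prototype, drivers receive the station pointer through struct ieee80211_tx_control instead of fishing it out of the skb. A hedged sketch of an updated callback; drv_tx and drv_queue_frame are illustrative names, not from this merge:

        static void drv_tx(struct ieee80211_hw *hw,
                           struct ieee80211_tx_control *control,
                           struct sk_buff *skb)
        {
                /* control->sta may be NULL and is RCU-protected: use it
                 * within this call only and never store a copy of it.
                 */
                drv_queue_frame(hw, control->sta, skb);
        }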
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index e1ce1048fe5f..4a045cda9c60 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
18 u16 ctmask; /* bitmask of ct events to be delivered */ 18 u16 ctmask; /* bitmask of ct events to be delivered */
19 u16 expmask; /* bitmask of expect events to be delivered */ 19 u16 expmask; /* bitmask of expect events to be delivered */
20 u32 pid; /* netlink pid of destroyer */ 20 u32 pid; /* netlink pid of destroyer */
21 struct timer_list timeout;
21}; 22};
22 23
23static inline struct nf_conntrack_ecache * 24static inline struct nf_conntrack_ecache *
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index ace280d19a20..7d00583d53da 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -5,6 +5,7 @@
5#ifndef __NETNS_IPV4_H__ 5#ifndef __NETNS_IPV4_H__
6#define __NETNS_IPV4_H__ 6#define __NETNS_IPV4_H__
7 7
8#include <linux/uidgid.h>
8#include <net/inet_frag.h> 9#include <net/inet_frag.h>
9 10
10struct tcpm_hash_bucket; 11struct tcpm_hash_bucket;
@@ -60,7 +61,7 @@ struct netns_ipv4 {
60 int sysctl_icmp_ratemask; 61 int sysctl_icmp_ratemask;
61 int sysctl_icmp_errors_use_inbound_ifaddr; 62 int sysctl_icmp_errors_use_inbound_ifaddr;
62 63
63 unsigned int sysctl_ping_group_range[2]; 64 kgid_t sysctl_ping_group_range[2];
64 long sysctl_tcp_mem[3]; 65 long sysctl_tcp_mem[3];
65 66
66 atomic_t rt_genid; 67 atomic_t rt_genid;
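Storing the range as kgid_t means a sysctl handler must convert values relative to the writer's user namespace; a sketch of the conversion step, with an assumed input variable and error handling trimmed:

        kgid_t low = make_kgid(current_user_ns(), user_value);

        if (!gid_valid(low))
                return -EINVAL;
        net->ipv4.sysctl_ping_group_range[0] = low;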
diff --git a/include/net/netns/packet.h b/include/net/netns/packet.h
index 4780b080a436..17ec2b95c062 100644
--- a/include/net/netns/packet.h
+++ b/include/net/netns/packet.h
@@ -5,7 +5,7 @@
5#define __NETNS_PACKET_H__ 5#define __NETNS_PACKET_H__
6 6
7#include <linux/rculist.h> 7#include <linux/rculist.h>
8#include <linux/spinlock.h> 8#include <linux/mutex.h>
9 9
10struct netns_packet { 10struct netns_packet {
11 struct mutex sklist_lock; 11 struct mutex sklist_lock;
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 4c0766e201e3..b01d8dd9ee7c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -106,6 +106,34 @@ struct listen_sock {
106 struct request_sock *syn_table[0]; 106 struct request_sock *syn_table[0];
107}; 107};
108 108
109/*
110 * For a TCP Fast Open listener -
 111 * lock - protects access to all the reqsks, each of which is co-owned by
112 * the listener and the child socket.
113 * qlen - pending TFO requests (still in TCP_SYN_RECV).
114 * max_qlen - max TFO reqs allowed before TFO is disabled.
115 *
 116 * XXX (TFO) - ideally these fields would be part of the "listen_sock"
 117 * structure above, but that is difficult to implement because
 118 * listen_sock is part of request_sock_queue and hence is freed when
119 * a listener is stopped. But TFO related fields may continue to be
120 * accessed even after a listener is closed, until its sk_refcnt drops
121 * to 0 implying no more outstanding TFO reqs. One solution is to keep
122 * listen_opt around until sk_refcnt drops to 0. But there is some other
123 * complexity that needs to be resolved. E.g., a listener can be disabled
124 * temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
125 */
126struct fastopen_queue {
127 struct request_sock *rskq_rst_head; /* Keep track of past TFO */
128 struct request_sock *rskq_rst_tail; /* requests that caused RST.
129 * This is part of the defense
130 * against spoofing attack.
131 */
132 spinlock_t lock;
133 int qlen; /* # of pending (TCP_SYN_RECV) reqs */
134 int max_qlen; /* != 0 iff TFO is currently enabled */
135};
136
109/** struct request_sock_queue - queue of request_socks 137/** struct request_sock_queue - queue of request_socks
110 * 138 *
111 * @rskq_accept_head - FIFO head of established children 139 * @rskq_accept_head - FIFO head of established children
@@ -129,6 +157,12 @@ struct request_sock_queue {
129 u8 rskq_defer_accept; 157 u8 rskq_defer_accept;
130 /* 3 bytes hole, try to pack */ 158 /* 3 bytes hole, try to pack */
131 struct listen_sock *listen_opt; 159 struct listen_sock *listen_opt;
160 struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
161 * enabled on this listener. Check
162 * max_qlen != 0 in fastopen_queue
163 * to determine if TFO is enabled
164 * right at this moment.
165 */
132}; 166};
133 167
134extern int reqsk_queue_alloc(struct request_sock_queue *queue, 168extern int reqsk_queue_alloc(struct request_sock_queue *queue,
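Following the comments above, a listener's current TFO state can be probed with a NULL check plus max_qlen; a minimal sketch (the helper name is hypothetical):

        static bool tfo_enabled(const struct request_sock_queue *queue)
        {
                /* fastopenq is non-NULL once TFO has been enabled on the
                 * listener; max_qlen != 0 means it is enabled right now.
                 */
                return queue->fastopenq && queue->fastopenq->max_qlen != 0;
        }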
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
136 170
137extern void __reqsk_queue_destroy(struct request_sock_queue *queue); 171extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
138extern void reqsk_queue_destroy(struct request_sock_queue *queue); 172extern void reqsk_queue_destroy(struct request_sock_queue *queue);
173extern void reqsk_fastopen_remove(struct sock *sk,
174 struct request_sock *req, bool reset);
139 175
140static inline struct request_sock * 176static inline struct request_sock *
141 reqsk_queue_yank_acceptq(struct request_sock_queue *queue) 177 reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
@@ -190,19 +226,6 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
190 return req; 226 return req;
191} 227}
192 228
193static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
194 struct sock *parent)
195{
196 struct request_sock *req = reqsk_queue_remove(queue);
197 struct sock *child = req->sk;
198
199 WARN_ON(child == NULL);
200
201 sk_acceptq_removed(parent);
202 __reqsk_free(req);
203 return child;
204}
205
206static inline int reqsk_queue_removed(struct request_sock_queue *queue, 229static inline int reqsk_queue_removed(struct request_sock_queue *queue,
207 struct request_sock *req) 230 struct request_sock *req)
208{ 231{
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d9611e032418..4616f468d599 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -188,7 +188,8 @@ struct tcf_proto_ops {
188 188
189 unsigned long (*get)(struct tcf_proto*, u32 handle); 189 unsigned long (*get)(struct tcf_proto*, u32 handle);
190 void (*put)(struct tcf_proto*, unsigned long); 190 void (*put)(struct tcf_proto*, unsigned long);
191 int (*change)(struct tcf_proto*, unsigned long, 191 int (*change)(struct sk_buff *,
192 struct tcf_proto*, unsigned long,
192 u32 handle, struct nlattr **, 193 u32 handle, struct nlattr **,
193 unsigned long *); 194 unsigned long *);
194 int (*delete)(struct tcf_proto*, unsigned long); 195 int (*delete)(struct tcf_proto*, unsigned long);
diff --git a/include/net/sock.h b/include/net/sock.h
index 72132aef53fc..84bdaeca1314 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -606,6 +606,15 @@ static inline void sk_add_bind_node(struct sock *sk,
606#define sk_for_each_bound(__sk, node, list) \ 606#define sk_for_each_bound(__sk, node, list) \
607 hlist_for_each_entry(__sk, node, list, sk_bind_node) 607 hlist_for_each_entry(__sk, node, list, sk_bind_node)
608 608
609static inline struct user_namespace *sk_user_ns(struct sock *sk)
610{
 611 /* Careful: only use this in a context where these parameters
 612 * cannot change and are all valid, such as recvmsg from
 613 * userspace.
614 */
615 return sk->sk_socket->file->f_cred->user_ns;
616}
617
609/* Sock flags */ 618/* Sock flags */
610enum sock_flags { 619enum sock_flags {
611 SOCK_DEAD, 620 SOCK_DEAD,
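A hedged sketch of the intended use: mapping a socket's owner uid into the user namespace of the process inspecting it, along the lines of the seq-file handlers converted elsewhere in this merge (note sock_i_uid() now returns kuid_t, per the hunk further down):

        kuid_t kuid = sock_i_uid(sk);
        uid_t uid = from_kuid_munged(sk_user_ns(sk), kuid);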
@@ -1670,7 +1679,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
1670 write_unlock_bh(&sk->sk_callback_lock); 1679 write_unlock_bh(&sk->sk_callback_lock);
1671} 1680}
1672 1681
1673extern int sock_i_uid(struct sock *sk); 1682extern kuid_t sock_i_uid(struct sock *sk);
1674extern unsigned long sock_i_ino(struct sock *sk); 1683extern unsigned long sock_i_ino(struct sock *sk);
1675 1684
1676static inline struct dst_entry * 1685static inline struct dst_entry *
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1f000ffe7075..1421b02a7905 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -98,11 +98,21 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
98 * 15 is ~13-30min depending on RTO. 98 * 15 is ~13-30min depending on RTO.
99 */ 99 */
100 100
101#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a 101#define TCP_SYN_RETRIES 6 /* This is how many retries are done
102 * connection: ~180sec is RFC minimum */ 102 * when active opening a connection.
103 * RFC1122 says the minimum retry MUST
104 * be at least 180secs. Nevertheless
 105 * this value corresponds to
106 * 63secs of retransmission with the
107 * current initial RTO.
108 */
103 109
104#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a 110#define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
105 * connection: ~180sec is RFC minimum */ 111 * when passive opening a connection.
112 * This is corresponding to 31secs of
113 * retransmission with the current
114 * initial RTO.
115 */
106 116
107#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT 117#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
108 * state, about 60 seconds */ 118 * state, about 60 seconds */
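The figures quoted in these comments follow from exponential backoff on the current 1 second initial RTO: retransmissions fire after 1, 2, 4, 8, 16 and 32 seconds, so six active-open retries add up to 1 + 2 + 4 + 8 + 16 + 32 = 63 seconds, while five passive-open retries give 1 + 2 + 4 + 8 + 16 = 31 seconds.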
@@ -214,8 +224,24 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
214 224
215/* Bit Flags for sysctl_tcp_fastopen */ 225/* Bit Flags for sysctl_tcp_fastopen */
216#define TFO_CLIENT_ENABLE 1 226#define TFO_CLIENT_ENABLE 1
227#define TFO_SERVER_ENABLE 2
217#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */ 228#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
218 229
230/* Process SYN data but skip cookie validation */
231#define TFO_SERVER_COOKIE_NOT_CHKED 0x100
232/* Accept SYN data w/o any cookie option */
233#define TFO_SERVER_COOKIE_NOT_REQD 0x200
234
235/* Force enable TFO on all listeners, i.e., not requiring the
236 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
237 */
238#define TFO_SERVER_WO_SOCKOPT1 0x400
239#define TFO_SERVER_WO_SOCKOPT2 0x800
240/* Always create TFO child sockets on a TFO listener even when
 241 * cookie/data not present. (For testing purposes!)
242 */
243#define TFO_SERVER_ALWAYS 0x1000
244
219extern struct inet_timewait_death_row tcp_death_row; 245extern struct inet_timewait_death_row tcp_death_row;
220 246
221/* sysctl variables for tcp */ 247/* sysctl variables for tcp */
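These flags are meant to be OR-ed together into the tcp_fastopen sysctl value; a hedged example of one possible composition (the chosen policy is purely illustrative):

        /* client + server enabled, server accepts data without a cookie */
        int tfo = TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE |
                  TFO_SERVER_COOKIE_NOT_REQD;           /* == 0x203 */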
@@ -398,7 +424,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *
398 const struct tcphdr *th); 424 const struct tcphdr *th);
399extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, 425extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
400 struct request_sock *req, 426 struct request_sock *req,
401 struct request_sock **prev); 427 struct request_sock **prev,
428 bool fastopen);
402extern int tcp_child_process(struct sock *parent, struct sock *child, 429extern int tcp_child_process(struct sock *parent, struct sock *child,
403 struct sk_buff *skb); 430 struct sk_buff *skb);
404extern bool tcp_use_frto(struct sock *sk); 431extern bool tcp_use_frto(struct sock *sk);
@@ -411,12 +438,6 @@ extern void tcp_metrics_init(void);
411extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check); 438extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
412extern bool tcp_remember_stamp(struct sock *sk); 439extern bool tcp_remember_stamp(struct sock *sk);
413extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw); 440extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
414extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
415 struct tcp_fastopen_cookie *cookie,
416 int *syn_loss, unsigned long *last_syn_loss);
417extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
418 struct tcp_fastopen_cookie *cookie,
419 bool syn_lost);
420extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); 441extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
421extern void tcp_disable_fack(struct tcp_sock *tp); 442extern void tcp_disable_fack(struct tcp_sock *tp);
422extern void tcp_close(struct sock *sk, long timeout); 443extern void tcp_close(struct sock *sk, long timeout);
@@ -458,7 +479,8 @@ extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
458extern int tcp_connect(struct sock *sk); 479extern int tcp_connect(struct sock *sk);
459extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 480extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
460 struct request_sock *req, 481 struct request_sock *req,
461 struct request_values *rvp); 482 struct request_values *rvp,
483 struct tcp_fastopen_cookie *foc);
462extern int tcp_disconnect(struct sock *sk, int flags); 484extern int tcp_disconnect(struct sock *sk, int flags);
463 485
464void tcp_connect_init(struct sock *sk); 486void tcp_connect_init(struct sock *sk);
@@ -527,6 +549,7 @@ extern void tcp_send_delayed_ack(struct sock *sk);
527extern void tcp_cwnd_application_limited(struct sock *sk); 549extern void tcp_cwnd_application_limited(struct sock *sk);
528extern void tcp_resume_early_retransmit(struct sock *sk); 550extern void tcp_resume_early_retransmit(struct sock *sk);
529extern void tcp_rearm_rto(struct sock *sk); 551extern void tcp_rearm_rto(struct sock *sk);
552extern void tcp_reset(struct sock *sk);
530 553
531/* tcp_timer.c */ 554/* tcp_timer.c */
532extern void tcp_init_xmit_timers(struct sock *); 555extern void tcp_init_xmit_timers(struct sock *);
@@ -576,6 +599,7 @@ extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
576extern int tcp_mss_to_mtu(struct sock *sk, int mss); 599extern int tcp_mss_to_mtu(struct sock *sk, int mss);
577extern void tcp_mtup_init(struct sock *sk); 600extern void tcp_mtup_init(struct sock *sk);
578extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); 601extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
602extern void tcp_init_buffer_space(struct sock *sk);
579 603
580static inline void tcp_bound_rto(const struct sock *sk) 604static inline void tcp_bound_rto(const struct sock *sk)
581{ 605{
@@ -1094,6 +1118,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
1094 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ 1118 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
1095 req->cookie_ts = 0; 1119 req->cookie_ts = 0;
1096 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; 1120 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1121 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1097 req->mss = rx_opt->mss_clamp; 1122 req->mss = rx_opt->mss_clamp;
1098 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; 1123 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1099 ireq->tstamp_ok = rx_opt->tstamp_ok; 1124 ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1298,15 +1323,34 @@ extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff
1298extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1323extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1299 const struct tcp_md5sig_key *key); 1324 const struct tcp_md5sig_key *key);
1300 1325
1326/* From tcp_fastopen.c */
1327extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1328 struct tcp_fastopen_cookie *cookie,
1329 int *syn_loss, unsigned long *last_syn_loss);
1330extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1331 struct tcp_fastopen_cookie *cookie,
1332 bool syn_lost);
1301struct tcp_fastopen_request { 1333struct tcp_fastopen_request {
1302 /* Fast Open cookie. Size 0 means a cookie request */ 1334 /* Fast Open cookie. Size 0 means a cookie request */
1303 struct tcp_fastopen_cookie cookie; 1335 struct tcp_fastopen_cookie cookie;
1304 struct msghdr *data; /* data in MSG_FASTOPEN */ 1336 struct msghdr *data; /* data in MSG_FASTOPEN */
1305 u16 copied; /* queued in tcp_connect() */ 1337 u16 copied; /* queued in tcp_connect() */
1306}; 1338};
1307
1308void tcp_free_fastopen_req(struct tcp_sock *tp); 1339void tcp_free_fastopen_req(struct tcp_sock *tp);
1309 1340
1341extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1342int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1343void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
1344
1345#define TCP_FASTOPEN_KEY_LENGTH 16
1346
1347/* Fastopen key context */
1348struct tcp_fastopen_context {
1349 struct crypto_cipher __rcu *tfm;
1350 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
1351 struct rcu_head rcu;
1352};
1353
1310/* write queue abstraction */ 1354/* write queue abstraction */
1311static inline void tcp_write_queue_purge(struct sock *sk) 1355static inline void tcp_write_queue_purge(struct sock *sk)
1312{ 1356{
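The key context declared above is RCU-managed; a hedged sketch of a reader, where the consumer function is hypothetical:

        struct tcp_fastopen_context *ctx;

        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx)
                use_key(ctx->key, TCP_FASTOPEN_KEY_LENGTH); /* hypothetical */
        rcu_read_unlock();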
@@ -1510,7 +1554,8 @@ struct tcp_iter_state {
1510 sa_family_t family; 1554 sa_family_t family;
1511 enum tcp_seq_states state; 1555 enum tcp_seq_states state;
1512 struct sock *syn_wait_sk; 1556 struct sock *syn_wait_sk;
1513 int bucket, offset, sbucket, num, uid; 1557 int bucket, offset, sbucket, num;
1558 kuid_t uid;
1514 loff_t last_pos; 1559 loff_t last_pos;
1515}; 1560};
1516 1561
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 128ce46fa48a..015cea01ae39 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -503,8 +503,6 @@ struct se_cmd {
503 u32 se_ordered_id; 503 u32 se_ordered_id;
504 /* Total size in bytes associated with command */ 504 /* Total size in bytes associated with command */
505 u32 data_length; 505 u32 data_length;
506 /* SCSI Presented Data Transfer Length */
507 u32 cmd_spdtl;
508 u32 residual_count; 506 u32 residual_count;
509 u32 orig_fe_lun; 507 u32 orig_fe_lun;
510 /* Persistent Reservation key */ 508 /* Persistent Reservation key */
diff --git a/init/Kconfig b/init/Kconfig
index af6c7f8ba019..b445d6f49bcf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -942,28 +942,12 @@ config UIDGID_CONVERTED
942 depends on PROC_EVENTS = n 942 depends on PROC_EVENTS = n
943 943
944 # Networking 944 # Networking
945 depends on NET = n
946 depends on NET_9P = n 945 depends on NET_9P = n
947 depends on IPX = n
948 depends on PHONET = n
949 depends on NET_CLS_FLOW = n
950 depends on NETFILTER_XT_MATCH_OWNER = n
951 depends on NETFILTER_XT_MATCH_RECENT = n
952 depends on NETFILTER_XT_TARGET_LOG = n
953 depends on NETFILTER_NETLINK_LOG = n
954 depends on INET = n
955 depends on IPV6 = n
956 depends on IP_SCTP = n
957 depends on AF_RXRPC = n 946 depends on AF_RXRPC = n
958 depends on LLC2 = n
959 depends on NET_KEY = n 947 depends on NET_KEY = n
960 depends on INET_DIAG = n
961 depends on DNS_RESOLVER = n 948 depends on DNS_RESOLVER = n
962 depends on AX25 = n
963 depends on ATALK = n
964 949
965 # Filesystems 950 # Filesystems
966 depends on USB_DEVICEFS = n
967 depends on USB_GADGETFS = n 951 depends on USB_GADGETFS = n
968 depends on USB_FUNCTIONFS = n 952 depends on USB_FUNCTIONFS = n
969 depends on DEVTMPFS = n 953 depends on DEVTMPFS = n
@@ -1019,9 +1003,6 @@ config UIDGID_CONVERTED
1019 depends on !UML || HOSTFS = n 1003 depends on !UML || HOSTFS = n
1020 1004
1021 # The rare drivers that won't build 1005 # The rare drivers that won't build
1022 depends on AIRO = n
1023 depends on AIRO_CS = n
1024 depends on TUN = n
1025 depends on INFINIBAND_QIB = n 1006 depends on INFINIBAND_QIB = n
1026 depends on BLK_DEV_LOOP = n 1007 depends on BLK_DEV_LOOP = n
1027 depends on ANDROID_BINDER_IPC = n 1008 depends on ANDROID_BINDER_IPC = n
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index f8e54f5b9080..9a08acc9e649 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
726 struct mq_attr *attr) 726 struct mq_attr *attr)
727{ 727{
728 const struct cred *cred = current_cred(); 728 const struct cred *cred = current_cred();
729 struct file *result;
730 int ret; 729 int ret;
731 730
732 if (attr) { 731 if (attr) {
@@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
748 } 747 }
749 748
750 mode &= ~current_umask(); 749 mode &= ~current_umask();
751 ret = mnt_want_write(path->mnt);
752 if (ret)
753 return ERR_PTR(ret);
754 ret = vfs_create(dir, path->dentry, mode, true); 750 ret = vfs_create(dir, path->dentry, mode, true);
755 path->dentry->d_fsdata = NULL; 751 path->dentry->d_fsdata = NULL;
756 if (!ret) 752 if (ret)
757 result = dentry_open(path, oflag, cred); 753 return ERR_PTR(ret);
758 else 754 return dentry_open(path, oflag, cred);
759 result = ERR_PTR(ret);
760 /*
761 * dentry_open() took a persistent mnt_want_write(),
762 * so we can now drop this one.
763 */
764 mnt_drop_write(path->mnt);
765 return result;
766} 755}
767 756
768/* Opens existing queue */ 757/* Opens existing queue */
@@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
788 struct mq_attr attr; 777 struct mq_attr attr;
789 int fd, error; 778 int fd, error;
790 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 779 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
791 struct dentry *root = ipc_ns->mq_mnt->mnt_root; 780 struct vfsmount *mnt = ipc_ns->mq_mnt;
781 struct dentry *root = mnt->mnt_root;
782 int ro;
792 783
793 if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) 784 if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
794 return -EFAULT; 785 return -EFAULT;
@@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
802 if (fd < 0) 793 if (fd < 0)
803 goto out_putname; 794 goto out_putname;
804 795
796 ro = mnt_want_write(mnt); /* we'll drop it in any case */
805 error = 0; 797 error = 0;
806 mutex_lock(&root->d_inode->i_mutex); 798 mutex_lock(&root->d_inode->i_mutex);
807 path.dentry = lookup_one_len(name, root, strlen(name)); 799 path.dentry = lookup_one_len(name, root, strlen(name));
@@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
809 error = PTR_ERR(path.dentry); 801 error = PTR_ERR(path.dentry);
810 goto out_putfd; 802 goto out_putfd;
811 } 803 }
812 path.mnt = mntget(ipc_ns->mq_mnt); 804 path.mnt = mntget(mnt);
813 805
814 if (oflag & O_CREAT) { 806 if (oflag & O_CREAT) {
815 if (path.dentry->d_inode) { /* entry already exists */ 807 if (path.dentry->d_inode) { /* entry already exists */
@@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
820 } 812 }
821 filp = do_open(&path, oflag); 813 filp = do_open(&path, oflag);
822 } else { 814 } else {
815 if (ro) {
816 error = ro;
817 goto out;
818 }
823 filp = do_create(ipc_ns, root->d_inode, 819 filp = do_create(ipc_ns, root->d_inode,
824 &path, oflag, mode, 820 &path, oflag, mode,
825 u_attr ? &attr : NULL); 821 u_attr ? &attr : NULL);
@@ -845,6 +841,7 @@ out_putfd:
845 fd = error; 841 fd = error;
846 } 842 }
847 mutex_unlock(&root->d_inode->i_mutex); 843 mutex_unlock(&root->d_inode->i_mutex);
844 mnt_drop_write(mnt);
848out_putname: 845out_putname:
849 putname(name); 846 putname(name);
850 return fd; 847 return fd;
@@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
857 struct dentry *dentry; 854 struct dentry *dentry;
858 struct inode *inode = NULL; 855 struct inode *inode = NULL;
859 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 856 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
857 struct vfsmount *mnt = ipc_ns->mq_mnt;
860 858
861 name = getname(u_name); 859 name = getname(u_name);
862 if (IS_ERR(name)) 860 if (IS_ERR(name))
863 return PTR_ERR(name); 861 return PTR_ERR(name);
864 862
865 mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex, 863 err = mnt_want_write(mnt);
866 I_MUTEX_PARENT); 864 if (err)
867 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); 865 goto out_name;
866 mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
867 dentry = lookup_one_len(name, mnt->mnt_root, strlen(name));
868 if (IS_ERR(dentry)) { 868 if (IS_ERR(dentry)) {
869 err = PTR_ERR(dentry); 869 err = PTR_ERR(dentry);
870 goto out_unlock; 870 goto out_unlock;
871 } 871 }
872 872
873 if (!dentry->d_inode) {
874 err = -ENOENT;
875 goto out_err;
876 }
877
878 inode = dentry->d_inode; 873 inode = dentry->d_inode;
879 if (inode) 874 if (!inode) {
875 err = -ENOENT;
876 } else {
880 ihold(inode); 877 ihold(inode);
881 err = mnt_want_write(ipc_ns->mq_mnt); 878 err = vfs_unlink(dentry->d_parent->d_inode, dentry);
882 if (err) 879 }
883 goto out_err;
884 err = vfs_unlink(dentry->d_parent->d_inode, dentry);
885 mnt_drop_write(ipc_ns->mq_mnt);
886out_err:
887 dput(dentry); 880 dput(dentry);
888 881
889out_unlock: 882out_unlock:
890 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); 883 mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
891 putname(name);
892 if (inode) 884 if (inode)
893 iput(inode); 885 iput(inode);
886 mnt_drop_write(mnt);
887out_name:
888 putname(name);
894 889
895 return err; 890 return err;
896} 891}
diff --git a/kernel/pid.c b/kernel/pid.c
index e86b291ad834..aebd4f5aaf41 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -479,6 +479,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
479 } 479 }
480 return nr; 480 return nr;
481} 481}
482EXPORT_SYMBOL_GPL(pid_nr_ns);
482 483
483pid_t pid_vnr(struct pid *pid) 484pid_t pid_vnr(struct pid *pid)
484{ 485{
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index b3c7fd554250..baa528d7dfbd 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/reboot.h> 18#include <linux/reboot.h>
19#include <linux/export.h>
19 20
20#define BITS_PER_PAGE (PAGE_SIZE*8) 21#define BITS_PER_PAGE (PAGE_SIZE*8)
21 22
@@ -144,6 +145,7 @@ void free_pid_ns(struct kref *kref)
144 if (parent != NULL) 145 if (parent != NULL)
145 put_pid_ns(parent); 146 put_pid_ns(parent);
146} 147}
148EXPORT_SYMBOL_GPL(free_pid_ns);
147 149
148void zap_pid_ns_processes(struct pid_namespace *pid_ns) 150void zap_pid_ns_processes(struct pid_namespace *pid_ns)
149{ 151{
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..18eca7809b08 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
22 [NLA_U64] = sizeof(u64), 22 [NLA_U64] = sizeof(u64),
23 [NLA_MSECS] = sizeof(u64), 23 [NLA_MSECS] = sizeof(u64),
24 [NLA_NESTED] = NLA_HDRLEN, 24 [NLA_NESTED] = NLA_HDRLEN,
25 [NLA_S8] = sizeof(s8),
26 [NLA_S16] = sizeof(s16),
27 [NLA_S32] = sizeof(s32),
28 [NLA_S64] = sizeof(s64),
25}; 29};
26 30
27static int validate_nla(const struct nlattr *nla, int maxtype, 31static int validate_nla(const struct nlattr *nla, int maxtype,
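With minimum lengths registered for the signed types, validate_nla() length-checks them just like the unsigned attributes; a hypothetical policy entry:

        static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
                [MY_ATTR_TEMP] = { .type = NLA_S32 },   /* >= sizeof(s32) */
        };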
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8ca533c95de0..b258da88f675 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -368,3 +368,9 @@ void vlan_vids_del_by_dev(struct net_device *dev,
368 vlan_vid_del(dev, vid_info->vid); 368 vlan_vid_del(dev, vid_info->vid);
369} 369}
370EXPORT_SYMBOL(vlan_vids_del_by_dev); 370EXPORT_SYMBOL(vlan_vids_del_by_dev);
371
372bool vlan_uses_dev(const struct net_device *dev)
373{
374 return rtnl_dereference(dev->vlan_info) ? true : false;
375}
376EXPORT_SYMBOL(vlan_uses_dev);
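Since vlan_uses_dev() relies on rtnl_dereference(), callers must hold RTNL; an illustrative caller sketch:

        ASSERT_RTNL();          /* rtnl_dereference() in vlan_uses_dev() */
        if (vlan_uses_dev(dev))
                return -EBUSY;  /* e.g. refuse reconfiguration for now */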
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index b5b1a221c242..c30f3a0717fb 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -183,7 +183,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
183 ntohs(at->dest_net), at->dest_node, at->dest_port, 183 ntohs(at->dest_net), at->dest_node, at->dest_port,
184 sk_wmem_alloc_get(s), 184 sk_wmem_alloc_get(s),
185 sk_rmem_alloc_get(s), 185 sk_rmem_alloc_get(s),
186 s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); 186 s->sk_state,
187 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
187out: 188out:
188 return 0; 189 return 0;
189} 190}
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 23f45ce6f351..0447d5d0b639 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -432,7 +432,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
432 size = dev->ops->ioctl(dev, cmd, buf); 432 size = dev->ops->ioctl(dev, cmd, buf);
433 } 433 }
434 if (size < 0) { 434 if (size < 0) {
435 error = (size == -ENOIOCTLCMD ? -EINVAL : size); 435 error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
436 goto done; 436 goto done;
437 } 437 }
438 } 438 }
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index e3c579ba6325..957999e43ff7 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -51,14 +51,14 @@ int ax25_uid_policy;
51 51
52EXPORT_SYMBOL(ax25_uid_policy); 52EXPORT_SYMBOL(ax25_uid_policy);
53 53
54ax25_uid_assoc *ax25_findbyuid(uid_t uid) 54ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
55{ 55{
56 ax25_uid_assoc *ax25_uid, *res = NULL; 56 ax25_uid_assoc *ax25_uid, *res = NULL;
57 struct hlist_node *node; 57 struct hlist_node *node;
58 58
59 read_lock(&ax25_uid_lock); 59 read_lock(&ax25_uid_lock);
60 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 60 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
61 if (ax25_uid->uid == uid) { 61 if (uid_eq(ax25_uid->uid, uid)) {
62 ax25_uid_hold(ax25_uid); 62 ax25_uid_hold(ax25_uid);
63 res = ax25_uid; 63 res = ax25_uid;
64 break; 64 break;
@@ -84,7 +84,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
84 read_lock(&ax25_uid_lock); 84 read_lock(&ax25_uid_lock);
85 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) { 85 ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
86 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) { 86 if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
87 res = ax25_uid->uid; 87 res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
88 break; 88 break;
89 } 89 }
90 } 90 }
@@ -93,9 +93,14 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
93 return res; 93 return res;
94 94
95 case SIOCAX25ADDUID: 95 case SIOCAX25ADDUID:
96 {
97 kuid_t sax25_kuid;
96 if (!capable(CAP_NET_ADMIN)) 98 if (!capable(CAP_NET_ADMIN))
97 return -EPERM; 99 return -EPERM;
98 user = ax25_findbyuid(sax->sax25_uid); 100 sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
101 if (!uid_valid(sax25_kuid))
102 return -EINVAL;
103 user = ax25_findbyuid(sax25_kuid);
99 if (user) { 104 if (user) {
100 ax25_uid_put(user); 105 ax25_uid_put(user);
101 return -EEXIST; 106 return -EEXIST;
@@ -106,7 +111,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
106 return -ENOMEM; 111 return -ENOMEM;
107 112
108 atomic_set(&ax25_uid->refcount, 1); 113 atomic_set(&ax25_uid->refcount, 1);
109 ax25_uid->uid = sax->sax25_uid; 114 ax25_uid->uid = sax25_kuid;
110 ax25_uid->call = sax->sax25_call; 115 ax25_uid->call = sax->sax25_call;
111 116
112 write_lock(&ax25_uid_lock); 117 write_lock(&ax25_uid_lock);
@@ -114,7 +119,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
114 write_unlock(&ax25_uid_lock); 119 write_unlock(&ax25_uid_lock);
115 120
116 return 0; 121 return 0;
117 122 }
118 case SIOCAX25DELUID: 123 case SIOCAX25DELUID:
119 if (!capable(CAP_NET_ADMIN)) 124 if (!capable(CAP_NET_ADMIN))
120 return -EPERM; 125 return -EPERM;
@@ -172,7 +177,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
172 struct ax25_uid_assoc *pt; 177 struct ax25_uid_assoc *pt;
173 178
174 pt = hlist_entry(v, struct ax25_uid_assoc, uid_node); 179 pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
175 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call)); 180 seq_printf(seq, "%6d %s\n",
181 from_kuid_munged(seq_user_ns(seq), pt->uid),
182 ax2asc(buf, &pt->call));
176 } 183 }
177 return 0; 184 return 0;
178} 185}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index e877af8bdd1e..df79300dcb7b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -166,13 +166,15 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
166 int16_t buff_pos; 166 int16_t buff_pos;
167 struct batadv_ogm_packet *batadv_ogm_packet; 167 struct batadv_ogm_packet *batadv_ogm_packet;
168 struct sk_buff *skb; 168 struct sk_buff *skb;
169 uint8_t *packet_pos;
169 170
170 if (hard_iface->if_status != BATADV_IF_ACTIVE) 171 if (hard_iface->if_status != BATADV_IF_ACTIVE)
171 return; 172 return;
172 173
173 packet_num = 0; 174 packet_num = 0;
174 buff_pos = 0; 175 buff_pos = 0;
175 batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; 176 packet_pos = forw_packet->skb->data;
177 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
176 178
177 /* adjust all flags and log packets */ 179 /* adjust all flags and log packets */
178 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 180 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
181 /* we might have aggregated direct link packets with an 183 /* we might have aggregated direct link packets with an
182 * ordinary base packet 184 * ordinary base packet
183 */ 185 */
184 if ((forw_packet->direct_link_flags & (1 << packet_num)) && 186 if (forw_packet->direct_link_flags & BIT(packet_num) &&
185 (forw_packet->if_incoming == hard_iface)) 187 forw_packet->if_incoming == hard_iface)
186 batadv_ogm_packet->flags |= BATADV_DIRECTLINK; 188 batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
187 else 189 else
188 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK; 190 batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
189 191
190 fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? 192 if (packet_num > 0 || !forw_packet->own)
191 "Sending own" : 193 fwd_str = "Forwarding";
192 "Forwarding")); 194 else
195 fwd_str = "Sending own";
196
193 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 197 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
194 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", 198 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
195 fwd_str, (packet_num > 0 ? "aggregated " : ""), 199 fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
204 buff_pos += BATADV_OGM_HLEN; 208 buff_pos += BATADV_OGM_HLEN;
205 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes); 209 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
206 packet_num++; 210 packet_num++;
207 batadv_ogm_packet = (struct batadv_ogm_packet *) 211 packet_pos = forw_packet->skb->data + buff_pos;
208 (forw_packet->skb->data + buff_pos); 212 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
209 } 213 }
210 214
211 /* create clone because function is called more than once */ 215 /* create clone because function is called more than once */
@@ -227,9 +231,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
227 struct batadv_hard_iface *primary_if = NULL; 231 struct batadv_hard_iface *primary_if = NULL;
228 struct batadv_ogm_packet *batadv_ogm_packet; 232 struct batadv_ogm_packet *batadv_ogm_packet;
229 unsigned char directlink; 233 unsigned char directlink;
234 uint8_t *packet_pos;
230 235
231 batadv_ogm_packet = (struct batadv_ogm_packet *) 236 packet_pos = forw_packet->skb->data;
232 (forw_packet->skb->data); 237 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
233 directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0); 238 directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
234 239
235 if (!forw_packet->if_incoming) { 240 if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
454 int packet_len, bool direct_link) 459 int packet_len, bool direct_link)
455{ 460{
456 unsigned char *skb_buff; 461 unsigned char *skb_buff;
462 unsigned long new_direct_link_flag;
457 463
458 skb_buff = skb_put(forw_packet_aggr->skb, packet_len); 464 skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
459 memcpy(skb_buff, packet_buff, packet_len); 465 memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
461 forw_packet_aggr->num_packets++; 467 forw_packet_aggr->num_packets++;
462 468
463 /* save packet direct link flag status */ 469 /* save packet direct link flag status */
464 if (direct_link) 470 if (direct_link) {
465 forw_packet_aggr->direct_link_flags |= 471 new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
466 (1 << forw_packet_aggr->num_packets); 472 forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
473 }
467} 474}
468 475
469static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, 476static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
586 struct batadv_ogm_packet *batadv_ogm_packet; 593 struct batadv_ogm_packet *batadv_ogm_packet;
587 struct batadv_hard_iface *primary_if; 594 struct batadv_hard_iface *primary_if;
588 int vis_server, tt_num_changes = 0; 595 int vis_server, tt_num_changes = 0;
596 uint32_t seqno;
597 uint8_t bandwidth;
589 598
590 vis_server = atomic_read(&bat_priv->vis_mode); 599 vis_server = atomic_read(&bat_priv->vis_mode);
591 primary_if = batadv_primary_if_get_selected(bat_priv); 600 primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
599 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff; 608 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
600 609
601 /* change sequence number to network order */ 610 /* change sequence number to network order */
602 batadv_ogm_packet->seqno = 611 seqno = (uint32_t)atomic_read(&hard_iface->seqno);
603 htonl((uint32_t)atomic_read(&hard_iface->seqno)); 612 batadv_ogm_packet->seqno = htonl(seqno);
604 atomic_inc(&hard_iface->seqno); 613 atomic_inc(&hard_iface->seqno);
605 614
606 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); 615 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
607 batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc); 616 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
608 if (tt_num_changes >= 0) 617 if (tt_num_changes >= 0)
609 batadv_ogm_packet->tt_num_changes = tt_num_changes; 618 batadv_ogm_packet->tt_num_changes = tt_num_changes;
610 619
@@ -613,12 +622,13 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
613 else 622 else
614 batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER; 623 batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
615 624
616 if ((hard_iface == primary_if) && 625 if (hard_iface == primary_if &&
617 (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER)) 626 atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
618 batadv_ogm_packet->gw_flags = 627 bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
619 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 628 batadv_ogm_packet->gw_flags = bandwidth;
620 else 629 } else {
621 batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS; 630 batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
631 }
622 632
623 batadv_slide_own_bcast_window(hard_iface); 633 batadv_slide_own_bcast_window(hard_iface);
624 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, 634 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -642,8 +652,9 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
642 struct batadv_neigh_node *router = NULL; 652 struct batadv_neigh_node *router = NULL;
643 struct batadv_orig_node *orig_node_tmp; 653 struct batadv_orig_node *orig_node_tmp;
644 struct hlist_node *node; 654 struct hlist_node *node;
645 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 655 uint8_t sum_orig, sum_neigh;
646 uint8_t *neigh_addr; 656 uint8_t *neigh_addr;
657 uint8_t tq_avg;
647 658
648 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 659 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
649 "update_originator(): Searching and updating originator entry of received packet\n"); 660 "update_originator(): Searching and updating originator entry of received packet\n");
@@ -667,8 +678,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
667 spin_lock_bh(&tmp_neigh_node->lq_update_lock); 678 spin_lock_bh(&tmp_neigh_node->lq_update_lock);
668 batadv_ring_buffer_set(tmp_neigh_node->tq_recv, 679 batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
669 &tmp_neigh_node->tq_index, 0); 680 &tmp_neigh_node->tq_index, 0);
670 tmp_neigh_node->tq_avg = 681 tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
671 batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); 682 tmp_neigh_node->tq_avg = tq_avg;
672 spin_unlock_bh(&tmp_neigh_node->lq_update_lock); 683 spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
673 } 684 }
674 685
@@ -727,17 +738,15 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
727 if (router && (neigh_node->tq_avg == router->tq_avg)) { 738 if (router && (neigh_node->tq_avg == router->tq_avg)) {
728 orig_node_tmp = router->orig_node; 739 orig_node_tmp = router->orig_node;
729 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 740 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
730 bcast_own_sum_orig = 741 sum_orig = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
731 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
732 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); 742 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
733 743
734 orig_node_tmp = neigh_node->orig_node; 744 orig_node_tmp = neigh_node->orig_node;
735 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 745 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
736 bcast_own_sum_neigh = 746 sum_neigh = orig_node_tmp->bcast_own_sum[if_incoming->if_num];
737 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
738 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); 747 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
739 748
740 if (bcast_own_sum_orig >= bcast_own_sum_neigh) 749 if (sum_orig >= sum_neigh)
741 goto update_tt; 750 goto update_tt;
742 } 751 }
743 752
@@ -835,8 +844,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
835 spin_unlock_bh(&orig_node->ogm_cnt_lock); 844 spin_unlock_bh(&orig_node->ogm_cnt_lock);
836 845
 837 /* take care not to get a value bigger than 100 % */ 846
838 total_count = (orig_eq_count > neigh_rq_count ? 847 if (orig_eq_count > neigh_rq_count)
839 neigh_rq_count : orig_eq_count); 848 total_count = neigh_rq_count;
849 else
850 total_count = orig_eq_count;
840 851
 841 /* if we have too few packets (too little data) we set tq_own to zero 852
842 * if we receive too few packets it is not considered bidirectional 853 * if we receive too few packets it is not considered bidirectional
@@ -910,6 +921,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
910 int set_mark, ret = -1; 921 int set_mark, ret = -1;
911 uint32_t seqno = ntohl(batadv_ogm_packet->seqno); 922 uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
912 uint8_t *neigh_addr; 923 uint8_t *neigh_addr;
924 uint8_t packet_count;
913 925
914 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); 926 orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
915 if (!orig_node) 927 if (!orig_node)
@@ -944,9 +956,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
944 tmp_neigh_node->real_bits, 956 tmp_neigh_node->real_bits,
945 seq_diff, set_mark); 957 seq_diff, set_mark);
946 958
947 tmp_neigh_node->real_packet_count = 959 packet_count = bitmap_weight(tmp_neigh_node->real_bits,
948 bitmap_weight(tmp_neigh_node->real_bits, 960 BATADV_TQ_LOCAL_WINDOW_SIZE);
949 BATADV_TQ_LOCAL_WINDOW_SIZE); 961 tmp_neigh_node->real_packet_count = packet_count;
950 } 962 }
951 rcu_read_unlock(); 963 rcu_read_unlock();
952 964
@@ -1163,9 +1175,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1163 /* if sender is a direct neighbor the sender mac equals 1175 /* if sender is a direct neighbor the sender mac equals
1164 * originator mac 1176 * originator mac
1165 */ 1177 */
1166 orig_neigh_node = (is_single_hop_neigh ? 1178 if (is_single_hop_neigh)
1167 orig_node : 1179 orig_neigh_node = orig_node;
1168 batadv_get_orig_node(bat_priv, ethhdr->h_source)); 1180 else
1181 orig_neigh_node = batadv_get_orig_node(bat_priv,
1182 ethhdr->h_source);
1183
1169 if (!orig_neigh_node) 1184 if (!orig_neigh_node)
1170 goto out; 1185 goto out;
1171 1186
@@ -1251,6 +1266,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1251 int buff_pos = 0, packet_len; 1266 int buff_pos = 0, packet_len;
1252 unsigned char *tt_buff, *packet_buff; 1267 unsigned char *tt_buff, *packet_buff;
1253 bool ret; 1268 bool ret;
1269 uint8_t *packet_pos;
1254 1270
1255 ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN); 1271 ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
1256 if (!ret) 1272 if (!ret)
@@ -1281,8 +1297,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1281 buff_pos += BATADV_OGM_HLEN; 1297 buff_pos += BATADV_OGM_HLEN;
1282 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes); 1298 buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
1283 1299
1284 batadv_ogm_packet = (struct batadv_ogm_packet *) 1300 packet_pos = packet_buff + buff_pos;
1285 (packet_buff + buff_pos); 1301 batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
1286 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, 1302 } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
1287 batadv_ogm_packet->tt_num_changes)); 1303 batadv_ogm_packet->tt_num_changes));
1288 1304
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6705d35b17ce..0a9084ad19a6 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
133static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, 133static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
134 struct batadv_claim *data) 134 struct batadv_claim *data)
135{ 135{
136 struct batadv_hashtable *hash = bat_priv->claim_hash; 136 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
137 struct hlist_head *head; 137 struct hlist_head *head;
138 struct hlist_node *node; 138 struct hlist_node *node;
139 struct batadv_claim *claim; 139 struct batadv_claim *claim;
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw *
174batadv_backbone_hash_find(struct batadv_priv *bat_priv, 174batadv_backbone_hash_find(struct batadv_priv *bat_priv,
175 uint8_t *addr, short vid) 175 uint8_t *addr, short vid)
176{ 176{
177 struct batadv_hashtable *hash = bat_priv->backbone_hash; 177 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
178 struct hlist_head *head; 178 struct hlist_head *head;
179 struct hlist_node *node; 179 struct hlist_node *node;
180 struct batadv_backbone_gw search_entry, *backbone_gw; 180 struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
218 int i; 218 int i;
219 spinlock_t *list_lock; /* protects write access to the hash lists */ 219 spinlock_t *list_lock; /* protects write access to the hash lists */
220 220
221 hash = backbone_gw->bat_priv->claim_hash; 221 hash = backbone_gw->bat_priv->bla.claim_hash;
222 if (!hash) 222 if (!hash)
223 return; 223 return;
224 224
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
265 if (!primary_if) 265 if (!primary_if)
266 return; 266 return;
267 267
268 memcpy(&local_claim_dest, &bat_priv->claim_dest, 268 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
269 sizeof(local_claim_dest)); 269 sizeof(local_claim_dest));
270 local_claim_dest.type = claimtype; 270 local_claim_dest.type = claimtype;
271 271
@@ -281,7 +281,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
281 NULL, 281 NULL,
282 /* Ethernet SRC/HW SRC: originator mac */ 282 /* Ethernet SRC/HW SRC: originator mac */
283 primary_if->net_dev->dev_addr, 283 primary_if->net_dev->dev_addr,
284 /* HW DST: FF:43:05:XX:00:00 284 /* HW DST: FF:43:05:XX:YY:YY
285 * with XX = claim type 285 * with XX = claim type
286 * and YY:YY = group id 286 * and YY:YY = group id
287 */ 287 */
@@ -295,7 +295,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
295 295
296 /* now we pretend that the client would have sent this ... */ 296 /* now we pretend that the client would have sent this ... */
297 switch (claimtype) { 297 switch (claimtype) {
298 case BATADV_CLAIM_TYPE_ADD: 298 case BATADV_CLAIM_TYPE_CLAIM:
299 /* normal claim frame 299 /* normal claim frame
300 * set Ethernet SRC to the clients mac 300 * set Ethernet SRC to the clients mac
301 */ 301 */
@@ -303,7 +303,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
303 batadv_dbg(BATADV_DBG_BLA, bat_priv, 303 batadv_dbg(BATADV_DBG_BLA, bat_priv,
304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); 304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
305 break; 305 break;
306 case BATADV_CLAIM_TYPE_DEL: 306 case BATADV_CLAIM_TYPE_UNCLAIM:
307 /* unclaim frame 307 /* unclaim frame
308 * set HW SRC to the clients mac 308 * set HW SRC to the clients mac
309 */ 309 */
@@ -323,7 +323,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
323 break; 323 break;
324 case BATADV_CLAIM_TYPE_REQUEST: 324 case BATADV_CLAIM_TYPE_REQUEST:
325 /* request frame 325 /* request frame
326 * set HW SRC to the special mac containg the crc 326 * set HW SRC and header destination to the receiving backbone
 327 * gw's mac
327 */ 328 */
328 memcpy(hw_src, mac, ETH_ALEN); 329 memcpy(hw_src, mac, ETH_ALEN);
329 memcpy(ethhdr->h_dest, mac, ETH_ALEN); 330 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
339 340
340 skb_reset_mac_header(skb); 341 skb_reset_mac_header(skb);
341 skb->protocol = eth_type_trans(skb, soft_iface); 342 skb->protocol = eth_type_trans(skb, soft_iface);
342 bat_priv->stats.rx_packets++; 343 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
343 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; 344 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
345 skb->len + ETH_HLEN);
344 soft_iface->last_rx = jiffies; 346 soft_iface->last_rx = jiffies;
345 347
346 netif_rx(skb); 348 netif_rx(skb);
@@ -389,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
389 /* one for the hash, one for returning */ 391 /* one for the hash, one for returning */
390 atomic_set(&entry->refcount, 2); 392 atomic_set(&entry->refcount, 2);
391 393
392 hash_added = batadv_hash_add(bat_priv->backbone_hash, 394 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
393 batadv_compare_backbone_gw, 395 batadv_compare_backbone_gw,
394 batadv_choose_backbone_gw, entry, 396 batadv_choose_backbone_gw, entry,
395 &entry->hash_entry); 397 &entry->hash_entry);
@@ -456,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
456 if (!backbone_gw) 458 if (!backbone_gw)
457 return; 459 return;
458 460
459 hash = bat_priv->claim_hash; 461 hash = bat_priv->bla.claim_hash;
460 for (i = 0; i < hash->size; i++) { 462 for (i = 0; i < hash->size; i++) {
461 head = &hash->table[i]; 463 head = &hash->table[i];
462 464
@@ -467,7 +469,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
467 continue; 469 continue;
468 470
469 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid, 471 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
470 BATADV_CLAIM_TYPE_ADD); 472 BATADV_CLAIM_TYPE_CLAIM);
471 } 473 }
472 rcu_read_unlock(); 474 rcu_read_unlock();
473 } 475 }
@@ -497,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
497 499
498 /* no local broadcasts should be sent or received, for now. */ 500 /* no local broadcasts should be sent or received, for now. */
499 if (!atomic_read(&backbone_gw->request_sent)) { 501 if (!atomic_read(&backbone_gw->request_sent)) {
500 atomic_inc(&backbone_gw->bat_priv->bla_num_requests); 502 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
501 atomic_set(&backbone_gw->request_sent, 1); 503 atomic_set(&backbone_gw->request_sent, 1);
502 } 504 }
503} 505}
@@ -557,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
557 batadv_dbg(BATADV_DBG_BLA, bat_priv, 559 batadv_dbg(BATADV_DBG_BLA, bat_priv,
558 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", 560 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
559 mac, vid); 561 mac, vid);
560 hash_added = batadv_hash_add(bat_priv->claim_hash, 562 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
561 batadv_compare_claim, 563 batadv_compare_claim,
562 batadv_choose_claim, claim, 564 batadv_choose_claim, claim,
563 &claim->hash_entry); 565 &claim->hash_entry);
@@ -577,8 +579,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
577 "bla_add_claim(): changing ownership for %pM, vid %d\n", 579 "bla_add_claim(): changing ownership for %pM, vid %d\n",
578 mac, vid); 580 mac, vid);
579 581
580 claim->backbone_gw->crc ^= 582 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
581 crc16(0, claim->addr, ETH_ALEN);
582 batadv_backbone_gw_free_ref(claim->backbone_gw); 583 batadv_backbone_gw_free_ref(claim->backbone_gw);
583 584
584 } 585 }
@@ -610,7 +611,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
610 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", 611 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
611 mac, vid); 612 mac, vid);
612 613
613 batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim, 614 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
614 batadv_choose_claim, claim); 615 batadv_choose_claim, claim);
615 batadv_claim_free_ref(claim); /* reference from the hash is gone */ 616 batadv_claim_free_ref(claim); /* reference from the hash is gone */
616 617
@@ -657,7 +658,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
657 * we can allow traffic again. 658 * we can allow traffic again.
658 */ 659 */
659 if (atomic_read(&backbone_gw->request_sent)) { 660 if (atomic_read(&backbone_gw->request_sent)) {
660 atomic_dec(&backbone_gw->bat_priv->bla_num_requests); 661 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
661 atomic_set(&backbone_gw->request_sent, 0); 662 atomic_set(&backbone_gw->request_sent, 0);
662 } 663 }
663 } 664 }
@@ -702,7 +703,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
702 if (primary_if && batadv_compare_eth(backbone_addr, 703 if (primary_if && batadv_compare_eth(backbone_addr,
703 primary_if->net_dev->dev_addr)) 704 primary_if->net_dev->dev_addr))
704 batadv_bla_send_claim(bat_priv, claim_addr, vid, 705 batadv_bla_send_claim(bat_priv, claim_addr, vid,
705 BATADV_CLAIM_TYPE_DEL); 706 BATADV_CLAIM_TYPE_UNCLAIM);
706 707
707 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid); 708 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
708 709
@@ -738,7 +739,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
738 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); 739 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
739 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) 740 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
740 batadv_bla_send_claim(bat_priv, claim_addr, vid, 741 batadv_bla_send_claim(bat_priv, claim_addr, vid,
741 BATADV_CLAIM_TYPE_ADD); 742 BATADV_CLAIM_TYPE_CLAIM);
742 743
743 /* TODO: we could call something like tt_local_del() here. */ 744 /* TODO: we could call something like tt_local_del() here. */
744 745
@@ -772,7 +773,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
772 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; 773 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
773 774
774 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 775 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
775 bla_dst_own = &bat_priv->claim_dest; 776 bla_dst_own = &bat_priv->bla.claim_dest;
776 777
777 /* check if it is a claim packet in general */ 778 /* check if it is a claim packet in general */
778 if (memcmp(bla_dst->magic, bla_dst_own->magic, 779 if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
783 * otherwise assume it is in the hw_src 784 * otherwise assume it is in the hw_src
784 */ 785 */
785 switch (bla_dst->type) { 786 switch (bla_dst->type) {
786 case BATADV_CLAIM_TYPE_ADD: 787 case BATADV_CLAIM_TYPE_CLAIM:
787 backbone_addr = hw_src; 788 backbone_addr = hw_src;
788 break; 789 break;
789 case BATADV_CLAIM_TYPE_REQUEST: 790 case BATADV_CLAIM_TYPE_REQUEST:
790 case BATADV_CLAIM_TYPE_ANNOUNCE: 791 case BATADV_CLAIM_TYPE_ANNOUNCE:
791 case BATADV_CLAIM_TYPE_DEL: 792 case BATADV_CLAIM_TYPE_UNCLAIM:
792 backbone_addr = ethhdr->h_source; 793 backbone_addr = ethhdr->h_source;
793 break; 794 break;
794 default: 795 default:
@@ -904,12 +905,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
904 905
905 /* check for the different types of claim frames ... */ 906 /* check for the different types of claim frames ... */
906 switch (bla_dst->type) { 907 switch (bla_dst->type) {
907 case BATADV_CLAIM_TYPE_ADD: 908 case BATADV_CLAIM_TYPE_CLAIM:
908 if (batadv_handle_claim(bat_priv, primary_if, hw_src, 909 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
909 ethhdr->h_source, vid)) 910 ethhdr->h_source, vid))
910 return 1; 911 return 1;
911 break; 912 break;
912 case BATADV_CLAIM_TYPE_DEL: 913 case BATADV_CLAIM_TYPE_UNCLAIM:
913 if (batadv_handle_unclaim(bat_priv, primary_if, 914 if (batadv_handle_unclaim(bat_priv, primary_if,
914 ethhdr->h_source, hw_src, vid)) 915 ethhdr->h_source, hw_src, vid))
915 return 1; 916 return 1;
@@ -945,7 +946,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
945 spinlock_t *list_lock; /* protects write access to the hash lists */ 946 spinlock_t *list_lock; /* protects write access to the hash lists */
946 int i; 947 int i;
947 948
948 hash = bat_priv->backbone_hash; 949 hash = bat_priv->bla.backbone_hash;
949 if (!hash) 950 if (!hash)
950 return; 951 return;
951 952
@@ -969,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
969purge_now: 970purge_now:
970 /* don't wait for the pending request anymore */ 971 /* don't wait for the pending request anymore */
971 if (atomic_read(&backbone_gw->request_sent)) 972 if (atomic_read(&backbone_gw->request_sent))
972 atomic_dec(&bat_priv->bla_num_requests); 973 atomic_dec(&bat_priv->bla.num_requests);
973 974
974 batadv_bla_del_backbone_claims(backbone_gw); 975 batadv_bla_del_backbone_claims(backbone_gw);
975 976
@@ -999,7 +1000,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
999 struct batadv_hashtable *hash; 1000 struct batadv_hashtable *hash;
1000 int i; 1001 int i;
1001 1002
1002 hash = bat_priv->claim_hash; 1003 hash = bat_priv->bla.claim_hash;
1003 if (!hash) 1004 if (!hash)
1004 return; 1005 return;
1005 1006
@@ -1046,11 +1047,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1046 struct hlist_node *node; 1047 struct hlist_node *node;
1047 struct hlist_head *head; 1048 struct hlist_head *head;
1048 struct batadv_hashtable *hash; 1049 struct batadv_hashtable *hash;
1050 __be16 group;
1049 int i; 1051 int i;
1050 1052
1051 /* reset bridge loop avoidance group id */ 1053 /* reset bridge loop avoidance group id */
1052 bat_priv->claim_dest.group = 1054 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1053 htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1055 bat_priv->bla.claim_dest.group = group;
1054 1056
1055 if (!oldif) { 1057 if (!oldif) {
1056 batadv_bla_purge_claims(bat_priv, NULL, 1); 1058 batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1058 return; 1060 return;
1059 } 1061 }
1060 1062
1061 hash = bat_priv->backbone_hash; 1063 hash = bat_priv->bla.backbone_hash;
1062 if (!hash) 1064 if (!hash)
1063 return; 1065 return;
1064 1066
@@ -1088,8 +1090,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1088/* (re)start the timer */ 1090/* (re)start the timer */
1089static void batadv_bla_start_timer(struct batadv_priv *bat_priv) 1091static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
1090{ 1092{
1091 INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work); 1093 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1092 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work, 1094 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1093 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); 1095 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1094} 1096}
1095 1097
@@ -1099,9 +1101,9 @@ static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
1099 */ 1101 */
1100static void batadv_bla_periodic_work(struct work_struct *work) 1102static void batadv_bla_periodic_work(struct work_struct *work)
1101{ 1103{
1102 struct delayed_work *delayed_work = 1104 struct delayed_work *delayed_work;
1103 container_of(work, struct delayed_work, work);
1104 struct batadv_priv *bat_priv; 1105 struct batadv_priv *bat_priv;
1106 struct batadv_priv_bla *priv_bla;
1105 struct hlist_node *node; 1107 struct hlist_node *node;
1106 struct hlist_head *head; 1108 struct hlist_head *head;
1107 struct batadv_backbone_gw *backbone_gw; 1109 struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1109 struct batadv_hard_iface *primary_if; 1111 struct batadv_hard_iface *primary_if;
1110 int i; 1112 int i;
1111 1113
1112 bat_priv = container_of(delayed_work, struct batadv_priv, bla_work); 1114 delayed_work = container_of(work, struct delayed_work, work);
1115 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1116 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1113 primary_if = batadv_primary_if_get_selected(bat_priv); 1117 primary_if = batadv_primary_if_get_selected(bat_priv);
1114 if (!primary_if) 1118 if (!primary_if)
1115 goto out; 1119 goto out;
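
Now that the delayed work is embedded in the bla sub-struct, the handler has to hop twice: from the work_struct back to batadv_priv_bla via its work member, then from batadv_priv_bla back to batadv_priv via its bla member. A standalone sketch of the same pointer arithmetic, with container_of() spelled out via offsetof() and all struct names hypothetical:

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct work { int pending; };
        struct bla  { struct work work; int num_requests; };
        struct priv { int mesh_state; struct bla bla; };

        static void handler(struct work *w)
        {
                struct bla *bla = container_of(w, struct bla, work);
                struct priv *priv = container_of(bla, struct priv, bla);

                printf("num_requests=%d mesh_state=%d\n",
                       bla->num_requests, priv->mesh_state);
        }

        int main(void)
        {
                struct priv p = { .mesh_state = 1, .bla = { .num_requests = 2 } };

                handler(&p.bla.work);   /* walks back to &p */
                return 0;
        }
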
@@ -1120,7 +1124,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1120 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1124 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1121 goto out; 1125 goto out;
1122 1126
1123 hash = bat_priv->backbone_hash; 1127 hash = bat_priv->bla.backbone_hash;
1124 if (!hash) 1128 if (!hash)
1125 goto out; 1129 goto out;
1126 1130
@@ -1160,40 +1164,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
1160 int i; 1164 int i;
1161 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; 1165 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1162 struct batadv_hard_iface *primary_if; 1166 struct batadv_hard_iface *primary_if;
1167 uint16_t crc;
1168 unsigned long entrytime;
1163 1169
1164 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); 1170 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1165 1171
1166 /* setting claim destination address */ 1172 /* setting claim destination address */
1167 memcpy(&bat_priv->claim_dest.magic, claim_dest, 3); 1173 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1168 bat_priv->claim_dest.type = 0; 1174 bat_priv->bla.claim_dest.type = 0;
1169 primary_if = batadv_primary_if_get_selected(bat_priv); 1175 primary_if = batadv_primary_if_get_selected(bat_priv);
1170 if (primary_if) { 1176 if (primary_if) {
1171 bat_priv->claim_dest.group = 1177 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1172 htons(crc16(0, primary_if->net_dev->dev_addr, 1178 bat_priv->bla.claim_dest.group = htons(crc);
1173 ETH_ALEN));
1174 batadv_hardif_free_ref(primary_if); 1179 batadv_hardif_free_ref(primary_if);
1175 } else { 1180 } else {
1176 bat_priv->claim_dest.group = 0; /* will be set later */ 1181 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1177 } 1182 }
1178 1183
1179 /* initialize the duplicate list */ 1184 /* initialize the duplicate list */
1185 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1180 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) 1186 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1181 bat_priv->bcast_duplist[i].entrytime = 1187 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1182 jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT); 1188 bat_priv->bla.bcast_duplist_curr = 0;
1183 bat_priv->bcast_duplist_curr = 0;
1184 1189
1185 if (bat_priv->claim_hash) 1190 if (bat_priv->bla.claim_hash)
1186 return 0; 1191 return 0;
1187 1192
1188 bat_priv->claim_hash = batadv_hash_new(128); 1193 bat_priv->bla.claim_hash = batadv_hash_new(128);
1189 bat_priv->backbone_hash = batadv_hash_new(32); 1194 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1190 1195
1191 if (!bat_priv->claim_hash || !bat_priv->backbone_hash) 1196 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1192 return -ENOMEM; 1197 return -ENOMEM;
1193 1198
1194 batadv_hash_set_lock_class(bat_priv->claim_hash, 1199 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1195 &batadv_claim_hash_lock_class_key); 1200 &batadv_claim_hash_lock_class_key);
1196 batadv_hash_set_lock_class(bat_priv->backbone_hash, 1201 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1197 &batadv_backbone_hash_lock_class_key); 1202 &batadv_backbone_hash_lock_class_key);
1198 1203
1199 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); 1204 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
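
Seeding every duplist slot with jiffies minus BATADV_DUPLIST_TIMEOUT means each slot starts out already expired, so a freshly initialized (all-zero) entry can never be mistaken for a live duplicate. A tiny sketch of the trick, with now64() and TIMEOUT_MS as stand-ins for jiffies and the timeout constant:

        #include <stdbool.h>
        #include <stdint.h>

        #define TIMEOUT_MS 500

        static uint64_t now64(void) { return 1000000; }  /* fake clock */

        struct dup_entry { uint64_t entrytime; };

        static bool too_old(const struct dup_entry *e)
        {
                return now64() - e->entrytime >= TIMEOUT_MS;
        }

        int main(void)
        {
                struct dup_entry e = { .entrytime = now64() - TIMEOUT_MS };

                return too_old(&e) ? 0 : 1;     /* starts out expired */
        }
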
@@ -1234,8 +1239,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1234 crc = crc16(0, content, length); 1239 crc = crc16(0, content, length);
1235 1240
1236 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) { 1241 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1237 curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE; 1242 curr = (bat_priv->bla.bcast_duplist_curr + i);
1238 entry = &bat_priv->bcast_duplist[curr]; 1243 curr %= BATADV_DUPLIST_SIZE;
1244 entry = &bat_priv->bla.bcast_duplist[curr];
1239 1245
1240 /* we can stop searching if the entry is too old ; 1246 /* we can stop searching if the entry is too old ;
1241 * later entries will be even older 1247 * later entries will be even older
@@ -1256,13 +1262,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1256 return 1; 1262 return 1;
1257 } 1263 }
1258 /* not found, add a new entry (overwrite the oldest entry) */ 1264 /* not found, add a new entry (overwrite the oldest entry) */
1259 curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); 1265 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1260 curr %= BATADV_DUPLIST_SIZE; 1266 curr %= BATADV_DUPLIST_SIZE;
1261 entry = &bat_priv->bcast_duplist[curr]; 1267 entry = &bat_priv->bla.bcast_duplist[curr];
1262 entry->crc = crc; 1268 entry->crc = crc;
1263 entry->entrytime = jiffies; 1269 entry->entrytime = jiffies;
1264 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); 1270 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
1265 bat_priv->bcast_duplist_curr = curr; 1271 bat_priv->bla.bcast_duplist_curr = curr;
1266 1272
 1267 /* allow it, it's the first occurrence. */ 1273 /* allow it, it's the first occurrence. */
1268 return 0; 1274 return 0;
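
The duplicate check walks the ring starting at the most recently written slot, bails out as soon as it reaches entries older than the timeout (later slots are older still), and on a miss overwrites the oldest slot by stepping one position backwards modulo the ring size. A simplified standalone model (crc comparison only; the kernel code also matches the originator address):

        #include <stdint.h>
        #include <stdio.h>

        #define RING 16

        struct entry { uint16_t crc; uint64_t t; };

        static struct entry ring[RING];
        static int curr;

        /* returns 1 if crc was seen within `timeout` of `now`, else records it */
        static int seen_or_record(uint16_t crc, uint64_t now, uint64_t timeout)
        {
                int i, idx;

                for (i = 0; i < RING; i++) {
                        idx = (curr + i) % RING;
                        if (now - ring[idx].t >= timeout)
                                break;          /* older entries only get older */
                        if (ring[idx].crc == crc)
                                return 1;       /* duplicate */
                }
                idx = (curr + RING - 1) % RING; /* overwrite the oldest slot */
                ring[idx].crc = crc;
                ring[idx].t = now;
                curr = idx;
                return 0;
        }

        int main(void)
        {
                printf("%d %d\n", seen_or_record(0xbeef, 100, 50),
                       seen_or_record(0xbeef, 120, 50));        /* 0 then 1 */
                return 0;
        }
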
@@ -1279,7 +1285,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1279 */ 1285 */
1280int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) 1286int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1281{ 1287{
1282 struct batadv_hashtable *hash = bat_priv->backbone_hash; 1288 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1283 struct hlist_head *head; 1289 struct hlist_head *head;
1284 struct hlist_node *node; 1290 struct hlist_node *node;
1285 struct batadv_backbone_gw *backbone_gw; 1291 struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1339 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr))) 1345 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1340 return 0; 1346 return 0;
1341 1347
1342 vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) + 1348 vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
1343 hdr_size);
1344 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 1349 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1345 } 1350 }
1346 1351
@@ -1359,18 +1364,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
1359{ 1364{
1360 struct batadv_hard_iface *primary_if; 1365 struct batadv_hard_iface *primary_if;
1361 1366
1362 cancel_delayed_work_sync(&bat_priv->bla_work); 1367 cancel_delayed_work_sync(&bat_priv->bla.work);
1363 primary_if = batadv_primary_if_get_selected(bat_priv); 1368 primary_if = batadv_primary_if_get_selected(bat_priv);
1364 1369
1365 if (bat_priv->claim_hash) { 1370 if (bat_priv->bla.claim_hash) {
1366 batadv_bla_purge_claims(bat_priv, primary_if, 1); 1371 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1367 batadv_hash_destroy(bat_priv->claim_hash); 1372 batadv_hash_destroy(bat_priv->bla.claim_hash);
1368 bat_priv->claim_hash = NULL; 1373 bat_priv->bla.claim_hash = NULL;
1369 } 1374 }
1370 if (bat_priv->backbone_hash) { 1375 if (bat_priv->bla.backbone_hash) {
1371 batadv_bla_purge_backbone_gw(bat_priv, 1); 1376 batadv_bla_purge_backbone_gw(bat_priv, 1);
1372 batadv_hash_destroy(bat_priv->backbone_hash); 1377 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1373 bat_priv->backbone_hash = NULL; 1378 bat_priv->bla.backbone_hash = NULL;
1374 } 1379 }
1375 if (primary_if) 1380 if (primary_if)
1376 batadv_hardif_free_ref(primary_if); 1381 batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
1409 goto allow; 1414 goto allow;
1410 1415
1411 1416
1412 if (unlikely(atomic_read(&bat_priv->bla_num_requests))) 1417 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1413 /* don't allow broadcasts while requests are in flight */ 1418 /* don't allow broadcasts while requests are in flight */
1414 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) 1419 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1415 goto handled; 1420 goto handled;
@@ -1508,7 +1513,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
1508 1513
1509 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1514 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1510 1515
1511 if (unlikely(atomic_read(&bat_priv->bla_num_requests))) 1516 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1512 /* don't allow broadcasts while requests are in flight */ 1517 /* don't allow broadcasts while requests are in flight */
1513 if (is_multicast_ether_addr(ethhdr->h_dest)) 1518 if (is_multicast_ether_addr(ethhdr->h_dest))
1514 goto handled; 1519 goto handled;
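
bla.num_requests works as a "requests in flight" gate: batadv_bla_send_request() increments it, batadv_handle_announce() and the purge path decrement it, and both batadv_bla_rx() and batadv_bla_tx() above drop multicast frames while it is non-zero. A minimal C11 sketch of the idiom:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        static atomic_int num_requests;

        static void request_sent(void) { atomic_fetch_add(&num_requests, 1); }
        static void request_done(void) { atomic_fetch_sub(&num_requests, 1); }

        static bool may_forward_bcast(void)
        {
                return atomic_load(&num_requests) == 0;
        }

        int main(void)
        {
                request_sent();
                printf("%d", may_forward_bcast());      /* 0: gate closed */
                request_done();
                printf("%d\n", may_forward_bcast());    /* 1: gate open   */
                return 0;
        }
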
@@ -1564,7 +1569,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1564{ 1569{
1565 struct net_device *net_dev = (struct net_device *)seq->private; 1570 struct net_device *net_dev = (struct net_device *)seq->private;
1566 struct batadv_priv *bat_priv = netdev_priv(net_dev); 1571 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1567 struct batadv_hashtable *hash = bat_priv->claim_hash; 1572 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1568 struct batadv_claim *claim; 1573 struct batadv_claim *claim;
1569 struct batadv_hard_iface *primary_if; 1574 struct batadv_hard_iface *primary_if;
1570 struct hlist_node *node; 1575 struct hlist_node *node;
@@ -1593,7 +1598,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1593 seq_printf(seq, 1598 seq_printf(seq,
1594 "Claims announced for the mesh %s (orig %pM, group id %04x)\n", 1599 "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
1595 net_dev->name, primary_addr, 1600 net_dev->name, primary_addr,
1596 ntohs(bat_priv->claim_dest.group)); 1601 ntohs(bat_priv->bla.claim_dest.group));
1597 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", 1602 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1598 "Client", "VID", "Originator", "CRC"); 1603 "Client", "VID", "Originator", "CRC");
1599 for (i = 0; i < hash->size; i++) { 1604 for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@ out:
1616 batadv_hardif_free_ref(primary_if); 1621 batadv_hardif_free_ref(primary_if);
1617 return ret; 1622 return ret;
1618} 1623}
1624
1625int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1626{
1627 struct net_device *net_dev = (struct net_device *)seq->private;
1628 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1629 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1630 struct batadv_backbone_gw *backbone_gw;
1631 struct batadv_hard_iface *primary_if;
1632 struct hlist_node *node;
1633 struct hlist_head *head;
1634 int secs, msecs;
1635 uint32_t i;
1636 bool is_own;
1637 int ret = 0;
1638 uint8_t *primary_addr;
1639
1640 primary_if = batadv_primary_if_get_selected(bat_priv);
1641 if (!primary_if) {
1642 ret = seq_printf(seq,
1643 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1644 net_dev->name);
1645 goto out;
1646 }
1647
1648 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1649 ret = seq_printf(seq,
1650 "BATMAN mesh %s disabled - primary interface not active\n",
1651 net_dev->name);
1652 goto out;
1653 }
1654
1655 primary_addr = primary_if->net_dev->dev_addr;
1656 seq_printf(seq,
1657 "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
1658 net_dev->name, primary_addr,
1659 ntohs(bat_priv->bla.claim_dest.group));
1660 seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n",
1661 "Originator", "VID", "last seen", "CRC");
1662 for (i = 0; i < hash->size; i++) {
1663 head = &hash->table[i];
1664
1665 rcu_read_lock();
1666 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1667 msecs = jiffies_to_msecs(jiffies -
1668 backbone_gw->lasttime);
1669 secs = msecs / 1000;
1670 msecs = msecs % 1000;
1671
1672 is_own = batadv_compare_eth(backbone_gw->orig,
1673 primary_addr);
1674 if (is_own)
1675 continue;
1676
1677 seq_printf(seq,
1678 " * %pM on % 5d % 4i.%03is (%04x)\n",
1679 backbone_gw->orig, backbone_gw->vid,
1680 secs, msecs, backbone_gw->crc);
1681 }
1682 rcu_read_unlock();
1683 }
1684out:
1685 if (primary_if)
1686 batadv_hardif_free_ref(primary_if);
1687 return ret;
1688}
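
The "last seen" column splits a millisecond delta into whole seconds plus remainder, and the "% 4i.%03is" conversion space-pads the seconds to four characters while zero-padding the milliseconds to three; 12345 ms therefore renders as "  12.345s". A one-liner to confirm the formatting:

        #include <stdio.h>

        int main(void)
        {
                int msecs = 12345;
                int secs = msecs / 1000;

                msecs %= 1000;
                printf("% 4i.%03is\n", secs, msecs);    /* "  12.345s" */
                return 0;
        }
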
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 563cfbf94a7f..789cb73bde67 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -27,6 +27,8 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
27int batadv_bla_is_backbone_gw(struct sk_buff *skb, 27int batadv_bla_is_backbone_gw(struct sk_buff *skb,
28 struct batadv_orig_node *orig_node, int hdr_size); 28 struct batadv_orig_node *orig_node, int hdr_size);
29int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); 29int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
30int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
31 void *offset);
30int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig); 32int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
31int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, 33int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
32 struct batadv_bcast_packet *bcast_packet, 34 struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
41#else /* ifdef CONFIG_BATMAN_ADV_BLA */ 43#else /* ifdef CONFIG_BATMAN_ADV_BLA */
42 44
43static inline int batadv_bla_rx(struct batadv_priv *bat_priv, 45static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
44 struct sk_buff *skb, short vid, 46 struct sk_buff *skb, short vid, bool is_bcast)
45 bool is_bcast)
46{ 47{
47 return 0; 48 return 0;
48} 49}
@@ -66,6 +67,12 @@ static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
66 return 0; 67 return 0;
67} 68}
68 69
70static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
71 void *offset)
72{
73 return 0;
74}
75
69static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, 76static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
70 uint8_t *orig) 77 uint8_t *orig)
71{ 78{
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 34fbb1667bcd..391d4fb2026f 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -267,6 +267,15 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
267 return single_open(file, batadv_bla_claim_table_seq_print_text, 267 return single_open(file, batadv_bla_claim_table_seq_print_text,
268 net_dev); 268 net_dev);
269} 269}
270
271static int batadv_bla_backbone_table_open(struct inode *inode,
272 struct file *file)
273{
274 struct net_device *net_dev = (struct net_device *)inode->i_private;
275 return single_open(file, batadv_bla_backbone_table_seq_print_text,
276 net_dev);
277}
278
270#endif 279#endif
271 280
272static int batadv_transtable_local_open(struct inode *inode, struct file *file) 281static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@ static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
305 batadv_transtable_global_open); 314 batadv_transtable_global_open);
306#ifdef CONFIG_BATMAN_ADV_BLA 315#ifdef CONFIG_BATMAN_ADV_BLA
307static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open); 316static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
317static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
318 batadv_bla_backbone_table_open);
308#endif 319#endif
309static BATADV_DEBUGINFO(transtable_local, S_IRUGO, 320static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
310 batadv_transtable_local_open); 321 batadv_transtable_local_open);
@@ -316,6 +327,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
316 &batadv_debuginfo_transtable_global, 327 &batadv_debuginfo_transtable_global,
317#ifdef CONFIG_BATMAN_ADV_BLA 328#ifdef CONFIG_BATMAN_ADV_BLA
318 &batadv_debuginfo_bla_claim_table, 329 &batadv_debuginfo_bla_claim_table,
330 &batadv_debuginfo_bla_backbone_table,
319#endif 331#endif
320 &batadv_debuginfo_transtable_local, 332 &batadv_debuginfo_transtable_local,
321 &batadv_debuginfo_vis_data, 333 &batadv_debuginfo_vis_data,
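
The new debugfs file follows the stock seq_file recipe: a show callback that prints the whole table, single_open() binding it to the net_device stashed in inode->i_private, and the canonical seq_read/seq_lseek/single_release file_operations, which is roughly what the BATADV_DEBUGINFO macro expands to. A kernel-side sketch of the pattern; it only compiles in-tree and the names here are illustrative:

        #include <linux/debugfs.h>
        #include <linux/fs.h>
        #include <linux/netdevice.h>
        #include <linux/seq_file.h>

        static int example_show(struct seq_file *seq, void *offset)
        {
                struct net_device *net_dev = seq->private;

                seq_printf(seq, "hello from %s\n", net_dev->name);
                return 0;
        }

        static int example_open(struct inode *inode, struct file *file)
        {
                /* inode->i_private carries the net_device, as in debugfs.c */
                return single_open(file, example_show, inode->i_private);
        }

        /* registered via debugfs_create_file("example", S_IRUGO, parent,
         * net_dev, &example_fops)
         */
        static const struct file_operations example_fops = {
                .owner   = THIS_MODULE,
                .open    = example_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };
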
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index fc866f2e4528..15d67abc10a4 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
48 struct batadv_gw_node *gw_node; 48 struct batadv_gw_node *gw_node;
49 49
50 rcu_read_lock(); 50 rcu_read_lock();
51 gw_node = rcu_dereference(bat_priv->curr_gw); 51 gw_node = rcu_dereference(bat_priv->gw.curr_gw);
52 if (!gw_node) 52 if (!gw_node)
53 goto out; 53 goto out;
54 54
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
91{ 91{
92 struct batadv_gw_node *curr_gw_node; 92 struct batadv_gw_node *curr_gw_node;
93 93
94 spin_lock_bh(&bat_priv->gw_list_lock); 94 spin_lock_bh(&bat_priv->gw.list_lock);
95 95
96 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) 96 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
97 new_gw_node = NULL; 97 new_gw_node = NULL;
98 98
99 curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1); 99 curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
100 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 100 rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
101 101
102 if (curr_gw_node) 102 if (curr_gw_node)
103 batadv_gw_node_free_ref(curr_gw_node); 103 batadv_gw_node_free_ref(curr_gw_node);
104 104
105 spin_unlock_bh(&bat_priv->gw_list_lock); 105 spin_unlock_bh(&bat_priv->gw.list_lock);
106} 106}
107 107
108void batadv_gw_deselect(struct batadv_priv *bat_priv) 108void batadv_gw_deselect(struct batadv_priv *bat_priv)
109{ 109{
110 atomic_set(&bat_priv->gw_reselect, 1); 110 atomic_set(&bat_priv->gw.reselect, 1);
111} 111}
112 112
113static struct batadv_gw_node * 113static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
117 struct hlist_node *node; 117 struct hlist_node *node;
118 struct batadv_gw_node *gw_node, *curr_gw = NULL; 118 struct batadv_gw_node *gw_node, *curr_gw = NULL;
119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
120 uint32_t gw_divisor;
120 uint8_t max_tq = 0; 121 uint8_t max_tq = 0;
121 int down, up; 122 int down, up;
123 uint8_t tq_avg;
122 struct batadv_orig_node *orig_node; 124 struct batadv_orig_node *orig_node;
123 125
126 gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
127 gw_divisor *= 64;
128
124 rcu_read_lock(); 129 rcu_read_lock();
125 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 130 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
126 if (gw_node->deleted) 131 if (gw_node->deleted)
127 continue; 132 continue;
128 133
@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
134 if (!atomic_inc_not_zero(&gw_node->refcount)) 139 if (!atomic_inc_not_zero(&gw_node->refcount))
135 goto next; 140 goto next;
136 141
142 tq_avg = router->tq_avg;
143
137 switch (atomic_read(&bat_priv->gw_sel_class)) { 144 switch (atomic_read(&bat_priv->gw_sel_class)) {
138 case 1: /* fast connection */ 145 case 1: /* fast connection */
139 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags, 146 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
140 &down, &up); 147 &down, &up);
141 148
142 tmp_gw_factor = (router->tq_avg * router->tq_avg * 149 tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
143 down * 100 * 100) / 150 tmp_gw_factor /= gw_divisor;
144 (BATADV_TQ_LOCAL_WINDOW_SIZE *
145 BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
146 151
147 if ((tmp_gw_factor > max_gw_factor) || 152 if ((tmp_gw_factor > max_gw_factor) ||
148 ((tmp_gw_factor == max_gw_factor) && 153 ((tmp_gw_factor == max_gw_factor) &&
149 (router->tq_avg > max_tq))) { 154 (tq_avg > max_tq))) {
150 if (curr_gw) 155 if (curr_gw)
151 batadv_gw_node_free_ref(curr_gw); 156 batadv_gw_node_free_ref(curr_gw);
152 curr_gw = gw_node; 157 curr_gw = gw_node;
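
With BATADV_TQ_LOCAL_WINDOW_SIZE = 64 the precomputed divisor is 64 * 64 * 64 = 262144, so the refactored code evaluates tq_avg^2 * down * 100 * 100 / 262144. For tq_avg = 128 and down = 128 kbit that is 16384 * 128 * 10000 / 262144 = 80000. A standalone check, using a 64-bit intermediate (the in-kernel arithmetic is 32-bit):

        #include <stdint.h>
        #include <stdio.h>

        #define WINDOW 64

        static uint32_t gw_factor(uint32_t tq_avg, uint32_t down_kbit)
        {
                uint64_t f = (uint64_t)tq_avg * tq_avg * down_kbit * 100 * 100;

                return (uint32_t)(f / (WINDOW * WINDOW * 64));
        }

        int main(void)
        {
                printf("%u\n", gw_factor(128, 128));    /* 80000 */
                return 0;
        }
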
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
161 * soon as a better gateway appears which has 166 * soon as a better gateway appears which has
162 * $routing_class more tq points) 167 * $routing_class more tq points)
163 */ 168 */
164 if (router->tq_avg > max_tq) { 169 if (tq_avg > max_tq) {
165 if (curr_gw) 170 if (curr_gw)
166 batadv_gw_node_free_ref(curr_gw); 171 batadv_gw_node_free_ref(curr_gw);
167 curr_gw = gw_node; 172 curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
170 break; 175 break;
171 } 176 }
172 177
173 if (router->tq_avg > max_tq) 178 if (tq_avg > max_tq)
174 max_tq = router->tq_avg; 179 max_tq = tq_avg;
175 180
176 if (tmp_gw_factor > max_gw_factor) 181 if (tmp_gw_factor > max_gw_factor)
177 max_gw_factor = tmp_gw_factor; 182 max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
202 207
203 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 208 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
204 209
205 if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw) 210 if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
206 goto out; 211 goto out;
207 212
208 next_gw = batadv_gw_get_best_gw_node(bat_priv); 213 next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
321 gw_node->orig_node = orig_node; 326 gw_node->orig_node = orig_node;
322 atomic_set(&gw_node->refcount, 1); 327 atomic_set(&gw_node->refcount, 1);
323 328
324 spin_lock_bh(&bat_priv->gw_list_lock); 329 spin_lock_bh(&bat_priv->gw.list_lock);
325 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); 330 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
326 spin_unlock_bh(&bat_priv->gw_list_lock); 331 spin_unlock_bh(&bat_priv->gw.list_lock);
327 332
328 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up); 333 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
329 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 334 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
350 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 355 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
351 356
352 rcu_read_lock(); 357 rcu_read_lock();
353 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 358 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
354 if (gw_node->orig_node != orig_node) 359 if (gw_node->orig_node != orig_node)
355 continue; 360 continue;
356 361
@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
404 409
405 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 410 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
406 411
407 spin_lock_bh(&bat_priv->gw_list_lock); 412 spin_lock_bh(&bat_priv->gw.list_lock);
408 413
409 hlist_for_each_entry_safe(gw_node, node, node_tmp, 414 hlist_for_each_entry_safe(gw_node, node, node_tmp,
410 &bat_priv->gw_list, list) { 415 &bat_priv->gw.list, list) {
411 if (((!gw_node->deleted) || 416 if (((!gw_node->deleted) ||
412 (time_before(jiffies, gw_node->deleted + timeout))) && 417 (time_before(jiffies, gw_node->deleted + timeout))) &&
413 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) 418 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
420 batadv_gw_node_free_ref(gw_node); 425 batadv_gw_node_free_ref(gw_node);
421 } 426 }
422 427
423 spin_unlock_bh(&bat_priv->gw_list_lock); 428 spin_unlock_bh(&bat_priv->gw.list_lock);
424 429
425 /* gw_deselect() needs to acquire the gw_list_lock */ 430 /* gw_deselect() needs to acquire the gw_list_lock */
426 if (do_deselect) 431 if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
496 primary_if->net_dev->dev_addr, net_dev->name); 501 primary_if->net_dev->dev_addr, net_dev->name);
497 502
498 rcu_read_lock(); 503 rcu_read_lock();
499 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 504 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
500 if (gw_node->deleted) 505 if (gw_node->deleted)
501 continue; 506 continue;
502 507
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 282bf6e9353e..d112fd6750b0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
103{ 103{
104 struct batadv_vis_packet *vis_packet; 104 struct batadv_vis_packet *vis_packet;
105 struct batadv_hard_iface *primary_if; 105 struct batadv_hard_iface *primary_if;
106 struct sk_buff *skb;
106 107
107 primary_if = batadv_primary_if_get_selected(bat_priv); 108 primary_if = batadv_primary_if_get_selected(bat_priv);
108 if (!primary_if) 109 if (!primary_if)
109 goto out; 110 goto out;
110 111
111 vis_packet = (struct batadv_vis_packet *) 112 skb = bat_priv->vis.my_info->skb_packet;
112 bat_priv->my_vis_info->skb_packet->data; 113 vis_packet = (struct batadv_vis_packet *)skb->data;
113 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 114 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
114 memcpy(vis_packet->sender_orig, 115 memcpy(vis_packet->sender_orig,
115 primary_if->net_dev->dev_addr, ETH_ALEN); 116 primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
313 hard_iface->if_num = bat_priv->num_ifaces; 314 hard_iface->if_num = bat_priv->num_ifaces;
314 bat_priv->num_ifaces++; 315 bat_priv->num_ifaces++;
315 hard_iface->if_status = BATADV_IF_INACTIVE; 316 hard_iface->if_status = BATADV_IF_INACTIVE;
316 batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces); 317 ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
318 if (ret < 0) {
319 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
320 bat_priv->num_ifaces--;
321 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
322 goto err_dev;
323 }
317 324
318 hard_iface->batman_adv_ptype.type = ethertype; 325 hard_iface->batman_adv_ptype.type = ethertype;
319 hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv; 326 hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
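
The fix makes a batadv_orig_hash_add_if() failure undo the state changes made just above it (the algorithm hook, the interface count, the interface status) before taking the error exit, the usual kernel pattern of unwinding in reverse order of setup. A generic sketch of that shape, all names hypothetical:

        #include <stdio.h>

        /* hypothetical setup/teardown pairs */
        static int step_a(void) { return 0; }
        static void undo_a(void) { }
        static int step_b(void) { return -1; } /* pretend this fails */

        static int enable(void)
        {
                int ret;

                ret = step_a();
                if (ret < 0)
                        goto err;

                ret = step_b();
                if (ret < 0)
                        goto err_undo_a;        /* unwind in reverse order */

                return 0;

        err_undo_a:
                undo_a();
        err:
                return ret;
        }

        int main(void)
        {
                printf("%d\n", enable());       /* -1 */
                return 0;
        }
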
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 13c88b25ab31..b4aa470bc4a6 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -58,9 +58,6 @@ static int __init batadv_init(void)
58 58
59 batadv_iv_init(); 59 batadv_iv_init();
60 60
61 /* the name should not be longer than 10 chars - see
62 * http://lwn.net/Articles/23634/
63 */
64 batadv_event_workqueue = create_singlethread_workqueue("bat_events"); 61 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
65 62
66 if (!batadv_event_workqueue) 63 if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
97 94
98 spin_lock_init(&bat_priv->forw_bat_list_lock); 95 spin_lock_init(&bat_priv->forw_bat_list_lock);
99 spin_lock_init(&bat_priv->forw_bcast_list_lock); 96 spin_lock_init(&bat_priv->forw_bcast_list_lock);
100 spin_lock_init(&bat_priv->tt_changes_list_lock); 97 spin_lock_init(&bat_priv->tt.changes_list_lock);
101 spin_lock_init(&bat_priv->tt_req_list_lock); 98 spin_lock_init(&bat_priv->tt.req_list_lock);
102 spin_lock_init(&bat_priv->tt_roam_list_lock); 99 spin_lock_init(&bat_priv->tt.roam_list_lock);
103 spin_lock_init(&bat_priv->tt_buff_lock); 100 spin_lock_init(&bat_priv->tt.last_changeset_lock);
104 spin_lock_init(&bat_priv->gw_list_lock); 101 spin_lock_init(&bat_priv->gw.list_lock);
105 spin_lock_init(&bat_priv->vis_hash_lock); 102 spin_lock_init(&bat_priv->vis.hash_lock);
106 spin_lock_init(&bat_priv->vis_list_lock); 103 spin_lock_init(&bat_priv->vis.list_lock);
107 104
108 INIT_HLIST_HEAD(&bat_priv->forw_bat_list); 105 INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
109 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); 106 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
110 INIT_HLIST_HEAD(&bat_priv->gw_list); 107 INIT_HLIST_HEAD(&bat_priv->gw.list);
111 INIT_LIST_HEAD(&bat_priv->tt_changes_list); 108 INIT_LIST_HEAD(&bat_priv->tt.changes_list);
112 INIT_LIST_HEAD(&bat_priv->tt_req_list); 109 INIT_LIST_HEAD(&bat_priv->tt.req_list);
113 INIT_LIST_HEAD(&bat_priv->tt_roam_list); 110 INIT_LIST_HEAD(&bat_priv->tt.roam_list);
114 111
115 ret = batadv_originator_init(bat_priv); 112 ret = batadv_originator_init(bat_priv);
116 if (ret < 0) 113 if (ret < 0)
@@ -131,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
131 if (ret < 0) 128 if (ret < 0)
132 goto err; 129 goto err;
133 130
134 atomic_set(&bat_priv->gw_reselect, 0); 131 atomic_set(&bat_priv->gw.reselect, 0);
135 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); 132 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
136 133
137 return 0; 134 return 0;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 5d8fa0757947..d57b746219de 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
26#define BATADV_DRIVER_DEVICE "batman-adv" 26#define BATADV_DRIVER_DEVICE "batman-adv"
27 27
28#ifndef BATADV_SOURCE_VERSION 28#ifndef BATADV_SOURCE_VERSION
29#define BATADV_SOURCE_VERSION "2012.3.0" 29#define BATADV_SOURCE_VERSION "2012.4.0"
30#endif 30#endif
31 31
32/* B.A.T.M.A.N. parameters */ 32/* B.A.T.M.A.N. parameters */
@@ -41,13 +41,14 @@
41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE 41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
42 */ 42 */
43#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */ 43#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */ 44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */ 45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
46#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
46/* sliding packet range of received originator messages in sequence numbers 47/* sliding packet range of received originator messages in sequence numbers
47 * (should be a multiple of our word size) 48 * (should be a multiple of our word size)
48 */ 49 */
49#define BATADV_TQ_LOCAL_WINDOW_SIZE 64 50#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
50/* miliseconds we have to keep pending tt_req */ 51/* milliseconds we have to keep pending tt_req */
51#define BATADV_TT_REQUEST_TIMEOUT 3000 52#define BATADV_TT_REQUEST_TIMEOUT 3000
52 53
53#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5 54#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
59#define BATADV_TT_OGM_APPEND_MAX 3 60#define BATADV_TT_OGM_APPEND_MAX 3
60 61
61/* Time in which a client can roam at most ROAMING_MAX_COUNT times in 62/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
62 * miliseconds 63 * milliseconds
63 */ 64 */
64#define BATADV_ROAMING_MAX_TIME 20000 65#define BATADV_ROAMING_MAX_TIME 20000
65#define BATADV_ROAMING_MAX_COUNT 5 66#define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@ enum batadv_uev_type {
123/* Append 'batman-adv: ' before kernel messages */ 124/* Append 'batman-adv: ' before kernel messages */
124#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 125#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
125 126
126/* all messages related to routing / flooding / broadcasting / etc */
127enum batadv_dbg_level {
128 BATADV_DBG_BATMAN = 1 << 0,
129 BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
130 BATADV_DBG_TT = 1 << 2, /* translation table operations */
131 BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
132 BATADV_DBG_ALL = 15,
133};
134
135/* Kernel headers */ 127/* Kernel headers */
136 128
137#include <linux/mutex.h> /* mutex */ 129#include <linux/mutex.h> /* mutex */
@@ -173,6 +165,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
173int batadv_algo_select(struct batadv_priv *bat_priv, char *name); 165int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
174int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); 166int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
175 167
168/* all messages related to routing / flooding / broadcasting / etc */
169enum batadv_dbg_level {
170 BATADV_DBG_BATMAN = BIT(0),
171 BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
172 BATADV_DBG_TT = BIT(2), /* translation table operations */
173 BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */
174 BATADV_DBG_ALL = 15,
175};
176
176#ifdef CONFIG_BATMAN_ADV_DEBUG 177#ifdef CONFIG_BATMAN_ADV_DEBUG
177int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) 178int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
178__printf(2, 3); 179__printf(2, 3);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 8d3e55a96adc..2d23a14c220e 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -37,10 +37,10 @@ enum batadv_packettype {
37#define BATADV_COMPAT_VERSION 14 37#define BATADV_COMPAT_VERSION 14
38 38
39enum batadv_iv_flags { 39enum batadv_iv_flags {
40 BATADV_NOT_BEST_NEXT_HOP = 1 << 3, 40 BATADV_NOT_BEST_NEXT_HOP = BIT(3),
41 BATADV_PRIMARIES_FIRST_HOP = 1 << 4, 41 BATADV_PRIMARIES_FIRST_HOP = BIT(4),
42 BATADV_VIS_SERVER = 1 << 5, 42 BATADV_VIS_SERVER = BIT(5),
43 BATADV_DIRECTLINK = 1 << 6, 43 BATADV_DIRECTLINK = BIT(6),
44}; 44};
45 45
46/* ICMP message types */ 46/* ICMP message types */
@@ -60,8 +60,8 @@ enum batadv_vis_packettype {
60 60
61/* fragmentation defines */ 61/* fragmentation defines */
62enum batadv_unicast_frag_flags { 62enum batadv_unicast_frag_flags {
63 BATADV_UNI_FRAG_HEAD = 1 << 0, 63 BATADV_UNI_FRAG_HEAD = BIT(0),
64 BATADV_UNI_FRAG_LARGETAIL = 1 << 1, 64 BATADV_UNI_FRAG_LARGETAIL = BIT(1),
65}; 65};
66 66
67/* TT_QUERY subtypes */ 67/* TT_QUERY subtypes */
@@ -74,26 +74,27 @@ enum batadv_tt_query_packettype {
74 74
75/* TT_QUERY flags */ 75/* TT_QUERY flags */
76enum batadv_tt_query_flags { 76enum batadv_tt_query_flags {
77 BATADV_TT_FULL_TABLE = 1 << 2, 77 BATADV_TT_FULL_TABLE = BIT(2),
78}; 78};
79 79
80/* BATADV_TT_CLIENT flags. 80/* BATADV_TT_CLIENT flags.
81 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to 81 * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
82 * 1 << 15 are used for local computation only 82 * BIT(15) are used for local computation only
83 */ 83 */
84enum batadv_tt_client_flags { 84enum batadv_tt_client_flags {
85 BATADV_TT_CLIENT_DEL = 1 << 0, 85 BATADV_TT_CLIENT_DEL = BIT(0),
86 BATADV_TT_CLIENT_ROAM = 1 << 1, 86 BATADV_TT_CLIENT_ROAM = BIT(1),
87 BATADV_TT_CLIENT_WIFI = 1 << 2, 87 BATADV_TT_CLIENT_WIFI = BIT(2),
88 BATADV_TT_CLIENT_NOPURGE = 1 << 8, 88 BATADV_TT_CLIENT_TEMP = BIT(3),
89 BATADV_TT_CLIENT_NEW = 1 << 9, 89 BATADV_TT_CLIENT_NOPURGE = BIT(8),
90 BATADV_TT_CLIENT_PENDING = 1 << 10, 90 BATADV_TT_CLIENT_NEW = BIT(9),
91 BATADV_TT_CLIENT_PENDING = BIT(10),
91}; 92};
92 93
93/* claim frame types for the bridge loop avoidance */ 94/* claim frame types for the bridge loop avoidance */
94enum batadv_bla_claimframe { 95enum batadv_bla_claimframe {
95 BATADV_CLAIM_TYPE_ADD = 0x00, 96 BATADV_CLAIM_TYPE_CLAIM = 0x00,
96 BATADV_CLAIM_TYPE_DEL = 0x01, 97 BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
97 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02, 98 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
98 BATADV_CLAIM_TYPE_REQUEST = 0x03, 99 BATADV_CLAIM_TYPE_REQUEST = 0x03,
99}; 100};
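
BIT(n) is the kernel's spelling of 1UL << n (from linux/bitops.h in this era), so the enum conversions above are cosmetic; the only functional addition is the new BATADV_TT_CLIENT_TEMP = BIT(3) flag. A quick standalone demonstration:

        #include <stdio.h>

        #define BIT(nr) (1UL << (nr))  /* as in linux/bitops.h */

        int main(void)
        {
                printf("%lu %lu %lu\n", BIT(0), BIT(3), BIT(10)); /* 1 8 1024 */
                return 0;
        }
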
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bc2b88bbea1f..939fc01371df 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -579,32 +579,45 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
579 return router; 579 return router;
580} 580}
581 581
582int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) 582static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
583{ 583{
584 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
585 struct batadv_tt_query_packet *tt_query;
586 uint16_t tt_size;
587 struct ethhdr *ethhdr; 584 struct ethhdr *ethhdr;
588 char tt_flag;
589 size_t packet_size;
590 585
591 /* drop packet if it has not necessary minimum size */ 586 /* drop packet if it has not necessary minimum size */
592 if (unlikely(!pskb_may_pull(skb, 587 if (unlikely(!pskb_may_pull(skb, hdr_size)))
593 sizeof(struct batadv_tt_query_packet)))) 588 return -1;
594 goto out;
595
596 /* I could need to modify it */
597 if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
598 goto out;
599 589
600 ethhdr = (struct ethhdr *)skb_mac_header(skb); 590 ethhdr = (struct ethhdr *)skb_mac_header(skb);
601 591
602 /* packet with unicast indication but broadcast recipient */ 592 /* packet with unicast indication but broadcast recipient */
603 if (is_broadcast_ether_addr(ethhdr->h_dest)) 593 if (is_broadcast_ether_addr(ethhdr->h_dest))
604 goto out; 594 return -1;
605 595
606 /* packet with broadcast sender address */ 596 /* packet with broadcast sender address */
607 if (is_broadcast_ether_addr(ethhdr->h_source)) 597 if (is_broadcast_ether_addr(ethhdr->h_source))
598 return -1;
599
600 /* not for me */
601 if (!batadv_is_my_mac(ethhdr->h_dest))
602 return -1;
603
604 return 0;
605}
606
607int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
608{
609 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
610 struct batadv_tt_query_packet *tt_query;
611 uint16_t tt_size;
612 int hdr_size = sizeof(*tt_query);
613 char tt_flag;
614 size_t packet_size;
615
616 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
617 return NET_RX_DROP;
618
619 /* I could need to modify it */
620 if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
608 goto out; 621 goto out;
609 622
610 tt_query = (struct batadv_tt_query_packet *)skb->data; 623 tt_query = (struct batadv_tt_query_packet *)skb->data;
@@ -721,7 +734,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
721 * been incremented yet. This flag will make me check all the incoming 734 * been incremented yet. This flag will make me check all the incoming
722 * packets for the correct destination. 735 * packets for the correct destination.
723 */ 736 */
724 bat_priv->tt_poss_change = true; 737 bat_priv->tt.poss_change = true;
725 738
726 batadv_orig_node_free_ref(orig_node); 739 batadv_orig_node_free_ref(orig_node);
727out: 740out:
@@ -819,31 +832,6 @@ err:
819 return NULL; 832 return NULL;
820} 833}
821 834
822static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
823{
824 struct ethhdr *ethhdr;
825
826 /* drop packet if it has not necessary minimum size */
827 if (unlikely(!pskb_may_pull(skb, hdr_size)))
828 return -1;
829
830 ethhdr = (struct ethhdr *)skb_mac_header(skb);
831
832 /* packet with unicast indication but broadcast recipient */
833 if (is_broadcast_ether_addr(ethhdr->h_dest))
834 return -1;
835
836 /* packet with broadcast sender address */
837 if (is_broadcast_ether_addr(ethhdr->h_source))
838 return -1;
839
840 /* not for me */
841 if (!batadv_is_my_mac(ethhdr->h_dest))
842 return -1;
843
844 return 0;
845}
846
847static int batadv_route_unicast_packet(struct sk_buff *skb, 835static int batadv_route_unicast_packet(struct sk_buff *skb,
848 struct batadv_hard_iface *recv_if) 836 struct batadv_hard_iface *recv_if)
849{ 837{
@@ -947,8 +935,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
947 unicast_packet = (struct batadv_unicast_packet *)skb->data; 935 unicast_packet = (struct batadv_unicast_packet *)skb->data;
948 936
949 if (batadv_is_my_mac(unicast_packet->dest)) { 937 if (batadv_is_my_mac(unicast_packet->dest)) {
950 tt_poss_change = bat_priv->tt_poss_change; 938 tt_poss_change = bat_priv->tt.poss_change;
951 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 939 curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
952 } else { 940 } else {
953 orig_node = batadv_orig_hash_find(bat_priv, 941 orig_node = batadv_orig_hash_find(bat_priv,
954 unicast_packet->dest); 942 unicast_packet->dest);
@@ -993,8 +981,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
993 } else { 981 } else {
994 memcpy(unicast_packet->dest, orig_node->orig, 982 memcpy(unicast_packet->dest, orig_node->orig,
995 ETH_ALEN); 983 ETH_ALEN);
996 curr_ttvn = (uint8_t) 984 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
997 atomic_read(&orig_node->last_ttvn);
998 batadv_orig_node_free_ref(orig_node); 985 batadv_orig_node_free_ref(orig_node);
999 } 986 }
1000 987
@@ -1025,8 +1012,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1025 1012
1026 /* packet for me */ 1013 /* packet for me */
1027 if (batadv_is_my_mac(unicast_packet->dest)) { 1014 if (batadv_is_my_mac(unicast_packet->dest)) {
1028 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, 1015 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
1029 hdr_size); 1016 NULL);
1017
1030 return NET_RX_SUCCESS; 1018 return NET_RX_SUCCESS;
1031 } 1019 }
1032 1020
@@ -1063,7 +1051,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
1063 return NET_RX_SUCCESS; 1051 return NET_RX_SUCCESS;
1064 1052
1065 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if, 1053 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
1066 sizeof(struct batadv_unicast_packet)); 1054 sizeof(struct batadv_unicast_packet), NULL);
1067 return NET_RX_SUCCESS; 1055 return NET_RX_SUCCESS;
1068 } 1056 }
1069 1057
@@ -1150,7 +1138,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
1150 goto out; 1138 goto out;
1151 1139
1152 /* broadcast for me */ 1140 /* broadcast for me */
1153 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1141 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
1142 orig_node);
1154 ret = NET_RX_SUCCESS; 1143 ret = NET_RX_SUCCESS;
1155 goto out; 1144 goto out;
1156 1145
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3b4b2daa3b3e..570a8bce0364 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -190,13 +190,13 @@ out:
190static void batadv_send_outstanding_bcast_packet(struct work_struct *work) 190static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
191{ 191{
192 struct batadv_hard_iface *hard_iface; 192 struct batadv_hard_iface *hard_iface;
193 struct delayed_work *delayed_work = 193 struct delayed_work *delayed_work;
194 container_of(work, struct delayed_work, work);
195 struct batadv_forw_packet *forw_packet; 194 struct batadv_forw_packet *forw_packet;
196 struct sk_buff *skb1; 195 struct sk_buff *skb1;
197 struct net_device *soft_iface; 196 struct net_device *soft_iface;
198 struct batadv_priv *bat_priv; 197 struct batadv_priv *bat_priv;
199 198
199 delayed_work = container_of(work, struct delayed_work, work);
200 forw_packet = container_of(delayed_work, struct batadv_forw_packet, 200 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
201 delayed_work); 201 delayed_work);
202 soft_iface = forw_packet->if_incoming->soft_iface; 202 soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@ out:
239 239
240void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) 240void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
241{ 241{
242 struct delayed_work *delayed_work = 242 struct delayed_work *delayed_work;
243 container_of(work, struct delayed_work, work);
244 struct batadv_forw_packet *forw_packet; 243 struct batadv_forw_packet *forw_packet;
245 struct batadv_priv *bat_priv; 244 struct batadv_priv *bat_priv;
246 245
246 delayed_work = container_of(work, struct delayed_work, work);
247 forw_packet = container_of(delayed_work, struct batadv_forw_packet, 247 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
248 delayed_work); 248 delayed_work);
249 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); 249 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 109ea2aae96c..7b683e0bd668 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -93,7 +93,14 @@ static int batadv_interface_release(struct net_device *dev)
93static struct net_device_stats *batadv_interface_stats(struct net_device *dev) 93static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
94{ 94{
95 struct batadv_priv *bat_priv = netdev_priv(dev); 95 struct batadv_priv *bat_priv = netdev_priv(dev);
96 return &bat_priv->stats; 96 struct net_device_stats *stats = &bat_priv->stats;
97
98 stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
99 stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
100 stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
101 stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
102 stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
103 return stats;
97} 104}
98 105
99static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) 106static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
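
batadv_interface_stats() now folds lock-free per-CPU counters into the shared net_device_stats only when someone asks for them, instead of bumping shared fields on every packet. A hedged kernel-side sketch of how helpers like batadv_inc_counter()/batadv_sum_counter() are typically built on this_cpu_add() and per_cpu_ptr() (in-tree only; the real helpers live in main.h):

        #include <linux/percpu.h>
        #include <linux/types.h>

        /* counters = __alloc_percpu(n * sizeof(uint64_t),
         *                           __alignof__(uint64_t))
         */
        static inline void counter_add(uint64_t __percpu *counters,
                                       size_t idx, size_t count)
        {
                this_cpu_add(counters[idx], count);     /* hot path, no lock */
        }

        static uint64_t counter_sum(uint64_t __percpu *counters, size_t idx)
        {
                uint64_t sum = 0;
                int cpu;

                for_each_possible_cpu(cpu)              /* slow path */
                        sum += per_cpu_ptr(counters, cpu)[idx];
                return sum;
        }
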
@@ -142,6 +149,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
142 int data_len = skb->len, ret; 149 int data_len = skb->len, ret;
143 short vid __maybe_unused = -1; 150 short vid __maybe_unused = -1;
144 bool do_bcast = false; 151 bool do_bcast = false;
152 uint32_t seqno;
145 153
146 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) 154 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
147 goto dropped; 155 goto dropped;
@@ -223,8 +231,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
223 primary_if->net_dev->dev_addr, ETH_ALEN); 231 primary_if->net_dev->dev_addr, ETH_ALEN);
224 232
225 /* set broadcast sequence number */ 233 /* set broadcast sequence number */
226 bcast_packet->seqno = 234 seqno = atomic_inc_return(&bat_priv->bcast_seqno);
227 htonl(atomic_inc_return(&bat_priv->bcast_seqno)); 235 bcast_packet->seqno = htonl(seqno);
228 236
229 batadv_add_bcast_packet_to_list(bat_priv, skb, 1); 237 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
230 238
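
Splitting the statement lets the sequence number be bumped with atomic_inc_return() and converted with htonl() in separate, readable steps; the field stored in the packet is unambiguously big-endian on the wire. A standalone illustration of the host/wire split:

        #include <arpa/inet.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned int seqno = 41;

                seqno += 1;     /* stand-in for atomic_inc_return() */
                /* on a little-endian host the wire value prints byte-swapped */
                printf("host %u, wire %#x\n", seqno, htonl(seqno));
                return 0;
        }
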
@@ -246,14 +254,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
246 goto dropped_freed; 254 goto dropped_freed;
247 } 255 }
248 256
249 bat_priv->stats.tx_packets++; 257 batadv_inc_counter(bat_priv, BATADV_CNT_TX);
250 bat_priv->stats.tx_bytes += data_len; 258 batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
251 goto end; 259 goto end;
252 260
253dropped: 261dropped:
254 kfree_skb(skb); 262 kfree_skb(skb);
255dropped_freed: 263dropped_freed:
256 bat_priv->stats.tx_dropped++; 264 batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
257end: 265end:
258 if (primary_if) 266 if (primary_if)
259 batadv_hardif_free_ref(primary_if); 267 batadv_hardif_free_ref(primary_if);
@@ -262,7 +270,7 @@ end:
262 270
263void batadv_interface_rx(struct net_device *soft_iface, 271void batadv_interface_rx(struct net_device *soft_iface,
264 struct sk_buff *skb, struct batadv_hard_iface *recv_if, 272 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
265 int hdr_size) 273 int hdr_size, struct batadv_orig_node *orig_node)
266{ 274{
267 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 275 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
268 struct ethhdr *ethhdr; 276 struct ethhdr *ethhdr;
@@ -308,11 +316,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
308 316
309 /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ 317 /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
310 318
311 bat_priv->stats.rx_packets++; 319 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
312 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; 320 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
321 skb->len + ETH_HLEN);
313 322
314 soft_iface->last_rx = jiffies; 323 soft_iface->last_rx = jiffies;
315 324
325 if (orig_node)
326 batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
327 ethhdr->h_source);
328
316 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) 329 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
317 goto dropped; 330 goto dropped;
318 331
@@ -379,15 +392,22 @@ struct net_device *batadv_softif_create(const char *name)
379 if (!soft_iface) 392 if (!soft_iface)
380 goto out; 393 goto out;
381 394
395 bat_priv = netdev_priv(soft_iface);
396
397 /* batadv_interface_stats() needs to be available as soon as
398 * register_netdevice() has been called
399 */
400 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
401 if (!bat_priv->bat_counters)
402 goto free_soft_iface;
403
382 ret = register_netdevice(soft_iface); 404 ret = register_netdevice(soft_iface);
383 if (ret < 0) { 405 if (ret < 0) {
384 pr_err("Unable to register the batman interface '%s': %i\n", 406 pr_err("Unable to register the batman interface '%s': %i\n",
385 name, ret); 407 name, ret);
386 goto free_soft_iface; 408 goto free_bat_counters;
387 } 409 }
388 410
389 bat_priv = netdev_priv(soft_iface);
390
391 atomic_set(&bat_priv->aggregated_ogms, 1); 411 atomic_set(&bat_priv->aggregated_ogms, 1);
392 atomic_set(&bat_priv->bonding, 0); 412 atomic_set(&bat_priv->bonding, 0);
393 atomic_set(&bat_priv->bridge_loop_avoidance, 0); 413 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -405,29 +425,26 @@ struct net_device *batadv_softif_create(const char *name)
405 425
406 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 426 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
407 atomic_set(&bat_priv->bcast_seqno, 1); 427 atomic_set(&bat_priv->bcast_seqno, 1);
408 atomic_set(&bat_priv->ttvn, 0); 428 atomic_set(&bat_priv->tt.vn, 0);
409 atomic_set(&bat_priv->tt_local_changes, 0); 429 atomic_set(&bat_priv->tt.local_changes, 0);
410 atomic_set(&bat_priv->tt_ogm_append_cnt, 0); 430 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
411 atomic_set(&bat_priv->bla_num_requests, 0); 431#ifdef CONFIG_BATMAN_ADV_BLA
412 432 atomic_set(&bat_priv->bla.num_requests, 0);
413 bat_priv->tt_buff = NULL; 433#endif
414 bat_priv->tt_buff_len = 0; 434 bat_priv->tt.last_changeset = NULL;
415 bat_priv->tt_poss_change = false; 435 bat_priv->tt.last_changeset_len = 0;
436 bat_priv->tt.poss_change = false;
416 437
417 bat_priv->primary_if = NULL; 438 bat_priv->primary_if = NULL;
418 bat_priv->num_ifaces = 0; 439 bat_priv->num_ifaces = 0;
419 440
420 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
421 if (!bat_priv->bat_counters)
422 goto unreg_soft_iface;
423
424 ret = batadv_algo_select(bat_priv, batadv_routing_algo); 441 ret = batadv_algo_select(bat_priv, batadv_routing_algo);
425 if (ret < 0) 442 if (ret < 0)
426 goto free_bat_counters; 443 goto unreg_soft_iface;
427 444
428 ret = batadv_sysfs_add_meshif(soft_iface); 445 ret = batadv_sysfs_add_meshif(soft_iface);
429 if (ret < 0) 446 if (ret < 0)
430 goto free_bat_counters; 447 goto unreg_soft_iface;
431 448
432 ret = batadv_debugfs_add_meshif(soft_iface); 449 ret = batadv_debugfs_add_meshif(soft_iface);
433 if (ret < 0) 450 if (ret < 0)
@@ -443,12 +460,13 @@ unreg_debugfs:
443 batadv_debugfs_del_meshif(soft_iface); 460 batadv_debugfs_del_meshif(soft_iface);
444unreg_sysfs: 461unreg_sysfs:
445 batadv_sysfs_del_meshif(soft_iface); 462 batadv_sysfs_del_meshif(soft_iface);
446free_bat_counters:
447 free_percpu(bat_priv->bat_counters);
448unreg_soft_iface: 463unreg_soft_iface:
464 free_percpu(bat_priv->bat_counters);
449 unregister_netdevice(soft_iface); 465 unregister_netdevice(soft_iface);
450 return NULL; 466 return NULL;
451 467
468free_bat_counters:
469 free_percpu(bat_priv->bat_counters);
452free_soft_iface: 470free_soft_iface:
453 free_netdev(soft_iface); 471 free_netdev(soft_iface);
454out: 472out:
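
The label shuffle preserves the usual goto-unwind invariant: each exit label releases exactly what was acquired before the goto that reaches it, in reverse order. With the counters now allocated ahead of register_netdevice(), the error paths condense to the following skeleton (abridged from the hunks above):

	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
	if (!bat_priv->bat_counters)
		goto free_soft_iface;	/* only the netdev exists so far */

	ret = register_netdevice(soft_iface);
	if (ret < 0)
		goto free_bat_counters;	/* free the counters, then the netdev */

	/* any later failure: */
	goto unreg_soft_iface;		/* free the counters, then unregister */

Note that unreg_soft_iface ends in unregister_netdevice() with no free_netdev(): the soft interface sets dev->destructor = free_netdev in batadv_interface_setup() (outside this diff), so the netdev is reclaimed once its last reference drops.
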
@@ -518,6 +536,11 @@ static u32 batadv_get_link(struct net_device *dev)
518static const struct { 536static const struct {
519 const char name[ETH_GSTRING_LEN]; 537 const char name[ETH_GSTRING_LEN];
520} batadv_counters_strings[] = { 538} batadv_counters_strings[] = {
539 { "tx" },
540 { "tx_bytes" },
541 { "tx_dropped" },
542 { "rx" },
543 { "rx_bytes" },
521 { "forward" }, 544 { "forward" },
522 { "forward_bytes" }, 545 { "forward_bytes" },
523 { "mgmt_tx" }, 546 { "mgmt_tx" },
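
The five new strings are prepended in exactly the order of the BATADV_CNT_* enumerators added to types.h further down: ethtool pairs names with values purely by array index, so batadv_counters_strings and enum batadv_counters must stay in lockstep. The consumers are the ethtool callbacks elsewhere in this file, roughly of this shape (a sketch, not the literal functions):

static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
			       uint8_t *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, batadv_counters_strings,
		       sizeof(batadv_counters_strings));
}

static int batadv_get_sset_count(struct net_device *dev, int stringsets)
{
	if (stringsets == ETH_SS_STATS)
		return BATADV_CNT_NUM;	/* must equal the strings array length */

	return -EINVAL;
}
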
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 852c683b06a1..07a08fed28b9 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -21,8 +21,9 @@
21#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ 21#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
22 22
23int batadv_skb_head_push(struct sk_buff *skb, unsigned int len); 23int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
24void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, 24void batadv_interface_rx(struct net_device *soft_iface,
25 struct batadv_hard_iface *recv_if, int hdr_size); 25 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
26 int hdr_size, struct batadv_orig_node *orig_node);
26struct net_device *batadv_softif_create(const char *name); 27struct net_device *batadv_softif_create(const char *name);
27void batadv_softif_destroy(struct net_device *soft_iface); 28void batadv_softif_destroy(struct net_device *soft_iface);
28int batadv_softif_is_valid(const struct net_device *net_dev); 29int batadv_softif_is_valid(const struct net_device *net_dev);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 99dd8f75b3ff..112edd371b2f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -34,6 +34,10 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
34static void batadv_tt_purge(struct work_struct *work); 34static void batadv_tt_purge(struct work_struct *work);
35static void 35static void
36batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry); 36batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
37static void batadv_tt_global_del(struct batadv_priv *bat_priv,
38 struct batadv_orig_node *orig_node,
39 const unsigned char *addr,
40 const char *message, bool roaming);
37 41
38/* returns 1 if they are the same mac addr */ 42/* returns 1 if they are the same mac addr */
39static int batadv_compare_tt(const struct hlist_node *node, const void *data2) 43static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
46 50
47static void batadv_tt_start_timer(struct batadv_priv *bat_priv) 51static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
48{ 52{
49 INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge); 53 INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
50 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work, 54 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
51 msecs_to_jiffies(5000)); 55 msecs_to_jiffies(5000));
52} 56}
53 57
@@ -88,7 +92,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
88 struct batadv_tt_common_entry *tt_common_entry; 92 struct batadv_tt_common_entry *tt_common_entry;
89 struct batadv_tt_local_entry *tt_local_entry = NULL; 93 struct batadv_tt_local_entry *tt_local_entry = NULL;
90 94
91 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data); 95 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
92 if (tt_common_entry) 96 if (tt_common_entry)
93 tt_local_entry = container_of(tt_common_entry, 97 tt_local_entry = container_of(tt_common_entry,
94 struct batadv_tt_local_entry, 98 struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
102 struct batadv_tt_common_entry *tt_common_entry; 106 struct batadv_tt_common_entry *tt_common_entry;
103 struct batadv_tt_global_entry *tt_global_entry = NULL; 107 struct batadv_tt_global_entry *tt_global_entry = NULL;
104 108
105 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data); 109 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
106 if (tt_common_entry) 110 if (tt_common_entry)
107 tt_global_entry = container_of(tt_common_entry, 111 tt_global_entry = container_of(tt_common_entry,
108 struct batadv_tt_global_entry, 112 struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
152static void 156static void
153batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry) 157batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
154{ 158{
159 if (!atomic_dec_and_test(&orig_entry->refcount))
160 return;
155 /* to avoid race conditions, immediately decrease the tt counter */ 161 /* to avoid race conditions, immediately decrease the tt counter */
156 atomic_dec(&orig_entry->orig_node->tt_size); 162 atomic_dec(&orig_entry->orig_node->tt_size);
157 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); 163 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
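
The added atomic_dec_and_test() guard turns this helper into a true reference drop: only the caller releasing the last reference detaches the entry and schedules the RCU-deferred free. Its acquire-side counterpart is the atomic_inc_not_zero() in batadv_tt_global_orig_entry_find() further down. The generic pattern, sketched with a placeholder struct:

struct foo {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static void foo_free_ref(struct foo *f)
{
	/* only the release of the last reference frees the object */
	if (!atomic_dec_and_test(&f->refcount))
		return;

	call_rcu(&f->rcu, foo_free_rcu);
}

/* acquire side, to be called under rcu_read_lock() */
static bool foo_get(struct foo *f)
{
	/* refuse objects whose last reference is already gone */
	return atomic_inc_not_zero(&f->refcount);
}
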
@@ -175,8 +181,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
175 del_op_requested = flags & BATADV_TT_CLIENT_DEL; 181 del_op_requested = flags & BATADV_TT_CLIENT_DEL;
176 182
177 /* check for ADD+DEL or DEL+ADD events */ 183 /* check for ADD+DEL or DEL+ADD events */
178 spin_lock_bh(&bat_priv->tt_changes_list_lock); 184 spin_lock_bh(&bat_priv->tt.changes_list_lock);
179 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 185 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
180 list) { 186 list) {
181 if (!batadv_compare_eth(entry->change.addr, addr)) 187 if (!batadv_compare_eth(entry->change.addr, addr))
182 continue; 188 continue;
@@ -203,15 +209,15 @@ del:
203 } 209 }
204 210
 205 /* track the change in the OGM interval list */ 211
206 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); 212 list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
207 213
208unlock: 214unlock:
209 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 215 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
210 216
211 if (event_removed) 217 if (event_removed)
212 atomic_dec(&bat_priv->tt_local_changes); 218 atomic_dec(&bat_priv->tt.local_changes);
213 else 219 else
214 atomic_inc(&bat_priv->tt_local_changes); 220 atomic_inc(&bat_priv->tt.local_changes);
215} 221}
216 222
217int batadv_tt_len(int changes_num) 223int batadv_tt_len(int changes_num)
@@ -221,12 +227,12 @@ int batadv_tt_len(int changes_num)
221 227
222static int batadv_tt_local_init(struct batadv_priv *bat_priv) 228static int batadv_tt_local_init(struct batadv_priv *bat_priv)
223{ 229{
224 if (bat_priv->tt_local_hash) 230 if (bat_priv->tt.local_hash)
225 return 0; 231 return 0;
226 232
227 bat_priv->tt_local_hash = batadv_hash_new(1024); 233 bat_priv->tt.local_hash = batadv_hash_new(1024);
228 234
229 if (!bat_priv->tt_local_hash) 235 if (!bat_priv->tt.local_hash)
230 return -ENOMEM; 236 return -ENOMEM;
231 237
232 return 0; 238 return 0;
@@ -258,7 +264,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
258 264
259 batadv_dbg(BATADV_DBG_TT, bat_priv, 265 batadv_dbg(BATADV_DBG_TT, bat_priv,
260 "Creating new local tt entry: %pM (ttvn: %d)\n", addr, 266 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
261 (uint8_t)atomic_read(&bat_priv->ttvn)); 267 (uint8_t)atomic_read(&bat_priv->tt.vn));
262 268
263 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); 269 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
264 tt_local_entry->common.flags = BATADV_NO_FLAGS; 270 tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -266,6 +272,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
266 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI; 272 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
267 atomic_set(&tt_local_entry->common.refcount, 2); 273 atomic_set(&tt_local_entry->common.refcount, 2);
268 tt_local_entry->last_seen = jiffies; 274 tt_local_entry->last_seen = jiffies;
275 tt_local_entry->common.added_at = tt_local_entry->last_seen;
269 276
270 /* the batman interface mac address should never be purged */ 277 /* the batman interface mac address should never be purged */
271 if (batadv_compare_eth(addr, soft_iface->dev_addr)) 278 if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -277,7 +284,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
277 */ 284 */
278 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; 285 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
279 286
280 hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt, 287 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
281 batadv_choose_orig, 288 batadv_choose_orig,
282 &tt_local_entry->common, 289 &tt_local_entry->common,
283 &tt_local_entry->common.hash_entry); 290 &tt_local_entry->common.hash_entry);
@@ -348,7 +355,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
348 primary_if = batadv_primary_if_get_selected(bat_priv); 355 primary_if = batadv_primary_if_get_selected(bat_priv);
349 356
350 req_len = min_packet_len; 357 req_len = min_packet_len;
351 req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes)); 358 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
352 359
 353 /* if we have too many changes for one packet, don't send any 360
354 * and wait for the tt table request which will be fragmented 361 * and wait for the tt table request which will be fragmented
@@ -381,10 +388,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
381 if (new_len > 0) 388 if (new_len > 0)
382 tot_changes = new_len / batadv_tt_len(1); 389 tot_changes = new_len / batadv_tt_len(1);
383 390
384 spin_lock_bh(&bat_priv->tt_changes_list_lock); 391 spin_lock_bh(&bat_priv->tt.changes_list_lock);
385 atomic_set(&bat_priv->tt_local_changes, 0); 392 atomic_set(&bat_priv->tt.local_changes, 0);
386 393
387 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 394 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
388 list) { 395 list) {
389 if (count < tot_changes) { 396 if (count < tot_changes) {
390 memcpy(tt_buff + batadv_tt_len(count), 397 memcpy(tt_buff + batadv_tt_len(count),
@@ -394,25 +401,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
394 list_del(&entry->list); 401 list_del(&entry->list);
395 kfree(entry); 402 kfree(entry);
396 } 403 }
397 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 404 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
398 405
399 /* Keep the buffer for possible tt_request */ 406 /* Keep the buffer for possible tt_request */
400 spin_lock_bh(&bat_priv->tt_buff_lock); 407 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
401 kfree(bat_priv->tt_buff); 408 kfree(bat_priv->tt.last_changeset);
402 bat_priv->tt_buff_len = 0; 409 bat_priv->tt.last_changeset_len = 0;
403 bat_priv->tt_buff = NULL; 410 bat_priv->tt.last_changeset = NULL;
404 /* check whether this new OGM has no changes due to size problems */ 411 /* check whether this new OGM has no changes due to size problems */
405 if (new_len > 0) { 412 if (new_len > 0) {
406 /* if kmalloc() fails we will reply with the full table 413 /* if kmalloc() fails we will reply with the full table
407 * instead of providing the diff 414 * instead of providing the diff
408 */ 415 */
409 bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC); 416 bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
410 if (bat_priv->tt_buff) { 417 if (bat_priv->tt.last_changeset) {
411 memcpy(bat_priv->tt_buff, tt_buff, new_len); 418 memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
412 bat_priv->tt_buff_len = new_len; 419 bat_priv->tt.last_changeset_len = new_len;
413 } 420 }
414 } 421 }
415 spin_unlock_bh(&bat_priv->tt_buff_lock); 422 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
416 423
417 return count; 424 return count;
418} 425}
@@ -421,7 +428,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
421{ 428{
422 struct net_device *net_dev = (struct net_device *)seq->private; 429 struct net_device *net_dev = (struct net_device *)seq->private;
423 struct batadv_priv *bat_priv = netdev_priv(net_dev); 430 struct batadv_priv *bat_priv = netdev_priv(net_dev);
424 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 431 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
425 struct batadv_tt_common_entry *tt_common_entry; 432 struct batadv_tt_common_entry *tt_common_entry;
426 struct batadv_hard_iface *primary_if; 433 struct batadv_hard_iface *primary_if;
427 struct hlist_node *node; 434 struct hlist_node *node;
@@ -446,7 +453,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
446 453
447 seq_printf(seq, 454 seq_printf(seq,
448 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", 455 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
449 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); 456 net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
450 457
451 for (i = 0; i < hash->size; i++) { 458 for (i = 0; i < hash->size; i++) {
452 head = &hash->table[i]; 459 head = &hash->table[i];
@@ -544,7 +551,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
544 551
545static void batadv_tt_local_purge(struct batadv_priv *bat_priv) 552static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
546{ 553{
547 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 554 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
548 struct hlist_head *head; 555 struct hlist_head *head;
549 spinlock_t *list_lock; /* protects write access to the hash lists */ 556 spinlock_t *list_lock; /* protects write access to the hash lists */
550 uint32_t i; 557 uint32_t i;
@@ -570,10 +577,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
570 struct hlist_head *head; 577 struct hlist_head *head;
571 uint32_t i; 578 uint32_t i;
572 579
573 if (!bat_priv->tt_local_hash) 580 if (!bat_priv->tt.local_hash)
574 return; 581 return;
575 582
576 hash = bat_priv->tt_local_hash; 583 hash = bat_priv->tt.local_hash;
577 584
578 for (i = 0; i < hash->size; i++) { 585 for (i = 0; i < hash->size; i++) {
579 head = &hash->table[i]; 586 head = &hash->table[i];
@@ -593,17 +600,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
593 600
594 batadv_hash_destroy(hash); 601 batadv_hash_destroy(hash);
595 602
596 bat_priv->tt_local_hash = NULL; 603 bat_priv->tt.local_hash = NULL;
597} 604}
598 605
599static int batadv_tt_global_init(struct batadv_priv *bat_priv) 606static int batadv_tt_global_init(struct batadv_priv *bat_priv)
600{ 607{
601 if (bat_priv->tt_global_hash) 608 if (bat_priv->tt.global_hash)
602 return 0; 609 return 0;
603 610
604 bat_priv->tt_global_hash = batadv_hash_new(1024); 611 bat_priv->tt.global_hash = batadv_hash_new(1024);
605 612
606 if (!bat_priv->tt_global_hash) 613 if (!bat_priv->tt.global_hash)
607 return -ENOMEM; 614 return -ENOMEM;
608 615
609 return 0; 616 return 0;
@@ -613,62 +620,99 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
613{ 620{
614 struct batadv_tt_change_node *entry, *safe; 621 struct batadv_tt_change_node *entry, *safe;
615 622
616 spin_lock_bh(&bat_priv->tt_changes_list_lock); 623 spin_lock_bh(&bat_priv->tt.changes_list_lock);
617 624
618 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, 625 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
619 list) { 626 list) {
620 list_del(&entry->list); 627 list_del(&entry->list);
621 kfree(entry); 628 kfree(entry);
622 } 629 }
623 630
624 atomic_set(&bat_priv->tt_local_changes, 0); 631 atomic_set(&bat_priv->tt.local_changes, 0);
625 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 632 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
626} 633}
627 634
628/* find out if an orig_node is already in the list of a tt_global_entry. 635/* retrieves the orig_tt_list_entry belonging to orig_node from the
629 * returns 1 if found, 0 otherwise 636 * batadv_tt_global_entry list
637 *
 638 * returns it with an increased refcount, or NULL if not found
630 */ 639 */
631static bool 640static struct batadv_tt_orig_list_entry *
632batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, 641batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
633 const struct batadv_orig_node *orig_node) 642 const struct batadv_orig_node *orig_node)
634{ 643{
635 struct batadv_tt_orig_list_entry *tmp_orig_entry; 644 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
636 const struct hlist_head *head; 645 const struct hlist_head *head;
637 struct hlist_node *node; 646 struct hlist_node *node;
638 bool found = false;
639 647
640 rcu_read_lock(); 648 rcu_read_lock();
641 head = &entry->orig_list; 649 head = &entry->orig_list;
642 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { 650 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
643 if (tmp_orig_entry->orig_node == orig_node) { 651 if (tmp_orig_entry->orig_node != orig_node)
644 found = true; 652 continue;
645 break; 653 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
646 } 654 continue;
655
656 orig_entry = tmp_orig_entry;
657 break;
647 } 658 }
648 rcu_read_unlock(); 659 rcu_read_unlock();
660
661 return orig_entry;
662}
663
664/* find out if an orig_node is already in the list of a tt_global_entry.
665 * returns true if found, false otherwise
666 */
667static bool
668batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
669 const struct batadv_orig_node *orig_node)
670{
671 struct batadv_tt_orig_list_entry *orig_entry;
672 bool found = false;
673
674 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
675 if (orig_entry) {
676 found = true;
677 batadv_tt_orig_list_entry_free_ref(orig_entry);
678 }
679
649 return found; 680 return found;
650} 681}
651 682
652static void 683static void
653batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry, 684batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
654 struct batadv_orig_node *orig_node, int ttvn) 685 struct batadv_orig_node *orig_node, int ttvn)
655{ 686{
656 struct batadv_tt_orig_list_entry *orig_entry; 687 struct batadv_tt_orig_list_entry *orig_entry;
657 688
689 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
690 if (orig_entry) {
691 /* refresh the ttvn: the current value could be a bogus one that
692 * was added during a "temporary client detection"
693 */
694 orig_entry->ttvn = ttvn;
695 goto out;
696 }
697
658 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); 698 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
659 if (!orig_entry) 699 if (!orig_entry)
660 return; 700 goto out;
661 701
662 INIT_HLIST_NODE(&orig_entry->list); 702 INIT_HLIST_NODE(&orig_entry->list);
663 atomic_inc(&orig_node->refcount); 703 atomic_inc(&orig_node->refcount);
664 atomic_inc(&orig_node->tt_size); 704 atomic_inc(&orig_node->tt_size);
665 orig_entry->orig_node = orig_node; 705 orig_entry->orig_node = orig_node;
666 orig_entry->ttvn = ttvn; 706 orig_entry->ttvn = ttvn;
707 atomic_set(&orig_entry->refcount, 2);
667 708
668 spin_lock_bh(&tt_global_entry->list_lock); 709 spin_lock_bh(&tt_global->list_lock);
669 hlist_add_head_rcu(&orig_entry->list, 710 hlist_add_head_rcu(&orig_entry->list,
670 &tt_global_entry->orig_list); 711 &tt_global->orig_list);
671 spin_unlock_bh(&tt_global_entry->list_lock); 712 spin_unlock_bh(&tt_global->list_lock);
713out:
714 if (orig_entry)
715 batadv_tt_orig_list_entry_free_ref(orig_entry);
672} 716}
673 717
674/* caller must hold orig_node refcount */ 718/* caller must hold orig_node refcount */
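
Two details in the rewritten add path are easy to miss. When the orig_entry already exists, only its ttvn is refreshed; that is how an entry created by the temporary-client path gets a correct version number once a genuine announcement arrives. And a freshly created entry starts at refcount 2: one reference is owned by the RCU list, one by the local orig_entry pointer that the shared exit path drops. Annotated, the tail of the function reads:

	atomic_set(&orig_entry->refcount, 2);	/* 1 for the list, 1 for this caller */

	spin_lock_bh(&tt_global->list_lock);
	hlist_add_head_rcu(&orig_entry->list, &tt_global->orig_list);
	spin_unlock_bh(&tt_global->list_lock);
out:
	if (orig_entry)
		batadv_tt_orig_list_entry_free_ref(orig_entry);	/* drops the local ref */
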
@@ -695,11 +739,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
695 common->flags = flags; 739 common->flags = flags;
696 tt_global_entry->roam_at = 0; 740 tt_global_entry->roam_at = 0;
697 atomic_set(&common->refcount, 2); 741 atomic_set(&common->refcount, 2);
742 common->added_at = jiffies;
698 743
699 INIT_HLIST_HEAD(&tt_global_entry->orig_list); 744 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
700 spin_lock_init(&tt_global_entry->list_lock); 745 spin_lock_init(&tt_global_entry->list_lock);
701 746
702 hash_added = batadv_hash_add(bat_priv->tt_global_hash, 747 hash_added = batadv_hash_add(bat_priv->tt.global_hash,
703 batadv_compare_tt, 748 batadv_compare_tt,
704 batadv_choose_orig, common, 749 batadv_choose_orig, common,
705 &common->hash_entry); 750 &common->hash_entry);
@@ -709,11 +754,20 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
709 batadv_tt_global_entry_free_ref(tt_global_entry); 754 batadv_tt_global_entry_free_ref(tt_global_entry);
710 goto out_remove; 755 goto out_remove;
711 } 756 }
712
713 batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
714 ttvn);
715 } else { 757 } else {
716 /* there is already a global entry, use this one. */ 758 /* If there is already a global entry, we can use this one for
759 * our processing.
760 * But if we are trying to add a temporary client we can exit
761 * directly because the temporary information should never
762 * override any already known client state (whatever it is)
763 */
764 if (flags & BATADV_TT_CLIENT_TEMP)
765 goto out;
766
 767 /* if the client was temporarily added before receiving the first
768 * OGM announcing it, we have to clear the TEMP flag
769 */
770 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
717 771
 718 /* If the BATADV_TT_CLIENT_ROAM flag is set, there is only 772
719 * one originator left in the list and we previously received a 773 * one originator left in the list and we previously received a
@@ -727,12 +781,9 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
727 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; 781 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
728 tt_global_entry->roam_at = 0; 782 tt_global_entry->roam_at = 0;
729 } 783 }
730
731 if (!batadv_tt_global_entry_has_orig(tt_global_entry,
732 orig_node))
733 batadv_tt_global_add_orig_entry(tt_global_entry,
734 orig_node, ttvn);
735 } 784 }
785 /* add the new orig_entry (if needed) or update it */
786 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
736 787
737 batadv_dbg(BATADV_DBG_TT, bat_priv, 788 batadv_dbg(BATADV_DBG_TT, bat_priv,
738 "Creating new global tt entry: %pM (via %pM)\n", 789 "Creating new global tt entry: %pM (via %pM)\n",
@@ -771,11 +822,12 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
771 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 822 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
772 flags = tt_common_entry->flags; 823 flags = tt_common_entry->flags;
773 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); 824 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
774 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n", 825 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
775 tt_global_entry->common.addr, orig_entry->ttvn, 826 tt_global_entry->common.addr, orig_entry->ttvn,
776 orig_entry->orig_node->orig, last_ttvn, 827 orig_entry->orig_node->orig, last_ttvn,
777 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), 828 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
778 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.')); 829 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
830 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
779 } 831 }
780} 832}
781 833
@@ -783,7 +835,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
783{ 835{
784 struct net_device *net_dev = (struct net_device *)seq->private; 836 struct net_device *net_dev = (struct net_device *)seq->private;
785 struct batadv_priv *bat_priv = netdev_priv(net_dev); 837 struct batadv_priv *bat_priv = netdev_priv(net_dev);
786 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 838 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
787 struct batadv_tt_common_entry *tt_common_entry; 839 struct batadv_tt_common_entry *tt_common_entry;
788 struct batadv_tt_global_entry *tt_global; 840 struct batadv_tt_global_entry *tt_global;
789 struct batadv_hard_iface *primary_if; 841 struct batadv_hard_iface *primary_if;
@@ -884,7 +936,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
884 "Deleting global tt entry %pM: %s\n", 936 "Deleting global tt entry %pM: %s\n",
885 tt_global_entry->common.addr, message); 937 tt_global_entry->common.addr, message);
886 938
887 batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt, 939 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
888 batadv_choose_orig, tt_global_entry->common.addr); 940 batadv_choose_orig, tt_global_entry->common.addr);
889 batadv_tt_global_entry_free_ref(tt_global_entry); 941 batadv_tt_global_entry_free_ref(tt_global_entry);
890 942
@@ -995,7 +1047,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
995 struct batadv_tt_global_entry *tt_global; 1047 struct batadv_tt_global_entry *tt_global;
996 struct batadv_tt_common_entry *tt_common_entry; 1048 struct batadv_tt_common_entry *tt_common_entry;
997 uint32_t i; 1049 uint32_t i;
998 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 1050 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
999 struct hlist_node *node, *safe; 1051 struct hlist_node *node, *safe;
1000 struct hlist_head *head; 1052 struct hlist_head *head;
1001 spinlock_t *list_lock; /* protects write access to the hash lists */ 1053 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1030,49 +1082,63 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1030 orig_node->tt_initialised = false; 1082 orig_node->tt_initialised = false;
1031} 1083}
1032 1084
1033static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv, 1085static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
1034 struct hlist_head *head) 1086 char **msg)
1035{ 1087{
1036 struct batadv_tt_common_entry *tt_common_entry; 1088 bool purge = false;
1037 struct batadv_tt_global_entry *tt_global_entry; 1089 unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
1038 struct hlist_node *node, *node_tmp; 1090 unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
1039
1040 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
1041 hash_entry) {
1042 tt_global_entry = container_of(tt_common_entry,
1043 struct batadv_tt_global_entry,
1044 common);
1045 if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
1046 continue;
1047 if (!batadv_has_timed_out(tt_global_entry->roam_at,
1048 BATADV_TT_CLIENT_ROAM_TIMEOUT))
1049 continue;
1050 1091
1051 batadv_dbg(BATADV_DBG_TT, bat_priv, 1092 if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
1052 "Deleting global tt entry (%pM): Roaming timeout\n", 1093 batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
1053 tt_global_entry->common.addr); 1094 purge = true;
1095 *msg = "Roaming timeout\n";
1096 }
1054 1097
1055 hlist_del_rcu(node); 1098 if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
1056 batadv_tt_global_entry_free_ref(tt_global_entry); 1099 batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
1100 purge = true;
1101 *msg = "Temporary client timeout\n";
1057 } 1102 }
1103
1104 return purge;
1058} 1105}
1059 1106
1060static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv) 1107static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1061{ 1108{
1062 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 1109 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1063 struct hlist_head *head; 1110 struct hlist_head *head;
1111 struct hlist_node *node, *node_tmp;
1064 spinlock_t *list_lock; /* protects write access to the hash lists */ 1112 spinlock_t *list_lock; /* protects write access to the hash lists */
1065 uint32_t i; 1113 uint32_t i;
1114 char *msg = NULL;
1115 struct batadv_tt_common_entry *tt_common;
1116 struct batadv_tt_global_entry *tt_global;
1066 1117
1067 for (i = 0; i < hash->size; i++) { 1118 for (i = 0; i < hash->size; i++) {
1068 head = &hash->table[i]; 1119 head = &hash->table[i];
1069 list_lock = &hash->list_locks[i]; 1120 list_lock = &hash->list_locks[i];
1070 1121
1071 spin_lock_bh(list_lock); 1122 spin_lock_bh(list_lock);
1072 batadv_tt_global_roam_purge_list(bat_priv, head); 1123 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
1124 hash_entry) {
1125 tt_global = container_of(tt_common,
1126 struct batadv_tt_global_entry,
1127 common);
1128
1129 if (!batadv_tt_global_to_purge(tt_global, &msg))
1130 continue;
1131
1132 batadv_dbg(BATADV_DBG_TT, bat_priv,
1133 "Deleting global tt entry (%pM): %s\n",
1134 tt_global->common.addr, msg);
1135
1136 hlist_del_rcu(node);
1137
1138 batadv_tt_global_entry_free_ref(tt_global);
1139 }
1073 spin_unlock_bh(list_lock); 1140 spin_unlock_bh(list_lock);
1074 } 1141 }
1075
1076} 1142}
1077 1143
1078static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) 1144static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
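
Folding the roam-only purge into one batadv_tt_global_purge() walk hinges on the small predicate above, which reports both whether to purge and why; if an entry matches both conditions, the second check overwrites msg, so the logged reason is the temporary-client timeout. A stand-alone user-space rendering of that shape (flag bits and timeout values are made up):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_ROAM 0x01
#define FLAG_TEMP 0x02

/* stand-in for batadv_has_timed_out(): true once 'age' exceeds 'timeout' */
static bool timed_out(long age, long timeout)
{
	return age > timeout;
}

static bool to_purge(int flags, long roam_age, long added_age, const char **msg)
{
	bool purge = false;

	if ((flags & FLAG_ROAM) && timed_out(roam_age, 600)) {
		purge = true;
		*msg = "Roaming timeout";
	}
	if ((flags & FLAG_TEMP) && timed_out(added_age, 100)) {
		purge = true;
		*msg = "Temporary client timeout";	/* wins if both match */
	}

	return purge;
}

int main(void)
{
	const char *msg = NULL;

	if (to_purge(FLAG_ROAM | FLAG_TEMP, 700, 150, &msg))
		printf("purge: %s\n", msg);	/* prints the TEMP reason */
	return 0;
}
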
@@ -1085,10 +1151,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1085 struct hlist_head *head; 1151 struct hlist_head *head;
1086 uint32_t i; 1152 uint32_t i;
1087 1153
1088 if (!bat_priv->tt_global_hash) 1154 if (!bat_priv->tt.global_hash)
1089 return; 1155 return;
1090 1156
1091 hash = bat_priv->tt_global_hash; 1157 hash = bat_priv->tt.global_hash;
1092 1158
1093 for (i = 0; i < hash->size; i++) { 1159 for (i = 0; i < hash->size; i++) {
1094 head = &hash->table[i]; 1160 head = &hash->table[i];
@@ -1108,7 +1174,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1108 1174
1109 batadv_hash_destroy(hash); 1175 batadv_hash_destroy(hash);
1110 1176
1111 bat_priv->tt_global_hash = NULL; 1177 bat_priv->tt.global_hash = NULL;
1112} 1178}
1113 1179
1114static bool 1180static bool
@@ -1187,7 +1253,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1187 struct batadv_orig_node *orig_node) 1253 struct batadv_orig_node *orig_node)
1188{ 1254{
1189 uint16_t total = 0, total_one; 1255 uint16_t total = 0, total_one;
1190 struct batadv_hashtable *hash = bat_priv->tt_global_hash; 1256 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1191 struct batadv_tt_common_entry *tt_common; 1257 struct batadv_tt_common_entry *tt_common;
1192 struct batadv_tt_global_entry *tt_global; 1258 struct batadv_tt_global_entry *tt_global;
1193 struct hlist_node *node; 1259 struct hlist_node *node;
@@ -1210,6 +1276,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1210 */ 1276 */
1211 if (tt_common->flags & BATADV_TT_CLIENT_ROAM) 1277 if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1212 continue; 1278 continue;
1279 /* Temporary clients have not been announced yet, so
1280 * they have to be skipped while computing the global
1281 * crc
1282 */
1283 if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
1284 continue;
1213 1285
1214 /* find out if this global entry is announced by this 1286 /* find out if this global entry is announced by this
1215 * originator 1287 * originator
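
Skipping TEMP entries here is essential for consistency: orig_node->tt_crc is compared against the checksum the originator itself announces in its OGMs, and a locally guessed, never-announced entry would make the two disagree, triggering spurious full-table tt_requests until the client is either confirmed or purged.
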
@@ -1234,7 +1306,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1234static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) 1306static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1235{ 1307{
1236 uint16_t total = 0, total_one; 1308 uint16_t total = 0, total_one;
1237 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 1309 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
1238 struct batadv_tt_common_entry *tt_common; 1310 struct batadv_tt_common_entry *tt_common;
1239 struct hlist_node *node; 1311 struct hlist_node *node;
1240 struct hlist_head *head; 1312 struct hlist_head *head;
@@ -1267,14 +1339,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
1267{ 1339{
1268 struct batadv_tt_req_node *node, *safe; 1340 struct batadv_tt_req_node *node, *safe;
1269 1341
1270 spin_lock_bh(&bat_priv->tt_req_list_lock); 1342 spin_lock_bh(&bat_priv->tt.req_list_lock);
1271 1343
1272 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1344 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1273 list_del(&node->list); 1345 list_del(&node->list);
1274 kfree(node); 1346 kfree(node);
1275 } 1347 }
1276 1348
1277 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1349 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1278} 1350}
1279 1351
1280static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, 1352static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1304,15 +1376,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
1304{ 1376{
1305 struct batadv_tt_req_node *node, *safe; 1377 struct batadv_tt_req_node *node, *safe;
1306 1378
1307 spin_lock_bh(&bat_priv->tt_req_list_lock); 1379 spin_lock_bh(&bat_priv->tt.req_list_lock);
1308 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1380 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1309 if (batadv_has_timed_out(node->issued_at, 1381 if (batadv_has_timed_out(node->issued_at,
1310 BATADV_TT_REQUEST_TIMEOUT)) { 1382 BATADV_TT_REQUEST_TIMEOUT)) {
1311 list_del(&node->list); 1383 list_del(&node->list);
1312 kfree(node); 1384 kfree(node);
1313 } 1385 }
1314 } 1386 }
1315 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1387 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1316} 1388}
1317 1389
1318/* returns the pointer to the new tt_req_node struct if no request 1390/* returns the pointer to the new tt_req_node struct if no request
@@ -1324,8 +1396,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1324{ 1396{
1325 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; 1397 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1326 1398
1327 spin_lock_bh(&bat_priv->tt_req_list_lock); 1399 spin_lock_bh(&bat_priv->tt.req_list_lock);
1328 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { 1400 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
1329 if (batadv_compare_eth(tt_req_node_tmp, orig_node) && 1401 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1330 !batadv_has_timed_out(tt_req_node_tmp->issued_at, 1402 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1331 BATADV_TT_REQUEST_TIMEOUT)) 1403 BATADV_TT_REQUEST_TIMEOUT))
@@ -1339,9 +1411,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1339 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN); 1411 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1340 tt_req_node->issued_at = jiffies; 1412 tt_req_node->issued_at = jiffies;
1341 1413
1342 list_add(&tt_req_node->list, &bat_priv->tt_req_list); 1414 list_add(&tt_req_node->list, &bat_priv->tt.req_list);
1343unlock: 1415unlock:
1344 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1416 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1345 return tt_req_node; 1417 return tt_req_node;
1346} 1418}
1347 1419
@@ -1363,7 +1435,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
1363 const struct batadv_tt_global_entry *tt_global_entry; 1435 const struct batadv_tt_global_entry *tt_global_entry;
1364 const struct batadv_orig_node *orig_node = data_ptr; 1436 const struct batadv_orig_node *orig_node = data_ptr;
1365 1437
1366 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM) 1438 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
1439 tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
1367 return 0; 1440 return 0;
1368 1441
1369 tt_global_entry = container_of(tt_common_entry, 1442 tt_global_entry = container_of(tt_common_entry,
@@ -1507,9 +1580,9 @@ out:
1507 if (ret) 1580 if (ret)
1508 kfree_skb(skb); 1581 kfree_skb(skb);
1509 if (ret && tt_req_node) { 1582 if (ret && tt_req_node) {
1510 spin_lock_bh(&bat_priv->tt_req_list_lock); 1583 spin_lock_bh(&bat_priv->tt.req_list_lock);
1511 list_del(&tt_req_node->list); 1584 list_del(&tt_req_node->list);
1512 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1585 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1513 kfree(tt_req_node); 1586 kfree(tt_req_node);
1514 } 1587 }
1515 return ret; 1588 return ret;
@@ -1530,6 +1603,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1530 uint16_t tt_len, tt_tot; 1603 uint16_t tt_len, tt_tot;
1531 struct sk_buff *skb = NULL; 1604 struct sk_buff *skb = NULL;
1532 struct batadv_tt_query_packet *tt_response; 1605 struct batadv_tt_query_packet *tt_response;
1606 uint8_t *packet_pos;
1533 size_t len; 1607 size_t len;
1534 1608
1535 batadv_dbg(BATADV_DBG_TT, bat_priv, 1609 batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1583,8 +1657,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1583 goto unlock; 1657 goto unlock;
1584 1658
1585 skb_reserve(skb, ETH_HLEN); 1659 skb_reserve(skb, ETH_HLEN);
1586 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, 1660 packet_pos = skb_put(skb, len);
1587 len); 1661 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1588 tt_response->ttvn = req_ttvn; 1662 tt_response->ttvn = req_ttvn;
1589 tt_response->tt_data = htons(tt_tot); 1663 tt_response->tt_data = htons(tt_tot);
1590 1664
@@ -1600,7 +1674,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1600 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1674 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1601 1675
1602 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1676 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1603 bat_priv->tt_global_hash, 1677 bat_priv->tt.global_hash,
1604 primary_if, 1678 primary_if,
1605 batadv_tt_global_valid, 1679 batadv_tt_global_valid,
1606 req_dst_orig_node); 1680 req_dst_orig_node);
@@ -1663,6 +1737,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1663 uint16_t tt_len, tt_tot; 1737 uint16_t tt_len, tt_tot;
1664 struct sk_buff *skb = NULL; 1738 struct sk_buff *skb = NULL;
1665 struct batadv_tt_query_packet *tt_response; 1739 struct batadv_tt_query_packet *tt_response;
1740 uint8_t *packet_pos;
1666 size_t len; 1741 size_t len;
1667 1742
1668 batadv_dbg(BATADV_DBG_TT, bat_priv, 1743 batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1671,7 +1746,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1671 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1746 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1672 1747
1673 1748
1674 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 1749 my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1675 req_ttvn = tt_request->ttvn; 1750 req_ttvn = tt_request->ttvn;
1676 1751
1677 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src); 1752 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1690,7 +1765,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1690 * is too big send the whole local translation table 1765 * is too big send the whole local translation table
1691 */ 1766 */
1692 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn || 1767 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
1693 !bat_priv->tt_buff) 1768 !bat_priv->tt.last_changeset)
1694 full_table = true; 1769 full_table = true;
1695 else 1770 else
1696 full_table = false; 1771 full_table = false;
@@ -1699,8 +1774,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 1699 * I'll send only one packet with as many TT entries as I can 1774
1700 */ 1775 */
1701 if (!full_table) { 1776 if (!full_table) {
1702 spin_lock_bh(&bat_priv->tt_buff_lock); 1777 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
1703 tt_len = bat_priv->tt_buff_len; 1778 tt_len = bat_priv->tt.last_changeset_len;
1704 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1779 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1705 1780
1706 len = sizeof(*tt_response) + tt_len; 1781 len = sizeof(*tt_response) + tt_len;
@@ -1709,22 +1784,22 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1709 goto unlock; 1784 goto unlock;
1710 1785
1711 skb_reserve(skb, ETH_HLEN); 1786 skb_reserve(skb, ETH_HLEN);
1712 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, 1787 packet_pos = skb_put(skb, len);
1713 len); 1788 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1714 tt_response->ttvn = req_ttvn; 1789 tt_response->ttvn = req_ttvn;
1715 tt_response->tt_data = htons(tt_tot); 1790 tt_response->tt_data = htons(tt_tot);
1716 1791
1717 tt_buff = skb->data + sizeof(*tt_response); 1792 tt_buff = skb->data + sizeof(*tt_response);
1718 memcpy(tt_buff, bat_priv->tt_buff, 1793 memcpy(tt_buff, bat_priv->tt.last_changeset,
1719 bat_priv->tt_buff_len); 1794 bat_priv->tt.last_changeset_len);
1720 spin_unlock_bh(&bat_priv->tt_buff_lock); 1795 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1721 } else { 1796 } else {
1722 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt); 1797 tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
1723 tt_len *= sizeof(struct batadv_tt_change); 1798 tt_len *= sizeof(struct batadv_tt_change);
1724 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); 1799 ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1725 1800
1726 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1801 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1727 bat_priv->tt_local_hash, 1802 bat_priv->tt.local_hash,
1728 primary_if, 1803 primary_if,
1729 batadv_tt_local_valid_entry, 1804 batadv_tt_local_valid_entry,
1730 NULL); 1805 NULL);
@@ -1756,7 +1831,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1756 goto out; 1831 goto out;
1757 1832
1758unlock: 1833unlock:
1759 spin_unlock_bh(&bat_priv->tt_buff_lock); 1834 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1760out: 1835out:
1761 if (orig_node) 1836 if (orig_node)
1762 batadv_orig_node_free_ref(orig_node); 1837 batadv_orig_node_free_ref(orig_node);
@@ -1909,14 +1984,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1909 } 1984 }
1910 1985
1911 /* Delete the tt_req_node from pending tt_requests list */ 1986 /* Delete the tt_req_node from pending tt_requests list */
1912 spin_lock_bh(&bat_priv->tt_req_list_lock); 1987 spin_lock_bh(&bat_priv->tt.req_list_lock);
1913 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { 1988 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1914 if (!batadv_compare_eth(node->addr, tt_response->src)) 1989 if (!batadv_compare_eth(node->addr, tt_response->src))
1915 continue; 1990 continue;
1916 list_del(&node->list); 1991 list_del(&node->list);
1917 kfree(node); 1992 kfree(node);
1918 } 1993 }
1919 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1994 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1920 1995
1921 /* Recalculate the CRC for this orig_node and store it */ 1996 /* Recalculate the CRC for this orig_node and store it */
1922 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); 1997 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1950,22 +2025,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
1950{ 2025{
1951 struct batadv_tt_roam_node *node, *safe; 2026 struct batadv_tt_roam_node *node, *safe;
1952 2027
1953 spin_lock_bh(&bat_priv->tt_roam_list_lock); 2028 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1954 2029
1955 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 2030 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
1956 list_del(&node->list); 2031 list_del(&node->list);
1957 kfree(node); 2032 kfree(node);
1958 } 2033 }
1959 2034
1960 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 2035 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
1961} 2036}
1962 2037
1963static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) 2038static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
1964{ 2039{
1965 struct batadv_tt_roam_node *node, *safe; 2040 struct batadv_tt_roam_node *node, *safe;
1966 2041
1967 spin_lock_bh(&bat_priv->tt_roam_list_lock); 2042 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1968 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { 2043 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
1969 if (!batadv_has_timed_out(node->first_time, 2044 if (!batadv_has_timed_out(node->first_time,
1970 BATADV_ROAMING_MAX_TIME)) 2045 BATADV_ROAMING_MAX_TIME))
1971 continue; 2046 continue;
@@ -1973,7 +2048,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
1973 list_del(&node->list); 2048 list_del(&node->list);
1974 kfree(node); 2049 kfree(node);
1975 } 2050 }
1976 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 2051 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
1977} 2052}
1978 2053
1979/* This function checks whether the client already reached the 2054/* This function checks whether the client already reached the
@@ -1988,11 +2063,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
1988 struct batadv_tt_roam_node *tt_roam_node; 2063 struct batadv_tt_roam_node *tt_roam_node;
1989 bool ret = false; 2064 bool ret = false;
1990 2065
1991 spin_lock_bh(&bat_priv->tt_roam_list_lock); 2066 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1992 /* The new tt_req will be issued only if I'm not waiting for a 2067 /* The new tt_req will be issued only if I'm not waiting for a
1993 * reply from the same orig_node yet 2068 * reply from the same orig_node yet
1994 */ 2069 */
1995 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { 2070 list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
1996 if (!batadv_compare_eth(tt_roam_node->addr, client)) 2071 if (!batadv_compare_eth(tt_roam_node->addr, client))
1997 continue; 2072 continue;
1998 2073
@@ -2017,12 +2092,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
2017 BATADV_ROAMING_MAX_COUNT - 1); 2092 BATADV_ROAMING_MAX_COUNT - 1);
2018 memcpy(tt_roam_node->addr, client, ETH_ALEN); 2093 memcpy(tt_roam_node->addr, client, ETH_ALEN);
2019 2094
2020 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list); 2095 list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
2021 ret = true; 2096 ret = true;
2022 } 2097 }
2023 2098
2024unlock: 2099unlock:
2025 spin_unlock_bh(&bat_priv->tt_roam_list_lock); 2100 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2026 return ret; 2101 return ret;
2027} 2102}
2028 2103
@@ -2086,13 +2161,15 @@ out:
2086static void batadv_tt_purge(struct work_struct *work) 2161static void batadv_tt_purge(struct work_struct *work)
2087{ 2162{
2088 struct delayed_work *delayed_work; 2163 struct delayed_work *delayed_work;
2164 struct batadv_priv_tt *priv_tt;
2089 struct batadv_priv *bat_priv; 2165 struct batadv_priv *bat_priv;
2090 2166
2091 delayed_work = container_of(work, struct delayed_work, work); 2167 delayed_work = container_of(work, struct delayed_work, work);
2092 bat_priv = container_of(delayed_work, struct batadv_priv, tt_work); 2168 priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
2169 bat_priv = container_of(priv_tt, struct batadv_priv, tt);
2093 2170
2094 batadv_tt_local_purge(bat_priv); 2171 batadv_tt_local_purge(bat_priv);
2095 batadv_tt_global_roam_purge(bat_priv); 2172 batadv_tt_global_purge(bat_priv);
2096 batadv_tt_req_purge(bat_priv); 2173 batadv_tt_req_purge(bat_priv);
2097 batadv_tt_roam_purge(bat_priv); 2174 batadv_tt_roam_purge(bat_priv);
2098 2175
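
With the work item moved into the embedded struct batadv_priv_tt, recovering the private data now takes two container_of() steps: from the delayed_work to the tt member, then from the tt member to the enclosing batadv_priv. Both steps are pure compile-time pointer arithmetic. A self-contained user-space demonstration of the two-step recovery (struct names abbreviated):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work { int dummy; };

struct priv_tt {
	struct delayed_work work;
};

struct priv {
	int mesh_state;
	struct priv_tt tt;
};

int main(void)
{
	struct priv p = { .mesh_state = 42 };
	struct delayed_work *w = &p.tt.work;	/* what the workqueue hands back */

	/* step 1: from the work item to its embedding tt struct */
	struct priv_tt *tt = container_of(w, struct priv_tt, work);
	/* step 2: from the tt struct to the enclosing private data */
	struct priv *bp = container_of(tt, struct priv, tt);

	printf("%d\n", bp->mesh_state);	/* prints 42 */
	return 0;
}
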
@@ -2101,7 +2178,7 @@ static void batadv_tt_purge(struct work_struct *work)
2101 2178
2102void batadv_tt_free(struct batadv_priv *bat_priv) 2179void batadv_tt_free(struct batadv_priv *bat_priv)
2103{ 2180{
2104 cancel_delayed_work_sync(&bat_priv->tt_work); 2181 cancel_delayed_work_sync(&bat_priv->tt.work);
2105 2182
2106 batadv_tt_local_table_free(bat_priv); 2183 batadv_tt_local_table_free(bat_priv);
2107 batadv_tt_global_table_free(bat_priv); 2184 batadv_tt_global_table_free(bat_priv);
@@ -2109,7 +2186,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
2109 batadv_tt_changes_list_free(bat_priv); 2186 batadv_tt_changes_list_free(bat_priv);
2110 batadv_tt_roam_list_free(bat_priv); 2187 batadv_tt_roam_list_free(bat_priv);
2111 2188
2112 kfree(bat_priv->tt_buff); 2189 kfree(bat_priv->tt.last_changeset);
2113} 2190}
2114 2191
2115/* This function will enable or disable the specified flags for all the entries 2192/* This function will enable or disable the specified flags for all the entries
@@ -2153,7 +2230,7 @@ out:
2153/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ 2230/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2154static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) 2231static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2155{ 2232{
2156 struct batadv_hashtable *hash = bat_priv->tt_local_hash; 2233 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
2157 struct batadv_tt_common_entry *tt_common; 2234 struct batadv_tt_common_entry *tt_common;
2158 struct batadv_tt_local_entry *tt_local; 2235 struct batadv_tt_local_entry *tt_local;
2159 struct hlist_node *node, *node_tmp; 2236 struct hlist_node *node, *node_tmp;
@@ -2178,7 +2255,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2178 "Deleting local tt entry (%pM): pending\n", 2255 "Deleting local tt entry (%pM): pending\n",
2179 tt_common->addr); 2256 tt_common->addr);
2180 2257
2181 atomic_dec(&bat_priv->num_local_tt); 2258 atomic_dec(&bat_priv->tt.local_entry_num);
2182 hlist_del_rcu(node); 2259 hlist_del_rcu(node);
2183 tt_local = container_of(tt_common, 2260 tt_local = container_of(tt_common,
2184 struct batadv_tt_local_entry, 2261 struct batadv_tt_local_entry,
@@ -2196,26 +2273,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2196{ 2273{
2197 uint16_t changed_num = 0; 2274 uint16_t changed_num = 0;
2198 2275
2199 if (atomic_read(&bat_priv->tt_local_changes) < 1) 2276 if (atomic_read(&bat_priv->tt.local_changes) < 1)
2200 return -ENOENT; 2277 return -ENOENT;
2201 2278
2202 changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash, 2279 changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
2203 BATADV_TT_CLIENT_NEW, false); 2280 BATADV_TT_CLIENT_NEW, false);
2204 2281
2205 /* all reset entries have to be counted as local entries */ 2282 /* all reset entries have to be counted as local entries */
2206 atomic_add(changed_num, &bat_priv->num_local_tt); 2283 atomic_add(changed_num, &bat_priv->tt.local_entry_num);
2207 batadv_tt_local_purge_pending_clients(bat_priv); 2284 batadv_tt_local_purge_pending_clients(bat_priv);
2208 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv); 2285 bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
2209 2286
2210 /* Increment the TTVN only once per OGM interval */ 2287 /* Increment the TTVN only once per OGM interval */
2211 atomic_inc(&bat_priv->ttvn); 2288 atomic_inc(&bat_priv->tt.vn);
2212 batadv_dbg(BATADV_DBG_TT, bat_priv, 2289 batadv_dbg(BATADV_DBG_TT, bat_priv,
2213 "Local changes committed, updating to ttvn %u\n", 2290 "Local changes committed, updating to ttvn %u\n",
2214 (uint8_t)atomic_read(&bat_priv->ttvn)); 2291 (uint8_t)atomic_read(&bat_priv->tt.vn));
2215 bat_priv->tt_poss_change = false; 2292 bat_priv->tt.poss_change = false;
2216 2293
2217 /* reset the sending counter */ 2294 /* reset the sending counter */
2218 atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); 2295 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2219 2296
2220 return batadv_tt_changes_fill_buff(bat_priv, packet_buff, 2297 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2221 packet_buff_len, packet_min_len); 2298 packet_buff_len, packet_min_len);
@@ -2235,7 +2312,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2235 2312
2236 /* if the changes have been sent often enough */ 2313 /* if the changes have been sent often enough */
2237 if ((tt_num_changes < 0) && 2314 if ((tt_num_changes < 0) &&
2238 (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) { 2315 (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
2239 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, 2316 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2240 packet_min_len, packet_min_len); 2317 packet_min_len, packet_min_len);
2241 tt_num_changes = 0; 2318 tt_num_changes = 0;
@@ -2366,3 +2443,22 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2366out: 2443out:
2367 return ret; 2444 return ret;
2368} 2445}
2446
2447bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
2448 struct batadv_orig_node *orig_node,
2449 const unsigned char *addr)
2450{
2451 bool ret = false;
2452
2453 if (!batadv_tt_global_add(bat_priv, orig_node, addr,
2454 BATADV_TT_CLIENT_TEMP,
2455 atomic_read(&orig_node->last_ttvn)))
2456 goto out;
2457
2458 batadv_dbg(BATADV_DBG_TT, bat_priv,
2459 "Added temporary global client (addr: %pM orig: %pM)\n",
2460 addr, orig_node->orig);
2461 ret = true;
2462out:
2463 return ret;
2464}
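
This helper is the translation-table end of the batadv_interface_rx() hook added earlier in this diff: when a data frame arrives from a client that no OGM has announced yet, the receiver installs a TEMP entry pointing at the originator the frame came through, so return traffic can be routed immediately instead of waiting out the OGM interval. The entry is deliberately weak: batadv_tt_global_add() never lets TEMP information override known client state, the CRC loop skips TEMP entries, and batadv_tt_global_purge() drops them after BATADV_TT_CLIENT_TEMP_TIMEOUT if no announcement confirms them.
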
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index ffa87355096b..811fffd4760c 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
59 int packet_min_len); 59 int packet_min_len);
60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, 60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
61 uint8_t *addr); 61 uint8_t *addr);
62 62bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
63 struct batadv_orig_node *orig_node,
64 const unsigned char *addr);
63 65
64#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 66#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 12635fd2c3d3..2ed82caacdca 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -145,6 +145,11 @@ struct batadv_bcast_duplist_entry {
145#endif 145#endif
146 146
147enum batadv_counters { 147enum batadv_counters {
148 BATADV_CNT_TX,
149 BATADV_CNT_TX_BYTES,
150 BATADV_CNT_TX_DROPPED,
151 BATADV_CNT_RX,
152 BATADV_CNT_RX_BYTES,
148 BATADV_CNT_FORWARD, 153 BATADV_CNT_FORWARD,
149 BATADV_CNT_FORWARD_BYTES, 154 BATADV_CNT_FORWARD_BYTES,
150 BATADV_CNT_MGMT_TX, 155 BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@ enum batadv_counters {
160 BATADV_CNT_NUM, 165 BATADV_CNT_NUM,
161}; 166};
162 167
168/**
169 * struct batadv_priv_tt - per mesh interface translation table data
170 * @vn: translation table version number
171 * @local_changes: changes registered in an originator interval
172 * @poss_change: Detect an ongoing roaming phase. If true, then this node
173 * received a roaming_adv and has to inspect every packet directed to it to
174 * check whether it still is the true destination or not. This flag will be
175 * reset to false as soon as this node's ttvn is increased
176 * @changes_list: tracks tt local changes within an originator interval
177 * @req_list: list of pending tt_requests
178 * @local_crc: Checksum of the local table, recomputed before sending a new OGM
179 */
180struct batadv_priv_tt {
181 atomic_t vn;
182 atomic_t ogm_append_cnt;
183 atomic_t local_changes;
184 bool poss_change;
185 struct list_head changes_list;
186 struct batadv_hashtable *local_hash;
187 struct batadv_hashtable *global_hash;
188 struct list_head req_list;
189 struct list_head roam_list;
190 spinlock_t changes_list_lock; /* protects changes_list */
191 spinlock_t req_list_lock; /* protects req_list */
192 spinlock_t roam_list_lock; /* protects roam_list */
193 atomic_t local_entry_num;
194 uint16_t local_crc;
195 unsigned char *last_changeset;
196 int16_t last_changeset_len;
197 spinlock_t last_changeset_lock; /* protects last_changeset */
198 struct delayed_work work;
199};
200
201#ifdef CONFIG_BATMAN_ADV_BLA
202struct batadv_priv_bla {
203 atomic_t num_requests; /* number of bla requests in flight */
204 struct batadv_hashtable *claim_hash;
205 struct batadv_hashtable *backbone_hash;
206 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
207 int bcast_duplist_curr;
208 struct batadv_bla_claim_dst claim_dest;
209 struct delayed_work work;
210};
211#endif
212
213struct batadv_priv_gw {
214 struct hlist_head list;
215 spinlock_t list_lock; /* protects gw_list and curr_gw */
216 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
217 atomic_t reselect;
218};
219
220struct batadv_priv_vis {
221 struct list_head send_list;
222 struct batadv_hashtable *hash;
223 spinlock_t hash_lock; /* protects hash */
224 spinlock_t list_lock; /* protects info::recv_list */
225 struct delayed_work work;
226 struct batadv_vis_info *my_info;
227};
228
163struct batadv_priv { 229struct batadv_priv {
164 atomic_t mesh_state; 230 atomic_t mesh_state;
165 struct net_device_stats stats; 231 struct net_device_stats stats;
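
The types.h hunks above and below are mechanical: the flat tt_* members of struct batadv_priv move into the embedded struct batadv_priv_tt, so every access gains a .tt. step (bat_priv->ttvn becomes bat_priv->tt.vn, tt_crc becomes tt.local_crc, and so on). A hedged before/after sketch with a small field subset; the struct names and members below are illustrative, not the full kernel definitions.

#include <stdatomic.h>

/* Old layout (subset): flat tt_* members on the main private struct. */
struct priv_old {
	atomic_int ttvn;
	atomic_int tt_local_changes;
	unsigned short tt_crc;
};

/* New layout (subset): the same state grouped under one substruct. */
struct priv_tt {
	atomic_int vn;
	atomic_int local_changes;
	unsigned short local_crc;
};

struct priv_new {
	struct priv_tt tt;
};

/* was: atomic_read(&bat_priv->ttvn) */
static int read_ttvn(struct priv_new *bat_priv)
{
	return atomic_load(&bat_priv->tt.vn);
}

int main(void)
{
	struct priv_new p;

	atomic_store(&p.tt.vn, 5);
	atomic_store(&p.tt.local_changes, 0);
	p.tt.local_crc = 0;
	return read_ttvn(&p) == 5 ? 0 : 1;
}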
@@ -179,64 +245,24 @@ struct batadv_priv {
179 atomic_t bcast_seqno; 245 atomic_t bcast_seqno;
180 atomic_t bcast_queue_left; 246 atomic_t bcast_queue_left;
181 atomic_t batman_queue_left; 247 atomic_t batman_queue_left;
182 atomic_t ttvn; /* translation table version number */
183 atomic_t tt_ogm_append_cnt;
184 atomic_t tt_local_changes; /* changes registered in a OGM interval */
185 atomic_t bla_num_requests; /* number of bla requests in flight */
186 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
187 * If true, then I received a Roaming_adv and I have to inspect every
188 * packet directed to me to check whether I am still the true
189 * destination or not. This flag will be reset to false as soon as I
190 * increase my TTVN
191 */
192 bool tt_poss_change;
193 char num_ifaces; 248 char num_ifaces;
194 struct batadv_debug_log *debug_log; 249 struct batadv_debug_log *debug_log;
195 struct kobject *mesh_obj; 250 struct kobject *mesh_obj;
196 struct dentry *debug_dir; 251 struct dentry *debug_dir;
197 struct hlist_head forw_bat_list; 252 struct hlist_head forw_bat_list;
198 struct hlist_head forw_bcast_list; 253 struct hlist_head forw_bcast_list;
199 struct hlist_head gw_list;
200 struct list_head tt_changes_list; /* tracks changes in a OGM int */
201 struct list_head vis_send_list;
202 struct batadv_hashtable *orig_hash; 254 struct batadv_hashtable *orig_hash;
203 struct batadv_hashtable *tt_local_hash;
204 struct batadv_hashtable *tt_global_hash;
205#ifdef CONFIG_BATMAN_ADV_BLA
206 struct batadv_hashtable *claim_hash;
207 struct batadv_hashtable *backbone_hash;
208#endif
209 struct list_head tt_req_list; /* list of pending tt_requests */
210 struct list_head tt_roam_list;
211 struct batadv_hashtable *vis_hash;
212#ifdef CONFIG_BATMAN_ADV_BLA
213 struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
214 int bcast_duplist_curr;
215 struct batadv_bla_claim_dst claim_dest;
216#endif
217 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 255 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
218 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ 256 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
219 spinlock_t tt_changes_list_lock; /* protects tt_changes */
220 spinlock_t tt_req_list_lock; /* protects tt_req_list */
221 spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
222 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
223 spinlock_t vis_hash_lock; /* protects vis_hash */
224 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
225 atomic_t num_local_tt;
226 /* Checksum of the local table, recomputed before sending a new OGM */
227 uint16_t tt_crc;
228 unsigned char *tt_buff;
229 int16_t tt_buff_len;
230 spinlock_t tt_buff_lock; /* protects tt_buff */
231 struct delayed_work tt_work;
232 struct delayed_work orig_work; 257 struct delayed_work orig_work;
233 struct delayed_work vis_work;
234 struct delayed_work bla_work;
235 struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
236 atomic_t gw_reselect;
237 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ 258 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
238 struct batadv_vis_info *my_vis_info;
239 struct batadv_algo_ops *bat_algo_ops; 259 struct batadv_algo_ops *bat_algo_ops;
260#ifdef CONFIG_BATMAN_ADV_BLA
261 struct batadv_priv_bla bla;
262#endif
263 struct batadv_priv_gw gw;
264 struct batadv_priv_tt tt;
265 struct batadv_priv_vis vis;
240}; 266};
241 267
242struct batadv_socket_client { 268struct batadv_socket_client {
@@ -258,6 +284,7 @@ struct batadv_tt_common_entry {
258 uint8_t addr[ETH_ALEN]; 284 uint8_t addr[ETH_ALEN];
259 struct hlist_node hash_entry; 285 struct hlist_node hash_entry;
260 uint16_t flags; 286 uint16_t flags;
287 unsigned long added_at;
261 atomic_t refcount; 288 atomic_t refcount;
262 struct rcu_head rcu; 289 struct rcu_head rcu;
263}; 290};
@@ -277,6 +304,7 @@ struct batadv_tt_global_entry {
277struct batadv_tt_orig_list_entry { 304struct batadv_tt_orig_list_entry {
278 struct batadv_orig_node *orig_node; 305 struct batadv_orig_node *orig_node;
279 uint8_t ttvn; 306 uint8_t ttvn;
307 atomic_t refcount;
280 struct rcu_head rcu; 308 struct rcu_head rcu;
281 struct hlist_node list; 309 struct hlist_node list;
282}; 310};
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 00164645b3f7..f39723281ca1 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,6 +39,7 @@ batadv_frag_merge_packet(struct list_head *head,
39 struct batadv_unicast_packet *unicast_packet; 39 struct batadv_unicast_packet *unicast_packet;
40 int hdr_len = sizeof(*unicast_packet); 40 int hdr_len = sizeof(*unicast_packet);
41 int uni_diff = sizeof(*up) - hdr_len; 41 int uni_diff = sizeof(*up) - hdr_len;
42 uint8_t *packet_pos;
42 43
43 up = (struct batadv_unicast_frag_packet *)skb->data; 44 up = (struct batadv_unicast_frag_packet *)skb->data;
44 /* set skb to the first part and tmp_skb to the second part */ 45 /* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@ batadv_frag_merge_packet(struct list_head *head,
65 kfree_skb(tmp_skb); 66 kfree_skb(tmp_skb);
66 67
67 memmove(skb->data + uni_diff, skb->data, hdr_len); 68 memmove(skb->data + uni_diff, skb->data, hdr_len);
68 unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb, 69 packet_pos = skb_pull(skb, uni_diff);
69 uni_diff); 70 unicast_packet = (struct batadv_unicast_packet *)packet_pos;
70 unicast_packet->header.packet_type = BATADV_UNICAST; 71 unicast_packet->header.packet_type = BATADV_UNICAST;
71 72
72 return skb; 73 return skb;
@@ -121,6 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
121{ 122{
122 struct batadv_frag_packet_list_entry *tfp; 123 struct batadv_frag_packet_list_entry *tfp;
123 struct batadv_unicast_frag_packet *tmp_up = NULL; 124 struct batadv_unicast_frag_packet *tmp_up = NULL;
125 int is_head_tmp, is_head;
124 uint16_t search_seqno; 126 uint16_t search_seqno;
125 127
126 if (up->flags & BATADV_UNI_FRAG_HEAD) 128 if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@ batadv_frag_search_packet(struct list_head *head,
128 else 130 else
129 search_seqno = ntohs(up->seqno)-1; 131 search_seqno = ntohs(up->seqno)-1;
130 132
133 is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
134
131 list_for_each_entry(tfp, head, list) { 135 list_for_each_entry(tfp, head, list) {
132 136
133 if (!tfp->skb) 137 if (!tfp->skb)
@@ -139,9 +143,8 @@ batadv_frag_search_packet(struct list_head *head,
139 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data; 143 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
140 144
141 if (tfp->seqno == search_seqno) { 145 if (tfp->seqno == search_seqno) {
142 146 is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
143 if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) != 147 if (is_head_tmp != is_head)
144 (up->flags & BATADV_UNI_FRAG_HEAD))
145 return tfp; 148 return tfp;
146 else 149 else
147 goto mov_tail; 150 goto mov_tail;
@@ -334,8 +337,7 @@ find_router:
334 /* copy the destination for faster routing */ 337 /* copy the destination for faster routing */
335 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 338 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
336 /* set the destination tt version number */ 339 /* set the destination tt version number */
337 unicast_packet->ttvn = 340 unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
338 (uint8_t)atomic_read(&orig_node->last_ttvn);
339 341
340 /* inform the destination node that we are still missing a correct route 342 /* inform the destination node that we are still missing a correct route
341 * for this client. The destination will receive this packet and will 343 * for this client. The destination will receive this packet and will
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 2a2ea0681469..5abd1454fb07 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref)
41 bat_priv = info->bat_priv; 41 bat_priv = info->bat_priv;
42 42
43 list_del_init(&info->send_list); 43 list_del_init(&info->send_list);
44 spin_lock_bh(&bat_priv->vis_list_lock); 44 spin_lock_bh(&bat_priv->vis.list_lock);
45 list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { 45 list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
46 list_del(&entry->list); 46 list_del(&entry->list);
47 kfree(entry); 47 kfree(entry);
48 } 48 }
49 49
50 spin_unlock_bh(&bat_priv->vis_list_lock); 50 spin_unlock_bh(&bat_priv->vis.list_lock);
51 kfree_skb(info->skb_packet); 51 kfree_skb(info->skb_packet);
52 kfree(info); 52 kfree(info);
53} 53}
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
94static struct batadv_vis_info * 94static struct batadv_vis_info *
95batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) 95batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
96{ 96{
97 struct batadv_hashtable *hash = bat_priv->vis_hash; 97 struct batadv_hashtable *hash = bat_priv->vis.hash;
98 struct hlist_head *head; 98 struct hlist_head *head;
99 struct hlist_node *node; 99 struct hlist_node *node;
100 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; 100 struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
252 struct hlist_head *head; 252 struct hlist_head *head;
253 struct net_device *net_dev = (struct net_device *)seq->private; 253 struct net_device *net_dev = (struct net_device *)seq->private;
254 struct batadv_priv *bat_priv = netdev_priv(net_dev); 254 struct batadv_priv *bat_priv = netdev_priv(net_dev);
255 struct batadv_hashtable *hash = bat_priv->vis_hash; 255 struct batadv_hashtable *hash = bat_priv->vis.hash;
256 uint32_t i; 256 uint32_t i;
257 int ret = 0; 257 int ret = 0;
258 int vis_server = atomic_read(&bat_priv->vis_mode); 258 int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
264 if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE) 264 if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
265 goto out; 265 goto out;
266 266
267 spin_lock_bh(&bat_priv->vis_hash_lock); 267 spin_lock_bh(&bat_priv->vis.hash_lock);
268 for (i = 0; i < hash->size; i++) { 268 for (i = 0; i < hash->size; i++) {
269 head = &hash->table[i]; 269 head = &hash->table[i];
270 batadv_vis_seq_print_text_bucket(seq, head); 270 batadv_vis_seq_print_text_bucket(seq, head);
271 } 271 }
272 spin_unlock_bh(&bat_priv->vis_hash_lock); 272 spin_unlock_bh(&bat_priv->vis.hash_lock);
273 273
274out: 274out:
275 if (primary_if) 275 if (primary_if)
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv,
285{ 285{
286 if (list_empty(&info->send_list)) { 286 if (list_empty(&info->send_list)) {
287 kref_get(&info->refcount); 287 kref_get(&info->refcount);
288 list_add_tail(&info->send_list, &bat_priv->vis_send_list); 288 list_add_tail(&info->send_list, &bat_priv->vis.send_list);
289 } 289 }
290} 290}
291 291
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv,
311 return; 311 return;
312 312
313 memcpy(entry->mac, mac, ETH_ALEN); 313 memcpy(entry->mac, mac, ETH_ALEN);
314 spin_lock_bh(&bat_priv->vis_list_lock); 314 spin_lock_bh(&bat_priv->vis.list_lock);
315 list_add_tail(&entry->list, recv_list); 315 list_add_tail(&entry->list, recv_list);
316 spin_unlock_bh(&bat_priv->vis_list_lock); 316 spin_unlock_bh(&bat_priv->vis.list_lock);
317} 317}
318 318
319/* returns 1 if this mac is in the recv_list */ 319/* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
323{ 323{
324 const struct batadv_recvlist_node *entry; 324 const struct batadv_recvlist_node *entry;
325 325
326 spin_lock_bh(&bat_priv->vis_list_lock); 326 spin_lock_bh(&bat_priv->vis.list_lock);
327 list_for_each_entry(entry, recv_list, list) { 327 list_for_each_entry(entry, recv_list, list) {
328 if (batadv_compare_eth(entry->mac, mac)) { 328 if (batadv_compare_eth(entry->mac, mac)) {
329 spin_unlock_bh(&bat_priv->vis_list_lock); 329 spin_unlock_bh(&bat_priv->vis.list_lock);
330 return 1; 330 return 1;
331 } 331 }
332 } 332 }
333 spin_unlock_bh(&bat_priv->vis_list_lock); 333 spin_unlock_bh(&bat_priv->vis.list_lock);
334 return 0; 334 return 0;
335} 335}
336 336
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
354 354
355 *is_new = 0; 355 *is_new = 0;
356 /* sanity check */ 356 /* sanity check */
357 if (!bat_priv->vis_hash) 357 if (!bat_priv->vis.hash)
358 return NULL; 358 return NULL;
359 359
360 /* see if the packet is already in vis_hash */ 360 /* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
385 } 385 }
386 } 386 }
387 /* remove old entry */ 387 /* remove old entry */
388 batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp, 388 batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
389 batadv_vis_info_choose, old_info); 389 batadv_vis_info_choose, old_info);
390 batadv_send_list_del(old_info); 390 batadv_send_list_del(old_info);
391 kref_put(&old_info->refcount, batadv_free_info); 391 kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
426 batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); 426 batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
427 427
428 /* try to add it */ 428 /* try to add it */
429 hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp, 429 hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
430 batadv_vis_info_choose, info, 430 batadv_vis_info_choose, info,
431 &info->hash_entry); 431 &info->hash_entry);
432 if (hash_added != 0) { 432 if (hash_added != 0) {
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
449 449
450 make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC); 450 make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
451 451
452 spin_lock_bh(&bat_priv->vis_hash_lock); 452 spin_lock_bh(&bat_priv->vis.hash_lock);
453 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, 453 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
454 &is_new, make_broadcast); 454 &is_new, make_broadcast);
455 if (!info) 455 if (!info)
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
461 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new) 461 if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
462 batadv_send_list_add(bat_priv, info); 462 batadv_send_list_add(bat_priv, info);
463end: 463end:
464 spin_unlock_bh(&bat_priv->vis_hash_lock); 464 spin_unlock_bh(&bat_priv->vis.hash_lock);
465} 465}
466 466
467/* handle an incoming client update packet and schedule forward if needed. */ 467/* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
484 batadv_is_my_mac(vis_packet->target_orig)) 484 batadv_is_my_mac(vis_packet->target_orig))
485 are_target = 1; 485 are_target = 1;
486 486
487 spin_lock_bh(&bat_priv->vis_hash_lock); 487 spin_lock_bh(&bat_priv->vis.hash_lock);
488 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len, 488 info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
489 &is_new, are_target); 489 &is_new, are_target);
490 490
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
505 } 505 }
506 506
507end: 507end:
508 spin_unlock_bh(&bat_priv->vis_hash_lock); 508 spin_unlock_bh(&bat_priv->vis.hash_lock);
509} 509}
510 510
511/* Walk the originators and find the VIS server with the best tq. Set the packet 511/* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
574 struct hlist_head *head; 574 struct hlist_head *head;
575 struct batadv_orig_node *orig_node; 575 struct batadv_orig_node *orig_node;
576 struct batadv_neigh_node *router; 576 struct batadv_neigh_node *router;
577 struct batadv_vis_info *info = bat_priv->my_vis_info; 577 struct batadv_vis_info *info = bat_priv->vis.my_info;
578 struct batadv_vis_packet *packet; 578 struct batadv_vis_packet *packet;
579 struct batadv_vis_info_entry *entry; 579 struct batadv_vis_info_entry *entry;
580 struct batadv_tt_common_entry *tt_common_entry; 580 struct batadv_tt_common_entry *tt_common_entry;
581 uint8_t *packet_pos;
581 int best_tq = -1; 582 int best_tq = -1;
582 uint32_t i; 583 uint32_t i;
583 584
@@ -618,8 +619,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
618 goto next; 619 goto next;
619 620
620 /* fill one entry into buffer. */ 621 /* fill one entry into buffer. */
621 entry = (struct batadv_vis_info_entry *) 622 packet_pos = skb_put(info->skb_packet, sizeof(*entry));
622 skb_put(info->skb_packet, sizeof(*entry)); 623 entry = (struct batadv_vis_info_entry *)packet_pos;
623 memcpy(entry->src, 624 memcpy(entry->src,
624 router->if_incoming->net_dev->dev_addr, 625 router->if_incoming->net_dev->dev_addr,
625 ETH_ALEN); 626 ETH_ALEN);
@@ -636,7 +637,7 @@ next:
636 rcu_read_unlock(); 637 rcu_read_unlock();
637 } 638 }
638 639
639 hash = bat_priv->tt_local_hash; 640 hash = bat_priv->tt.local_hash;
640 641
641 for (i = 0; i < hash->size; i++) { 642 for (i = 0; i < hash->size; i++) {
642 head = &hash->table[i]; 643 head = &hash->table[i];
@@ -644,9 +645,8 @@ next:
644 rcu_read_lock(); 645 rcu_read_lock();
645 hlist_for_each_entry_rcu(tt_common_entry, node, head, 646 hlist_for_each_entry_rcu(tt_common_entry, node, head,
646 hash_entry) { 647 hash_entry) {
647 entry = (struct batadv_vis_info_entry *) 648 packet_pos = skb_put(info->skb_packet, sizeof(*entry));
648 skb_put(info->skb_packet, 649 entry = (struct batadv_vis_info_entry *)packet_pos;
649 sizeof(*entry));
650 memset(entry->src, 0, ETH_ALEN); 650 memset(entry->src, 0, ETH_ALEN);
651 memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN); 651 memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
652 entry->quality = 0; /* 0 means TT */ 652 entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@ unlock:
671static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) 671static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
672{ 672{
673 uint32_t i; 673 uint32_t i;
674 struct batadv_hashtable *hash = bat_priv->vis_hash; 674 struct batadv_hashtable *hash = bat_priv->vis.hash;
675 struct hlist_node *node, *node_tmp; 675 struct hlist_node *node, *node_tmp;
676 struct hlist_head *head; 676 struct hlist_head *head;
677 struct batadv_vis_info *info; 677 struct batadv_vis_info *info;
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
682 hlist_for_each_entry_safe(info, node, node_tmp, 682 hlist_for_each_entry_safe(info, node, node_tmp,
683 head, hash_entry) { 683 head, hash_entry) {
684 /* never purge own data. */ 684 /* never purge own data. */
685 if (info == bat_priv->my_vis_info) 685 if (info == bat_priv->vis.my_info)
686 continue; 686 continue;
687 687
688 if (batadv_has_timed_out(info->first_seen, 688 if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@ out:
814/* called from timer; send (and maybe generate) vis packet. */ 814/* called from timer; send (and maybe generate) vis packet. */
815static void batadv_send_vis_packets(struct work_struct *work) 815static void batadv_send_vis_packets(struct work_struct *work)
816{ 816{
817 struct delayed_work *delayed_work = 817 struct delayed_work *delayed_work;
818 container_of(work, struct delayed_work, work);
819 struct batadv_priv *bat_priv; 818 struct batadv_priv *bat_priv;
819 struct batadv_priv_vis *priv_vis;
820 struct batadv_vis_info *info; 820 struct batadv_vis_info *info;
821 821
822 bat_priv = container_of(delayed_work, struct batadv_priv, vis_work); 822 delayed_work = container_of(work, struct delayed_work, work);
823 spin_lock_bh(&bat_priv->vis_hash_lock); 823 priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
824 bat_priv = container_of(priv_vis, struct batadv_priv, vis);
825 spin_lock_bh(&bat_priv->vis.hash_lock);
824 batadv_purge_vis_packets(bat_priv); 826 batadv_purge_vis_packets(bat_priv);
825 827
826 if (batadv_generate_vis_packet(bat_priv) == 0) { 828 if (batadv_generate_vis_packet(bat_priv) == 0) {
827 /* schedule if generation was successful */ 829 /* schedule if generation was successful */
828 batadv_send_list_add(bat_priv, bat_priv->my_vis_info); 830 batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
829 } 831 }
830 832
831 while (!list_empty(&bat_priv->vis_send_list)) { 833 while (!list_empty(&bat_priv->vis.send_list)) {
832 info = list_first_entry(&bat_priv->vis_send_list, 834 info = list_first_entry(&bat_priv->vis.send_list,
833 typeof(*info), send_list); 835 typeof(*info), send_list);
834 836
835 kref_get(&info->refcount); 837 kref_get(&info->refcount);
836 spin_unlock_bh(&bat_priv->vis_hash_lock); 838 spin_unlock_bh(&bat_priv->vis.hash_lock);
837 839
838 batadv_send_vis_packet(bat_priv, info); 840 batadv_send_vis_packet(bat_priv, info);
839 841
840 spin_lock_bh(&bat_priv->vis_hash_lock); 842 spin_lock_bh(&bat_priv->vis.hash_lock);
841 batadv_send_list_del(info); 843 batadv_send_list_del(info);
842 kref_put(&info->refcount, batadv_free_info); 844 kref_put(&info->refcount, batadv_free_info);
843 } 845 }
844 spin_unlock_bh(&bat_priv->vis_hash_lock); 846 spin_unlock_bh(&bat_priv->vis.hash_lock);
845 batadv_start_vis_timer(bat_priv); 847 batadv_start_vis_timer(bat_priv);
846} 848}
847 849
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
856 unsigned long first_seen; 858 unsigned long first_seen;
857 struct sk_buff *tmp_skb; 859 struct sk_buff *tmp_skb;
858 860
859 if (bat_priv->vis_hash) 861 if (bat_priv->vis.hash)
860 return 0; 862 return 0;
861 863
862 spin_lock_bh(&bat_priv->vis_hash_lock); 864 spin_lock_bh(&bat_priv->vis.hash_lock);
863 865
864 bat_priv->vis_hash = batadv_hash_new(256); 866 bat_priv->vis.hash = batadv_hash_new(256);
865 if (!bat_priv->vis_hash) { 867 if (!bat_priv->vis.hash) {
866 pr_err("Can't initialize vis_hash\n"); 868 pr_err("Can't initialize vis_hash\n");
867 goto err; 869 goto err;
868 } 870 }
869 871
870 bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); 872 bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
871 if (!bat_priv->my_vis_info) 873 if (!bat_priv->vis.my_info)
872 goto err; 874 goto err;
873 875
874 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN; 876 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
875 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len); 877 bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
876 if (!bat_priv->my_vis_info->skb_packet) 878 if (!bat_priv->vis.my_info->skb_packet)
877 goto free_info; 879 goto free_info;
878 880
879 skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN); 881 skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
880 tmp_skb = bat_priv->my_vis_info->skb_packet; 882 tmp_skb = bat_priv->vis.my_info->skb_packet;
881 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); 883 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
882 884
883 /* prefill the vis info */ 885 /* prefill the vis info */
884 first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL); 886 first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
885 bat_priv->my_vis_info->first_seen = first_seen; 887 bat_priv->vis.my_info->first_seen = first_seen;
886 INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list); 888 INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
887 INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); 889 INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
888 kref_init(&bat_priv->my_vis_info->refcount); 890 kref_init(&bat_priv->vis.my_info->refcount);
889 bat_priv->my_vis_info->bat_priv = bat_priv; 891 bat_priv->vis.my_info->bat_priv = bat_priv;
890 packet->header.version = BATADV_COMPAT_VERSION; 892 packet->header.version = BATADV_COMPAT_VERSION;
891 packet->header.packet_type = BATADV_VIS; 893 packet->header.packet_type = BATADV_VIS;
892 packet->header.ttl = BATADV_TTL; 894 packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
894 packet->reserved = 0; 896 packet->reserved = 0;
895 packet->entries = 0; 897 packet->entries = 0;
896 898
897 INIT_LIST_HEAD(&bat_priv->vis_send_list); 899 INIT_LIST_HEAD(&bat_priv->vis.send_list);
898 900
899 hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp, 901 hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
900 batadv_vis_info_choose, 902 batadv_vis_info_choose,
901 bat_priv->my_vis_info, 903 bat_priv->vis.my_info,
902 &bat_priv->my_vis_info->hash_entry); 904 &bat_priv->vis.my_info->hash_entry);
903 if (hash_added != 0) { 905 if (hash_added != 0) {
904 pr_err("Can't add own vis packet into hash\n"); 906 pr_err("Can't add own vis packet into hash\n");
905 /* not in hash, need to remove it manually. */ 907 /* not in hash, need to remove it manually. */
906 kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info); 908 kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
907 goto err; 909 goto err;
908 } 910 }
909 911
910 spin_unlock_bh(&bat_priv->vis_hash_lock); 912 spin_unlock_bh(&bat_priv->vis.hash_lock);
911 batadv_start_vis_timer(bat_priv); 913 batadv_start_vis_timer(bat_priv);
912 return 0; 914 return 0;
913 915
914free_info: 916free_info:
915 kfree(bat_priv->my_vis_info); 917 kfree(bat_priv->vis.my_info);
916 bat_priv->my_vis_info = NULL; 918 bat_priv->vis.my_info = NULL;
917err: 919err:
918 spin_unlock_bh(&bat_priv->vis_hash_lock); 920 spin_unlock_bh(&bat_priv->vis.hash_lock);
919 batadv_vis_quit(bat_priv); 921 batadv_vis_quit(bat_priv);
920 return -ENOMEM; 922 return -ENOMEM;
921} 923}
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg)
933/* shutdown vis-server */ 935/* shutdown vis-server */
934void batadv_vis_quit(struct batadv_priv *bat_priv) 936void batadv_vis_quit(struct batadv_priv *bat_priv)
935{ 937{
936 if (!bat_priv->vis_hash) 938 if (!bat_priv->vis.hash)
937 return; 939 return;
938 940
939 cancel_delayed_work_sync(&bat_priv->vis_work); 941 cancel_delayed_work_sync(&bat_priv->vis.work);
940 942
941 spin_lock_bh(&bat_priv->vis_hash_lock); 943 spin_lock_bh(&bat_priv->vis.hash_lock);
942 /* properly remove, kill timers ... */ 944 /* properly remove, kill timers ... */
943 batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL); 945 batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
944 bat_priv->vis_hash = NULL; 946 bat_priv->vis.hash = NULL;
945 bat_priv->my_vis_info = NULL; 947 bat_priv->vis.my_info = NULL;
946 spin_unlock_bh(&bat_priv->vis_hash_lock); 948 spin_unlock_bh(&bat_priv->vis.hash_lock);
947} 949}
948 950
949/* schedule packets for (re)transmission */ 951/* schedule packets for (re)transmission */
950static void batadv_start_vis_timer(struct batadv_priv *bat_priv) 952static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
951{ 953{
952 INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets); 954 INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
953 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work, 955 queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
954 msecs_to_jiffies(BATADV_VIS_INTERVAL)); 956 msecs_to_jiffies(BATADV_VIS_INTERVAL));
955} 957}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 84e716ed8963..873282fa86da 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -20,7 +20,7 @@
20#ifndef _NET_BATMAN_ADV_VIS_H_ 20#ifndef _NET_BATMAN_ADV_VIS_H_
21#define _NET_BATMAN_ADV_VIS_H_ 21#define _NET_BATMAN_ADV_VIS_H_
22 22
23/* timeout of vis packets in miliseconds */ 23/* timeout of vis packets in milliseconds */
24#define BATADV_VIS_TIMEOUT 200000 24#define BATADV_VIS_TIMEOUT 200000
25 25
26int batadv_vis_seq_print_text(struct seq_file *seq, void *offset); 26int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 69e38db28e5f..a8020293f342 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
84 return -1; 84 return -1;
85 } 85 }
86 } else { 86 } else {
87 pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
88 memcpy(&client->fsid, fsid, sizeof(*fsid)); 87 memcpy(&client->fsid, fsid, sizeof(*fsid));
89 } 88 }
90 return 0; 89 return 0;
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 54b531a01121..38b5dc1823d4 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client)
189 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, 189 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
190 client->monc.auth->global_id); 190 client->monc.auth->global_id);
191 191
192 dout("ceph_debugfs_client_init %p %s\n", client, name);
193
194 BUG_ON(client->debugfs_dir);
192 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); 195 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
193 if (!client->debugfs_dir) 196 if (!client->debugfs_dir)
194 goto out; 197 goto out;
@@ -234,6 +237,7 @@ out:
234 237
235void ceph_debugfs_client_cleanup(struct ceph_client *client) 238void ceph_debugfs_client_cleanup(struct ceph_client *client)
236{ 239{
240 dout("ceph_debugfs_client_cleanup %p\n", client);
237 debugfs_remove(client->debugfs_osdmap); 241 debugfs_remove(client->debugfs_osdmap);
238 debugfs_remove(client->debugfs_monmap); 242 debugfs_remove(client->debugfs_monmap);
239 debugfs_remove(client->osdc.debugfs_file); 243 debugfs_remove(client->osdc.debugfs_file);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b9796750034a..24c5eea8c45b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con)
915 con->out_connect.authorizer_len = auth ? 915 con->out_connect.authorizer_len = auth ?
916 cpu_to_le32(auth->authorizer_buf_len) : 0; 916 cpu_to_le32(auth->authorizer_buf_len) : 0;
917 917
918 con_out_kvec_reset(con);
919 con_out_kvec_add(con, sizeof (con->out_connect), 918 con_out_kvec_add(con, sizeof (con->out_connect),
920 &con->out_connect); 919 &con->out_connect);
921 if (auth && auth->authorizer_buf_len) 920 if (auth && auth->authorizer_buf_len)
@@ -1557,6 +1556,7 @@ static int process_connect(struct ceph_connection *con)
1557 return -1; 1556 return -1;
1558 } 1557 }
1559 con->auth_retry = 1; 1558 con->auth_retry = 1;
1559 con_out_kvec_reset(con);
1560 ret = prepare_write_connect(con); 1560 ret = prepare_write_connect(con);
1561 if (ret < 0) 1561 if (ret < 0)
1562 return ret; 1562 return ret;
@@ -1577,6 +1577,7 @@ static int process_connect(struct ceph_connection *con)
1577 ENTITY_NAME(con->peer_name), 1577 ENTITY_NAME(con->peer_name),
1578 ceph_pr_addr(&con->peer_addr.in_addr)); 1578 ceph_pr_addr(&con->peer_addr.in_addr));
1579 reset_connection(con); 1579 reset_connection(con);
1580 con_out_kvec_reset(con);
1580 ret = prepare_write_connect(con); 1581 ret = prepare_write_connect(con);
1581 if (ret < 0) 1582 if (ret < 0)
1582 return ret; 1583 return ret;
@@ -1601,6 +1602,7 @@ static int process_connect(struct ceph_connection *con)
1601 le32_to_cpu(con->out_connect.connect_seq), 1602 le32_to_cpu(con->out_connect.connect_seq),
1602 le32_to_cpu(con->in_reply.connect_seq)); 1603 le32_to_cpu(con->in_reply.connect_seq));
1603 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 1604 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
1605 con_out_kvec_reset(con);
1604 ret = prepare_write_connect(con); 1606 ret = prepare_write_connect(con);
1605 if (ret < 0) 1607 if (ret < 0)
1606 return ret; 1608 return ret;
@@ -1617,6 +1619,7 @@ static int process_connect(struct ceph_connection *con)
1617 le32_to_cpu(con->in_reply.global_seq)); 1619 le32_to_cpu(con->in_reply.global_seq));
1618 get_global_seq(con->msgr, 1620 get_global_seq(con->msgr,
1619 le32_to_cpu(con->in_reply.global_seq)); 1621 le32_to_cpu(con->in_reply.global_seq));
1622 con_out_kvec_reset(con);
1620 ret = prepare_write_connect(con); 1623 ret = prepare_write_connect(con);
1621 if (ret < 0) 1624 if (ret < 0)
1622 return ret; 1625 return ret;
@@ -2135,7 +2138,11 @@ more:
2135 BUG_ON(con->state != CON_STATE_CONNECTING); 2138 BUG_ON(con->state != CON_STATE_CONNECTING);
2136 con->state = CON_STATE_NEGOTIATING; 2139 con->state = CON_STATE_NEGOTIATING;
2137 2140
2138 /* Banner is good, exchange connection info */ 2141 /*
2142 * Received banner is good, exchange connection info.
2143 * Do not reset out_kvec, as sending our banner raced
2144 * with receiving peer banner after connect completed.
2145 */
2139 ret = prepare_write_connect(con); 2146 ret = prepare_write_connect(con);
2140 if (ret < 0) 2147 if (ret < 0)
2141 goto out; 2148 goto out;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 105d533b55f3..900ea0f043fc 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -311,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
311EXPORT_SYMBOL(ceph_monc_open_session); 311EXPORT_SYMBOL(ceph_monc_open_session);
312 312
313/* 313/*
314 * We require the fsid and global_id in order to initialize our
315 * debugfs dir.
316 */
317static bool have_debugfs_info(struct ceph_mon_client *monc)
318{
319 dout("have_debugfs_info fsid %d globalid %lld\n",
320 (int)monc->client->have_fsid, monc->auth->global_id);
321 return monc->client->have_fsid && monc->auth->global_id > 0;
322}
323
324/*
314 * The monitor responds with a mount ack to indicate mount success. The 325 * The monitor responds with a mount ack to indicate mount success. The
315 * included client ticket allows the client to talk to MDSs and OSDs. 326 * included client ticket allows the client to talk to MDSs and OSDs.
316 */ 327 */
@@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
320 struct ceph_client *client = monc->client; 331 struct ceph_client *client = monc->client;
321 struct ceph_monmap *monmap = NULL, *old = monc->monmap; 332 struct ceph_monmap *monmap = NULL, *old = monc->monmap;
322 void *p, *end; 333 void *p, *end;
334 int had_debugfs_info, init_debugfs = 0;
323 335
324 mutex_lock(&monc->mutex); 336 mutex_lock(&monc->mutex);
325 337
338 had_debugfs_info = have_debugfs_info(monc);
339
326 dout("handle_monmap\n"); 340 dout("handle_monmap\n");
327 p = msg->front.iov_base; 341 p = msg->front.iov_base;
328 end = p + msg->front.iov_len; 342 end = p + msg->front.iov_len;
@@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
344 358
345 if (!client->have_fsid) { 359 if (!client->have_fsid) {
346 client->have_fsid = true; 360 client->have_fsid = true;
361 if (!had_debugfs_info && have_debugfs_info(monc)) {
362 pr_info("client%lld fsid %pU\n",
363 ceph_client_id(monc->client),
364 &monc->client->fsid);
365 init_debugfs = 1;
366 }
347 mutex_unlock(&monc->mutex); 367 mutex_unlock(&monc->mutex);
348 /* 368
349 * do debugfs initialization without mutex to avoid 369 if (init_debugfs) {
350 * creating a locking dependency 370 /*
351 */ 371 * do debugfs initialization without mutex to avoid
352 ceph_debugfs_client_init(client); 372 * creating a locking dependency
373 */
374 ceph_debugfs_client_init(monc->client);
375 }
376
353 goto out_unlocked; 377 goto out_unlocked;
354 } 378 }
355out: 379out:
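
Both ceph hunks apply the same idiom: snapshot have_debugfs_info() while monc->mutex is held, update state, and if the fsid/global_id pair just became complete, run ceph_debugfs_client_init() only after unlocking, so debugfs setup never nests inside the monitor mutex. A generic, hedged sketch of the snapshot-then-act-outside-the-lock pattern; pthreads stand in for the kernel mutex and a printf for the debugfs init.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_fsid, have_global_id;

static bool info_complete(void) { return have_fsid && have_global_id; }

static void init_outside_lock(void)
{
	printf("init debugfs (no mutex held)\n");
}

static void handle_event(void)
{
	bool had, init_now = false;

	pthread_mutex_lock(&lock);
	had = info_complete();
	have_fsid = true;       /* state update under the lock */
	have_global_id = true;
	if (!had && info_complete())
		init_now = true; /* just transitioned to complete */
	pthread_mutex_unlock(&lock);

	if (init_now)           /* side effects after unlock */
		init_outside_lock();
}

int main(void)
{
	handle_event();
	return 0;
}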
@@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
865{ 889{
866 int ret; 890 int ret;
867 int was_auth = 0; 891 int was_auth = 0;
892 int had_debugfs_info, init_debugfs = 0;
868 893
869 mutex_lock(&monc->mutex); 894 mutex_lock(&monc->mutex);
895 had_debugfs_info = have_debugfs_info(monc);
870 if (monc->auth->ops) 896 if (monc->auth->ops)
871 was_auth = monc->auth->ops->is_authenticated(monc->auth); 897 was_auth = monc->auth->ops->is_authenticated(monc->auth);
872 monc->pending_auth = 0; 898 monc->pending_auth = 0;
@@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
889 __send_subscribe(monc); 915 __send_subscribe(monc);
890 __resend_generic_request(monc); 916 __resend_generic_request(monc);
891 } 917 }
918
919 if (!had_debugfs_info && have_debugfs_info(monc)) {
920 pr_info("client%lld fsid %pU\n",
921 ceph_client_id(monc->client),
922 &monc->client->fsid);
923 init_debugfs = 1;
924 }
892 mutex_unlock(&monc->mutex); 925 mutex_unlock(&monc->mutex);
926
927 if (init_debugfs) {
928 /*
929 * do debugfs initialization without mutex to avoid
930 * creating a locking dependency
931 */
932 ceph_debugfs_client_init(monc->client);
933 }
893} 934}
894 935
895static int __validate_auth(struct ceph_mon_client *monc) 936static int __validate_auth(struct ceph_mon_client *monc)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0640d2a859c6..b1e6d6385516 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1466,8 +1466,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
1466 1466
1467int call_netdevice_notifiers(unsigned long val, struct net_device *dev) 1467int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1468{ 1468{
1469 if (val != NETDEV_UNREGISTER_FINAL) 1469 ASSERT_RTNL();
1470 ASSERT_RTNL();
1471 return raw_notifier_call_chain(&netdev_chain, val, dev); 1470 return raw_notifier_call_chain(&netdev_chain, val, dev);
1472} 1471}
1473EXPORT_SYMBOL(call_netdevice_notifiers); 1472EXPORT_SYMBOL(call_netdevice_notifiers);
@@ -2185,9 +2184,7 @@ EXPORT_SYMBOL(netif_skb_features);
2185/* 2184/*
2186 * Returns true if either: 2185 * Returns true if either:
2187 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2186 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2188 * 2. skb is fragmented and the device does not support SG, or if 2187 * 2. skb is fragmented and the device does not support SG.
2189 * at least one of fragments is in highmem and device does not
2190 * support DMA from it.
2191 */ 2188 */
2192static inline int skb_needs_linearize(struct sk_buff *skb, 2189static inline int skb_needs_linearize(struct sk_buff *skb,
2193 int features) 2190 int features)
@@ -4521,8 +4518,8 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
4521static int __dev_set_promiscuity(struct net_device *dev, int inc) 4518static int __dev_set_promiscuity(struct net_device *dev, int inc)
4522{ 4519{
4523 unsigned int old_flags = dev->flags; 4520 unsigned int old_flags = dev->flags;
4524 uid_t uid; 4521 kuid_t uid;
4525 gid_t gid; 4522 kgid_t gid;
4526 4523
4527 ASSERT_RTNL(); 4524 ASSERT_RTNL();
4528 4525
@@ -4554,7 +4551,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
4554 dev->name, (dev->flags & IFF_PROMISC), 4551 dev->name, (dev->flags & IFF_PROMISC),
4555 (old_flags & IFF_PROMISC), 4552 (old_flags & IFF_PROMISC),
4556 audit_get_loginuid(current), 4553 audit_get_loginuid(current),
4557 uid, gid, 4554 from_kuid(&init_user_ns, uid),
4555 from_kgid(&init_user_ns, gid),
4558 audit_get_sessionid(current)); 4556 audit_get_sessionid(current));
4559 } 4557 }
4560 4558
@@ -5649,6 +5647,8 @@ int register_netdevice(struct net_device *dev)
5649 5647
5650 set_bit(__LINK_STATE_PRESENT, &dev->state); 5648 set_bit(__LINK_STATE_PRESENT, &dev->state);
5651 5649
5650 linkwatch_init_dev(dev);
5651
5652 dev_init_scheduler(dev); 5652 dev_init_scheduler(dev);
5653 dev_hold(dev); 5653 dev_hold(dev);
5654 list_netdevice(dev); 5654 list_netdevice(dev);
@@ -5782,7 +5782,11 @@ static void netdev_wait_allrefs(struct net_device *dev)
5782 5782
5783 /* Rebroadcast unregister notification */ 5783 /* Rebroadcast unregister notification */
5784 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5784 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5785
5786 __rtnl_unlock();
5785 rcu_barrier(); 5787 rcu_barrier();
5788 rtnl_lock();
5789
5786 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5790 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5787 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5791 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5788 &dev->state)) { 5792 &dev->state)) {
@@ -5855,7 +5859,9 @@ void netdev_run_todo(void)
5855 = list_first_entry(&list, struct net_device, todo_list); 5859 = list_first_entry(&list, struct net_device, todo_list);
5856 list_del(&dev->todo_list); 5860 list_del(&dev->todo_list);
5857 5861
5862 rtnl_lock();
5858 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); 5863 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5864 __rtnl_unlock();
5859 5865
5860 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5866 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5861 pr_err("network todo '%s' but state %d\n", 5867 pr_err("network todo '%s' but state %d\n",
@@ -6251,6 +6257,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6251 the device is just moving and can keep their slaves up. 6257 the device is just moving and can keep their slaves up.
6252 */ 6258 */
6253 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6259 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6260 rcu_barrier();
6261 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6254 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 6262 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6255 6263
6256 /* 6264 /*
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 585093755c23..ab7db83236c9 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -711,16 +711,15 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
711 struct net *net = dev_net(dev); 711 struct net *net = dev_net(dev);
712 struct fib_rules_ops *ops; 712 struct fib_rules_ops *ops;
713 713
714 ASSERT_RTNL();
714 715
715 switch (event) { 716 switch (event) {
716 case NETDEV_REGISTER: 717 case NETDEV_REGISTER:
717 ASSERT_RTNL();
718 list_for_each_entry(ops, &net->rules_ops, list) 718 list_for_each_entry(ops, &net->rules_ops, list)
719 attach_rules(&ops->rules_list, dev); 719 attach_rules(&ops->rules_list, dev);
720 break; 720 break;
721 721
722 case NETDEV_UNREGISTER: 722 case NETDEV_UNREGISTER:
723 ASSERT_RTNL();
724 list_for_each_entry(ops, &net->rules_ops, list) 723 list_for_each_entry(ops, &net->rules_ops, list)
725 detach_rules(&ops->rules_list, dev); 724 detach_rules(&ops->rules_list, dev);
726 break; 725 break;
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6d1b16..a01922219a23 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -76,6 +76,14 @@ static void rfc2863_policy(struct net_device *dev)
76} 76}
77 77
78 78
79void linkwatch_init_dev(struct net_device *dev)
80{
81 /* Handle pre-registration link state changes */
82 if (!netif_carrier_ok(dev) || netif_dormant(dev))
83 rfc2863_policy(dev);
84}
85
86
79static bool linkwatch_urgent_event(struct net_device *dev) 87static bool linkwatch_urgent_event(struct net_device *dev)
80{ 88{
81 if (!netif_running(dev)) 89 if (!netif_running(dev))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 346b1eb83a1f..dd67818025d1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
168 struct napi_struct *napi; 168 struct napi_struct *napi;
169 int budget = 16; 169 int budget = 16;
170 170
171 WARN_ON_ONCE(!irqs_disabled());
172
173 list_for_each_entry(napi, &dev->napi_list, dev_list) { 171 list_for_each_entry(napi, &dev->napi_list, dev_list) {
174 local_irq_enable();
175 if (napi->poll_owner != smp_processor_id() && 172 if (napi->poll_owner != smp_processor_id() &&
176 spin_trylock(&napi->poll_lock)) { 173 spin_trylock(&napi->poll_lock)) {
177 rcu_read_lock_bh();
178 budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), 174 budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
179 napi, budget); 175 napi, budget);
180 rcu_read_unlock_bh();
181 spin_unlock(&napi->poll_lock); 176 spin_unlock(&napi->poll_lock);
182 177
183 if (!budget) { 178 if (!budget)
184 local_irq_disable();
185 break; 179 break;
186 }
187 } 180 }
188 local_irq_disable();
189 } 181 }
190} 182}
191 183
@@ -388,6 +380,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
388 struct udphdr *udph; 380 struct udphdr *udph;
389 struct iphdr *iph; 381 struct iphdr *iph;
390 struct ethhdr *eth; 382 struct ethhdr *eth;
383 static atomic_t ip_ident;
391 384
392 udp_len = len + sizeof(*udph); 385 udp_len = len + sizeof(*udph);
393 ip_len = udp_len + sizeof(*iph); 386 ip_len = udp_len + sizeof(*iph);
@@ -423,7 +416,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
423 put_unaligned(0x45, (unsigned char *)iph); 416 put_unaligned(0x45, (unsigned char *)iph);
424 iph->tos = 0; 417 iph->tos = 0;
425 put_unaligned(htons(ip_len), &(iph->tot_len)); 418 put_unaligned(htons(ip_len), &(iph->tot_len));
426 iph->id = 0; 419 iph->id = htons(atomic_inc_return(&ip_ident));
427 iph->frag_off = 0; 420 iph->frag_off = 0;
428 iph->ttl = 64; 421 iph->ttl = 64;
429 iph->protocol = IPPROTO_UDP; 422 iph->protocol = IPPROTO_UDP;
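
The netpoll change replaces the constant iph->id = 0 with a shared incrementing counter, so fragments of distinct netconsole datagrams no longer carry the same IP ID and cannot be cross-reassembled. A small sketch of the counter; C11 atomics and the libc htons stand in for the kernel's atomic_inc_return().

#include <arpa/inet.h> /* htons, ntohs */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_int ip_ident; /* one shared counter, as in netpoll_send_udp() */

/* atomic_inc_return() semantics: increment first, then use the new value */
static uint16_t next_ip_id(void)
{
	return htons((uint16_t)(atomic_fetch_add(&ip_ident, 1) + 1));
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("id=0x%04x\n", ntohs(next_ip_id()));
	return 0;
}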
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 9b570a6a33c5..c31d9e8668c3 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -15,6 +15,7 @@
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/tcp.h>
18#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
19 20
20#include <net/request_sock.h> 21#include <net/request_sock.h>
@@ -130,3 +131,97 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
130 kfree(lopt); 131 kfree(lopt);
131} 132}
132 133
134/*
135 * This function is called to set a Fast Open socket's "fastopen_rsk" field
136 * to NULL when a TFO socket no longer needs to access the request_sock.
137 * This happens only after 3WHS has been either completed or aborted (e.g.,
138 * RST is received).
139 *
140 * Before TFO, a child socket is created only after 3WHS is completed,
141 * hence it never needs to access the request_sock. Things get a lot more
142 * complex with TFO. A child socket, accepted or not, has to access its
143 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
144 * until 3WHS is either completed or aborted. Afterwards the req will stay
145 * until either the child socket is accepted or, in the rare case, the
146 * listener is closed before the child is accepted.
147 *
148 * In short, a request socket is only freed after BOTH 3WHS has completed
149 * (or aborted) and the child socket has been accepted (or listener closed).
150 * When a child socket is accepted, its corresponding req->sk is set to
151 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
152 * will be used by the code below to determine if a child socket has been
153 * accepted or not, and the check is protected by the fastopenq->lock
154 * described below.
155 *
156 * Note that fastopen_rsk is only accessed from the child socket's context
157 * with its socket lock held. But a request_sock (req) can be accessed by
158 * both its child socket through fastopen_rsk, and a listener socket through
159 * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
160 * lock per listener "icsk->icsk_accept_queue.fastopenq->lock" is created.
161 * Only in the rare case when both the listener and the child locks are held,
162 * e.g., in inet_csk_listen_stop(), do we not need to acquire the lock.
163 * The lock also protects other fields such as fastopenq->qlen, which is
164 * decremented by this function when fastopen_rsk is no longer needed.
165 *
166 * Note that another solution was to simply use the existing socket lock
167 * from the listener. But first, the socket lock is difficult to use. It is not
168 * a simple spin lock - one must consider sock_owned_by_user() and arrange
169 * to use sk_add_backlog() stuff. But what really makes it infeasible is the
170 * locking hierarchy violation. E.g., inet_csk_listen_stop() may try to
171 * acquire a child's lock while holding listener's socket lock. A corner
172 * case might also exist in tcp_v4_hnd_req() that will trigger this locking
173 * order.
174 *
175 * When a TFO req is created, it needs to sock_hold its listener to prevent
176 * the latter data structure from going away.
177 *
178 * This function also sets "treq->listener" to NULL and unreferences the
179 * listener socket. treq->listener is used by the listener so it is protected by the
180 * fastopenq->lock in this function.
181 */
182void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
183 bool reset)
184{
185 struct sock *lsk = tcp_rsk(req)->listener;
186 struct fastopen_queue *fastopenq =
187 inet_csk(lsk)->icsk_accept_queue.fastopenq;
188
189 BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
190
191 tcp_sk(sk)->fastopen_rsk = NULL;
192 spin_lock_bh(&fastopenq->lock);
193 fastopenq->qlen--;
194 tcp_rsk(req)->listener = NULL;
195 if (req->sk) /* the child socket hasn't been accepted yet */
196 goto out;
197
198 if (!reset || lsk->sk_state != TCP_LISTEN) {
199 /* If the listener has been closed, don't bother with the
200 * special RST handling below.
201 */
202 spin_unlock_bh(&fastopenq->lock);
203 sock_put(lsk);
204 reqsk_free(req);
205 return;
206 }
207 /* Wait for 60 seconds before removing a req that has triggered RST.
208 * This is a simple defense against a TFO spoofing attack - by
209 * counting the req against fastopen.max_qlen, and disabling
210 * TFO when the qlen exceeds max_qlen.
211 *
212 * For more details see CoNext'11 "TCP Fast Open" paper.
213 */
214 req->expires = jiffies + 60*HZ;
215 if (fastopenq->rskq_rst_head == NULL)
216 fastopenq->rskq_rst_head = req;
217 else
218 fastopenq->rskq_rst_tail->dl_next = req;
219
220 req->dl_next = NULL;
221 fastopenq->rskq_rst_tail = req;
222 fastopenq->qlen++;
223out:
224 spin_unlock_bh(&fastopenq->lock);
225 sock_put(lsk);
226 return;
227}
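
A toy model of the lifetime rule the comment block spells out: the request may be freed only once both the 3WHS has finished (or aborted) and the child has been accepted (or the listener closed). Everything below is illustrative userspace code, not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model only: req may be freed once BOTH conditions hold. */
struct tfo_req {
	bool handshake_done;
	bool child_accepted;
};

static void maybe_free(struct tfo_req **reqp)
{
	struct tfo_req *req = *reqp;

	if (req->handshake_done && req->child_accepted) {
		free(req);
		*reqp = NULL;
		printf("req freed\n");
	} else {
		printf("req kept (3whs=%d accepted=%d)\n",
		       req->handshake_done, req->child_accepted);
	}
}

int main(void)
{
	struct tfo_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->handshake_done = true;  /* SYN-ACK acknowledged */
	maybe_free(&req);            /* kept: child not accepted yet */
	req->child_accepted = true;  /* accept() returned the child */
	maybe_free(&req);            /* both conditions hold: freed */
	return 0;
}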
diff --git a/net/core/scm.c b/net/core/scm.c
index 040cebeed45b..6ab491d6c26f 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -45,12 +45,17 @@
45static __inline__ int scm_check_creds(struct ucred *creds) 45static __inline__ int scm_check_creds(struct ucred *creds)
46{ 46{
47 const struct cred *cred = current_cred(); 47 const struct cred *cred = current_cred();
48 kuid_t uid = make_kuid(cred->user_ns, creds->uid);
49 kgid_t gid = make_kgid(cred->user_ns, creds->gid);
50
51 if (!uid_valid(uid) || !gid_valid(gid))
52 return -EINVAL;
48 53
49 if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && 54 if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) &&
50 ((creds->uid == cred->uid || creds->uid == cred->euid || 55 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
51 creds->uid == cred->suid) || capable(CAP_SETUID)) && 56 uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) &&
52 ((creds->gid == cred->gid || creds->gid == cred->egid || 57 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
53 creds->gid == cred->sgid) || capable(CAP_SETGID))) { 58 gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) {
54 return 0; 59 return 0;
55 } 60 }
56 return -EPERM; 61 return -EPERM;
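
The scm conversion follows the user-namespace recipe used throughout this series: map the wire-format uid/gid into kuid_t/kgid_t with make_kuid()/make_kgid(), fail with -EINVAL if the mapping is invalid, and compare only through uid_eq()/gid_eq(). A hedged userspace analogy of the map-validate-compare flow; the kuid_t wrapper, INVALID_UID, and the offset-based mapping are stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

typedef struct { int val; } kuid_t; /* opaque wrapper, like the kernel type */
#define INVALID_UID ((kuid_t){ -1 })

static bool uid_valid(kuid_t u) { return u.val != -1; }
static bool uid_eq(kuid_t a, kuid_t b) { return a.val == b.val; }

/* Stand-in for make_kuid(): map a namespace-local uid to a global id,
 * here simply by adding a per-"namespace" offset. */
static kuid_t make_kuid(int ns_offset, int uid)
{
	if (uid < 0)
		return INVALID_UID;
	return (kuid_t){ ns_offset + uid };
}

int main(void)
{
	kuid_t cred_euid = make_kuid(100000, 0); /* root inside the ns */
	kuid_t claimed = make_kuid(100000, 0);   /* uid claimed in the cmsg */

	if (!uid_valid(claimed))
		return 1; /* the -EINVAL path added by the patch */
	printf("match=%d\n", uid_eq(claimed, cred_euid));
	return 0;
}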
@@ -149,6 +154,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
149 goto error; 154 goto error;
150 break; 155 break;
151 case SCM_CREDENTIALS: 156 case SCM_CREDENTIALS:
157 {
158 kuid_t uid;
159 kgid_t gid;
152 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) 160 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
153 goto error; 161 goto error;
154 memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); 162 memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
@@ -166,22 +174,29 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
166 p->pid = pid; 174 p->pid = pid;
167 } 175 }
168 176
177 err = -EINVAL;
178 uid = make_kuid(current_user_ns(), p->creds.uid);
179 gid = make_kgid(current_user_ns(), p->creds.gid);
180 if (!uid_valid(uid) || !gid_valid(gid))
181 goto error;
182
169 if (!p->cred || 183 if (!p->cred ||
170 (p->cred->euid != p->creds.uid) || 184 !uid_eq(p->cred->euid, uid) ||
171 (p->cred->egid != p->creds.gid)) { 185 !gid_eq(p->cred->egid, gid)) {
172 struct cred *cred; 186 struct cred *cred;
173 err = -ENOMEM; 187 err = -ENOMEM;
174 cred = prepare_creds(); 188 cred = prepare_creds();
175 if (!cred) 189 if (!cred)
176 goto error; 190 goto error;
177 191
178 cred->uid = cred->euid = p->creds.uid; 192 cred->uid = cred->euid = uid;
179 cred->gid = cred->egid = p->creds.gid; 193 cred->gid = cred->egid = gid;
180 if (p->cred) 194 if (p->cred)
181 put_cred(p->cred); 195 put_cred(p->cred);
182 p->cred = cred; 196 p->cred = cred;
183 } 197 }
184 break; 198 break;
199 }
185 default: 200 default:
186 goto error; 201 goto error;
187 } 202 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 8f67ced8d6a8..d765156eab65 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -868,8 +868,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
868 if (cred) { 868 if (cred) {
869 struct user_namespace *current_ns = current_user_ns(); 869 struct user_namespace *current_ns = current_user_ns();
870 870
871 ucred->uid = from_kuid(current_ns, cred->euid); 871 ucred->uid = from_kuid_munged(current_ns, cred->euid);
872 ucred->gid = from_kgid(current_ns, cred->egid); 872 ucred->gid = from_kgid_munged(current_ns, cred->egid);
873 } 873 }
874} 874}
875EXPORT_SYMBOL_GPL(cred_to_ucred); 875EXPORT_SYMBOL_GPL(cred_to_ucred);
@@ -1230,7 +1230,7 @@ void sock_update_classid(struct sock *sk)
1230 rcu_read_lock(); /* doing current task, which cannot vanish. */ 1230 rcu_read_lock(); /* doing current task, which cannot vanish. */
1231 classid = task_cls_classid(current); 1231 classid = task_cls_classid(current);
1232 rcu_read_unlock(); 1232 rcu_read_unlock();
1233 if (classid && classid != sk->sk_classid) 1233 if (classid != sk->sk_classid)
1234 sk->sk_classid = classid; 1234 sk->sk_classid = classid;
1235} 1235}
1236EXPORT_SYMBOL(sock_update_classid); 1236EXPORT_SYMBOL(sock_update_classid);
@@ -1527,12 +1527,12 @@ void sock_edemux(struct sk_buff *skb)
1527} 1527}
1528EXPORT_SYMBOL(sock_edemux); 1528EXPORT_SYMBOL(sock_edemux);
1529 1529
1530int sock_i_uid(struct sock *sk) 1530kuid_t sock_i_uid(struct sock *sk)
1531{ 1531{
1532 int uid; 1532 kuid_t uid;
1533 1533
1534 read_lock_bh(&sk->sk_callback_lock); 1534 read_lock_bh(&sk->sk_callback_lock);
1535 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1535 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1536 read_unlock_bh(&sk->sk_callback_lock); 1536 read_unlock_bh(&sk->sk_callback_lock);
1537 return uid; 1537 return uid;
1538} 1538}
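
With from_kuid_munged()/from_kgid_munged(), cred_to_ucred() always yields a representable answer: an id with no mapping in the reader's namespace degrades to the overflow uid/gid (usually 65534) instead of exposing a raw kernel id. This is what a SO_PEERCRED caller observes; a short sketch (show_peer() is a hypothetical helper):

#define _GNU_SOURCE		/* for struct ucred */
#include <stdio.h>
#include <sys/socket.h>

static void show_peer(int fd)
{
	struct ucred uc;
	socklen_t len = sizeof(uc);

	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &uc, &len) == 0)
		printf("peer pid=%d uid=%u gid=%u\n", uc.pid, uc.uid, uc.gid);
}
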
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2ba1a2814c24..307c322d53bb 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1313,10 +1313,10 @@ static int dn_shutdown(struct socket *sock, int how)
1313 if (scp->state == DN_O) 1313 if (scp->state == DN_O)
1314 goto out; 1314 goto out;
1315 1315
1316 if (how != SHUTDOWN_MASK) 1316 if (how != SHUT_RDWR)
1317 goto out; 1317 goto out;
1318 1318
1319 sk->sk_shutdown = how; 1319 sk->sk_shutdown = SHUTDOWN_MASK;
1320 dn_destroy_sock(sk); 1320 dn_destroy_sock(sk);
1321 err = 0; 1321 err = 0;
1322 1322
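
The af_decnet.c fix separates the two shutdown encodings. A one-function sketch of the userspace side (dn_full_shutdown() is a hypothetical name):

#include <sys/socket.h>

/* Userspace passes SHUT_RD (0), SHUT_WR (1) or SHUT_RDWR (2); the kernel's
 * sk->sk_shutdown holds the bitmask RCV_SHUTDOWN|SEND_SHUTDOWN, i.e.
 * SHUTDOWN_MASK == 3. Storing `how` verbatim conflated the two encodings. */
static int dn_full_shutdown(int fd)
{
	return shutdown(fd, SHUT_RDWR);
}
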
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 6a095225148e..d5291113584f 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1063,12 +1063,6 @@ out:
1063 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK); 1063 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
1064} 1064}
1065 1065
1066static void lowpan_dev_free(struct net_device *dev)
1067{
1068 dev_put(lowpan_dev_info(dev)->real_dev);
1069 free_netdev(dev);
1070}
1071
1072static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) 1066static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
1073{ 1067{
1074 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; 1068 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
@@ -1118,7 +1112,7 @@ static void lowpan_setup(struct net_device *dev)
1118 dev->netdev_ops = &lowpan_netdev_ops; 1112 dev->netdev_ops = &lowpan_netdev_ops;
1119 dev->header_ops = &lowpan_header_ops; 1113 dev->header_ops = &lowpan_header_ops;
1120 dev->ml_priv = &lowpan_mlme; 1114 dev->ml_priv = &lowpan_mlme;
1121 dev->destructor = lowpan_dev_free; 1115 dev->destructor = free_netdev;
1122} 1116}
1123 1117
1124static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) 1118static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1133,6 +1127,8 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
1133static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, 1127static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1134 struct packet_type *pt, struct net_device *orig_dev) 1128 struct packet_type *pt, struct net_device *orig_dev)
1135{ 1129{
1130 struct sk_buff *local_skb;
1131
1136 if (!netif_running(dev)) 1132 if (!netif_running(dev))
1137 goto drop; 1133 goto drop;
1138 1134
@@ -1144,7 +1140,12 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1144 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ 1140 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
1145 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ 1141 case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
1146 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ 1142 case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
1147 lowpan_process_data(skb); 1143 local_skb = skb_clone(skb, GFP_ATOMIC);
1144 if (!local_skb)
1145 goto drop;
1146 lowpan_process_data(local_skb);
1147
1148 kfree_skb(skb);
1148 break; 1149 break;
1149 default: 1150 default:
1150 break; 1151 break;
@@ -1237,6 +1238,34 @@ static inline void __init lowpan_netlink_fini(void)
1237 rtnl_link_unregister(&lowpan_link_ops); 1238 rtnl_link_unregister(&lowpan_link_ops);
1238} 1239}
1239 1240
1241static int lowpan_device_event(struct notifier_block *unused,
1242 unsigned long event,
1243 void *ptr)
1244{
1245 struct net_device *dev = ptr;
1246 LIST_HEAD(del_list);
1247 struct lowpan_dev_record *entry, *tmp;
1248
1249 if (dev->type != ARPHRD_IEEE802154)
1250 goto out;
1251
1252 if (event == NETDEV_UNREGISTER) {
1253 list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
1254 if (lowpan_dev_info(entry->ldev)->real_dev == dev)
1255 lowpan_dellink(entry->ldev, &del_list);
1256 }
1257
1258 unregister_netdevice_many(&del_list);
 1259	}
1260
1261out:
1262 return NOTIFY_DONE;
1263}
1264
1265static struct notifier_block lowpan_dev_notifier = {
1266 .notifier_call = lowpan_device_event,
1267};
1268
1240static struct packet_type lowpan_packet_type = { 1269static struct packet_type lowpan_packet_type = {
1241 .type = __constant_htons(ETH_P_IEEE802154), 1270 .type = __constant_htons(ETH_P_IEEE802154),
1242 .func = lowpan_rcv, 1271 .func = lowpan_rcv,
@@ -1251,6 +1280,12 @@ static int __init lowpan_init_module(void)
1251 goto out; 1280 goto out;
1252 1281
1253 dev_add_pack(&lowpan_packet_type); 1282 dev_add_pack(&lowpan_packet_type);
1283
1284 err = register_netdevice_notifier(&lowpan_dev_notifier);
1285 if (err < 0) {
1286 dev_remove_pack(&lowpan_packet_type);
1287 lowpan_netlink_fini();
1288 }
1254out: 1289out:
1255 return err; 1290 return err;
1256} 1291}
@@ -1263,6 +1298,8 @@ static void __exit lowpan_cleanup_module(void)
1263 1298
1264 dev_remove_pack(&lowpan_packet_type); 1299 dev_remove_pack(&lowpan_packet_type);
1265 1300
1301 unregister_netdevice_notifier(&lowpan_dev_notifier);
1302
1266 /* Now 6lowpan packet_type is removed, so no new fragments are 1303 /* Now 6lowpan packet_type is removed, so no new fragments are
1267 * expected on RX, therefore that's the time to clean incomplete 1304 * expected on RX, therefore that's the time to clean incomplete
1268 * fragments. 1305 * fragments.
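
The notifier added to 6lowpan is the standard way for a virtual device to track its lower device. A stripped-down module sketch of the same mechanism; the demo_* names are hypothetical, and the void *ptr convention (the net_device itself) is the one in effect at the time of this merge:

#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* calling convention of this era */

	if (event == NETDEV_UNREGISTER)
		pr_info("demo: %s is going away, drop references now\n",
			dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_device_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
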
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6681ccf5c3ee..4f70ef0b946d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -149,6 +149,11 @@ void inet_sock_destruct(struct sock *sk)
149 pr_err("Attempt to release alive inet socket %p\n", sk); 149 pr_err("Attempt to release alive inet socket %p\n", sk);
150 return; 150 return;
151 } 151 }
152 if (sk->sk_type == SOCK_STREAM) {
153 struct fastopen_queue *fastopenq =
154 inet_csk(sk)->icsk_accept_queue.fastopenq;
155 kfree(fastopenq);
156 }
152 157
153 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 158 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
154 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 159 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
@@ -212,6 +217,26 @@ int inet_listen(struct socket *sock, int backlog)
212 * we can only allow the backlog to be adjusted. 217 * we can only allow the backlog to be adjusted.
213 */ 218 */
214 if (old_state != TCP_LISTEN) { 219 if (old_state != TCP_LISTEN) {
 220		/* Check special setups for testing purposes to enable TFO w/o
 221		 * requiring TCP_FASTOPEN sockopt.
 222		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
 223		 * Also fastopenq may already have been allocated because this
224 * socket was in TCP_LISTEN state previously but was
225 * shutdown() (rather than close()).
226 */
227 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
228 inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
229 if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
230 err = fastopen_init_queue(sk, backlog);
231 else if ((sysctl_tcp_fastopen &
232 TFO_SERVER_WO_SOCKOPT2) != 0)
233 err = fastopen_init_queue(sk,
234 ((uint)sysctl_tcp_fastopen) >> 16);
235 else
236 err = 0;
237 if (err)
238 goto out;
239 }
215 err = inet_csk_listen_start(sk, backlog); 240 err = inet_csk_listen_start(sk, backlog);
216 if (err) 241 if (err)
217 goto out; 242 goto out;
@@ -701,7 +726,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
701 726
702 sock_rps_record_flow(sk2); 727 sock_rps_record_flow(sk2);
703 WARN_ON(!((1 << sk2->sk_state) & 728 WARN_ON(!((1 << sk2->sk_state) &
704 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE))); 729 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
730 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
705 731
706 sock_graft(sk2, newsock); 732 sock_graft(sk2, newsock);
707 733
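
A server-side sketch of the normal, non-testing way to arm the fastopenq that inet_listen() checks above; it assumes the TFO_SERVER_ENABLE bit is set in net.ipv4.tcp_fastopen, and tfo_listen(), the port, and qlen are illustrative (error handling elided):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23		/* new sockopt in this series */
#endif

static int tfo_listen(unsigned short port)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(port),
				    .sin_addr.s_addr = htonl(INADDR_ANY) };
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;	/* cap on pending (not yet accepted) TFO requests */

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	listen(fd, 128);
	return fd;
}

The TFO_SERVER_WO_SOCKOPT2 testing path instead takes the queue length from the upper 16 bits of the sysctl value, which is what the >> 16 above implements.
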
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6a5e6e4b142c..adf273f8ad2e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1147,12 +1147,8 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1147 void *ptr) 1147 void *ptr)
1148{ 1148{
1149 struct net_device *dev = ptr; 1149 struct net_device *dev = ptr;
1150 struct in_device *in_dev; 1150 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1151
1152 if (event == NETDEV_UNREGISTER_FINAL)
1153 goto out;
1154 1151
1155 in_dev = __in_dev_get_rtnl(dev);
1156 ASSERT_RTNL(); 1152 ASSERT_RTNL();
1157 1153
1158 if (!in_dev) { 1154 if (!in_dev) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index fd7d9ae64f16..acdee325d972 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1050,9 +1050,6 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1050 return NOTIFY_DONE; 1050 return NOTIFY_DONE;
1051 } 1051 }
1052 1052
1053 if (event == NETDEV_UNREGISTER_FINAL)
1054 return NOTIFY_DONE;
1055
1056 in_dev = __in_dev_get_rtnl(dev); 1053 in_dev = __in_dev_get_rtnl(dev);
1057 1054
1058 switch (event) { 1055 switch (event) {
@@ -1064,14 +1061,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1064 fib_sync_up(dev); 1061 fib_sync_up(dev);
1065#endif 1062#endif
1066 atomic_inc(&net->ipv4.dev_addr_genid); 1063 atomic_inc(&net->ipv4.dev_addr_genid);
1067 rt_cache_flush(dev_net(dev), -1); 1064 rt_cache_flush(net, -1);
1068 break; 1065 break;
1069 case NETDEV_DOWN: 1066 case NETDEV_DOWN:
1070 fib_disable_ip(dev, 0, 0); 1067 fib_disable_ip(dev, 0, 0);
1071 break; 1068 break;
1072 case NETDEV_CHANGEMTU: 1069 case NETDEV_CHANGEMTU:
1073 case NETDEV_CHANGE: 1070 case NETDEV_CHANGE:
1074 rt_cache_flush(dev_net(dev), 0); 1071 rt_cache_flush(net, 0);
1075 break; 1072 break;
1076 } 1073 }
1077 return NOTIFY_DONE; 1074 return NOTIFY_DONE;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7f75f21d7b83..8464b79c493f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -283,7 +283,9 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
283struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) 283struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
284{ 284{
285 struct inet_connection_sock *icsk = inet_csk(sk); 285 struct inet_connection_sock *icsk = inet_csk(sk);
286 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
286 struct sock *newsk; 287 struct sock *newsk;
288 struct request_sock *req;
287 int error; 289 int error;
288 290
289 lock_sock(sk); 291 lock_sock(sk);
@@ -296,7 +298,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
296 goto out_err; 298 goto out_err;
297 299
298 /* Find already established connection */ 300 /* Find already established connection */
299 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) { 301 if (reqsk_queue_empty(queue)) {
300 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 302 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
301 303
302 /* If this is a non blocking socket don't sleep */ 304 /* If this is a non blocking socket don't sleep */
@@ -308,14 +310,32 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
308 if (error) 310 if (error)
309 goto out_err; 311 goto out_err;
310 } 312 }
311 313 req = reqsk_queue_remove(queue);
312 newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); 314 newsk = req->sk;
313 WARN_ON(newsk->sk_state == TCP_SYN_RECV); 315
316 sk_acceptq_removed(sk);
317 if (sk->sk_type == SOCK_STREAM && queue->fastopenq != NULL) {
318 spin_lock_bh(&queue->fastopenq->lock);
319 if (tcp_rsk(req)->listener) {
 320			/* We are still waiting for the final ACK of the 3WHS,
 321			 * so we can't free req now. Instead, we set req->sk to
322 * NULL to signify that the child socket is taken
323 * so reqsk_fastopen_remove() will free the req
324 * when 3WHS finishes (or is aborted).
325 */
326 req->sk = NULL;
327 req = NULL;
328 }
329 spin_unlock_bh(&queue->fastopenq->lock);
330 }
314out: 331out:
315 release_sock(sk); 332 release_sock(sk);
333 if (req)
334 __reqsk_free(req);
316 return newsk; 335 return newsk;
317out_err: 336out_err:
318 newsk = NULL; 337 newsk = NULL;
338 req = NULL;
319 *err = error; 339 *err = error;
320 goto out; 340 goto out;
321} 341}
@@ -720,13 +740,14 @@ EXPORT_SYMBOL_GPL(inet_csk_listen_start);
720void inet_csk_listen_stop(struct sock *sk) 740void inet_csk_listen_stop(struct sock *sk)
721{ 741{
722 struct inet_connection_sock *icsk = inet_csk(sk); 742 struct inet_connection_sock *icsk = inet_csk(sk);
743 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
723 struct request_sock *acc_req; 744 struct request_sock *acc_req;
724 struct request_sock *req; 745 struct request_sock *req;
725 746
726 inet_csk_delete_keepalive_timer(sk); 747 inet_csk_delete_keepalive_timer(sk);
727 748
728 /* make all the listen_opt local to us */ 749 /* make all the listen_opt local to us */
729 acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue); 750 acc_req = reqsk_queue_yank_acceptq(queue);
730 751
731 /* Following specs, it would be better either to send FIN 752 /* Following specs, it would be better either to send FIN
732 * (and enter FIN-WAIT-1, it is normal close) 753 * (and enter FIN-WAIT-1, it is normal close)
@@ -736,7 +757,7 @@ void inet_csk_listen_stop(struct sock *sk)
736 * To be honest, we are not able to make either 757 * To be honest, we are not able to make either
737 * of the variants now. --ANK 758 * of the variants now. --ANK
738 */ 759 */
739 reqsk_queue_destroy(&icsk->icsk_accept_queue); 760 reqsk_queue_destroy(queue);
740 761
741 while ((req = acc_req) != NULL) { 762 while ((req = acc_req) != NULL) {
742 struct sock *child = req->sk; 763 struct sock *child = req->sk;
@@ -754,6 +775,19 @@ void inet_csk_listen_stop(struct sock *sk)
754 775
755 percpu_counter_inc(sk->sk_prot->orphan_count); 776 percpu_counter_inc(sk->sk_prot->orphan_count);
756 777
778 if (sk->sk_type == SOCK_STREAM && tcp_rsk(req)->listener) {
779 BUG_ON(tcp_sk(child)->fastopen_rsk != req);
780 BUG_ON(sk != tcp_rsk(req)->listener);
781
782 /* Paranoid, to prevent race condition if
783 * an inbound pkt destined for child is
784 * blocked by sock lock in tcp_v4_rcv().
785 * Also to satisfy an assertion in
786 * tcp_v4_destroy_sock().
787 */
788 tcp_sk(child)->fastopen_rsk = NULL;
789 sock_put(sk);
790 }
757 inet_csk_destroy_sock(child); 791 inet_csk_destroy_sock(child);
758 792
759 bh_unlock_sock(child); 793 bh_unlock_sock(child);
@@ -763,6 +797,17 @@ void inet_csk_listen_stop(struct sock *sk)
763 sk_acceptq_removed(sk); 797 sk_acceptq_removed(sk);
764 __reqsk_free(req); 798 __reqsk_free(req);
765 } 799 }
800 if (queue->fastopenq != NULL) {
801 /* Free all the reqs queued in rskq_rst_head. */
802 spin_lock_bh(&queue->fastopenq->lock);
803 acc_req = queue->fastopenq->rskq_rst_head;
804 queue->fastopenq->rskq_rst_head = NULL;
805 spin_unlock_bh(&queue->fastopenq->lock);
806 while ((req = acc_req) != NULL) {
807 acc_req = req->dl_next;
808 __reqsk_free(req);
809 }
810 }
766 WARN_ON(sk->sk_ack_backlog); 811 WARN_ON(sk->sk_ack_backlog);
767} 812}
768EXPORT_SYMBOL_GPL(inet_csk_listen_stop); 813EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
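
Nothing changes at the accept() call site: with Fast Open the child handed back may still be in SYN_RECV, and data that arrived on the SYN is already queued for reading. A plain accept/read loop as a sketch (serve() is a hypothetical helper):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void serve(int lfd)
{
	char buf[4096];

	for (;;) {
		int cfd = accept(lfd, NULL, NULL);
		ssize_t n;

		if (cfd < 0)
			continue;
		/* Any data that rode in on the SYN is already readable,
		 * even if the 3WHS has not completed yet. */
		while ((n = read(cfd, buf, sizeof(buf))) > 0)
			;	/* process buf[0..n) */
		close(cfd);
	}
}
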
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 570e61f9611f..8bc005b1435f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -69,6 +69,7 @@ static inline void inet_diag_unlock_handler(
69 69
70int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, 70int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
71 struct sk_buff *skb, struct inet_diag_req_v2 *req, 71 struct sk_buff *skb, struct inet_diag_req_v2 *req,
72 struct user_namespace *user_ns,
72 u32 pid, u32 seq, u16 nlmsg_flags, 73 u32 pid, u32 seq, u16 nlmsg_flags,
73 const struct nlmsghdr *unlh) 74 const struct nlmsghdr *unlh)
74{ 75{
@@ -124,7 +125,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
124 } 125 }
125#endif 126#endif
126 127
127 r->idiag_uid = sock_i_uid(sk); 128 r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
128 r->idiag_inode = sock_i_ino(sk); 129 r->idiag_inode = sock_i_ino(sk);
129 130
130 if (ext & (1 << (INET_DIAG_MEMINFO - 1))) { 131 if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -199,11 +200,12 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
199 200
200static int inet_csk_diag_fill(struct sock *sk, 201static int inet_csk_diag_fill(struct sock *sk,
201 struct sk_buff *skb, struct inet_diag_req_v2 *req, 202 struct sk_buff *skb, struct inet_diag_req_v2 *req,
203 struct user_namespace *user_ns,
202 u32 pid, u32 seq, u16 nlmsg_flags, 204 u32 pid, u32 seq, u16 nlmsg_flags,
203 const struct nlmsghdr *unlh) 205 const struct nlmsghdr *unlh)
204{ 206{
205 return inet_sk_diag_fill(sk, inet_csk(sk), 207 return inet_sk_diag_fill(sk, inet_csk(sk),
206 skb, req, pid, seq, nlmsg_flags, unlh); 208 skb, req, user_ns, pid, seq, nlmsg_flags, unlh);
207} 209}
208 210
209static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, 211static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
@@ -256,14 +258,16 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
256} 258}
257 259
258static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 260static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
259 struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags, 261 struct inet_diag_req_v2 *r,
262 struct user_namespace *user_ns,
263 u32 pid, u32 seq, u16 nlmsg_flags,
260 const struct nlmsghdr *unlh) 264 const struct nlmsghdr *unlh)
261{ 265{
262 if (sk->sk_state == TCP_TIME_WAIT) 266 if (sk->sk_state == TCP_TIME_WAIT)
263 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, 267 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
264 skb, r, pid, seq, nlmsg_flags, 268 skb, r, pid, seq, nlmsg_flags,
265 unlh); 269 unlh);
266 return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh); 270 return inet_csk_diag_fill(sk, skb, r, user_ns, pid, seq, nlmsg_flags, unlh);
267} 271}
268 272
269int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, 273int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -311,6 +315,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
311 } 315 }
312 316
313 err = sk_diag_fill(sk, rep, req, 317 err = sk_diag_fill(sk, rep, req,
318 sk_user_ns(NETLINK_CB(in_skb).ssk),
314 NETLINK_CB(in_skb).pid, 319 NETLINK_CB(in_skb).pid,
315 nlh->nlmsg_seq, 0, nlh); 320 nlh->nlmsg_seq, 0, nlh);
316 if (err < 0) { 321 if (err < 0) {
@@ -551,6 +556,7 @@ static int inet_csk_diag_dump(struct sock *sk,
551 return 0; 556 return 0;
552 557
553 return inet_csk_diag_fill(sk, skb, r, 558 return inet_csk_diag_fill(sk, skb, r,
559 sk_user_ns(NETLINK_CB(cb->skb).ssk),
554 NETLINK_CB(cb->skb).pid, 560 NETLINK_CB(cb->skb).pid,
555 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 561 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
556} 562}
@@ -591,7 +597,9 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
591} 597}
592 598
593static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 599static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
594 struct request_sock *req, u32 pid, u32 seq, 600 struct request_sock *req,
601 struct user_namespace *user_ns,
602 u32 pid, u32 seq,
595 const struct nlmsghdr *unlh) 603 const struct nlmsghdr *unlh)
596{ 604{
597 const struct inet_request_sock *ireq = inet_rsk(req); 605 const struct inet_request_sock *ireq = inet_rsk(req);
@@ -625,7 +633,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
625 r->idiag_expires = jiffies_to_msecs(tmo); 633 r->idiag_expires = jiffies_to_msecs(tmo);
626 r->idiag_rqueue = 0; 634 r->idiag_rqueue = 0;
627 r->idiag_wqueue = 0; 635 r->idiag_wqueue = 0;
628 r->idiag_uid = sock_i_uid(sk); 636 r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
629 r->idiag_inode = 0; 637 r->idiag_inode = 0;
630#if IS_ENABLED(CONFIG_IPV6) 638#if IS_ENABLED(CONFIG_IPV6)
631 if (r->idiag_family == AF_INET6) { 639 if (r->idiag_family == AF_INET6) {
@@ -702,6 +710,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
702 } 710 }
703 711
704 err = inet_diag_fill_req(skb, sk, req, 712 err = inet_diag_fill_req(skb, sk, req,
713 sk_user_ns(NETLINK_CB(cb->skb).ssk),
705 NETLINK_CB(cb->skb).pid, 714 NETLINK_CB(cb->skb).pid,
706 cb->nlh->nlmsg_seq, cb->nlh); 715 cb->nlh->nlmsg_seq, cb->nlh);
707 if (err < 0) { 716 if (err < 0) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a57570c8ee5..8aa7a4cf9139 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
124static struct kmem_cache *mrt_cachep __read_mostly; 124static struct kmem_cache *mrt_cachep __read_mostly;
125 125
126static struct mr_table *ipmr_new_table(struct net *net, u32 id); 126static struct mr_table *ipmr_new_table(struct net *net, u32 id);
127static void ipmr_free_table(struct mr_table *mrt);
128
127static int ip_mr_forward(struct net *net, struct mr_table *mrt, 129static int ip_mr_forward(struct net *net, struct mr_table *mrt,
128 struct sk_buff *skb, struct mfc_cache *cache, 130 struct sk_buff *skb, struct mfc_cache *cache,
129 int local); 131 int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
131 struct sk_buff *pkt, vifi_t vifi, int assert); 133 struct sk_buff *pkt, vifi_t vifi, int assert);
132static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 134static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
133 struct mfc_cache *c, struct rtmsg *rtm); 135 struct mfc_cache *c, struct rtmsg *rtm);
136static void mroute_clean_tables(struct mr_table *mrt);
134static void ipmr_expire_process(unsigned long arg); 137static void ipmr_expire_process(unsigned long arg);
135 138
136#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 139#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
271 274
272 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { 275 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
273 list_del(&mrt->list); 276 list_del(&mrt->list);
274 kfree(mrt); 277 ipmr_free_table(mrt);
275 } 278 }
276 fib_rules_unregister(net->ipv4.mr_rules_ops); 279 fib_rules_unregister(net->ipv4.mr_rules_ops);
277} 280}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
299 302
300static void __net_exit ipmr_rules_exit(struct net *net) 303static void __net_exit ipmr_rules_exit(struct net *net)
301{ 304{
302 kfree(net->ipv4.mrt); 305 ipmr_free_table(net->ipv4.mrt);
303} 306}
304#endif 307#endif
305 308
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
336 return mrt; 339 return mrt;
337} 340}
338 341
342static void ipmr_free_table(struct mr_table *mrt)
343{
344 del_timer_sync(&mrt->ipmr_expire_timer);
345 mroute_clean_tables(mrt);
346 kfree(mrt);
347}
348
339/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ 349/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
340 350
341static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) 351static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 6232d476f37e..8f3d05424a3e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -185,10 +185,10 @@ exit:
185 return sk; 185 return sk;
186} 186}
187 187
188static void inet_get_ping_group_range_net(struct net *net, gid_t *low, 188static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
189 gid_t *high) 189 kgid_t *high)
190{ 190{
191 gid_t *data = net->ipv4.sysctl_ping_group_range; 191 kgid_t *data = net->ipv4.sysctl_ping_group_range;
192 unsigned int seq; 192 unsigned int seq;
193 193
194 do { 194 do {
@@ -203,19 +203,13 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
203static int ping_init_sock(struct sock *sk) 203static int ping_init_sock(struct sock *sk)
204{ 204{
205 struct net *net = sock_net(sk); 205 struct net *net = sock_net(sk);
206 gid_t group = current_egid(); 206 kgid_t group = current_egid();
207 gid_t range[2];
208 struct group_info *group_info = get_current_groups(); 207 struct group_info *group_info = get_current_groups();
209 int i, j, count = group_info->ngroups; 208 int i, j, count = group_info->ngroups;
210 kgid_t low, high; 209 kgid_t low, high;
211 210
212 inet_get_ping_group_range_net(net, range, range+1); 211 inet_get_ping_group_range_net(net, &low, &high);
213 low = make_kgid(&init_user_ns, range[0]); 212 if (gid_lte(low, group) && gid_lte(group, high))
214 high = make_kgid(&init_user_ns, range[1]);
215 if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
216 return -EACCES;
217
218 if (range[0] <= group && group <= range[1])
219 return 0; 213 return 0;
220 214
221 for (i = 0; i < group_info->nblocks; i++) { 215 for (i = 0; i < group_info->nblocks; i++) {
@@ -845,7 +839,9 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
845 bucket, src, srcp, dest, destp, sp->sk_state, 839 bucket, src, srcp, dest, destp, sp->sk_state,
846 sk_wmem_alloc_get(sp), 840 sk_wmem_alloc_get(sp),
847 sk_rmem_alloc_get(sp), 841 sk_rmem_alloc_get(sp),
848 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 842 0, 0L, 0,
843 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
844 0, sock_i_ino(sp),
849 atomic_read(&sp->sk_refcnt), sp, 845 atomic_read(&sp->sk_refcnt), sp,
850 atomic_read(&sp->sk_drops), len); 846 atomic_read(&sp->sk_drops), len);
851} 847}
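
ping_init_sock() gates the unprivileged ICMP datagram socket on ping_group_range, now held and compared as kgid_t with gid_lte(). From userspace the permitted operation looks like this (assuming the caller's group falls inside the configured range):

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	/* Permitted without CAP_NET_RAW when the caller's gid lies within
	 * net.ipv4.ping_group_range (default "1 0", i.e. nobody). */
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

	if (fd < 0)
		perror("ping socket");
	return fd < 0;
}
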
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 957acd12250b..8de53e1ddd54 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -263,6 +263,10 @@ static const struct snmp_mib snmp4_net_list[] = {
263 SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK), 263 SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
264 SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE), 264 SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
265 SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE), 265 SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
266 SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
267 SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
268 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
269 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
266 SNMP_MIB_SENTINEL 270 SNMP_MIB_SENTINEL
267}; 271};
268 272
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ff0f071969ea..f2425785d40a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -992,7 +992,9 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
992 i, src, srcp, dest, destp, sp->sk_state, 992 i, src, srcp, dest, destp, sp->sk_state,
993 sk_wmem_alloc_get(sp), 993 sk_wmem_alloc_get(sp),
994 sk_rmem_alloc_get(sp), 994 sk_rmem_alloc_get(sp),
995 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 995 0, 0L, 0,
996 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
997 0, sock_i_ino(sp),
996 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 998 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
997} 999}
998 1000
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 50f6d3adb474..dc9549b5eb1c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
934 if (mtu < ip_rt_min_pmtu) 934 if (mtu < ip_rt_min_pmtu)
935 mtu = ip_rt_min_pmtu; 935 mtu = ip_rt_min_pmtu;
936 936
937 rcu_read_lock();
937 if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { 938 if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
938 struct fib_nh *nh = &FIB_RES_NH(res); 939 struct fib_nh *nh = &FIB_RES_NH(res);
939 940
940 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 941 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
941 jiffies + ip_rt_mtu_expires); 942 jiffies + ip_rt_mtu_expires);
942 } 943 }
944 rcu_read_unlock();
943 return mtu; 945 return mtu;
944} 946}
945 947
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
956 dst->obsolete = DST_OBSOLETE_KILL; 958 dst->obsolete = DST_OBSOLETE_KILL;
957 } else { 959 } else {
958 rt->rt_pmtu = mtu; 960 rt->rt_pmtu = mtu;
959 dst_set_expires(&rt->dst, ip_rt_mtu_expires); 961 rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
960 } 962 }
961} 963}
962 964
@@ -1132,10 +1134,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1132 const struct rtable *rt = (const struct rtable *) dst; 1134 const struct rtable *rt = (const struct rtable *) dst;
1133 unsigned int mtu = rt->rt_pmtu; 1135 unsigned int mtu = rt->rt_pmtu;
1134 1136
1135 if (mtu && time_after_eq(jiffies, rt->dst.expires)) 1137 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1136 mtu = 0;
1137
1138 if (!mtu)
1139 mtu = dst_metric_raw(dst, RTAX_MTU); 1138 mtu = dst_metric_raw(dst, RTAX_MTU);
1140 1139
1141 if (mtu && rt_is_output_route(rt)) 1140 if (mtu && rt_is_output_route(rt))
@@ -1263,7 +1262,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1263{ 1262{
1264 struct rtable *rt = (struct rtable *) dst; 1263 struct rtable *rt = (struct rtable *) dst;
1265 1264
1266 if (dst->flags & DST_NOCACHE) { 1265 if (!list_empty(&rt->rt_uncached)) {
1267 spin_lock_bh(&rt_uncached_lock); 1266 spin_lock_bh(&rt_uncached_lock);
1268 list_del(&rt->rt_uncached); 1267 list_del(&rt->rt_uncached);
1269 spin_unlock_bh(&rt_uncached_lock); 1268 spin_unlock_bh(&rt_uncached_lock);
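
The __ip_rt_update_pmtu() change widens the RCU read side so the fib_nh returned by fib_lookup() cannot be freed while update_or_create_fnhe() is still using it. The general shape of that pattern, sketched with a hypothetical demo_entry:

#include <linux/rcupdate.h>

struct demo_entry {
	int mtu;
};

static struct demo_entry __rcu *demo_tbl;	/* published with rcu_assign_pointer() */

static int demo_read_mtu(void)
{
	struct demo_entry *e;
	int mtu = 0;

	rcu_read_lock();	/* must cover the lookup *and* every use */
	e = rcu_dereference(demo_tbl);
	if (e)
		mtu = e->mtu;
	rcu_read_unlock();
	return mtu;
}
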
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650e1528e1e6..ba48e799b031 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -319,6 +319,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
319 ireq->tstamp_ok = tcp_opt.saw_tstamp; 319 ireq->tstamp_ok = tcp_opt.saw_tstamp;
320 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; 320 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
321 treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0; 321 treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
322 treq->listener = NULL;
322 323
 323	/* We threw the options of the initial SYN away, so we hope 324	/* We threw the options of the initial SYN away, so we hope
324 * the ACK carries the same options again (see RFC1122 4.2.3.8) 325 * the ACK carries the same options again (see RFC1122 4.2.3.8)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1b5ce96707a3..9205e492dc9d 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -76,9 +76,9 @@ static int ipv4_local_port_range(ctl_table *table, int write,
76} 76}
77 77
78 78
79static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) 79static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
80{ 80{
81 gid_t *data = table->data; 81 kgid_t *data = table->data;
82 unsigned int seq; 82 unsigned int seq;
83 do { 83 do {
84 seq = read_seqbegin(&sysctl_local_ports.lock); 84 seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -89,12 +89,12 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low,
89} 89}
90 90
91/* Update system visible IP port range */ 91/* Update system visible IP port range */
92static void set_ping_group_range(struct ctl_table *table, gid_t range[2]) 92static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
93{ 93{
94 gid_t *data = table->data; 94 kgid_t *data = table->data;
95 write_seqlock(&sysctl_local_ports.lock); 95 write_seqlock(&sysctl_local_ports.lock);
96 data[0] = range[0]; 96 data[0] = low;
97 data[1] = range[1]; 97 data[1] = high;
98 write_sequnlock(&sysctl_local_ports.lock); 98 write_sequnlock(&sysctl_local_ports.lock);
99} 99}
100 100
@@ -103,21 +103,33 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
103 void __user *buffer, 103 void __user *buffer,
104 size_t *lenp, loff_t *ppos) 104 size_t *lenp, loff_t *ppos)
105{ 105{
106 struct user_namespace *user_ns = current_user_ns();
106 int ret; 107 int ret;
107 gid_t range[2]; 108 gid_t urange[2];
109 kgid_t low, high;
108 ctl_table tmp = { 110 ctl_table tmp = {
109 .data = &range, 111 .data = &urange,
110 .maxlen = sizeof(range), 112 .maxlen = sizeof(urange),
111 .mode = table->mode, 113 .mode = table->mode,
112 .extra1 = &ip_ping_group_range_min, 114 .extra1 = &ip_ping_group_range_min,
113 .extra2 = &ip_ping_group_range_max, 115 .extra2 = &ip_ping_group_range_max,
114 }; 116 };
115 117
116 inet_get_ping_group_range_table(table, range, range + 1); 118 inet_get_ping_group_range_table(table, &low, &high);
119 urange[0] = from_kgid_munged(user_ns, low);
120 urange[1] = from_kgid_munged(user_ns, high);
117 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 121 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
118 122
119 if (write && ret == 0) 123 if (write && ret == 0) {
120 set_ping_group_range(table, range); 124 low = make_kgid(user_ns, urange[0]);
125 high = make_kgid(user_ns, urange[1]);
126 if (!gid_valid(low) || !gid_valid(high) ||
127 (urange[1] < urange[0]) || gid_lt(high, low)) {
128 low = make_kgid(&init_user_ns, 1);
129 high = make_kgid(&init_user_ns, 0);
130 }
131 set_ping_group_range(table, low, high);
132 }
121 133
122 return ret; 134 return ret;
123} 135}
@@ -220,6 +232,45 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
220 return 0; 232 return 0;
221} 233}
222 234
235int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
236 size_t *lenp, loff_t *ppos)
237{
238 ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
239 struct tcp_fastopen_context *ctxt;
240 int ret;
241 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
242
243 tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
244 if (!tbl.data)
245 return -ENOMEM;
246
247 rcu_read_lock();
248 ctxt = rcu_dereference(tcp_fastopen_ctx);
249 if (ctxt)
250 memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
251 rcu_read_unlock();
252
253 snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
254 user_key[0], user_key[1], user_key[2], user_key[3]);
255 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
256
257 if (write && ret == 0) {
258 if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
259 user_key + 2, user_key + 3) != 4) {
260 ret = -EINVAL;
261 goto bad_key;
262 }
263 tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
264 }
265
266bad_key:
267 pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
268 user_key[0], user_key[1], user_key[2], user_key[3],
269 (char *)tbl.data, ret);
270 kfree(tbl.data);
271 return ret;
272}
273
223static struct ctl_table ipv4_table[] = { 274static struct ctl_table ipv4_table[] = {
224 { 275 {
225 .procname = "tcp_timestamps", 276 .procname = "tcp_timestamps",
@@ -374,6 +425,12 @@ static struct ctl_table ipv4_table[] = {
374 .proc_handler = proc_dointvec, 425 .proc_handler = proc_dointvec,
375 }, 426 },
376 { 427 {
428 .procname = "tcp_fastopen_key",
429 .mode = 0600,
430 .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
431 .proc_handler = proc_tcp_fastopen_key,
432 },
433 {
377 .procname = "tcp_tw_recycle", 434 .procname = "tcp_tw_recycle",
378 .data = &tcp_death_row.sysctl_tw_recycle, 435 .data = &tcp_death_row.sysctl_tw_recycle,
379 .maxlen = sizeof(int), 436 .maxlen = sizeof(int),
@@ -786,7 +843,7 @@ static struct ctl_table ipv4_net_table[] = {
786 { 843 {
787 .procname = "ping_group_range", 844 .procname = "ping_group_range",
788 .data = &init_net.ipv4.sysctl_ping_group_range, 845 .data = &init_net.ipv4.sysctl_ping_group_range,
789 .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range), 846 .maxlen = sizeof(gid_t)*2,
790 .mode = 0644, 847 .mode = 0644,
791 .proc_handler = ipv4_ping_group_range, 848 .proc_handler = ipv4_ping_group_range,
792 }, 849 },
@@ -830,8 +887,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
830 * Sane defaults - nobody may create ping sockets. 887 * Sane defaults - nobody may create ping sockets.
831 * Boot scripts should set this to distro-specific group. 888 * Boot scripts should set this to distro-specific group.
832 */ 889 */
833 net->ipv4.sysctl_ping_group_range[0] = 1; 890 net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
834 net->ipv4.sysctl_ping_group_range[1] = 0; 891 net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
835 892
836 tcp_init_mem(net); 893 tcp_init_mem(net);
837 894
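
proc_tcp_fastopen_key exchanges the 16-byte AES key as four dash-separated 32-bit hex words. Reading and replacing it from userspace might look like the following (root only, the entry is mode 0600; the key shown is an arbitrary example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char cur[48] = "";
	const char newkey[] = "00112233-44556677-8899aabb-ccddeeff";
	int fd = open("/proc/sys/net/ipv4/tcp_fastopen_key", O_RDWR);

	if (fd < 0)
		return 1;	/* mode 0600: root only */
	read(fd, cur, sizeof(cur) - 1);
	printf("current key: %s\n", cur);
	lseek(fd, 0, SEEK_SET);	/* sysctl writes must start at offset 0 */
	write(fd, newkey, sizeof(newkey) - 1);	/* parsed by the sscanf() above */
	close(fd);
	return 0;
}
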
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2109ff4a1daf..df83d744e380 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -486,8 +486,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
486 if (sk->sk_shutdown & RCV_SHUTDOWN) 486 if (sk->sk_shutdown & RCV_SHUTDOWN)
487 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 487 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
488 488
489 /* Connected? */ 489 /* Connected or passive Fast Open socket? */
490 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { 490 if (sk->sk_state != TCP_SYN_SENT &&
491 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
491 int target = sock_rcvlowat(sk, 0, INT_MAX); 492 int target = sock_rcvlowat(sk, 0, INT_MAX);
492 493
493 if (tp->urg_seq == tp->copied_seq && 494 if (tp->urg_seq == tp->copied_seq &&
@@ -840,10 +841,15 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
840 ssize_t copied; 841 ssize_t copied;
841 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 842 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
842 843
843 /* Wait for a connection to finish. */ 844 /* Wait for a connection to finish. One exception is TCP Fast Open
844 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 845 * (passive side) where data is allowed to be sent before a connection
846 * is fully established.
847 */
848 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
849 !tcp_passive_fastopen(sk)) {
845 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 850 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
846 goto out_err; 851 goto out_err;
852 }
847 853
848 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 854 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
849 855
@@ -1042,10 +1048,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1042 1048
1043 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1049 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1044 1050
1045 /* Wait for a connection to finish. */ 1051 /* Wait for a connection to finish. One exception is TCP Fast Open
1046 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 1052 * (passive side) where data is allowed to be sent before a connection
1053 * is fully established.
1054 */
1055 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1056 !tcp_passive_fastopen(sk)) {
1047 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 1057 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1048 goto do_error; 1058 goto do_error;
1059 }
1049 1060
1050 if (unlikely(tp->repair)) { 1061 if (unlikely(tp->repair)) {
1051 if (tp->repair_queue == TCP_RECV_QUEUE) { 1062 if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -2144,6 +2155,10 @@ void tcp_close(struct sock *sk, long timeout)
2144 * they look as CLOSING or LAST_ACK for Linux) 2155 * they look as CLOSING or LAST_ACK for Linux)
2145 * Probably, I missed some more holelets. 2156 * Probably, I missed some more holelets.
2146 * --ANK 2157 * --ANK
2158 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2159 * in a single packet! (May consider it later but will
2160 * probably need API support or TCP_CORK SYN-ACK until
2161 * data is written and socket is closed.)
2147 */ 2162 */
2148 tcp_send_fin(sk); 2163 tcp_send_fin(sk);
2149 } 2164 }
@@ -2215,8 +2230,16 @@ adjudge_to_death:
2215 } 2230 }
2216 } 2231 }
2217 2232
2218 if (sk->sk_state == TCP_CLOSE) 2233 if (sk->sk_state == TCP_CLOSE) {
2234 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2235 /* We could get here with a non-NULL req if the socket is
2236 * aborted (e.g., closed with unread data) before 3WHS
2237 * finishes.
2238 */
2239 if (req != NULL)
2240 reqsk_fastopen_remove(sk, req, false);
2219 inet_csk_destroy_sock(sk); 2241 inet_csk_destroy_sock(sk);
2242 }
2220 /* Otherwise, socket is reprieved until protocol close. */ 2243 /* Otherwise, socket is reprieved until protocol close. */
2221 2244
2222out: 2245out:
@@ -2688,6 +2711,14 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2688 else 2711 else
2689 icsk->icsk_user_timeout = msecs_to_jiffies(val); 2712 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2690 break; 2713 break;
2714
2715 case TCP_FASTOPEN:
2716 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2717 TCPF_LISTEN)))
2718 err = fastopen_init_queue(sk, val);
2719 else
2720 err = -EINVAL;
2721 break;
2691 default: 2722 default:
2692 err = -ENOPROTOOPT; 2723 err = -ENOPROTOOPT;
2693 break; 2724 break;
@@ -3501,11 +3532,15 @@ EXPORT_SYMBOL(tcp_cookie_generator);
3501 3532
3502void tcp_done(struct sock *sk) 3533void tcp_done(struct sock *sk)
3503{ 3534{
3535 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3536
3504 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 3537 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3505 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 3538 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3506 3539
3507 tcp_set_state(sk, TCP_CLOSE); 3540 tcp_set_state(sk, TCP_CLOSE);
3508 tcp_clear_xmit_timers(sk); 3541 tcp_clear_xmit_timers(sk);
3542 if (req != NULL)
3543 reqsk_fastopen_remove(sk, req, false);
3509 3544
3510 sk->sk_shutdown = SHUTDOWN_MASK; 3545 sk->sk_shutdown = SHUTDOWN_MASK;
3511 3546
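
The tcp.c hunks repeatedly test state membership with (1 << sk->sk_state) & mask and then carve out the passive Fast Open exception. A self-contained illustration of the idiom; the state numbering here is illustrative, not the kernel's real TCP_* values:

#include <stdio.h>

/* Illustrative state ids only; the kernel's TCP_* values differ. */
enum { ST_ESTABLISHED = 1, ST_SYN_SENT, ST_SYN_RECV, ST_CLOSE_WAIT };
#define F(st)	(1 << (st))

static int may_send(int state, int passive_fastopen)
{
	/* "state not in {ESTABLISHED, CLOSE_WAIT}" is one mask test; the
	 * TFO exception then admits a passive socket still in SYN_RECV. */
	if (((1 << state) & ~(F(ST_ESTABLISHED) | F(ST_CLOSE_WAIT))) &&
	    !passive_fastopen)
		return 0;	/* would have to wait for the handshake */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n", may_send(ST_ESTABLISHED, 0),
	       may_send(ST_SYN_RECV, 0), may_send(ST_SYN_RECV, 1));
	return 0;	/* prints: 1 0 1 */
}
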
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index a7f729c409d7..8f7ef0ad80e5 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -1,10 +1,91 @@
1#include <linux/err.h>
1#include <linux/init.h> 2#include <linux/init.h>
2#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/list.h>
5#include <linux/tcp.h>
6#include <linux/rcupdate.h>
7#include <linux/rculist.h>
8#include <net/inetpeer.h>
9#include <net/tcp.h>
3 10
4int sysctl_tcp_fastopen; 11int sysctl_tcp_fastopen __read_mostly;
12
13struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
14
15static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
16
17static void tcp_fastopen_ctx_free(struct rcu_head *head)
18{
19 struct tcp_fastopen_context *ctx =
20 container_of(head, struct tcp_fastopen_context, rcu);
21 crypto_free_cipher(ctx->tfm);
22 kfree(ctx);
23}
24
25int tcp_fastopen_reset_cipher(void *key, unsigned int len)
26{
27 int err;
28 struct tcp_fastopen_context *ctx, *octx;
29
30 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
31 if (!ctx)
32 return -ENOMEM;
33 ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
34
35 if (IS_ERR(ctx->tfm)) {
36 err = PTR_ERR(ctx->tfm);
37error: kfree(ctx);
38 pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
39 return err;
40 }
41 err = crypto_cipher_setkey(ctx->tfm, key, len);
42 if (err) {
43 pr_err("TCP: TFO cipher key error: %d\n", err);
44 crypto_free_cipher(ctx->tfm);
45 goto error;
46 }
47 memcpy(ctx->key, key, len);
48
49 spin_lock(&tcp_fastopen_ctx_lock);
50
51 octx = rcu_dereference_protected(tcp_fastopen_ctx,
52 lockdep_is_held(&tcp_fastopen_ctx_lock));
53 rcu_assign_pointer(tcp_fastopen_ctx, ctx);
54 spin_unlock(&tcp_fastopen_ctx_lock);
55
56 if (octx)
57 call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
58 return err;
59}
60
61/* Computes the fastopen cookie for the peer.
 62 * The peer address is 128 bits long (padded with zeros for IPv4).
 63 *
 64 * The caller must check foc->len to determine if a valid cookie
 65 * has been generated successfully.
 66 */
67void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
68{
69 __be32 peer_addr[4] = { addr, 0, 0, 0 };
70 struct tcp_fastopen_context *ctx;
71
72 rcu_read_lock();
73 ctx = rcu_dereference(tcp_fastopen_ctx);
74 if (ctx) {
75 crypto_cipher_encrypt_one(ctx->tfm,
76 foc->val,
77 (__u8 *)peer_addr);
78 foc->len = TCP_FASTOPEN_COOKIE_SIZE;
79 }
80 rcu_read_unlock();
81}
5 82
6static int __init tcp_fastopen_init(void) 83static int __init tcp_fastopen_init(void)
7{ 84{
85 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
86
87 get_random_bytes(key, sizeof(key));
88 tcp_fastopen_reset_cipher(key, sizeof(key));
8 return 0; 89 return 0;
9} 90}
10 91
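
tcp_fastopen_cookie_gen() defines the cookie as the first TCP_FASTOPEN_COOKIE_SIZE (8) bytes of AES(key, peer address zero-padded to 128 bits). An offline sketch that reproduces the computation, with OpenSSL's raw AES block standing in for the kernel cipher (build with -lcrypto; the key and peer address are placeholders):

#include <arpa/inet.h>
#include <openssl/aes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t key[16] = { 0 };	/* placeholder for the kernel's secret */
	uint8_t in[16] = { 0 }, out[16];
	struct in_addr peer;
	AES_KEY aes;
	int i;

	inet_pton(AF_INET, "192.0.2.1", &peer);
	memcpy(in, &peer.s_addr, 4);	/* IPv4: pad to 128 bits with zeros */

	AES_set_encrypt_key(key, 128, &aes);
	AES_encrypt(in, out, &aes);	/* one raw AES block, as in the kernel */

	for (i = 0; i < 8; i++)		/* TCP_FASTOPEN_COOKIE_SIZE == 8 */
		printf("%02x", out[i]);
	putchar('\n');
	return 0;
}
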
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bcfccc5cb8d0..8c304a400798 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -378,7 +378,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
378/* 4. Try to fixup all. It is made immediately after connection enters 378/* 4. Try to fixup all. It is made immediately after connection enters
379 * established state. 379 * established state.
380 */ 380 */
381static void tcp_init_buffer_space(struct sock *sk) 381void tcp_init_buffer_space(struct sock *sk)
382{ 382{
383 struct tcp_sock *tp = tcp_sk(sk); 383 struct tcp_sock *tp = tcp_sk(sk);
384 int maxwin; 384 int maxwin;
@@ -2930,13 +2930,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2930 * tcp_xmit_retransmit_queue(). 2930 * tcp_xmit_retransmit_queue().
2931 */ 2931 */
2932static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, 2932static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2933 int newly_acked_sacked, bool is_dupack, 2933 int prior_sacked, bool is_dupack,
2934 int flag) 2934 int flag)
2935{ 2935{
2936 struct inet_connection_sock *icsk = inet_csk(sk); 2936 struct inet_connection_sock *icsk = inet_csk(sk);
2937 struct tcp_sock *tp = tcp_sk(sk); 2937 struct tcp_sock *tp = tcp_sk(sk);
2938 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 2938 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2939 (tcp_fackets_out(tp) > tp->reordering)); 2939 (tcp_fackets_out(tp) > tp->reordering));
2940 int newly_acked_sacked = 0;
2940 int fast_rexmit = 0; 2941 int fast_rexmit = 0;
2941 2942
2942 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 2943 if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2996,6 +2997,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2996 tcp_add_reno_sack(sk); 2997 tcp_add_reno_sack(sk);
2997 } else 2998 } else
2998 do_lost = tcp_try_undo_partial(sk, pkts_acked); 2999 do_lost = tcp_try_undo_partial(sk, pkts_acked);
3000 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
2999 break; 3001 break;
3000 case TCP_CA_Loss: 3002 case TCP_CA_Loss:
3001 if (flag & FLAG_DATA_ACKED) 3003 if (flag & FLAG_DATA_ACKED)
@@ -3017,6 +3019,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3017 if (is_dupack) 3019 if (is_dupack)
3018 tcp_add_reno_sack(sk); 3020 tcp_add_reno_sack(sk);
3019 } 3021 }
3022 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
3020 3023
3021 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3024 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3022 tcp_try_undo_dsack(sk); 3025 tcp_try_undo_dsack(sk);
@@ -3124,6 +3127,12 @@ void tcp_rearm_rto(struct sock *sk)
3124{ 3127{
3125 struct tcp_sock *tp = tcp_sk(sk); 3128 struct tcp_sock *tp = tcp_sk(sk);
3126 3129
3130 /* If the retrans timer is currently being used by Fast Open
3131 * for SYN-ACK retrans purpose, stay put.
3132 */
3133 if (tp->fastopen_rsk)
3134 return;
3135
3127 if (!tp->packets_out) { 3136 if (!tp->packets_out) {
3128 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3137 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3129 } else { 3138 } else {
@@ -3594,7 +3603,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3594 int prior_packets; 3603 int prior_packets;
3595 int prior_sacked = tp->sacked_out; 3604 int prior_sacked = tp->sacked_out;
3596 int pkts_acked = 0; 3605 int pkts_acked = 0;
3597 int newly_acked_sacked = 0;
3598 bool frto_cwnd = false; 3606 bool frto_cwnd = false;
3599 3607
3600 /* If the ack is older than previous acks 3608 /* If the ack is older than previous acks
@@ -3670,8 +3678,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3670 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3678 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
3671 3679
3672 pkts_acked = prior_packets - tp->packets_out; 3680 pkts_acked = prior_packets - tp->packets_out;
3673 newly_acked_sacked = (prior_packets - prior_sacked) -
3674 (tp->packets_out - tp->sacked_out);
3675 3681
3676 if (tp->frto_counter) 3682 if (tp->frto_counter)
3677 frto_cwnd = tcp_process_frto(sk, flag); 3683 frto_cwnd = tcp_process_frto(sk, flag);
@@ -3685,7 +3691,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3685 tcp_may_raise_cwnd(sk, flag)) 3691 tcp_may_raise_cwnd(sk, flag))
3686 tcp_cong_avoid(sk, ack, prior_in_flight); 3692 tcp_cong_avoid(sk, ack, prior_in_flight);
3687 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3693 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
3688 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3694 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3689 is_dupack, flag); 3695 is_dupack, flag);
3690 } else { 3696 } else {
3691 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3697 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3702,7 +3708,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3702no_queue: 3708no_queue:
3703 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3709 /* If data was DSACKed, see if we can undo a cwnd reduction. */
3704 if (flag & FLAG_DSACKING_ACK) 3710 if (flag & FLAG_DSACKING_ACK)
3705 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3711 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3706 is_dupack, flag); 3712 is_dupack, flag);
3707 /* If this ack opens up a zero window, clear backoff. It was 3713 /* If this ack opens up a zero window, clear backoff. It was
3708 * being used to time the probes, and is probably far higher than 3714 * being used to time the probes, and is probably far higher than
@@ -3722,8 +3728,7 @@ old_ack:
3722 */ 3728 */
3723 if (TCP_SKB_CB(skb)->sacked) { 3729 if (TCP_SKB_CB(skb)->sacked) {
3724 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3730 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3725 newly_acked_sacked = tp->sacked_out - prior_sacked; 3731 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3726 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
3727 is_dupack, flag); 3732 is_dupack, flag);
3728 } 3733 }
3729 3734
@@ -4039,7 +4044,7 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
4039} 4044}
4040 4045
4041/* When we get a reset we do this. */ 4046/* When we get a reset we do this. */
4042static void tcp_reset(struct sock *sk) 4047void tcp_reset(struct sock *sk)
4043{ 4048{
4044 /* We want the right error as BSD sees it (and indeed as we do). */ 4049 /* We want the right error as BSD sees it (and indeed as we do). */
4045 switch (sk->sk_state) { 4050 switch (sk->sk_state) {
@@ -5896,7 +5901,9 @@ discard:
5896 tcp_send_synack(sk); 5901 tcp_send_synack(sk);
5897#if 0 5902#if 0
5898 /* Note, we could accept data and URG from this segment. 5903 /* Note, we could accept data and URG from this segment.
 5899 * There are no obstacles to making this. 5904 * There are no obstacles to making this (except that we must
5905 * either change tcp_recvmsg() to prevent it from returning data
5906 * before 3WHS completes per RFC793, or employ TCP Fast Open).
5900 * 5907 *
5901 * However, if we ignore data in ACKless segments sometimes, 5908 * However, if we ignore data in ACKless segments sometimes,
5902 * we have no reasons to accept it sometimes. 5909 * we have no reasons to accept it sometimes.
@@ -5936,6 +5943,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5936{ 5943{
5937 struct tcp_sock *tp = tcp_sk(sk); 5944 struct tcp_sock *tp = tcp_sk(sk);
5938 struct inet_connection_sock *icsk = inet_csk(sk); 5945 struct inet_connection_sock *icsk = inet_csk(sk);
5946 struct request_sock *req;
5939 int queued = 0; 5947 int queued = 0;
5940 5948
5941 tp->rx_opt.saw_tstamp = 0; 5949 tp->rx_opt.saw_tstamp = 0;
@@ -5991,7 +5999,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5991 return 0; 5999 return 0;
5992 } 6000 }
5993 6001
5994 if (!tcp_validate_incoming(sk, skb, th, 0)) 6002 req = tp->fastopen_rsk;
6003 if (req != NULL) {
6004 BUG_ON(sk->sk_state != TCP_SYN_RECV &&
6005 sk->sk_state != TCP_FIN_WAIT1);
6006
6007 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
6008 goto discard;
6009 } else if (!tcp_validate_incoming(sk, skb, th, 0))
5995 return 0; 6010 return 0;
5996 6011
5997 /* step 5: check the ACK field */ 6012 /* step 5: check the ACK field */
@@ -6001,7 +6016,22 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6001 switch (sk->sk_state) { 6016 switch (sk->sk_state) {
6002 case TCP_SYN_RECV: 6017 case TCP_SYN_RECV:
6003 if (acceptable) { 6018 if (acceptable) {
6004 tp->copied_seq = tp->rcv_nxt; 6019 /* Once we leave TCP_SYN_RECV, we no longer
6020 * need req so release it.
6021 */
6022 if (req) {
6023 reqsk_fastopen_remove(sk, req, false);
6024 } else {
6025 /* Make sure socket is routed, for
6026 * correct metrics.
6027 */
6028 icsk->icsk_af_ops->rebuild_header(sk);
6029 tcp_init_congestion_control(sk);
6030
6031 tcp_mtup_init(sk);
6032 tcp_init_buffer_space(sk);
6033 tp->copied_seq = tp->rcv_nxt;
6034 }
6005 smp_mb(); 6035 smp_mb();
6006 tcp_set_state(sk, TCP_ESTABLISHED); 6036 tcp_set_state(sk, TCP_ESTABLISHED);
6007 sk->sk_state_change(sk); 6037 sk->sk_state_change(sk);
@@ -6023,23 +6053,27 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6023 if (tp->rx_opt.tstamp_ok) 6053 if (tp->rx_opt.tstamp_ok)
6024 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 6054 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6025 6055
6026 /* Make sure socket is routed, for 6056 if (req) {
6027 * correct metrics. 6057 /* Re-arm the timer because data may
6028 */ 6058 * have been sent out. This is similar
6029 icsk->icsk_af_ops->rebuild_header(sk); 6059 * to the regular data transmission case
6030 6060 * when new data has just been ack'ed.
6031 tcp_init_metrics(sk); 6061 *
6032 6062 * (TFO) - we could try to be more
 6033 tcp_init_congestion_control(sk); 6063 * aggressive and retransmit any data
 6064 * sooner based on when it was sent
6065 * out.
6066 */
6067 tcp_rearm_rto(sk);
6068 } else
6069 tcp_init_metrics(sk);
6034 6070
6035 /* Prevent spurious tcp_cwnd_restart() on 6071 /* Prevent spurious tcp_cwnd_restart() on
6036 * first data packet. 6072 * first data packet.
6037 */ 6073 */
6038 tp->lsndtime = tcp_time_stamp; 6074 tp->lsndtime = tcp_time_stamp;
6039 6075
6040 tcp_mtup_init(sk);
6041 tcp_initialize_rcv_mss(sk); 6076 tcp_initialize_rcv_mss(sk);
6042 tcp_init_buffer_space(sk);
6043 tcp_fast_path_on(tp); 6077 tcp_fast_path_on(tp);
6044 } else { 6078 } else {
6045 return 1; 6079 return 1;
@@ -6047,6 +6081,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6047 break; 6081 break;
6048 6082
6049 case TCP_FIN_WAIT1: 6083 case TCP_FIN_WAIT1:
6084 /* If we enter the TCP_FIN_WAIT1 state and we are a
6085 * Fast Open socket and this is the first acceptable
6086 * ACK we have received, this would have acknowledged
 6087 * our SYNACK, so stop the SYNACK timer.
6088 */
6089 if (acceptable && req != NULL) {
6090 /* We no longer need the request sock. */
6091 reqsk_fastopen_remove(sk, req, false);
6092 tcp_rearm_rto(sk);
6093 }
6050 if (tp->snd_una == tp->write_seq) { 6094 if (tp->snd_una == tp->write_seq) {
6051 struct dst_entry *dst; 6095 struct dst_entry *dst;
6052 6096
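
The tcp_input.c changes above split the TCP_SYN_RECV handling in two: a Fast Open child already ran rebuild_header(), congestion-control, MTU-probing and buffer-space init when it was created from the SYN, so on the first acceptable ACK it only drops the request sock and re-arms the RTO, while a regular passive open still performs the full init (and is the only path that seeds the cached metrics). A standalone sketch of that decision; the names are local to the sketch, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Model of the SYN_RECV branch after the patch: which init work is left
 * to do when the first acceptable ACK arrives. */
static void first_acceptable_ack(bool have_fastopen_req)
{
	if (have_fastopen_req) {
		/* TFO child: init already ran at SYN time in the
		 * conn-request path; drop the req and re-arm the RTO
		 * because data may already have been sent out. */
		puts("reqsk_fastopen_remove(); tcp_rearm_rto()");
	} else {
		/* Regular passive open: full init happens here. */
		puts("rebuild_header(); init congestion control; "
		     "MTU probing; buffer space; init metrics");
	}
}

int main(void)
{
	first_acceptable_ack(true);   /* Fast Open child */
	first_acceptable_ack(false);  /* classic three-way handshake */
	return 0;
}
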
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1e15c5be04e7..e64abed249cc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -352,6 +352,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
352 const int code = icmp_hdr(icmp_skb)->code; 352 const int code = icmp_hdr(icmp_skb)->code;
353 struct sock *sk; 353 struct sock *sk;
354 struct sk_buff *skb; 354 struct sk_buff *skb;
355 struct request_sock *req;
355 __u32 seq; 356 __u32 seq;
356 __u32 remaining; 357 __u32 remaining;
357 int err; 358 int err;
@@ -394,9 +395,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
394 395
395 icsk = inet_csk(sk); 396 icsk = inet_csk(sk);
396 tp = tcp_sk(sk); 397 tp = tcp_sk(sk);
398 req = tp->fastopen_rsk;
397 seq = ntohl(th->seq); 399 seq = ntohl(th->seq);
398 if (sk->sk_state != TCP_LISTEN && 400 if (sk->sk_state != TCP_LISTEN &&
399 !between(seq, tp->snd_una, tp->snd_nxt)) { 401 !between(seq, tp->snd_una, tp->snd_nxt) &&
402 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
403 /* For a Fast Open socket, allow seq to be snt_isn. */
400 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 404 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
401 goto out; 405 goto out;
402 } 406 }
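
The widened test in tcp_v4_err() relies on modulo-2^32 sequence arithmetic: between(seq, snd_una, snd_nxt) is the kernel's wraparound-safe range check, and the new clause additionally admits an ICMP error that quotes the SYN-ACK's own ISN, which sits just before snd_una on a Fast Open socket. A compilable model; between() mirrors the kernel helper, the rest is stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true iff low <= seq <= high in modulo-2^32 arithmetic */
static bool between(uint32_t seq, uint32_t low, uint32_t high)
{
	return high - low >= seq - low;
}

static bool icmp_seq_ok(uint32_t seq, uint32_t snd_una, uint32_t snd_nxt,
			bool have_fastopen_req, uint32_t snt_isn)
{
	if (between(seq, snd_una, snd_nxt))
		return true;
	/* For a Fast Open socket, allow seq to be snt_isn. */
	return have_fastopen_req && seq == snt_isn;
}

int main(void)
{
	printf("%d\n", between(5, 0xfffffff0u, 100));             /* 1: wraps */
	printf("%d\n", icmp_seq_ok(999, 1000, 2000, true, 999));  /* 1: TFO */
	printf("%d\n", icmp_seq_ok(999, 1000, 2000, false, 999)); /* 0 */
	return 0;
}
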
@@ -435,6 +439,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
435 !icsk->icsk_backoff) 439 !icsk->icsk_backoff)
436 break; 440 break;
437 441
442 /* XXX (TFO) - revisit the following logic for TFO */
443
438 if (sock_owned_by_user(sk)) 444 if (sock_owned_by_user(sk))
439 break; 445 break;
440 446
@@ -466,6 +472,14 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
466 goto out; 472 goto out;
467 } 473 }
468 474
475 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
476 * than following the TCP_SYN_RECV case and closing the socket,
477 * we ignore the ICMP error and keep trying like a fully established
478 * socket. Is this the right thing to do?
479 */
480 if (req && req->sk == NULL)
481 goto out;
482
469 switch (sk->sk_state) { 483 switch (sk->sk_state) {
470 struct request_sock *req, **prev; 484 struct request_sock *req, **prev;
471 case TCP_LISTEN: 485 case TCP_LISTEN:
@@ -498,7 +512,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
498 512
499 case TCP_SYN_SENT: 513 case TCP_SYN_SENT:
500 case TCP_SYN_RECV: /* Cannot happen. 514 case TCP_SYN_RECV: /* Cannot happen.
501 It can happen, f.e., if SYNs crossed. 515 It can happen, f.e., if SYNs crossed,
516 or Fast Open.
502 */ 517 */
503 if (!sock_owned_by_user(sk)) { 518 if (!sock_owned_by_user(sk)) {
504 sk->sk_err = err; 519 sk->sk_err = err;
@@ -809,8 +824,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
809static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 824static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
810 struct request_sock *req) 825 struct request_sock *req)
811{ 826{
812 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, 827 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
813 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 828 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
829 */
830 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
831 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
832 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
814 req->ts_recent, 833 req->ts_recent,
815 0, 834 0,
816 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 835 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -839,7 +858,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
839 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 858 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
840 return -1; 859 return -1;
841 860
842 skb = tcp_make_synack(sk, dst, req, rvp); 861 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
843 862
844 if (skb) { 863 if (skb) {
845 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 864 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
@@ -1272,6 +1291,178 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1272}; 1291};
1273#endif 1292#endif
1274 1293
1294static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1295 struct request_sock *req,
1296 struct tcp_fastopen_cookie *foc,
1297 struct tcp_fastopen_cookie *valid_foc)
1298{
1299 bool skip_cookie = false;
1300 struct fastopen_queue *fastopenq;
1301
1302 if (likely(!fastopen_cookie_present(foc))) {
1303 /* See include/net/tcp.h for the meaning of these knobs */
1304 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1305 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1306 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1307 skip_cookie = true; /* no cookie to validate */
1308 else
1309 return false;
1310 }
1311 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1312 /* A FO option is present; bump the counter. */
1313 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1314
1315 /* Make sure the listener has enabled fastopen, and we don't
1316 * exceed the max # of pending TFO requests allowed before trying
 1317 * to validate the cookie, in order to avoid burning CPU cycles
1318 * unnecessarily.
1319 *
1320 * XXX (TFO) - The implication of checking the max_qlen before
1321 * processing a cookie request is that clients can't differentiate
1322 * between qlen overflow causing Fast Open to be disabled
1323 * temporarily vs a server not supporting Fast Open at all.
1324 */
1325 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1326 fastopenq == NULL || fastopenq->max_qlen == 0)
1327 return false;
1328
1329 if (fastopenq->qlen >= fastopenq->max_qlen) {
1330 struct request_sock *req1;
1331 spin_lock(&fastopenq->lock);
1332 req1 = fastopenq->rskq_rst_head;
1333 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1334 spin_unlock(&fastopenq->lock);
1335 NET_INC_STATS_BH(sock_net(sk),
1336 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 1337 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL */
1338 foc->len = -1;
1339 return false;
1340 }
1341 fastopenq->rskq_rst_head = req1->dl_next;
1342 fastopenq->qlen--;
1343 spin_unlock(&fastopenq->lock);
1344 reqsk_free(req1);
1345 }
1346 if (skip_cookie) {
1347 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1348 return true;
1349 }
1350 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1351 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1352 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1353 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1354 memcmp(&foc->val[0], &valid_foc->val[0],
1355 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1356 return false;
1357 valid_foc->len = -1;
1358 }
1359 /* Acknowledge the data received from the peer. */
1360 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1361 return true;
1362 } else if (foc->len == 0) { /* Client requesting a cookie */
1363 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1364 NET_INC_STATS_BH(sock_net(sk),
1365 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1366 } else {
 1367 /* Client sent a cookie with the wrong size. Treat it
1368 * the same as invalid and return a valid one.
1369 */
1370 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1371 }
1372 return false;
1373}
1374
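
Stripped of the listen-queue pacing and of the cookie-request (foc->len == 0) path, tcp_fastopen_check() reduces to the decision below. The TFO_* bit values are placeholders rather than the kernel's, and the TFO_SERVER_COOKIE_NOT_CHKED knob is omitted; only the branch structure follows the function above:

#include <stdbool.h>
#include <stdio.h>

#define TFO_SERVER_ENABLE          0x1	/* hypothetical values */
#define TFO_SERVER_COOKIE_NOT_REQD 0x2
#define TFO_SERVER_ALWAYS          0x4

static bool fastopen_accept_data(int sysctl_tfo, bool cookie_present,
				 bool cookie_valid, bool syn_has_payload)
{
	bool skip_cookie = false;

	if (!cookie_present) {
		if ((sysctl_tfo & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tfo & TFO_SERVER_COOKIE_NOT_REQD) &&
		     syn_has_payload))
			skip_cookie = true;	/* no cookie to validate */
		else
			return false;		/* fall back to a regular SYN */
	}
	if (!(sysctl_tfo & TFO_SERVER_ENABLE))
		return false;			/* listener did not enable TFO */
	if (skip_cookie)
		return true;
	return cookie_valid;	/* must match the regenerated cookie */
}

int main(void)
{
	printf("%d\n", fastopen_accept_data(TFO_SERVER_ENABLE, true, true, true));
	printf("%d\n", fastopen_accept_data(0, false, false, true));
	return 0;
}
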
1375static int tcp_v4_conn_req_fastopen(struct sock *sk,
1376 struct sk_buff *skb,
1377 struct sk_buff *skb_synack,
1378 struct request_sock *req,
1379 struct request_values *rvp)
1380{
1381 struct tcp_sock *tp = tcp_sk(sk);
1382 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1383 const struct inet_request_sock *ireq = inet_rsk(req);
1384 struct sock *child;
1385
1386 req->retrans = 0;
1387 req->sk = NULL;
1388
1389 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1390 if (child == NULL) {
1391 NET_INC_STATS_BH(sock_net(sk),
1392 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1393 kfree_skb(skb_synack);
1394 return -1;
1395 }
1396 ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1397 ireq->rmt_addr, ireq->opt);
 1398 /* XXX (TFO) - is it ok to ignore the error and continue? */
1399
1400 spin_lock(&queue->fastopenq->lock);
1401 queue->fastopenq->qlen++;
1402 spin_unlock(&queue->fastopenq->lock);
1403
1404 /* Initialize the child socket. Have to fix some values to take
 1405 * into account that the child is a Fast Open socket and is created
1406 * only out of the bits carried in the SYN packet.
1407 */
1408 tp = tcp_sk(child);
1409
1410 tp->fastopen_rsk = req;
 1411 /* Do a hold on the listener sk so that if the listener is being
1412 * closed, the child that has been accepted can live on and still
1413 * access listen_lock.
1414 */
1415 sock_hold(sk);
1416 tcp_rsk(req)->listener = sk;
1417
1418 /* RFC1323: The window in SYN & SYN/ACK segments is never
1419 * scaled. So correct it appropriately.
1420 */
1421 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1422
1423 /* Activate the retrans timer so that SYNACK can be retransmitted.
1424 * The request socket is not added to the SYN table of the parent
1425 * because it's been added to the accept queue directly.
1426 */
1427 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1428 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1429
1430 /* Add the child socket directly into the accept queue */
1431 inet_csk_reqsk_queue_add(sk, req, child);
1432
1433 /* Now finish processing the fastopen child socket. */
1434 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1435 tcp_init_congestion_control(child);
1436 tcp_mtup_init(child);
1437 tcp_init_buffer_space(child);
1438 tcp_init_metrics(child);
1439
1440 /* Queue the data carried in the SYN packet. We need to first
1441 * bump skb's refcnt because the caller will attempt to free it.
1442 *
1443 * XXX (TFO) - we honor a zero-payload TFO request for now.
1444 * (Any reason not to?)
1445 */
1446 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1447 /* Don't queue the skb if there is no payload in SYN.
1448 * XXX (TFO) - How about SYN+FIN?
1449 */
1450 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1451 } else {
1452 skb = skb_get(skb);
1453 skb_dst_drop(skb);
1454 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1455 skb_set_owner_r(skb, child);
1456 __skb_queue_tail(&child->sk_receive_queue, skb);
1457 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1458 }
1459 sk->sk_data_ready(sk, 0);
1460 bh_unlock_sock(child);
1461 sock_put(child);
1462 WARN_ON(req->sk == NULL);
1463 return 0;
1464}
1465
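
tcp_v4_conn_req_fastopen() decides whether anything needs queuing by testing end_seq == seq + 1, which works because the SYN flag itself consumes one sequence number; any SYN payload pushes end_seq further. A two-case demonstration:

#include <stdint.h>
#include <stdio.h>

static uint32_t syn_payload_len(uint32_t seq, uint32_t end_seq)
{
	/* SYN occupies one sequence number; unsigned arithmetic wraps safely */
	return end_seq - seq - 1;
}

int main(void)
{
	printf("%u\n", syn_payload_len(1000, 1001)); /* 0: bare SYN, skip queueing */
	printf("%u\n", syn_payload_len(1000, 1101)); /* 100 bytes queued to child */
	return 0;
}
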
1275int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1466int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1276{ 1467{
1277 struct tcp_extend_values tmp_ext; 1468 struct tcp_extend_values tmp_ext;
@@ -1285,6 +1476,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1285 __be32 daddr = ip_hdr(skb)->daddr; 1476 __be32 daddr = ip_hdr(skb)->daddr;
1286 __u32 isn = TCP_SKB_CB(skb)->when; 1477 __u32 isn = TCP_SKB_CB(skb)->when;
1287 bool want_cookie = false; 1478 bool want_cookie = false;
1479 struct flowi4 fl4;
1480 struct tcp_fastopen_cookie foc = { .len = -1 };
1481 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1482 struct sk_buff *skb_synack;
1483 int do_fastopen;
1288 1484
1289 /* Never answer to SYNs send to broadcast or multicast */ 1485 /* Never answer to SYNs send to broadcast or multicast */
1290 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 1486 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1319,7 +1515,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1319 tcp_clear_options(&tmp_opt); 1515 tcp_clear_options(&tmp_opt);
1320 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1516 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1321 tmp_opt.user_mss = tp->rx_opt.user_mss; 1517 tmp_opt.user_mss = tp->rx_opt.user_mss;
1322 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); 1518 tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1519 want_cookie ? NULL : &foc);
1323 1520
1324 if (tmp_opt.cookie_plus > 0 && 1521 if (tmp_opt.cookie_plus > 0 &&
1325 tmp_opt.saw_tstamp && 1522 tmp_opt.saw_tstamp &&
@@ -1377,8 +1574,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1377 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1574 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1378 req->cookie_ts = tmp_opt.tstamp_ok; 1575 req->cookie_ts = tmp_opt.tstamp_ok;
1379 } else if (!isn) { 1576 } else if (!isn) {
1380 struct flowi4 fl4;
1381
1382 /* VJ's idea. We save last timestamp seen 1577 /* VJ's idea. We save last timestamp seen
1383 * from the destination in peer table, when entering 1578 * from the destination in peer table, when entering
1384 * state TIME-WAIT, and check against it before 1579 * state TIME-WAIT, and check against it before
@@ -1419,14 +1614,52 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1419 tcp_rsk(req)->snt_isn = isn; 1614 tcp_rsk(req)->snt_isn = isn;
1420 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1615 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1421 1616
1422 if (tcp_v4_send_synack(sk, dst, req, 1617 if (dst == NULL) {
1423 (struct request_values *)&tmp_ext, 1618 dst = inet_csk_route_req(sk, &fl4, req);
1424 skb_get_queue_mapping(skb), 1619 if (dst == NULL)
1425 want_cookie) || 1620 goto drop_and_free;
1426 want_cookie) 1621 }
1622 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1623
1624 /* We don't call tcp_v4_send_synack() directly because we need
1625 * to make sure a child socket can be created successfully before
1626 * sending back synack!
1627 *
1628 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1629 * (or better yet, call tcp_send_synack() in the child context
 1630 * directly, but will have to fix a bunch of other code first)
1631 * after syn_recv_sock() except one will need to first fix the
1632 * latter to remove its dependency on the current implementation
1633 * of tcp_v4_send_synack()->tcp_select_initial_window().
1634 */
1635 skb_synack = tcp_make_synack(sk, dst, req,
1636 (struct request_values *)&tmp_ext,
1637 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1638
1639 if (skb_synack) {
1640 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1641 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1642 } else
1643 goto drop_and_free;
1644
1645 if (likely(!do_fastopen)) {
1646 int err;
1647 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1648 ireq->rmt_addr, ireq->opt);
1649 err = net_xmit_eval(err);
1650 if (err || want_cookie)
1651 goto drop_and_free;
1652
1653 tcp_rsk(req)->listener = NULL;
1654 /* Add the request_sock to the SYN table */
1655 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1656 if (fastopen_cookie_present(&foc) && foc.len != 0)
1657 NET_INC_STATS_BH(sock_net(sk),
1658 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1659 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1660 (struct request_values *)&tmp_ext))
1427 goto drop_and_free; 1661 goto drop_and_free;
1428 1662
1429 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1430 return 0; 1663 return 0;
1431 1664
1432drop_and_release: 1665drop_and_release:
@@ -1554,7 +1787,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1554 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, 1787 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1555 iph->saddr, iph->daddr); 1788 iph->saddr, iph->daddr);
1556 if (req) 1789 if (req)
1557 return tcp_check_req(sk, skb, req, prev); 1790 return tcp_check_req(sk, skb, req, prev, false);
1558 1791
1559 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, 1792 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1560 th->source, iph->daddr, th->dest, inet_iif(skb)); 1793 th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1977,6 +2210,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
1977 tcp_cookie_values_release); 2210 tcp_cookie_values_release);
1978 tp->cookie_values = NULL; 2211 tp->cookie_values = NULL;
1979 } 2212 }
2213 BUG_ON(tp->fastopen_rsk != NULL);
1980 2214
1981 /* If socket is aborted during connect operation */ 2215 /* If socket is aborted during connect operation */
1982 tcp_free_fastopen_req(tp); 2216 tcp_free_fastopen_req(tp);
@@ -2393,7 +2627,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2393EXPORT_SYMBOL(tcp_proc_unregister); 2627EXPORT_SYMBOL(tcp_proc_unregister);
2394 2628
2395static void get_openreq4(const struct sock *sk, const struct request_sock *req, 2629static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2396 struct seq_file *f, int i, int uid, int *len) 2630 struct seq_file *f, int i, kuid_t uid, int *len)
2397{ 2631{
2398 const struct inet_request_sock *ireq = inet_rsk(req); 2632 const struct inet_request_sock *ireq = inet_rsk(req);
2399 long delta = req->expires - jiffies; 2633 long delta = req->expires - jiffies;
@@ -2410,7 +2644,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2410 1, /* timers active (only the expire timer) */ 2644 1, /* timers active (only the expire timer) */
2411 jiffies_delta_to_clock_t(delta), 2645 jiffies_delta_to_clock_t(delta),
2412 req->retrans, 2646 req->retrans,
2413 uid, 2647 from_kuid_munged(seq_user_ns(f), uid),
2414 0, /* non standard timer */ 2648 0, /* non standard timer */
2415 0, /* open_requests have no inode */ 2649 0, /* open_requests have no inode */
2416 atomic_read(&sk->sk_refcnt), 2650 atomic_read(&sk->sk_refcnt),
@@ -2425,6 +2659,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2425 const struct tcp_sock *tp = tcp_sk(sk); 2659 const struct tcp_sock *tp = tcp_sk(sk);
2426 const struct inet_connection_sock *icsk = inet_csk(sk); 2660 const struct inet_connection_sock *icsk = inet_csk(sk);
2427 const struct inet_sock *inet = inet_sk(sk); 2661 const struct inet_sock *inet = inet_sk(sk);
2662 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2428 __be32 dest = inet->inet_daddr; 2663 __be32 dest = inet->inet_daddr;
2429 __be32 src = inet->inet_rcv_saddr; 2664 __be32 src = inet->inet_rcv_saddr;
2430 __u16 destp = ntohs(inet->inet_dport); 2665 __u16 destp = ntohs(inet->inet_dport);
@@ -2461,7 +2696,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2461 timer_active, 2696 timer_active,
2462 jiffies_delta_to_clock_t(timer_expires - jiffies), 2697 jiffies_delta_to_clock_t(timer_expires - jiffies),
2463 icsk->icsk_retransmits, 2698 icsk->icsk_retransmits,
2464 sock_i_uid(sk), 2699 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2465 icsk->icsk_probes_out, 2700 icsk->icsk_probes_out,
2466 sock_i_ino(sk), 2701 sock_i_ino(sk),
2467 atomic_read(&sk->sk_refcnt), sk, 2702 atomic_read(&sk->sk_refcnt), sk,
@@ -2469,7 +2704,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2469 jiffies_to_clock_t(icsk->icsk_ack.ato), 2704 jiffies_to_clock_t(icsk->icsk_ack.ato),
2470 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2705 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2471 tp->snd_cwnd, 2706 tp->snd_cwnd,
2472 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh, 2707 sk->sk_state == TCP_LISTEN ?
2708 (fastopenq ? fastopenq->max_qlen : 0) :
2709 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2473 len); 2710 len);
2474} 2711}
2475 2712
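
The get_openreq4()/get_tcp4_sock() hunks are part of a tree-wide conversion repeated in the UDP, raw, IPX, LLC and af_key files below: /proc dumps now pass kernel uids through from_kuid_munged(seq_user_ns(f), ...), so a reader inside a user namespace sees a uid that is valid there, or the overflow uid when no mapping exists. A toy model of the munging; the single-extent mapping struct is invented for illustration:

#include <stdio.h>

#define OVERFLOWUID 65534	/* kernel default overflow uid */

struct uid_map { unsigned first, lower_first, count; };

static unsigned from_kuid_munged(const struct uid_map *ns, unsigned kuid)
{
	if (kuid >= ns->lower_first && kuid < ns->lower_first + ns->count)
		return ns->first + (kuid - ns->lower_first);
	return OVERFLOWUID;	/* unmapped: don't leak the raw kernel uid */
}

int main(void)
{
	struct uid_map container = {
		.first = 0, .lower_first = 100000, .count = 65536
	};
	printf("%u\n", from_kuid_munged(&container, 100042)); /* 42 */
	printf("%u\n", from_kuid_munged(&container, 0));      /* 65534 */
	return 0;
}
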
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6ff7f10dce9d..e965319d610b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -507,6 +507,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
507 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 507 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
508 newtp->rx_opt.mss_clamp = req->mss; 508 newtp->rx_opt.mss_clamp = req->mss;
509 TCP_ECN_openreq_child(newtp, req); 509 TCP_ECN_openreq_child(newtp, req);
510 newtp->fastopen_rsk = NULL;
510 511
511 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); 512 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
512 } 513 }
@@ -515,13 +516,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
515EXPORT_SYMBOL(tcp_create_openreq_child); 516EXPORT_SYMBOL(tcp_create_openreq_child);
516 517
517/* 518/*
518 * Process an incoming packet for SYN_RECV sockets represented 519 * Process an incoming packet for SYN_RECV sockets represented as a
519 * as a request_sock. 520 * request_sock. Normally sk is the listener socket but for TFO it
521 * points to the child socket.
522 *
523 * XXX (TFO) - The current impl contains a special check for ack
 524 * validation here and inside tcp_v4_reqsk_send_ack(). Can we do better?
520 */ 525 */
521 526
522struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, 527struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
523 struct request_sock *req, 528 struct request_sock *req,
524 struct request_sock **prev) 529 struct request_sock **prev,
530 bool fastopen)
525{ 531{
526 struct tcp_options_received tmp_opt; 532 struct tcp_options_received tmp_opt;
527 const u8 *hash_location; 533 const u8 *hash_location;
@@ -530,6 +536,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
530 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 536 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
531 bool paws_reject = false; 537 bool paws_reject = false;
532 538
539 BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
540
533 tmp_opt.saw_tstamp = 0; 541 tmp_opt.saw_tstamp = 0;
534 if (th->doff > (sizeof(struct tcphdr)>>2)) { 542 if (th->doff > (sizeof(struct tcphdr)>>2)) {
535 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); 543 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
@@ -565,6 +573,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
565 * 573 *
566 * Enforce "SYN-ACK" according to figure 8, figure 6 574 * Enforce "SYN-ACK" according to figure 8, figure 6
567 * of RFC793, fixed by RFC1122. 575 * of RFC793, fixed by RFC1122.
576 *
577 * Note that even if there is new data in the SYN packet
 578 * it will be thrown away too.
568 */ 579 */
569 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 580 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
570 return NULL; 581 return NULL;
@@ -622,9 +633,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
622 * sent (the segment carries an unacceptable ACK) ... 633 * sent (the segment carries an unacceptable ACK) ...
623 * a reset is sent." 634 * a reset is sent."
624 * 635 *
625 * Invalid ACK: reset will be sent by listening socket 636 * Invalid ACK: reset will be sent by listening socket.
637 * Note that the ACK validity check for a Fast Open socket is done
 638 * elsewhere: it is checked directly against the child socket rather
 639 * than the req, because user data may have been sent out.
626 */ 640 */
627 if ((flg & TCP_FLAG_ACK) && 641 if ((flg & TCP_FLAG_ACK) && !fastopen &&
628 (TCP_SKB_CB(skb)->ack_seq != 642 (TCP_SKB_CB(skb)->ack_seq !=
629 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk)))) 643 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
630 return sk; 644 return sk;
@@ -637,7 +651,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
637 /* RFC793: "first check sequence number". */ 651 /* RFC793: "first check sequence number". */
638 652
639 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, 653 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
640 tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) { 654 tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
641 /* Out of window: send ACK and drop. */ 655 /* Out of window: send ACK and drop. */
642 if (!(flg & TCP_FLAG_RST)) 656 if (!(flg & TCP_FLAG_RST))
643 req->rsk_ops->send_ack(sk, skb, req); 657 req->rsk_ops->send_ack(sk, skb, req);
@@ -648,7 +662,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
648 662
649 /* In sequence, PAWS is OK. */ 663 /* In sequence, PAWS is OK. */
650 664
651 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1)) 665 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
652 req->ts_recent = tmp_opt.rcv_tsval; 666 req->ts_recent = tmp_opt.rcv_tsval;
653 667
654 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { 668 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
@@ -667,10 +681,19 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
667 681
668 /* ACK sequence verified above, just make sure ACK is 682 /* ACK sequence verified above, just make sure ACK is
669 * set. If ACK not set, just silently drop the packet. 683 * set. If ACK not set, just silently drop the packet.
684 *
685 * XXX (TFO) - if we ever allow "data after SYN", the
686 * following check needs to be removed.
670 */ 687 */
671 if (!(flg & TCP_FLAG_ACK)) 688 if (!(flg & TCP_FLAG_ACK))
672 return NULL; 689 return NULL;
673 690
691 /* For Fast Open no more processing is needed (sk is the
692 * child socket).
693 */
694 if (fastopen)
695 return sk;
696
674 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ 697 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
675 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 698 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
676 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 699 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
@@ -706,11 +729,21 @@ listen_overflow:
706 } 729 }
707 730
708embryonic_reset: 731embryonic_reset:
709 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); 732 if (!(flg & TCP_FLAG_RST)) {
710 if (!(flg & TCP_FLAG_RST)) 733 /* Received a bad SYN pkt - for TFO we try not to reset
 734 * the local connection unless it's really necessary, to
 735 * avoid becoming vulnerable to an outside attack aiming at
736 * resetting legit local connections.
737 */
711 req->rsk_ops->send_reset(sk, skb); 738 req->rsk_ops->send_reset(sk, skb);
712 739 } else if (fastopen) { /* received a valid RST pkt */
713 inet_csk_reqsk_queue_drop(sk, req, prev); 740 reqsk_fastopen_remove(sk, req, true);
741 tcp_reset(sk);
742 }
743 if (!fastopen) {
744 inet_csk_reqsk_queue_drop(sk, req, prev);
745 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
746 }
714 return NULL; 747 return NULL;
715} 748}
716EXPORT_SYMBOL(tcp_check_req); 749EXPORT_SYMBOL(tcp_check_req);
@@ -719,6 +752,12 @@ EXPORT_SYMBOL(tcp_check_req);
719 * Queue segment on the new socket if the new socket is active, 752 * Queue segment on the new socket if the new socket is active,
720 * otherwise we just shortcircuit this and continue with 753 * otherwise we just shortcircuit this and continue with
721 * the new socket. 754 * the new socket.
755 *
756 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
757 * when entering. But other states are possible due to a race condition
758 * where after __inet_lookup_established() fails but before the listener
 759 * lock is obtained, other packets cause the same connection to
760 * be created.
722 */ 761 */
723 762
724int tcp_child_process(struct sock *parent, struct sock *child, 763int tcp_child_process(struct sock *parent, struct sock *child,
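
Switching the window test from rcv_isn + 1 to rcv_nxt matters because a Fast Open request that accepted SYN data has already advanced rcv_nxt past the ISN, so the valid receive window now starts at rcv_nxt. A compilable copy of the tcp_in_window() shape used above, with before()/after() as wraparound-safe comparisons:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool after(uint32_t a, uint32_t b)  { return before(b, a); }

static bool tcp_in_window(uint32_t seq, uint32_t end_seq,
			  uint32_t s_win, uint32_t e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

int main(void)
{
	/* TFO: 100 bytes arrived with the SYN, so rcv_nxt = isn + 1 + 100 */
	uint32_t rcv_nxt = 1101, rcv_wnd = 1000;

	printf("%d\n", tcp_in_window(1101, 1201, rcv_nxt, rcv_nxt + rcv_wnd)); /* 1 */
	printf("%d\n", tcp_in_window(900, 1000, rcv_nxt, rcv_nxt + rcv_wnd));  /* 0 */
	return 0;
}
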
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d04632673a9e..9383b51f3efc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -702,7 +702,8 @@ static unsigned int tcp_synack_options(struct sock *sk,
702 unsigned int mss, struct sk_buff *skb, 702 unsigned int mss, struct sk_buff *skb,
703 struct tcp_out_options *opts, 703 struct tcp_out_options *opts,
704 struct tcp_md5sig_key **md5, 704 struct tcp_md5sig_key **md5,
705 struct tcp_extend_values *xvp) 705 struct tcp_extend_values *xvp,
706 struct tcp_fastopen_cookie *foc)
706{ 707{
707 struct inet_request_sock *ireq = inet_rsk(req); 708 struct inet_request_sock *ireq = inet_rsk(req);
708 unsigned int remaining = MAX_TCP_OPTION_SPACE; 709 unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -747,7 +748,15 @@ static unsigned int tcp_synack_options(struct sock *sk,
747 if (unlikely(!ireq->tstamp_ok)) 748 if (unlikely(!ireq->tstamp_ok))
748 remaining -= TCPOLEN_SACKPERM_ALIGNED; 749 remaining -= TCPOLEN_SACKPERM_ALIGNED;
749 } 750 }
750 751 if (foc != NULL) {
752 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
753 need = (need + 3) & ~3U; /* Align to 32 bits */
754 if (remaining >= need) {
755 opts->options |= OPTION_FAST_OPEN_COOKIE;
756 opts->fastopen_cookie = foc;
757 remaining -= need;
758 }
759 }
751 /* Similar rationale to tcp_syn_options() applies here, too. 760 /* Similar rationale to tcp_syn_options() applies here, too.
752 * If the <SYN> options fit, the same options should fit now! 761 * If the <SYN> options fit, the same options should fit now!
753 */ 762 */
@@ -2658,7 +2667,8 @@ int tcp_send_synack(struct sock *sk)
2658 */ 2667 */
2659struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2668struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2660 struct request_sock *req, 2669 struct request_sock *req,
2661 struct request_values *rvp) 2670 struct request_values *rvp,
2671 struct tcp_fastopen_cookie *foc)
2662{ 2672{
2663 struct tcp_out_options opts; 2673 struct tcp_out_options opts;
2664 struct tcp_extend_values *xvp = tcp_xv(rvp); 2674 struct tcp_extend_values *xvp = tcp_xv(rvp);
@@ -2718,7 +2728,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2718#endif 2728#endif
2719 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2729 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2720 tcp_header_size = tcp_synack_options(sk, req, mss, 2730 tcp_header_size = tcp_synack_options(sk, req, mss,
2721 skb, &opts, &md5, xvp) 2731 skb, &opts, &md5, xvp, foc)
2722 + sizeof(*th); 2732 + sizeof(*th);
2723 2733
2724 skb_push(skb, tcp_header_size); 2734 skb_push(skb, tcp_header_size);
@@ -2772,7 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2772 } 2782 }
2773 2783
2774 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2784 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2775 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2785 /* XXX data is queued and acked as is. No buffer/window check */
2786 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
2776 2787
2777 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2788 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2778 th->window = htons(min(req->rcv_wnd, 65535U)); 2789 th->window = htons(min(req->rcv_wnd, 65535U));
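
In tcp_synack_options() the cookie travels in an experimental TCP option, so its cost is the option header plus the cookie itself, rounded up to a 32-bit boundary by (need + 3) & ~3U, and the option is simply dropped if the remaining space can't absorb it. Assuming the 4-byte base (kind, length, 16-bit magic) used in this era:

#include <stdio.h>

#define TCPOLEN_EXP_FASTOPEN_BASE 4	/* kind + len + 2-byte magic (assumed) */

int main(void)
{
	for (unsigned len = 4; len <= 16; len += 2) {
		unsigned need = TCPOLEN_EXP_FASTOPEN_BASE + len;

		need = (need + 3) & ~3U;	/* align to 32 bits */
		printf("cookie %2u bytes -> %2u bytes of option space\n",
		       len, need);
	}
	return 0;
}
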
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b774a03bd1dc..fc04711e80c8 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -305,6 +305,35 @@ static void tcp_probe_timer(struct sock *sk)
305} 305}
306 306
307/* 307/*
308 * Timer for Fast Open socket to retransmit SYNACK. Note that the
309 * sk here is the child socket, not the parent (listener) socket.
310 */
311static void tcp_fastopen_synack_timer(struct sock *sk)
312{
313 struct inet_connection_sock *icsk = inet_csk(sk);
314 int max_retries = icsk->icsk_syn_retries ? :
315 sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
316 struct request_sock *req;
317
318 req = tcp_sk(sk)->fastopen_rsk;
319 req->rsk_ops->syn_ack_timeout(sk, req);
320
321 if (req->retrans >= max_retries) {
322 tcp_write_err(sk);
323 return;
324 }
 325 /* XXX (TFO) - Unlike a regular SYN-ACK retransmit, we ignore the error
 326 * returned from rtx_syn_ack() so that retries stay persistent, like
 327 * regular retransmits, because if the child socket has been accepted
328 * it's not good to give up too easily.
329 */
330 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
331 req->retrans++;
332 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
333 TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX);
334}
335
336/*
308 * The TCP retransmit timer. 337 * The TCP retransmit timer.
309 */ 338 */
310 339
@@ -317,7 +346,15 @@ void tcp_retransmit_timer(struct sock *sk)
317 tcp_resume_early_retransmit(sk); 346 tcp_resume_early_retransmit(sk);
318 return; 347 return;
319 } 348 }
320 349 if (tp->fastopen_rsk) {
350 BUG_ON(sk->sk_state != TCP_SYN_RECV &&
351 sk->sk_state != TCP_FIN_WAIT1);
352 tcp_fastopen_synack_timer(sk);
 353 /* Before we receive the ACK to our SYN-ACK, don't retransmit
354 * anything else (e.g., data or FIN segments).
355 */
356 return;
357 }
321 if (!tp->packets_out) 358 if (!tp->packets_out)
322 goto out; 359 goto out;
323 360
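
tcp_fastopen_synack_timer() re-arms itself with TCP_TIMEOUT_INIT << req->retrans, and inet_csk_reset_xmit_timer() clamps that to TCP_RTO_MAX, yielding the usual exponential SYN-ACK backoff. With this era's assumed 1 s initial timeout and 120 s ceiling, the retry schedule looks like this:

#include <stdio.h>

#define HZ		 1000
#define TCP_TIMEOUT_INIT (1 * HZ)	/* assumed era values */
#define TCP_RTO_MAX	 (120 * HZ)

int main(void)
{
	for (unsigned retrans = 1; retrans <= 8; retrans++) {
		unsigned long when = (unsigned long)TCP_TIMEOUT_INIT << retrans;

		if (when > TCP_RTO_MAX)	/* inet_csk_reset_xmit_timer() clamp */
			when = TCP_RTO_MAX;
		printf("retrans %u: next SYN-ACK in %lus\n", retrans, when / HZ);
	}
	return 0;
}
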
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6f6d1aca3c3d..c4e64328d8ba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2110,7 +2110,9 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2110 bucket, src, srcp, dest, destp, sp->sk_state, 2110 bucket, src, srcp, dest, destp, sp->sk_state,
2111 sk_wmem_alloc_get(sp), 2111 sk_wmem_alloc_get(sp),
2112 sk_rmem_alloc_get(sp), 2112 sk_rmem_alloc_get(sp),
2113 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 2113 0, 0L, 0,
2114 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
2115 0, sock_i_ino(sp),
2114 atomic_read(&sp->sk_refcnt), sp, 2116 atomic_read(&sp->sk_refcnt), sp,
2115 atomic_read(&sp->sk_drops), len); 2117 atomic_read(&sp->sk_drops), len);
2116} 2118}
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 16d0960062be..d2f336ea82ca 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -24,7 +24,9 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
24 if (!inet_diag_bc_sk(bc, sk)) 24 if (!inet_diag_bc_sk(bc, sk))
25 return 0; 25 return 0;
26 26
27 return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid, 27 return inet_sk_diag_fill(sk, NULL, skb, req,
28 sk_user_ns(NETLINK_CB(cb->skb).ssk),
29 NETLINK_CB(cb->skb).pid,
28 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 30 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
29} 31}
30 32
@@ -69,6 +71,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
69 goto out; 71 goto out;
70 72
71 err = inet_sk_diag_fill(sk, NULL, rep, req, 73 err = inet_sk_diag_fill(sk, NULL, rep, req,
74 sk_user_ns(NETLINK_CB(in_skb).ssk),
72 NETLINK_CB(in_skb).pid, 75 NETLINK_CB(in_skb).pid,
73 nlh->nlmsg_seq, 0, nlh); 76 nlh->nlmsg_seq, 0, nlh);
74 if (err < 0) { 77 if (err < 0) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 19d4bffda9d7..572cb660837b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -127,8 +127,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
127#endif 127#endif
128 128
129#ifdef CONFIG_IPV6_PRIVACY 129#ifdef CONFIG_IPV6_PRIVACY
130static int __ipv6_regen_rndid(struct inet6_dev *idev); 130static void __ipv6_regen_rndid(struct inet6_dev *idev);
131static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); 131static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
132static void ipv6_regen_rndid(unsigned long data); 132static void ipv6_regen_rndid(unsigned long data);
133#endif 133#endif
134 134
@@ -852,16 +852,7 @@ retry:
852 } 852 }
853 in6_ifa_hold(ifp); 853 in6_ifa_hold(ifp);
854 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); 854 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
855 if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) { 855 __ipv6_try_regen_rndid(idev, tmpaddr);
856 spin_unlock_bh(&ifp->lock);
857 write_unlock(&idev->lock);
858 pr_warn("%s: regeneration of randomized interface id failed\n",
859 __func__);
860 in6_ifa_put(ifp);
861 in6_dev_put(idev);
862 ret = -1;
863 goto out;
864 }
865 memcpy(&addr.s6_addr[8], idev->rndid, 8); 856 memcpy(&addr.s6_addr[8], idev->rndid, 8);
866 age = (now - ifp->tstamp) / HZ; 857 age = (now - ifp->tstamp) / HZ;
867 tmp_valid_lft = min_t(__u32, 858 tmp_valid_lft = min_t(__u32,
@@ -1600,7 +1591,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
1600 1591
1601#ifdef CONFIG_IPV6_PRIVACY 1592#ifdef CONFIG_IPV6_PRIVACY
1602/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */ 1593/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
1603static int __ipv6_regen_rndid(struct inet6_dev *idev) 1594static void __ipv6_regen_rndid(struct inet6_dev *idev)
1604{ 1595{
1605regen: 1596regen:
1606 get_random_bytes(idev->rndid, sizeof(idev->rndid)); 1597 get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -1627,8 +1618,6 @@ regen:
1627 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00) 1618 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
1628 goto regen; 1619 goto regen;
1629 } 1620 }
1630
1631 return 0;
1632} 1621}
1633 1622
1634static void ipv6_regen_rndid(unsigned long data) 1623static void ipv6_regen_rndid(unsigned long data)
@@ -1642,8 +1631,7 @@ static void ipv6_regen_rndid(unsigned long data)
1642 if (idev->dead) 1631 if (idev->dead)
1643 goto out; 1632 goto out;
1644 1633
1645 if (__ipv6_regen_rndid(idev) < 0) 1634 __ipv6_regen_rndid(idev);
1646 goto out;
1647 1635
1648 expires = jiffies + 1636 expires = jiffies +
1649 idev->cnf.temp_prefered_lft * HZ - 1637 idev->cnf.temp_prefered_lft * HZ -
@@ -1664,13 +1652,10 @@ out:
1664 in6_dev_put(idev); 1652 in6_dev_put(idev);
1665} 1653}
1666 1654
1667static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) 1655static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
1668{ 1656{
1669 int ret = 0;
1670
1671 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) 1657 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
1672 ret = __ipv6_regen_rndid(idev); 1658 __ipv6_regen_rndid(idev);
1673 return ret;
1674} 1659}
1675#endif 1660#endif
1676 1661
@@ -2566,14 +2551,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2566 void *data) 2551 void *data)
2567{ 2552{
2568 struct net_device *dev = (struct net_device *) data; 2553 struct net_device *dev = (struct net_device *) data;
2569 struct inet6_dev *idev; 2554 struct inet6_dev *idev = __in6_dev_get(dev);
2570 int run_pending = 0; 2555 int run_pending = 0;
2571 int err; 2556 int err;
2572 2557
2573 if (event == NETDEV_UNREGISTER_FINAL)
2574 return NOTIFY_DONE;
2575
2576 idev = __in6_dev_get(dev);
2577 switch (event) { 2558 switch (event) {
2578 case NETDEV_REGISTER: 2559 case NETDEV_REGISTER:
2579 if (!idev && dev->mtu >= IPV6_MIN_MTU) { 2560 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 9772fbd8a3f5..90bbefb57943 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -22,6 +22,7 @@
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/export.h> 24#include <linux/export.h>
25#include <linux/pid_namespace.h>
25 26
26#include <net/net_namespace.h> 27#include <net/net_namespace.h>
27#include <net/sock.h> 28#include <net/sock.h>
@@ -91,6 +92,8 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
91static void fl_free(struct ip6_flowlabel *fl) 92static void fl_free(struct ip6_flowlabel *fl)
92{ 93{
93 if (fl) { 94 if (fl) {
95 if (fl->share == IPV6_FL_S_PROCESS)
96 put_pid(fl->owner.pid);
94 release_net(fl->fl_net); 97 release_net(fl->fl_net);
95 kfree(fl->opt); 98 kfree(fl->opt);
96 } 99 }
@@ -394,10 +397,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
394 case IPV6_FL_S_ANY: 397 case IPV6_FL_S_ANY:
395 break; 398 break;
396 case IPV6_FL_S_PROCESS: 399 case IPV6_FL_S_PROCESS:
397 fl->owner = current->pid; 400 fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
398 break; 401 break;
399 case IPV6_FL_S_USER: 402 case IPV6_FL_S_USER:
400 fl->owner = current_euid(); 403 fl->owner.uid = current_euid();
401 break; 404 break;
402 default: 405 default:
403 err = -EINVAL; 406 err = -EINVAL;
@@ -561,7 +564,10 @@ recheck:
561 err = -EPERM; 564 err = -EPERM;
562 if (fl1->share == IPV6_FL_S_EXCL || 565 if (fl1->share == IPV6_FL_S_EXCL ||
563 fl1->share != fl->share || 566 fl1->share != fl->share ||
564 fl1->owner != fl->owner) 567 ((fl1->share == IPV6_FL_S_PROCESS) &&
 568 (fl1->owner.pid != fl->owner.pid)) ||
569 ((fl1->share == IPV6_FL_S_USER) &&
 570 !uid_eq(fl1->owner.uid, fl->owner.uid)))
565 goto release; 571 goto release;
566 572
567 err = -EINVAL; 573 err = -EINVAL;
@@ -621,6 +627,7 @@ done:
621 627
622struct ip6fl_iter_state { 628struct ip6fl_iter_state {
623 struct seq_net_private p; 629 struct seq_net_private p;
630 struct pid_namespace *pid_ns;
624 int bucket; 631 int bucket;
625}; 632};
626 633
@@ -699,6 +706,7 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v)
699 706
700static int ip6fl_seq_show(struct seq_file *seq, void *v) 707static int ip6fl_seq_show(struct seq_file *seq, void *v)
701{ 708{
709 struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
702 if (v == SEQ_START_TOKEN) 710 if (v == SEQ_START_TOKEN)
703 seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", 711 seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
704 "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); 712 "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
@@ -708,7 +716,11 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
708 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", 716 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
709 (unsigned int)ntohl(fl->label), 717 (unsigned int)ntohl(fl->label),
710 fl->share, 718 fl->share,
711 (int)fl->owner, 719 ((fl->share == IPV6_FL_S_PROCESS) ?
720 pid_nr_ns(fl->owner.pid, state->pid_ns) :
721 ((fl->share == IPV6_FL_S_USER) ?
722 from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
723 0)),
712 atomic_read(&fl->users), 724 atomic_read(&fl->users),
713 fl->linger/HZ, 725 fl->linger/HZ,
714 (long)(fl->expires - jiffies)/HZ, 726 (long)(fl->expires - jiffies)/HZ,
@@ -727,8 +739,29 @@ static const struct seq_operations ip6fl_seq_ops = {
727 739
728static int ip6fl_seq_open(struct inode *inode, struct file *file) 740static int ip6fl_seq_open(struct inode *inode, struct file *file)
729{ 741{
730 return seq_open_net(inode, file, &ip6fl_seq_ops, 742 struct seq_file *seq;
731 sizeof(struct ip6fl_iter_state)); 743 struct ip6fl_iter_state *state;
744 int err;
745
746 err = seq_open_net(inode, file, &ip6fl_seq_ops,
747 sizeof(struct ip6fl_iter_state));
748
749 if (!err) {
750 seq = file->private_data;
751 state = ip6fl_seq_private(seq);
752 rcu_read_lock();
753 state->pid_ns = get_pid_ns(task_active_pid_ns(current));
754 rcu_read_unlock();
755 }
756 return err;
757}
758
759static int ip6fl_seq_release(struct inode *inode, struct file *file)
760{
761 struct seq_file *seq = file->private_data;
762 struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
763 put_pid_ns(state->pid_ns);
764 return seq_release_net(inode, file);
732} 765}
733 766
734static const struct file_operations ip6fl_seq_fops = { 767static const struct file_operations ip6fl_seq_fops = {
@@ -736,7 +769,7 @@ static const struct file_operations ip6fl_seq_fops = {
736 .open = ip6fl_seq_open, 769 .open = ip6fl_seq_open,
737 .read = seq_read, 770 .read = seq_read,
738 .llseek = seq_lseek, 771 .llseek = seq_lseek,
739 .release = seq_release_net, 772 .release = ip6fl_seq_release,
740}; 773};
741 774
742static int __net_init ip6_flowlabel_proc_init(struct net *net) 775static int __net_init ip6_flowlabel_proc_init(struct net *net)
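
The recheck path in ip6_flowlabel.c (corrected above) must refuse attachment when the owner differs: a PROCESS- or USER-shared label may only be joined by its owning pid or uid, so it is the inequality that triggers EPERM. A user-space model of the rule, with all types as stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum share { FL_S_ANY, FL_S_EXCL, FL_S_PROCESS, FL_S_USER };

static bool may_attach(enum share existing, enum share requested,
		       int old_owner, int new_owner)
{
	if (existing == FL_S_EXCL || existing != requested)
		return false;			/* -EPERM */
	if (existing == FL_S_PROCESS || existing == FL_S_USER)
		return old_owner == new_owner;	/* same pid resp. uid only */
	return true;				/* FL_S_ANY */
}

int main(void)
{
	printf("%d\n", may_attach(FL_S_PROCESS, FL_S_PROCESS, 42, 42)); /* 1 */
	printf("%d\n", may_attach(FL_S_PROCESS, FL_S_PROCESS, 42, 43)); /* 0 */
	return 0;
}
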
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ef0579d5bca6..7af88ef01657 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1251,7 +1251,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1251 sk_wmem_alloc_get(sp), 1251 sk_wmem_alloc_get(sp),
1252 sk_rmem_alloc_get(sp), 1252 sk_rmem_alloc_get(sp),
1253 0, 0L, 0, 1253 0, 0L, 0,
1254 sock_i_uid(sp), 0, 1254 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1255 0,
1255 sock_i_ino(sp), 1256 sock_i_ino(sp),
1256 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); 1257 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
1257} 1258}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb46061c813a..182ab9a85d6c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -190,6 +190,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
190 ireq = inet_rsk(req); 190 ireq = inet_rsk(req);
191 ireq6 = inet6_rsk(req); 191 ireq6 = inet6_rsk(req);
192 treq = tcp_rsk(req); 192 treq = tcp_rsk(req);
193 treq->listener = NULL;
193 194
194 if (security_inet_conn_request(sk, skb, req)) 195 if (security_inet_conn_request(sk, skb, req))
195 goto out_free; 196 goto out_free;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cd49de3678fb..09078b9bc6f6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -475,7 +475,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
475 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 475 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
476 goto done; 476 goto done;
477 477
478 skb = tcp_make_synack(sk, dst, req, rvp); 478 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
479 479
480 if (skb) { 480 if (skb) {
481 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 481 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -987,7 +987,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
987 &ipv6_hdr(skb)->saddr, 987 &ipv6_hdr(skb)->saddr,
988 &ipv6_hdr(skb)->daddr, inet6_iif(skb)); 988 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
989 if (req) 989 if (req)
990 return tcp_check_req(sk, skb, req, prev); 990 return tcp_check_req(sk, skb, req, prev, false);
991 991
992 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, 992 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
993 &ipv6_hdr(skb)->saddr, th->source, 993 &ipv6_hdr(skb)->saddr, th->source,
@@ -1179,6 +1179,7 @@ have_isn:
1179 want_cookie) 1179 want_cookie)
1180 goto drop_and_free; 1180 goto drop_and_free;
1181 1181
1182 tcp_rsk(req)->listener = NULL;
1182 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1183 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1183 return 0; 1184 return 0;
1184 1185
@@ -1828,7 +1829,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
1828#ifdef CONFIG_PROC_FS 1829#ifdef CONFIG_PROC_FS
1829/* Proc filesystem TCPv6 sock list dumping. */ 1830/* Proc filesystem TCPv6 sock list dumping. */
1830static void get_openreq6(struct seq_file *seq, 1831static void get_openreq6(struct seq_file *seq,
1831 const struct sock *sk, struct request_sock *req, int i, int uid) 1832 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1832{ 1833{
1833 int ttd = req->expires - jiffies; 1834 int ttd = req->expires - jiffies;
1834 const struct in6_addr *src = &inet6_rsk(req)->loc_addr; 1835 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -1852,7 +1853,7 @@ static void get_openreq6(struct seq_file *seq,
1852 1, /* timers active (only the expire timer) */ 1853 1, /* timers active (only the expire timer) */
1853 jiffies_to_clock_t(ttd), 1854 jiffies_to_clock_t(ttd),
1854 req->retrans, 1855 req->retrans,
1855 uid, 1856 from_kuid_munged(seq_user_ns(seq), uid),
1856 0, /* non standard timer */ 1857 0, /* non standard timer */
1857 0, /* open_requests have no inode */ 1858 0, /* open_requests have no inode */
1858 0, req); 1859 0, req);
@@ -1902,7 +1903,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1902 timer_active, 1903 timer_active,
1903 jiffies_delta_to_clock_t(timer_expires - jiffies), 1904 jiffies_delta_to_clock_t(timer_expires - jiffies),
1904 icsk->icsk_retransmits, 1905 icsk->icsk_retransmits,
1905 sock_i_uid(sp), 1906 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1906 icsk->icsk_probes_out, 1907 icsk->icsk_probes_out,
1907 sock_i_ino(sp), 1908 sock_i_ino(sp),
1908 atomic_read(&sp->sk_refcnt), sp, 1909 atomic_read(&sp->sk_refcnt), sp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 99d0077b56b8..bbdff07eebe1 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1458,7 +1458,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
1458 sk_wmem_alloc_get(sp), 1458 sk_wmem_alloc_get(sp),
1459 sk_rmem_alloc_get(sp), 1459 sk_rmem_alloc_get(sp),
1460 0, 0L, 0, 1460 0, 0L, 0,
1461 sock_i_uid(sp), 0, 1461 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1462 0,
1462 sock_i_ino(sp), 1463 sock_i_ino(sp),
1463 atomic_read(&sp->sk_refcnt), sp, 1464 atomic_read(&sp->sk_refcnt), sp,
1464 atomic_read(&sp->sk_drops)); 1465 atomic_read(&sp->sk_drops));
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index f8ba30dfecae..02ff7f2f60d4 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -217,7 +217,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
217 seq_printf(seq, "%08X %08X %02X %03d\n", 217 seq_printf(seq, "%08X %08X %02X %03d\n",
218 sk_wmem_alloc_get(s), 218 sk_wmem_alloc_get(s),
219 sk_rmem_alloc_get(s), 219 sk_rmem_alloc_get(s),
220 s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); 220 s->sk_state,
221 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
221out: 222out:
222 return 0; 223 return 0;
223} 224}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ec7d161c129b..334f93b8cfcb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3661,7 +3661,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
3661 atomic_read(&s->sk_refcnt), 3661 atomic_read(&s->sk_refcnt),
3662 sk_rmem_alloc_get(s), 3662 sk_rmem_alloc_get(s),
3663 sk_wmem_alloc_get(s), 3663 sk_wmem_alloc_get(s),
3664 sock_i_uid(s), 3664 from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
3665 sock_i_ino(s) 3665 sock_i_ino(s)
3666 ); 3666 );
3667 return 0; 3667 return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..513cab08a986 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1347 /* Remove from tunnel list */ 1347 /* Remove from tunnel list */
1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1349 list_del_rcu(&tunnel->list); 1349 list_del_rcu(&tunnel->list);
1350 kfree_rcu(tunnel, rcu);
1350 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1351 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1351 synchronize_rcu();
1352 1352
1353 atomic_dec(&l2tp_tunnel_count); 1353 atomic_dec(&l2tp_tunnel_count);
1354 kfree(tunnel);
1355} 1354}
1356 1355
1357/* Create a socket for the tunnel, if one isn't set up by 1356/* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
163 163
164struct l2tp_tunnel { 164struct l2tp_tunnel {
165 int magic; /* Should be L2TP_TUNNEL_MAGIC */ 165 int magic; /* Should be L2TP_TUNNEL_MAGIC */
166 struct rcu_head rcu;
166 rwlock_t hlist_lock; /* protect session_hlist */ 167 rwlock_t hlist_lock; /* protect session_hlist */
167 struct hlist_head session_hlist[L2TP_HASH_SIZE]; 168 struct hlist_head session_hlist[L2TP_HASH_SIZE];
168 /* hashed list of sessions, 169 /* hashed list of sessions,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index a1839c004357..7b4799cfbf8d 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
151 sk_wmem_alloc_get(sk), 151 sk_wmem_alloc_get(sk),
152 sk_rmem_alloc_get(sk) - llc->copied_seq, 152 sk_rmem_alloc_get(sk) - llc->copied_seq,
153 sk->sk_state, 153 sk->sk_state,
154 sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1, 154 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
155 llc->link); 155 llc->link);
156out: 156out:
157 return 0; 157 return 0;
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 8dfd70d8fcfb..a04752e91023 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -38,14 +38,10 @@ static void gf_mulx(u8 *pad)
38static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, 38static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
39 const u8 *addr[], const size_t *len, u8 *mac) 39 const u8 *addr[], const size_t *len, u8 *mac)
40{ 40{
41 u8 scratch[2 * AES_BLOCK_SIZE]; 41 u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
42 u8 *cbc, *pad;
43 const u8 *pos, *end; 42 const u8 *pos, *end;
44 size_t i, e, left, total_len; 43 size_t i, e, left, total_len;
45 44
46 cbc = scratch;
47 pad = scratch + AES_BLOCK_SIZE;
48
49 memset(cbc, 0, AES_BLOCK_SIZE); 45 memset(cbc, 0, AES_BLOCK_SIZE);
50 46
51 total_len = 0; 47 total_len = 0;
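
For reference, the gf_mulx() shown in context is the standard CMAC doubling in GF(2^128): shift the whole block left one bit and, if a bit fell off the top, fold in the reduction constant 0x87. A self-contained copy that can be compiled and checked on its own:

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

static void gf_mulx(uint8_t *pad)
{
	int carry = pad[0] & 0x80;

	for (int i = 0; i < AES_BLOCK_SIZE - 1; i++)
		pad[i] = (uint8_t)((pad[i] << 1) | (pad[i + 1] >> 7));
	pad[AES_BLOCK_SIZE - 1] <<= 1;
	if (carry)
		pad[AES_BLOCK_SIZE - 1] ^= 0x87;	/* x^128 = x^7+x^2+x+1 */
}

int main(void)
{
	uint8_t b[AES_BLOCK_SIZE] = { 0x80 };	/* MSB set forces the reduction */

	gf_mulx(b);
	for (int i = 0; i < AES_BLOCK_SIZE; i++)
		printf("%02x", b[i]);
	printf("\n");	/* fifteen zero bytes then 87 */
	return 0;
}
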
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d41974aacf51..929f897a8ded 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -102,6 +102,18 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
102 return 0; 102 return 0;
103} 103}
104 104
105static int ieee80211_start_p2p_device(struct wiphy *wiphy,
106 struct wireless_dev *wdev)
107{
108 return ieee80211_do_open(wdev, true);
109}
110
111static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
112 struct wireless_dev *wdev)
113{
114 ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
115}
116
105static int ieee80211_set_noack_map(struct wiphy *wiphy, 117static int ieee80211_set_noack_map(struct wiphy *wiphy,
106 struct net_device *dev, 118 struct net_device *dev,
107 u16 noack_map) 119 u16 noack_map)
@@ -330,7 +342,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
330 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) { 342 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
331 struct ieee80211_supported_band *sband; 343 struct ieee80211_supported_band *sband;
332 sband = sta->local->hw.wiphy->bands[ 344 sband = sta->local->hw.wiphy->bands[
333 sta->local->hw.conf.channel->band]; 345 sta->local->oper_channel->band];
334 rate->legacy = sband->bitrates[idx].bitrate; 346 rate->legacy = sband->bitrates[idx].bitrate;
335 } else 347 } else
336 rate->mcs = idx; 348 rate->mcs = idx;
@@ -725,25 +737,23 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
725static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, 737static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
726 const u8 *resp, size_t resp_len) 738 const u8 *resp, size_t resp_len)
727{ 739{
728 struct sk_buff *new, *old; 740 struct probe_resp *new, *old;
729 741
730 if (!resp || !resp_len) 742 if (!resp || !resp_len)
731 return 1; 743 return 1;
732 744
733 old = rtnl_dereference(sdata->u.ap.probe_resp); 745 old = rtnl_dereference(sdata->u.ap.probe_resp);
734 746
735 new = dev_alloc_skb(resp_len); 747 new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
736 if (!new) 748 if (!new)
737 return -ENOMEM; 749 return -ENOMEM;
738 750
739 memcpy(skb_put(new, resp_len), resp, resp_len); 751 new->len = resp_len;
752 memcpy(new->data, resp, resp_len);
740 753
741 rcu_assign_pointer(sdata->u.ap.probe_resp, new); 754 rcu_assign_pointer(sdata->u.ap.probe_resp, new);
742 if (old) { 755 if (old)
743 /* TODO: use call_rcu() */ 756 kfree_rcu(old, rcu_head);
744 synchronize_rcu();
745 dev_kfree_skb(old);
746 }
747 757
748 return 0; 758 return 0;
749} 759}
@@ -950,7 +960,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
950 /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) 960 /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
951 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ 961 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
952 962
953 memset(msg->da, 0xff, ETH_ALEN); 963 eth_broadcast_addr(msg->da);
954 memcpy(msg->sa, sta->sta.addr, ETH_ALEN); 964 memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
955 msg->len = htons(6); 965 msg->len = htons(6);
956 msg->dsap = 0; 966 msg->dsap = 0;
@@ -1285,9 +1295,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 	mutex_unlock(&local->sta_mtx);
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
 		ieee80211_recalc_ps(local, -1);
-
+		ieee80211_recalc_ps_vif(sdata);
+	}
 	return 0;
 }
 
@@ -1661,7 +1672,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
 	}
 
 	if (!sdata->vif.bss_conf.use_short_slot &&
-	    sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+	    sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
 		sdata->vif.bss_conf.use_short_slot = true;
 		changed |= BSS_CHANGED_ERP_SLOT;
 	}
@@ -1775,6 +1786,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
 	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	case NL80211_IFTYPE_P2P_GO:
 		if (sdata->local->ops->hw_scan)
@@ -1927,7 +1939,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
				   enum nl80211_tx_power_setting type, int mbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
-	struct ieee80211_channel *chan = local->hw.conf.channel;
+	struct ieee80211_channel *chan = local->oper_channel;
 	u32 changes = 0;
 
 	switch (type) {
@@ -2079,6 +2091,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
 	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
 	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps_vif(sdata);
 
 	return 0;
 }
@@ -2461,6 +2474,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 		if (!sdata->u.mgd.associated)
 			need_offchan = true;
 		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		need_offchan = true;
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2653,6 +2669,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
				u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tdls_data *tf;
 
 	tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2672,8 +2689,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
 		tf->u.setup_req.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false);
-		ieee80211_add_ext_srates_ie(sdata, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false,
+					local->oper_channel->band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false,
+					    local->oper_channel->band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
@@ -2686,8 +2705,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
 		tf->u.setup_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false);
-		ieee80211_add_ext_srates_ie(sdata, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false,
+					local->oper_channel->band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false,
+					    local->oper_channel->band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
@@ -2725,6 +2746,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
			    u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_mgmt *mgmt;
 
 	mgmt = (void *)skb_put(skb, 24);
@@ -2747,8 +2769,10 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
 		mgmt->u.action.u.tdls_discover_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(sdata, skb, false);
-		ieee80211_add_ext_srates_ie(sdata, skb, false);
+		ieee80211_add_srates_ie(sdata, skb, false,
+					local->oper_channel->band);
+		ieee80211_add_ext_srates_ie(sdata, skb, false,
+					    local->oper_channel->band);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	default:
@@ -3005,6 +3029,8 @@ struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
 	.change_virtual_intf = ieee80211_change_iface,
+	.start_p2p_device = ieee80211_start_p2p_device,
+	.stop_p2p_device = ieee80211_stop_p2p_device,
 	.add_key = ieee80211_add_key,
 	.del_key = ieee80211_del_key,
 	.get_key = ieee80211_get_key,
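
Registering .start_p2p_device/.stop_p2p_device means a P2P Device is brought up and torn down through wdev-based cfg80211 callbacks instead of netdev open/stop. A rough sketch of such a pair; the two field names are the ones added to struct cfg80211_ops above, while the handlers and ops table are invented:

	#include <net/cfg80211.h>

	static int my_start_p2p_device(struct wiphy *wiphy,
				       struct wireless_dev *wdev)
	{
		/* bring the P2P Device wdev up; no net_device exists for it */
		return 0;
	}

	static void my_stop_p2p_device(struct wiphy *wiphy,
				       struct wireless_dev *wdev)
	{
		/* tear the wdev down again */
	}

	static struct cfg80211_ops my_cfg_ops = {
		.start_p2p_device = my_start_p2p_device,
		.stop_p2p_device = my_stop_p2p_device,
		/* remaining mandatory ops elided */
	};
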
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b8dfb440c8ef..97173f8144d4 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -63,8 +63,6 @@ DEBUGFS_READONLY_FILE(user_power, "%d",
		       local->user_power_level);
 DEBUGFS_READONLY_FILE(power, "%d",
		       local->hw.conf.power_level);
-DEBUGFS_READONLY_FILE(frequency, "%d",
-		      local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
		       local->total_ps_buffered);
 DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -91,33 +89,6 @@ static const struct file_operations reset_ops = {
 	.llseek = noop_llseek,
 };
 
-static ssize_t channel_type_read(struct file *file, char __user *user_buf,
-		size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	const char *buf;
-
-	switch (local->hw.conf.channel_type) {
-	case NL80211_CHAN_NO_HT:
-		buf = "no ht\n";
-		break;
-	case NL80211_CHAN_HT20:
-		buf = "ht20\n";
-		break;
-	case NL80211_CHAN_HT40MINUS:
-		buf = "ht40-\n";
-		break;
-	case NL80211_CHAN_HT40PLUS:
-		buf = "ht40+\n";
-		break;
-	default:
-		buf = "???";
-		break;
-	}
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
-
 static ssize_t hwflags_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
 {
@@ -205,7 +176,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
 }
 
 DEBUGFS_READONLY_FILE_OPS(hwflags);
-DEBUGFS_READONLY_FILE_OPS(channel_type);
 DEBUGFS_READONLY_FILE_OPS(queues);
 
 /* statistics stuff */
@@ -272,12 +242,10 @@ void debugfs_hw_add(struct ieee80211_local *local)
 
 	local->debugfs.keys = debugfs_create_dir("keys", phyd);
 
-	DEBUGFS_ADD(frequency);
 	DEBUGFS_ADD(total_ps_buffered);
 	DEBUGFS_ADD(wep_iv);
 	DEBUGFS_ADD(queues);
 	DEBUGFS_ADD_MODE(reset, 0200);
-	DEBUGFS_ADD(channel_type);
 	DEBUGFS_ADD(hwflags);
 	DEBUGFS_ADD(user_power);
 	DEBUGFS_ADD(power);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index df9203199102..da9003b20004 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -9,7 +9,7 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
 	WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
	      "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
-	     sdata->dev->name, sdata->flags);
+	     sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
 	return sdata;
 }
 
-static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local,
+			  struct ieee80211_tx_control *control,
+			  struct sk_buff *skb)
 {
-	local->ops->tx(&local->hw, skb);
+	local->ops->tx(&local->hw, control, skb);
 }
 
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
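
Widening drv_tx() to pass a struct ieee80211_tx_control lets mac80211 hand drivers the station context it already resolved instead of each driver re-deriving it per frame. A sketch of a driver .tx handler under the new signature; the driver names are invented, and treating control->sta as the payload of the new struct is an assumption to check against this tree's headers:

	#include <net/mac80211.h>

	static void mydrv_tx(struct ieee80211_hw *hw,
			     struct ieee80211_tx_control *control,
			     struct sk_buff *skb)
	{
		struct ieee80211_sta *sta = control ? control->sta : NULL;

		/* queue skb to hardware, using sta for per-station state;
		 * a driver that cannot transmit must free the skb itself */
		(void)sta;
		ieee80211_free_txskb(hw, skb);
	}

	static const struct ieee80211_ops mydrv_ops = {
		.tx = mydrv_tx,
		/* .start, .stop, .config, ... elided */
	};
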
@@ -526,6 +528,9 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
 	sdata = get_bss_sdata(sdata);
 	check_sdata_in_driver(sdata);
 
+	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+		sdata->vif.type != NL80211_IFTYPE_ADHOC);
+
 	trace_drv_sta_rc_update(local, sdata, sta, changed);
 	if (local->ops->sta_rc_update)
 		local->ops->sta_rc_update(&local->hw, &sdata->vif,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 5746d62faba1..a9d93285dba7 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -109,7 +109,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					   IEEE80211_STYPE_PROBE_RESP);
-	memset(mgmt->da, 0xff, ETH_ALEN);
+	eth_broadcast_addr(mgmt->da);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
 	mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	mod_timer(&ifibss->timer,
		   round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-	bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+	bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
					 mgmt, skb->len, 0, GFP_KERNEL);
 	cfg80211_put_bss(bss);
 	netif_carrier_on(sdata->dev);
@@ -294,7 +294,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	int band = local->hw.conf.channel->band;
+	int band = local->oper_channel->band;
 
 	/*
	  * XXX: Consider removing the least recently used entry and
@@ -459,8 +459,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 		}
 	}
 
-	if (sta && rates_updated)
+	if (sta && rates_updated) {
+		drv_sta_rc_update(local, sdata, &sta->sta,
+				  IEEE80211_RC_SUPP_RATES_CHANGED);
 		rate_control_rate_init(sta);
+	}
 
 	rcu_read_unlock();
 }
@@ -561,7 +564,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	int band = local->hw.conf.channel->band;
+	int band = local->oper_channel->band;
 
 	/*
	  * XXX: Consider removing the least recently used entry and
@@ -759,7 +762,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 		return;
 	}
 	sdata_info(sdata, "IBSS not allowed on %d MHz\n",
-		   local->hw.conf.channel->center_freq);
+		   local->oper_channel->center_freq);
 
 	/* No IBSS found - decrease scan interval and continue
	  * scanning. */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index bb61f7718c4c..204bfedba306 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -193,8 +193,6 @@ struct ieee80211_tx_data {
 	struct sta_info *sta;
 	struct ieee80211_key *key;
 
-	struct ieee80211_channel *channel;
-
 	unsigned int flags;
 };
 
@@ -274,9 +272,15 @@ struct beacon_data {
 	struct rcu_head rcu_head;
 };
 
+struct probe_resp {
+	struct rcu_head rcu_head;
+	int len;
+	u8 data[0];
+};
+
 struct ieee80211_if_ap {
 	struct beacon_data __rcu *beacon;
-	struct sk_buff __rcu *probe_resp;
+	struct probe_resp __rcu *probe_resp;
 
 	struct list_head vlans;
 
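
struct probe_resp ends in u8 data[0], the zero-length-array spelling of a C99 flexible array member: header and payload share one allocation of sizeof(struct) + len, which is what lets the cfg80211.c hunk earlier allocate and free the probe response as a single object. A small userspace illustration of the idiom, with hypothetical names:

	#include <stdlib.h>
	#include <string.h>

	struct blob {
		int len;
		unsigned char data[];	/* storage allocated past the struct */
	};

	static struct blob *blob_new(const void *src, int len)
	{
		struct blob *b = malloc(sizeof(*b) + len);	/* one allocation */

		if (!b)
			return NULL;
		b->len = len;
		memcpy(b->data, src, len);
		return b;
	}
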
@@ -359,6 +363,7 @@ enum ieee80211_sta_flags {
 	IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
 	IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
 	IEEE80211_STA_DISABLE_40MHZ = BIT(10),
+	IEEE80211_STA_DISABLE_VHT = BIT(11),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -1075,6 +1080,8 @@ struct ieee80211_local {
 	struct idr ack_status_frames;
 	spinlock_t ack_status_lock;
 
+	struct ieee80211_sub_if_data __rcu *p2p_sdata;
+
 	/* dummy netdev for use w/ NAPI */
 	struct net_device napi_dev;
 
@@ -1131,7 +1138,7 @@ struct ieee802_11_elems {
 	u8 *prep;
 	u8 *perr;
 	struct ieee80211_rann_ie *rann;
-	u8 *ch_switch_elem;
+	struct ieee80211_channel_sw_ie *ch_switch_ie;
 	u8 *country_elem;
 	u8 *pwr_constr_elem;
 	u8 *quiet_elem; /* first quite element */
@@ -1157,7 +1164,6 @@ struct ieee802_11_elems {
 	u8 preq_len;
 	u8 prep_len;
 	u8 perr_len;
-	u8 ch_switch_elem_len;
 	u8 country_elem_len;
 	u8 pwr_constr_elem_len;
 	u8 quiet_elem_len;
@@ -1202,6 +1208,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_pspoll(struct ieee80211_local *local,
			    struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
 int ieee80211_max_network_latency(struct notifier_block *nb,
				   unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1298,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
 void ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
				     const int offset);
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1425,7 +1434,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
			      struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
			      struct ieee80211_hdr *hdr, bool ack);
-void ieee80211_beacon_connection_loss_work(struct work_struct *work);
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
				      enum queue_stop_reason reason);
@@ -1457,13 +1465,15 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
			      u8 channel);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
					   u8 *dst, u32 ratemask,
+					  struct ieee80211_channel *chan,
					   const u8 *ssid, size_t ssid_len,
					   const u8 *ie, size_t ie_len,
					   bool directed);
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
			       const u8 *ssid, size_t ssid_len,
			       const u8 *ie, size_t ie_len,
-			      u32 ratemask, bool directed, bool no_cck);
+			      u32 ratemask, bool directed, bool no_cck,
+			      struct ieee80211_channel *channel);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
				   const size_t supp_rates_len,
@@ -1487,9 +1497,11 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
				u32 cap);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-			    struct sk_buff *skb, bool need_basic);
+			    struct sk_buff *skb, bool need_basic,
+			    enum ieee80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-				struct sk_buff *skb, bool need_basic);
+				struct sk_buff *skb, bool need_basic,
+				enum ieee80211_band band);
 
 /* channel management */
 enum ieee80211_chan_mode {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bfb57dcc1538..59f8adc2aa5f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -100,6 +100,10 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 			sdata->vif.bss_conf.idle = true;
 			continue;
 		}
+
+		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+			continue;
+
 		/* count everything else */
 		sdata->vif.bss_conf.idle = false;
 		count++;
@@ -121,7 +125,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
 			continue;
 		if (sdata->old_idle == sdata->vif.bss_conf.idle)
 			continue;
@@ -204,6 +209,8 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
 {
 	return type1 == NL80211_IFTYPE_MONITOR ||
		type2 == NL80211_IFTYPE_MONITOR ||
+		type1 == NL80211_IFTYPE_P2P_DEVICE ||
+		type2 == NL80211_IFTYPE_P2P_DEVICE ||
		(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
		(type1 == NL80211_IFTYPE_WDS &&
		 (type2 == NL80211_IFTYPE_WDS ||
@@ -406,9 +413,10 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
  * an error on interface type changes that have been pre-checked, so most
  * checks should be in ieee80211_check_concurrent_iface.
  */
-static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct net_device *dev = wdev->netdev;
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 	u32 changed = 0;
@@ -443,6 +451,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		/* no special treatment */
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +480,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 		 * Copy the hopefully now-present MAC address to
		  * this interface, if it has the special null one.
		  */
-		if (is_zero_ether_addr(dev->dev_addr)) {
+		if (dev && is_zero_ether_addr(dev->dev_addr)) {
 			memcpy(dev->dev_addr,
			        local->hw.wiphy->perm_addr,
			        ETH_ALEN);
@@ -536,15 +545,23 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 		local->fif_probe_req++;
 	}
 
-	changed |= ieee80211_reset_erp_info(sdata);
+	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+		changed |= ieee80211_reset_erp_info(sdata);
 	ieee80211_bss_info_change_notify(sdata, changed);
 
-	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
-	    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-	    sdata->vif.type == NL80211_IFTYPE_AP)
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_MESH_POINT:
 		netif_carrier_off(dev);
-	else
+		break;
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_P2P_DEVICE:
+		break;
+	default:
 		netif_carrier_on(dev);
+	}
 
 	/*
	  * set default queue parameters so drivers don't
@@ -576,6 +593,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 		}
 
 		rate_control_rate_init(sta);
+		netif_carrier_on(dev);
+	} else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+		rcu_assign_pointer(local->p2p_sdata, sdata);
 	}
 
 	/*
@@ -601,7 +621,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 
 	ieee80211_recalc_ps(local, -1);
 
-	netif_tx_start_all_queues(dev);
+	if (dev)
+		netif_tx_start_all_queues(dev);
 
 	return 0;
 err_del_interface:
@@ -631,7 +652,7 @@ static int ieee80211_open(struct net_device *dev)
 	if (err)
 		return err;
 
-	return ieee80211_do_open(dev, true);
+	return ieee80211_do_open(&sdata->wdev, true);
 }
 
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +673,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	/*
	  * Stop TX on this interface first.
	  */
-	netif_tx_stop_all_queues(sdata->dev);
+	if (sdata->dev)
+		netif_tx_stop_all_queues(sdata->dev);
 
 	ieee80211_roc_purge(sdata);
 
@@ -691,14 +713,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		local->fif_probe_req--;
 	}
 
-	netif_addr_lock_bh(sdata->dev);
-	spin_lock_bh(&local->filter_lock);
-	__hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
-			 sdata->dev->addr_len);
-	spin_unlock_bh(&local->filter_lock);
-	netif_addr_unlock_bh(sdata->dev);
+	if (sdata->dev) {
+		netif_addr_lock_bh(sdata->dev);
+		spin_lock_bh(&local->filter_lock);
+		__hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+				 sdata->dev->addr_len);
+		spin_unlock_bh(&local->filter_lock);
+		netif_addr_unlock_bh(sdata->dev);
 
-	ieee80211_configure_filter(local);
+		ieee80211_configure_filter(local);
+	}
 
 	del_timer_sync(&local->dynamic_ps_timer);
 	cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +732,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
 		struct beacon_data *old_beacon =
 			rtnl_dereference(sdata->u.ap.beacon);
-		struct sk_buff *old_probe_resp =
+		struct probe_resp *old_probe_resp =
 			rtnl_dereference(sdata->u.ap.probe_resp);
 
 		/* sdata_running will return false, so this will disable */
@@ -720,7 +744,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
 		synchronize_rcu();
 		kfree(old_beacon);
-		kfree_skb(old_probe_resp);
+		kfree(old_probe_resp);
 
 		/* down all dependent devices, that is VLANs */
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,6 +783,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		ieee80211_adjust_monitor_flags(sdata, -1);
 		ieee80211_configure_filter(local);
 		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		/* relies on synchronize_rcu() below */
+		rcu_assign_pointer(local->p2p_sdata, NULL);
+		/* fall through */
 	default:
 		flush_work(&sdata->work);
 		/*
@@ -771,14 +799,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		skb_queue_purge(&sdata->skb_queue);
 
 		/*
-		 * Disable beaconing here for mesh only, AP and IBSS
-		 * are already taken care of.
-		 */
-		if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-			ieee80211_bss_info_change_notify(sdata,
-				BSS_CHANGED_BEACON_ENABLED);
-
-		/*
		  * Free all remaining keys, there shouldn't be any,
		  * except maybe group keys in AP more or WDS?
		  */
@@ -877,9 +897,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
  * Called when the netdev is removed or, by the code below, before
  * the interface type changes.
  */
-static void ieee80211_teardown_sdata(struct net_device *dev)
+static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
 	int flushed;
 	int i;
@@ -900,6 +919,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
 	WARN_ON(flushed);
 }
 
+static void ieee80211_uninit(struct net_device *dev)
+{
+	ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
					  struct sk_buff *skb)
 {
@@ -909,7 +933,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_dataif_ops = {
 	.ndo_open = ieee80211_open,
 	.ndo_stop = ieee80211_stop,
-	.ndo_uninit = ieee80211_teardown_sdata,
+	.ndo_uninit = ieee80211_uninit,
 	.ndo_start_xmit = ieee80211_subif_start_xmit,
 	.ndo_set_rx_mode = ieee80211_set_multicast_list,
 	.ndo_change_mtu = ieee80211_change_mtu,
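
ieee80211_uninit() exists only to adapt the ndo_uninit prototype, which receives a net_device, to the teardown helper that now takes the sdata directly; IEEE80211_DEV_TO_SUB_IF() is at heart a netdev_priv() lookup. The adapter shape in miniature, with invented names:

	#include <linux/netdevice.h>

	struct my_priv {
		int state;
	};

	static void my_teardown(struct my_priv *priv)
	{
		priv->state = 0;	/* type-dependent cleanup goes here */
	}

	/* ndo_uninit must take the net_device, so wrap the helper */
	static void my_uninit(struct net_device *dev)
	{
		my_teardown(netdev_priv(dev));	/* priv area trails the netdev */
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_uninit = my_uninit,
	};
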
@@ -940,7 +964,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_monitorif_ops = {
 	.ndo_open = ieee80211_open,
 	.ndo_stop = ieee80211_stop,
-	.ndo_uninit = ieee80211_teardown_sdata,
+	.ndo_uninit = ieee80211_uninit,
 	.ndo_start_xmit = ieee80211_monitor_start_xmit,
 	.ndo_set_rx_mode = ieee80211_set_multicast_list,
 	.ndo_change_mtu = ieee80211_change_mtu,
@@ -1099,7 +1123,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	/* and set some type-dependent values */
 	sdata->vif.type = type;
 	sdata->vif.p2p = false;
-	sdata->dev->netdev_ops = &ieee80211_dataif_ops;
 	sdata->wdev.iftype = type;
 
 	sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1130,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 
 	sdata->noack_map = 0;
 
-	/* only monitor differs */
-	sdata->dev->type = ARPHRD_ETHER;
+	/* only monitor/p2p-device differ */
+	if (sdata->dev) {
+		sdata->dev->netdev_ops = &ieee80211_dataif_ops;
+		sdata->dev->type = ARPHRD_ETHER;
+	}
 
 	skb_queue_head_init(&sdata->skb_queue);
 	INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1172,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 		break;
 	case NL80211_IFTYPE_WDS:
 	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NUM_NL80211_IFTYPES:
@@ -1156,18 +1183,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 	ieee80211_debugfs_add_netdev(sdata);
 }
 
-static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
-{
-	switch (sdata->vif.type) {
-	case NL80211_IFTYPE_MESH_POINT:
-		mesh_path_flush_by_iface(sdata);
-		break;
-
-	default:
-		break;
-	}
-}
-
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
					    enum nl80211_iftype type)
 {
@@ -1225,7 +1240,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
 	ieee80211_do_stop(sdata, false);
 
-	ieee80211_teardown_sdata(sdata->dev);
+	ieee80211_teardown_sdata(sdata);
 
 	ret = drv_change_interface(local, sdata, internal_type, p2p);
 	if (ret)
@@ -1240,7 +1255,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
 	ieee80211_setup_sdata(sdata, type);
 
-	err = ieee80211_do_open(sdata->dev, false);
+	err = ieee80211_do_open(&sdata->wdev, false);
 	WARN(err, "type change: do_open returned %d", err);
 
 	return ret;
@@ -1267,14 +1282,14 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 		return ret;
 	} else {
 		/* Purge and reset type-dependent state. */
-		ieee80211_teardown_sdata(sdata->dev);
+		ieee80211_teardown_sdata(sdata);
 		ieee80211_setup_sdata(sdata, type);
 	}
 
 	/* reset some values that shouldn't be kept across type changes */
 	sdata->vif.bss_conf.basic_rates =
 		ieee80211_mandatory_rates(sdata->local,
-			sdata->local->hw.conf.channel->band);
+			sdata->local->oper_channel->band);
 	sdata->drop_unencrypted = 0;
 	if (type == NL80211_IFTYPE_STATION)
 		sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1298,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 }
 
 static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
-				       struct net_device *dev,
-				       enum nl80211_iftype type)
+				       u8 *perm_addr, enum nl80211_iftype type)
 {
 	struct ieee80211_sub_if_data *sdata;
 	u64 mask, start, addr, val, inc;
@@ -1293,13 +1307,12 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 	int i;
 
 	/* default ... something at least */
-	memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+	memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
 
 	if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
	     local->hw.wiphy->n_addresses <= 1)
 		return;
 
-
 	mutex_lock(&local->iflist_mtx);
 
 	switch (type) {
@@ -1312,11 +1325,24 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 		list_for_each_entry(sdata, &local->interfaces, list) {
 			if (sdata->vif.type != NL80211_IFTYPE_AP)
 				continue;
-			memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+			memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
 			break;
 		}
 		/* keep default if no AP interface present */
 		break;
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_GO:
+		if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+			list_for_each_entry(sdata, &local->interfaces, list) {
+				if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+					continue;
+				if (!ieee80211_sdata_running(sdata))
+					continue;
+				memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
+				goto out_unlock;
+			}
+		}
+		/* otherwise fall through */
 	default:
 		/* assign a new address if possible -- try n_addresses first */
 		for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1357,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 		}
 
 		if (!used) {
-			memcpy(dev->perm_addr,
+			memcpy(perm_addr,
			        local->hw.wiphy->addresses[i].addr,
			        ETH_ALEN);
 			break;
@@ -1382,7 +1408,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 		}
 
 		if (!used) {
-			memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+			memcpy(perm_addr, tmp_addr, ETH_ALEN);
 			break;
 		}
 		addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1417,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 		break;
 	}
 
+ out_unlock:
 	mutex_unlock(&local->iflist_mtx);
 }
 
@@ -1398,49 +1425,68 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
		   struct wireless_dev **new_wdev, enum nl80211_iftype type,
		   struct vif_params *params)
 {
-	struct net_device *ndev;
+	struct net_device *ndev = NULL;
 	struct ieee80211_sub_if_data *sdata = NULL;
 	int ret, i;
 	int txqs = 1;
 
 	ASSERT_RTNL();
 
-	if (local->hw.queues >= IEEE80211_NUM_ACS)
-		txqs = IEEE80211_NUM_ACS;
-
-	ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-				name, ieee80211_if_setup, txqs, 1);
-	if (!ndev)
-		return -ENOMEM;
-	dev_net_set(ndev, wiphy_net(local->hw.wiphy));
-
-	ndev->needed_headroom = local->tx_headroom +
-				4*6 /* four MAC addresses */
-				+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
-				+ 6 /* mesh */
-				+ 8 /* rfc1042/bridge tunnel */
-				- ETH_HLEN /* ethernet hard_header_len */
-				+ IEEE80211_ENCRYPT_HEADROOM;
-	ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
-
-	ret = dev_alloc_name(ndev, ndev->name);
-	if (ret < 0)
-		goto fail;
-
-	ieee80211_assign_perm_addr(local, ndev, type);
-	memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
-	SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
-
-	/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
-	sdata = netdev_priv(ndev);
-	ndev->ieee80211_ptr = &sdata->wdev;
-	memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
-	memcpy(sdata->name, ndev->name, IFNAMSIZ);
+	if (type == NL80211_IFTYPE_P2P_DEVICE) {
+		struct wireless_dev *wdev;
+
+		sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
+				GFP_KERNEL);
+		if (!sdata)
+			return -ENOMEM;
+		wdev = &sdata->wdev;
+
+		sdata->dev = NULL;
+		strlcpy(sdata->name, name, IFNAMSIZ);
+		ieee80211_assign_perm_addr(local, wdev->address, type);
+		memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+	} else {
+		if (local->hw.queues >= IEEE80211_NUM_ACS)
+			txqs = IEEE80211_NUM_ACS;
+
+		ndev = alloc_netdev_mqs(sizeof(*sdata) +
+					local->hw.vif_data_size,
+					name, ieee80211_if_setup, txqs, 1);
+		if (!ndev)
+			return -ENOMEM;
+		dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+
+		ndev->needed_headroom = local->tx_headroom +
+					4*6 /* four MAC addresses */
+					+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+					+ 6 /* mesh */
+					+ 8 /* rfc1042/bridge tunnel */
+					- ETH_HLEN /* ethernet hard_header_len */
+					+ IEEE80211_ENCRYPT_HEADROOM;
+		ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+		ret = dev_alloc_name(ndev, ndev->name);
+		if (ret < 0) {
+			free_netdev(ndev);
+			return ret;
+		}
+
+		ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
+		memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+		SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+
+		/* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
+		sdata = netdev_priv(ndev);
+		ndev->ieee80211_ptr = &sdata->wdev;
+		memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+		memcpy(sdata->name, ndev->name, IFNAMSIZ);
+
+		sdata->dev = ndev;
+	}
 
 	/* initialise type-independent data */
 	sdata->wdev.wiphy = local->hw.wiphy;
 	sdata->local = local;
-	sdata->dev = ndev;
 #ifdef CONFIG_INET
 	sdata->arp_filter_state = true;
 #endif
@@ -1469,17 +1515,21 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 	/* setup type-dependent data */
 	ieee80211_setup_sdata(sdata, type);
 
-	if (params) {
-		ndev->ieee80211_ptr->use_4addr = params->use_4addr;
-		if (type == NL80211_IFTYPE_STATION)
-			sdata->u.mgd.use_4addr = params->use_4addr;
-	}
+	if (ndev) {
+		if (params) {
+			ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+			if (type == NL80211_IFTYPE_STATION)
+				sdata->u.mgd.use_4addr = params->use_4addr;
+		}
 
-	ndev->features |= local->hw.netdev_features;
+		ndev->features |= local->hw.netdev_features;
 
-	ret = register_netdevice(ndev);
-	if (ret)
-		goto fail;
+		ret = register_netdevice(ndev);
+		if (ret) {
+			free_netdev(ndev);
+			return ret;
+		}
+	}
 
 	mutex_lock(&local->iflist_mtx);
 	list_add_tail_rcu(&sdata->list, &local->interfaces);
@@ -1489,10 +1539,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 	*new_wdev = &sdata->wdev;
 
 	return 0;
-
- fail:
-	free_netdev(ndev);
-	return ret;
 }
 
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1549,22 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
-	/* clean up type-dependent data */
-	ieee80211_clean_sdata(sdata);
-
 	synchronize_rcu();
-	unregister_netdevice(sdata->dev);
+
+	if (sdata->dev) {
+		unregister_netdevice(sdata->dev);
+	} else {
+		cfg80211_unregister_wdev(&sdata->wdev);
+		kfree(sdata);
+	}
+}
+
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
+{
+	if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
+		return;
+	ieee80211_do_stop(sdata, true);
+	ieee80211_teardown_sdata(sdata);
 }
 
 /*
@@ -1518,6 +1575,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata, *tmp;
 	LIST_HEAD(unreg_list);
+	LIST_HEAD(wdev_list);
 
 	ASSERT_RTNL();
 
@@ -1525,13 +1583,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
 
-		ieee80211_clean_sdata(sdata);
-
-		unregister_netdevice_queue(sdata->dev, &unreg_list);
+		if (sdata->dev)
+			unregister_netdevice_queue(sdata->dev, &unreg_list);
+		else
+			list_add(&sdata->list, &wdev_list);
 	}
 	mutex_unlock(&local->iflist_mtx);
 	unregister_netdevice_many(&unreg_list);
 	list_del(&unreg_list);
+
+	list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+		list_del(&sdata->list);
+		cfg80211_unregister_wdev(&sdata->wdev);
+		kfree(sdata);
+	}
 }
 
 static int netdev_notify(struct notifier_block *nb,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c26e231c733a..bd7529363193 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -207,6 +207,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 		sdata->vif.bss_conf.bssid = NULL;
 	else if (ieee80211_vif_is_mesh(&sdata->vif)) {
 		sdata->vif.bss_conf.bssid = zero;
+	} else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+		sdata->vif.bss_conf.bssid = sdata->vif.addr;
+		WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
+			  "P2P Device BSS changed %#x", changed);
 	} else {
 		WARN_ON(1);
 		return;
@@ -514,6 +518,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
			BIT(IEEE80211_STYPE_AUTH >> 4) |
			BIT(IEEE80211_STYPE_DEAUTH >> 4),
 	},
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+			BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+	},
 };
 
 static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
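
The .rx bitmap is indexed by management-frame subtype, which occupies bits 4-7 of the 802.11 frame control field; that is why every IEEE80211_STYPE_* constant is shifted right by four before BIT(). A sketch of the lookup this table encodes; the helper name is invented, while IEEE80211_FCTL_STYPE is the real subtype mask:

	#include <linux/bitops.h>
	#include <linux/ieee80211.h>

	static bool mgmt_subtype_allowed(u16 rx_bitmap, __le16 frame_control)
	{
		u16 stype = le16_to_cpu(frame_control) & IEEE80211_FCTL_STYPE;

		return rx_bitmap & BIT(stype >> 4);	/* subtype is bits 4-7 */
	}
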
518 527
519static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { 528static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +545,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
536 int priv_size, i; 545 int priv_size, i;
537 struct wiphy *wiphy; 546 struct wiphy *wiphy;
538 547
548 if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
549 !ops->add_interface || !ops->remove_interface ||
550 !ops->configure_filter))
551 return NULL;
552
539 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) 553 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
540 return NULL; 554 return NULL;
541 555
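
Together with the BUG_ON() removals in the next hunk, this turns a driver registering with missing mandatory callbacks into a WARN-and-fail at ieee80211_alloc_hw() time rather than a guaranteed crash later. The shape, reduced to a toy with invented names:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	struct my_ops {
		int (*start)(void *priv);
		void (*stop)(void *priv);
	};

	struct my_ctx {
		const struct my_ops *ops;
	};

	static struct my_ctx *my_alloc(const struct my_ops *ops)
	{
		struct my_ctx *ctx;

		/* refuse half-filled ops up front instead of crashing later */
		if (WARN_ON(!ops->start || !ops->stop))
			return NULL;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (ctx)
			ctx->ops = ops;
		return ctx;
	}
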
@@ -588,13 +602,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-	BUG_ON(!ops->tx);
-	BUG_ON(!ops->start);
-	BUG_ON(!ops->stop);
-	BUG_ON(!ops->config);
-	BUG_ON(!ops->add_interface);
-	BUG_ON(!ops->remove_interface);
-	BUG_ON(!ops->configure_filter);
 	local->ops = ops;
 
 	/* set up some defaults */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0e2f83e71277..ff0296c7bab8 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,11 +109,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 
 	/* Disallow HT40+/- mismatch */
 	if (ie->ht_operation &&
-	    (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
-	     local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+	    (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
+	     sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
	     (sta_channel_type == NL80211_CHAN_HT40MINUS ||
	      sta_channel_type == NL80211_CHAN_HT40PLUS) &&
-	    local->_oper_channel_type != sta_channel_type)
+	    sdata->vif.bss_conf.channel_type != sta_channel_type)
 		goto mismatch;
 
 	return true;
@@ -355,17 +355,18 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *chan = local->oper_channel;
 	u8 *pos;
 
 	if (skb_tailroom(skb) < 3)
 		return -ENOMEM;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[chan->band];
 	if (sband->band == IEEE80211_BAND_2GHZ) {
 		pos = skb_put(skb, 2 + 1);
 		*pos++ = WLAN_EID_DS_PARAMS;
 		*pos++ = 1;
-		*pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+		*pos++ = ieee80211_frequency_to_channel(chan->center_freq);
 	}
 
 	return 0;
@@ -380,7 +381,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
 
 	sband = local->hw.wiphy->bands[local->oper_channel->band];
 	if (!sband->ht_cap.ht_supported ||
-	    local->_oper_channel_type == NL80211_CHAN_NO_HT)
+	    sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
 		return 0;
 
 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -397,7 +398,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_channel *channel = local->oper_channel;
-	enum nl80211_channel_type channel_type = local->_oper_channel_type;
+	enum nl80211_channel_type channel_type =
+		sdata->vif.bss_conf.channel_type;
 	struct ieee80211_supported_band *sband =
 		local->hw.wiphy->bands[channel->band];
 	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -608,12 +610,14 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
 	sdata->vif.bss_conf.basic_rates =
 		ieee80211_mandatory_rates(sdata->local,
-					  sdata->local->hw.conf.channel->band);
+					  sdata->local->oper_channel->band);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
					  BSS_CHANGED_BEACON_ENABLED |
					  BSS_CHANGED_HT |
					  BSS_CHANGED_BASIC_RATES |
					  BSS_CHANGED_BEACON_INT);
+
+	netif_carrier_on(sdata->dev);
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -621,9 +625,15 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
+	netif_carrier_off(sdata->dev);
+
+	/* stop the beacon */
 	ifmsh->mesh_id_len = 0;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
-	sta_info_flush(local, NULL);
+
+	/* flush STAs and mpaths on this iface */
+	sta_info_flush(sdata->local, sdata);
+	mesh_path_flush_by_iface(sdata);
 
 	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
 	del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 13fd5b5fdb0a..25d0f17dec71 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -215,6 +215,9 @@ struct mesh_rmc {
215/* Maximum number of paths per interface */ 215/* Maximum number of paths per interface */
216#define MESH_MAX_MPATHS 1024 216#define MESH_MAX_MPATHS 1024
217 217
218/* Number of frames buffered per destination for unresolved destinations */
219#define MESH_FRAME_QUEUE_LEN 10
220
218/* Public interfaces */ 221/* Public interfaces */
219/* Various */ 222/* Various */
220int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, 223int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 494bc39f61a4..47aeee2d8db1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -17,8 +17,6 @@
17#define MAX_METRIC 0xffffffff 17#define MAX_METRIC 0xffffffff
18#define ARITH_SHIFT 8 18#define ARITH_SHIFT 8
19 19
20/* Number of frames buffered per destination for unresolved destinations */
21#define MESH_FRAME_QUEUE_LEN 10
22#define MAX_PREQ_QUEUE_LEN 64 20#define MAX_PREQ_QUEUE_LEN 64
23 21
24/* Destination only */ 22/* Destination only */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 075bc535c601..aa749818860e 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
203{ 203{
204 struct sk_buff *skb; 204 struct sk_buff *skb;
205 struct ieee80211_hdr *hdr; 205 struct ieee80211_hdr *hdr;
206 struct sk_buff_head tmpq;
207 unsigned long flags; 206 unsigned long flags;
208 207
209 rcu_assign_pointer(mpath->next_hop, sta); 208 rcu_assign_pointer(mpath->next_hop, sta);
210 209
211 __skb_queue_head_init(&tmpq);
212
213 spin_lock_irqsave(&mpath->frame_queue.lock, flags); 210 spin_lock_irqsave(&mpath->frame_queue.lock, flags);
214 211 skb_queue_walk(&mpath->frame_queue, skb) {
215 while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
216 hdr = (struct ieee80211_hdr *) skb->data; 212 hdr = (struct ieee80211_hdr *) skb->data;
217 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 213 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
218 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); 214 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
219 __skb_queue_tail(&tmpq, skb);
220 } 215 }
221 216
222 skb_queue_splice(&tmpq, &mpath->frame_queue);
223 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 217 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
224} 218}
225 219
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
285 struct mesh_path *from_mpath, 279 struct mesh_path *from_mpath,
286 bool copy) 280 bool copy)
287{ 281{
288 struct sk_buff *skb, *cp_skb = NULL; 282 struct sk_buff *skb, *fskb, *tmp;
289 struct sk_buff_head gateq, failq; 283 struct sk_buff_head failq;
290 unsigned long flags; 284 unsigned long flags;
291 int num_skbs;
292 285
293 BUG_ON(gate_mpath == from_mpath); 286 BUG_ON(gate_mpath == from_mpath);
294 BUG_ON(!gate_mpath->next_hop); 287 BUG_ON(!gate_mpath->next_hop);
295 288
296 __skb_queue_head_init(&gateq);
297 __skb_queue_head_init(&failq); 289 __skb_queue_head_init(&failq);
298 290
299 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); 291 spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
300 skb_queue_splice_init(&from_mpath->frame_queue, &failq); 292 skb_queue_splice_init(&from_mpath->frame_queue, &failq);
301 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); 293 spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
302 294
303 num_skbs = skb_queue_len(&failq); 295 skb_queue_walk_safe(&failq, fskb, tmp) {
304 296 if (skb_queue_len(&gate_mpath->frame_queue) >=
305 while (num_skbs--) { 297 MESH_FRAME_QUEUE_LEN) {
306 skb = __skb_dequeue(&failq); 298 mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
307 if (copy) { 299 break;
308 cp_skb = skb_copy(skb, GFP_ATOMIC);
309 if (cp_skb)
310 __skb_queue_tail(&failq, cp_skb);
311 } 300 }
312 301
302 skb = skb_copy(fskb, GFP_ATOMIC);
303 if (WARN_ON(!skb))
304 break;
305
313 prepare_for_gate(skb, gate_mpath->dst, gate_mpath); 306 prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
314 __skb_queue_tail(&gateq, skb); 307 skb_queue_tail(&gate_mpath->frame_queue, skb);
308
309 if (copy)
310 continue;
311
312 __skb_unlink(fskb, &failq);
313 kfree_skb(fskb);
315 } 314 }
316 315
317 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
318 skb_queue_splice(&gateq, &gate_mpath->frame_queue);
319 mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n", 316 mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
320 gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue)); 317 gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
321 spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
322 318
323 if (!copy) 319 if (!copy)
324 return; 320 return;
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
531 527
532 read_lock_bh(&pathtbl_resize_lock); 528 read_lock_bh(&pathtbl_resize_lock);
533 memcpy(new_mpath->dst, dst, ETH_ALEN); 529 memcpy(new_mpath->dst, dst, ETH_ALEN);
534 memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN); 530 eth_broadcast_addr(new_mpath->rann_snd_addr);
535 new_mpath->is_root = false; 531 new_mpath->is_root = false;
536 new_mpath->sdata = sdata; 532 new_mpath->sdata = sdata;
537 new_mpath->flags = 0; 533 new_mpath->flags = 0;
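The rewrite of mesh_path_move_to_queue() above drops both temporary queues: frames are copied straight onto the gate path's queue until MESH_FRAME_QUEUE_LEN (now shared via mesh.h) is reached, and the original is unlinked and freed only when the caller did not ask for a copy. Below is a minimal userspace sketch of that bounded move-or-copy pattern; the list type and helpers are illustrative stand-ins for sk_buff_head, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for sk_buff / sk_buff_head; illustrative only. */
struct frame {
	struct frame *next;
	int id;
};

struct frame_queue {
	struct frame *head;
	size_t len;
};

#define QUEUE_LIMIT 10 /* plays the role of MESH_FRAME_QUEUE_LEN */

static void queue_tail(struct frame_queue *q, struct frame *f)
{
	struct frame **p = &q->head;

	while (*p)
		p = &(*p)->next;
	f->next = NULL;
	*p = f;
	q->len++;
}

/* Copy frames from src to dst until dst is full; consume src unless copy. */
static void move_or_copy(struct frame_queue *dst, struct frame_queue *src,
			 int copy)
{
	struct frame *f = src->head, *next;
	struct frame **link = &src->head;

	while (f) {
		next = f->next; /* safe walk: grab next before unlinking */
		if (dst->len >= QUEUE_LIMIT) {
			fprintf(stderr, "destination queue full\n");
			break;
		}

		struct frame *dup = malloc(sizeof(*dup));
		if (!dup)
			break;
		memcpy(dup, f, sizeof(*dup));
		queue_tail(dst, dup);

		if (copy) {
			link = &f->next; /* keep the original in src */
		} else {
			*link = next;    /* unlink and free the original */
			src->len--;
			free(f);
		}
		f = next;
	}
}

int main(void)
{
	struct frame_queue src = { NULL, 0 }, dst = { NULL, 0 };

	for (int i = 0; i < 15; i++) {
		struct frame *f = malloc(sizeof(*f));
		f->id = i;
		queue_tail(&src, f);
	}
	move_or_copy(&dst, &src, 0);
	printf("dst has %zu frames, src keeps %zu\n", dst.len, src.len);
	return 0;
}

The same no-temporary-queue idea shows up in mesh_path_assign_nexthop() above, which now rewrites addresses in place under the queue lock with skb_queue_walk() instead of dequeueing into a scratch list and splicing back.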
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f20e9f26d137..9d7ad366ef09 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -117,7 +117,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
117 u16 ht_opmode; 117 u16 ht_opmode;
118 bool non_ht_sta = false, ht20_sta = false; 118 bool non_ht_sta = false, ht20_sta = false;
119 119
120 if (local->_oper_channel_type == NL80211_CHAN_NO_HT) 120 if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
121 return 0; 121 return 0;
122 122
123 rcu_read_lock(); 123 rcu_read_lock();
@@ -147,7 +147,8 @@ out:
147 147
148 if (non_ht_sta) 148 if (non_ht_sta)
149 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; 149 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
150 else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20) 150 else if (ht20_sta &&
151 sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
151 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; 152 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
152 else 153 else
153 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 154 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -215,12 +216,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
215 u8 *da, __le16 llid, __le16 plid, __le16 reason) { 216 u8 *da, __le16 llid, __le16 plid, __le16 reason) {
216 struct ieee80211_local *local = sdata->local; 217 struct ieee80211_local *local = sdata->local;
217 struct sk_buff *skb; 218 struct sk_buff *skb;
219 struct ieee80211_tx_info *info;
218 struct ieee80211_mgmt *mgmt; 220 struct ieee80211_mgmt *mgmt;
219 bool include_plid = false; 221 bool include_plid = false;
220 u16 peering_proto = 0; 222 u16 peering_proto = 0;
221 u8 *pos, ie_len = 4; 223 u8 *pos, ie_len = 4;
222 int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) + 224 int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
223 sizeof(mgmt->u.action.u.self_prot); 225 sizeof(mgmt->u.action.u.self_prot);
226 int err = -ENOMEM;
224 227
225 skb = dev_alloc_skb(local->tx_headroom + 228 skb = dev_alloc_skb(local->tx_headroom +
226 hdr_len + 229 hdr_len +
@@ -236,6 +239,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
236 sdata->u.mesh.ie_len); 239 sdata->u.mesh.ie_len);
237 if (!skb) 240 if (!skb)
238 return -1; 241 return -1;
242 info = IEEE80211_SKB_CB(skb);
239 skb_reserve(skb, local->tx_headroom); 243 skb_reserve(skb, local->tx_headroom);
240 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 244 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
241 memset(mgmt, 0, hdr_len); 245 memset(mgmt, 0, hdr_len);
@@ -256,15 +260,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
256 pos = skb_put(skb, 2); 260 pos = skb_put(skb, 2);
257 memcpy(pos + 2, &plid, 2); 261 memcpy(pos + 2, &plid, 2);
258 } 262 }
259 if (ieee80211_add_srates_ie(sdata, skb, true) || 263 if (ieee80211_add_srates_ie(sdata, skb, true,
260 ieee80211_add_ext_srates_ie(sdata, skb, true) || 264 local->oper_channel->band) ||
265 ieee80211_add_ext_srates_ie(sdata, skb, true,
266 local->oper_channel->band) ||
261 mesh_add_rsn_ie(skb, sdata) || 267 mesh_add_rsn_ie(skb, sdata) ||
262 mesh_add_meshid_ie(skb, sdata) || 268 mesh_add_meshid_ie(skb, sdata) ||
263 mesh_add_meshconf_ie(skb, sdata)) 269 mesh_add_meshconf_ie(skb, sdata))
264 return -1; 270 goto free;
265 } else { /* WLAN_SP_MESH_PEERING_CLOSE */ 271 } else { /* WLAN_SP_MESH_PEERING_CLOSE */
272 info->flags |= IEEE80211_TX_CTL_NO_ACK;
266 if (mesh_add_meshid_ie(skb, sdata)) 273 if (mesh_add_meshid_ie(skb, sdata))
267 return -1; 274 goto free;
268 } 275 }
269 276
270 /* Add Mesh Peering Management element */ 277 /* Add Mesh Peering Management element */
@@ -283,11 +290,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
283 ie_len += 2; /* reason code */ 290 ie_len += 2; /* reason code */
284 break; 291 break;
285 default: 292 default:
286 return -EINVAL; 293 err = -EINVAL;
294 goto free;
287 } 295 }
288 296
289 if (WARN_ON(skb_tailroom(skb) < 2 + ie_len)) 297 if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
290 return -ENOMEM; 298 goto free;
291 299
292 pos = skb_put(skb, 2 + ie_len); 300 pos = skb_put(skb, 2 + ie_len);
293 *pos++ = WLAN_EID_PEER_MGMT; 301 *pos++ = WLAN_EID_PEER_MGMT;
@@ -308,14 +316,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
308 if (action != WLAN_SP_MESH_PEERING_CLOSE) { 316 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
309 if (mesh_add_ht_cap_ie(skb, sdata) || 317 if (mesh_add_ht_cap_ie(skb, sdata) ||
310 mesh_add_ht_oper_ie(skb, sdata)) 318 mesh_add_ht_oper_ie(skb, sdata))
311 return -1; 319 goto free;
312 } 320 }
313 321
314 if (mesh_add_vendor_ies(skb, sdata)) 322 if (mesh_add_vendor_ies(skb, sdata))
315 return -1; 323 goto free;
316 324
317 ieee80211_tx_skb(sdata, skb); 325 ieee80211_tx_skb(sdata, skb);
318 return 0; 326 return 0;
327free:
328 kfree_skb(skb);
329 return err;
319} 330}
320 331
321/** 332/**
@@ -360,9 +371,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
360 371
361 spin_lock_bh(&sta->lock); 372 spin_lock_bh(&sta->lock);
362 sta->last_rx = jiffies; 373 sta->last_rx = jiffies;
374 if (sta->plink_state == NL80211_PLINK_ESTAB) {
375 spin_unlock_bh(&sta->lock);
376 return sta;
377 }
378
363 sta->sta.supp_rates[band] = rates; 379 sta->sta.supp_rates[band] = rates;
364 if (elems->ht_cap_elem && 380 if (elems->ht_cap_elem &&
365 sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT) 381 sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
366 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 382 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
367 elems->ht_cap_elem, 383 elems->ht_cap_elem,
368 &sta->sta.ht_cap); 384 &sta->sta.ht_cap);
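Every early `return -1` in mesh_plink_frame_tx() used to leak the freshly allocated skb; the patch routes all failures through a single `free:` label that does kfree_skb() and returns `err` (-ENOMEM by default, -EINVAL for an unknown action). A small self-contained sketch of the same single-exit cleanup idiom, with illustrative helpers in place of the IE builders:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the IE-building helpers. */
static int add_rates(char *buf)  { (void)buf; return 0; }
static int add_meshid(char *buf) { (void)buf; return 0; }

static int build_frame(int action)
{
	int err = -ENOMEM;
	char *buf = malloc(256);

	if (!buf)
		return -ENOMEM;

	if (add_rates(buf) || add_meshid(buf))
		goto free;		/* allocation survives; free it once, here */

	if (action < 0) {
		err = -EINVAL;	/* distinguish bad input from OOM */
		goto free;
	}

	printf("frame built\n");
	free(buf);
	return 0;

free:
	free(buf);
	return err;
}

int main(void)
{
	return build_frame(1) ? EXIT_FAILURE : EXIT_SUCCESS;
}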
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a4a5acdbaa4d..a8cf70bf1cba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -146,6 +146,9 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
146 if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) 146 if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
147 return; 147 return;
148 148
149 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
150 return;
151
149 mod_timer(&sdata->u.mgd.bcn_mon_timer, 152 mod_timer(&sdata->u.mgd.bcn_mon_timer,
150 round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout)); 153 round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
151} 154}
@@ -182,15 +185,15 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
182 u16 ht_opmode; 185 u16 ht_opmode;
183 bool disable_40 = false; 186 bool disable_40 = false;
184 187
185 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 188 sband = local->hw.wiphy->bands[local->oper_channel->band];
186 189
187 switch (sdata->vif.bss_conf.channel_type) { 190 switch (sdata->vif.bss_conf.channel_type) {
188 case NL80211_CHAN_HT40PLUS: 191 case NL80211_CHAN_HT40PLUS:
189 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS) 192 if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
190 disable_40 = true; 193 disable_40 = true;
191 break; 194 break;
192 case NL80211_CHAN_HT40MINUS: 195 case NL80211_CHAN_HT40MINUS:
193 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS) 196 if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
194 disable_40 = true; 197 disable_40 = true;
195 break; 198 break;
196 default: 199 default:
@@ -326,6 +329,26 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
326 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); 329 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
327} 330}
328 331
332static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
333 struct sk_buff *skb,
334 struct ieee80211_supported_band *sband)
335{
336 u8 *pos;
337 u32 cap;
338 struct ieee80211_sta_vht_cap vht_cap;
339
340 BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
341
342 memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
343
344 /* determine capability flags */
345 cap = vht_cap.cap;
346
347 /* reserve and fill IE */
348 pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
349 ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
350}
351
329static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 352static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
330{ 353{
331 struct ieee80211_local *local = sdata->local; 354 struct ieee80211_local *local = sdata->local;
@@ -371,6 +394,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
371 4 + /* power capability */ 394 4 + /* power capability */
372 2 + 2 * sband->n_channels + /* supported channels */ 395 2 + 2 * sband->n_channels + /* supported channels */
373 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ 396 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
397 2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
374 assoc_data->ie_len + /* extra IEs */ 398 assoc_data->ie_len + /* extra IEs */
375 9, /* WMM */ 399 9, /* WMM */
376 GFP_KERNEL); 400 GFP_KERNEL);
@@ -503,6 +527,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
503 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param, 527 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
504 sband, local->oper_channel, ifmgd->ap_smps); 528 sband, local->oper_channel, ifmgd->ap_smps);
505 529
530 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
531 ieee80211_add_vht_ie(sdata, skb, sband);
532
506 /* if present, add any custom non-vendor IEs that go after HT */ 533 /* if present, add any custom non-vendor IEs that go after HT */
507 if (assoc_data->ie_len && assoc_data->ie) { 534 if (assoc_data->ie_len && assoc_data->ie) {
508 noffset = ieee80211_ie_split_vendor(assoc_data->ie, 535 noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -583,8 +610,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
583 IEEE80211_SKB_CB(skb)->flags |= 610 IEEE80211_SKB_CB(skb)->flags |=
584 IEEE80211_TX_INTFL_DONT_ENCRYPT; 611 IEEE80211_TX_INTFL_DONT_ENCRYPT;
585 612
586 drv_mgd_prepare_tx(local, sdata);
587
588 ieee80211_tx_skb(sdata, skb); 613 ieee80211_tx_skb(sdata, skb);
589 } 614 }
590} 615}
@@ -687,6 +712,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
687 /* XXX: shouldn't really modify cfg80211-owned data! */ 712 /* XXX: shouldn't really modify cfg80211-owned data! */
688 ifmgd->associated->channel = sdata->local->oper_channel; 713 ifmgd->associated->channel = sdata->local->oper_channel;
689 714
715 /* XXX: wait for a beacon first? */
690 ieee80211_wake_queues_by_reason(&sdata->local->hw, 716 ieee80211_wake_queues_by_reason(&sdata->local->hw,
691 IEEE80211_QUEUE_STOP_REASON_CSA); 717 IEEE80211_QUEUE_STOP_REASON_CSA);
692 out: 718 out:
@@ -763,36 +789,32 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
763 789
764 sdata->local->csa_channel = new_ch; 790 sdata->local->csa_channel = new_ch;
765 791
792 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
793
794 if (sw_elem->mode)
795 ieee80211_stop_queues_by_reason(&sdata->local->hw,
796 IEEE80211_QUEUE_STOP_REASON_CSA);
797
766 if (sdata->local->ops->channel_switch) { 798 if (sdata->local->ops->channel_switch) {
767 /* use driver's channel switch callback */ 799 /* use driver's channel switch callback */
768 struct ieee80211_channel_switch ch_switch; 800 struct ieee80211_channel_switch ch_switch = {
769 memset(&ch_switch, 0, sizeof(ch_switch)); 801 .timestamp = timestamp,
770 ch_switch.timestamp = timestamp; 802 .block_tx = sw_elem->mode,
771 if (sw_elem->mode) { 803 .channel = new_ch,
772 ch_switch.block_tx = true; 804 .count = sw_elem->count,
773 ieee80211_stop_queues_by_reason(&sdata->local->hw, 805 };
774 IEEE80211_QUEUE_STOP_REASON_CSA); 806
775 }
776 ch_switch.channel = new_ch;
777 ch_switch.count = sw_elem->count;
778 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
779 drv_channel_switch(sdata->local, &ch_switch); 807 drv_channel_switch(sdata->local, &ch_switch);
780 return; 808 return;
781 } 809 }
782 810
783 /* channel switch handled in software */ 811 /* channel switch handled in software */
784 if (sw_elem->count <= 1) { 812 if (sw_elem->count <= 1)
785 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); 813 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
786 } else { 814 else
787 if (sw_elem->mode)
788 ieee80211_stop_queues_by_reason(&sdata->local->hw,
789 IEEE80211_QUEUE_STOP_REASON_CSA);
790 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
791 mod_timer(&ifmgd->chswitch_timer, 815 mod_timer(&ifmgd->chswitch_timer,
792 jiffies + 816 TU_TO_EXP_TIME(sw_elem->count *
793 msecs_to_jiffies(sw_elem->count * 817 cbss->beacon_interval));
794 cbss->beacon_interval));
795 }
796} 818}
797 819
798static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, 820static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
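In the channel-switch hunk above, the memset() plus four field assignments become one designated initializer, which zero-fills every member left unnamed and documents the struct contents in one place. A standalone illustration of the idiom (the struct here is a simplified stand-in for struct ieee80211_channel_switch):

#include <stdbool.h>
#include <stdio.h>

struct channel_switch {
	unsigned long long timestamp;
	bool block_tx;
	int channel;		/* simplified: index instead of a channel pointer */
	unsigned char count;
	int reserved;		/* not named below, so implicitly zeroed */
};

int main(void)
{
	struct channel_switch ch_switch = {
		.timestamp = 123456789ULL,
		.block_tx  = true,
		.channel   = 36,
		.count     = 2,
	};

	printf("count=%u reserved=%d\n", ch_switch.count, ch_switch.reserved);
	return 0;
}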
@@ -1007,6 +1029,16 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1007 ieee80211_change_ps(local); 1029 ieee80211_change_ps(local);
1008} 1030}
1009 1031
1032void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
1033{
1034 bool ps_allowed = ieee80211_powersave_allowed(sdata);
1035
1036 if (sdata->vif.bss_conf.ps != ps_allowed) {
1037 sdata->vif.bss_conf.ps = ps_allowed;
1038 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
1039 }
1040}
1041
1010void ieee80211_dynamic_ps_disable_work(struct work_struct *work) 1042void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
1011{ 1043{
1012 struct ieee80211_local *local = 1044 struct ieee80211_local *local =
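ieee80211_recalc_ps_vif() follows the usual recompute-then-notify-on-change shape: BSS_CHANGED_PS fires only when the per-vif powersave value actually flips. A tiny sketch of the pattern with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct vif_conf {
	bool ps;
};

static bool powersave_allowed(void)
{
	return true; /* stand-in for the real policy check */
}

static void notify(const char *what)
{
	printf("notify: %s\n", what);
}

static void recalc_ps(struct vif_conf *conf)
{
	bool allowed = powersave_allowed();

	if (conf->ps != allowed) {	/* only fire on an actual transition */
		conf->ps = allowed;
		notify("BSS_CHANGED_PS");
	}
}

int main(void)
{
	struct vif_conf conf = { .ps = false };

	recalc_ps(&conf); /* fires once */
	recalc_ps(&conf); /* state unchanged, stays silent */
	return 0;
}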
@@ -1239,7 +1271,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
1239 } 1271 }
1240 1272
1241 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 1273 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
1242 if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) 1274 if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
1243 use_short_slot = true; 1275 use_short_slot = true;
1244 1276
1245 if (use_protection != bss_conf->use_cts_prot) { 1277 if (use_protection != bss_conf->use_cts_prot) {
@@ -1310,6 +1342,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1310 ieee80211_recalc_smps(local); 1342 ieee80211_recalc_smps(local);
1311 mutex_unlock(&local->iflist_mtx); 1343 mutex_unlock(&local->iflist_mtx);
1312 1344
1345 ieee80211_recalc_ps_vif(sdata);
1346
1313 netif_tx_start_all_queues(sdata->dev); 1347 netif_tx_start_all_queues(sdata->dev);
1314 netif_carrier_on(sdata->dev); 1348 netif_carrier_on(sdata->dev);
1315} 1349}
@@ -1371,6 +1405,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1371 } 1405 }
1372 local->ps_sdata = NULL; 1406 local->ps_sdata = NULL;
1373 1407
1408 /* disable per-vif ps */
1409 ieee80211_recalc_ps_vif(sdata);
1410
1374 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */ 1411 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
1375 if (tx) 1412 if (tx)
1376 drv_flush(local, false); 1413 drv_flush(local, false);
@@ -1542,7 +1579,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1542 ssid_len = ssid[1]; 1579 ssid_len = ssid[1];
1543 1580
1544 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, 1581 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1545 0, (u32) -1, true, false); 1582 0, (u32) -1, true, false,
1583 ifmgd->associated->channel);
1546 } 1584 }
1547 1585
1548 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1586 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1645,7 +1683,9 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1645 ssid_len = ssid[1]; 1683 ssid_len = ssid[1];
1646 1684
1647 skb = ieee80211_build_probe_req(sdata, cbss->bssid, 1685 skb = ieee80211_build_probe_req(sdata, cbss->bssid,
1648 (u32) -1, ssid + 2, ssid_len, 1686 (u32) -1,
1687 sdata->local->oper_channel,
1688 ssid + 2, ssid_len,
1649 NULL, 0, true); 1689 NULL, 0, true);
1650 1690
1651 return skb; 1691 return skb;
@@ -1656,7 +1696,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1656{ 1696{
1657 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1697 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1658 struct ieee80211_local *local = sdata->local; 1698 struct ieee80211_local *local = sdata->local;
1659 u8 bssid[ETH_ALEN];
1660 u8 frame_buf[DEAUTH_DISASSOC_LEN]; 1699 u8 frame_buf[DEAUTH_DISASSOC_LEN];
1661 1700
1662 mutex_lock(&ifmgd->mtx); 1701 mutex_lock(&ifmgd->mtx);
@@ -1665,9 +1704,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1665 return; 1704 return;
1666 } 1705 }
1667 1706
1668 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); 1707 sdata_info(sdata, "Connection to AP %pM lost\n",
1669 1708 ifmgd->associated->bssid);
1670 sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
1671 1709
1672 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 1710 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1673 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 1711 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1685,7 +1723,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1685 mutex_unlock(&local->mtx); 1723 mutex_unlock(&local->mtx);
1686} 1724}
1687 1725
1688void ieee80211_beacon_connection_loss_work(struct work_struct *work) 1726static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
1689{ 1727{
1690 struct ieee80211_sub_if_data *sdata = 1728 struct ieee80211_sub_if_data *sdata =
1691 container_of(work, struct ieee80211_sub_if_data, 1729 container_of(work, struct ieee80211_sub_if_data,
@@ -2232,14 +2270,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2232 mutex_unlock(&local->iflist_mtx); 2270 mutex_unlock(&local->iflist_mtx);
2233 } 2271 }
2234 2272
2235 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && 2273 if (elems->ch_switch_ie &&
2236 (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, 2274 memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
2237 ETH_ALEN) == 0)) { 2275 ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
2238 struct ieee80211_channel_sw_ie *sw_elem =
2239 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
2240 ieee80211_sta_process_chanswitch(sdata, sw_elem,
2241 bss, rx_status->mactime); 2276 bss, rx_status->mactime);
2242 }
2243} 2277}
2244 2278
2245 2279
@@ -2326,7 +2360,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2326 if (baselen > len) 2360 if (baselen > len)
2327 return; 2361 return;
2328 2362
2329 if (rx_status->freq != local->hw.conf.channel->center_freq) 2363 if (rx_status->freq != local->oper_channel->center_freq)
2330 return; 2364 return;
2331 2365
2332 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && 2366 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2490,7 +2524,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2490 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { 2524 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
2491 struct ieee80211_supported_band *sband; 2525 struct ieee80211_supported_band *sband;
2492 2526
2493 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2527 sband = local->hw.wiphy->bands[local->oper_channel->band];
2494 2528
2495 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, 2529 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2496 bssid, true); 2530 bssid, true);
@@ -2673,7 +2707,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2673 * will not answer to direct packet in unassociated state. 2707 * will not answer to direct packet in unassociated state.
2674 */ 2708 */
2675 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], 2709 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2676 NULL, 0, (u32) -1, true, false); 2710 NULL, 0, (u32) -1, true, false,
2711 auth_data->bss->channel);
2677 } 2712 }
2678 2713
2679 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 2714 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -3000,41 +3035,17 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3000 return 0; 3035 return 0;
3001} 3036}
3002 3037
3003static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, 3038static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3004 struct cfg80211_bss *cbss, bool assoc) 3039 struct cfg80211_bss *cbss)
3005{ 3040{
3006 struct ieee80211_local *local = sdata->local; 3041 struct ieee80211_local *local = sdata->local;
3007 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3042 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3008 struct ieee80211_bss *bss = (void *)cbss->priv;
3009 struct sta_info *sta = NULL;
3010 bool have_sta = false;
3011 int err;
3012 int ht_cfreq; 3043 int ht_cfreq;
3013 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 3044 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
3014 const u8 *ht_oper_ie; 3045 const u8 *ht_oper_ie;
3015 const struct ieee80211_ht_operation *ht_oper = NULL; 3046 const struct ieee80211_ht_operation *ht_oper = NULL;
3016 struct ieee80211_supported_band *sband; 3047 struct ieee80211_supported_band *sband;
3017 3048
3018 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
3019 return -EINVAL;
3020
3021 if (assoc) {
3022 rcu_read_lock();
3023 have_sta = sta_info_get(sdata, cbss->bssid);
3024 rcu_read_unlock();
3025 }
3026
3027 if (!have_sta) {
3028 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
3029 if (!sta)
3030 return -ENOMEM;
3031 }
3032
3033 mutex_lock(&local->mtx);
3034 ieee80211_recalc_idle(sdata->local);
3035 mutex_unlock(&local->mtx);
3036
3037 /* switch to the right channel */
3038 sband = local->hw.wiphy->bands[cbss->channel->band]; 3049 sband = local->hw.wiphy->bands[cbss->channel->band];
3039 3050
3040 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ; 3051 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3097,10 +3108,51 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3097 local->oper_channel = cbss->channel; 3108 local->oper_channel = cbss->channel;
3098 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 3109 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
3099 3110
3100 if (sta) { 3111 return 0;
3112}
3113
3114static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3115 struct cfg80211_bss *cbss, bool assoc)
3116{
3117 struct ieee80211_local *local = sdata->local;
3118 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3119 struct ieee80211_bss *bss = (void *)cbss->priv;
3120 struct sta_info *new_sta = NULL;
3121 bool have_sta = false;
3122 int err;
3123
3124 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
3125 return -EINVAL;
3126
3127 if (assoc) {
3128 rcu_read_lock();
3129 have_sta = sta_info_get(sdata, cbss->bssid);
3130 rcu_read_unlock();
3131 }
3132
3133 if (!have_sta) {
3134 new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
3135 if (!new_sta)
3136 return -ENOMEM;
3137 }
3138
3139 mutex_lock(&local->mtx);
3140 ieee80211_recalc_idle(sdata->local);
3141 mutex_unlock(&local->mtx);
3142
3143 if (new_sta) {
3101 u32 rates = 0, basic_rates = 0; 3144 u32 rates = 0, basic_rates = 0;
3102 bool have_higher_than_11mbit; 3145 bool have_higher_than_11mbit;
3103 int min_rate = INT_MAX, min_rate_index = -1; 3146 int min_rate = INT_MAX, min_rate_index = -1;
3147 struct ieee80211_supported_band *sband;
3148
3149 sband = local->hw.wiphy->bands[cbss->channel->band];
3150
3151 err = ieee80211_prep_channel(sdata, cbss);
3152 if (err) {
3153 sta_info_free(local, new_sta);
3154 return err;
3155 }
3104 3156
3105 ieee80211_get_rates(sband, bss->supp_rates, 3157 ieee80211_get_rates(sband, bss->supp_rates,
3106 bss->supp_rates_len, 3158 bss->supp_rates_len,
@@ -3122,7 +3174,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3122 basic_rates = BIT(min_rate_index); 3174 basic_rates = BIT(min_rate_index);
3123 } 3175 }
3124 3176
3125 sta->sta.supp_rates[cbss->channel->band] = rates; 3177 new_sta->sta.supp_rates[cbss->channel->band] = rates;
3126 sdata->vif.bss_conf.basic_rates = basic_rates; 3178 sdata->vif.bss_conf.basic_rates = basic_rates;
3127 3179
3128 /* cf. IEEE 802.11 9.2.12 */ 3180 /* cf. IEEE 802.11 9.2.12 */
@@ -3145,10 +3197,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3145 BSS_CHANGED_BEACON_INT); 3197 BSS_CHANGED_BEACON_INT);
3146 3198
3147 if (assoc) 3199 if (assoc)
3148 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 3200 sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
3149 3201
3150 err = sta_info_insert(sta); 3202 err = sta_info_insert(new_sta);
3151 sta = NULL; 3203 new_sta = NULL;
3152 if (err) { 3204 if (err) {
3153 sdata_info(sdata, 3205 sdata_info(sdata,
3154 "failed to insert STA entry for the AP (error %d)\n", 3206 "failed to insert STA entry for the AP (error %d)\n",
@@ -3300,9 +3352,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3300 } 3352 }
3301 3353
3302 /* prepare assoc data */ 3354 /* prepare assoc data */
3303 3355
3304 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 3356 /*
3305 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 3357 * keep only the 40 MHz disable bit set as it might have
3358 * been set during authentication already, all other bits
3359 * should be reset for a new connection
3360 */
3361 ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
3306 3362
3307 ifmgd->beacon_crc_valid = false; 3363 ifmgd->beacon_crc_valid = false;
3308 3364
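Instead of clearing IEEE80211_STA_DISABLE_11N and IEEE80211_STA_NULLFUNC_ACKED individually, the assoc path now masks the whole flag word down to the one bit worth carrying over from authentication: `flags &= KEEP` clears everything outside KEEP. Demonstrated below with illustrative bit values:

#include <stdio.h>

#define STA_DISABLE_11N    (1u << 0)
#define STA_DISABLE_40MHZ  (1u << 1)	/* the one bit to carry over */
#define STA_NULLFUNC_ACKED (1u << 2)

int main(void)
{
	unsigned int flags = STA_DISABLE_11N | STA_DISABLE_40MHZ |
			     STA_NULLFUNC_ACKED;

	/* keep only the 40 MHz disable bit; every other flag resets */
	flags &= STA_DISABLE_40MHZ;

	printf("flags after mask: 0x%x\n", flags); /* prints 0x2 */
	return 0;
}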
@@ -3318,21 +3374,34 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3318 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || 3374 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
3319 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { 3375 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
3320 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3376 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3377 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3321 netdev_info(sdata->dev, 3378 netdev_info(sdata->dev,
3322 "disabling HT due to WEP/TKIP use\n"); 3379 "disabling HT/VHT due to WEP/TKIP use\n");
3323 } 3380 }
3324 } 3381 }
3325 3382
3326 if (req->flags & ASSOC_REQ_DISABLE_HT) 3383 if (req->flags & ASSOC_REQ_DISABLE_HT) {
3327 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3384 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3385 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3386 }
3328 3387
3329 /* Also disable HT if we don't support it or the AP doesn't use WMM */ 3388 /* Also disable HT if we don't support it or the AP doesn't use WMM */
3330 sband = local->hw.wiphy->bands[req->bss->channel->band]; 3389 sband = local->hw.wiphy->bands[req->bss->channel->band];
3331 if (!sband->ht_cap.ht_supported || 3390 if (!sband->ht_cap.ht_supported ||
3332 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) { 3391 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
3333 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3392 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3334 netdev_info(sdata->dev, 3393 if (!bss->wmm_used)
3335 "disabling HT as WMM/QoS is not supported\n"); 3394 netdev_info(sdata->dev,
3395 "disabling HT as WMM/QoS is not supported by the AP\n");
3396 }
3397
3398 /* disable VHT if we don't support it or the AP doesn't use WMM */
3399 if (!sband->vht_cap.vht_supported ||
3400 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
3401 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3402 if (!bss->wmm_used)
3403 netdev_info(sdata->dev,
3404 "disabling VHT as WMM/QoS is not supported by the AP\n");
3336 } 3405 }
3337 3406
3338 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); 3407 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3467,14 +3536,17 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
3467 req->bssid, req->reason_code); 3536 req->bssid, req->reason_code);
3468 3537
3469 if (ifmgd->associated && 3538 if (ifmgd->associated &&
3470 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) 3539 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
3471 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 3540 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3472 req->reason_code, true, frame_buf); 3541 req->reason_code, true, frame_buf);
3473 else 3542 } else {
3543 drv_mgd_prepare_tx(sdata->local, sdata);
3474 ieee80211_send_deauth_disassoc(sdata, req->bssid, 3544 ieee80211_send_deauth_disassoc(sdata, req->bssid,
3475 IEEE80211_STYPE_DEAUTH, 3545 IEEE80211_STYPE_DEAUTH,
3476 req->reason_code, true, 3546 req->reason_code, true,
3477 frame_buf); 3547 frame_buf);
3548 }
3549
3478 mutex_unlock(&ifmgd->mtx); 3550 mutex_unlock(&ifmgd->mtx);
3479 3551
3480 __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN); 3552 __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 635c3250c668..507121dad082 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -116,6 +116,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
116 if (!ieee80211_sdata_running(sdata)) 116 if (!ieee80211_sdata_running(sdata))
117 continue; 117 continue;
118 118
119 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
120 continue;
121
119 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 122 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
120 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); 123 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
121 124
@@ -144,6 +147,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
144 147
145 mutex_lock(&local->iflist_mtx); 148 mutex_lock(&local->iflist_mtx);
146 list_for_each_entry(sdata, &local->interfaces, list) { 149 list_for_each_entry(sdata, &local->interfaces, list) {
150 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
151 continue;
152
147 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 153 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
148 clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); 154 clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
149 155
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 6e4fd32c6617..10de668eb9f6 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -56,7 +56,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
56 if (!ref) 56 if (!ref)
57 return; 57 return;
58 58
59 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 59 sband = local->hw.wiphy->bands[local->oper_channel->band];
60 60
61 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 61 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
62 set_sta_flag(sta, WLAN_STA_RATE_CONTROL); 62 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0cb4edee6af5..b382605c5733 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -60,7 +60,9 @@ static inline int should_drop_frame(struct sk_buff *skb,
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
62 62
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
64 RX_FLAG_FAILED_PLCP_CRC |
65 RX_FLAG_AMPDU_IS_ZEROLEN))
64 return 1; 66 return 1;
65 if (unlikely(skb->len < 16 + present_fcs_len)) 67 if (unlikely(skb->len < 16 + present_fcs_len))
66 return 1; 68 return 1;
@@ -91,6 +93,13 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
91 if (status->flag & RX_FLAG_HT) /* HT info */ 93 if (status->flag & RX_FLAG_HT) /* HT info */
92 len += 3; 94 len += 3;
93 95
96 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
97 /* padding */
98 while (len & 3)
99 len++;
100 len += 8;
101 }
102
94 return len; 103 return len;
95} 104}
96 105
@@ -215,6 +224,37 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
215 pos++; 224 pos++;
216 *pos++ = status->rate_idx; 225 *pos++ = status->rate_idx;
217 } 226 }
227
228 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
229 u16 flags = 0;
230
231 /* ensure 4 byte alignment */
232 while ((pos - (u8 *)rthdr) & 3)
233 pos++;
234 rthdr->it_present |=
235 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
236 put_unaligned_le32(status->ampdu_reference, pos);
237 pos += 4;
238 if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
239 flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
240 if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
241 flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
242 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
243 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
244 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
245 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
246 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
247 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
248 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
249 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
250 put_unaligned_le16(flags, pos);
251 pos += 2;
252 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
253 *pos++ = status->ampdu_delimiter_crc;
254 else
255 *pos++ = 0;
256 *pos++ = 0;
257 }
218} 258}
219 259
220/* 260/*
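Radiotap requires each u32 field to start on a 4-byte boundary, hence the `while ((pos - (u8 *)rthdr) & 3) pos++;` padding step before the new A-MPDU status field, which is then written little-endian regardless of host byte order. A self-contained sketch of the align-then-pack sequence; the helpers imitate put_unaligned_le32()/put_unaligned_le16(), and the offsets and values are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v; p[1] = v >> 8;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *start = buf;
	uint8_t *pos = buf + 9;		/* pretend earlier fields ended here */
	memset(buf, 0, sizeof(buf));

	/* pad to a 4-byte boundary, as radiotap requires for u32 fields */
	while ((pos - start) & 3)
		*pos++ = 0;

	uint8_t *field = pos;		/* aligned start of the A-MPDU field */
	put_le32(pos, 0xdeadbeefu);	/* reference number */
	pos += 4;
	put_le16(pos, 0x0010);		/* flags word */
	pos += 2;
	*pos++ = 0x5a;			/* delimiter CRC */
	*pos++ = 0;			/* reserved */

	printf("field at offset %ld, next free at %ld\n",
	       (long)(field - start), (long)(pos - start));
	return 0;
}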
@@ -2268,7 +2308,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2268 2308
2269 goto queue; 2309 goto queue;
2270 case WLAN_CATEGORY_SPECTRUM_MGMT: 2310 case WLAN_CATEGORY_SPECTRUM_MGMT:
2271 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 2311 if (status->band != IEEE80211_BAND_5GHZ)
2272 break; 2312 break;
2273 2313
2274 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2314 if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2772 if (!bssid) { 2812 if (!bssid) {
2773 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1)) 2813 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2774 return 0; 2814 return 0;
2775 } else if (!ieee80211_bssid_match(bssid, 2815 } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
2776 sdata->vif.addr)) {
2777 /* 2816 /*
2778 * Accept public action frames even when the 2817 * Accept public action frames even when the
2779 * BSSID doesn't match, this is used for P2P 2818 * BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2793 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2)) 2832 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
2794 return 0; 2833 return 0;
2795 break; 2834 break;
2835 case NL80211_IFTYPE_P2P_DEVICE:
2836 if (!ieee80211_is_public_action(hdr, skb->len) &&
2837 !ieee80211_is_probe_req(hdr->frame_control) &&
2838 !ieee80211_is_probe_resp(hdr->frame_control) &&
2839 !ieee80211_is_beacon(hdr->frame_control))
2840 return 0;
2841 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2842 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2843 break;
2796 default: 2844 default:
2797 /* should never get here */ 2845 /* should never get here */
2798 WARN_ON(1); 2846 WARN_ON_ONCE(1);
2799 break; 2847 break;
2800 } 2848 }
2801 2849
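A P2P device has no BSS of its own, so the new prepare_for_handlers() case only lets discovery-related management frames through and demotes anything not addressed to the vif. A compact sketch of that accept filter, with an illustrative enum in place of the real frame_control tests:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative frame classes standing in for the frame_control checks. */
enum frame_kind { PUBLIC_ACTION, PROBE_REQ, PROBE_RESP, BEACON, DATA, AUTH };

/* A P2P device has no BSS: only discovery-related management passes. */
static bool p2p_dev_accepts(enum frame_kind kind)
{
	switch (kind) {
	case PUBLIC_ACTION:
	case PROBE_REQ:
	case PROBE_RESP:
	case BEACON:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("probe req: %d, data: %d\n",
	       p2p_dev_accepts(PROBE_REQ), p2p_dev_accepts(DATA));
	return 0;
}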
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 839dd9737989..740e414d44f4 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -416,7 +416,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
416 local->scan_req->ssids[i].ssid_len, 416 local->scan_req->ssids[i].ssid_len,
417 local->scan_req->ie, local->scan_req->ie_len, 417 local->scan_req->ie, local->scan_req->ie_len,
418 local->scan_req->rates[band], false, 418 local->scan_req->rates[band], false,
419 local->scan_req->no_cck); 419 local->scan_req->no_cck,
420 local->hw.conf.channel);
420 421
421 /* 422 /*
422 * After sending probe requests, wait for probe responses 423 * After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
479 if (local->ops->hw_scan) { 480 if (local->ops->hw_scan) {
480 __set_bit(SCAN_HW_SCANNING, &local->scanning); 481 __set_bit(SCAN_HW_SCANNING, &local->scanning);
481 } else if ((req->n_channels == 1) && 482 } else if ((req->n_channels == 1) &&
482 (req->channels[0]->center_freq == 483 (req->channels[0] == local->oper_channel)) {
483 local->hw.conf.channel->center_freq)) { 484 /*
484 485 * If we are scanning only on the operating channel
485 /* If we are scanning only on the current channel, then 486 * then we do not need to stop normal activities
486 * we do not need to stop normal activities
487 */ 487 */
488 unsigned long next_delay; 488 unsigned long next_delay;
489 489
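Comparing `req->channels[0] == local->oper_channel` is an identity test on entries of the wiphy's single channel table, which is stricter than the old center-frequency comparison (a raw frequency match says nothing about which channel struct is actually in use). A toy illustration, with simplified types:

#include <stdio.h>

struct channel {
	int center_freq;
	int band;
};

/* one canonical table, as a wiphy keeps one struct per channel */
static struct channel channels[] = {
	{ 2412, 0 },
	{ 5200, 1 },
};

int main(void)
{
	struct channel *oper = &channels[1];
	struct channel *req  = &channels[1];

	/* identity comparison: same table entry, not merely same number */
	if (req == oper)
		printf("scanning on the operating channel only\n");
	return 0;
}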
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8cd72914cdaf..b0801b7d572d 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -519,19 +519,27 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
519 u64 cookie = (unsigned long)skb; 519 u64 cookie = (unsigned long)skb;
520 acked = info->flags & IEEE80211_TX_STAT_ACK; 520 acked = info->flags & IEEE80211_TX_STAT_ACK;
521 521
522 /*
523 * TODO: When we have non-netdev frame TX,
524 * we cannot use skb->dev->ieee80211_ptr
525 */
526
527 if (ieee80211_is_nullfunc(hdr->frame_control) || 522 if (ieee80211_is_nullfunc(hdr->frame_control) ||
528 ieee80211_is_qos_nullfunc(hdr->frame_control)) 523 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
529 cfg80211_probe_status(skb->dev, hdr->addr1, 524 cfg80211_probe_status(skb->dev, hdr->addr1,
530 cookie, acked, GFP_ATOMIC); 525 cookie, acked, GFP_ATOMIC);
531 else 526 } else if (skb->dev) {
532 cfg80211_mgmt_tx_status( 527 cfg80211_mgmt_tx_status(
533 skb->dev->ieee80211_ptr, cookie, skb->data, 528 skb->dev->ieee80211_ptr, cookie, skb->data,
534 skb->len, acked, GFP_ATOMIC); 529 skb->len, acked, GFP_ATOMIC);
530 } else {
531 struct ieee80211_sub_if_data *p2p_sdata;
532
533 rcu_read_lock();
534
535 p2p_sdata = rcu_dereference(local->p2p_sdata);
536 if (p2p_sdata) {
537 cfg80211_mgmt_tx_status(
538 &p2p_sdata->wdev, cookie, skb->data,
539 skb->len, acked, GFP_ATOMIC);
540 }
541 rcu_read_unlock();
542 }
535 } 543 }
536 544
537 if (unlikely(info->ack_frame_id)) { 545 if (unlikely(info->ack_frame_id)) {
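When the status skb has no netdev (the new P2P-device TX path), the fallback sdata is published via RCU, so the lookup sits inside rcu_read_lock()/rcu_read_unlock() and must tolerate a NULL pointer. A userspace sketch of the access shape; the RCU macros below are no-op stand-ins for the kernel primitives, not real synchronization:

#include <stdio.h>

/* No-op stand-ins so the shape of the kernel pattern compiles here. */
#define rcu_read_lock()		do { } while (0)
#define rcu_read_unlock()	do { } while (0)
#define rcu_dereference(p)	(p)

struct sdata {
	const char *name;
};

static struct sdata *p2p_sdata; /* published elsewhere via rcu_assign_pointer */

static void report_tx_status(int have_netdev)
{
	if (have_netdev) {
		printf("status via the frame's own netdev\n");
		return;
	}

	rcu_read_lock();
	struct sdata *p2p = rcu_dereference(p2p_sdata);
	if (p2p)			/* may legitimately be NULL */
		printf("status via %s\n", p2p->name);
	rcu_read_unlock();
}

int main(void)
{
	static struct sdata dev = { "p2p-dev-wlan0" };

	report_tx_status(1);
	p2p_sdata = &dev;
	report_tx_status(0);
	return 0;
}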
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c6d33b55b2df..18d9c8a52e9e 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -24,7 +24,7 @@
24 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 24 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ 25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
26 __entry->p2p = sdata->vif.p2p; \ 26 __entry->p2p = sdata->vif.p2p; \
27 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 27 __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
28#define VIF_PR_FMT " vif:%s(%d%s)" 28#define VIF_PR_FMT " vif:%s(%d%s)"
29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" 29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
30 30
@@ -274,9 +274,12 @@ TRACE_EVENT(drv_config,
274 __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; 274 __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
275 __entry->max_sleep_period = local->hw.conf.max_sleep_period; 275 __entry->max_sleep_period = local->hw.conf.max_sleep_period;
276 __entry->listen_interval = local->hw.conf.listen_interval; 276 __entry->listen_interval = local->hw.conf.listen_interval;
277 __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count; 277 __entry->long_frame_max_tx_count =
278 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; 278 local->hw.conf.long_frame_max_tx_count;
279 __entry->center_freq = local->hw.conf.channel->center_freq; 279 __entry->short_frame_max_tx_count =
280 local->hw.conf.short_frame_max_tx_count;
281 __entry->center_freq = local->hw.conf.channel ?
282 local->hw.conf.channel->center_freq : 0;
280 __entry->channel_type = local->hw.conf.channel_type; 283 __entry->channel_type = local->hw.conf.channel_type;
281 __entry->smps = local->hw.conf.smps_mode; 284 __entry->smps = local->hw.conf.smps_mode;
282 ), 285 ),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..29eb4e678235 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -55,7 +55,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
55 if (WARN_ON_ONCE(info->control.rates[0].idx < 0)) 55 if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
56 return 0; 56 return 0;
57 57
58 sband = local->hw.wiphy->bands[tx->channel->band]; 58 sband = local->hw.wiphy->bands[info->band];
59 txrate = &sband->bitrates[info->control.rates[0].idx]; 59 txrate = &sband->bitrates[info->control.rates[0].idx];
60 60
61 erp = txrate->flags & IEEE80211_RATE_ERP_G; 61 erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -615,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
615 615
616 memset(&txrc, 0, sizeof(txrc)); 616 memset(&txrc, 0, sizeof(txrc));
617 617
618 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 618 sband = tx->local->hw.wiphy->bands[info->band];
619 619
620 len = min_t(u32, tx->skb->len + FCS_LEN, 620 len = min_t(u32, tx->skb->len + FCS_LEN,
621 tx->local->hw.wiphy->frag_threshold); 621 tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
626 txrc.bss_conf = &tx->sdata->vif.bss_conf; 626 txrc.bss_conf = &tx->sdata->vif.bss_conf;
627 txrc.skb = tx->skb; 627 txrc.skb = tx->skb;
628 txrc.reported_rate.idx = -1; 628 txrc.reported_rate.idx = -1;
629 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band]; 629 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
630 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) 630 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
631 txrc.max_rate_idx = -1; 631 txrc.max_rate_idx = -1;
632 else 632 else
633 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 633 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
634 memcpy(txrc.rate_idx_mcs_mask, 634 memcpy(txrc.rate_idx_mcs_mask,
635 tx->sdata->rc_rateidx_mcs_mask[tx->channel->band], 635 tx->sdata->rc_rateidx_mcs_mask[info->band],
636 sizeof(txrc.rate_idx_mcs_mask)); 636 sizeof(txrc.rate_idx_mcs_mask));
637 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || 637 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
638 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || 638 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
667 "scanning and associated. Target station: " 667 "scanning and associated. Target station: "
668 "%pM on %d GHz band\n", 668 "%pM on %d GHz band\n",
669 tx->sdata->name, hdr->addr1, 669 tx->sdata->name, hdr->addr1,
670 tx->channel->band ? 5 : 2)) 670 info->band ? 5 : 2))
671 return TX_DROP; 671 return TX_DROP;
672 672
673 /* 673 /*
@@ -1131,7 +1131,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1131 tx->skb = skb; 1131 tx->skb = skb;
1132 tx->local = local; 1132 tx->local = local;
1133 tx->sdata = sdata; 1133 tx->sdata = sdata;
1134 tx->channel = local->hw.conf.channel;
1135 __skb_queue_head_init(&tx->skbs); 1134 __skb_queue_head_init(&tx->skbs);
1136 1135
1137 /* 1136 /*
@@ -1204,6 +1203,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1204 struct sk_buff_head *skbs, 1203 struct sk_buff_head *skbs,
1205 bool txpending) 1204 bool txpending)
1206{ 1205{
1206 struct ieee80211_tx_control control;
1207 struct sk_buff *skb, *tmp; 1207 struct sk_buff *skb, *tmp;
1208 unsigned long flags; 1208 unsigned long flags;
1209 1209
@@ -1240,10 +1240,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1240 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1240 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1241 1241
1242 info->control.vif = vif; 1242 info->control.vif = vif;
1243 info->control.sta = sta; 1243 control.sta = sta;
1244 1244
1245 __skb_unlink(skb, skbs); 1245 __skb_unlink(skb, skbs);
1246 drv_tx(local, skb); 1246 drv_tx(local, &control, skb);
1247 } 1247 }
1248 1248
1249 return true; 1249 return true;
@@ -1399,8 +1399,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1399 goto out; 1399 goto out;
1400 } 1400 }
1401 1401
1402 tx.channel = local->hw.conf.channel; 1402 info->band = local->hw.conf.channel->band;
1403 info->band = tx.channel->band;
1404 1403
1405 /* set up hw_queue value early */ 1404 /* set up hw_queue value early */
1406 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || 1405 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1720 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1719 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1721 struct ieee80211_local *local = sdata->local; 1720 struct ieee80211_local *local = sdata->local;
1722 struct ieee80211_tx_info *info; 1721 struct ieee80211_tx_info *info;
1723 int ret = NETDEV_TX_BUSY, head_need; 1722 int head_need;
1724 u16 ethertype, hdrlen, meshhdrlen = 0; 1723 u16 ethertype, hdrlen, meshhdrlen = 0;
1725 __le16 fc; 1724 __le16 fc;
1726 struct ieee80211_hdr hdr; 1725 struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1736 u32 info_flags = 0; 1735 u32 info_flags = 0;
1737 u16 info_id = 0; 1736 u16 info_id = 0;
1738 1737
1739 if (unlikely(skb->len < ETH_HLEN)) { 1738 if (unlikely(skb->len < ETH_HLEN))
1740 ret = NETDEV_TX_OK;
1741 goto fail; 1739 goto fail;
1742 }
1743 1740
1744 /* convert Ethernet header to proper 802.11 header (based on 1741 /* convert Ethernet header to proper 802.11 header (based on
1745 * operation mode) */ 1742 * operation mode) */
@@ -1787,7 +1784,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1787 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { 1784 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1788 /* Do not send frames with mesh_ttl == 0 */ 1785 /* Do not send frames with mesh_ttl == 0 */
1789 sdata->u.mesh.mshstats.dropped_frames_ttl++; 1786 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1790 ret = NETDEV_TX_OK;
1791 goto fail; 1787 goto fail;
1792 } 1788 }
1793 rcu_read_lock(); 1789 rcu_read_lock();
@@ -1811,37 +1807,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1811 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1807 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1812 sdata, NULL, NULL); 1808 sdata, NULL, NULL);
1813 } else { 1809 } else {
1814 int is_mesh_mcast = 1; 1810 /* DS -> MBSS (802.11-2012 13.11.3.3).
1815 const u8 *mesh_da; 1811 * For unicast with unknown forwarding information,
1812 * destination might be in the MBSS or if that fails
1813 * forwarded to another mesh gate. In either case
1814 * resolution will be handled in ieee80211_xmit(), so
1815 * leave the original DA. This also works for mcast */
1816 const u8 *mesh_da = skb->data;
1817
1818 if (mppath)
1819 mesh_da = mppath->mpp;
1820 else if (mpath)
1821 mesh_da = mpath->dst;
1822 rcu_read_unlock();
1816 1823
1817 if (is_multicast_ether_addr(skb->data))
1818 /* DA TA mSA AE:SA */
1819 mesh_da = skb->data;
1820 else {
1821 static const u8 bcast[ETH_ALEN] =
1822 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1823 if (mppath) {
1824 /* RA TA mDA mSA AE:DA SA */
1825 mesh_da = mppath->mpp;
1826 is_mesh_mcast = 0;
1827 } else if (mpath) {
1828 mesh_da = mpath->dst;
1829 is_mesh_mcast = 0;
1830 } else {
1831 /* DA TA mSA AE:SA */
1832 mesh_da = bcast;
1833 }
1834 }
1835 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1824 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1836 mesh_da, sdata->vif.addr); 1825 mesh_da, sdata->vif.addr);
1837 rcu_read_unlock(); 1826 if (is_multicast_ether_addr(mesh_da))
1838 if (is_mesh_mcast) 1827 /* DA TA mSA AE:SA */
1839 meshhdrlen = 1828 meshhdrlen =
1840 ieee80211_new_mesh_header(&mesh_hdr, 1829 ieee80211_new_mesh_header(&mesh_hdr,
1841 sdata, 1830 sdata,
1842 skb->data + ETH_ALEN, 1831 skb->data + ETH_ALEN,
1843 NULL); 1832 NULL);
1844 else 1833 else
1834 /* RA TA mDA mSA AE:DA SA */
1845 meshhdrlen = 1835 meshhdrlen =
1846 ieee80211_new_mesh_header(&mesh_hdr, 1836 ieee80211_new_mesh_header(&mesh_hdr,
1847 sdata, 1837 sdata,
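The rewritten mesh branch starts from the DA already in the Ethernet header and only overrides it when proxied (mppath->mpp) or direct (mpath->dst) forwarding information exists; the multicast test on the final address then selects the short or the 6-address mesh header. A sketch of the selection logic with illustrative structures:

#include <stdio.h>

struct path { const unsigned char *dst; };

static int is_multicast(const unsigned char *a)
{
	return a[0] & 0x01; /* group bit of the first octet */
}

static const unsigned char *pick_mesh_da(const unsigned char *eth_da,
					 const struct path *mpp,
					 const struct path *mpath)
{
	const unsigned char *da = eth_da;	/* default: leave the DA alone */

	if (mpp)
		da = mpp->dst;			/* proxied: RA TA mDA mSA AE:DA SA */
	else if (mpath)
		da = mpath->dst;		/* direct mesh destination */
	return da;
}

int main(void)
{
	static const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const unsigned char *da = pick_mesh_da(bcast, NULL, NULL);

	printf("%s mesh header\n",
	       is_multicast(da) ? "short (AE:SA)" : "6-address (AE:DA SA)");
	return 0;
}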
@@ -1880,10 +1870,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1880 1870
1881 if (tdls_direct) { 1871 if (tdls_direct) {
1882 /* link during setup - throw out frames to peer */ 1872 /* link during setup - throw out frames to peer */
1883 if (!tdls_auth) { 1873 if (!tdls_auth)
1884 ret = NETDEV_TX_OK;
1885 goto fail; 1874 goto fail;
1886 }
1887 1875
1888 /* DA SA BSSID */ 1876 /* DA SA BSSID */
1889 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1877 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1917,7 +1905,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1917 hdrlen = 24; 1905 hdrlen = 24;
1918 break; 1906 break;
1919 default: 1907 default:
1920 ret = NETDEV_TX_OK;
1921 goto fail; 1908 goto fail;
1922 } 1909 }
1923 1910
@@ -1962,7 +1949,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1962 1949
1963 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); 1950 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
1964 1951
1965 ret = NETDEV_TX_OK;
1966 goto fail; 1952 goto fail;
1967 } 1953 }
1968 1954
@@ -2017,10 +2003,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2017 skb = skb_clone(skb, GFP_ATOMIC); 2003 skb = skb_clone(skb, GFP_ATOMIC);
2018 kfree_skb(tmp_skb); 2004 kfree_skb(tmp_skb);
2019 2005
2020 if (!skb) { 2006 if (!skb)
2021 ret = NETDEV_TX_OK;
2022 goto fail; 2007 goto fail;
2023 }
2024 } 2008 }
2025 2009
2026 hdr.frame_control = fc; 2010 hdr.frame_control = fc;
@@ -2123,10 +2107,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2123 return NETDEV_TX_OK; 2107 return NETDEV_TX_OK;
2124 2108
2125 fail: 2109 fail:
2126 if (ret == NETDEV_TX_OK) 2110 dev_kfree_skb(skb);
2127 dev_kfree_skb(skb); 2111 return NETDEV_TX_OK;
2128
2129 return ret;
2130} 2112}
2131 2113
2132 2114
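With the fail label now freeing the skb unconditionally, every error branch collapses to `goto fail` and the handler can only return NETDEV_TX_OK, so the ret bookkeeping disappears; in a .ndo_start_xmit, OK means the skb was consumed, not that it was sent. A minimal sketch of that contract with stand-in types:

#include <stdio.h>
#include <stdlib.h>

#define NETDEV_TX_OK 0

struct pkt { size_t len; };

static int start_xmit(struct pkt *p)
{
	if (p->len < 14)	/* runt frame: drop it */
		goto fail;

	printf("transmitted %zu bytes\n", p->len);
	free(p);		/* success path consumes the packet too */
	return NETDEV_TX_OK;

fail:
	free(p);		/* every failure path ends here exactly once */
	return NETDEV_TX_OK;	/* "OK" means "consumed", not "sent" */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->len = 10;
	return start_xmit(p);
}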
@@ -2301,12 +2283,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2301 struct ieee80211_sub_if_data *sdata = NULL; 2283 struct ieee80211_sub_if_data *sdata = NULL;
2302 struct ieee80211_if_ap *ap = NULL; 2284 struct ieee80211_if_ap *ap = NULL;
2303 struct beacon_data *beacon; 2285 struct beacon_data *beacon;
2304 struct ieee80211_supported_band *sband; 2286 enum ieee80211_band band = local->oper_channel->band;
2305 enum ieee80211_band band = local->hw.conf.channel->band;
2306 struct ieee80211_tx_rate_control txrc; 2287 struct ieee80211_tx_rate_control txrc;
2307 2288
2308 sband = local->hw.wiphy->bands[band];
2309
2310 rcu_read_lock(); 2289 rcu_read_lock();
2311 2290
2312 sdata = vif_to_sdata(vif); 2291 sdata = vif_to_sdata(vif);
@@ -2416,7 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2416 memset(mgmt, 0, hdr_len); 2395 memset(mgmt, 0, hdr_len);
2417 mgmt->frame_control = 2396 mgmt->frame_control =
2418 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2397 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2419 memset(mgmt->da, 0xff, ETH_ALEN); 2398 eth_broadcast_addr(mgmt->da);
2420 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 2399 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2421 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); 2400 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2422 mgmt->u.beacon.beacon_int = 2401 mgmt->u.beacon.beacon_int =
@@ -2428,9 +2407,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2428 *pos++ = WLAN_EID_SSID; 2407 *pos++ = WLAN_EID_SSID;
2429 *pos++ = 0x0; 2408 *pos++ = 0x0;
2430 2409
2431 if (ieee80211_add_srates_ie(sdata, skb, true) || 2410 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
2432 mesh_add_ds_params_ie(skb, sdata) || 2411 mesh_add_ds_params_ie(skb, sdata) ||
2433 ieee80211_add_ext_srates_ie(sdata, skb, true) || 2412 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
2434 mesh_add_rsn_ie(skb, sdata) || 2413 mesh_add_rsn_ie(skb, sdata) ||
2435 mesh_add_ht_cap_ie(skb, sdata) || 2414 mesh_add_ht_cap_ie(skb, sdata) ||
2436 mesh_add_ht_oper_ie(skb, sdata) || 2415 mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2453,12 +2432,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2453 2432
2454 memset(&txrc, 0, sizeof(txrc)); 2433 memset(&txrc, 0, sizeof(txrc));
2455 txrc.hw = hw; 2434 txrc.hw = hw;
2456 txrc.sband = sband; 2435 txrc.sband = local->hw.wiphy->bands[band];
2457 txrc.bss_conf = &sdata->vif.bss_conf; 2436 txrc.bss_conf = &sdata->vif.bss_conf;
2458 txrc.skb = skb; 2437 txrc.skb = skb;
2459 txrc.reported_rate.idx = -1; 2438 txrc.reported_rate.idx = -1;
2460 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; 2439 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2461 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) 2440 if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
2462 txrc.max_rate_idx = -1; 2441 txrc.max_rate_idx = -1;
2463 else 2442 else
2464 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; 2443 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2482,7 +2461,8 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
2482 struct ieee80211_vif *vif) 2461 struct ieee80211_vif *vif)
2483{ 2462{
2484 struct ieee80211_if_ap *ap = NULL; 2463 struct ieee80211_if_ap *ap = NULL;
2485 struct sk_buff *presp = NULL, *skb = NULL; 2464 struct sk_buff *skb = NULL;
2465 struct probe_resp *presp = NULL;
2486 struct ieee80211_hdr *hdr; 2466 struct ieee80211_hdr *hdr;
2487 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 2467 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2488 2468
@@ -2496,10 +2476,12 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
2496 if (!presp) 2476 if (!presp)
2497 goto out; 2477 goto out;
2498 2478
2499 skb = skb_copy(presp, GFP_ATOMIC); 2479 skb = dev_alloc_skb(presp->len);
2500 if (!skb) 2480 if (!skb)
2501 goto out; 2481 goto out;
2502 2482
2483 memcpy(skb_put(skb, presp->len), presp->data, presp->len);
2484
2503 hdr = (struct ieee80211_hdr *) skb->data; 2485 hdr = (struct ieee80211_hdr *) skb->data;
2504 memset(hdr->addr1, 0, sizeof(hdr->addr1)); 2486 memset(hdr->addr1, 0, sizeof(hdr->addr1));
2505 2487
@@ -2610,9 +2592,9 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2610 memset(hdr, 0, sizeof(*hdr)); 2592 memset(hdr, 0, sizeof(*hdr));
2611 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2593 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2612 IEEE80211_STYPE_PROBE_REQ); 2594 IEEE80211_STYPE_PROBE_REQ);
2613 memset(hdr->addr1, 0xff, ETH_ALEN); 2595 eth_broadcast_addr(hdr->addr1);
2614 memcpy(hdr->addr2, vif->addr, ETH_ALEN); 2596 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2615 memset(hdr->addr3, 0xff, ETH_ALEN); 2597 eth_broadcast_addr(hdr->addr3);
2616 2598
2617 pos = skb_put(skb, ie_ssid_len); 2599 pos = skb_put(skb, ie_ssid_len);
2618 *pos++ = WLAN_EID_SSID; 2600 *pos++ = WLAN_EID_SSID;
@@ -2709,8 +2691,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2709 info = IEEE80211_SKB_CB(skb); 2691 info = IEEE80211_SKB_CB(skb);
2710 2692
2711 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2693 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2712 tx.channel = local->hw.conf.channel; 2694 info->band = local->oper_channel->band;
2713 info->band = tx.channel->band;
2714 2695
2715 if (invoke_tx_handlers(&tx)) 2696 if (invoke_tx_handlers(&tx))
2716 skb = NULL; 2697 skb = NULL;
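
The mac80211/tx.c hunks above all serve one cleanup: ieee80211_subif_start_xmit() used to thread a ret variable through its error paths and conditionally free the skb at the fail label; since every failure site wanted the same NETDEV_TX_OK result (the frame is consumed whether or not it was sent), the per-site assignments collapse into an unconditional free-and-return. A minimal sketch of the resulting shape, with a hypothetical prepare() standing in for the real header-building logic:

        /* Sketch only; prepare() is a made-up stand-in. */
        static netdev_tx_t xmit_sketch(struct sk_buff *skb)
        {
                if (prepare(skb) < 0)
                        goto fail;              /* no per-site ret bookkeeping */
                /* ... hand the frame to the TX path ... */
                return NETDEV_TX_OK;
        fail:
                dev_kfree_skb(skb);             /* always consume the frame */
                return NETDEV_TX_OK;
        }

The other recurring change in these hunks, eth_broadcast_addr(addr), is simply the named helper for memset(addr, 0xff, ETH_ALEN).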
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 39b82fee4904..471fb0516c99 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -276,6 +276,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
276 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 276 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
277 int ac; 277 int ac;
278 278
279 if (!sdata->dev)
280 continue;
281
279 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) 282 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
280 continue; 283 continue;
281 284
@@ -364,6 +367,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
364 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 367 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
365 int ac; 368 int ac;
366 369
370 if (!sdata->dev)
371 continue;
372
367 for (ac = 0; ac < n_acs; ac++) { 373 for (ac = 0; ac < n_acs; ac++) {
368 if (sdata->vif.hw_queue[ac] == queue || 374 if (sdata->vif.hw_queue[ac] == queue ||
369 sdata->vif.cab_queue == queue) 375 sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
768 elem_parse_failed = true; 774 elem_parse_failed = true;
769 break; 775 break;
770 case WLAN_EID_CHANNEL_SWITCH: 776 case WLAN_EID_CHANNEL_SWITCH:
771 elems->ch_switch_elem = pos; 777 if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
772 elems->ch_switch_elem_len = elen; 778 elem_parse_failed = true;
779 break;
780 }
781 elems->ch_switch_ie = (void *)pos;
773 break; 782 break;
774 case WLAN_EID_QUIET: 783 case WLAN_EID_QUIET:
775 if (!elems->quiet_elem) { 784 if (!elems->quiet_elem) {
@@ -832,7 +841,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
832 841
833 memset(&qparam, 0, sizeof(qparam)); 842 memset(&qparam, 0, sizeof(qparam));
834 843
835 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 844 use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
836 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 845 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
837 846
838 /* 847 /*
@@ -899,7 +908,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
899 drv_conf_tx(local, sdata, ac, &qparam); 908 drv_conf_tx(local, sdata, ac, &qparam);
900 } 909 }
901 910
902 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 911 if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
912 sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
903 sdata->vif.bss_conf.qos = enable_qos; 913 sdata->vif.bss_conf.qos = enable_qos;
904 if (bss_notify) 914 if (bss_notify)
905 ieee80211_bss_info_change_notify(sdata, 915 ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +929,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
919 if ((supp_rates[i] & 0x7f) * 5 > 110) 929 if ((supp_rates[i] & 0x7f) * 5 > 110)
920 have_higher_than_11mbit = 1; 930 have_higher_than_11mbit = 1;
921 931
922 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 932 if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
923 have_higher_than_11mbit) 933 have_higher_than_11mbit)
924 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; 934 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
925 else 935 else
@@ -1100,6 +1110,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1100 1110
1101struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1111struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1102 u8 *dst, u32 ratemask, 1112 u8 *dst, u32 ratemask,
1113 struct ieee80211_channel *chan,
1103 const u8 *ssid, size_t ssid_len, 1114 const u8 *ssid, size_t ssid_len,
1104 const u8 *ie, size_t ie_len, 1115 const u8 *ie, size_t ie_len,
1105 bool directed) 1116 bool directed)
@@ -1109,7 +1120,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1109 struct ieee80211_mgmt *mgmt; 1120 struct ieee80211_mgmt *mgmt;
1110 size_t buf_len; 1121 size_t buf_len;
1111 u8 *buf; 1122 u8 *buf;
1112 u8 chan; 1123 u8 chan_no;
1113 1124
1114 /* FIXME: come up with a proper value */ 1125 /* FIXME: come up with a proper value */
1115 buf = kmalloc(200 + ie_len, GFP_KERNEL); 1126 buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1133,12 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1122 * badly-behaved APs don't respond when this parameter is included. 1133 * badly-behaved APs don't respond when this parameter is included.
1123 */ 1134 */
1124 if (directed) 1135 if (directed)
1125 chan = 0; 1136 chan_no = 0;
1126 else 1137 else
1127 chan = ieee80211_frequency_to_channel( 1138 chan_no = ieee80211_frequency_to_channel(chan->center_freq);
1128 local->hw.conf.channel->center_freq);
1129 1139
1130 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, 1140 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
1131 local->hw.conf.channel->band, 1141 ratemask, chan_no);
1132 ratemask, chan);
1133 1142
1134 skb = ieee80211_probereq_get(&local->hw, &sdata->vif, 1143 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
1135 ssid, ssid_len, 1144 ssid, ssid_len,
@@ -1154,11 +1163,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1154void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1163void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1155 const u8 *ssid, size_t ssid_len, 1164 const u8 *ssid, size_t ssid_len,
1156 const u8 *ie, size_t ie_len, 1165 const u8 *ie, size_t ie_len,
1157 u32 ratemask, bool directed, bool no_cck) 1166 u32 ratemask, bool directed, bool no_cck,
1167 struct ieee80211_channel *channel)
1158{ 1168{
1159 struct sk_buff *skb; 1169 struct sk_buff *skb;
1160 1170
1161 skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len, 1171 skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
1172 ssid, ssid_len,
1162 ie, ie_len, directed); 1173 ie, ie_len, directed);
1163 if (skb) { 1174 if (skb) {
1164 if (no_cck) 1175 if (no_cck)
@@ -1359,7 +1370,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1359 switch (sdata->vif.type) { 1370 switch (sdata->vif.type) {
1360 case NL80211_IFTYPE_STATION: 1371 case NL80211_IFTYPE_STATION:
1361 changed |= BSS_CHANGED_ASSOC | 1372 changed |= BSS_CHANGED_ASSOC |
1362 BSS_CHANGED_ARP_FILTER; 1373 BSS_CHANGED_ARP_FILTER |
1374 BSS_CHANGED_PS;
1363 mutex_lock(&sdata->u.mgd.mtx); 1375 mutex_lock(&sdata->u.mgd.mtx);
1364 ieee80211_bss_info_change_notify(sdata, changed); 1376 ieee80211_bss_info_change_notify(sdata, changed);
1365 mutex_unlock(&sdata->u.mgd.mtx); 1377 mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1397,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1385 case NL80211_IFTYPE_MONITOR: 1397 case NL80211_IFTYPE_MONITOR:
1386 /* ignore virtual */ 1398 /* ignore virtual */
1387 break; 1399 break;
1400 case NL80211_IFTYPE_P2P_DEVICE:
1401 changed = BSS_CHANGED_IDLE;
1402 break;
1388 case NL80211_IFTYPE_UNSPECIFIED: 1403 case NL80211_IFTYPE_UNSPECIFIED:
1389 case NUM_NL80211_IFTYPES: 1404 case NUM_NL80211_IFTYPES:
1390 case NL80211_IFTYPE_P2P_CLIENT: 1405 case NL80211_IFTYPE_P2P_CLIENT:
@@ -1571,6 +1586,8 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
1571 list_for_each_entry(sdata, &local->interfaces, list) { 1586 list_for_each_entry(sdata, &local->interfaces, list) {
1572 if (!ieee80211_sdata_running(sdata)) 1587 if (!ieee80211_sdata_running(sdata))
1573 continue; 1588 continue;
1589 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
1590 continue;
1574 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1591 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1575 goto set; 1592 goto set;
1576 1593
@@ -1809,7 +1826,8 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
1809} 1826}
1810 1827
1811int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, 1828int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1812 struct sk_buff *skb, bool need_basic) 1829 struct sk_buff *skb, bool need_basic,
1830 enum ieee80211_band band)
1813{ 1831{
1814 struct ieee80211_local *local = sdata->local; 1832 struct ieee80211_local *local = sdata->local;
1815 struct ieee80211_supported_band *sband; 1833 struct ieee80211_supported_band *sband;
@@ -1817,7 +1835,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1817 u8 i, rates, *pos; 1835 u8 i, rates, *pos;
1818 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 1836 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
1819 1837
1820 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1838 sband = local->hw.wiphy->bands[band];
1821 rates = sband->n_bitrates; 1839 rates = sband->n_bitrates;
1822 if (rates > 8) 1840 if (rates > 8)
1823 rates = 8; 1841 rates = 8;
@@ -1840,7 +1858,8 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
1840} 1858}
1841 1859
1842int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, 1860int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1843 struct sk_buff *skb, bool need_basic) 1861 struct sk_buff *skb, bool need_basic,
1862 enum ieee80211_band band)
1844{ 1863{
1845 struct ieee80211_local *local = sdata->local; 1864 struct ieee80211_local *local = sdata->local;
1846 struct ieee80211_supported_band *sband; 1865 struct ieee80211_supported_band *sband;
@@ -1848,7 +1867,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1848 u8 i, exrates, *pos; 1867 u8 i, exrates, *pos;
1849 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 1868 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
1850 1869
1851 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1870 sband = local->hw.wiphy->bands[band];
1852 exrates = sband->n_bitrates; 1871 exrates = sband->n_bitrates;
1853 if (exrates > 8) 1872 if (exrates > 8)
1854 exrates -= 8; 1873 exrates -= 8;
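
A second theme in the util.c hunks: helpers such as ieee80211_add_srates_ie(), ieee80211_add_ext_srates_ie() and ieee80211_build_probe_req() stop reading the ambient local->hw.conf.channel and instead take the band or channel as an explicit parameter (or use local->oper_channel), so they stay correct when the hardware is temporarily tuned elsewhere. A hedged sketch of the signature style, using the real struct ieee80211_supported_band but simplified logic:

        static int add_rates_sketch(struct wiphy *wiphy, struct sk_buff *skb,
                                    enum ieee80211_band band)
        {
                struct ieee80211_supported_band *sband = wiphy->bands[band];

                if (!sband)
                        return -EINVAL;
                /* ... append up to 8 entries from sband->bitrates ... */
                return 0;
        }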
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 3c601378d27e..767cc12da0fe 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1171 goto out_err; 1171 goto out_err;
1172 } 1172 }
1173 svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 1173 svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
1174 if (!svc->stats.cpustats) 1174 if (!svc->stats.cpustats) {
1175 ret = -ENOMEM;
1175 goto out_err; 1176 goto out_err;
1177 }
1176 1178
1177 /* I'm the first user of the service */ 1179 /* I'm the first user of the service */
1178 atomic_set(&svc->usecnt, 0); 1180 atomic_set(&svc->usecnt, 0);
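
The ip_vs_ctl.c fix is the classic goto-error-path bug: jumping to out_err without assigning ret first returns whatever stale value ret happened to hold, so a failed alloc_percpu() could report success. The same pattern recurs below in ctnetlink_init() (capturing the register_pernet_subsys() return) and nfnetlink_log_init() (setting -ENOMEM before the jump). A generic sketch of the corrected shape, with a hypothetical struct:

        struct thing {                          /* hypothetical */
                void __percpu *stats;
        };

        static int create_thing(struct thing **out)
        {
                struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
                int ret;

                if (!t)
                        return -ENOMEM;
                t->stats = alloc_percpu(unsigned long);
                if (!t->stats) {
                        ret = -ENOMEM;          /* set before the jump */
                        goto out_err;
                }
                *out = t;
                return 0;
        out_err:
                kfree(t);
                return ret;
        }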
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c9bb994ae9ba..dcb27910ab3c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -255,12 +255,15 @@ static void death_by_event(unsigned long ul_conntrack)
255{ 255{
256 struct nf_conn *ct = (void *)ul_conntrack; 256 struct nf_conn *ct = (void *)ul_conntrack;
257 struct net *net = nf_ct_net(ct); 257 struct net *net = nf_ct_net(ct);
258 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
259
260 BUG_ON(ecache == NULL);
258 261
259 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { 262 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
260 /* bad luck, let's retry again */ 263 /* bad luck, let's retry again */
261 ct->timeout.expires = jiffies + 264 ecache->timeout.expires = jiffies +
262 (random32() % net->ct.sysctl_events_retry_timeout); 265 (random32() % net->ct.sysctl_events_retry_timeout);
263 add_timer(&ct->timeout); 266 add_timer(&ecache->timeout);
264 return; 267 return;
265 } 268 }
266 /* we've got the event delivered, now it's dying */ 269 /* we've got the event delivered, now it's dying */
@@ -274,6 +277,9 @@ static void death_by_event(unsigned long ul_conntrack)
274void nf_ct_insert_dying_list(struct nf_conn *ct) 277void nf_ct_insert_dying_list(struct nf_conn *ct)
275{ 278{
276 struct net *net = nf_ct_net(ct); 279 struct net *net = nf_ct_net(ct);
280 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
281
282 BUG_ON(ecache == NULL);
277 283
278 /* add this conntrack to the dying list */ 284 /* add this conntrack to the dying list */
279 spin_lock_bh(&nf_conntrack_lock); 285 spin_lock_bh(&nf_conntrack_lock);
@@ -281,10 +287,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
281 &net->ct.dying); 287 &net->ct.dying);
282 spin_unlock_bh(&nf_conntrack_lock); 288 spin_unlock_bh(&nf_conntrack_lock);
283 /* set a new timer to retry event delivery */ 289 /* set a new timer to retry event delivery */
284 setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); 290 setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
285 ct->timeout.expires = jiffies + 291 ecache->timeout.expires = jiffies +
286 (random32() % net->ct.sysctl_events_retry_timeout); 292 (random32() % net->ct.sysctl_events_retry_timeout);
287 add_timer(&ct->timeout); 293 add_timer(&ecache->timeout);
288} 294}
289EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); 295EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
290 296
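
The conntrack hunks stop reusing ct->timeout, the conntrack's own lifetime timer, for event-delivery retries on the dying list; the retry timer now lives in the ecache extension, looked up via nf_ct_ecache_find() (with a BUG_ON, since reliable event delivery implies the extension exists). A reduced sketch of the per-extension timer pattern, with hypothetical names:

        struct my_ecache {                      /* hypothetical extension */
                struct timer_list timeout;
        };

        static void retry_event(unsigned long data); /* re-arms on failure */

        static void arm_retry(struct my_ecache *e, struct nf_conn *ct,
                              unsigned long max_delay)
        {
                setup_timer(&e->timeout, retry_event, (unsigned long)ct);
                e->timeout.expires = jiffies + (random32() % max_delay);
                add_timer(&e->timeout);
        }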
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 966f5133a384..a205bd6ce294 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2813,7 +2813,8 @@ static int __init ctnetlink_init(void)
2813 goto err_unreg_subsys; 2813 goto err_unreg_subsys;
2814 } 2814 }
2815 2815
2816 if (register_pernet_subsys(&ctnetlink_net_ops)) { 2816 ret = register_pernet_subsys(&ctnetlink_net_ops);
2817 if (ret < 0) {
2817 pr_err("ctnetlink_init: cannot register pernet operations\n"); 2818 pr_err("ctnetlink_init: cannot register pernet operations\n");
2818 goto err_unreg_exp_subsys; 2819 goto err_unreg_exp_subsys;
2819 } 2820 }
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index f4db3a7bd285..16303c752213 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -542,7 +542,10 @@ static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
542 ret = nf_ct_expect_related(rtcp_exp); 542 ret = nf_ct_expect_related(rtcp_exp);
543 if (ret == 0) 543 if (ret == 0)
544 break; 544 break;
545 else if (ret != -EBUSY) { 545 else if (ret == -EBUSY) {
546 nf_ct_unexpect_related(rtp_exp);
547 continue;
548 } else if (ret < 0) {
546 nf_ct_unexpect_related(rtp_exp); 549 nf_ct_unexpect_related(rtp_exp);
547 port = 0; 550 port = 0;
548 break; 551 break;
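
The nf_nat_sip.c change refines the RTP/RTCP port scan: -EBUSY from nf_ct_expect_related() now means "this port pair is taken, drop the RTP expectation and try the next pair", while any other error aborts the whole scan. Sketch of the loop shape, with hypothetical reserve/release helpers:

        for (port = base; port < base + range; port += 2) {
                ret = reserve_pair(port);       /* hypothetical */
                if (ret == 0)
                        break;                  /* got RTP + RTCP */
                release_rtp(port);              /* hypothetical */
                if (ret == -EBUSY)
                        continue;               /* try the next pair */
                port = 0;                       /* hard failure: give up */
                break;
        }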
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..be194b144297 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -55,6 +55,7 @@ struct nfulnl_instance {
55 unsigned int qlen; /* number of nlmsgs in skb */ 55 unsigned int qlen; /* number of nlmsgs in skb */
56 struct sk_buff *skb; /* pre-allocated skb */ 56 struct sk_buff *skb; /* pre-allocated skb */
57 struct timer_list timer; 57 struct timer_list timer;
58 struct user_namespace *peer_user_ns; /* User namespace of the peer process */
58 int peer_pid; /* PID of the peer process */ 59 int peer_pid; /* PID of the peer process */
59 60
60 /* configurable parameters */ 61 /* configurable parameters */
@@ -132,7 +133,7 @@ instance_put(struct nfulnl_instance *inst)
132static void nfulnl_timer(unsigned long data); 133static void nfulnl_timer(unsigned long data);
133 134
134static struct nfulnl_instance * 135static struct nfulnl_instance *
135instance_create(u_int16_t group_num, int pid) 136instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
136{ 137{
137 struct nfulnl_instance *inst; 138 struct nfulnl_instance *inst;
138 int err; 139 int err;
@@ -162,6 +163,7 @@ instance_create(u_int16_t group_num, int pid)
162 163
163 setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); 164 setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
164 165
166 inst->peer_user_ns = user_ns;
165 inst->peer_pid = pid; 167 inst->peer_pid = pid;
166 inst->group_num = group_num; 168 inst->group_num = group_num;
167 169
@@ -480,7 +482,7 @@ __build_packet_message(struct nfulnl_instance *inst,
480 } 482 }
481 483
482 if (indev && skb_mac_header_was_set(skb)) { 484 if (indev && skb_mac_header_was_set(skb)) {
483 if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || 485 if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
484 nla_put_be16(inst->skb, NFULA_HWLEN, 486 nla_put_be16(inst->skb, NFULA_HWLEN,
485 htons(skb->dev->hard_header_len)) || 487 htons(skb->dev->hard_header_len)) ||
486 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, 488 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -503,8 +505,11 @@ __build_packet_message(struct nfulnl_instance *inst,
503 read_lock_bh(&skb->sk->sk_callback_lock); 505 read_lock_bh(&skb->sk->sk_callback_lock);
504 if (skb->sk->sk_socket && skb->sk->sk_socket->file) { 506 if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
505 struct file *file = skb->sk->sk_socket->file; 507 struct file *file = skb->sk->sk_socket->file;
506 __be32 uid = htonl(file->f_cred->fsuid); 508 __be32 uid = htonl(from_kuid_munged(inst->peer_user_ns,
507 __be32 gid = htonl(file->f_cred->fsgid); 509 file->f_cred->fsuid));
510 __be32 gid = htonl(from_kgid_munged(inst->peer_user_ns,
511 file->f_cred->fsgid));
512 /* need to unlock here since NLA_PUT may goto */
508 read_unlock_bh(&skb->sk->sk_callback_lock); 513 read_unlock_bh(&skb->sk->sk_callback_lock);
509 if (nla_put_be32(inst->skb, NFULA_UID, uid) || 514 if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
510 nla_put_be32(inst->skb, NFULA_GID, gid)) 515 nla_put_be32(inst->skb, NFULA_GID, gid))
@@ -783,7 +788,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
783 } 788 }
784 789
785 inst = instance_create(group_num, 790 inst = instance_create(group_num,
786 NETLINK_CB(skb).pid); 791 NETLINK_CB(skb).pid,
792 sk_user_ns(NETLINK_CB(skb).ssk));
787 if (IS_ERR(inst)) { 793 if (IS_ERR(inst)) {
788 ret = PTR_ERR(inst); 794 ret = PTR_ERR(inst);
789 goto out; 795 goto out;
@@ -996,8 +1002,10 @@ static int __init nfnetlink_log_init(void)
996 1002
997#ifdef CONFIG_PROC_FS 1003#ifdef CONFIG_PROC_FS
998 if (!proc_create("nfnetlink_log", 0440, 1004 if (!proc_create("nfnetlink_log", 0440,
999 proc_net_netfilter, &nful_file_ops)) 1005 proc_net_netfilter, &nful_file_ops)) {
1006 status = -ENOMEM;
1000 goto cleanup_logger; 1007 goto cleanup_logger;
1008 }
1001#endif 1009#endif
1002 return status; 1010 return status;
1003 1011
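
Two fixes land in nfnetlink_log. First, NFULA_HWTYPE carries an htons() value, so emitting it with nla_put_be32() wrote a 32-bit attribute around 16 bits of data; nla_put_be16() matches the payload. Second, each instance now remembers the user namespace of the peer that configured it, and uid/gid attributes are translated into that namespace before going on the wire. The translation helpers are the real kernel ones:

        #include <linux/uidgid.h>

        static __be32 uid_for_peer(struct user_namespace *peer_ns, kuid_t uid)
        {
                /* ids with no mapping become the overflow uid instead
                 * of leaking raw kernel-internal values to the peer */
                return htonl(from_kuid_munged(peer_ns, uid));
        }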
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index ff5f75fddb15..02a2bf49dcbd 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -363,10 +363,12 @@ static void dump_ipv4_packet(struct sbuff *m,
363 /* Max length: 15 "UID=4294967295 " */ 363 /* Max length: 15 "UID=4294967295 " */
364 if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) { 364 if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
365 read_lock_bh(&skb->sk->sk_callback_lock); 365 read_lock_bh(&skb->sk->sk_callback_lock);
366 if (skb->sk->sk_socket && skb->sk->sk_socket->file) 366 if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
367 const struct cred *cred = skb->sk->sk_socket->file->f_cred;
367 sb_add(m, "UID=%u GID=%u ", 368 sb_add(m, "UID=%u GID=%u ",
368 skb->sk->sk_socket->file->f_cred->fsuid, 369 from_kuid_munged(&init_user_ns, cred->fsuid),
369 skb->sk->sk_socket->file->f_cred->fsgid); 370 from_kgid_munged(&init_user_ns, cred->fsgid));
371 }
370 read_unlock_bh(&skb->sk->sk_callback_lock); 372 read_unlock_bh(&skb->sk->sk_callback_lock);
371 } 373 }
372 374
@@ -719,10 +721,12 @@ static void dump_ipv6_packet(struct sbuff *m,
719 /* Max length: 15 "UID=4294967295 " */ 721 /* Max length: 15 "UID=4294967295 " */
720 if ((logflags & XT_LOG_UID) && recurse && skb->sk) { 722 if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
721 read_lock_bh(&skb->sk->sk_callback_lock); 723 read_lock_bh(&skb->sk->sk_callback_lock);
722 if (skb->sk->sk_socket && skb->sk->sk_socket->file) 724 if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
725 const struct cred *cred = skb->sk->sk_socket->file->f_cred;
723 sb_add(m, "UID=%u GID=%u ", 726 sb_add(m, "UID=%u GID=%u ",
724 skb->sk->sk_socket->file->f_cred->fsuid, 727 from_kuid_munged(&init_user_ns, cred->fsuid),
725 skb->sk->sk_socket->file->f_cred->fsgid); 728 from_kgid_munged(&init_user_ns, cred->fsgid));
729 }
726 read_unlock_bh(&skb->sk->sk_callback_lock); 730 read_unlock_bh(&skb->sk->sk_callback_lock);
727 } 731 }
728 732
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index 772d7389b337..ca2e577ed8ac 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -17,6 +17,17 @@
17#include <linux/netfilter/x_tables.h> 17#include <linux/netfilter/x_tables.h>
18#include <linux/netfilter/xt_owner.h> 18#include <linux/netfilter/xt_owner.h>
19 19
20static int owner_check(const struct xt_mtchk_param *par)
21{
22 struct xt_owner_match_info *info = par->matchinfo;
23
24 /* For now only allow adding matches from the initial user namespace */
25 if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
26 (current_user_ns() != &init_user_ns))
27 return -EINVAL;
28 return 0;
29}
30
20static bool 31static bool
21owner_mt(const struct sk_buff *skb, struct xt_action_param *par) 32owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
22{ 33{
@@ -37,17 +48,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
37 return ((info->match ^ info->invert) & 48 return ((info->match ^ info->invert) &
38 (XT_OWNER_UID | XT_OWNER_GID)) == 0; 49 (XT_OWNER_UID | XT_OWNER_GID)) == 0;
39 50
40 if (info->match & XT_OWNER_UID) 51 if (info->match & XT_OWNER_UID) {
41 if ((filp->f_cred->fsuid >= info->uid_min && 52 kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
42 filp->f_cred->fsuid <= info->uid_max) ^ 53 kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
54 if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
55 uid_lte(filp->f_cred->fsuid, uid_max)) ^
43 !(info->invert & XT_OWNER_UID)) 56 !(info->invert & XT_OWNER_UID))
44 return false; 57 return false;
58 }
45 59
46 if (info->match & XT_OWNER_GID) 60 if (info->match & XT_OWNER_GID) {
47 if ((filp->f_cred->fsgid >= info->gid_min && 61 kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
48 filp->f_cred->fsgid <= info->gid_max) ^ 62 kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
63 if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
64 gid_lte(filp->f_cred->fsgid, gid_max)) ^
49 !(info->invert & XT_OWNER_GID)) 65 !(info->invert & XT_OWNER_GID))
50 return false; 66 return false;
67 }
51 68
52 return true; 69 return true;
53} 70}
@@ -56,6 +73,7 @@ static struct xt_match owner_mt_reg __read_mostly = {
56 .name = "owner", 73 .name = "owner",
57 .revision = 1, 74 .revision = 1,
58 .family = NFPROTO_UNSPEC, 75 .family = NFPROTO_UNSPEC,
76 .checkentry = owner_check,
59 .match = owner_mt, 77 .match = owner_mt,
60 .matchsize = sizeof(struct xt_owner_match_info), 78 .matchsize = sizeof(struct xt_owner_match_info),
61 .hooks = (1 << NF_INET_LOCAL_OUT) | 79 .hooks = (1 << NF_INET_LOCAL_OUT) |
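
xt_owner now compares socket credentials as kuid_t/kgid_t rather than raw integers: the configured range is converted with make_kuid()/make_kgid() against init_user_ns and compared with the typed helpers. The new owner_check() additionally refuses uid/gid rules configured from outside the initial user namespace, where the stored numbers would be interpreted in the wrong mapping. The comparison, using the real uidgid.h helpers:

        #include <linux/uidgid.h>

        static bool uid_in_range(kuid_t uid, u32 lo, u32 hi)
        {
                kuid_t kmin = make_kuid(&init_user_ns, lo);
                kuid_t kmax = make_kuid(&init_user_ns, hi);

                return uid_gte(uid, kmin) && uid_lte(uid, kmax);
        }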
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index ae2ad1eec8d0..4635c9b00459 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -317,6 +317,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
317 struct recent_table *t; 317 struct recent_table *t;
318#ifdef CONFIG_PROC_FS 318#ifdef CONFIG_PROC_FS
319 struct proc_dir_entry *pde; 319 struct proc_dir_entry *pde;
320 kuid_t uid;
321 kgid_t gid;
320#endif 322#endif
321 unsigned int i; 323 unsigned int i;
322 int ret = -EINVAL; 324 int ret = -EINVAL;
@@ -372,6 +374,13 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
372 for (i = 0; i < ip_list_hash_size; i++) 374 for (i = 0; i < ip_list_hash_size; i++)
373 INIT_LIST_HEAD(&t->iphash[i]); 375 INIT_LIST_HEAD(&t->iphash[i]);
374#ifdef CONFIG_PROC_FS 376#ifdef CONFIG_PROC_FS
377 uid = make_kuid(&init_user_ns, ip_list_uid);
378 gid = make_kgid(&init_user_ns, ip_list_gid);
379 if (!uid_valid(uid) || !gid_valid(gid)) {
380 kfree(t);
381 ret = -EINVAL;
382 goto out;
383 }
375 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, 384 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
376 &recent_mt_fops, t); 385 &recent_mt_fops, t);
377 if (pde == NULL) { 386 if (pde == NULL) {
@@ -379,8 +388,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
379 ret = -ENOMEM; 388 ret = -ENOMEM;
380 goto out; 389 goto out;
381 } 390 }
382 pde->uid = ip_list_uid; 391 pde->uid = uid;
383 pde->gid = ip_list_gid; 392 pde->gid = gid;
384#endif 393#endif
385 spin_lock_bh(&recent_lock); 394 spin_lock_bh(&recent_lock);
386 list_add_tail(&t->list, &recent_net->tables); 395 list_add_tail(&t->list, &recent_net->tables);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1445d73533ed..382119917166 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -912,7 +912,8 @@ static void netlink_rcv_wake(struct sock *sk)
912 wake_up_interruptible(&nlk->wait); 912 wake_up_interruptible(&nlk->wait);
913} 913}
914 914
915static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) 915static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
916 struct sock *ssk)
916{ 917{
917 int ret; 918 int ret;
918 struct netlink_sock *nlk = nlk_sk(sk); 919 struct netlink_sock *nlk = nlk_sk(sk);
@@ -921,6 +922,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
921 if (nlk->netlink_rcv != NULL) { 922 if (nlk->netlink_rcv != NULL) {
922 ret = skb->len; 923 ret = skb->len;
923 skb_set_owner_r(skb, sk); 924 skb_set_owner_r(skb, sk);
925 NETLINK_CB(skb).ssk = ssk;
924 nlk->netlink_rcv(skb); 926 nlk->netlink_rcv(skb);
925 consume_skb(skb); 927 consume_skb(skb);
926 } else { 928 } else {
@@ -947,7 +949,7 @@ retry:
947 return PTR_ERR(sk); 949 return PTR_ERR(sk);
948 } 950 }
949 if (netlink_is_kernel(sk)) 951 if (netlink_is_kernel(sk))
950 return netlink_unicast_kernel(sk, skb); 952 return netlink_unicast_kernel(sk, skb, ssk);
951 953
952 if (sk_filter(sk, skb)) { 954 if (sk_filter(sk, skb)) {
953 err = skb->len; 955 err = skb->len;
@@ -1373,7 +1375,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1373 dst_pid = addr->nl_pid; 1375 dst_pid = addr->nl_pid;
1374 dst_group = ffs(addr->nl_groups); 1376 dst_group = ffs(addr->nl_groups);
1375 err = -EPERM; 1377 err = -EPERM;
1376 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) 1378 if ((dst_group || dst_pid) &&
1379 !netlink_capable(sock, NL_NONROOT_SEND))
1377 goto out; 1380 goto out;
1378 } else { 1381 } else {
1379 dst_pid = nlk->dst_pid; 1382 dst_pid = nlk->dst_pid;
@@ -2147,6 +2150,7 @@ static void __init netlink_add_usersock_entry(void)
2147 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); 2150 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
2148 nl_table[NETLINK_USERSOCK].module = THIS_MODULE; 2151 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2149 nl_table[NETLINK_USERSOCK].registered = 1; 2152 nl_table[NETLINK_USERSOCK].registered = 1;
2153 nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
2150 2154
2151 netlink_table_ungrab(); 2155 netlink_table_ungrab();
2152} 2156}
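
Three related af_netlink changes: the sending socket is now recorded in NETLINK_CB(skb).ssk before a kernel-side netlink_rcv handler runs, which is what lets receivers (nfnetlink_log above, cls_flow below) discover the sender's user namespace via sk_user_ns(); the NL_NONROOT_SEND capability check now also covers unicasts to a pid, not just multicasts; and NETLINK_USERSOCK opts into that check. A sketch of the consumer side:

        /* Sketch: a receive handler gating namespace-sensitive config. */
        static int handle_request(struct sk_buff *in_skb)
        {
                if (sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns)
                        return -EOPNOTSUPP;     /* init ns only, for now */
                return 0;
        }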
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b7f38b161909..c7bf2f26525a 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -427,19 +427,11 @@ void ovs_flow_deferred_free(struct sw_flow *flow)
427 call_rcu(&flow->rcu, rcu_free_flow_callback); 427 call_rcu(&flow->rcu, rcu_free_flow_callback);
428} 428}
429 429
430/* RCU callback used by ovs_flow_deferred_free_acts. */
431static void rcu_free_acts_callback(struct rcu_head *rcu)
432{
433 struct sw_flow_actions *sf_acts = container_of(rcu,
434 struct sw_flow_actions, rcu);
435 kfree(sf_acts);
436}
437
438/* Schedules 'sf_acts' to be freed after the next RCU grace period. 430/* Schedules 'sf_acts' to be freed after the next RCU grace period.
439 * The caller must hold rcu_read_lock for this to be sensible. */ 431 * The caller must hold rcu_read_lock for this to be sensible. */
440void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts) 432void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
441{ 433{
442 call_rcu(&sf_acts->rcu, rcu_free_acts_callback); 434 kfree_rcu(sf_acts, rcu);
443} 435}
444 436
445static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) 437static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
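
The openvswitch hunk is a pure simplification: when an RCU callback does nothing but kfree() the enclosing object, kfree_rcu() expresses that directly and the hand-written callback disappears. Generic sketch:

        struct item {
                struct rcu_head rcu;
                /* payload ... */
        };

        static void item_deferred_free(struct item *it)
        {
                /* frees 'it' after the next grace period, no callback */
                kfree_rcu(it, rcu);
        }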
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f220c5bdb71f..94060edbbd70 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1162,7 +1162,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1162 spin_unlock(&f->lock); 1162 spin_unlock(&f->lock);
1163} 1163}
1164 1164
1165bool match_fanout_group(struct packet_type *ptype, struct sock * sk) 1165static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
1166{ 1166{
1167 if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) 1167 if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
1168 return true; 1168 return true;
@@ -3749,7 +3749,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
3749 po->ifindex, 3749 po->ifindex,
3750 po->running, 3750 po->running,
3751 atomic_read(&s->sk_rmem_alloc), 3751 atomic_read(&s->sk_rmem_alloc),
3752 sock_i_uid(s), 3752 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
3753 sock_i_ino(s)); 3753 sock_i_ino(s));
3754 } 3754 }
3755 3755
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 0acc943f713a..b7e982782255 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -612,7 +612,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
612 sk->sk_protocol, pn->sobject, pn->dobject, 612 sk->sk_protocol, pn->sobject, pn->dobject,
613 pn->resource, sk->sk_state, 613 pn->resource, sk->sk_state,
614 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 614 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
615 sock_i_uid(sk), sock_i_ino(sk), 615 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
616 sock_i_ino(sk),
616 atomic_read(&sk->sk_refcnt), sk, 617 atomic_read(&sk->sk_refcnt), sk,
617 atomic_read(&sk->sk_drops), &len); 618 atomic_read(&sk->sk_drops), &len);
618 } 619 }
@@ -796,7 +797,8 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
796 struct sock *sk = *psk; 797 struct sock *sk = *psk;
797 798
798 seq_printf(seq, "%02X %5d %lu%n", 799 seq_printf(seq, "%02X %5d %lu%n",
799 (int) (psk - pnres.sk), sock_i_uid(sk), 800 (int) (psk - pnres.sk),
801 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
800 sock_i_ino(sk), &len); 802 sock_i_ino(sk), &len);
801 } 803 }
802 seq_printf(seq, "%*s\n", 63 - len, ""); 804 seq_printf(seq, "%*s\n", 63 - len, "");
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 752b72360ebc..c275bad12068 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -150,6 +150,20 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
150 rfkill_led_trigger_event(rfkill); 150 rfkill_led_trigger_event(rfkill);
151} 151}
152 152
153const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
154{
155 return rfkill->led_trigger.name;
156}
157EXPORT_SYMBOL(rfkill_get_led_trigger_name);
158
159void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
160{
161 BUG_ON(!rfkill);
162
163 rfkill->ledtrigname = name;
164}
165EXPORT_SYMBOL(rfkill_set_led_trigger_name);
166
153static int rfkill_led_trigger_register(struct rfkill *rfkill) 167static int rfkill_led_trigger_register(struct rfkill *rfkill)
154{ 168{
155 rfkill->led_trigger.name = rfkill->ledtrigname 169 rfkill->led_trigger.name = rfkill->ledtrigname
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 6dd1131f2ec1..dc3ef5aef355 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -319,7 +319,7 @@ replay:
319 } 319 }
320 } 320 }
321 321
322 err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); 322 err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh);
323 if (err == 0) { 323 if (err == 0) {
324 if (tp_created) { 324 if (tp_created) {
325 spin_lock_bh(root_lock); 325 spin_lock_bh(root_lock);
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 590960a22a77..344a11b342e5 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -162,7 +162,8 @@ errout:
162 return err; 162 return err;
163} 163}
164 164
165static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, 165static int basic_change(struct sk_buff *in_skb,
166 struct tcf_proto *tp, unsigned long base, u32 handle,
166 struct nlattr **tca, unsigned long *arg) 167 struct nlattr **tca, unsigned long *arg)
167{ 168{
168 int err; 169 int err;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 7743ea8d1d38..91de66695b4a 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -151,7 +151,8 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
151 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 151 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
152}; 152};
153 153
154static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, 154static int cls_cgroup_change(struct sk_buff *in_skb,
155 struct tcf_proto *tp, unsigned long base,
155 u32 handle, struct nlattr **tca, 156 u32 handle, struct nlattr **tca,
156 unsigned long *arg) 157 unsigned long *arg)
157{ 158{
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index ccd08c8dc6a7..ce82d0cb1b47 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -193,15 +193,19 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb)
193 193
194static u32 flow_get_skuid(const struct sk_buff *skb) 194static u32 flow_get_skuid(const struct sk_buff *skb)
195{ 195{
196 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) 196 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
197 return skb->sk->sk_socket->file->f_cred->fsuid; 197 kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
198 return from_kuid(&init_user_ns, skuid);
199 }
198 return 0; 200 return 0;
199} 201}
200 202
201static u32 flow_get_skgid(const struct sk_buff *skb) 203static u32 flow_get_skgid(const struct sk_buff *skb)
202{ 204{
203 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) 205 if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
204 return skb->sk->sk_socket->file->f_cred->fsgid; 206 kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
207 return from_kgid(&init_user_ns, skgid);
208 }
205 return 0; 209 return 0;
206} 210}
207 211
@@ -347,7 +351,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
347 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 351 [TCA_FLOW_PERTURB] = { .type = NLA_U32 },
348}; 352};
349 353
350static int flow_change(struct tcf_proto *tp, unsigned long base, 354static int flow_change(struct sk_buff *in_skb,
355 struct tcf_proto *tp, unsigned long base,
351 u32 handle, struct nlattr **tca, 356 u32 handle, struct nlattr **tca,
352 unsigned long *arg) 357 unsigned long *arg)
353{ 358{
@@ -386,6 +391,10 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
386 391
387 if (fls(keymask) - 1 > FLOW_KEY_MAX) 392 if (fls(keymask) - 1 > FLOW_KEY_MAX)
388 return -EOPNOTSUPP; 393 return -EOPNOTSUPP;
394
395 if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
396 sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns)
397 return -EOPNOTSUPP;
389 } 398 }
390 399
391 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map); 400 err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 8384a4797240..4075a0aef2aa 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -233,7 +233,8 @@ errout:
233 return err; 233 return err;
234} 234}
235 235
236static int fw_change(struct tcf_proto *tp, unsigned long base, 236static int fw_change(struct sk_buff *in_skb,
237 struct tcf_proto *tp, unsigned long base,
237 u32 handle, 238 u32 handle,
238 struct nlattr **tca, 239 struct nlattr **tca,
239 unsigned long *arg) 240 unsigned long *arg)
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 44f405cb9aaf..c10d57bf98f2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -427,7 +427,8 @@ errout:
427 return err; 427 return err;
428} 428}
429 429
430static int route4_change(struct tcf_proto *tp, unsigned long base, 430static int route4_change(struct sk_buff *in_skb,
431 struct tcf_proto *tp, unsigned long base,
431 u32 handle, 432 u32 handle,
432 struct nlattr **tca, 433 struct nlattr **tca,
433 unsigned long *arg) 434 unsigned long *arg)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 18ab93ec8d7e..494bbb90924a 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -416,7 +416,8 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
416 [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) }, 416 [TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
417}; 417};
418 418
419static int rsvp_change(struct tcf_proto *tp, unsigned long base, 419static int rsvp_change(struct sk_buff *in_skb,
420 struct tcf_proto *tp, unsigned long base,
420 u32 handle, 421 u32 handle,
421 struct nlattr **tca, 422 struct nlattr **tca,
422 unsigned long *arg) 423 unsigned long *arg)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index fe29420d0b0e..a1293b4ab7a1 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -332,7 +332,8 @@ errout:
332} 332}
333 333
334static int 334static int
335tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle, 335tcindex_change(struct sk_buff *in_skb,
336 struct tcf_proto *tp, unsigned long base, u32 handle,
336 struct nlattr **tca, unsigned long *arg) 337 struct nlattr **tca, unsigned long *arg)
337{ 338{
338 struct nlattr *opt = tca[TCA_OPTIONS]; 339 struct nlattr *opt = tca[TCA_OPTIONS];
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d45373fb00b9..c7c27bc91b5a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -544,7 +544,8 @@ errout:
544 return err; 544 return err;
545} 545}
546 546
547static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, 547static int u32_change(struct sk_buff *in_skb,
548 struct tcf_proto *tp, unsigned long base, u32 handle,
548 struct nlattr **tca, 549 struct nlattr **tca,
549 unsigned long *arg) 550 unsigned long *arg)
550{ 551{
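
Every cls_*.c hunk above is mechanical fallout from one API change: tcf_proto_ops->change() now receives the netlink request skb (in_skb), so a classifier can inspect the requesting socket; cls_flow uses it to reject uid/gid flow keys configured from outside the initial user namespace. The updated callback shape:

        static int my_cls_change(struct sk_buff *in_skb,
                                 struct tcf_proto *tp, unsigned long base,
                                 u32 handle, struct nlattr **tca,
                                 unsigned long *arg)
        {
                /* in_skb identifies the configuring socket/namespace */
                return 0;
        }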
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d9cb2ab149fe..c3bea269faf4 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -220,7 +220,8 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
220 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 220 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
221 sctp_sk(sk)->type, sk->sk_state, hash, 221 sctp_sk(sk)->type, sk->sk_state, hash,
222 epb->bind_addr.port, 222 epb->bind_addr.port,
223 sock_i_uid(sk), sock_i_ino(sk)); 223 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
224 sock_i_ino(sk));
224 225
225 sctp_seq_dump_local_addrs(seq, epb); 226 sctp_seq_dump_local_addrs(seq, epb);
226 seq_printf(seq, "\n"); 227 seq_printf(seq, "\n");
@@ -332,7 +333,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
332 assoc->assoc_id, 333 assoc->assoc_id,
333 assoc->sndbuf_used, 334 assoc->sndbuf_used,
334 atomic_read(&assoc->rmem_alloc), 335 atomic_read(&assoc->rmem_alloc),
335 sock_i_uid(sk), sock_i_ino(sk), 336 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
337 sock_i_ino(sk),
336 epb->bind_addr.port, 338 epb->bind_addr.port,
337 assoc->peer.port); 339 assoc->peer.port);
338 seq_printf(seq, " "); 340 seq_printf(seq, " ");
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c5ee4ff61364..8a84ab64cafd 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2060,10 +2060,14 @@ static int unix_shutdown(struct socket *sock, int mode)
2060 struct sock *sk = sock->sk; 2060 struct sock *sk = sock->sk;
2061 struct sock *other; 2061 struct sock *other;
2062 2062
2063 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN); 2063 if (mode < SHUT_RD || mode > SHUT_RDWR)
2064 2064 return -EINVAL;
2065 if (!mode) 2065 /* This maps:
2066 return 0; 2066 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2067 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2068 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2069 */
2070 ++mode;
2067 2071
2068 unix_state_lock(sk); 2072 unix_state_lock(sk);
2069 sk->sk_shutdown |= mode; 2073 sk->sk_shutdown |= mode;
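
The af_unix change makes shutdown() validation explicit. The old mode = (mode+1) & (RCV_SHUTDOWN|SEND_SHUTDOWN) never rejected anything: an invalid mode of 3 masked to 0 and returned success as a no-op, and e.g. 42 masked to 3 and silently acted as SHUT_RDWR. The new code returns -EINVAL outside the valid range and then applies the same +1 shift:

        /* SHUT_RD (0) -> RCV_SHUTDOWN (1), SHUT_WR (1) -> SEND_SHUTDOWN (2),
         * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) */
        if (mode < SHUT_RD || mode > SHUT_RDWR)
                return -EINVAL;
        ++mode;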
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d355f67d0cdd..2f876b9ee344 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -105,7 +105,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
105 105
106 ASSERT_WDEV_LOCK(wdev); 106 ASSERT_WDEV_LOCK(wdev);
107 107
108 if (!netif_running(wdev->netdev)) 108 if (wdev->netdev && !netif_running(wdev->netdev))
109 return; 109 return;
110 110
111 switch (wdev->iftype) { 111 switch (wdev->iftype) {
@@ -143,6 +143,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
143 case NL80211_IFTYPE_WDS: 143 case NL80211_IFTYPE_WDS:
144 /* these interface types don't really have a channel */ 144 /* these interface types don't really have a channel */
145 return; 145 return;
146 case NL80211_IFTYPE_P2P_DEVICE:
147 if (wdev->wiphy->features &
148 NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
149 *chanmode = CHAN_MODE_EXCLUSIVE;
150 return;
146 case NL80211_IFTYPE_UNSPECIFIED: 151 case NL80211_IFTYPE_UNSPECIFIED:
147 case NUM_NL80211_IFTYPES: 152 case NUM_NL80211_IFTYPES:
148 WARN_ON(1); 153 WARN_ON(1);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index dcd64d5b07aa..443d4d7deea2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -230,9 +230,24 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
230 rtnl_lock(); 230 rtnl_lock();
231 mutex_lock(&rdev->devlist_mtx); 231 mutex_lock(&rdev->devlist_mtx);
232 232
233 list_for_each_entry(wdev, &rdev->wdev_list, list) 233 list_for_each_entry(wdev, &rdev->wdev_list, list) {
234 if (wdev->netdev) 234 if (wdev->netdev) {
235 dev_close(wdev->netdev); 235 dev_close(wdev->netdev);
236 continue;
237 }
238 /* otherwise, check iftype */
239 switch (wdev->iftype) {
240 case NL80211_IFTYPE_P2P_DEVICE:
241 if (!wdev->p2p_started)
242 break;
243 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
244 wdev->p2p_started = false;
245 rdev->opencount--;
246 break;
247 default:
248 break;
249 }
250 }
236 251
237 mutex_unlock(&rdev->devlist_mtx); 252 mutex_unlock(&rdev->devlist_mtx);
238 rtnl_unlock(); 253 rtnl_unlock();
@@ -407,6 +422,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
407 if (WARN_ON(wiphy->software_iftypes & types)) 422 if (WARN_ON(wiphy->software_iftypes & types))
408 return -EINVAL; 423 return -EINVAL;
409 424
425 /* Only a single P2P_DEVICE can be allowed */
426 if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
427 c->limits[j].max > 1))
428 return -EINVAL;
429
410 cnt += c->limits[j].max; 430 cnt += c->limits[j].max;
411 /* 431 /*
412 * Don't advertise an unsupported type 432 * Don't advertise an unsupported type
@@ -734,6 +754,35 @@ static void wdev_cleanup_work(struct work_struct *work)
734 dev_put(wdev->netdev); 754 dev_put(wdev->netdev);
735} 755}
736 756
757void cfg80211_unregister_wdev(struct wireless_dev *wdev)
758{
759 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
760
761 ASSERT_RTNL();
762
763 if (WARN_ON(wdev->netdev))
764 return;
765
766 mutex_lock(&rdev->devlist_mtx);
767 list_del_rcu(&wdev->list);
768 rdev->devlist_generation++;
769
770 switch (wdev->iftype) {
771 case NL80211_IFTYPE_P2P_DEVICE:
772 if (!wdev->p2p_started)
773 break;
774 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
775 wdev->p2p_started = false;
776 rdev->opencount--;
777 break;
778 default:
779 WARN_ON_ONCE(1);
780 break;
781 }
782 mutex_unlock(&rdev->devlist_mtx);
783}
784EXPORT_SYMBOL(cfg80211_unregister_wdev);
785
737static struct device_type wiphy_type = { 786static struct device_type wiphy_type = {
738 .name = "wlan", 787 .name = "wlan",
739}; 788};
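
cfg80211_unregister_wdev() is the removal path for wdevs that never had a netdev (today only P2P_DEVICE): it asserts RTNL, warns and bails if a netdev is attached, unlinks the wdev under devlist_mtx, and stops the device if it was running. A hedged usage sketch; the kfree() placement is an assumption about driver ownership, not something shown above:

        rtnl_lock();
        cfg80211_unregister_wdev(wdev);         /* wdev->netdev must be NULL */
        rtnl_unlock();
        kfree(wdev);                            /* assumed: driver-allocated */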
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1cdb1d5e6b0f..8fd0242ee169 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -736,7 +736,6 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
736 const u8 *buf, size_t len, bool no_cck, 736 const u8 *buf, size_t len, bool no_cck,
737 bool dont_wait_for_ack, u64 *cookie) 737 bool dont_wait_for_ack, u64 *cookie)
738{ 738{
739 struct net_device *dev = wdev->netdev;
740 const struct ieee80211_mgmt *mgmt; 739 const struct ieee80211_mgmt *mgmt;
741 u16 stype; 740 u16 stype;
742 741
@@ -796,7 +795,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
796 case NL80211_IFTYPE_AP: 795 case NL80211_IFTYPE_AP:
797 case NL80211_IFTYPE_P2P_GO: 796 case NL80211_IFTYPE_P2P_GO:
798 case NL80211_IFTYPE_AP_VLAN: 797 case NL80211_IFTYPE_AP_VLAN:
799 if (!ether_addr_equal(mgmt->bssid, dev->dev_addr)) 798 if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
800 err = -EINVAL; 799 err = -EINVAL;
801 break; 800 break;
802 case NL80211_IFTYPE_MESH_POINT: 801 case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +808,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
809 * cfg80211 doesn't track the stations 808 * cfg80211 doesn't track the stations
810 */ 809 */
811 break; 810 break;
811 case NL80211_IFTYPE_P2P_DEVICE:
812 /*
813 * fall through, P2P device only supports
814 * public action frames
815 */
812 default: 816 default:
813 err = -EOPNOTSUPP; 817 err = -EOPNOTSUPP;
814 break; 818 break;
@@ -819,7 +823,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
819 return err; 823 return err;
820 } 824 }
821 825
822 if (!ether_addr_equal(mgmt->sa, dev->dev_addr)) 826 if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
823 return -EINVAL; 827 return -EINVAL;
824 828
825 /* Transmit the Action frame as requested by user space */ 829 /* Transmit the Action frame as requested by user space */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 97026f3b215a..787aeaa902fe 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1100,6 +1100,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1100 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) 1100 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
1101 goto nla_put_failure; 1101 goto nla_put_failure;
1102 } 1102 }
1103 CMD(start_p2p_device, START_P2P_DEVICE);
1103 1104
1104#ifdef CONFIG_NL80211_TESTMODE 1105#ifdef CONFIG_NL80211_TESTMODE
1105 CMD(testmode_cmd, TESTMODE); 1106 CMD(testmode_cmd, TESTMODE);
@@ -1748,13 +1749,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1748 1749
1749 if (dev && 1750 if (dev &&
1750 (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || 1751 (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
1751 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) || 1752 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
1752 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
1753 goto nla_put_failure; 1753 goto nla_put_failure;
1754 1754
1755 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 1755 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
1756 nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) || 1756 nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
1757 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || 1757 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
1758 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
1758 nla_put_u32(msg, NL80211_ATTR_GENERATION, 1759 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1759 rdev->devlist_generation ^ 1760 rdev->devlist_generation ^
1760 (cfg80211_rdev_list_generation << 2))) 1761 (cfg80211_rdev_list_generation << 2)))
@@ -2021,8 +2022,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2021 return PTR_ERR(wdev); 2022 return PTR_ERR(wdev);
2022 } 2023 }
2023 2024
2024 if (type == NL80211_IFTYPE_MESH_POINT && 2025 switch (type) {
2025 info->attrs[NL80211_ATTR_MESH_ID]) { 2026 case NL80211_IFTYPE_MESH_POINT:
2027 if (!info->attrs[NL80211_ATTR_MESH_ID])
2028 break;
2026 wdev_lock(wdev); 2029 wdev_lock(wdev);
2027 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != 2030 BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
2028 IEEE80211_MAX_MESH_ID_LEN); 2031 IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,6 +2034,26 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2031 memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]), 2034 memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
2032 wdev->mesh_id_up_len); 2035 wdev->mesh_id_up_len);
2033 wdev_unlock(wdev); 2036 wdev_unlock(wdev);
2037 break;
2038 case NL80211_IFTYPE_P2P_DEVICE:
2039 /*
2040 * P2P Device doesn't have a netdev, so doesn't go
2041 * through the netdev notifier and must be added here
2042 */
2043 mutex_init(&wdev->mtx);
2044 INIT_LIST_HEAD(&wdev->event_list);
2045 spin_lock_init(&wdev->event_lock);
2046 INIT_LIST_HEAD(&wdev->mgmt_registrations);
2047 spin_lock_init(&wdev->mgmt_registrations_lock);
2048
2049 mutex_lock(&rdev->devlist_mtx);
2050 wdev->identifier = ++rdev->wdev_id;
2051 list_add_rcu(&wdev->list, &rdev->wdev_list);
2052 rdev->devlist_generation++;
2053 mutex_unlock(&rdev->devlist_mtx);
2054 break;
2055 default:
2056 break;
2034 } 2057 }
2035 2058
2036 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, 2059 if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
@@ -6053,6 +6076,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
6053 case NL80211_IFTYPE_AP_VLAN: 6076 case NL80211_IFTYPE_AP_VLAN:
6054 case NL80211_IFTYPE_MESH_POINT: 6077 case NL80211_IFTYPE_MESH_POINT:
6055 case NL80211_IFTYPE_P2P_GO: 6078 case NL80211_IFTYPE_P2P_GO:
6079 case NL80211_IFTYPE_P2P_DEVICE:
6056 break; 6080 break;
6057 default: 6081 default:
6058 return -EOPNOTSUPP; 6082 return -EOPNOTSUPP;
@@ -6099,6 +6123,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6099 case NL80211_IFTYPE_AP_VLAN: 6123 case NL80211_IFTYPE_AP_VLAN:
6100 case NL80211_IFTYPE_MESH_POINT: 6124 case NL80211_IFTYPE_MESH_POINT:
6101 case NL80211_IFTYPE_P2P_GO: 6125 case NL80211_IFTYPE_P2P_GO:
6126 case NL80211_IFTYPE_P2P_DEVICE:
6102 break; 6127 break;
6103 default: 6128 default:
6104 return -EOPNOTSUPP; 6129 return -EOPNOTSUPP;
@@ -6195,6 +6220,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
6195 case NL80211_IFTYPE_AP: 6220 case NL80211_IFTYPE_AP:
6196 case NL80211_IFTYPE_AP_VLAN: 6221 case NL80211_IFTYPE_AP_VLAN:
6197 case NL80211_IFTYPE_P2P_GO: 6222 case NL80211_IFTYPE_P2P_GO:
6223 case NL80211_IFTYPE_P2P_DEVICE:
6198 break; 6224 break;
6199 default: 6225 default:
6200 return -EOPNOTSUPP; 6226 return -EOPNOTSUPP;
@@ -6810,6 +6836,68 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 }
 
+static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct wireless_dev *wdev = info->user_ptr[1];
+	int err;
+
+	if (!rdev->ops->start_p2p_device)
+		return -EOPNOTSUPP;
+
+	if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+		return -EOPNOTSUPP;
+
+	if (wdev->p2p_started)
+		return 0;
+
+	mutex_lock(&rdev->devlist_mtx);
+	err = cfg80211_can_add_interface(rdev, wdev->iftype);
+	mutex_unlock(&rdev->devlist_mtx);
+	if (err)
+		return err;
+
+	err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+	if (err)
+		return err;
+
+	wdev->p2p_started = true;
+	mutex_lock(&rdev->devlist_mtx);
+	rdev->opencount++;
+	mutex_unlock(&rdev->devlist_mtx);
+
+	return 0;
+}
+
+static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct wireless_dev *wdev = info->user_ptr[1];
+
+	if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+		return -EOPNOTSUPP;
+
+	if (!rdev->ops->stop_p2p_device)
+		return -EOPNOTSUPP;
+
+	if (!wdev->p2p_started)
+		return 0;
+
+	rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+	wdev->p2p_started = false;
+
+	mutex_lock(&rdev->devlist_mtx);
+	rdev->opencount--;
+	mutex_unlock(&rdev->devlist_mtx);
+
+	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
+		rdev->scan_req->aborted = true;
+		___cfg80211_scan_done(rdev, true);
+	}
+
+	return 0;
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -6817,7 +6905,7 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 #define NL80211_FLAG_NEED_NETDEV_UP	(NL80211_FLAG_NEED_NETDEV |\
 					 NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_NEED_WDEV		0x10
-/* If a netdev is associated, it must be UP */
+/* If a netdev is associated, it must be UP, P2P must be started */
 #define NL80211_FLAG_NEED_WDEV_UP	(NL80211_FLAG_NEED_WDEV |\
 					 NL80211_FLAG_CHECK_NETDEV_UP)
 
@@ -6878,6 +6966,13 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
 		}
 
 		dev_hold(dev);
+	} else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
+		if (!wdev->p2p_started) {
+			mutex_unlock(&cfg80211_mutex);
+			if (rtnl)
+				rtnl_unlock();
+			return -ENETDOWN;
+		}
 	}
 
 	cfg80211_lock_rdev(rdev);
@@ -7439,7 +7534,22 @@ static struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_NETDEV |
 				  NL80211_FLAG_NEED_RTNL,
 	},
-
+	{
+		.cmd = NL80211_CMD_START_P2P_DEVICE,
+		.doit = nl80211_start_p2p_device,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_STOP_P2P_DEVICE,
+		.doit = nl80211_stop_p2p_device,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
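
For orientation (not part of the patch): the two new genl_ops are driven from userspace over generic netlink and, since a P2P Device has no netdev, are keyed by the 64-bit NL80211_ATTR_WDEV identifier rather than an ifindex. A minimal, hypothetical libnl-3 sketch follows; it assumes the wdev id was obtained elsewhere (e.g. from NL80211_CMD_GET_INTERFACE), assumes nl80211 headers from a kernel carrying this series, and start_p2p_device() is an illustrative name only. Real code would also wait for the netlink ACK.

	/* Hypothetical userspace sketch (libnl-3); illustrative only. */
	#include <errno.h>
	#include <stdint.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/nl80211.h>

	static int start_p2p_device(uint64_t wdev_id)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg = NULL;
		int family, err = -1;

		if (!sk)
			return -ENOMEM;
		if (genl_connect(sk))
			goto out;
		family = genl_ctrl_resolve(sk, "nl80211");
		if (family < 0)
			goto out;
		msg = nlmsg_alloc();
		if (!msg)
			goto out;
		/* NL80211_FLAG_NEED_WDEV on the kernel side: address the
		 * request by wdev identifier, not by interface index. */
		if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
				 NL80211_CMD_START_P2P_DEVICE, 0))
			goto out;
		if (nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id))
			goto out;
		err = nl_send_auto(sk, msg) < 0 ? -1 : 0;
	out:
		nlmsg_free(msg);
		nl_socket_free(sk);
		return err;
	}
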
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index c4ad7958af52..7d604c06c3dc 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -41,6 +41,8 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
 	[IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
 	[IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
 	[IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+	[IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
+	[IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
 	/*
 	 * add more here as they are defined in radiotap.h
 	 */
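
Aside (illustrative, not from the patch): these .align/.size pairs feed the radiotap argument iterator, which rounds its cursor up to each field's alignment before consuming the field's payload. A self-contained sketch of that arithmetic, using a stand-in struct rather than the kernel's type:

	/* Stand-in type for illustration; the iterator in the kernel does
	 * the equivalent rounding when walking radiotap arguments. */
	struct align_size { unsigned int align, size; };

	static unsigned int next_arg_end(unsigned int offset,
					 const struct align_size *as)
	{
		/* round the cursor up to the field's alignment (align is a
		 * power of two), then skip over the field's payload */
		offset = (offset + as->align - 1) & ~(as->align - 1);
		return offset + as->size;
	}
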
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 994e2f0cc7a8..ef35f4ef2aa6 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -684,22 +684,10 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
 
 const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
 {
-	u8 *end, *pos;
-
-	pos = bss->information_elements;
-	if (pos == NULL)
+	if (bss->information_elements == NULL)
 		return NULL;
-	end = pos + bss->len_information_elements;
-
-	while (pos + 1 < end) {
-		if (pos + 2 + pos[1] > end)
-			break;
-		if (pos[0] == ie)
-			return pos;
-		pos += 2 + pos[1];
-	}
-
-	return NULL;
+	return cfg80211_find_ie(ie, bss->information_elements,
+				bss->len_information_elements);
 }
 EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
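
Side note (not part of the patch): the open-coded loop removed above is the classic TLV walk over information elements, each IE being a 2-byte header (element id, payload length) followed by the payload; cfg80211_find_ie() performs the same bounded scan. A hedged re-statement of that walk, with a hypothetical name:

	/* Illustrative TLV scan equivalent to the removed loop. */
	static const unsigned char *find_ie_sketch(unsigned char eid,
						   const unsigned char *ies,
						   int len)
	{
		while (len >= 2 && 2 + ies[1] <= len) {
			if (ies[0] == eid)
				return ies;	/* points at the IE header */
			len -= 2 + ies[1];
			ies += 2 + ies[1];
		}
		return NULL;	/* not found, or element truncated */
	}
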
@@ -812,6 +800,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 	if (otype == NL80211_IFTYPE_AP_VLAN)
 		return -EOPNOTSUPP;
 
+	/* cannot change into P2P device type */
+	if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+		return -EOPNOTSUPP;
+
 	if (!rdev->ops->change_virtual_intf ||
 	    !(rdev->wiphy.interface_modes & (1 << ntype)))
 		return -EOPNOTSUPP;
@@ -889,6 +881,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		case NUM_NL80211_IFTYPES:
 			/* not happening */
 			break;
+		case NL80211_IFTYPE_P2P_DEVICE:
+			WARN_ON(1);
+			break;
 		}
 	}
 
@@ -1053,8 +1048,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 	list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
 		if (wdev_iter == wdev)
 			continue;
-		if (!netif_running(wdev_iter->netdev))
-			continue;
+		if (wdev_iter->netdev) {
+			if (!netif_running(wdev_iter->netdev))
+				continue;
+		} else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+			if (!wdev_iter->p2p_started)
+				continue;
+		} else {
+			WARN_ON(1);
+		}
 
 		if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
 			continue;