-rw-r--r--  Documentation/kbuild/makefiles.txt | 31
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 12
-rw-r--r--  arch/alpha/kernel/sys_alcor.c | 4
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c | 12
-rw-r--r--  arch/alpha/kernel/sys_dp264.c | 20
-rw-r--r--  arch/alpha/kernel/sys_eb64p.c | 4
-rw-r--r--  arch/alpha/kernel/sys_eiger.c | 4
-rw-r--r--  arch/alpha/kernel/sys_miata.c | 6
-rw-r--r--  arch/alpha/kernel/sys_mikasa.c | 4
-rw-r--r--  arch/alpha/kernel/sys_nautilus.c | 2
-rw-r--r--  arch/alpha/kernel/sys_noritake.c | 6
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c | 4
-rw-r--r--  arch/alpha/kernel/sys_ruffian.c | 6
-rw-r--r--  arch/alpha/kernel/sys_rx164.c | 4
-rw-r--r--  arch/alpha/kernel/sys_sable.c | 10
-rw-r--r--  arch/alpha/kernel/sys_sio.c | 8
-rw-r--r--  arch/alpha/kernel/sys_sx164.c | 4
-rw-r--r--  arch/alpha/kernel/sys_takara.c | 6
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c | 4
-rw-r--r--  arch/arc/boot/dts/hsdk.dts | 11
-rw-r--r--  arch/arc/configs/hsdk_defconfig | 1
-rw-r--r--  arch/arc/kernel/smp.c | 5
-rw-r--r--  arch/arc/plat-hsdk/Kconfig | 1
-rw-r--r--  arch/arc/plat-hsdk/platform.c | 10
-rw-r--r--  arch/arm/xen/p2m.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 23
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 13
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 3
-rw-r--r--  arch/s390/kernel/entry.S | 7
-rw-r--r--  arch/x86/crypto/chacha20-avx2-x86_64.S | 4
-rw-r--r--  arch/x86/crypto/chacha20-ssse3-x86_64.S | 4
-rw-r--r--  arch/x86/entry/entry_64.S | 2
-rw-r--r--  arch/x86/events/intel/bts.c | 6
-rw-r--r--  arch/x86/include/asm/io.h | 4
-rw-r--r--  arch/x86/kernel/amd_nb.c | 41
-rw-r--r--  arch/x86/kernel/unwind_orc.c | 29
-rw-r--r--  arch/x86/mm/fault.c | 11
-rw-r--r--  arch/x86/mm/mmap.c | 12
-rw-r--r--  drivers/block/nbd.c | 13
-rw-r--r--  drivers/block/virtio_blk.c | 12
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 3
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 70
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 4
-rw-r--r--  drivers/hwmon/da9052-hwmon.c | 5
-rw-r--r--  drivers/hwmon/tmp102.c | 13
-rw-r--r--  drivers/ide/ide-cd.c | 1
-rw-r--r--  drivers/infiniband/core/netlink.c | 13
-rw-r--r--  drivers/infiniband/core/nldev.c | 4
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_f30.c | 5
-rw-r--r--  drivers/input/tablet/gtco.c | 17
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 17
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 13
-rw-r--r--  drivers/net/can/sun4i_can.c | 3
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 7
-rw-r--r--  drivers/net/ipvlan/ipvtap.c | 4
-rw-r--r--  drivers/net/macvtap.c | 4
-rw-r--r--  drivers/net/phy/marvell.c | 8
-rw-r--r--  drivers/net/tap.c | 25
-rw-r--r--  drivers/net/tun.c | 7
-rw-r--r--  drivers/net/usb/cdc_ether.c | 14
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/wan/lapbether.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 122
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 3
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 1
-rw-r--r--  drivers/nvme/host/core.c | 1
-rw-r--r--  drivers/nvme/host/fc.c | 37
-rw-r--r--  drivers/nvme/host/rdma.c | 27
-rw-r--r--  drivers/nvme/target/core.c | 15
-rw-r--r--  drivers/nvme/target/nvmet.h | 2
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 6
-rw-r--r--  drivers/platform/x86/intel_pmc_ipc.c | 115
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 2
-rw-r--r--  drivers/regulator/rn5t618-regulator.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 18
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 5
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 8
-rw-r--r--  drivers/scsi/aacraid/linit.c | 7
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 8
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/spi/spi-armada-3700.c | 145
-rw-r--r--  drivers/spi/spi-bcm-qspi.c | 9
-rw-r--r--  drivers/spi/spi-stm32.c | 4
-rw-r--r--  drivers/spi/spi.c | 13
-rw-r--r--  drivers/xen/gntdev.c | 2
-rw-r--r--  drivers/xen/xen-balloon.c | 19
-rw-r--r--  fs/ceph/caps.c | 5
-rw-r--r--  fs/cifs/Kconfig | 5
-rw-r--r--  fs/cifs/cifsglob.h | 8
-rw-r--r--  fs/cifs/dir.c | 5
-rw-r--r--  fs/cifs/smb2maperror.c | 2
-rw-r--r--  fs/cifs/smb2ops.c | 31
-rw-r--r--  fs/cifs/smb2pdu.c | 33
-rw-r--r--  fs/cifs/smb2pdu.h | 5
-rw-r--r--  fs/cifs/smb2proto.h | 1
-rw-r--r--  fs/cifs/smb2transport.c | 26
-rw-r--r--  fs/fuse/dir.c | 3
-rw-r--r--  fs/overlayfs/inode.c | 20
-rw-r--r--  fs/overlayfs/namei.c | 32
-rw-r--r--  fs/overlayfs/overlayfs.h | 3
-rw-r--r--  fs/overlayfs/readdir.c | 11
-rw-r--r--  fs/overlayfs/super.c | 3
-rw-r--r--  fs/xfs/xfs_file.c | 21
-rw-r--r--  include/linux/if_tap.h | 4
-rw-r--r--  include/linux/mlx5/port.h | 2
-rw-r--r--  include/linux/sctp.h | 34
-rw-r--r--  include/linux/swait.h | 27
-rw-r--r--  include/net/fq_impl.h | 9
-rw-r--r--  include/net/inet_sock.h | 6
-rw-r--r--  include/net/pkt_cls.h | 3
-rw-r--r--  include/net/sch_generic.h | 2
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sctp/ulpevent.h | 2
-rw-r--r--  include/net/strparser.h | 3
-rw-r--r--  include/net/tcp.h | 7
-rw-r--r--  include/uapi/linux/bpf.h | 7
-rw-r--r--  include/uapi/linux/sctp.h | 2
-rw-r--r--  include/uapi/linux/spi/spidev.h | 1
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  kernel/bpf/sockmap.c | 29
-rw-r--r--  kernel/futex.c | 12
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/workqueue.c | 37
-rw-r--r--  lib/asn1_decoder.c | 3
-rw-r--r--  lib/assoc_array.c | 51
-rw-r--r--  lib/ioremap.c | 1
-rw-r--r--  net/core/filter.c | 32
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dsa/dsa2.c | 7
-rw-r--r--  net/ife/ife.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 3
-rw-r--r--  net/ipv4/ipip.c | 59
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 13
-rw-r--r--  net/ipv6/addrconf.c | 1
-rw-r--r--  net/ipv6/ip6_gre.c | 20
-rw-r--r--  net/l2tp/l2tp_ppp.c | 7
-rw-r--r--  net/mac80211/cfg.c | 12
-rw-r--r--  net/mac80211/key.c | 37
-rw-r--r--  net/psample/psample.c | 2
-rw-r--r--  net/rds/ib_send.c | 16
-rw-r--r--  net/sched/act_sample.c | 3
-rw-r--r--  net/sched/cls_api.c | 60
-rw-r--r--  net/sched/cls_basic.c | 20
-rw-r--r--  net/sched/cls_bpf.c | 19
-rw-r--r--  net/sched/cls_cgroup.c | 22
-rw-r--r--  net/sched/cls_flow.c | 19
-rw-r--r--  net/sched/cls_flower.c | 19
-rw-r--r--  net/sched/cls_fw.c | 19
-rw-r--r--  net/sched/cls_matchall.c | 19
-rw-r--r--  net/sched/cls_route.c | 19
-rw-r--r--  net/sched/cls_rsvp.h | 19
-rw-r--r--  net/sched/cls_tcindex.c | 38
-rw-r--r--  net/sched/cls_u32.c | 29
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sctp/input.c | 22
-rw-r--r--  net/sctp/ipv6.c | 8
-rw-r--r--  net/sctp/sm_make_chunk.c | 9
-rw-r--r--  net/sctp/sm_sideeffect.c | 8
-rw-r--r--  net/sctp/socket.c | 32
-rw-r--r--  net/sctp/stream.c | 26
-rw-r--r--  net/sctp/ulpevent.c | 2
-rw-r--r--  net/strparser/strparser.c | 17
-rw-r--r--  net/sunrpc/xprt.c | 36
-rw-r--r--  net/unix/diag.c | 2
-rw-r--r--  net/wireless/sme.c | 50
-rw-r--r--  net/xfrm/xfrm_output.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 17
-rw-r--r--  net/xfrm/xfrm_state.c | 1
-rw-r--r--  net/xfrm/xfrm_user.c | 25
-rw-r--r--  samples/trace_events/trace-events-sample.c | 2
-rw-r--r--  scripts/Makefile.modpost | 1
-rw-r--r--  security/apparmor/.gitignore | 1
-rw-r--r--  security/apparmor/Makefile | 43
-rw-r--r--  security/apparmor/apparmorfs.c | 1
-rw-r--r--  security/apparmor/file.c | 30
-rw-r--r--  security/apparmor/include/audit.h | 26
-rw-r--r--  security/apparmor/include/net.h | 114
-rw-r--r--  security/apparmor/include/perms.h | 5
-rw-r--r--  security/apparmor/include/policy.h | 13
-rw-r--r--  security/apparmor/lib.c | 5
-rw-r--r--  security/apparmor/lsm.c | 387
-rw-r--r--  security/apparmor/net.c | 184
-rw-r--r--  security/apparmor/policy_unpack.c | 47
-rw-r--r--  security/keys/keyring.c | 39
-rw-r--r--  security/keys/trusted.c | 23
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 2
-rw-r--r--  sound/core/timer_compat.c | 17
-rw-r--r--  sound/pci/hda/patch_realtek.c | 19
-rw-r--r--  sound/soc/codecs/adau17x1.c | 24
-rw-r--r--  sound/soc/codecs/adau17x1.h | 2
-rw-r--r--  sound/soc/codecs/rt5514-spi.c | 17
-rw-r--r--  sound/soc/codecs/rt5514-spi.h | 3
-rw-r--r--  sound/soc/codecs/rt5514.c | 63
-rw-r--r--  sound/soc/codecs/rt5514.h | 3
-rw-r--r--  sound/soc/codecs/rt5616.c | 2
-rw-r--r--  sound/soc/codecs/rt5659.c | 4
-rw-r--r--  sound/soc/codecs/rt5663.c | 3
-rw-r--r--  sound/soc/soc-topology.c | 5
-rw-r--r--  tools/include/uapi/linux/bpf.h | 7
-rw-r--r--  tools/power/cpupower/Makefile | 2
-rw-r--r--  tools/scripts/Makefile.include | 6
-rw-r--r--  tools/testing/selftests/lib.mk | 6
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | 23
-rwxr-xr-x  tools/testing/selftests/tc-testing/tdc.py | 22
-rwxr-xr-x  tools/testing/selftests/tc-testing/tdc_batch.py | 62
-rw-r--r--  tools/testing/selftests/tc-testing/tdc_config.py | 2
237 files changed, 2113 insertions, 2007 deletions
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 329e740adea7..f6f80380dff2 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly):
1108 ld 1108 ld
1109 Link target. Often, LDFLAGS_$@ is used to set specific options to ld. 1109 Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
1110 1110
1111 objcopy
1112 Copy binary. Uses OBJCOPYFLAGS usually specified in
1113 arch/$(ARCH)/Makefile.
1114 OBJCOPYFLAGS_$@ may be used to set additional options.
1115
1116 gzip
1117 Compress target. Use maximum compression to compress target.
1118
1119 Example: 1111 Example:
1120 #arch/x86/boot/Makefile 1112 #arch/x86/boot/Makefile
1121 LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary 1113 LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
@@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly):
1139 resulting in the target file being recompiled for no 1131 resulting in the target file being recompiled for no
1140 obvious reason. 1132 obvious reason.
1141 1133
1134 objcopy
1135 Copy binary. Uses OBJCOPYFLAGS usually specified in
1136 arch/$(ARCH)/Makefile.
1137 OBJCOPYFLAGS_$@ may be used to set additional options.
1138
1139 gzip
1140 Compress target. Use maximum compression to compress target.
1141
1142 Example:
1143 #arch/x86/boot/compressed/Makefile
1144 $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
1145 $(call if_changed,gzip)
1146
1142 dtc 1147 dtc
1143 Create flattened device tree blob object suitable for linking 1148 Create flattened device tree blob object suitable for linking
1144 into vmlinux. Device tree blobs linked into vmlinux are placed 1149 into vmlinux. Device tree blobs linked into vmlinux are placed
@@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly):
1219 that may be shared between individual architectures. 1224 that may be shared between individual architectures.
1220 The recommended approach how to use a generic header file is 1225 The recommended approach how to use a generic header file is
1221 to list the file in the Kbuild file. 1226 to list the file in the Kbuild file.
1222 See "7.3 generic-y" for further info on syntax etc. 1227 See "7.2 generic-y" for further info on syntax etc.
1223 1228
1224--- 6.11 Post-link pass 1229--- 6.11 Post-link pass
1225 1230
@@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
1254arch/<arch>/include/asm/ to list asm files coming from asm-generic. 1259arch/<arch>/include/asm/ to list asm files coming from asm-generic.
1255See subsequent chapter for the syntax of the Kbuild file. 1260See subsequent chapter for the syntax of the Kbuild file.
1256 1261
1257 --- 7.1 no-export-headers 1262--- 7.1 no-export-headers
1258 1263
1259 no-export-headers is essentially used by include/uapi/linux/Kbuild to 1264 no-export-headers is essentially used by include/uapi/linux/Kbuild to
1260 avoid exporting specific headers (e.g. kvm.h) on architectures that do 1265 avoid exporting specific headers (e.g. kvm.h) on architectures that do
1261 not support it. It should be avoided as much as possible. 1266 not support it. It should be avoided as much as possible.
1262 1267
1263 --- 7.2 generic-y 1268--- 7.2 generic-y
1264 1269
1265 If an architecture uses a verbatim copy of a header from 1270 If an architecture uses a verbatim copy of a header from
1266 include/asm-generic then this is listed in the file 1271 include/asm-generic then this is listed in the file
@@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file.
1287 Example: termios.h 1292 Example: termios.h
1288 #include <asm-generic/termios.h> 1293 #include <asm-generic/termios.h>
1289 1294
1290 --- 7.3 generated-y 1295--- 7.3 generated-y
1291 1296
1292 If an architecture generates other header files alongside generic-y 1297 If an architecture generates other header files alongside generic-y
1293 wrappers, generated-y specifies them. 1298 wrappers, generated-y specifies them.
@@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file.
1299 #arch/x86/include/asm/Kbuild 1304 #arch/x86/include/asm/Kbuild
1300 generated-y += syscalls_32.h 1305 generated-y += syscalls_32.h
1301 1306
1302 --- 7.5 mandatory-y 1307--- 7.4 mandatory-y
1303 1308
1304 mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm 1309 mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
1305 to define the minimum set of headers that must be exported in 1310 to define the minimum set of headers that must be exported in
diff --git a/MAINTAINERS b/MAINTAINERS
index d85c08956875..bf1d20695cbf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6671,7 +6671,7 @@ F: include/net/ieee802154_netdev.h
6671F: Documentation/networking/ieee802154.txt 6671F: Documentation/networking/ieee802154.txt
6672 6672
6673IFE PROTOCOL 6673IFE PROTOCOL
6674M: Yotam Gigi <yotamg@mellanox.com> 6674M: Yotam Gigi <yotam.gi@gmail.com>
6675M: Jamal Hadi Salim <jhs@mojatatu.com> 6675M: Jamal Hadi Salim <jhs@mojatatu.com>
6676F: net/ife 6676F: net/ife
6677F: include/net/ife.h 6677F: include/net/ife.h
@@ -8743,7 +8743,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8743F: drivers/net/ethernet/mellanox/mlxsw/ 8743F: drivers/net/ethernet/mellanox/mlxsw/
8744 8744
8745MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) 8745MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
8746M: Yotam Gigi <yotamg@mellanox.com> 8746M: mlxsw@mellanox.com
8747L: netdev@vger.kernel.org 8747L: netdev@vger.kernel.org
8748S: Supported 8748S: Supported
8749W: http://www.mellanox.com 8749W: http://www.mellanox.com
@@ -10179,7 +10179,6 @@ F: Documentation/parport*.txt
10179 10179
10180PARAVIRT_OPS INTERFACE 10180PARAVIRT_OPS INTERFACE
10181M: Juergen Gross <jgross@suse.com> 10181M: Juergen Gross <jgross@suse.com>
10182M: Chris Wright <chrisw@sous-sol.org>
10183M: Alok Kataria <akataria@vmware.com> 10182M: Alok Kataria <akataria@vmware.com>
10184M: Rusty Russell <rusty@rustcorp.com.au> 10183M: Rusty Russell <rusty@rustcorp.com.au>
10185L: virtualization@lists.linux-foundation.org 10184L: virtualization@lists.linux-foundation.org
@@ -10891,7 +10890,7 @@ S: Maintained
10891F: drivers/block/ps3vram.c 10890F: drivers/block/ps3vram.c
10892 10891
10893PSAMPLE PACKET SAMPLING SUPPORT: 10892PSAMPLE PACKET SAMPLING SUPPORT:
10894M: Yotam Gigi <yotamg@mellanox.com> 10893M: Yotam Gigi <yotam.gi@gmail.com>
10895S: Maintained 10894S: Maintained
10896F: net/psample 10895F: net/psample
10897F: include/net/psample.h 10896F: include/net/psample.h
diff --git a/Makefile b/Makefile
index 4d1dcce4fbb9..3a8868ee967e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 14 3PATCHLEVEL = 14
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION = -rc7
6NAME = Fearless Coyote 6NAME = Fearless Coyote
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -131,8 +131,8 @@ endif
131ifneq ($(KBUILD_OUTPUT),) 131ifneq ($(KBUILD_OUTPUT),)
132# check that the output directory actually exists 132# check that the output directory actually exists
133saved-output := $(KBUILD_OUTPUT) 133saved-output := $(KBUILD_OUTPUT)
134$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT)) 134KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
135KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT)) 135 && /bin/pwd)
136$(if $(KBUILD_OUTPUT),, \ 136$(if $(KBUILD_OUTPUT),, \
137 $(error failed to create output directory "$(saved-output)")) 137 $(error failed to create output directory "$(saved-output)"))
138 138
@@ -698,11 +698,11 @@ KBUILD_CFLAGS += $(stackp-flag)
698 698
699ifeq ($(cc-name),clang) 699ifeq ($(cc-name),clang)
700ifneq ($(CROSS_COMPILE),) 700ifneq ($(CROSS_COMPILE),)
701CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%)) 701CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
702GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) 702GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
703endif 703endif
704ifneq ($(GCC_TOOLCHAIN),) 704ifneq ($(GCC_TOOLCHAIN),)
705CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN) 705CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
706endif 706endif
707KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 707KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
708KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 708KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
@@ -1400,7 +1400,7 @@ help:
1400 @echo ' Build, install, and boot kernel before' 1400 @echo ' Build, install, and boot kernel before'
1401 @echo ' running kselftest on it' 1401 @echo ' running kselftest on it'
1402 @echo ' kselftest-clean - Remove all generated kselftest files' 1402 @echo ' kselftest-clean - Remove all generated kselftest files'
1403 @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed' 1403 @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing'
1404 @echo ' .config.' 1404 @echo ' .config.'
1405 @echo '' 1405 @echo ''
1406 @echo 'Userspace tools targets:' 1406 @echo 'Userspace tools targets:'
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index dabb0d205c40..e56efd5b855f 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -182,10 +182,10 @@ alcor_init_irq(void)
182 * comes in on. This makes interrupt processing much easier. 182 * comes in on. This makes interrupt processing much easier.
183 */ 183 */
184 184
185static int __init 185static int
186alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 186alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
187{ 187{
188 static char irq_tab[7][5] __initdata = { 188 static char irq_tab[7][5] = {
189 /*INT INTA INTB INTC INTD */ 189 /*INT INTA INTB INTC INTD */
190 /* note: IDSEL 17 is XLT only */ 190 /* note: IDSEL 17 is XLT only */
191 {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ 191 {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b8687dadd038..10bc46a4ec40 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -174,10 +174,10 @@ pc164_init_irq(void)
174 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. 174 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
175 */ 175 */
176 176
177static inline int __init 177static inline int
178eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 178eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
179{ 179{
180 static char irq_tab[5][5] __initdata = { 180 static char irq_tab[5][5] = {
181 /*INT INTA INTB INTC INTD */ 181 /*INT INTA INTB INTC INTD */
182 {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ 182 {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
183 {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ 183 {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
@@ -204,10 +204,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
204 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. 204 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
205 */ 205 */
206 206
207static inline int __init 207static inline int
208cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 208cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
209{ 209{
210 static char irq_tab[5][5] __initdata = { 210 static char irq_tab[5][5] = {
211 /*INT INTA INTB INTC INTD */ 211 /*INT INTA INTB INTC INTD */
212 { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ 212 { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
213 { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ 213 { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
@@ -288,10 +288,10 @@ cia_cab_init_pci(void)
288 * 288 *
289 */ 289 */
290 290
291static inline int __init 291static inline int
292alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 292alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
293{ 293{
294 static char irq_tab[7][5] __initdata = { 294 static char irq_tab[7][5] = {
295 /*INT INTA INTB INTC INTD */ 295 /*INT INTA INTB INTC INTD */
296 { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */ 296 { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
297 { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */ 297 { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 3d816947de13..d33508621820 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -357,7 +357,7 @@ clipper_init_irq(void)
357 * 10 64 bit PCI option slot 3 (not bus 0) 357 * 10 64 bit PCI option slot 3 (not bus 0)
358 */ 358 */
359 359
360static int __init 360static int
361isa_irq_fixup(const struct pci_dev *dev, int irq) 361isa_irq_fixup(const struct pci_dev *dev, int irq)
362{ 362{
363 u8 irq8; 363 u8 irq8;
@@ -373,10 +373,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq)
373 return irq8 & 0xf; 373 return irq8 & 0xf;
374} 374}
375 375
376static int __init 376static int
377dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 377dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
378{ 378{
379 static char irq_tab[6][5] __initdata = { 379 static char irq_tab[6][5] = {
380 /*INT INTA INTB INTC INTD */ 380 /*INT INTA INTB INTC INTD */
381 { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ 381 { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
382 { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/ 382 { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
@@ -395,10 +395,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
395 return isa_irq_fixup(dev, irq); 395 return isa_irq_fixup(dev, irq);
396} 396}
397 397
398static int __init 398static int
399monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 399monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
400{ 400{
401 static char irq_tab[13][5] __initdata = { 401 static char irq_tab[13][5] = {
402 /*INT INTA INTB INTC INTD */ 402 /*INT INTA INTB INTC INTD */
403 { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */ 403 { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
404 { -1, -1, -1, -1, -1}, /* IdSel 4 unused */ 404 { -1, -1, -1, -1, -1}, /* IdSel 4 unused */
@@ -424,7 +424,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
424 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); 424 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
425} 425}
426 426
427static u8 __init 427static u8
428monet_swizzle(struct pci_dev *dev, u8 *pinp) 428monet_swizzle(struct pci_dev *dev, u8 *pinp)
429{ 429{
430 struct pci_controller *hose = dev->sysdata; 430 struct pci_controller *hose = dev->sysdata;
@@ -457,10 +457,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
457 return slot; 457 return slot;
458} 458}
459 459
460static int __init 460static int
461webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 461webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
462{ 462{
463 static char irq_tab[13][5] __initdata = { 463 static char irq_tab[13][5] = {
464 /*INT INTA INTB INTC INTD */ 464 /*INT INTA INTB INTC INTD */
465 { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ 465 { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
466 { -1, -1, -1, -1, -1}, /* IdSel 8 unused */ 466 { -1, -1, -1, -1, -1}, /* IdSel 8 unused */
@@ -479,10 +479,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
479 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); 479 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
480} 480}
481 481
482static int __init 482static int
483clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 483clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
484{ 484{
485 static char irq_tab[7][5] __initdata = { 485 static char irq_tab[7][5] = {
486 /*INT INTA INTB INTC INTD */ 486 /*INT INTA INTB INTC INTD */
487 { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */ 487 { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
488 { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */ 488 { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 59fd789dfc80..5251937ec1b4 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -168,10 +168,10 @@ eb64p_init_irq(void)
168 * comes in on. This makes interrupt processing much easier. 168 * comes in on. This makes interrupt processing much easier.
169 */ 169 */
170 170
171static int __init 171static int
172eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 172eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
173{ 173{
174 static char irq_tab[5][5] __initdata = { 174 static char irq_tab[5][5] = {
175 /*INT INTA INTB INTC INTD */ 175 /*INT INTA INTB INTC INTD */
176 {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ 176 {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
177 {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ 177 {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 3c8c36d95448..016f79251141 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -142,7 +142,7 @@ eiger_init_irq(void)
142 } 142 }
143} 143}
144 144
145static int __init 145static int
146eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 146eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
147{ 147{
148 u8 irq_orig; 148 u8 irq_orig;
@@ -159,7 +159,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
159 return irq_orig - 0x80; 159 return irq_orig - 0x80;
160} 160}
161 161
162static u8 __init 162static u8
163eiger_swizzle(struct pci_dev *dev, u8 *pinp) 163eiger_swizzle(struct pci_dev *dev, u8 *pinp)
164{ 164{
165 struct pci_controller *hose = dev->sysdata; 165 struct pci_controller *hose = dev->sysdata;
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index d20337768606..6fa07dc5339d 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -150,10 +150,10 @@ miata_init_irq(void)
150 * comes in on. This makes interrupt processing much easier. 150 * comes in on. This makes interrupt processing much easier.
151 */ 151 */
152 152
153static int __init 153static int
154miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 154miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
155{ 155{
156 static char irq_tab[18][5] __initdata = { 156 static char irq_tab[18][5] = {
157 /*INT INTA INTB INTC INTD */ 157 /*INT INTA INTB INTC INTD */
158 {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ 158 {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
159 { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ 159 { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
@@ -197,7 +197,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
197 return COMMON_TABLE_LOOKUP; 197 return COMMON_TABLE_LOOKUP;
198} 198}
199 199
200static u8 __init 200static u8
201miata_swizzle(struct pci_dev *dev, u8 *pinp) 201miata_swizzle(struct pci_dev *dev, u8 *pinp)
202{ 202{
203 int slot, pin = *pinp; 203 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 68b7d0c63b8d..3af4f94113e1 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -146,10 +146,10 @@ mikasa_init_irq(void)
146 * comes in on. This makes interrupt processing much easier. 146 * comes in on. This makes interrupt processing much easier.
147 */ 147 */
148 148
149static int __init 149static int
150mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 150mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
151{ 151{
152 static char irq_tab[8][5] __initdata = { 152 static char irq_tab[8][5] = {
153 /*INT INTA INTB INTC INTD */ 153 /*INT INTA INTB INTC INTD */
154 {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ 154 {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
155 { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ 155 { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index cefa225ab478..239dc0e601d5 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -63,7 +63,7 @@ nautilus_init_irq(void)
63 common_init_isa_dma(); 63 common_init_isa_dma();
64} 64}
65 65
66static int __init 66static int
67nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 67nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
68{ 68{
69 /* Preserve the IRQ set up by the console. */ 69 /* Preserve the IRQ set up by the console. */
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 1f2e8b1faa9a..b106f327f765 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -194,10 +194,10 @@ noritake_init_irq(void)
194 * comes in on. This makes interrupt processing much easier. 194 * comes in on. This makes interrupt processing much easier.
195 */ 195 */
196 196
197static int __init 197static int
198noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 198noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
199{ 199{
200 static char irq_tab[15][5] __initdata = { 200 static char irq_tab[15][5] = {
201 /*INT INTA INTB INTC INTD */ 201 /*INT INTA INTB INTC INTD */
202 /* note: IDSELs 16, 17, and 25 are CORELLE only */ 202 /* note: IDSELs 16, 17, and 25 are CORELLE only */
203 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ 203 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
@@ -222,7 +222,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
222 return COMMON_TABLE_LOOKUP; 222 return COMMON_TABLE_LOOKUP;
223} 223}
224 224
225static u8 __init 225static u8
226noritake_swizzle(struct pci_dev *dev, u8 *pinp) 226noritake_swizzle(struct pci_dev *dev, u8 *pinp)
227{ 227{
228 int slot, pin = *pinp; 228 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index b21e572c1299..b76f65d0e8b5 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -222,10 +222,10 @@ rawhide_init_irq(void)
222 * 222 *
223 */ 223 */
224 224
225static int __init 225static int
226rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 226rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
227{ 227{
228 static char irq_tab[5][5] __initdata = { 228 static char irq_tab[5][5] = {
229 /*INT INTA INTB INTC INTD */ 229 /*INT INTA INTB INTC INTD */
230 { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */ 230 { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
231 { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ 231 { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index 197660ba686f..07830cccabf9 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -118,10 +118,10 @@ ruffian_kill_arch (int mode)
118 * 118 *
119 */ 119 */
120 120
121static int __init 121static int
122ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 122ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
123{ 123{
124 static char irq_tab[11][5] __initdata = { 124 static char irq_tab[11][5] = {
125 /*INT INTA INTB INTC INTD */ 125 /*INT INTA INTB INTC INTD */
126 {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ 126 {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
127 {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ 127 {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
@@ -140,7 +140,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
140 return COMMON_TABLE_LOOKUP; 140 return COMMON_TABLE_LOOKUP;
141} 141}
142 142
143static u8 __init 143static u8
144ruffian_swizzle(struct pci_dev *dev, u8 *pinp) 144ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
145{ 145{
146 int slot, pin = *pinp; 146 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index b09146e4a08d..a3db719d3c38 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -143,7 +143,7 @@ rx164_init_irq(void)
143 * 143 *
144 */ 144 */
145 145
146static int __init 146static int
147rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 147rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
148{ 148{
149#if 0 149#if 0
@@ -157,7 +157,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
157 { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */ 157 { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
158 }; 158 };
159#else 159#else
160 static char irq_tab[6][5] __initdata = { 160 static char irq_tab[6][5] = {
161 /*INT INTA INTB INTC INTD */ 161 /*INT INTA INTB INTC INTD */
162 { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */ 162 { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
163 { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */ 163 { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 76eb2afca575..3cf0d32da5d8 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -193,10 +193,10 @@ sable_init_irq(void)
193 * with the values in the irq swizzling tables above. 193 * with the values in the irq swizzling tables above.
194 */ 194 */
195 195
196static int __init 196static int
197sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 197sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
198{ 198{
199 static char irq_tab[9][5] __initdata = { 199 static char irq_tab[9][5] = {
200 /*INT INTA INTB INTC INTD */ 200 /*INT INTA INTB INTC INTD */
201 { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ 201 { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
202 { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ 202 { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
@@ -375,10 +375,10 @@ lynx_init_irq(void)
375 * with the values in the irq swizzling tables above. 375 * with the values in the irq swizzling tables above.
376 */ 376 */
377 377
378static int __init 378static int
379lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 379lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
380{ 380{
381 static char irq_tab[19][5] __initdata = { 381 static char irq_tab[19][5] = {
382 /*INT INTA INTB INTC INTD */ 382 /*INT INTA INTB INTC INTD */
383 { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ 383 { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
384 { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ 384 { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
@@ -405,7 +405,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
405 return COMMON_TABLE_LOOKUP; 405 return COMMON_TABLE_LOOKUP;
406} 406}
407 407
408static u8 __init 408static u8
409lynx_swizzle(struct pci_dev *dev, u8 *pinp) 409lynx_swizzle(struct pci_dev *dev, u8 *pinp)
410{ 410{
411 int slot, pin = *pinp; 411 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index b44b1cb923f3..37bd6d9b8eb9 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -145,7 +145,7 @@ sio_fixup_irq_levels(unsigned int level_bits)
145 outb((level_bits >> 8) & 0xff, 0x4d1); 145 outb((level_bits >> 8) & 0xff, 0x4d1);
146} 146}
147 147
148static inline int __init 148static inline int
149noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 149noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
150{ 150{
151 /* 151 /*
@@ -166,7 +166,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
166 * that they use the default INTA line, if they are interrupt 166 * that they use the default INTA line, if they are interrupt
167 * driven at all). 167 * driven at all).
168 */ 168 */
169 static char irq_tab[][5] __initdata = { 169 static char irq_tab[][5] = {
170 /*INT A B C D */ 170 /*INT A B C D */
171 { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */ 171 { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
172 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ 172 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
@@ -184,10 +184,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
184 return irq >= 0 ? tmp : -1; 184 return irq >= 0 ? tmp : -1;
185} 185}
186 186
187static inline int __init 187static inline int
188p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 188p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
189{ 189{
190 static char irq_tab[][5] __initdata = { 190 static char irq_tab[][5] = {
191 /*INT A B C D */ 191 /*INT A B C D */
192 { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ 192 { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
193 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ 193 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index 6c92f25172ff..1ec638a2746a 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -95,10 +95,10 @@ sx164_init_irq(void)
95 * 9 32 bit PCI option slot 3 95 * 9 32 bit PCI option slot 3
96 */ 96 */
97 97
98static int __init 98static int
99sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 99sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
100{ 100{
101 static char irq_tab[5][5] __initdata = { 101 static char irq_tab[5][5] = {
102 /*INT INTA INTB INTC INTD */ 102 /*INT INTA INTB INTC INTD */
103 { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ 103 { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
104 { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ 104 { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index b0913e61416c..e230c6864088 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -156,10 +156,10 @@ takara_init_irq(void)
156 * assign it whatever the hell IRQ we like and it doesn't matter. 156 * assign it whatever the hell IRQ we like and it doesn't matter.
157 */ 157 */
158 158
159static int __init 159static int
160takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin) 160takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
161{ 161{
162 static char irq_tab[15][5] __initdata = { 162 static char irq_tab[15][5] = {
163 { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ 163 { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
164 { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ 164 { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
165 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ 165 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
@@ -211,7 +211,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
211 return COMMON_TABLE_LOOKUP; 211 return COMMON_TABLE_LOOKUP;
212} 212}
213 213
214static u8 __init 214static u8
215takara_swizzle(struct pci_dev *dev, u8 *pinp) 215takara_swizzle(struct pci_dev *dev, u8 *pinp)
216{ 216{
217 int slot = PCI_SLOT(dev->devfn); 217 int slot = PCI_SLOT(dev->devfn);
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 8290274dec11..8e64052811ab 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -289,10 +289,10 @@ wildfire_device_interrupt(unsigned long vector)
289 * 7 64 bit PCI 1 option slot 7 289 * 7 64 bit PCI 1 option slot 7
290 */ 290 */
291 291
292static int __init 292static int
293wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 293wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
294{ 294{
295 static char irq_tab[8][5] __initdata = { 295 static char irq_tab[8][5] = {
296 /*INT INTA INTB INTC INTD */ 296 /*INT INTA INTB INTC INTD */
297 { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */ 297 { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
298 { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */ 298 { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8adde1b492f1..8f627c200d60 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -137,14 +137,15 @@
137 /* 137 /*
138 * DW sdio controller has external ciu clock divider 138 * DW sdio controller has external ciu clock divider
139 * controlled via register in SDIO IP. Due to its 139 * controlled via register in SDIO IP. Due to its
140 * unexpected default value (it should devide by 1 140 * unexpected default value (it should divide by 1
141 * but it devides by 8) SDIO IP uses wrong clock and 141 * but it divides by 8) SDIO IP uses wrong clock and
142 * works unstable (see STAR 9001204800) 142 * works unstable (see STAR 9001204800)
143 * We switched to the minimum possible value of the
144 * divisor (div-by-2) in HSDK platform code.
143 * So add temporary fix and change clock frequency 145 * So add temporary fix and change clock frequency
144 * from 100000000 to 12500000 Hz until we fix dw sdio 146 * to 50000000 Hz until we fix dw sdio driver itself.
145 * driver itself.
146 */ 147 */
147 clock-frequency = <12500000>; 148 clock-frequency = <50000000>;
148 #clock-cells = <0>; 149 #clock-cells = <0>;
149 }; 150 };
150 151
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 15f0f6b5fec1..7b8f8faf8a24 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -63,7 +63,6 @@ CONFIG_MMC_SDHCI=y
63CONFIG_MMC_SDHCI_PLTFM=y 63CONFIG_MMC_SDHCI_PLTFM=y
64CONFIG_MMC_DW=y 64CONFIG_MMC_DW=y
65# CONFIG_IOMMU_SUPPORT is not set 65# CONFIG_IOMMU_SUPPORT is not set
66CONFIG_RESET_HSDK=y
67CONFIG_EXT3_FS=y 66CONFIG_EXT3_FS=y
68CONFIG_VFAT_FS=y 67CONFIG_VFAT_FS=y
69CONFIG_TMPFS=y 68CONFIG_TMPFS=y
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f46267153ec2..6df9d94a9537 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -23,6 +23,8 @@
23#include <linux/cpumask.h> 23#include <linux/cpumask.h>
24#include <linux/reboot.h> 24#include <linux/reboot.h>
25#include <linux/irqdomain.h> 25#include <linux/irqdomain.h>
26#include <linux/export.h>
27
26#include <asm/processor.h> 28#include <asm/processor.h>
27#include <asm/setup.h> 29#include <asm/setup.h>
28#include <asm/mach_desc.h> 30#include <asm/mach_desc.h>
@@ -30,6 +32,9 @@
30#ifndef CONFIG_ARC_HAS_LLSC 32#ifndef CONFIG_ARC_HAS_LLSC
31arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; 33arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
32arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; 34arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
35
36EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
37EXPORT_SYMBOL_GPL(smp_bitops_lock);
33#endif 38#endif
34 39
35struct plat_smp_ops __weak plat_smp_ops; 40struct plat_smp_ops __weak plat_smp_ops;
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index bd08de4be75e..19ab3cf98f0f 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -8,3 +8,4 @@
8menuconfig ARC_SOC_HSDK 8menuconfig ARC_SOC_HSDK
9 bool "ARC HS Development Kit SOC" 9 bool "ARC HS Development Kit SOC"
10 select CLK_HSDK 10 select CLK_HSDK
11 select RESET_HSDK
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 744e62e58788..fd0ae5e38639 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -74,6 +74,10 @@ static void __init hsdk_set_cpu_freq_1ghz(void)
74 pr_err("Failed to setup CPU frequency to 1GHz!"); 74 pr_err("Failed to setup CPU frequency to 1GHz!");
75} 75}
76 76
77#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
78#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
79#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
80
77static void __init hsdk_init_early(void) 81static void __init hsdk_init_early(void)
78{ 82{
79 /* 83 /*
@@ -90,6 +94,12 @@ static void __init hsdk_init_early(void)
90 writel(1, (void __iomem *) CREG_PAE_UPDATE); 94 writel(1, (void __iomem *) CREG_PAE_UPDATE);
91 95
92 /* 96 /*
97 * Switch SDIO external ciu clock divider from default div-by-8 to
98 * minimum possible div-by-2.
99 */
100 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
101
102 /*
93 * Setup CPU frequency to 1GHz. 103 * Setup CPU frequency to 1GHz.
94 * TODO: remove it after smart hsdk pll driver will be introduced. 104 * TODO: remove it after smart hsdk pll driver will be introduced.
95 */ 105 */
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index e71eefa2e427..0641ba54ab62 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -1,7 +1,7 @@
1#include <linux/bootmem.h> 1#include <linux/bootmem.h>
2#include <linux/gfp.h> 2#include <linux/gfp.h>
3#include <linux/export.h> 3#include <linux/export.h>
4#include <linux/rwlock.h> 4#include <linux/spinlock.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
478 return ret; 478 return ret;
479 479
480 dir = iommu_tce_direction(tce); 480 dir = iommu_tce_direction(tce);
481
482 idx = srcu_read_lock(&vcpu->kvm->srcu);
483
481 if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, 484 if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
482 tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) 485 tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
483 return H_PARAMETER; 486 ret = H_PARAMETER;
487 goto unlock_exit;
488 }
484 489
485 entry = ioba >> stt->page_shift; 490 entry = ioba >> stt->page_shift;
486 491
487 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 492 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
488 if (dir == DMA_NONE) { 493 if (dir == DMA_NONE)
489 ret = kvmppc_tce_iommu_unmap(vcpu->kvm, 494 ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
490 stit->tbl, entry); 495 stit->tbl, entry);
491 } else { 496 else
492 idx = srcu_read_lock(&vcpu->kvm->srcu);
493 ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, 497 ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
494 entry, ua, dir); 498 entry, ua, dir);
495 srcu_read_unlock(&vcpu->kvm->srcu, idx);
496 }
497 499
498 if (ret == H_SUCCESS) 500 if (ret == H_SUCCESS)
499 continue; 501 continue;
500 502
501 if (ret == H_TOO_HARD) 503 if (ret == H_TOO_HARD)
502 return ret; 504 goto unlock_exit;
503 505
504 WARN_ON_ONCE(1); 506 WARN_ON_ONCE(1);
505 kvmppc_clear_tce(stit->tbl, entry); 507 kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
507 509
508 kvmppc_tce_put(stt, entry, tce); 510 kvmppc_tce_put(stt, entry, tce);
509 511
510 return H_SUCCESS; 512unlock_exit:
513 srcu_read_unlock(&vcpu->kvm->srcu, idx);
514
515 return ret;
511} 516}
512EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); 517EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
513 518
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ec69fa45d5a2..42639fba89e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
989 beq no_xive 989 beq no_xive
990 ld r11, VCPU_XIVE_SAVED_STATE(r4) 990 ld r11, VCPU_XIVE_SAVED_STATE(r4)
991 li r9, TM_QW1_OS 991 li r9, TM_QW1_OS
992 stdcix r11,r9,r10
993 eieio 992 eieio
993 stdcix r11,r9,r10
994 lwz r11, VCPU_XIVE_CAM_WORD(r4) 994 lwz r11, VCPU_XIVE_CAM_WORD(r4)
995 li r9, TM_QW1_OS + TM_WORD2 995 li r9, TM_QW1_OS + TM_WORD2
996 stwcix r11,r9,r10 996 stwcix r11,r9,r10
997 li r9, 1 997 li r9, 1
998 stw r9, VCPU_XIVE_PUSHED(r4) 998 stw r9, VCPU_XIVE_PUSHED(r4)
999 eieio
999no_xive: 1000no_xive:
1000#endif /* CONFIG_KVM_XICS */ 1001#endif /* CONFIG_KVM_XICS */
1001 1002
@@ -1310,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1310 bne 3f 1311 bne 3f
1311BEGIN_FTR_SECTION 1312BEGIN_FTR_SECTION
1312 PPC_MSGSYNC 1313 PPC_MSGSYNC
1314 lwsync
1313END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1315END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1314 lbz r0, HSTATE_HOST_IPI(r13) 1316 lbz r0, HSTATE_HOST_IPI(r13)
1315 cmpwi r0, 0 1317 cmpwi r0, 0
@@ -1400,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1400 cmpldi cr0, r10, 0 1402 cmpldi cr0, r10, 0
1401 beq 1f 1403 beq 1f
1402 /* First load to pull the context, we ignore the value */ 1404 /* First load to pull the context, we ignore the value */
1403 lwzx r11, r7, r10
1404 eieio 1405 eieio
1406 lwzx r11, r7, r10
1405 /* Second load to recover the context state (Words 0 and 1) */ 1407 /* Second load to recover the context state (Words 0 and 1) */
1406 ldx r11, r6, r10 1408 ldx r11, r6, r10
1407 b 3f 1409 b 3f
@@ -1409,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1409 cmpldi cr0, r10, 0 1411 cmpldi cr0, r10, 0
1410 beq 1f 1412 beq 1f
1411 /* First load to pull the context, we ignore the value */ 1413 /* First load to pull the context, we ignore the value */
1412 lwzcix r11, r7, r10
1413 eieio 1414 eieio
1415 lwzcix r11, r7, r10
1414 /* Second load to recover the context state (Words 0 and 1) */ 1416 /* Second load to recover the context state (Words 0 and 1) */
1415 ldcix r11, r6, r10 1417 ldcix r11, r6, r10
14163: std r11, VCPU_XIVE_SAVED_STATE(r9) 14183: std r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1420,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1420 stw r10, VCPU_XIVE_PUSHED(r9) 1422 stw r10, VCPU_XIVE_PUSHED(r9)
1421 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) 1423 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1422 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) 1424 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1425 eieio
14231: 14261:
1424#endif /* CONFIG_KVM_XICS */ 1427#endif /* CONFIG_KVM_XICS */
1425 /* Save more register state */ 1428 /* Save more register state */
@@ -2788,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2788 PPC_MSGCLR(6) 2791 PPC_MSGCLR(6)
2789 /* see if it's a host IPI */ 2792 /* see if it's a host IPI */
2790 li r3, 1 2793 li r3, 1
2794BEGIN_FTR_SECTION
2795 PPC_MSGSYNC
2796 lwsync
2797END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2791 lbz r0, HSTATE_HOST_IPI(r13) 2798 lbz r0, HSTATE_HOST_IPI(r13)
2792 cmpwi r0, 0 2799 cmpwi r0, 0
2793 bnelr 2800 bnelr
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3480faaf1ef8..ee279c7f4802 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
644 break; 644 break;
645#endif 645#endif
646 case KVM_CAP_PPC_HTM: 646 case KVM_CAP_PPC_HTM:
647 r = cpu_has_feature(CPU_FTR_TM_COMP) && 647 r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
648 is_kvmppc_hv_enabled(kvm);
649 break; 648 break;
650 default: 649 default:
651 r = 0; 650 r = 0;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c41adce6062c..7c6904d616d8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -522,12 +522,15 @@ ENTRY(pgm_check_handler)
522 tmhh %r8,0x0001 # test problem state bit 522 tmhh %r8,0x0001 # test problem state bit
523 jnz 2f # -> fault in user space 523 jnz 2f # -> fault in user space
524#if IS_ENABLED(CONFIG_KVM) 524#if IS_ENABLED(CONFIG_KVM)
525 # cleanup critical section for sie64a 525 # cleanup critical section for program checks in sie64a
526 lgr %r14,%r9 526 lgr %r14,%r9
527 slg %r14,BASED(.Lsie_critical_start) 527 slg %r14,BASED(.Lsie_critical_start)
528 clg %r14,BASED(.Lsie_critical_length) 528 clg %r14,BASED(.Lsie_critical_length)
529 jhe 0f 529 jhe 0f
530 brasl %r14,.Lcleanup_sie 530 lg %r14,__SF_EMPTY(%r15) # get control block pointer
531 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
532 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
533 larl %r9,sie_exit # skip forward to sie_exit
531#endif 534#endif
5320: tmhh %r8,0x4000 # PER bit set in old PSW ? 5350: tmhh %r8,0x4000 # PER bit set in old PSW ?
533 jnz 1f # -> enabled, can't be a double fault 536 jnz 1f # -> enabled, can't be a double fault
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 3a2dc3dc6cac..f3cd26f48332 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
45 45
46 vzeroupper 46 vzeroupper
47 # 4 * 32 byte stack, 32-byte aligned 47 # 4 * 32 byte stack, 32-byte aligned
48 mov %rsp, %r8 48 lea 8(%rsp),%r10
49 and $~31, %rsp 49 and $~31, %rsp
50 sub $0x80, %rsp 50 sub $0x80, %rsp
51 51
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
443 vmovdqu %ymm15,0x01e0(%rsi) 443 vmovdqu %ymm15,0x01e0(%rsi)
444 444
445 vzeroupper 445 vzeroupper
446 mov %r8,%rsp 446 lea -8(%r10),%rsp
447 ret 447 ret
448ENDPROC(chacha20_8block_xor_avx2) 448ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3f511a7d73b8..512a2b500fd1 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
160 # done with the slightly better performing SSSE3 byte shuffling, 160 # done with the slightly better performing SSSE3 byte shuffling,
161 # 7/12-bit word rotation uses traditional shift+OR. 161 # 7/12-bit word rotation uses traditional shift+OR.
162 162
163 mov %rsp,%r11 163 lea 8(%rsp),%r10
164 sub $0x80,%rsp 164 sub $0x80,%rsp
165 and $~63,%rsp 165 and $~63,%rsp
166 166
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
625 pxor %xmm1,%xmm15 625 pxor %xmm1,%xmm15
626 movdqu %xmm15,0xf0(%rsi) 626 movdqu %xmm15,0xf0(%rsi)
627 627
628 mov %r11,%rsp 628 lea -8(%r10),%rsp
629 ret 629 ret
630ENDPROC(chacha20_4block_xor_ssse3) 630ENDPROC(chacha20_4block_xor_ssse3)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4b1ad51eb7cc..bcfc5668dcb2 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -809,7 +809,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
809 809
810.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 810.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
811ENTRY(\sym) 811ENTRY(\sym)
812 UNWIND_HINT_IRET_REGS offset=8 812 UNWIND_HINT_IRET_REGS offset=\has_error_code*8
813 813
814 /* Sanity check */ 814 /* Sanity check */
815 .if \shift_ist != -1 && \paranoid == 0 815 .if \shift_ist != -1 && \paranoid == 0
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 16076eb34699..141e07b06216 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event)
546 if (event->attr.type != bts_pmu.type) 546 if (event->attr.type != bts_pmu.type)
547 return -ENOENT; 547 return -ENOENT;
548 548
549 if (x86_add_exclusive(x86_lbr_exclusive_bts))
550 return -EBUSY;
551
552 /* 549 /*
553 * BTS leaks kernel addresses even when CPL0 tracing is 550 * BTS leaks kernel addresses even when CPL0 tracing is
554 * disabled, so disallow intel_bts driver for unprivileged 551 * disabled, so disallow intel_bts driver for unprivileged
@@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event)
562 !capable(CAP_SYS_ADMIN)) 559 !capable(CAP_SYS_ADMIN))
563 return -EACCES; 560 return -EACCES;
564 561
562 if (x86_add_exclusive(x86_lbr_exclusive_bts))
563 return -EBUSY;
564
565 ret = x86_reserve_hardware(); 565 ret = x86_reserve_hardware();
566 if (ret) { 566 if (ret) {
567 x86_del_exclusive(x86_lbr_exclusive_bts); 567 x86_del_exclusive(x86_lbr_exclusive_bts);
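The bts_event_init() hunk above moves x86_add_exclusive() below the CAP_SYS_ADMIN check, so the exclusive-PMU slot is only claimed once the caller is known to be allowed to use BTS, and a rejected caller leaves no reference behind. A minimal standalone C sketch of that acquire-after-check ordering; caller_allowed(), reserve_slot() and the refcount are illustrative stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

static int slot_refcount;

static bool caller_allowed(bool privileged) { return privileged; }
static int reserve_slot(void) { slot_refcount++; return 0; }

static int event_init(bool privileged)
{
        if (!caller_allowed(privileged))        /* cheap permission check first */
                return -1;                      /* nothing to unwind */
        if (reserve_slot())                     /* side effect only for allowed callers */
                return -2;
        return 0;
}

int main(void)
{
        event_init(false);
        printf("refcount after a rejected caller: %d\n", slot_refcount); /* 0 */
        return 0;
}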
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index efb6c17ca902..11398d55aefa 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -111,10 +111,6 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
111 111
112#endif 112#endif
113 113
114#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
115extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
116extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
117
118/** 114/**
119 * virt_to_phys - map virtual addresses to physical 115 * virt_to_phys - map virtual addresses to physical
120 * @address: address to remap 116 * @address: address to remap
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 458da8509b75..6db28f17ff28 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
27 {} 27 {}
28}; 28};
29 29
30#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
31
30const struct pci_device_id amd_nb_misc_ids[] = { 32const struct pci_device_id amd_nb_misc_ids[] = {
31 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 33 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
32 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 34 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
37 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 39 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
38 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 40 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
39 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 41 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
42 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
40 {} 43 {}
41}; 44};
42EXPORT_SYMBOL_GPL(amd_nb_misc_ids); 45EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
48 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 51 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
49 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, 52 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
50 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, 53 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
54 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
51 {} 55 {}
52}; 56};
53 57
@@ -402,11 +406,48 @@ void amd_flush_garts(void)
402} 406}
403EXPORT_SYMBOL_GPL(amd_flush_garts); 407EXPORT_SYMBOL_GPL(amd_flush_garts);
404 408
409static void __fix_erratum_688(void *info)
410{
411#define MSR_AMD64_IC_CFG 0xC0011021
412
413 msr_set_bit(MSR_AMD64_IC_CFG, 3);
414 msr_set_bit(MSR_AMD64_IC_CFG, 14);
415}
416
417/* Apply erratum 688 fix so machines without a BIOS fix work. */
418static __init void fix_erratum_688(void)
419{
420 struct pci_dev *F4;
421 u32 val;
422
423 if (boot_cpu_data.x86 != 0x14)
424 return;
425
426 if (!amd_northbridges.num)
427 return;
428
429 F4 = node_to_amd_nb(0)->link;
430 if (!F4)
431 return;
432
433 if (pci_read_config_dword(F4, 0x164, &val))
434 return;
435
436 if (val & BIT(2))
437 return;
438
439 on_each_cpu(__fix_erratum_688, NULL, 0);
440
441 pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
442}
443
405static __init int init_amd_nbs(void) 444static __init int init_amd_nbs(void)
406{ 445{
407 amd_cache_northbridges(); 446 amd_cache_northbridges();
408 amd_cache_gart(); 447 amd_cache_gart();
409 448
449 fix_erratum_688();
450
410 return 0; 451 return 0;
411} 452}
412 453
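The erratum 688 workaround added above reads a flag bit from the link function's PCI config space and, when the BIOS has not already applied the fix, sets two chicken bits in an MSR on every CPU. A standalone demo of the bit arithmetic the two msr_set_bit() calls amount to; the starting register value is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)  (1ULL << (n))

int main(void)
{
        uint64_t ic_cfg = 0;                    /* illustrative register value */

        ic_cfg |= BIT(3) | BIT(14);             /* same bits as the workaround */
        printf("IC_CFG after workaround bits: %#llx\n",
               (unsigned long long)ic_cfg);     /* 0x4008 */
        return 0;
}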
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 570b70d3f604..b95007e7c1b3 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip)
86 idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; 86 idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
87 87
88 if (unlikely((idx >= lookup_num_blocks-1))) { 88 if (unlikely((idx >= lookup_num_blocks-1))) {
89 orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n", 89 orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
90 idx, lookup_num_blocks, ip); 90 idx, lookup_num_blocks, (void *)ip);
91 return NULL; 91 return NULL;
92 } 92 }
93 93
@@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip)
96 96
97 if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || 97 if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
98 (__start_orc_unwind + stop > __stop_orc_unwind))) { 98 (__start_orc_unwind + stop > __stop_orc_unwind))) {
99 orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n", 99 orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
100 idx, lookup_num_blocks, start, stop, ip); 100 idx, lookup_num_blocks, start, stop, (void *)ip);
101 return NULL; 101 return NULL;
102 } 102 }
103 103
@@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state)
373 373
374 case ORC_REG_R10: 374 case ORC_REG_R10:
375 if (!state->regs || !state->full_regs) { 375 if (!state->regs || !state->full_regs) {
376 orc_warn("missing regs for base reg R10 at ip %p\n", 376 orc_warn("missing regs for base reg R10 at ip %pB\n",
377 (void *)state->ip); 377 (void *)state->ip);
378 goto done; 378 goto done;
379 } 379 }
@@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state)
382 382
383 case ORC_REG_R13: 383 case ORC_REG_R13:
384 if (!state->regs || !state->full_regs) { 384 if (!state->regs || !state->full_regs) {
385 orc_warn("missing regs for base reg R13 at ip %p\n", 385 orc_warn("missing regs for base reg R13 at ip %pB\n",
386 (void *)state->ip); 386 (void *)state->ip);
387 goto done; 387 goto done;
388 } 388 }
@@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state)
391 391
392 case ORC_REG_DI: 392 case ORC_REG_DI:
393 if (!state->regs || !state->full_regs) { 393 if (!state->regs || !state->full_regs) {
394 orc_warn("missing regs for base reg DI at ip %p\n", 394 orc_warn("missing regs for base reg DI at ip %pB\n",
395 (void *)state->ip); 395 (void *)state->ip);
396 goto done; 396 goto done;
397 } 397 }
@@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
400 400
401 case ORC_REG_DX: 401 case ORC_REG_DX:
402 if (!state->regs || !state->full_regs) { 402 if (!state->regs || !state->full_regs) {
403 orc_warn("missing regs for base reg DX at ip %p\n", 403 orc_warn("missing regs for base reg DX at ip %pB\n",
404 (void *)state->ip); 404 (void *)state->ip);
405 goto done; 405 goto done;
406 } 406 }
@@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state)
408 break; 408 break;
409 409
410 default: 410 default:
411 orc_warn("unknown SP base reg %d for ip %p\n", 411 orc_warn("unknown SP base reg %d for ip %pB\n",
412 orc->sp_reg, (void *)state->ip); 412 orc->sp_reg, (void *)state->ip);
413 goto done; 413 goto done;
414 } 414 }
@@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
436 436
437 case ORC_TYPE_REGS: 437 case ORC_TYPE_REGS:
438 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { 438 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
439 orc_warn("can't dereference registers at %p for ip %p\n", 439 orc_warn("can't dereference registers at %p for ip %pB\n",
440 (void *)sp, (void *)orig_ip); 440 (void *)sp, (void *)orig_ip);
441 goto done; 441 goto done;
442 } 442 }
@@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state)
448 448
449 case ORC_TYPE_REGS_IRET: 449 case ORC_TYPE_REGS_IRET:
450 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { 450 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
451 orc_warn("can't dereference iret registers at %p for ip %p\n", 451 orc_warn("can't dereference iret registers at %p for ip %pB\n",
452 (void *)sp, (void *)orig_ip); 452 (void *)sp, (void *)orig_ip);
453 goto done; 453 goto done;
454 } 454 }
@@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state)
465 break; 465 break;
466 466
467 default: 467 default:
468 orc_warn("unknown .orc_unwind entry type %d\n", orc->type); 468 orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
469 orc->type, (void *)orig_ip);
469 break; 470 break;
470 } 471 }
471 472
@@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state)
487 break; 488 break;
488 489
489 default: 490 default:
490 orc_warn("unknown BP base reg %d for ip %p\n", 491 orc_warn("unknown BP base reg %d for ip %pB\n",
491 orc->bp_reg, (void *)orig_ip); 492 orc->bp_reg, (void *)orig_ip);
492 goto done; 493 goto done;
493 } 494 }
@@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state)
496 if (state->stack_info.type == prev_type && 497 if (state->stack_info.type == prev_type &&
497 on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && 498 on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
498 state->sp <= prev_sp) { 499 state->sp <= prev_sp) {
499 orc_warn("stack going in the wrong direction? ip=%p\n", 500 orc_warn("stack going in the wrong direction? ip=%pB\n",
500 (void *)orig_ip); 501 (void *)orig_ip);
501 goto done; 502 goto done;
502 } 503 }
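All of the orc_warn() changes above replace raw %lx/%p address printing with %pB, the printk format that resolves a kernel code address to symbol+offset, which keeps the warnings readable when raw addresses are randomized or hashed. A kernel-context sketch (not standalone) of the convention:

/* %pB takes a void * and prints something like "func_name+0x1a/0x40" */
pr_warn("bad unwind state at ip %pB\n", (void *)state->ip);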
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f5ff142e6fe0..b0ff378650a9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1441,7 +1441,17 @@ good_area:
1441 * make sure we exit gracefully rather than endlessly redo 1441 * make sure we exit gracefully rather than endlessly redo
1442 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if 1442 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1443 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. 1443 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1444 *
1445 * Note that handle_userfault() may also release and reacquire mmap_sem
1446 * (and not return with VM_FAULT_RETRY), when returning to userland to
1447 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1448 * (potentially after handling any pending signal during the return to
1449 * userland). The return to userland is identified whenever
1450 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1451 * Thus we have to be careful about not touching vma after handling the
1452 * fault, so we read the pkey beforehand.
1444 */ 1453 */
1454 pkey = vma_pkey(vma);
1445 fault = handle_mm_fault(vma, address, flags); 1455 fault = handle_mm_fault(vma, address, flags);
1446 major |= fault & VM_FAULT_MAJOR; 1456 major |= fault & VM_FAULT_MAJOR;
1447 1457
@@ -1468,7 +1478,6 @@ good_area:
1468 return; 1478 return;
1469 } 1479 }
1470 1480
1471 pkey = vma_pkey(vma);
1472 up_read(&mm->mmap_sem); 1481 up_read(&mm->mmap_sem);
1473 if (unlikely(fault & VM_FAULT_ERROR)) { 1482 if (unlikely(fault & VM_FAULT_ERROR)) {
1474 mm_fault_error(regs, error_code, address, &pkey, fault); 1483 mm_fault_error(regs, error_code, address, &pkey, fault);
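The new comment above encodes a general rule: copy whatever you still need out of the vma before calling into a path (handle_mm_fault() and possibly handle_userfault()) that may drop and retake mmap_sem, after which the vma pointer has to be treated as stale. A standalone C demo of the snapshot-before-call ordering; the structure and function names here are illustrative only:

#include <stdio.h>

struct vma { int pkey; };

/* stands in for a call that may release the lock protecting *vmap */
static void fault_path(struct vma **vmap)
{
        *vmap = NULL;
}

int main(void)
{
        struct vma v = { .pkey = 7 };
        struct vma *vma = &v;
        int pkey = vma->pkey;           /* snapshot before the call */

        fault_path(&vma);
        printf("saved pkey = %d, vma pointer now %p\n", pkey, (void *)vma);
        return 0;
}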
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 320c6237e1d1..a99679826846 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -174,15 +174,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
174 return "[mpx]"; 174 return "[mpx]";
175 return NULL; 175 return NULL;
176} 176}
177
178int valid_phys_addr_range(phys_addr_t addr, size_t count)
179{
180 return addr + count <= __pa(high_memory);
181}
182
183int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
184{
185 phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
186
187 return valid_phys_addr_range(addr, count);
188}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index baebbdfd74d5..9adfb5445f8d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
386 return result; 386 return result;
387} 387}
388 388
389/*
390 * Different settings for sk->sk_sndtimeo can result in different return values
391 * if there is a signal pending when we enter sendmsg, because reasons?
392 */
393static inline int was_interrupted(int result)
394{
395 return result == -ERESTARTSYS || result == -EINTR;
396}
397
389/* always call with the tx_lock held */ 398/* always call with the tx_lock held */
390static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 399static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
391{ 400{
@@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
458 result = sock_xmit(nbd, index, 1, &from, 467 result = sock_xmit(nbd, index, 1, &from,
459 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); 468 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
460 if (result <= 0) { 469 if (result <= 0) {
461 if (result == -ERESTARTSYS) { 470 if (was_interrupted(result)) {
462 /* If we haven't sent anything we can just return BUSY, 471 /* If we haven't sent anything we can just return BUSY,
463 * however if we have sent something we need to make 472 * however if we have sent something we need to make
464 * sure we only allow this req to be sent until we are 473 * sure we only allow this req to be sent until we are
@@ -502,7 +511,7 @@ send_pages:
502 } 511 }
503 result = sock_xmit(nbd, index, 1, &from, flags, &sent); 512 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
504 if (result <= 0) { 513 if (result <= 0) {
505 if (result == -ERESTARTSYS) { 514 if (was_interrupted(result)) {
506 /* We've already sent the header, we 515 /* We've already sent the header, we
507 * have no choice but to set pending and 516 * have no choice but to set pending and
508 * return BUSY. 517 * return BUSY.
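The nbd hunks above fold both -ERESTARTSYS and -EINTR into a single "a signal interrupted the send, requeue it" case. A standalone C demo of the classification; ERESTARTSYS is a kernel-internal errno (512) and is therefore defined locally here:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512         /* kernel-internal, not exported to userspace */
#endif

static bool was_interrupted(int result)
{
        return result == -ERESTARTSYS || result == -EINTR;
}

int main(void)
{
        const int results[] = { -ERESTARTSYS, -EINTR, -EPIPE, 128 };

        for (unsigned int i = 0; i < sizeof(results) / sizeof(results[0]); i++)
                printf("%4d -> %s\n", results[i],
                       was_interrupted(results[i]) ? "requeue" : "hard error / progress");
        return 0;
}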
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 34e17ee799be..68846897d213 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
593 return blk_mq_virtio_map_queues(set, vblk->vdev, 0); 593 return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
594} 594}
595 595
596#ifdef CONFIG_VIRTIO_BLK_SCSI
597static void virtblk_initialize_rq(struct request *req)
598{
599 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
600
601 scsi_req_init(&vbr->sreq);
602}
603#endif
604
596static const struct blk_mq_ops virtio_mq_ops = { 605static const struct blk_mq_ops virtio_mq_ops = {
597 .queue_rq = virtio_queue_rq, 606 .queue_rq = virtio_queue_rq,
598 .complete = virtblk_request_done, 607 .complete = virtblk_request_done,
599 .init_request = virtblk_init_request, 608 .init_request = virtblk_init_request,
609#ifdef CONFIG_VIRTIO_BLK_SCSI
610 .initialize_rq_fn = virtblk_initialize_rq,
611#endif
600 .map_queues = virtblk_map_queues, 612 .map_queues = virtblk_map_queues,
601}; 613};
602 614
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 1cb2d1c070c3..a94601d5939e 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
238 238
239 efi_random_get_seed(sys_table); 239 efi_random_get_seed(sys_table);
240 240
241 if (!nokaslr()) { 241 /* hibernation expects the runtime regions to stay in the same place */
242 if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
242 /* 243 /*
243 * Randomize the base of the UEFI runtime services region. 244 * Randomize the base of the UEFI runtime services region.
244 * Preserve the 2 MB alignment of the region by taking a 245 * Preserve the 2 MB alignment of the region by taking a
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 08129b7b80ab..41c48a1e8baa 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg)
593 if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps))) 593 if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
594 return -EFAULT; 594 return -EFAULT;
595 595
596 if (qcaps.capsule_count == ULONG_MAX)
597 return -EINVAL;
598
596 capsules = kcalloc(qcaps.capsule_count + 1, 599 capsules = kcalloc(qcaps.capsule_count + 1,
597 sizeof(efi_capsule_header_t), GFP_KERNEL); 600 sizeof(efi_capsule_header_t), GFP_KERNEL);
598 if (!capsules) 601 if (!capsules)
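The new bound check above exists because the element count comes straight from userspace and is then used as "capsule_count + 1": at ULONG_MAX the addition wraps to 0, the allocation quietly succeeds with zero elements, and the later walk over capsule_count entries overruns it. A standalone demo of the wrap:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long count = ULONG_MAX;        /* attacker-chosen value */

        printf("count + 1 = %lu\n", count + 1); /* prints 0 */
        return 0;
}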
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..430a6b4dfac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle)
225 if (r) 225 if (r)
226 return r; 226 return r;
227 227
228 /* Skip this for APU for now */ 228 return amdgpu_uvd_suspend(adev);
229 if (!(adev->flags & AMD_IS_APU))
230 r = amdgpu_uvd_suspend(adev);
231
232 return r;
233} 229}
234 230
235static int uvd_v6_0_resume(void *handle) 231static int uvd_v6_0_resume(void *handle)
@@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle)
237 int r; 233 int r;
238 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 234 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
239 235
240 /* Skip this for APU for now */ 236 r = amdgpu_uvd_resume(adev);
241 if (!(adev->flags & AMD_IS_APU)) { 237 if (r)
242 r = amdgpu_uvd_resume(adev); 238 return r;
243 if (r) 239
244 return r;
245 }
246 return uvd_v6_0_hw_init(adev); 240 return uvd_v6_0_hw_init(adev);
247} 241}
248 242
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c2743233ba10..b526f49be65d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
830{ 830{
831 uint32_t reference_clock, tmp; 831 uint32_t reference_clock, tmp;
832 struct cgs_display_info info = {0}; 832 struct cgs_display_info info = {0};
833 struct cgs_mode_info mode_info; 833 struct cgs_mode_info mode_info = {0};
834 834
835 info.mode_info = &mode_info; 835 info.mode_info = &mode_info;
836 836
@@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
3948 uint32_t ref_clock; 3948 uint32_t ref_clock;
3949 uint32_t refresh_rate = 0; 3949 uint32_t refresh_rate = 0;
3950 struct cgs_display_info info = {0}; 3950 struct cgs_display_info info = {0};
3951 struct cgs_mode_info mode_info; 3951 struct cgs_mode_info mode_info = {0};
3952 3952
3953 info.mode_info = &mode_info; 3953 info.mode_info = &mode_info;
3954
3955 cgs_get_active_displays_info(hwmgr->device, &info); 3954 cgs_get_active_displays_info(hwmgr->device, &info);
3956 num_active_displays = info.display_count; 3955 num_active_displays = info.display_count;
3957 3956
@@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
3967 frame_time_in_us = 1000000 / refresh_rate; 3966 frame_time_in_us = 1000000 / refresh_rate;
3968 3967
3969 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; 3968 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
3969
3970 data->frame_time_x2 = frame_time_in_us * 2 / 100; 3970 data->frame_time_x2 = frame_time_in_us * 2 / 100;
3971 3971
3972 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); 3972 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21c36e256884..d4726a3358a4 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2723 uint32_t per_ctx_start[CACHELINE_DWORDS] = {0}; 2723 uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2724 unsigned char *bb_start_sva; 2724 unsigned char *bb_start_sva;
2725 2725
2726 if (!wa_ctx->per_ctx.valid)
2727 return 0;
2728
2726 per_ctx_start[0] = 0x18800001; 2729 per_ctx_start[0] = 0x18800001;
2727 per_ctx_start[1] = wa_ctx->per_ctx.guest_gma; 2730 per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2728 2731
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..e5320b4eb698 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
701 CACHELINE_BYTES; 701 CACHELINE_BYTES;
702 workload->wa_ctx.per_ctx.guest_gma = 702 workload->wa_ctx.per_ctx.guest_gma =
703 per_ctx & PER_CTX_ADDR_MASK; 703 per_ctx & PER_CTX_ADDR_MASK;
704 704 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
705 WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
706 } 705 }
707 706
708 if (emulate_schedule_in) 707 if (emulate_schedule_in)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 2294466dd415..a5bed2e71b92 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1429 return 0; 1429 return 0;
1430} 1430}
1431 1431
1432static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu, 1432static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1433 unsigned int offset, void *p_data, unsigned int bytes)
1434{
1435 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1436
1437 mmio_hw_access_pre(dev_priv);
1438 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
1439 mmio_hw_access_post(dev_priv);
1440 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1441}
1442
1443static int instdone_mmio_read(struct intel_vgpu *vgpu,
1444 unsigned int offset, void *p_data, unsigned int bytes) 1433 unsigned int offset, void *p_data, unsigned int bytes)
1445{ 1434{
1446 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1435 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
1589 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ 1578 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
1590 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ 1579 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
1591 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ 1580 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
1581 if (HAS_BSD2(dev_priv)) \
1582 MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
1592} while (0) 1583} while (0)
1593 1584
1594#define MMIO_RING_D(prefix, d) \ 1585#define MMIO_RING_D(prefix, d) \
@@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1635#undef RING_REG 1626#undef RING_REG
1636 1627
1637#define RING_REG(base) (base + 0x6c) 1628#define RING_REG(base) (base + 0x6c)
1638 MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL); 1629 MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
1639 MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
1640#undef RING_REG 1630#undef RING_REG
1641 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL); 1631 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
1642 1632
1643 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); 1633 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
1644 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); 1634 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
@@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1648 MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); 1638 MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
1649 MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); 1639 MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
1650 MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); 1640 MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
1651 MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); 1641 MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
1652 MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); 1642 MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
1653 1643
1654 /* RING MODE */ 1644 /* RING MODE */
@@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1662 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1652 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1663 NULL, NULL); 1653 NULL, NULL);
1664 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, 1654 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
1665 ring_timestamp_mmio_read, NULL); 1655 mmio_read_from_hw, NULL);
1666 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, 1656 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
1667 ring_timestamp_mmio_read, NULL); 1657 mmio_read_from_hw, NULL);
1668 1658
1669 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1659 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1670 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1660 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2411 struct drm_i915_private *dev_priv = gvt->dev_priv; 2401 struct drm_i915_private *dev_priv = gvt->dev_priv;
2412 int ret; 2402 int ret;
2413 2403
2414 MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
2415 intel_vgpu_reg_imr_handler);
2416
2417 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); 2404 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2418 MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); 2405 MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2419 MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); 2406 MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
@@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2476 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, 2463 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
2477 intel_vgpu_reg_master_irq_handler); 2464 intel_vgpu_reg_master_irq_handler);
2478 2465
2479 MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2466 MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
2480 F_CMD_ACCESS, NULL, NULL); 2467 mmio_read_from_hw, NULL);
2481 MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2482
2483 MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2484 NULL, NULL);
2485 MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2486 F_CMD_ACCESS, NULL, NULL);
2487 MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
2488 MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2489 NULL, NULL);
2490 MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2491 F_CMD_ACCESS, NULL, NULL);
2492 MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2493 F_CMD_ACCESS, NULL, NULL);
2494 MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
2495 ring_mode_mmio_write);
2496 MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2497 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2498 MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2499 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2500 MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2501 ring_timestamp_mmio_read, NULL);
2502
2503 MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2504 2468
2505#define RING_REG(base) (base + 0xd0) 2469#define RING_REG(base) (base + 0xd0)
2506 MMIO_RING_F(RING_REG, 4, F_RO, 0, 2470 MMIO_RING_F(RING_REG, 4, F_RO, 0,
2507 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, 2471 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2508 ring_reset_ctl_write); 2472 ring_reset_ctl_write);
2509 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
2510 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2511 ring_reset_ctl_write);
2512#undef RING_REG 2473#undef RING_REG
2513 2474
2514#define RING_REG(base) (base + 0x230) 2475#define RING_REG(base) (base + 0x230)
2515 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write); 2476 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
2516 MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
2517#undef RING_REG 2477#undef RING_REG
2518 2478
2519#define RING_REG(base) (base + 0x234) 2479#define RING_REG(base) (base + 0x234)
2520 MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, 2480 MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
2521 NULL, NULL); 2481 NULL, NULL);
2522 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
2523 ~0LL, D_BDW_PLUS, NULL, NULL);
2524#undef RING_REG 2482#undef RING_REG
2525 2483
2526#define RING_REG(base) (base + 0x244) 2484#define RING_REG(base) (base + 0x244)
2527 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2485 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2528 MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2529 NULL, NULL);
2530#undef RING_REG 2486#undef RING_REG
2531 2487
2532#define RING_REG(base) (base + 0x370) 2488#define RING_REG(base) (base + 0x370)
2533 MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); 2489 MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
2534 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
2535 NULL, NULL);
2536#undef RING_REG 2490#undef RING_REG
2537 2491
2538#define RING_REG(base) (base + 0x3a0) 2492#define RING_REG(base) (base + 0x3a0)
2539 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); 2493 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2540 MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2541#undef RING_REG 2494#undef RING_REG
2542 2495
2543 MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); 2496 MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
@@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2557 2510
2558#define RING_REG(base) (base + 0x270) 2511#define RING_REG(base) (base + 0x270)
2559 MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); 2512 MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
2560 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
2561#undef RING_REG 2513#undef RING_REG
2562 2514
2563 MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); 2515 MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
2564 MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
2565 2516
2566 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2517 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2567 2518
@@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2849 MMIO_D(0x65f08, D_SKL | D_KBL); 2800 MMIO_D(0x65f08, D_SKL | D_KBL);
2850 MMIO_D(0x320f0, D_SKL | D_KBL); 2801 MMIO_D(0x320f0, D_SKL | D_KBL);
2851 2802
2852 MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2853 MMIO_D(0x70034, D_SKL_PLUS); 2803 MMIO_D(0x70034, D_SKL_PLUS);
2854 MMIO_D(0x71034, D_SKL_PLUS); 2804 MMIO_D(0x71034, D_SKL_PLUS);
2855 MMIO_D(0x72034, D_SKL_PLUS); 2805 MMIO_D(0x72034, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index fbd023a16f18..7d01c77a0f7a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -54,9 +54,6 @@
54 54
55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
56 56
57#define _REG_VECS_EXCC 0x1A028
58#define _REG_VCS2_EXCC 0x1c028
59
60#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) 57#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
61#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) 58#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
62 59
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0d431a968a32..93a49eb0209e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -68,6 +68,7 @@ struct shadow_indirect_ctx {
68struct shadow_per_ctx { 68struct shadow_per_ctx {
69 unsigned long guest_gma; 69 unsigned long guest_gma;
70 unsigned long shadow_gma; 70 unsigned long shadow_gma;
71 unsigned valid;
71}; 72};
72 73
73struct intel_shadow_wa_ctx { 74struct intel_shadow_wa_ctx {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 94185d610673..370b9d248fed 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2537,6 +2537,10 @@ static const struct file_operations fops = {
2537 .poll = i915_perf_poll, 2537 .poll = i915_perf_poll,
2538 .read = i915_perf_read, 2538 .read = i915_perf_read,
2539 .unlocked_ioctl = i915_perf_ioctl, 2539 .unlocked_ioctl = i915_perf_ioctl,
2540 /* Our ioctl have no arguments, so it's safe to use the same function
2541 * to handle 32bits compatibility.
2542 */
2543 .compat_ioctl = i915_perf_ioctl,
2540}; 2544};
2541 2545
2542 2546
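The i915_perf hunk above relies on a common pattern: when an ioctl interface passes no pointers or other types whose layout differs between 32-bit and 64-bit userspace, the native handler can double as the compat handler. A kernel-context sketch (not standalone); example_ioctl is a placeholder name:

static const struct file_operations example_fops = {
        .unlocked_ioctl = example_ioctl,
        .compat_ioctl   = example_ioctl,        /* no argument translation needed */
};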
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 97a62f5b9ea4..a973eb6a2890 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
477 /* disable touchscreen features */ 477 /* disable touchscreen features */
478 da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00); 478 da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
479 479
480 /* Sample every 1ms */
481 da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG,
482 DA9052_ADCCONT_ADCMODE,
483 DA9052_ADCCONT_ADCMODE);
484
480 err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, 485 err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
481 "tsiready-irq", da9052_tsi_datardy_irq, 486 "tsiready-irq", da9052_tsi_datardy_irq,
482 hwmon); 487 hwmon);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5eafbaada795..dfc40c740d07 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client,
268 return err; 268 return err;
269 } 269 }
270 270
271 tmp102->ready_time = jiffies; 271 /*
272 if (tmp102->config_orig & TMP102_CONF_SD) { 272 * Mark that we are not ready with data until the first
273 /* 273 * conversion is complete
274 * Mark that we are not ready with data until the first 274 */
275 * conversion is complete 275 tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
276 */
277 tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
278 }
279 276
280 hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, 277 hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
281 tmp102, 278 tmp102,
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 81e18f9628d0..a7355ab3bb22 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1328,6 +1328,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1328 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); 1328 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1329 struct scsi_request *req = scsi_req(rq); 1329 struct scsi_request *req = scsi_req(rq);
1330 1330
1331 scsi_req_init(req);
1331 memset(req->cmd, 0, BLK_MAX_CDB); 1332 memset(req->cmd, 0, BLK_MAX_CDB);
1332 1333
1333 if (rq_data_dir(rq) == READ) 1334 if (rq_data_dir(rq) == READ)
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b12e58787c3d..1fb72c356e36 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
175 !netlink_capable(skb, CAP_NET_ADMIN)) 175 !netlink_capable(skb, CAP_NET_ADMIN))
176 return -EPERM; 176 return -EPERM;
177 177
178 /*
179 * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
180 * mistakenly call the .dump() function.
181 */
182 if (index == RDMA_NL_LS) {
183 if (cb_table[op].doit)
184 return cb_table[op].doit(skb, nlh, extack);
185 return -EINVAL;
186 }
178 /* FIXME: Convert IWCM to properly handle doit callbacks */ 187 /* FIXME: Convert IWCM to properly handle doit callbacks */
179 if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM || 188 if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
180 index == RDMA_NL_IWCM) { 189 index == RDMA_NL_IWCM) {
181 struct netlink_dump_control c = { 190 struct netlink_dump_control c = {
182 .dump = cb_table[op].dump, 191 .dump = cb_table[op].dump,
183 }; 192 };
184 return netlink_dump_start(nls, skb, nlh, &c); 193 if (c.dump)
194 return netlink_dump_start(nls, skb, nlh, &c);
195 return -EINVAL;
185 } 196 }
186 197
187 if (cb_table[op].doit) 198 if (cb_table[op].doit)
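The special case added above works around a flag collision: NLM_F_DUMP is defined as NLM_F_ROOT | NLM_F_MATCH, so an LS response that reuses the 0x100 bit for its own purposes also satisfies a plain "flags & NLM_F_DUMP" test and would be routed to .dump(). A standalone demo using the uapi netlink header:

#include <linux/netlink.h>
#include <stdio.h>

int main(void)
{
        unsigned int ls_flags = 0x100;  /* response reusing the NLM_F_ROOT bit */

        printf("NLM_F_ROOT=%#x NLM_F_MATCH=%#x NLM_F_DUMP=%#x\n",
               NLM_F_ROOT, NLM_F_MATCH, NLM_F_DUMP);
        printf("misread as a dump request? %s\n",
               (ls_flags & NLM_F_DUMP) ? "yes" : "no");  /* yes */
        return 0;
}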
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 3ba24c428c3b..2fae850a3eff 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -214,7 +214,9 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
214 214
215 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 215 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
216 nldev_policy, extack); 216 nldev_policy, extack);
217 if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 217 if (err ||
218 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
219 !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
218 return -EINVAL; 220 return -EINVAL;
219 221
220 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 222 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0e761d079dc4..6d6b092e2da9 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1258 { "ELAN0605", 0 }, 1258 { "ELAN0605", 0 },
1259 { "ELAN0609", 0 }, 1259 { "ELAN0609", 0 },
1260 { "ELAN060B", 0 }, 1260 { "ELAN060B", 0 },
1261 { "ELAN0611", 0 },
1261 { "ELAN1000", 0 }, 1262 { "ELAN1000", 0 },
1262 { } 1263 { }
1263}; 1264};
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 34dfee555b20..82e0f0d43d55 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
232 unsigned int trackstick_button = BTN_LEFT; 232 unsigned int trackstick_button = BTN_LEFT;
233 bool button_mapped = false; 233 bool button_mapped = false;
234 int i; 234 int i;
235 int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END);
235 236
236 f30->gpioled_key_map = devm_kcalloc(&fn->dev, 237 f30->gpioled_key_map = devm_kcalloc(&fn->dev,
237 f30->gpioled_count, 238 button_count,
238 sizeof(f30->gpioled_key_map[0]), 239 sizeof(f30->gpioled_key_map[0]),
239 GFP_KERNEL); 240 GFP_KERNEL);
240 if (!f30->gpioled_key_map) { 241 if (!f30->gpioled_key_map) {
@@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
242 return -ENOMEM; 243 return -ENOMEM;
243 } 244 }
244 245
245 for (i = 0; i < f30->gpioled_count; i++) { 246 for (i = 0; i < button_count; i++) {
246 if (!rmi_f30_is_valid_button(i, f30->ctrl)) 247 if (!rmi_f30_is_valid_button(i, f30->ctrl))
247 continue; 248 continue;
248 249
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index b796e891e2ee..4b8b9d7aa75e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
230 230
231 /* Walk this report and pull out the info we need */ 231 /* Walk this report and pull out the info we need */
232 while (i < length) { 232 while (i < length) {
233 prefix = report[i]; 233 prefix = report[i++];
234
235 /* Skip over prefix */
236 i++;
237 234
238 /* Determine data size and save the data in the proper variable */ 235 /* Determine data size and save the data in the proper variable */
239 size = PREF_SIZE(prefix); 236 size = (1U << PREF_SIZE(prefix)) >> 1;
237 if (i + size > length) {
238 dev_err(ddev,
239 "Not enough data (need %d, have %d)\n",
240 i + size, length);
241 break;
242 }
243
240 switch (size) { 244 switch (size) {
241 case 1: 245 case 1:
242 data = report[i]; 246 data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
244 case 2: 248 case 2:
245 data16 = get_unaligned_le16(&report[i]); 249 data16 = get_unaligned_le16(&report[i]);
246 break; 250 break;
247 case 3: 251 case 4:
248 size = 4;
249 data32 = get_unaligned_le32(&report[i]); 252 data32 = get_unaligned_le32(&report[i]);
250 break; 253 break;
251 } 254 }
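The gtco parser fix above does two things: it decodes the HID short-item size field with "(1 << bits) >> 1", which maps the two-bit values 0, 1, 2, 3 to item lengths of 0, 1, 2 and 4 bytes, and it refuses to read item data past the end of the descriptor. A standalone demo of the size decode:

#include <stdio.h>

int main(void)
{
        for (unsigned int bits = 0; bits < 4; bits++)
                printf("size field %u -> %u data bytes\n",
                       bits, (1U << bits) >> 1);
        return 0;
}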
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f905f2361d12..8bae88a150fd 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -146,11 +146,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
146 WARN_ON(host->sg_len > 1); 146 WARN_ON(host->sg_len > 1);
147 147
148 /* This DMAC cannot handle if buffer is not 8-bytes alignment */ 148 /* This DMAC cannot handle if buffer is not 8-bytes alignment */
149 if (!IS_ALIGNED(sg->offset, 8)) { 149 if (!IS_ALIGNED(sg->offset, 8))
150 host->force_pio = true; 150 goto force_pio;
151 renesas_sdhi_internal_dmac_enable_dma(host, false);
152 return;
153 }
154 151
155 if (data->flags & MMC_DATA_READ) { 152 if (data->flags & MMC_DATA_READ) {
156 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 153 dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
@@ -163,8 +160,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
163 } 160 }
164 161
165 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir); 162 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir);
166 if (ret < 0) 163 if (ret == 0)
167 return; 164 goto force_pio;
168 165
169 renesas_sdhi_internal_dmac_enable_dma(host, true); 166 renesas_sdhi_internal_dmac_enable_dma(host, true);
170 167
@@ -176,6 +173,12 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
176 dtran_mode); 173 dtran_mode);
177 renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR, 174 renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
178 sg->dma_address); 175 sg->dma_address);
176
177 return;
178
179force_pio:
180 host->force_pio = true;
181 renesas_sdhi_internal_dmac_enable_dma(host, false);
179} 182}
180 183
181static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) 184static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
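The error-path change above ("ret == 0" instead of "ret < 0") reflects the dma_map_sg() contract: it returns the number of entries actually mapped and 0 on failure, never a negative errno, so the old test could not fire. A kernel-context sketch (not standalone) of the check:

int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
if (mapped == 0)                /* failure is reported as 0, not as -Exxx */
        goto fallback;          /* e.g. the force_pio path above */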
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a7293e186e03..9c4e6199b854 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -47,6 +47,7 @@
47#include <linux/mmc/sdio.h> 47#include <linux/mmc/sdio.h>
48#include <linux/scatterlist.h> 48#include <linux/scatterlist.h>
49#include <linux/spinlock.h> 49#include <linux/spinlock.h>
50#include <linux/swiotlb.h>
50#include <linux/workqueue.h> 51#include <linux/workqueue.h>
51 52
52#include "tmio_mmc.h" 53#include "tmio_mmc.h"
@@ -1215,6 +1216,18 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1215 mmc->max_blk_count = pdata->max_blk_count ? : 1216 mmc->max_blk_count = pdata->max_blk_count ? :
1216 (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs; 1217 (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
1217 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1218 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1219 /*
1220 * Since swiotlb has memory size limitation, this will calculate
1221 * the maximum size locally (because we don't have any APIs for it now)
1222 * and check the current max_req_size. And then, this will update
1223 * the max_req_size if needed as a workaround.
1224 */
1225 if (swiotlb_max_segment()) {
1226 unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
1227
1228 if (mmc->max_req_size > max_size)
1229 mmc->max_req_size = max_size;
1230 }
1218 mmc->max_seg_size = mmc->max_req_size; 1231 mmc->max_seg_size = mmc->max_req_size;
1219 1232
1220 _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || 1233 _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
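The new comment above caps max_req_size at what a single swiotlb mapping can cover. Assuming the usual constants of this kernel generation, IO_TLB_SHIFT = 11 (2 KiB slots) and IO_TLB_SEGSIZE = 128 slots per contiguous mapping, the bound works out to 256 KiB; a standalone check of the arithmetic:

#include <stdio.h>

#define IO_TLB_SHIFT    11      /* assumed 2 KiB swiotlb slot size */
#define IO_TLB_SEGSIZE  128     /* assumed slots per contiguous mapping */

int main(void)
{
        unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

        printf("swiotlb per-request cap: %u bytes (%u KiB)\n",
               max_size, max_size / 1024);      /* 262144 bytes, 256 KiB */
        return 0;
}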
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4cd821..b0c80859f746 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
342 342
343 /* enter the selected mode */ 343 /* enter the selected mode */
344 mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); 344 mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
345 if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) 345 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
346 mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE; 346 mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
347 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 347 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
348 mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE; 348 mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
811 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING | 811 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
812 CAN_CTRLMODE_LISTENONLY | 812 CAN_CTRLMODE_LISTENONLY |
813 CAN_CTRLMODE_LOOPBACK | 813 CAN_CTRLMODE_LOOPBACK |
814 CAN_CTRLMODE_PRESUME_ACK |
815 CAN_CTRLMODE_3_SAMPLES; 814 CAN_CTRLMODE_3_SAMPLES;
816 priv->base = addr; 815 priv->base = addr;
817 priv->clk = clk; 816 priv->clk = clk;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 18cc529fb807..9b18d96ef526 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
137#define CMD_RESET_ERROR_COUNTER 49 137#define CMD_RESET_ERROR_COUNTER 49
138#define CMD_TX_ACKNOWLEDGE 50 138#define CMD_TX_ACKNOWLEDGE 50
139#define CMD_CAN_ERROR_EVENT 51 139#define CMD_CAN_ERROR_EVENT 51
140#define CMD_FLUSH_QUEUE_REPLY 68
140 141
141#define CMD_LEAF_USB_THROTTLE 77 142#define CMD_LEAF_USB_THROTTLE 77
142#define CMD_LEAF_LOG_MESSAGE 106 143#define CMD_LEAF_LOG_MESSAGE 106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
1301 goto warn; 1302 goto warn;
1302 break; 1303 break;
1303 1304
1305 case CMD_FLUSH_QUEUE_REPLY:
1306 if (dev->family != KVASER_LEAF)
1307 goto warn;
1308 break;
1309
1304 default: 1310 default:
1305warn: dev_warn(dev->udev->dev.parent, 1311warn: dev_warn(dev->udev->dev.parent,
1306 "Unhandled message (%d)\n", msg->id); 1312 "Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
1609 if (err) 1615 if (err)
1610 netdev_warn(netdev, "Cannot flush queue, error %d\n", err); 1616 netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
1611 1617
1612 if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel)) 1618 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
1619 if (err)
1613 netdev_warn(netdev, "Cannot reset card, error %d\n", err); 1620 netdev_warn(netdev, "Cannot reset card, error %d\n", err);
1614 1621
1615 err = kvaser_usb_stop_chip(priv); 1622 err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 36520634c96a..e77192683dba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2369,8 +2369,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2369 priv->enet_ver = AE_VERSION_2; 2369 priv->enet_ver = AE_VERSION_2;
2370 2370
2371 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0); 2371 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
2372 if (IS_ERR_OR_NULL(ae_node)) { 2372 if (!ae_node) {
2373 ret = PTR_ERR(ae_node); 2373 ret = -ENODEV;
2374 dev_err(dev, "not find ae-handle\n"); 2374 dev_err(dev, "not find ae-handle\n");
2375 goto out_read_prop_fail; 2375 goto out_read_prop_fail;
2376 } 2376 }
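The hns probe fix above matters because of_parse_phandle() signals failure with NULL rather than an ERR_PTR value; PTR_ERR(NULL) evaluates to 0, so the old code turned a lookup failure into a silent "success" return code. A standalone demo of that trap, with PTR_ERR reduced to the cast it effectively performs:

#include <stdio.h>

#define PTR_ERR(p)      ((long)(p))     /* simplified form of the kernel macro */

int main(void)
{
        void *node = NULL;              /* lookup failed */
        long ret = PTR_ERR(node);

        printf("ret = %ld\n", ret);     /* 0, i.e. "no error" */
        return 0;
}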
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ec8aa4562cc9..3b3983a1ffbb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1824{ 1824{
1825 struct e1000_adapter *adapter = netdev_priv(netdev); 1825 struct e1000_adapter *adapter = netdev_priv(netdev);
1826 int i; 1826 int i;
1827 char *p = NULL;
1828 const struct e1000_stats *stat = e1000_gstrings_stats; 1827 const struct e1000_stats *stat = e1000_gstrings_stats;
1829 1828
1830 e1000_update_stats(adapter); 1829 e1000_update_stats(adapter);
1831 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1830 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
1831 char *p;
1832
1832 switch (stat->type) { 1833 switch (stat->type) {
1833 case NETDEV_STATS: 1834 case NETDEV_STATS:
1834 p = (char *)netdev + stat->stat_offset; 1835 p = (char *)netdev + stat->stat_offset;
@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1839 default: 1840 default:
1840 WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", 1841 WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
1841 stat->type, i); 1842 stat->type, i);
1842 break; 1843 continue;
1843 } 1844 }
1844 1845
1845 if (stat->sizeof_stat == sizeof(u64)) 1846 if (stat->sizeof_stat == sizeof(u64))
1846 data[i] = *(u64 *)p; 1847 data[i] = *(u64 *)p;
1847 else 1848 else
1848 data[i] = *(u32 *)p; 1849 data[i] = *(u32 *)p;
1849
1850 stat++;
1851 } 1850 }
1852/* BUG_ON(i != E1000_STATS_LEN); */ 1851/* BUG_ON(i != E1000_STATS_LEN); */
1853} 1852}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 98375e1e1185..1982f7917a8d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
520 struct net_device *netdev = adapter->netdev; 520 struct net_device *netdev = adapter->netdev;
521 u32 rctl, tctl; 521 u32 rctl, tctl;
522 522
523 netif_carrier_off(netdev);
524
525 /* disable receives in the hardware */ 523 /* disable receives in the hardware */
526 rctl = er32(RCTL); 524 rctl = er32(RCTL);
527 ew32(RCTL, rctl & ~E1000_RCTL_EN); 525 ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
537 E1000_WRITE_FLUSH(); 535 E1000_WRITE_FLUSH();
538 msleep(10); 536 msleep(10);
539 537
538 /* Set the carrier off after transmits have been disabled in the
539 * hardware, to avoid race conditions with e1000_watchdog() (which
540 * may be running concurrently to us, checking for the carrier
541 * bit to decide whether it should enable transmits again). Such
542 * a race condition would result into transmission being disabled
543 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
544 */
545 netif_carrier_off(netdev);
546
540 napi_disable(&adapter->napi); 547 napi_disable(&adapter->napi);
541 548
542 e1000_irq_disable(adapter); 549 e1000_irq_disable(adapter);
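The relocated netif_carrier_off() call and its comment above pin down an ordering: quiesce the hardware first, report the carrier as down second, so a watchdog running in parallel can never observe carrier-off while transmits are still being torn down and react by re-enabling them. A condensed kernel-context sketch (not standalone; the first two helpers are shorthand for the register writes in e1000_down()):

stop_hw_rx_tx(adapter);         /* clear the RCTL/TCTL enable bits */
flush_and_wait(adapter);        /* let in-flight descriptors drain */
netif_carrier_off(netdev);      /* only now signal link down */
napi_disable(&adapter->napi);   /* then stop polling and IRQs */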
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2756131495f0..120c68f78951 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2102,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2102 2102
2103 if (unlikely(i40e_rx_is_programming_status(qword))) { 2103 if (unlikely(i40e_rx_is_programming_status(qword))) {
2104 i40e_clean_programming_status(rx_ring, rx_desc, qword); 2104 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2105 cleaned_count++;
2105 continue; 2106 continue;
2106 } 2107 }
2107 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 2108 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@@ -2269,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2269 goto enable_int; 2270 goto enable_int;
2270 } 2271 }
2271 2272
2272 if (ITR_IS_DYNAMIC(tx_itr_setting)) { 2273 if (ITR_IS_DYNAMIC(rx_itr_setting)) {
2273 rx = i40e_set_new_dynamic_itr(&q_vector->rx); 2274 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2274 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); 2275 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2275 } 2276 }
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd4a46b03cc8..ea69af267d63 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5326,7 +5326,7 @@ dma_error:
5326 DMA_TO_DEVICE); 5326 DMA_TO_DEVICE);
5327 dma_unmap_len_set(tx_buffer, len, 0); 5327 dma_unmap_len_set(tx_buffer, len, 0);
5328 5328
5329 if (i--) 5329 if (i-- == 0)
5330 i += tx_ring->count; 5330 i += tx_ring->count;
5331 tx_buffer = &tx_ring->tx_buffer_info[i]; 5331 tx_buffer = &tx_ring->tx_buffer_info[i];
5332 } 5332 }
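The one-character igb change above (and the reworked ixgbe loop just below) is a backwards ring walk: the index should wrap to the last slot only when it is about to step below 0, which "if (i-- == 0) i += count;" does, while the old "if (i--)" added the ring size on every non-zero step and pushed the index out of range. A standalone demo of the corrected walk:

#include <stdio.h>

int main(void)
{
        const unsigned int count = 4;   /* ring size */
        unsigned int i = 1;             /* start somewhere mid-ring */

        for (int step = 0; step < 6; step++) {
                printf("visiting slot %u\n", i);
                if (i-- == 0)           /* wrap only when leaving slot 0 */
                        i += count;
        }
        return 0;                       /* prints 1 0 3 2 1 0 */
}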
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4d76afd13868..6d5f31e94358 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8020 return 0; 8020 return 0;
8021dma_error: 8021dma_error:
8022 dev_err(tx_ring->dev, "TX DMA map failed\n"); 8022 dev_err(tx_ring->dev, "TX DMA map failed\n");
8023 tx_buffer = &tx_ring->tx_buffer_info[i];
8024 8023
8025 /* clear dma mappings for failed tx_buffer_info map */ 8024 /* clear dma mappings for failed tx_buffer_info map */
8026 while (tx_buffer != first) { 8025 for (;;) {
8026 tx_buffer = &tx_ring->tx_buffer_info[i];
8027 if (dma_unmap_len(tx_buffer, len)) 8027 if (dma_unmap_len(tx_buffer, len))
8028 dma_unmap_page(tx_ring->dev, 8028 dma_unmap_page(tx_ring->dev,
8029 dma_unmap_addr(tx_buffer, dma), 8029 dma_unmap_addr(tx_buffer, dma),
8030 dma_unmap_len(tx_buffer, len), 8030 dma_unmap_len(tx_buffer, len),
8031 DMA_TO_DEVICE); 8031 DMA_TO_DEVICE);
8032 dma_unmap_len_set(tx_buffer, len, 0); 8032 dma_unmap_len_set(tx_buffer, len, 0);
8033 8033 if (tx_buffer == first)
8034 if (i--) 8034 break;
8035 if (i == 0)
8035 i += tx_ring->count; 8036 i += tx_ring->count;
8036 tx_buffer = &tx_ring->tx_buffer_info[i]; 8037 i--;
8037 } 8038 }
8038 8039
8039 if (dma_unmap_len(tx_buffer, len))
8040 dma_unmap_single(tx_ring->dev,
8041 dma_unmap_addr(tx_buffer, dma),
8042 dma_unmap_len(tx_buffer, len),
8043 DMA_TO_DEVICE);
8044 dma_unmap_len_set(tx_buffer, len, 0);
8045
8046 dev_kfree_skb_any(first->skb); 8040 dev_kfree_skb_any(first->skb);
8047 first->skb = NULL; 8041 first->skb = NULL;
8048 8042
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 9c86cb7cb988..a37af5813f33 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1167,6 +1167,11 @@ struct mvpp2_bm_pool {
1167 u32 port_map; 1167 u32 port_map;
1168}; 1168};
1169 1169
1170#define IS_TSO_HEADER(txq_pcpu, addr) \
1171 ((addr) >= (txq_pcpu)->tso_headers_dma && \
1172 (addr) < (txq_pcpu)->tso_headers_dma + \
1173 (txq_pcpu)->size * TSO_HEADER_SIZE)
1174
1170/* Queue modes */ 1175/* Queue modes */
1171#define MVPP2_QDIST_SINGLE_MODE 0 1176#define MVPP2_QDIST_SINGLE_MODE 0
1172#define MVPP2_QDIST_MULTI_MODE 1 1177#define MVPP2_QDIST_MULTI_MODE 1
@@ -1534,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1534 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); 1539 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1535 u16 tcam_data; 1540 u16 tcam_data;
1536 1541
1537 tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off]; 1542 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1538 if (tcam_data != data) 1543 if (tcam_data != data)
1539 return false; 1544 return false;
1540 return true; 1545 return true;
@@ -2609,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2609 /* place holders only - no ports */ 2614 /* place holders only - no ports */
2610 mvpp2_prs_mac_drop_all_set(priv, 0, false); 2615 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2611 mvpp2_prs_mac_promisc_set(priv, 0, false); 2616 mvpp2_prs_mac_promisc_set(priv, 0, false);
2612 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 2617 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
2613 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 2618 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
2614} 2619}
2615 2620
2616/* Set default entries for various types of dsa packets */ 2621/* Set default entries for various types of dsa packets */
@@ -3391,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3391 struct mvpp2_prs_entry *pe; 3396 struct mvpp2_prs_entry *pe;
3392 int tid; 3397 int tid;
3393 3398
3394 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3399 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3395 if (!pe) 3400 if (!pe)
3396 return NULL; 3401 return NULL;
3397 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3402 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -3453,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3453 if (tid < 0) 3458 if (tid < 0)
3454 return tid; 3459 return tid;
3455 3460
3456 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3461 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3457 if (!pe) 3462 if (!pe)
3458 return -ENOMEM; 3463 return -ENOMEM;
3459 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3464 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -5321,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5321 struct mvpp2_txq_pcpu_buf *tx_buf = 5326 struct mvpp2_txq_pcpu_buf *tx_buf =
5322 txq_pcpu->buffs + txq_pcpu->txq_get_index; 5327 txq_pcpu->buffs + txq_pcpu->txq_get_index;
5323 5328
5324 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 5329 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5325 tx_buf->size, DMA_TO_DEVICE); 5330 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5331 tx_buf->size, DMA_TO_DEVICE);
5326 if (tx_buf->skb) 5332 if (tx_buf->skb)
5327 dev_kfree_skb_any(tx_buf->skb); 5333 dev_kfree_skb_any(tx_buf->skb);
5328 5334
@@ -5609,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
5609 5615
5610 txq_pcpu->tso_headers = 5616 txq_pcpu->tso_headers =
5611 dma_alloc_coherent(port->dev->dev.parent, 5617 dma_alloc_coherent(port->dev->dev.parent,
5612 MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE, 5618 txq_pcpu->size * TSO_HEADER_SIZE,
5613 &txq_pcpu->tso_headers_dma, 5619 &txq_pcpu->tso_headers_dma,
5614 GFP_KERNEL); 5620 GFP_KERNEL);
5615 if (!txq_pcpu->tso_headers) 5621 if (!txq_pcpu->tso_headers)
@@ -5623,7 +5629,7 @@ cleanup:
5623 kfree(txq_pcpu->buffs); 5629 kfree(txq_pcpu->buffs);
5624 5630
5625 dma_free_coherent(port->dev->dev.parent, 5631 dma_free_coherent(port->dev->dev.parent,
5626 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5632 txq_pcpu->size * TSO_HEADER_SIZE,
5627 txq_pcpu->tso_headers, 5633 txq_pcpu->tso_headers,
5628 txq_pcpu->tso_headers_dma); 5634 txq_pcpu->tso_headers_dma);
5629 } 5635 }
@@ -5647,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
5647 kfree(txq_pcpu->buffs); 5653 kfree(txq_pcpu->buffs);
5648 5654
5649 dma_free_coherent(port->dev->dev.parent, 5655 dma_free_coherent(port->dev->dev.parent,
5650 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5656 txq_pcpu->size * TSO_HEADER_SIZE,
5651 txq_pcpu->tso_headers, 5657 txq_pcpu->tso_headers,
5652 txq_pcpu->tso_headers_dma); 5658 txq_pcpu->tso_headers_dma);
5653 } 5659 }
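The three hunks above make the TSO-header region be sized identically at allocation, at the error-path free and at teardown (txq_pcpu->size * TSO_HEADER_SIZE); before the fix the free paths used an unrelated constant. One way to keep such a size from drifting apart is to compute it in a single helper, sketched here in plain userspace C; the names are illustrative and malloc()/free() stand in for the coherent DMA API:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TSO_HEADER_SIZE 256u

struct txq {
    unsigned int size;      /* descriptors per queue */
    uint8_t *tso_headers;   /* stands in for the coherent region */
};

/* Single source of truth for the region size. */
static size_t tso_region_bytes(const struct txq *q)
{
    return (size_t)q->size * TSO_HEADER_SIZE;
}

static int txq_init(struct txq *q, unsigned int size)
{
    q->size = size;
    q->tso_headers = malloc(tso_region_bytes(q));  /* ~ dma_alloc_coherent() */
    return q->tso_headers ? 0 : -1;
}

static void txq_deinit(struct txq *q)
{
    /* dma_free_coherent() would take tso_region_bytes(q) here too */
    free(q->tso_headers);
    q->tso_headers = NULL;
}

int main(void)
{
    struct txq q;

    if (txq_init(&q, 64))
        return 1;
    printf("allocated %zu bytes of TSO headers\n", tso_region_bytes(&q));
    txq_deinit(&q);
    return 0;
}
```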
@@ -6212,12 +6218,15 @@ static inline void
6212tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 6218tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
6213 struct mvpp2_tx_desc *desc) 6219 struct mvpp2_tx_desc *desc)
6214{ 6220{
6221 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6222
6215 dma_addr_t buf_dma_addr = 6223 dma_addr_t buf_dma_addr =
6216 mvpp2_txdesc_dma_addr_get(port, desc); 6224 mvpp2_txdesc_dma_addr_get(port, desc);
6217 size_t buf_sz = 6225 size_t buf_sz =
6218 mvpp2_txdesc_size_get(port, desc); 6226 mvpp2_txdesc_size_get(port, desc);
6219 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 6227 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
6220 buf_sz, DMA_TO_DEVICE); 6228 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6229 buf_sz, DMA_TO_DEVICE);
6221 mvpp2_txq_desc_put(txq); 6230 mvpp2_txq_desc_put(txq);
6222} 6231}
6223 6232
@@ -6490,7 +6499,7 @@ out:
6490 } 6499 }
6491 6500
6492 /* Finalize TX processing */ 6501 /* Finalize TX processing */
6493 if (txq_pcpu->count >= txq->done_pkts_coal) 6502 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
6494 mvpp2_txq_done(port, txq, txq_pcpu); 6503 mvpp2_txq_done(port, txq, txq_pcpu);
6495 6504
6496 /* Set the timer in case not all frags were processed */ 6505 /* Set the timer in case not all frags were processed */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ff60cf7342ca..fc281712869b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
77 list_add_tail(&delayed_event->list, &priv->waiting_events_list); 77 list_add_tail(&delayed_event->list, &priv->waiting_events_list);
78} 78}
79 79
80static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, 80static void delayed_event_release(struct mlx5_device_context *dev_ctx,
81 struct mlx5_core_dev *dev, 81 struct mlx5_priv *priv)
82 struct mlx5_priv *priv)
83{ 82{
83 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
84 struct mlx5_delayed_event *de; 84 struct mlx5_delayed_event *de;
85 struct mlx5_delayed_event *n; 85 struct mlx5_delayed_event *n;
86 struct list_head temp;
86 87
87 /* stop delaying events */ 88 INIT_LIST_HEAD(&temp);
88 priv->is_accum_events = false; 89
90 spin_lock_irq(&priv->ctx_lock);
89 91
90 /* fire all accumulated events before new event comes */ 92 priv->is_accum_events = false;
91 list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { 93 list_splice_init(&priv->waiting_events_list, &temp);
94 if (!dev_ctx->context)
95 goto out;
 96 list_for_each_entry_safe(de, n, &temp, list)
92 dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); 97 dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
98
99out:
100 spin_unlock_irq(&priv->ctx_lock);
101
102 list_for_each_entry_safe(de, n, &temp, list) {
93 list_del(&de->list); 103 list_del(&de->list);
94 kfree(de); 104 kfree(de);
95 } 105 }
96} 106}
97 107
98static void cleanup_delayed_evets(struct mlx5_priv *priv) 108/* accumulating events that can come after mlx5_ib calls to
 109 * ib_register_device, until that interface is added to the events list.
110 */
111static void delayed_event_start(struct mlx5_priv *priv)
99{ 112{
100 struct mlx5_delayed_event *de;
101 struct mlx5_delayed_event *n;
102
103 spin_lock_irq(&priv->ctx_lock); 113 spin_lock_irq(&priv->ctx_lock);
104 priv->is_accum_events = false; 114 priv->is_accum_events = true;
105 list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
106 list_del(&de->list);
107 kfree(de);
108 }
109 spin_unlock_irq(&priv->ctx_lock); 115 spin_unlock_irq(&priv->ctx_lock);
110} 116}
111 117
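delayed_event_start() and delayed_event_release() above bracket interface registration: while is_accum_events is set, incoming events are queued, and the release path splices the queue onto a private list under the lock, replays it to the interface if one was actually added, and frees the entries after dropping the lock. A compact userspace model of that splice-and-replay pattern, built with -pthread; the mutex, list and names are illustrative stand-ins for the mlx5 spinlock and list_head machinery, and ordering is simplified (the driver keeps FIFO order with list_add_tail):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
    int code;
    struct event *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *pending;    /* waiting_events_list analogue */
static int accumulating;         /* is_accum_events analogue */

static void event_start(void)
{
    pthread_mutex_lock(&lock);
    accumulating = 1;
    pthread_mutex_unlock(&lock);
}

static void event_add(int code)
{
    struct event *e = malloc(sizeof(*e));

    if (!e)
        return;
    e->code = code;
    pthread_mutex_lock(&lock);
    if (accumulating) {
        e->next = pending;       /* queue while registration runs (LIFO here) */
        pending = e;
    } else {
        free(e);                 /* the driver would deliver it directly */
    }
    pthread_mutex_unlock(&lock);
}

static void event_release(int have_context)
{
    struct event *list, *e;

    pthread_mutex_lock(&lock);
    accumulating = 0;
    list = pending;              /* splice the queue off under the lock */
    pending = NULL;
    if (have_context)            /* ~ dev_ctx->context != NULL */
        for (e = list; e; e = e->next)
            printf("replaying event %d\n", e->code);
    pthread_mutex_unlock(&lock);

    while (list) {               /* free outside the lock */
        e = list;
        list = list->next;
        free(e);
    }
}

int main(void)
{
    event_start();
    event_add(1);
    event_add(2);
    event_release(1);
    return 0;
}
```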
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
122 return; 128 return;
123 129
124 dev_ctx->intf = intf; 130 dev_ctx->intf = intf;
125 /* accumulating events that can come after mlx5_ib calls to
126 * ib_register_device, till adding that interface to the events list.
127 */
128 131
129 priv->is_accum_events = true; 132 delayed_event_start(priv);
130 133
131 dev_ctx->context = intf->add(dev); 134 dev_ctx->context = intf->add(dev);
132 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 135 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
137 spin_lock_irq(&priv->ctx_lock); 140 spin_lock_irq(&priv->ctx_lock);
138 list_add_tail(&dev_ctx->list, &priv->ctx_list); 141 list_add_tail(&dev_ctx->list, &priv->ctx_list);
139 142
140 fire_delayed_event_locked(dev_ctx, dev, priv);
141
142#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 143#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
143 if (dev_ctx->intf->pfault) { 144 if (dev_ctx->intf->pfault) {
144 if (priv->pfault) { 145 if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
150 } 151 }
151#endif 152#endif
152 spin_unlock_irq(&priv->ctx_lock); 153 spin_unlock_irq(&priv->ctx_lock);
153 } else {
154 kfree(dev_ctx);
155 /* delete all accumulated events */
156 cleanup_delayed_evets(priv);
157 } 154 }
155
156 delayed_event_release(dev_ctx, priv);
157
158 if (!dev_ctx->context)
159 kfree(dev_ctx);
158} 160}
159 161
160static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, 162static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
205 if (!dev_ctx) 207 if (!dev_ctx)
206 return; 208 return;
207 209
210 delayed_event_start(priv);
208 if (intf->attach) { 211 if (intf->attach) {
209 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) 212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
210 return; 213 goto out;
211 intf->attach(dev, dev_ctx->context); 214 intf->attach(dev, dev_ctx->context);
212 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 215 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
213 } else { 216 } else {
214 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) 217 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
215 return; 218 goto out;
216 dev_ctx->context = intf->add(dev); 219 dev_ctx->context = intf->add(dev);
217 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 220 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
218 } 221 }
222
223out:
224 delayed_event_release(dev_ctx, priv);
219} 225}
220 226
221void mlx5_attach_device(struct mlx5_core_dev *dev) 227void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
414 if (priv->is_accum_events) 420 if (priv->is_accum_events)
415 add_delayed_event(priv, dev, event, param); 421 add_delayed_event(priv, dev, event, param);
416 422
423 /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
424 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
 425 * ADDED or ATTACHED bit is set.
426 */
417 list_for_each_entry(dev_ctx, &priv->ctx_list, list) 427 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
418 if (dev_ctx->intf->event) 428 if (dev_ctx->intf->event &&
429 (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
430 test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
419 dev_ctx->intf->event(dev, dev_ctx->context, event, param); 431 dev_ctx->intf->event(dev, dev_ctx->context, event, param);
420 432
421 spin_unlock_irqrestore(&priv->ctx_lock, flags); 433 spin_unlock_irqrestore(&priv->ctx_lock, flags);
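The last hunk stops notifying interfaces that are registered but currently detached: an event callback runs only when the context's ADDED or ATTACHED bit is set. A minimal sketch of dispatching through a list while filtering on per-entry state flags (flag values and names are illustrative):

```c
#include <stdio.h>

#define IF_ADDED    (1u << 0)
#define IF_ATTACHED (1u << 1)

struct dev_ctx {
    const char *name;
    unsigned long state;
    void (*event)(const struct dev_ctx *ctx, int event);
};

static void print_event(const struct dev_ctx *ctx, int event)
{
    printf("%s got event %d\n", ctx->name, event);
}

static void core_event(struct dev_ctx *ctxs, int n, int event)
{
    for (int i = 0; i < n; i++) {
        struct dev_ctx *ctx = &ctxs[i];

        /* Skip contexts that are listed but currently detached. */
        if (ctx->event &&
            (ctx->state & (IF_ADDED | IF_ATTACHED)))
            ctx->event(ctx, event);
    }
}

int main(void)
{
    struct dev_ctx ctxs[] = {
        { "eth",  IF_ADDED,    print_event },
        { "ib",   0,           print_event },  /* detached: not notified */
        { "fpga", IF_ATTACHED, print_event },
    };

    core_event(ctxs, 3, 42);
    return 0;
}
```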
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c1d384fca4dc..51c4cc00a186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -41,6 +41,11 @@
41#define MLX5E_CEE_STATE_UP 1 41#define MLX5E_CEE_STATE_UP 1
42#define MLX5E_CEE_STATE_DOWN 0 42#define MLX5E_CEE_STATE_DOWN 0
43 43
44enum {
45 MLX5E_VENDOR_TC_GROUP_NUM = 7,
46 MLX5E_LOWEST_PRIO_GROUP = 0,
47};
48
44/* If dcbx mode is non-host set the dcbx mode to host. 49/* If dcbx mode is non-host set the dcbx mode to host.
45 */ 50 */
46static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, 51static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
85{ 90{
86 struct mlx5e_priv *priv = netdev_priv(netdev); 91 struct mlx5e_priv *priv = netdev_priv(netdev);
87 struct mlx5_core_dev *mdev = priv->mdev; 92 struct mlx5_core_dev *mdev = priv->mdev;
93 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
94 bool is_tc_group_6_exist = false;
95 bool is_zero_bw_ets_tc = false;
88 int err = 0; 96 int err = 0;
89 int i; 97 int i;
90 98
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
96 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); 104 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
97 if (err) 105 if (err)
98 return err; 106 return err;
99 }
100 107
101 for (i = 0; i < ets->ets_cap; i++) { 108 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
109 if (err)
110 return err;
111
102 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); 112 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
103 if (err) 113 if (err)
104 return err; 114 return err;
115
116 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
117 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
118 is_zero_bw_ets_tc = true;
119
120 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
121 is_tc_group_6_exist = true;
122 }
123
 124 /* Report 0% ETS TC if one exists */
125 if (is_zero_bw_ets_tc) {
126 for (i = 0; i < ets->ets_cap; i++)
127 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
128 ets->tc_tx_bw[i] = 0;
129 }
130
 131 /* Update tc_tsa based on fw setting */
132 for (i = 0; i < ets->ets_cap; i++) {
105 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) 133 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
106 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; 134 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
135 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
136 !is_tc_group_6_exist)
137 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
107 } 138 }
108
109 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); 139 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
110 140
111 return err; 141 return err;
112} 142}
113 143
114enum {
115 MLX5E_VENDOR_TC_GROUP_NUM = 7,
116 MLX5E_ETS_TC_GROUP_NUM = 0,
117};
118
119static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) 144static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
120{ 145{
121 bool any_tc_mapped_to_ets = false; 146 bool any_tc_mapped_to_ets = false;
147 bool ets_zero_bw = false;
122 int strict_group; 148 int strict_group;
123 int i; 149 int i;
124 150
125 for (i = 0; i <= max_tc; i++) 151 for (i = 0; i <= max_tc; i++) {
126 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 152 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
127 any_tc_mapped_to_ets = true; 153 any_tc_mapped_to_ets = true;
154 if (!ets->tc_tx_bw[i])
155 ets_zero_bw = true;
156 }
157 }
128 158
129 strict_group = any_tc_mapped_to_ets ? 1 : 0; 159 /* strict group has higher priority than ets group */
160 strict_group = MLX5E_LOWEST_PRIO_GROUP;
161 if (any_tc_mapped_to_ets)
162 strict_group++;
163 if (ets_zero_bw)
164 strict_group++;
130 165
131 for (i = 0; i <= max_tc; i++) { 166 for (i = 0; i <= max_tc; i++) {
132 switch (ets->tc_tsa[i]) { 167 switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
137 tc_group[i] = strict_group++; 172 tc_group[i] = strict_group++;
138 break; 173 break;
139 case IEEE_8021QAZ_TSA_ETS: 174 case IEEE_8021QAZ_TSA_ETS:
140 tc_group[i] = MLX5E_ETS_TC_GROUP_NUM; 175 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
176 if (ets->tc_tx_bw[i] && ets_zero_bw)
177 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
141 break; 178 break;
142 } 179 }
143 } 180 }
@@ -146,9 +183,23 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
146static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, 183static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
147 u8 *tc_group, int max_tc) 184 u8 *tc_group, int max_tc)
148{ 185{
186 int bw_for_ets_zero_bw_tc = 0;
187 int last_ets_zero_bw_tc = -1;
188 int num_ets_zero_bw = 0;
149 int i; 189 int i;
150 190
151 for (i = 0; i <= max_tc; i++) { 191 for (i = 0; i <= max_tc; i++) {
192 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
193 !ets->tc_tx_bw[i]) {
194 num_ets_zero_bw++;
195 last_ets_zero_bw_tc = i;
196 }
197 }
198
199 if (num_ets_zero_bw)
200 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
201
202 for (i = 0; i <= max_tc; i++) {
152 switch (ets->tc_tsa[i]) { 203 switch (ets->tc_tsa[i]) {
153 case IEEE_8021QAZ_TSA_VENDOR: 204 case IEEE_8021QAZ_TSA_VENDOR:
154 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 205 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
157 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 208 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
158 break; 209 break;
159 case IEEE_8021QAZ_TSA_ETS: 210 case IEEE_8021QAZ_TSA_ETS:
160 tc_tx_bw[i] = ets->tc_tx_bw[i]; 211 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
212 ets->tc_tx_bw[i] :
213 bw_for_ets_zero_bw_tc;
161 break; 214 break;
162 } 215 }
163 } 216 }
217
218 /* Make sure the total bw for ets zero bw group is 100% */
219 if (last_ets_zero_bw_tc != -1)
220 tc_tx_bw[last_ets_zero_bw_tc] +=
221 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
164} 222}
165 223
 224/* If any TC has ETS BW 0:
 225 * Set ETS group #1 for all ETS TCs with nonzero BW; their sum must be 100%.
 226 * Set group #0 for all ETS TCs with zero BW and
 227 * split the 100% BW equally between them.
 228 * Report both group #0 and #1 as ETS type.
 229 * All TCs in group #0 will be reported with 0% BW.
 230 */
166int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) 231int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
167{ 232{
168 struct mlx5_core_dev *mdev = priv->mdev; 233 struct mlx5_core_dev *mdev = priv->mdev;
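mlx5e_build_tc_tx_bw() above gives every zero-BW ETS TC an equal share of MLX5E_MAX_BW_ALLOC and adds the integer-division remainder to the last such TC, so the shares still sum to exactly 100%; the nonzero-BW TCs live in a different ETS group, so only the zero-BW group has to add up. A runnable sketch of just that split (array contents are illustrative):

```c
#include <stdio.h>

#define MAX_BW_ALLOC 100   /* stands in for MLX5E_MAX_BW_ALLOC */

/* Spread 100% equally over the zero-BW TCs; the remainder goes to the
 * last one so the shares add up to exactly 100.
 */
static void split_zero_bw(unsigned int *tc_bw, int ntc)
{
    int num_zero = 0, last_zero = -1;

    for (int i = 0; i < ntc; i++) {
        if (!tc_bw[i]) {
            num_zero++;
            last_zero = i;
        }
    }
    if (!num_zero)
        return;

    for (int i = 0; i < ntc; i++)
        if (!tc_bw[i])
            tc_bw[i] = MAX_BW_ALLOC / num_zero;
    tc_bw[last_zero] += MAX_BW_ALLOC % num_zero;
}

int main(void)
{
    unsigned int bw[8] = { 30, 0, 0, 0, 70, 0, 0, 0 };  /* six zero-BW TCs */

    split_zero_bw(bw, 8);
    for (int i = 0; i < 8; i++)
        printf("tc%d: %u%%\n", i, bw[i]);  /* zeros become 16,16,16,16,16,20 */
    return 0;
}
```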
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
188 return err; 253 return err;
189 254
190 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); 255 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
191
192 return err; 256 return err;
193} 257}
194 258
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
209 } 273 }
210 274
211 /* Validate Bandwidth Sum */ 275 /* Validate Bandwidth Sum */
212 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 276 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
213 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { 277 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
214 if (!ets->tc_tx_bw[i]) {
215 netdev_err(netdev,
216 "Failed to validate ETS: BW 0 is illegal\n");
217 return -EINVAL;
218 }
219
220 bw_sum += ets->tc_tx_bw[i]; 278 bw_sum += ets->tc_tx_bw[i];
221 }
222 }
223 279
224 if (bw_sum != 0 && bw_sum != 100) { 280 if (bw_sum != 0 && bw_sum != 100) {
225 netdev_err(netdev, 281 netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
533static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, 589static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
534 int pgid, u8 *bw_pct) 590 int pgid, u8 *bw_pct)
535{ 591{
536 struct mlx5e_priv *priv = netdev_priv(netdev); 592 struct ieee_ets ets;
537 struct mlx5_core_dev *mdev = priv->mdev;
538 593
539 if (pgid >= CEE_DCBX_MAX_PGS) { 594 if (pgid >= CEE_DCBX_MAX_PGS) {
540 netdev_err(netdev, 595 netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
542 return; 597 return;
543 } 598 }
544 599
545 if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct)) 600 mlx5e_dcbnl_ieee_getets(netdev, &ets);
546 *bw_pct = 0; 601 *bw_pct = ets.tc_tx_bw[pgid];
547} 602}
548 603
549static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev, 604static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
739 ets.prio_tc[i] = i; 794 ets.prio_tc[i] = i;
740 } 795 }
741 796
742 memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
743
744 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ 797 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
745 ets.prio_tc[0] = 1; 798 ets.prio_tc[0] = 1;
746 ets.prio_tc[1] = 0; 799 ets.prio_tc[1] = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1aa2028ed995..9ba1f72060aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
78}; 78};
79 79
80struct mlx5e_tc_flow_parse_attr { 80struct mlx5e_tc_flow_parse_attr {
81 struct ip_tunnel_info tun_info;
81 struct mlx5_flow_spec spec; 82 struct mlx5_flow_spec spec;
82 int num_mod_hdr_actions; 83 int num_mod_hdr_actions;
83 void *mod_hdr_actions; 84 void *mod_hdr_actions;
85 int mirred_ifindex;
84}; 86};
85 87
86enum { 88enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
322static void mlx5e_detach_encap(struct mlx5e_priv *priv, 324static void mlx5e_detach_encap(struct mlx5e_priv *priv,
323 struct mlx5e_tc_flow *flow); 325 struct mlx5e_tc_flow *flow);
324 326
327static int mlx5e_attach_encap(struct mlx5e_priv *priv,
328 struct ip_tunnel_info *tun_info,
329 struct net_device *mirred_dev,
330 struct net_device **encap_dev,
331 struct mlx5e_tc_flow *flow);
332
325static struct mlx5_flow_handle * 333static struct mlx5_flow_handle *
326mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 334mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
327 struct mlx5e_tc_flow_parse_attr *parse_attr, 335 struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
329{ 337{
330 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 338 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
331 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 339 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
332 struct mlx5_flow_handle *rule; 340 struct net_device *out_dev, *encap_dev = NULL;
341 struct mlx5_flow_handle *rule = NULL;
342 struct mlx5e_rep_priv *rpriv;
343 struct mlx5e_priv *out_priv;
333 int err; 344 int err;
334 345
346 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
347 out_dev = __dev_get_by_index(dev_net(priv->netdev),
348 attr->parse_attr->mirred_ifindex);
349 err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
350 out_dev, &encap_dev, flow);
351 if (err) {
352 rule = ERR_PTR(err);
353 if (err != -EAGAIN)
354 goto err_attach_encap;
355 }
356 out_priv = netdev_priv(encap_dev);
357 rpriv = out_priv->ppriv;
358 attr->out_rep = rpriv->rep;
359 }
360
335 err = mlx5_eswitch_add_vlan_action(esw, attr); 361 err = mlx5_eswitch_add_vlan_action(esw, attr);
336 if (err) { 362 if (err) {
337 rule = ERR_PTR(err); 363 rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
347 } 373 }
348 } 374 }
349 375
 350 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); 376 /* we get here if (1) there was no error (rule is NULL) or
 351 if (IS_ERR(rule)) 377 * (2) there is an encap action and we got -EAGAIN (no valid neigh)
352 goto err_add_rule; 378 */
353 379 if (rule != ERR_PTR(-EAGAIN)) {
380 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
381 if (IS_ERR(rule))
382 goto err_add_rule;
383 }
354 return rule; 384 return rule;
355 385
356err_add_rule: 386err_add_rule:
@@ -361,6 +391,7 @@ err_mod_hdr:
361err_add_vlan: 391err_add_vlan:
362 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) 392 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
363 mlx5e_detach_encap(priv, flow); 393 mlx5e_detach_encap(priv, flow);
394err_attach_encap:
364 return rule; 395 return rule;
365} 396}
366 397
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
389void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, 420void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
390 struct mlx5e_encap_entry *e) 421 struct mlx5e_encap_entry *e)
391{ 422{
423 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
424 struct mlx5_esw_flow_attr *esw_attr;
392 struct mlx5e_tc_flow *flow; 425 struct mlx5e_tc_flow *flow;
393 int err; 426 int err;
394 427
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
404 mlx5e_rep_queue_neigh_stats_work(priv); 437 mlx5e_rep_queue_neigh_stats_work(priv);
405 438
406 list_for_each_entry(flow, &e->flows, encap) { 439 list_for_each_entry(flow, &e->flows, encap) {
407 flow->esw_attr->encap_id = e->encap_id; 440 esw_attr = flow->esw_attr;
408 flow->rule = mlx5e_tc_add_fdb_flow(priv, 441 esw_attr->encap_id = e->encap_id;
409 flow->esw_attr->parse_attr, 442 flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
410 flow);
411 if (IS_ERR(flow->rule)) { 443 if (IS_ERR(flow->rule)) {
412 err = PTR_ERR(flow->rule); 444 err = PTR_ERR(flow->rule);
413 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", 445 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
421void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, 453void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
422 struct mlx5e_encap_entry *e) 454 struct mlx5e_encap_entry *e)
423{ 455{
456 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
424 struct mlx5e_tc_flow *flow; 457 struct mlx5e_tc_flow *flow;
425 struct mlx5_fc *counter;
426 458
427 list_for_each_entry(flow, &e->flows, encap) { 459 list_for_each_entry(flow, &e->flows, encap) {
428 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { 460 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
429 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; 461 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
430 counter = mlx5_flow_rule_counter(flow->rule); 462 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
431 mlx5_del_flow_rules(flow->rule);
432 mlx5_fc_destroy(priv->mdev, counter);
433 } 463 }
434 } 464 }
435 465
@@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1942 1972
1943 if (is_tcf_mirred_egress_redirect(a)) { 1973 if (is_tcf_mirred_egress_redirect(a)) {
1944 int ifindex = tcf_mirred_ifindex(a); 1974 int ifindex = tcf_mirred_ifindex(a);
1945 struct net_device *out_dev, *encap_dev = NULL; 1975 struct net_device *out_dev;
1946 struct mlx5e_priv *out_priv; 1976 struct mlx5e_priv *out_priv;
1947 1977
1948 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); 1978 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1955 rpriv = out_priv->ppriv; 1985 rpriv = out_priv->ppriv;
1956 attr->out_rep = rpriv->rep; 1986 attr->out_rep = rpriv->rep;
1957 } else if (encap) { 1987 } else if (encap) {
1958 err = mlx5e_attach_encap(priv, info, 1988 parse_attr->mirred_ifindex = ifindex;
1959 out_dev, &encap_dev, flow); 1989 parse_attr->tun_info = *info;
1960 if (err && err != -EAGAIN) 1990 attr->parse_attr = parse_attr;
1961 return err;
1962 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | 1991 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1963 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1992 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1964 MLX5_FLOW_CONTEXT_ACTION_COUNT; 1993 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1965 out_priv = netdev_priv(encap_dev); 1994 /* attr->out_rep is resolved when we handle encap */
1966 rpriv = out_priv->ppriv;
1967 attr->out_rep = rpriv->rep;
1968 attr->parse_attr = parse_attr;
1969 } else { 1995 } else {
1970 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", 1996 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
1971 priv->netdev->name, out_dev->name); 1997 priv->netdev->name, out_dev->name);
@@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
2047 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 2073 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
2048 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); 2074 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
2049 if (err < 0) 2075 if (err < 0)
2050 goto err_handle_encap_flow; 2076 goto err_free;
2051 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); 2077 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
2052 } else { 2078 } else {
2053 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); 2079 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
2058 2084
2059 if (IS_ERR(flow->rule)) { 2085 if (IS_ERR(flow->rule)) {
2060 err = PTR_ERR(flow->rule); 2086 err = PTR_ERR(flow->rule);
2061 goto err_free; 2087 if (err != -EAGAIN)
2088 goto err_free;
2062 } 2089 }
2063 2090
2064 flow->flags |= MLX5E_TC_FLOW_OFFLOADED; 2091 if (err != -EAGAIN)
2092 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2093
2065 err = rhashtable_insert_fast(&tc->ht, &flow->node, 2094 err = rhashtable_insert_fast(&tc->ht, &flow->node,
2066 tc->ht_params); 2095 tc->ht_params);
2067 if (err) 2096 if (err)
@@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
2075err_del_rule: 2104err_del_rule:
2076 mlx5e_tc_del_flow(priv, flow); 2105 mlx5e_tc_del_flow(priv, flow);
2077 2106
2078err_handle_encap_flow:
2079 if (err == -EAGAIN) {
2080 err = rhashtable_insert_fast(&tc->ht, &flow->node,
2081 tc->ht_params);
2082 if (err)
2083 mlx5e_tc_del_flow(priv, flow);
2084 else
2085 return 0;
2086 }
2087
2088err_free: 2107err_free:
2089 kvfree(parse_attr); 2108 kvfree(parse_attr);
2090 kfree(flow); 2109 kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8aea0a065e56..db86e1506c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
356void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) 356void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
357{ 357{
358 struct mlx5_core_health *health = &dev->priv.health; 358 struct mlx5_core_health *health = &dev->priv.health;
359 unsigned long flags;
359 360
360 spin_lock(&health->wq_lock); 361 spin_lock_irqsave(&health->wq_lock, flags);
361 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); 362 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
362 spin_unlock(&health->wq_lock); 363 spin_unlock_irqrestore(&health->wq_lock, flags);
363 cancel_delayed_work_sync(&dev->priv.health.recover_work); 364 cancel_delayed_work_sync(&dev->priv.health.recover_work);
364} 365}
365 366
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1975d4388d4f..e07061f565d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
677} 677}
678EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); 678EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
679 679
680int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
681 u8 tc, u8 *tc_group)
682{
683 u32 out[MLX5_ST_SZ_DW(qetc_reg)];
684 void *ets_tcn_conf;
685 int err;
686
687 err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
688 if (err)
689 return err;
690
691 ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
692 tc_configuration[tc]);
693
694 *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
695 group);
696
697 return 0;
698}
699EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
700
680int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) 701int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
681{ 702{
682 u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; 703 u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 12c3a4449120..c0dcfa05b077 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -294,7 +294,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
294 write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size; 294 write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
295 mlxsw_i2c_set_slave_addr(tran_buf, off); 295 mlxsw_i2c_set_slave_addr(tran_buf, off);
296 memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox + 296 memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
297 chunk_size * i, chunk_size); 297 MLXSW_I2C_BLK_MAX * i, chunk_size);
298 298
299 j = 0; 299 j = 0;
300 end = jiffies + timeout; 300 end = jiffies + timeout;
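The mlxsw i2c fix above changes the source offset from chunk_size * i to MLXSW_I2C_BLK_MAX * i: every chunk except possibly the last is a full block, so the offset must advance by the fixed block size rather than by the current (possibly shorter) chunk length. A runnable sketch of chunking with a fixed stride; the block size and buffer are illustrative:

```c
#include <stdio.h>
#include <string.h>

#define BLK_MAX 32   /* stands in for MLXSW_I2C_BLK_MAX */

/* Copy 'len' bytes of 'src' in BLK_MAX-sized chunks; the source offset
 * always advances by the full block size, even though the final chunk
 * may be shorter.
 */
static void send_chunks(const unsigned char *src, size_t len)
{
    size_t num = len / BLK_MAX + !!(len % BLK_MAX);

    for (size_t i = 0; i < num; i++) {
        size_t chunk = (i == num - 1 && len % BLK_MAX) ? len % BLK_MAX
                                                       : BLK_MAX;
        unsigned char buf[BLK_MAX];

        memcpy(buf, src + BLK_MAX * i, chunk);  /* fixed stride, not chunk*i */
        printf("chunk %zu: %zu bytes from offset %zu\n",
               i, chunk, BLK_MAX * i);
    }
}

int main(void)
{
    unsigned char mbox[70] = { 0 };

    send_chunks(mbox, sizeof(mbox));   /* 32 + 32 + 6 bytes */
    return 0;
}
```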
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 4afc8486eb9a..5acfbe5b8b9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5827,6 +5827,29 @@ MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
5827 */ 5827 */
5828MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16); 5828MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
5829 5829
5830/* reg_mtmp_tee
5831 * Temperature Event Enable.
5832 * 0 - Do not generate event
5833 * 1 - Generate event
5834 * 2 - Generate single event
5835 * Access: RW
5836 */
5837MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
5838
5839#define MLXSW_REG_MTMP_THRESH_HI 0x348 /* 105 Celsius */
5840
5841/* reg_mtmp_temperature_threshold_hi
5842 * High threshold for Temperature Warning Event. In 0.125 Celsius.
5843 * Access: RW
5844 */
5845MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
5846
5847/* reg_mtmp_temperature_threshold_lo
5848 * Low threshold for Temperature Warning Event. In 0.125 Celsius.
5849 * Access: RW
5850 */
5851MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
5852
5830#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8 5853#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
5831 5854
5832/* reg_mtmp_sensor_name 5855/* reg_mtmp_sensor_name
@@ -5843,6 +5866,8 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
5843 mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); 5866 mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
5844 mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); 5867 mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
5845 mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); 5868 mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
5869 mlxsw_reg_mtmp_temperature_threshold_hi_set(payload,
5870 MLXSW_REG_MTMP_THRESH_HI);
5846} 5871}
5847 5872
5848static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, 5873static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index db9750695dc7..8ea9320014ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
110 */ 110 */
111 if (!switchdev_port_same_parent_id(in_dev, out_dev)) 111 if (!switchdev_port_same_parent_id(in_dev, out_dev))
112 return -EOPNOTSUPP; 112 return -EOPNOTSUPP;
113 if (!nfp_netdev_is_nfp_repr(out_dev))
114 return -EOPNOTSUPP;
113 115
114 output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); 116 output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
115 if (!output->port) 117 if (!output->port)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 5efef8001edf..3256e5cbad27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
74 plat_dat->axi->axi_wr_osr_lmt--; 74 plat_dat->axi->axi_wr_osr_lmt--;
75 } 75 }
76 76
77 if (of_property_read_u32(np, "read,read-requests", 77 if (of_property_read_u32(np, "snps,read-requests",
78 &plat_dat->axi->axi_rd_osr_lmt)) { 78 &plat_dat->axi->axi_rd_osr_lmt)) {
79 /** 79 /**
80 * Since the register has a reset value of 1, if property 80 * Since the register has a reset value of 1, if property
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8a280b48e3a9..6383695004a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
150 plat->rx_queues_to_use = 1; 150 plat->rx_queues_to_use = 1;
151 plat->tx_queues_to_use = 1; 151 plat->tx_queues_to_use = 1;
152 152
153 /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
154 * to always set this, otherwise Queue will be classified as AVB
155 * (because MTL_QUEUE_AVB = 0).
156 */
157 plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
158 plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
159
153 rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); 160 rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
154 if (!rx_node) 161 if (!rx_node)
155 return; 162 return;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 5dea2063dbc8..0bcc07f346c3 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -197,8 +197,8 @@ static int ipvtap_init(void)
197{ 197{
198 int err; 198 int err;
199 199
200 err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap"); 200 err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
201 201 THIS_MODULE);
202 if (err) 202 if (err)
203 goto out1; 203 goto out1;
204 204
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index c2d0ea2fb019..cba5cb3b849a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -204,8 +204,8 @@ static int macvtap_init(void)
204{ 204{
205 int err; 205 int err;
206 206
207 err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap"); 207 err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
208 208 THIS_MODULE);
209 if (err) 209 if (err)
210 goto out1; 210 goto out1;
211 211
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 15cbcdba618a..4d02b27df044 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -681,9 +681,11 @@ static int m88e1116r_config_init(struct phy_device *phydev)
681 if (err < 0) 681 if (err < 0)
682 return err; 682 return err;
683 683
684 err = m88e1121_config_aneg_rgmii_delays(phydev); 684 if (phy_interface_is_rgmii(phydev)) {
685 if (err < 0) 685 err = m88e1121_config_aneg_rgmii_delays(phydev);
686 return err; 686 if (err < 0)
687 return err;
688 }
687 689
688 err = genphy_soft_reset(phydev); 690 err = genphy_soft_reset(phydev);
689 if (err < 0) 691 if (err < 0)
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 21b71ae947fd..6c0c84c33e1f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
517 &tap_proto, 0); 517 &tap_proto, 0);
518 if (!q) 518 if (!q)
519 goto err; 519 goto err;
520 if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
521 sk_free(&q->sk);
522 goto err;
523 }
520 524
521 RCU_INIT_POINTER(q->sock.wq, &q->wq); 525 RCU_INIT_POINTER(q->sock.wq, &q->wq);
522 init_waitqueue_head(&q->wq.wait); 526 init_waitqueue_head(&q->wq.wait);
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
540 if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) 544 if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
541 sock_set_flag(&q->sk, SOCK_ZEROCOPY); 545 sock_set_flag(&q->sk, SOCK_ZEROCOPY);
542 546
543 err = -ENOMEM;
544 if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
545 goto err_array;
546
547 err = tap_set_queue(tap, file, q); 547 err = tap_set_queue(tap, file, q);
548 if (err) 548 if (err) {
549 goto err_queue; 549 /* tap_sock_destruct() will take care of freeing skb_array */
550 goto err_put;
551 }
550 552
551 dev_put(tap->dev); 553 dev_put(tap->dev);
552 554
553 rtnl_unlock(); 555 rtnl_unlock();
554 return err; 556 return err;
555 557
556err_queue: 558err_put:
557 skb_array_cleanup(&q->skb_array);
558err_array:
559 sock_put(&q->sk); 559 sock_put(&q->sk);
560err: 560err:
561 if (tap) 561 if (tap)
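The tap_open() rework above allocates the skb_array before the queue is published via tap_set_queue(); once publication succeeds, the socket destructor owns the teardown, so the error path only drops the socket reference instead of freeing the array separately. A small userspace sketch of the same initialize-then-publish discipline (names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct queue {
    int *ring;                  /* stands in for the skb_array */
};

static void queue_destroy(struct queue *q)   /* ~ the socket destructor */
{
    free(q->ring);
    free(q);
}

static int publish(struct queue *q, int fail)  /* ~ tap_set_queue() */
{
    (void)q;
    return fail ? -1 : 0;
}

static struct queue *queue_open(int fail_publish)
{
    struct queue *q = calloc(1, sizeof(*q));

    if (!q)
        return NULL;
    q->ring = calloc(256, sizeof(*q->ring));   /* init before publishing */
    if (!q->ring) {
        free(q);
        return NULL;
    }
    if (publish(q, fail_publish)) {
        queue_destroy(q);       /* one teardown path frees everything */
        return NULL;
    }
    return q;
}

int main(void)
{
    struct queue *q = queue_open(0);

    if (!q)
        return 1;
    printf("queue published\n");
    queue_destroy(q);
    return 0;
}
```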
@@ -1032,6 +1032,8 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
1032 case TUNSETSNDBUF: 1032 case TUNSETSNDBUF:
1033 if (get_user(s, sp)) 1033 if (get_user(s, sp))
1034 return -EFAULT; 1034 return -EFAULT;
1035 if (s <= 0)
1036 return -EINVAL;
1035 1037
1036 q->sk.sk_sndbuf = s; 1038 q->sk.sk_sndbuf = s;
1037 return 0; 1039 return 0;
@@ -1249,8 +1251,8 @@ static int tap_list_add(dev_t major, const char *device_name)
1249 return 0; 1251 return 0;
1250} 1252}
1251 1253
1252int tap_create_cdev(struct cdev *tap_cdev, 1254int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
1253 dev_t *tap_major, const char *device_name) 1255 const char *device_name, struct module *module)
1254{ 1256{
1255 int err; 1257 int err;
1256 1258
@@ -1259,6 +1261,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
1259 goto out1; 1261 goto out1;
1260 1262
1261 cdev_init(tap_cdev, &tap_fops); 1263 cdev_init(tap_cdev, &tap_fops);
1264 tap_cdev->owner = module;
1262 err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); 1265 err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
1263 if (err) 1266 if (err)
1264 goto out2; 1267 goto out2;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e21bf90b819f..42bb820a56c9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1286 buflen += SKB_DATA_ALIGN(len + pad); 1286 buflen += SKB_DATA_ALIGN(len + pad);
1287 rcu_read_unlock(); 1287 rcu_read_unlock();
1288 1288
1289 alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1289 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 1290 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1290 return ERR_PTR(-ENOMEM); 1291 return ERR_PTR(-ENOMEM);
1291 1292
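tun_build_skb() now rounds the page-fragment offset up to SMP_CACHE_BYTES before refilling, so each buffer it carves out starts cache-line aligned. A short sketch of that align-then-carve step; the ALIGN_UP macro mirrors the kernel's power-of-two rounding and the cache-line size is an illustrative constant:

```c
#include <stdint.h>
#include <stdio.h>

#define CACHE_BYTES 64u
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t offset = 1000;   /* current offset into the page fragment */
    uint64_t buflen = 1536;   /* size of the buffer being carved out */

    offset = ALIGN_UP(offset, CACHE_BYTES);   /* 1000 -> 1024 */
    printf("buffer at offset %llu..%llu (cache-line aligned start)\n",
           (unsigned long long)offset,
           (unsigned long long)(offset + buflen));
    return 0;
}
```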
@@ -2028,7 +2029,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2028 if (!dev) 2029 if (!dev)
2029 return -ENOMEM; 2030 return -ENOMEM;
2030 err = dev_get_valid_name(net, dev, name); 2031 err = dev_get_valid_name(net, dev, name);
2031 if (err) 2032 if (err < 0)
2032 goto err_free_dev; 2033 goto err_free_dev;
2033 2034
2034 dev_net_set(dev, net); 2035 dev_net_set(dev, net);
@@ -2428,6 +2429,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2428 ret = -EFAULT; 2429 ret = -EFAULT;
2429 break; 2430 break;
2430 } 2431 }
2432 if (sndbuf <= 0) {
2433 ret = -EINVAL;
2434 break;
2435 }
2431 2436
2432 tun->sndbuf = sndbuf; 2437 tun->sndbuf = sndbuf;
2433 tun_set_sndbuf(tun); 2438 tun_set_sndbuf(tun);
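Both TUNSETSNDBUF paths (tap earlier, tun here) now reject non-positive values before storing them, since a zero or negative user-supplied sndbuf would later feed signed arithmetic in the transmit path. A minimal sketch of validating such an ioctl argument before it is stored anywhere (names are illustrative):

```c
#include <errno.h>
#include <stdio.h>

static int current_sndbuf = 65536;

/* Validate a user-supplied sndbuf before it is stored anywhere. */
static int set_sndbuf(int requested)
{
    if (requested <= 0)
        return -EINVAL;
    current_sndbuf = requested;
    return 0;
}

int main(void)
{
    printf("set 0      -> %d\n", set_sndbuf(0));        /* -EINVAL */
    printf("set -1     -> %d\n", set_sndbuf(-1));       /* -EINVAL */
    printf("set 262144 -> %d\n", set_sndbuf(262144));   /* 0 */
    printf("sndbuf = %d\n", current_sndbuf);
    return 0;
}
```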
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 52ea80bcd639..3e7a3ac3a362 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -561,6 +561,7 @@ static const struct driver_info wwan_info = {
561#define HP_VENDOR_ID 0x03f0 561#define HP_VENDOR_ID 0x03f0
562#define MICROSOFT_VENDOR_ID 0x045e 562#define MICROSOFT_VENDOR_ID 0x045e
563#define UBLOX_VENDOR_ID 0x1546 563#define UBLOX_VENDOR_ID 0x1546
564#define TPLINK_VENDOR_ID 0x2357
564 565
565static const struct usb_device_id products[] = { 566static const struct usb_device_id products[] = {
566/* BLACKLIST !! 567/* BLACKLIST !!
@@ -813,6 +814,13 @@ static const struct usb_device_id products[] = {
813 .driver_info = 0, 814 .driver_info = 0,
814}, 815},
815 816
817 /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
818{
819 USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
820 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
821 .driver_info = 0,
822},
823
816/* WHITELIST!!! 824/* WHITELIST!!!
817 * 825 *
818 * CDC Ether uses two interfaces, not necessarily consecutive. 826 * CDC Ether uses two interfaces, not necessarily consecutive.
@@ -864,6 +872,12 @@ static const struct usb_device_id products[] = {
864 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 872 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
865 .driver_info = (kernel_ulong_t)&wwan_info, 873 .driver_info = (kernel_ulong_t)&wwan_info,
866}, { 874}, {
875 /* Huawei ME906 and ME909 */
876 USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
877 USB_CDC_SUBCLASS_ETHERNET,
878 USB_CDC_PROTO_NONE),
879 .driver_info = (unsigned long)&wwan_info,
880}, {
867 /* ZTE modules */ 881 /* ZTE modules */
868 USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM, 882 USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
869 USB_CDC_SUBCLASS_ETHERNET, 883 USB_CDC_SUBCLASS_ETHERNET,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 941ece08ba78..d51d9abf7986 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -615,6 +615,7 @@ enum rtl8152_flags {
615#define VENDOR_ID_LENOVO 0x17ef 615#define VENDOR_ID_LENOVO 0x17ef
616#define VENDOR_ID_LINKSYS 0x13b1 616#define VENDOR_ID_LINKSYS 0x13b1
617#define VENDOR_ID_NVIDIA 0x0955 617#define VENDOR_ID_NVIDIA 0x0955
618#define VENDOR_ID_TPLINK 0x2357
618 619
619#define MCU_TYPE_PLA 0x0100 620#define MCU_TYPE_PLA 0x0100
620#define MCU_TYPE_USB 0x0000 621#define MCU_TYPE_USB 0x0000
@@ -5319,6 +5320,7 @@ static const struct usb_device_id rtl8152_table[] = {
5319 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, 5320 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
5320 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, 5321 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
5321 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, 5322 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
5323 {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
5322 {} 5324 {}
5323}; 5325};
5324 5326
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 63f749078a1f..0e3f8ed84660 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -346,7 +346,6 @@ out:
346fail: 346fail:
347 dev_put(dev); 347 dev_put(dev);
348 free_netdev(ndev); 348 free_netdev(ndev);
349 kfree(lapbeth);
350 goto out; 349 goto out;
351} 350}
352 351
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a3f5dc78353f..0aeeb233af78 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -550,6 +550,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
550 return IEEE80211_TKIP_IV_LEN; 550 return IEEE80211_TKIP_IV_LEN;
551 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 551 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
552 return IEEE80211_CCMP_HDR_LEN; 552 return IEEE80211_CCMP_HDR_LEN;
553 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
554 return IEEE80211_CCMP_256_HDR_LEN;
555 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
556 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
557 return IEEE80211_GCMP_HDR_LEN;
553 case HTT_RX_MPDU_ENCRYPT_WEP128: 558 case HTT_RX_MPDU_ENCRYPT_WEP128:
554 case HTT_RX_MPDU_ENCRYPT_WAPI: 559 case HTT_RX_MPDU_ENCRYPT_WAPI:
555 break; 560 break;
@@ -575,6 +580,11 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
575 return IEEE80211_TKIP_ICV_LEN; 580 return IEEE80211_TKIP_ICV_LEN;
576 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 581 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
577 return IEEE80211_CCMP_MIC_LEN; 582 return IEEE80211_CCMP_MIC_LEN;
583 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
584 return IEEE80211_CCMP_256_MIC_LEN;
585 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
586 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
587 return IEEE80211_GCMP_MIC_LEN;
578 case HTT_RX_MPDU_ENCRYPT_WEP128: 588 case HTT_RX_MPDU_ENCRYPT_WEP128:
579 case HTT_RX_MPDU_ENCRYPT_WAPI: 589 case HTT_RX_MPDU_ENCRYPT_WAPI:
580 break; 590 break;
@@ -1051,9 +1061,21 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1051 hdr = (void *)msdu->data; 1061 hdr = (void *)msdu->data;
1052 1062
1053 /* Tail */ 1063 /* Tail */
1054 if (status->flag & RX_FLAG_IV_STRIPPED) 1064 if (status->flag & RX_FLAG_IV_STRIPPED) {
1055 skb_trim(msdu, msdu->len - 1065 skb_trim(msdu, msdu->len -
1056 ath10k_htt_rx_crypto_tail_len(ar, enctype)); 1066 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1067 } else {
1068 /* MIC */
1069 if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
1070 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1071 skb_trim(msdu, msdu->len - 8);
1072
1073 /* ICV */
1074 if (status->flag & RX_FLAG_ICV_STRIPPED &&
1075 enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1076 skb_trim(msdu, msdu->len -
1077 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1078 }
1057 1079
1058 /* MMIC */ 1080 /* MMIC */
1059 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1081 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
@@ -1075,7 +1097,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1075static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1097static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1076 struct sk_buff *msdu, 1098 struct sk_buff *msdu,
1077 struct ieee80211_rx_status *status, 1099 struct ieee80211_rx_status *status,
1078 const u8 first_hdr[64]) 1100 const u8 first_hdr[64],
1101 enum htt_rx_mpdu_encrypt_type enctype)
1079{ 1102{
1080 struct ieee80211_hdr *hdr; 1103 struct ieee80211_hdr *hdr;
1081 struct htt_rx_desc *rxd; 1104 struct htt_rx_desc *rxd;
@@ -1083,6 +1106,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1083 u8 da[ETH_ALEN]; 1106 u8 da[ETH_ALEN];
1084 u8 sa[ETH_ALEN]; 1107 u8 sa[ETH_ALEN];
1085 int l3_pad_bytes; 1108 int l3_pad_bytes;
1109 int bytes_aligned = ar->hw_params.decap_align_bytes;
1086 1110
1087 /* Delivered decapped frame: 1111 /* Delivered decapped frame:
1088 * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1112 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1111,6 +1135,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1111 /* push original 802.11 header */ 1135 /* push original 802.11 header */
1112 hdr = (struct ieee80211_hdr *)first_hdr; 1136 hdr = (struct ieee80211_hdr *)first_hdr;
1113 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1137 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1138
1139 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1140 memcpy(skb_push(msdu,
1141 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1142 (void *)hdr + round_up(hdr_len, bytes_aligned),
1143 ath10k_htt_rx_crypto_param_len(ar, enctype));
1144 }
1145
1114 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1146 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1115 1147
1116 /* original 802.11 header has a different DA and in 1148 /* original 802.11 header has a different DA and in
@@ -1171,6 +1203,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1171 u8 sa[ETH_ALEN]; 1203 u8 sa[ETH_ALEN];
1172 int l3_pad_bytes; 1204 int l3_pad_bytes;
1173 struct htt_rx_desc *rxd; 1205 struct htt_rx_desc *rxd;
1206 int bytes_aligned = ar->hw_params.decap_align_bytes;
1174 1207
1175 /* Delivered decapped frame: 1208 /* Delivered decapped frame:
1176 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1209 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1199,6 +1232,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1199 /* push original 802.11 header */ 1232 /* push original 802.11 header */
1200 hdr = (struct ieee80211_hdr *)first_hdr; 1233 hdr = (struct ieee80211_hdr *)first_hdr;
1201 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1234 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1235
1236 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1237 memcpy(skb_push(msdu,
1238 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1239 (void *)hdr + round_up(hdr_len, bytes_aligned),
1240 ath10k_htt_rx_crypto_param_len(ar, enctype));
1241 }
1242
1202 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1243 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1203 1244
1204 /* original 802.11 header has a different DA and in 1245 /* original 802.11 header has a different DA and in
@@ -1212,12 +1253,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1212static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1253static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1213 struct sk_buff *msdu, 1254 struct sk_buff *msdu,
1214 struct ieee80211_rx_status *status, 1255 struct ieee80211_rx_status *status,
1215 const u8 first_hdr[64]) 1256 const u8 first_hdr[64],
1257 enum htt_rx_mpdu_encrypt_type enctype)
1216{ 1258{
1217 struct ieee80211_hdr *hdr; 1259 struct ieee80211_hdr *hdr;
1218 size_t hdr_len; 1260 size_t hdr_len;
1219 int l3_pad_bytes; 1261 int l3_pad_bytes;
1220 struct htt_rx_desc *rxd; 1262 struct htt_rx_desc *rxd;
1263 int bytes_aligned = ar->hw_params.decap_align_bytes;
1221 1264
1222 /* Delivered decapped frame: 1265 /* Delivered decapped frame:
1223 * [amsdu header] <-- replaced with 802.11 hdr 1266 * [amsdu header] <-- replaced with 802.11 hdr
@@ -1233,6 +1276,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1233 1276
1234 hdr = (struct ieee80211_hdr *)first_hdr; 1277 hdr = (struct ieee80211_hdr *)first_hdr;
1235 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1278 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1279
1280 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1281 memcpy(skb_push(msdu,
1282 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1283 (void *)hdr + round_up(hdr_len, bytes_aligned),
1284 ath10k_htt_rx_crypto_param_len(ar, enctype));
1285 }
1286
1236 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1287 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1237} 1288}
1238 1289
@@ -1267,13 +1318,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1267 is_decrypted); 1318 is_decrypted);
1268 break; 1319 break;
1269 case RX_MSDU_DECAP_NATIVE_WIFI: 1320 case RX_MSDU_DECAP_NATIVE_WIFI:
1270 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); 1321 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1322 enctype);
1271 break; 1323 break;
1272 case RX_MSDU_DECAP_ETHERNET2_DIX: 1324 case RX_MSDU_DECAP_ETHERNET2_DIX:
1273 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1325 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1274 break; 1326 break;
1275 case RX_MSDU_DECAP_8023_SNAP_LLC: 1327 case RX_MSDU_DECAP_8023_SNAP_LLC:
1276 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); 1328 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1329 enctype);
1277 break; 1330 break;
1278 } 1331 }
1279} 1332}
@@ -1316,7 +1369,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1316 1369
1317static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 1370static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1318 struct sk_buff_head *amsdu, 1371 struct sk_buff_head *amsdu,
1319 struct ieee80211_rx_status *status) 1372 struct ieee80211_rx_status *status,
1373 bool fill_crypt_header)
1320{ 1374{
1321 struct sk_buff *first; 1375 struct sk_buff *first;
1322 struct sk_buff *last; 1376 struct sk_buff *last;
@@ -1326,7 +1380,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1326 enum htt_rx_mpdu_encrypt_type enctype; 1380 enum htt_rx_mpdu_encrypt_type enctype;
1327 u8 first_hdr[64]; 1381 u8 first_hdr[64];
1328 u8 *qos; 1382 u8 *qos;
1329 size_t hdr_len;
1330 bool has_fcs_err; 1383 bool has_fcs_err;
1331 bool has_crypto_err; 1384 bool has_crypto_err;
1332 bool has_tkip_err; 1385 bool has_tkip_err;
@@ -1351,15 +1404,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1351 * decapped header. It'll be used for undecapping of each MSDU. 1404 * decapped header. It'll be used for undecapping of each MSDU.
1352 */ 1405 */
1353 hdr = (void *)rxd->rx_hdr_status; 1406 hdr = (void *)rxd->rx_hdr_status;
1354 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1407 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1355 memcpy(first_hdr, hdr, hdr_len);
1356 1408
1357 /* Each A-MSDU subframe will use the original header as the base and be 1409 /* Each A-MSDU subframe will use the original header as the base and be
1358 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1410 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1359 */ 1411 */
1360 hdr = (void *)first_hdr; 1412 hdr = (void *)first_hdr;
1361 qos = ieee80211_get_qos_ctl(hdr); 1413
1362 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1414 if (ieee80211_is_data_qos(hdr->frame_control)) {
1415 qos = ieee80211_get_qos_ctl(hdr);
1416 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1417 }
1363 1418
1364 /* Some attention flags are valid only in the last MSDU. */ 1419 /* Some attention flags are valid only in the last MSDU. */
1365 last = skb_peek_tail(amsdu); 1420 last = skb_peek_tail(amsdu);
@@ -1406,9 +1461,14 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1406 status->flag |= RX_FLAG_DECRYPTED; 1461 status->flag |= RX_FLAG_DECRYPTED;
1407 1462
1408 if (likely(!is_mgmt)) 1463 if (likely(!is_mgmt))
1409 status->flag |= RX_FLAG_IV_STRIPPED | 1464 status->flag |= RX_FLAG_MMIC_STRIPPED;
1410 RX_FLAG_MMIC_STRIPPED; 1465
1411} 1466 if (fill_crypt_header)
1467 status->flag |= RX_FLAG_MIC_STRIPPED |
1468 RX_FLAG_ICV_STRIPPED;
1469 else
1470 status->flag |= RX_FLAG_IV_STRIPPED;
1471 }
1412 1472
1413 skb_queue_walk(amsdu, msdu) { 1473 skb_queue_walk(amsdu, msdu) {
1414 ath10k_htt_rx_h_csum_offload(msdu); 1474 ath10k_htt_rx_h_csum_offload(msdu);
@@ -1424,6 +1484,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1424 if (is_mgmt) 1484 if (is_mgmt)
1425 continue; 1485 continue;
1426 1486
1487 if (fill_crypt_header)
1488 continue;
1489
1427 hdr = (void *)msdu->data; 1490 hdr = (void *)msdu->data;
1428 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 1491 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1429 } 1492 }
@@ -1434,6 +1497,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1434 struct ieee80211_rx_status *status) 1497 struct ieee80211_rx_status *status)
1435{ 1498{
1436 struct sk_buff *msdu; 1499 struct sk_buff *msdu;
1500 struct sk_buff *first_subframe;
1501
1502 first_subframe = skb_peek(amsdu);
1437 1503
1438 while ((msdu = __skb_dequeue(amsdu))) { 1504 while ((msdu = __skb_dequeue(amsdu))) {
1439 /* Setup per-MSDU flags */ 1505 /* Setup per-MSDU flags */
@@ -1442,6 +1508,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1442 else 1508 else
1443 status->flag |= RX_FLAG_AMSDU_MORE; 1509 status->flag |= RX_FLAG_AMSDU_MORE;
1444 1510
1511 if (msdu == first_subframe) {
1512 first_subframe = NULL;
1513 status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1514 } else {
1515 status->flag |= RX_FLAG_ALLOW_SAME_PN;
1516 }
1517
1445 ath10k_process_rx(ar, status, msdu); 1518 ath10k_process_rx(ar, status, msdu);
1446 } 1519 }
1447} 1520}
@@ -1584,7 +1657,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1584 ath10k_htt_rx_h_unchain(ar, &amsdu); 1657 ath10k_htt_rx_h_unchain(ar, &amsdu);
1585 1658
1586 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); 1659 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1587 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); 1660 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
1588 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); 1661 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1589 1662
1590 return num_msdus; 1663 return num_msdus;
@@ -1745,8 +1818,7 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1745} 1818}
1746 1819
1747static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, 1820static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1748 struct sk_buff_head *amsdu, 1821 struct sk_buff_head *amsdu)
1749 int budget_left)
1750{ 1822{
1751 struct sk_buff *msdu; 1823 struct sk_buff *msdu;
1752 struct htt_rx_desc *rxd; 1824 struct htt_rx_desc *rxd;
@@ -1757,9 +1829,8 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1757 if (WARN_ON(!skb_queue_empty(amsdu))) 1829 if (WARN_ON(!skb_queue_empty(amsdu)))
1758 return -EINVAL; 1830 return -EINVAL;
1759 1831
1760 while ((msdu = __skb_dequeue(list)) && budget_left) { 1832 while ((msdu = __skb_dequeue(list))) {
1761 __skb_queue_tail(amsdu, msdu); 1833 __skb_queue_tail(amsdu, msdu);
1762 budget_left--;
1763 1834
1764 rxd = (void *)msdu->data - sizeof(*rxd); 1835 rxd = (void *)msdu->data - sizeof(*rxd);
1765 if (rxd->msdu_end.common.info0 & 1836 if (rxd->msdu_end.common.info0 &
@@ -1850,8 +1921,7 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1850 return num_msdu; 1921 return num_msdu;
1851} 1922}
1852 1923
1853static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb, 1924static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1854 int budget_left)
1855{ 1925{
1856 struct ath10k_htt *htt = &ar->htt; 1926 struct ath10k_htt *htt = &ar->htt;
1857 struct htt_resp *resp = (void *)skb->data; 1927 struct htt_resp *resp = (void *)skb->data;
@@ -1908,9 +1978,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
1908 if (offload) 1978 if (offload)
1909 num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list); 1979 num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
1910 1980
1911 while (!skb_queue_empty(&list) && budget_left) { 1981 while (!skb_queue_empty(&list)) {
1912 __skb_queue_head_init(&amsdu); 1982 __skb_queue_head_init(&amsdu);
1913 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu, budget_left); 1983 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1914 switch (ret) { 1984 switch (ret) {
1915 case 0: 1985 case 0:
1916 /* Note: The in-order indication may report interleaved 1986 /* Note: The in-order indication may report interleaved
@@ -1920,10 +1990,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
1920 * should still give an idea about rx rate to the user. 1990 * should still give an idea about rx rate to the user.
1921 */ 1991 */
1922 num_msdus += skb_queue_len(&amsdu); 1992 num_msdus += skb_queue_len(&amsdu);
1923 budget_left -= skb_queue_len(&amsdu);
1924 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 1993 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1925 ath10k_htt_rx_h_filter(ar, &amsdu, status); 1994 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1926 ath10k_htt_rx_h_mpdu(ar, &amsdu, status); 1995 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
1927 ath10k_htt_rx_h_deliver(ar, &amsdu, status); 1996 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1928 break; 1997 break;
1929 case -EAGAIN: 1998 case -EAGAIN:
@@ -2563,8 +2632,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2563 } 2632 }
2564 2633
2565 spin_lock_bh(&htt->rx_ring.lock); 2634 spin_lock_bh(&htt->rx_ring.lock);
2566 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb, 2635 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
2567 (budget - quota));
2568 spin_unlock_bh(&htt->rx_ring.lock); 2636 spin_unlock_bh(&htt->rx_ring.lock);
2569 if (num_rx_msdus < 0) { 2637 if (num_rx_msdus < 0) {
2570 resched_napi = true; 2638 resched_napi = true;
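The ath10k hunks above thread an enctype/fill_crypt_header pair through the undecap and MPDU helpers and, in ath10k_htt_rx_h_deliver(), mark every A-MSDU subframe after the first with RX_FLAG_ALLOW_SAME_PN. All subframes of one A-MSDU are protected by a single CCMP packet number, so only the first subframe should have to advance the PN during replay checking. A standalone model of that relaxed rule (not driver code; pn_ok() and the sample numbers are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Replay rule that RX_FLAG_ALLOW_SAME_PN relaxes: trailing A-MSDU
 * subframes may repeat the PN of the first subframe, everything else
 * must strictly advance it.
 */
static bool pn_ok(uint64_t last_pn, uint64_t pn, bool allow_same_pn)
{
	if (allow_same_pn)
		return pn >= last_pn;	/* 2nd..Nth subframe of an A-MSDU */
	return pn > last_pn;		/* normal frame / first subframe  */
}

int main(void)
{
	uint64_t last_pn = 41, amsdu_pn = 42;

	/* first subframe advances the PN, the next two reuse it */
	printf("%d %d %d\n",
	       pn_ok(last_pn, amsdu_pn, false),
	       pn_ok(amsdu_pn, amsdu_pn, true),
	       pn_ok(amsdu_pn, amsdu_pn, true));
	return 0;
}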
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index c1022a1cf855..28da14398951 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -239,6 +239,9 @@ enum htt_rx_mpdu_encrypt_type {
239 HTT_RX_MPDU_ENCRYPT_WAPI = 5, 239 HTT_RX_MPDU_ENCRYPT_WAPI = 5,
240 HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6, 240 HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
241 HTT_RX_MPDU_ENCRYPT_NONE = 7, 241 HTT_RX_MPDU_ENCRYPT_NONE = 7,
242 HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
243 HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
244 HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
242}; 245};
243 246
244#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff 247#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 35bd50bcbbd5..b83f01d6e3dd 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -812,7 +812,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
812 if (!sta) { 812 if (!sta) {
813 wcn36xx_err("sta %pM is not found\n", 813 wcn36xx_err("sta %pM is not found\n",
814 bss_conf->bssid); 814 bss_conf->bssid);
815 rcu_read_unlock();
816 goto out; 815 goto out;
817 } 816 }
818 sta_priv = wcn36xx_sta_to_priv(sta); 817 sta_priv = wcn36xx_sta_to_priv(sta);
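The wcn36xx hunk drops an rcu_read_unlock() from the "sta not found" branch; the shared out label (not visible in this hunk) evidently releases the read lock already, so the error path was unlocking twice. A minimal sketch of the single-exit shape this restores, assuming kernel context; my_lookup(), use_entry() and the surrounding types are placeholders:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

static int lookup_and_use(struct my_table *tbl, u32 key)
{
	struct my_entry *e;
	int ret = 0;

	rcu_read_lock();
	e = my_lookup(tbl, key);	/* hypothetical RCU-protected lookup */
	if (!e) {
		ret = -ENOENT;
		goto out;		/* no unlock here ...                */
	}
	use_entry(e);
out:
	rcu_read_unlock();		/* ... only at the single exit       */
	return ret;
}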
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5a14cc7f28ee..37f9039bb9ca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1249,6 +1249,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
1249 goto out; 1249 goto out;
1250 } 1250 }
1251 1251
1252 __nvme_revalidate_disk(disk, id);
1252 nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid); 1253 nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
1253 if (!uuid_equal(&ns->uuid, &uuid) || 1254 if (!uuid_equal(&ns->uuid, &uuid) ||
1254 memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) || 1255 memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index af075e998944..be49d0f79381 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2545 nvme_fc_abort_aen_ops(ctrl); 2545 nvme_fc_abort_aen_ops(ctrl);
2546 2546
2547 /* wait for all io that had to be aborted */ 2547 /* wait for all io that had to be aborted */
2548 spin_lock_irqsave(&ctrl->lock, flags); 2548 spin_lock_irq(&ctrl->lock);
2549 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 2549 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2550 ctrl->flags &= ~FCCTRL_TERMIO; 2550 ctrl->flags &= ~FCCTRL_TERMIO;
2551 spin_unlock_irqrestore(&ctrl->lock, flags); 2551 spin_unlock_irq(&ctrl->lock);
2552 2552
2553 nvme_fc_term_aen_ops(ctrl); 2553 nvme_fc_term_aen_ops(ctrl);
2554 2554
@@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2734{ 2734{
2735 struct nvme_fc_ctrl *ctrl; 2735 struct nvme_fc_ctrl *ctrl;
2736 unsigned long flags; 2736 unsigned long flags;
2737 int ret, idx; 2737 int ret, idx, retry;
2738 2738
2739 if (!(rport->remoteport.port_role & 2739 if (!(rport->remoteport.port_role &
2740 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 2740 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2760 ctrl->rport = rport; 2760 ctrl->rport = rport;
2761 ctrl->dev = lport->dev; 2761 ctrl->dev = lport->dev;
2762 ctrl->cnum = idx; 2762 ctrl->cnum = idx;
2763 init_waitqueue_head(&ctrl->ioabort_wait);
2763 2764
2764 get_device(ctrl->dev); 2765 get_device(ctrl->dev);
2765 kref_init(&ctrl->ref); 2766 kref_init(&ctrl->ref);
@@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2825 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 2826 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2826 spin_unlock_irqrestore(&rport->lock, flags); 2827 spin_unlock_irqrestore(&rport->lock, flags);
2827 2828
2828 ret = nvme_fc_create_association(ctrl); 2829 /*
2830 * It's possible that transactions used to create the association
2831 * may fail. Examples: CreateAssociation LS or CreateIOConnection
2832 * LS gets dropped/corrupted/fails; or a frame gets dropped or a
2833 * command times out for one of the actions to init the controller
2834 * (Connect, Get/Set_Property, Set_Features, etc). Many of these
2835 * transport errors (frame drop, LS failure) inherently must kill
2836 * the association. The transport is coded so that any command used
2837 * to create the association (prior to a LIVE state transition
2838 * while NEW or RECONNECTING) will fail if it completes in error or
2839 * times out.
2840 *
2841 * As such: as the connect request was most likely due to a
2842 * udev event that discovered the remote port, meaning there is
2843 * not an admin or script there to restart if the connect
2844 * request fails, retry the initial connection creation up to
2845 * three times before giving up and declaring failure.
2846 */
2847 for (retry = 0; retry < 3; retry++) {
2848 ret = nvme_fc_create_association(ctrl);
2849 if (!ret)
2850 break;
2851 }
2852
2829 if (ret) { 2853 if (ret) {
2854 /* couldn't schedule retry - fail out */
2855 dev_err(ctrl->ctrl.device,
2856 "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
2857
2830 ctrl->ctrl.opts = NULL; 2858 ctrl->ctrl.opts = NULL;
2859
2831 /* initiate nvme ctrl ref counting teardown */ 2860 /* initiate nvme ctrl ref counting teardown */
2832 nvme_uninit_ctrl(&ctrl->ctrl); 2861 nvme_uninit_ctrl(&ctrl->ctrl);
2833 nvme_put_ctrl(&ctrl->ctrl); 2862 nvme_put_ctrl(&ctrl->ctrl);
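Three related nvme-fc changes above: the io-abort wait switches to plain spin_lock_irq()/spin_unlock_irq() (wait_event_lock_irq() already drops and retakes the lock with the _irq variants while sleeping, so the irqsave/irqrestore pair bought nothing), the ioabort_wait queue is now initialized when the controller is created, and the initial association create is retried up to three times, since a failed udev-triggered connect has nobody around to restart it. A compilable sketch of just the bounded-retry shape; create_association() here is a stand-in that fakes success on the third try:

#include <stdio.h>

#define MAX_CONNECT_RETRIES 3	/* mirrors the hard-coded 3 in the hunk */

static int create_association(void)
{
	static int attempts;

	return ++attempts < 3 ? -1 : 0;	/* pretend the third try works */
}

int main(void)
{
	int ret = -1, retry;

	for (retry = 0; retry < MAX_CONNECT_RETRIES; retry++) {
		ret = create_association();
		if (!ret)
			break;
	}
	if (ret)
		fprintf(stderr, "connect retry failed\n");
	else
		printf("connected after %d attempt(s)\n", retry + 1);
	return ret ? 1 : 0;
}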
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 92a03ff5fb4d..0ebb539f3bd3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
571 if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) 571 if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
572 return; 572 return;
573 573
574 if (nvme_rdma_queue_idx(queue) == 0) {
575 nvme_rdma_free_qe(queue->device->dev,
576 &queue->ctrl->async_event_sqe,
577 sizeof(struct nvme_command), DMA_TO_DEVICE);
578 }
579
574 nvme_rdma_destroy_queue_ib(queue); 580 nvme_rdma_destroy_queue_ib(queue);
575 rdma_destroy_id(queue->cm_id); 581 rdma_destroy_id(queue->cm_id);
576} 582}
@@ -739,8 +745,6 @@ out:
739static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, 745static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
740 bool remove) 746 bool remove)
741{ 747{
742 nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
743 sizeof(struct nvme_command), DMA_TO_DEVICE);
744 nvme_rdma_stop_queue(&ctrl->queues[0]); 748 nvme_rdma_stop_queue(&ctrl->queues[0]);
745 if (remove) { 749 if (remove) {
746 blk_cleanup_queue(ctrl->ctrl.admin_q); 750 blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
765 769
766 if (new) { 770 if (new) {
767 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); 771 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
768 if (IS_ERR(ctrl->ctrl.admin_tagset)) 772 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
773 error = PTR_ERR(ctrl->ctrl.admin_tagset);
769 goto out_free_queue; 774 goto out_free_queue;
775 }
770 776
771 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 777 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
772 if (IS_ERR(ctrl->ctrl.admin_q)) { 778 if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
846 852
847 if (new) { 853 if (new) {
848 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); 854 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
849 if (IS_ERR(ctrl->ctrl.tagset)) 855 if (IS_ERR(ctrl->ctrl.tagset)) {
856 ret = PTR_ERR(ctrl->ctrl.tagset);
850 goto out_free_io_queues; 857 goto out_free_io_queues;
858 }
851 859
852 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 860 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
853 if (IS_ERR(ctrl->ctrl.connect_q)) { 861 if (IS_ERR(ctrl->ctrl.connect_q)) {
@@ -1606,12 +1614,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
1606 /* 1614 /*
1607 * reconnecting state means transport disruption, which 1615 * reconnecting state means transport disruption, which
1608 * can take a long time and even might fail permanently, 1616 * can take a long time and even might fail permanently,
1609 * so we can't let incoming I/O be requeued forever. 1617 * fail fast to give upper layers a chance to failover.
1610 * fail it fast to allow upper layers a chance to 1618 * deleting state means that the ctrl will never accept
1611 * failover. 1619 * commands again, fail it permanently.
1612 */ 1620 */
1613 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING) 1621 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
1622 queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
1623 nvme_req(rq)->status = NVME_SC_ABORT_REQ;
1614 return BLK_STS_IOERR; 1624 return BLK_STS_IOERR;
1625 }
1615 return BLK_STS_RESOURCE; /* try again later */ 1626 return BLK_STS_RESOURCE; /* try again later */
1616 } 1627 }
1617 } 1628 }
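The nvme-rdma hunks free the admin queue's async event SQE where queue 0 itself is torn down, fail requests with NVME_SC_ABORT_REQ when the controller is RECONNECTING or DELETING, and, in both tagset allocations, record PTR_ERR() before jumping to cleanup instead of returning whatever error value was already lying around. A small sketch of that error-propagation pattern, assuming kernel context; alloc_thing(), free_partial() and struct ctx are illustrative:

#include <linux/err.h>

static int setup_thing(struct ctx *c)
{
	int error;

	c->thing = alloc_thing(c);		/* returns ERR_PTR() on failure */
	if (IS_ERR(c->thing)) {
		error = PTR_ERR(c->thing);	/* capture the errno ...        */
		goto out_free;			/* ... before unwinding         */
	}
	return 0;

out_free:
	free_partial(c);
	return error;				/* caller sees the real reason  */
}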
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1b208beeef50..645ba7eee35d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
387 387
388static void __nvmet_req_complete(struct nvmet_req *req, u16 status) 388static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
389{ 389{
390 u32 old_sqhd, new_sqhd;
391 u16 sqhd;
392
390 if (status) 393 if (status)
391 nvmet_set_status(req, status); 394 nvmet_set_status(req, status);
392 395
393 if (req->sq->size) 396 if (req->sq->size) {
394 req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; 397 do {
395 req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); 398 old_sqhd = req->sq->sqhd;
399 new_sqhd = (old_sqhd + 1) % req->sq->size;
400 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
401 old_sqhd);
402 }
403 sqhd = req->sq->sqhd & 0x0000FFFF;
404 req->rsp->sq_head = cpu_to_le16(sqhd);
396 req->rsp->sq_id = cpu_to_le16(req->sq->qid); 405 req->rsp->sq_id = cpu_to_le16(req->sq->qid);
397 req->rsp->command_id = req->cmd->common.command_id; 406 req->rsp->command_id = req->cmd->common.command_id;
398 407
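__nvmet_req_complete() now advances sq->sqhd with a cmpxchg() loop so completions racing on different CPUs each bump the head exactly once, and (next hunk) sqhd widens to u32, a natural operand size for cmpxchg() here, with only the low 16 bits reported in the CQE. A userspace model of the same loop using C11 atomics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE 32u

static _Atomic uint32_t sqhd;

/* Retry the compare-and-swap until our (old + 1) % size lands; a racing
 * completion that wins simply makes us recompute from its value.
 */
static uint16_t advance_sqhd(void)
{
	uint32_t old = atomic_load(&sqhd), new;

	do {
		new = (old + 1) % SQ_SIZE;
	} while (!atomic_compare_exchange_weak(&sqhd, &old, new));

	return (uint16_t)(new & 0xffff);	/* low 16 bits go on the wire */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("sq_head=%u\n", advance_sqhd());
	return 0;
}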
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7b8e20adf760..87e429bfcd8a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,7 +74,7 @@ struct nvmet_sq {
74 struct percpu_ref ref; 74 struct percpu_ref ref;
75 u16 qid; 75 u16 qid;
76 u16 size; 76 u16 size;
77 u16 sqhd; 77 u32 sqhd;
78 struct completion free_done; 78 struct completion free_done;
79 struct completion confirm_done; 79 struct completion confirm_done;
80}; 80};
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 3f6b34febbf1..433af328d981 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
534 continue; 534 continue;
535 irq = irq_find_mapping(gc->irqdomain, irqnr + i); 535 irq = irq_find_mapping(gc->irqdomain, irqnr + i);
536 generic_handle_irq(irq); 536 generic_handle_irq(irq);
537 /* Clear interrupt */ 537
538 /* Clear interrupt.
539 * We must read the pin register again, in case the
540 * value was changed while executing
541 * generic_handle_irq() above.
542 */
543 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
544 regval = readl(regs + i);
538 writel(regval, regs + i); 545 writel(regval, regs + i);
546 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
539 ret = IRQ_HANDLED; 547 ret = IRQ_HANDLED;
540 } 548 }
541 } 549 }
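The pinctrl-amd handler used to write back a regval read before generic_handle_irq() ran; since the handler can change the pin state, acking with that stale value could clear (or fail to clear) the wrong bits. The fix re-reads the register under gpio_dev->lock immediately before the write-to-clear. A kernel-style sketch of that pattern; reg and lock stand in for the driver's own fields:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static void ack_pin_irq(void __iomem *reg, raw_spinlock_t *lock)
{
	unsigned long flags;
	u32 regval;

	raw_spin_lock_irqsave(lock, flags);
	regval = readl(reg);	/* fresh read: the handler may have changed it */
	writel(regval, reg);	/* write-1-to-clear against current state      */
	raw_spin_unlock_irqrestore(lock, flags);
}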
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 3e40d4245512..9c950bbf07ba 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
407 ret = mcp_read(mcp, MCP_GPIO, &status); 407 ret = mcp_read(mcp, MCP_GPIO, &status);
408 if (ret < 0) 408 if (ret < 0)
409 status = 0; 409 status = 0;
410 else 410 else {
411 mcp->cached_gpio = status;
411 status = !!(status & (1 << offset)); 412 status = !!(status & (1 << offset));
412 413 }
413 mcp->cached_gpio = status;
414 414
415 mutex_unlock(&mcp->lock); 415 mutex_unlock(&mcp->lock);
416 return status; 416 return status;
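mcp23s08_get() was storing the already-masked 0/1 result in cached_gpio instead of the raw GPIO register, so the cache held a boolean rather than the pin states. The fix caches the full register first and only then reduces it to the requested bit. A standalone illustration of that ordering; read_reg() and the register value are made up:

#include <stdio.h>

struct chip {
	unsigned int cached_gpio;
};

static int read_reg(unsigned int *raw)
{
	*raw = 0x00a5;			/* pretend this came from the chip */
	return 0;
}

static int gpio_get_bit(struct chip *c, unsigned int offset)
{
	unsigned int raw;

	if (read_reg(&raw) < 0)
		return 0;	/* the driver also reports 0 on a failed read */

	c->cached_gpio = raw;			/* keep the whole register */
	return !!(raw & (1u << offset));	/* hand back just one bit  */
}

int main(void)
{
	struct chip c = { 0 };

	printf("bit0=%d cached=0x%04x\n", gpio_get_bit(&c, 0), c.cached_gpio);
	printf("bit1=%d cached=0x%04x\n", gpio_get_bit(&c, 1), c.cached_gpio);
	return 0;
}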
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index bb792a52248b..e03fa31446ca 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,6 +33,7 @@
33#include <linux/suspend.h> 33#include <linux/suspend.h>
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35#include <linux/io-64-nonatomic-lo-hi.h> 35#include <linux/io-64-nonatomic-lo-hi.h>
36#include <linux/spinlock.h>
36 37
37#include <asm/intel_pmc_ipc.h> 38#include <asm/intel_pmc_ipc.h>
38 39
@@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev {
131 /* gcr */ 132 /* gcr */
132 void __iomem *gcr_mem_base; 133 void __iomem *gcr_mem_base;
133 bool has_gcr_regs; 134 bool has_gcr_regs;
135 spinlock_t gcr_lock;
134 136
135 /* punit */ 137 /* punit */
136 struct platform_device *punit_dev; 138 struct platform_device *punit_dev;
@@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data)
225{ 227{
226 int ret; 228 int ret;
227 229
228 mutex_lock(&ipclock); 230 spin_lock(&ipcdev.gcr_lock);
229 231
230 ret = is_gcr_valid(offset); 232 ret = is_gcr_valid(offset);
231 if (ret < 0) { 233 if (ret < 0) {
232 mutex_unlock(&ipclock); 234 spin_unlock(&ipcdev.gcr_lock);
233 return ret; 235 return ret;
234 } 236 }
235 237
236 *data = readl(ipcdev.gcr_mem_base + offset); 238 *data = readl(ipcdev.gcr_mem_base + offset);
237 239
238 mutex_unlock(&ipclock); 240 spin_unlock(&ipcdev.gcr_lock);
239 241
240 return 0; 242 return 0;
241} 243}
@@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data)
255{ 257{
256 int ret; 258 int ret;
257 259
258 mutex_lock(&ipclock); 260 spin_lock(&ipcdev.gcr_lock);
259 261
260 ret = is_gcr_valid(offset); 262 ret = is_gcr_valid(offset);
261 if (ret < 0) { 263 if (ret < 0) {
262 mutex_unlock(&ipclock); 264 spin_unlock(&ipcdev.gcr_lock);
263 return ret; 265 return ret;
264 } 266 }
265 267
266 writel(data, ipcdev.gcr_mem_base + offset); 268 writel(data, ipcdev.gcr_mem_base + offset);
267 269
268 mutex_unlock(&ipclock); 270 spin_unlock(&ipcdev.gcr_lock);
269 271
270 return 0; 272 return 0;
271} 273}
@@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
287 u32 new_val; 289 u32 new_val;
288 int ret = 0; 290 int ret = 0;
289 291
290 mutex_lock(&ipclock); 292 spin_lock(&ipcdev.gcr_lock);
291 293
292 ret = is_gcr_valid(offset); 294 ret = is_gcr_valid(offset);
293 if (ret < 0) 295 if (ret < 0)
@@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
309 } 311 }
310 312
311gcr_ipc_unlock: 313gcr_ipc_unlock:
312 mutex_unlock(&ipclock); 314 spin_unlock(&ipcdev.gcr_lock);
313 return ret; 315 return ret;
314} 316}
315EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); 317EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
@@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id)
480 482
481static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 483static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
482{ 484{
483 resource_size_t pci_resource; 485 struct intel_pmc_ipc_dev *pmc = &ipcdev;
484 int ret; 486 int ret;
485 int len;
486 487
487 ipcdev.dev = &pci_dev_get(pdev)->dev; 488 /* Only one PMC is supported */
488 ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; 489 if (pmc->dev)
490 return -EBUSY;
489 491
490 ret = pci_enable_device(pdev); 492 pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
493
494 spin_lock_init(&ipcdev.gcr_lock);
495
496 ret = pcim_enable_device(pdev);
491 if (ret) 497 if (ret)
492 return ret; 498 return ret;
493 499
494 ret = pci_request_regions(pdev, "intel_pmc_ipc"); 500 ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
495 if (ret) 501 if (ret)
496 return ret; 502 return ret;
497 503
498 pci_resource = pci_resource_start(pdev, 0); 504 init_completion(&pmc->cmd_complete);
499 len = pci_resource_len(pdev, 0);
500 if (!pci_resource || !len) {
501 dev_err(&pdev->dev, "Failed to get resource\n");
502 return -ENOMEM;
503 }
504 505
505 init_completion(&ipcdev.cmd_complete); 506 pmc->ipc_base = pcim_iomap_table(pdev)[0];
506 507
507 if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) { 508 ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
509 pmc);
510 if (ret) {
508 dev_err(&pdev->dev, "Failed to request irq\n"); 511 dev_err(&pdev->dev, "Failed to request irq\n");
509 return -EBUSY; 512 return ret;
510 } 513 }
511 514
512 ipcdev.ipc_base = ioremap_nocache(pci_resource, len); 515 pmc->dev = &pdev->dev;
513 if (!ipcdev.ipc_base) {
514 dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
515 free_irq(pdev->irq, &ipcdev);
516 ret = -ENOMEM;
517 }
518 516
519 return ret; 517 pci_set_drvdata(pdev, pmc);
520}
521 518
522static void ipc_pci_remove(struct pci_dev *pdev) 519 return 0;
523{
524 free_irq(pdev->irq, &ipcdev);
525 pci_release_regions(pdev);
526 pci_dev_put(pdev);
527 iounmap(ipcdev.ipc_base);
528 ipcdev.dev = NULL;
529} 520}
530 521
531static const struct pci_device_id ipc_pci_ids[] = { 522static const struct pci_device_id ipc_pci_ids[] = {
@@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = {
540 .name = "intel_pmc_ipc", 531 .name = "intel_pmc_ipc",
541 .id_table = ipc_pci_ids, 532 .id_table = ipc_pci_ids,
542 .probe = ipc_pci_probe, 533 .probe = ipc_pci_probe,
543 .remove = ipc_pci_remove,
544}; 534};
545 535
546static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, 536static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
@@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev)
850 return -ENXIO; 840 return -ENXIO;
851 } 841 }
852 size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE; 842 size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
843 res->end = res->start + size - 1;
844
845 addr = devm_ioremap_resource(&pdev->dev, res);
846 if (IS_ERR(addr))
847 return PTR_ERR(addr);
853 848
854 if (!request_mem_region(res->start, size, pdev->name)) {
855 dev_err(&pdev->dev, "Failed to request ipc resource\n");
856 return -EBUSY;
857 }
858 addr = ioremap_nocache(res->start, size);
859 if (!addr) {
860 dev_err(&pdev->dev, "I/O memory remapping failed\n");
861 release_mem_region(res->start, size);
862 return -ENOMEM;
863 }
864 ipcdev.ipc_base = addr; 849 ipcdev.ipc_base = addr;
865 850
866 ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET; 851 ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
@@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
917 902
918static int ipc_plat_probe(struct platform_device *pdev) 903static int ipc_plat_probe(struct platform_device *pdev)
919{ 904{
920 struct resource *res;
921 int ret; 905 int ret;
922 906
923 ipcdev.dev = &pdev->dev; 907 ipcdev.dev = &pdev->dev;
924 ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; 908 ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
925 init_completion(&ipcdev.cmd_complete); 909 init_completion(&ipcdev.cmd_complete);
910 spin_lock_init(&ipcdev.gcr_lock);
926 911
927 ipcdev.irq = platform_get_irq(pdev, 0); 912 ipcdev.irq = platform_get_irq(pdev, 0);
928 if (ipcdev.irq < 0) { 913 if (ipcdev.irq < 0) {
@@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev)
939 ret = ipc_create_pmc_devices(); 924 ret = ipc_create_pmc_devices();
940 if (ret) { 925 if (ret) {
941 dev_err(&pdev->dev, "Failed to create pmc devices\n"); 926 dev_err(&pdev->dev, "Failed to create pmc devices\n");
942 goto err_device; 927 return ret;
943 } 928 }
944 929
945 if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND, 930 if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
946 "intel_pmc_ipc", &ipcdev)) { 931 "intel_pmc_ipc", &ipcdev)) {
947 dev_err(&pdev->dev, "Failed to request irq\n"); 932 dev_err(&pdev->dev, "Failed to request irq\n");
948 ret = -EBUSY; 933 ret = -EBUSY;
949 goto err_irq; 934 goto err_irq;
@@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev)
960 945
961 return 0; 946 return 0;
962err_sys: 947err_sys:
963 free_irq(ipcdev.irq, &ipcdev); 948 devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
964err_irq: 949err_irq:
965 platform_device_unregister(ipcdev.tco_dev); 950 platform_device_unregister(ipcdev.tco_dev);
966 platform_device_unregister(ipcdev.punit_dev); 951 platform_device_unregister(ipcdev.punit_dev);
967 platform_device_unregister(ipcdev.telemetry_dev); 952 platform_device_unregister(ipcdev.telemetry_dev);
968err_device: 953
969 iounmap(ipcdev.ipc_base);
970 res = platform_get_resource(pdev, IORESOURCE_MEM,
971 PLAT_RESOURCE_IPC_INDEX);
972 if (res) {
973 release_mem_region(res->start,
974 PLAT_RESOURCE_IPC_SIZE +
975 PLAT_RESOURCE_GCR_SIZE);
976 }
977 return ret; 954 return ret;
978} 955}
979 956
980static int ipc_plat_remove(struct platform_device *pdev) 957static int ipc_plat_remove(struct platform_device *pdev)
981{ 958{
982 struct resource *res;
983
984 sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); 959 sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
985 free_irq(ipcdev.irq, &ipcdev); 960 devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
986 platform_device_unregister(ipcdev.tco_dev); 961 platform_device_unregister(ipcdev.tco_dev);
987 platform_device_unregister(ipcdev.punit_dev); 962 platform_device_unregister(ipcdev.punit_dev);
988 platform_device_unregister(ipcdev.telemetry_dev); 963 platform_device_unregister(ipcdev.telemetry_dev);
989 iounmap(ipcdev.ipc_base);
990 res = platform_get_resource(pdev, IORESOURCE_MEM,
991 PLAT_RESOURCE_IPC_INDEX);
992 if (res) {
993 release_mem_region(res->start,
994 PLAT_RESOURCE_IPC_SIZE +
995 PLAT_RESOURCE_GCR_SIZE);
996 }
997 ipcdev.dev = NULL; 964 ipcdev.dev = NULL;
998 return 0; 965 return 0;
999} 966}
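The intel_pmc_ipc rework does two things: the GCR accessors drop the broad IPC mutex in favour of a dedicated gcr_lock spinlock (initialized in both the PCI and platform probe paths), and both probes move to managed resources, pcim_enable_device()/pcim_iomap_regions()/devm_ioremap_resource()/devm_request_irq(), so the hand-written unwind code and the PCI .remove callback go away. A kernel-style sketch of the managed PCI probe shape; struct my_dev, my_isr() and the use of BAR 0 are assumptions that mirror the driver:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct my_dev {
	void __iomem *base;
};

static irqreturn_t my_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_dev *d;
	int ret;

	ret = pcim_enable_device(pdev);		/* auto-disabled on detach  */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret)
		return ret;

	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = pcim_iomap_table(pdev)[0];	/* BAR 0 mapping            */

	ret = devm_request_irq(&pdev->dev, pdev->irq, my_isr, 0,
			       pci_name(pdev), d);
	if (ret)
		return ret;			/* everything above unwinds */

	pci_set_drvdata(pdev, d);
	return 0;				/* no .remove needed        */
}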
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f18b36dd57dd..376a99b7cf5d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
590 case AXP803_DCDC3: 590 case AXP803_DCDC3:
591 return !!(reg & BIT(6)); 591 return !!(reg & BIT(6));
592 case AXP803_DCDC6: 592 case AXP803_DCDC6:
593 return !!(reg & BIT(7)); 593 return !!(reg & BIT(5));
594 } 594 }
595 break; 595 break;
596 596
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index ef2be56460fe..790a4a73ea2c 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
29}; 29};
30 30
31#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ 31#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
32 [RN5T618_##rid] = { \ 32 { \
33 .name = #rid, \ 33 .name = #rid, \
34 .of_match = of_match_ptr(#rid), \ 34 .of_match = of_match_ptr(#rid), \
35 .regulators_node = of_match_ptr("regulators"), \ 35 .regulators_node = of_match_ptr("regulators"), \
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 82ac331d9125..84752152d41f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
357 357
358 adapter->next_port_scan = jiffies; 358 adapter->next_port_scan = jiffies;
359 359
360 adapter->erp_action.adapter = adapter;
361
360 if (zfcp_qdio_setup(adapter)) 362 if (zfcp_qdio_setup(adapter))
361 goto failed; 363 goto failed;
362 364
@@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
513 port->dev.groups = zfcp_port_attr_groups; 515 port->dev.groups = zfcp_port_attr_groups;
514 port->dev.release = zfcp_port_release; 516 port->dev.release = zfcp_port_release;
515 517
518 port->erp_action.adapter = adapter;
519 port->erp_action.port = port;
520
516 if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { 521 if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
517 kfree(port); 522 kfree(port);
518 goto err_out; 523 goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 6f848aeacec2..cbb8156bf5e0 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -194,9 +194,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
194 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, 194 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
195 &zfcp_sdev->status); 195 &zfcp_sdev->status);
196 erp_action = &zfcp_sdev->erp_action; 196 erp_action = &zfcp_sdev->erp_action;
197 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 197 WARN_ON_ONCE(erp_action->port != port);
198 erp_action->port = port; 198 WARN_ON_ONCE(erp_action->sdev != sdev);
199 erp_action->sdev = sdev;
200 if (!(atomic_read(&zfcp_sdev->status) & 199 if (!(atomic_read(&zfcp_sdev->status) &
201 ZFCP_STATUS_COMMON_RUNNING)) 200 ZFCP_STATUS_COMMON_RUNNING))
202 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 201 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -209,8 +208,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
209 zfcp_erp_action_dismiss_port(port); 208 zfcp_erp_action_dismiss_port(port);
210 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 209 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
211 erp_action = &port->erp_action; 210 erp_action = &port->erp_action;
212 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 211 WARN_ON_ONCE(erp_action->port != port);
213 erp_action->port = port; 212 WARN_ON_ONCE(erp_action->sdev != NULL);
214 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) 213 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
215 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 214 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
216 break; 215 break;
@@ -220,7 +219,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
220 zfcp_erp_action_dismiss_adapter(adapter); 219 zfcp_erp_action_dismiss_adapter(adapter);
221 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 220 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
222 erp_action = &adapter->erp_action; 221 erp_action = &adapter->erp_action;
223 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 222 WARN_ON_ONCE(erp_action->port != NULL);
223 WARN_ON_ONCE(erp_action->sdev != NULL);
224 if (!(atomic_read(&adapter->status) & 224 if (!(atomic_read(&adapter->status) &
225 ZFCP_STATUS_COMMON_RUNNING)) 225 ZFCP_STATUS_COMMON_RUNNING))
226 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 226 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -230,7 +230,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
230 return NULL; 230 return NULL;
231 } 231 }
232 232
233 erp_action->adapter = adapter; 233 WARN_ON_ONCE(erp_action->adapter != adapter);
234 memset(&erp_action->list, 0, sizeof(erp_action->list));
235 memset(&erp_action->timer, 0, sizeof(erp_action->timer));
236 erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
237 erp_action->fsf_req_id = 0;
234 erp_action->action = need; 238 erp_action->action = need;
235 erp_action->status = act_status; 239 erp_action->status = act_status;
236 240
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ebea205f9de8..4d2ba5682493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -116,10 +116,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
116 struct zfcp_unit *unit; 116 struct zfcp_unit *unit;
117 int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; 117 int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
118 118
119 zfcp_sdev->erp_action.adapter = adapter;
120 zfcp_sdev->erp_action.sdev = sdev;
121
119 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 122 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
120 if (!port) 123 if (!port)
121 return -ENXIO; 124 return -ENXIO;
122 125
126 zfcp_sdev->erp_action.port = port;
127
123 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); 128 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
124 if (unit) 129 if (unit)
125 put_device(&unit->dev); 130 put_device(&unit->dev);
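The zfcp changes stop memset()ing the whole erp_action in zfcp_erp_setup_act(): the adapter/port/sdev back-pointers are now filled in once when the owning object is created (adapter enqueue, port enqueue, slave_alloc), setup only clears the per-run fields, and WARN_ON_ONCE() checks that the constant pointers really are constant. A standalone sketch of that selective re-arm; the struct layout is illustrative and assert() stands in for WARN_ON_ONCE():

#include <assert.h>
#include <string.h>

struct owner;

struct action {
	struct owner *owner;	/* set once when the object is allocated */
	char list[16];		/* per-run state below this point        */
	char timer[24];
	int step;
	unsigned long req_id;
};

static void rearm_action(struct action *a, struct owner *o)
{
	assert(a->owner == o);			/* invariant, never memset() */

	memset(a->list, 0, sizeof(a->list));	/* clear per-run data only   */
	memset(a->timer, 0, sizeof(a->timer));
	a->step = 0;
	a->req_id = 0;
}

int main(void)
{
	struct owner *o = (struct owner *)0x1;
	struct action a = { .owner = o };

	rearm_action(&a, o);
	return 0;
}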
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 97d269f16888..1bc623ad3faf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
302 return -ENOMEM; 302 return -ENOMEM;
303 aac_fib_init(fibctx); 303 aac_fib_init(fibctx);
304 304
305 mutex_lock(&dev->ioctl_mutex); 305 if (!dev->adapter_shutdown) {
306 dev->adapter_shutdown = 1; 306 mutex_lock(&dev->ioctl_mutex);
307 mutex_unlock(&dev->ioctl_mutex); 307 dev->adapter_shutdown = 1;
308 mutex_unlock(&dev->ioctl_mutex);
309 }
308 310
309 cmd = (struct aac_close *) fib_data(fibctx); 311 cmd = (struct aac_close *) fib_data(fibctx);
310 cmd->command = cpu_to_le32(VM_CloseAll); 312 cmd->command = cpu_to_le32(VM_CloseAll);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 62beb2596466..c9252b138c1f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1551,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac)
1551{ 1551{
1552 int i; 1552 int i;
1553 1553
1554 mutex_lock(&aac->ioctl_mutex);
1554 aac->adapter_shutdown = 1; 1555 aac->adapter_shutdown = 1;
1555 aac_send_shutdown(aac); 1556 mutex_unlock(&aac->ioctl_mutex);
1556 1557
1557 if (aac->aif_thread) { 1558 if (aac->aif_thread) {
1558 int i; 1559 int i;
@@ -1565,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac)
1565 } 1566 }
1566 kthread_stop(aac->thread); 1567 kthread_stop(aac->thread);
1567 } 1568 }
1569
1570 aac_send_shutdown(aac);
1571
1568 aac_adapter_disable_int(aac); 1572 aac_adapter_disable_int(aac);
1573
1569 if (aac_is_src(aac)) { 1574 if (aac_is_src(aac)) {
1570 if (aac->max_msix > 1) { 1575 if (aac->max_msix > 1) {
1571 for (i = 0; i < aac->max_msix; i++) { 1576 for (i = 0; i < aac->max_msix; i++) {
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9abe81021484..4ed3d26ffdde 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
4091 memset(id_ctlr, 0, sizeof(*id_ctlr)); 4091 memset(id_ctlr, 0, sizeof(*id_ctlr));
4092 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); 4092 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4093 if (!rc) 4093 if (!rc)
4094 if (id_ctlr->configured_logical_drive_count < 256) 4094 if (id_ctlr->configured_logical_drive_count < 255)
4095 *nlocals = id_ctlr->configured_logical_drive_count; 4095 *nlocals = id_ctlr->configured_logical_drive_count;
4096 else 4096 else
4097 *nlocals = le16_to_cpu( 4097 *nlocals = le16_to_cpu(
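The hpsa change fixes a dead branch: configured_logical_drive_count is evidently an 8-bit field, so "< 256" was always true and the le16 extended-count fallback could never be taken; with "< 255" the value 255 now routes to the wider counter. A two-line demonstration (gcc's -Wtype-limits typically flags the old comparison as always true):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t count = 255;	/* largest value an 8-bit counter can hold */

	printf("count < 256: %d (always 1, fallback unreachable)\n", count < 256);
	printf("count < 255: %d (255 now selects the 16-bit count)\n", count < 255);
	return 0;
}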
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 937209805baf..3bd956d3bc5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3061 host->max_cmd_len, host->max_channel, host->max_lun, 3061 host->max_cmd_len, host->max_channel, host->max_lun,
3062 host->transportt, sht->vendor_id); 3062 host->transportt, sht->vendor_id);
3063 3063
3064 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3065
3064 /* Set up the irqs */ 3066 /* Set up the irqs */
3065 ret = qla2x00_request_irqs(ha, rsp); 3067 ret = qla2x00_request_irqs(ha, rsp);
3066 if (ret) 3068 if (ret)
@@ -3175,8 +3177,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3175 host->can_queue, base_vha->req, 3177 host->can_queue, base_vha->req,
3176 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3178 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3177 3179
3178 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3179
3180 if (ha->mqenable) { 3180 if (ha->mqenable) {
3181 bool mq = false; 3181 bool mq = false;
3182 bool startit = false; 3182 bool startit = false;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9cf6a80fe297..ad3ea24f0885 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1379 1379
1380 ret = scsi_setup_cmnd(sdev, req); 1380 ret = scsi_setup_cmnd(sdev, req);
1381out: 1381out:
1382 if (ret != BLKPREP_OK)
1383 cmd->flags &= ~SCMD_INITIALIZED;
1384 return scsi_prep_return(q, req, ret); 1382 return scsi_prep_return(q, req, ret);
1385} 1383}
1386 1384
@@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req)
1900 struct scsi_device *sdev = req->q->queuedata; 1898 struct scsi_device *sdev = req->q->queuedata;
1901 struct Scsi_Host *shost = sdev->host; 1899 struct Scsi_Host *shost = sdev->host;
1902 struct scatterlist *sg; 1900 struct scatterlist *sg;
1903 int ret;
1904 1901
1905 scsi_init_command(sdev, cmd); 1902 scsi_init_command(sdev, cmd);
1906 1903
@@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req)
1934 1931
1935 blk_mq_start_request(req); 1932 blk_mq_start_request(req);
1936 1933
1937 ret = scsi_setup_cmnd(sdev, req); 1934 return scsi_setup_cmnd(sdev, req);
1938 if (ret != BLK_STS_OK)
1939 cmd->flags &= ~SCMD_INITIALIZED;
1940 return ret;
1941} 1935}
1942 1936
1943static void scsi_mq_done(struct scsi_cmnd *cmd) 1937static void scsi_mq_done(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0419c2298eab..aa28874e8fb9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
837 837
838 val = 0; 838 val = 0;
839 list_for_each_entry(srp, &sfp->rq_list, entry) { 839 list_for_each_entry(srp, &sfp->rq_list, entry) {
840 if (val > SG_MAX_QUEUE) 840 if (val >= SG_MAX_QUEUE)
841 break; 841 break;
842 rinfo[val].req_state = srp->done + 1; 842 rinfo[val].req_state = srp->done + 1;
843 rinfo[val].problem = 843 rinfo[val].problem =
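The sg change is a classic bounds fix: rinfo[] has SG_MAX_QUEUE entries, so the last valid index is SG_MAX_QUEUE - 1; the old "> SG_MAX_QUEUE" guard allowed one write past the end before breaking out. A minimal standalone illustration with a made-up queue size:

#include <stdio.h>

#define MAX_QUEUE 16

int main(void)
{
	int table[MAX_QUEUE], val, filled = 0;

	for (val = 0; val < 20; val++) {
		if (val >= MAX_QUEUE)	/* ">" here would let val == MAX_QUEUE
					 * through and write past the end     */
			break;
		table[val] = val;
		filled++;
	}
	printf("filled %d of %d slots\n", filled, MAX_QUEUE);
	return 0;
}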
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 6c7d7a460689..568e1c65aa82 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -99,11 +99,6 @@
99/* A3700_SPI_IF_TIME_REG */ 99/* A3700_SPI_IF_TIME_REG */
100#define A3700_SPI_CLK_CAPT_EDGE BIT(7) 100#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
101 101
102/* Flags and macros for struct a3700_spi */
103#define A3700_INSTR_CNT 1
104#define A3700_ADDR_CNT 3
105#define A3700_DUMMY_CNT 1
106
107struct a3700_spi { 102struct a3700_spi {
108 struct spi_master *master; 103 struct spi_master *master;
109 void __iomem *base; 104 void __iomem *base;
@@ -117,9 +112,6 @@ struct a3700_spi {
117 u8 byte_len; 112 u8 byte_len;
118 u32 wait_mask; 113 u32 wait_mask;
119 struct completion done; 114 struct completion done;
120 u32 addr_cnt;
121 u32 instr_cnt;
122 size_t hdr_cnt;
123}; 115};
124 116
125static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) 117static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
161} 153}
162 154
163static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, 155static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
164 unsigned int pin_mode) 156 unsigned int pin_mode, bool receiving)
165{ 157{
166 u32 val; 158 u32 val;
167 159
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
177 break; 169 break;
178 case SPI_NBITS_QUAD: 170 case SPI_NBITS_QUAD:
179 val |= A3700_SPI_DATA_PIN1; 171 val |= A3700_SPI_DATA_PIN1;
172 /* RX during address reception uses 4-pin */
173 if (receiving)
174 val |= A3700_SPI_ADDR_PIN;
180 break; 175 break;
181 default: 176 default:
182 dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); 177 dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
392 387
393 spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); 388 spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
394 389
395 return true; 390 /* Timeout was reached */
391 return false;
396} 392}
397 393
398static bool a3700_spi_transfer_wait(struct spi_device *spi, 394static bool a3700_spi_transfer_wait(struct spi_device *spi,
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
446 442
447static void a3700_spi_header_set(struct a3700_spi *a3700_spi) 443static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
448{ 444{
449 u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0; 445 unsigned int addr_cnt;
450 u32 val = 0; 446 u32 val = 0;
451 447
452 /* Clear the header registers */ 448 /* Clear the header registers */
453 spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); 449 spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
454 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); 450 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
455 spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); 451 spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
452 spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
456 453
457 /* Set header counters */ 454 /* Set header counters */
458 if (a3700_spi->tx_buf) { 455 if (a3700_spi->tx_buf) {
459 if (a3700_spi->buf_len <= a3700_spi->instr_cnt) { 456 /*
460 instr_cnt = a3700_spi->buf_len; 457 * when tx data is not 4 bytes aligned, there will be unexpected
461 } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt + 458 * bytes out of SPI output register, since it always shifts out
462 a3700_spi->addr_cnt)) { 459 * as whole 4 bytes. This might cause incorrect transaction with
463 instr_cnt = a3700_spi->instr_cnt; 460 * some devices. To avoid that, use SPI header count feature to
464 addr_cnt = a3700_spi->buf_len - instr_cnt; 461 * transfer up to 3 bytes of data first, and then make the rest
465 } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) { 462 * of data 4-byte aligned.
466 instr_cnt = a3700_spi->instr_cnt; 463 */
467 addr_cnt = a3700_spi->addr_cnt; 464 addr_cnt = a3700_spi->buf_len % 4;
468 /* Need to handle the normal write case with 1 byte 465 if (addr_cnt) {
469 * data 466 val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
470 */ 467 << A3700_SPI_ADDR_CNT_BIT;
471 if (!a3700_spi->tx_buf[instr_cnt + addr_cnt]) 468 spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
472 dummy_cnt = a3700_spi->buf_len - instr_cnt - 469
473 addr_cnt; 470 /* Update the buffer length to be transferred */
471 a3700_spi->buf_len -= addr_cnt;
472
473 /* transfer 1~3 bytes through address count */
474 val = 0;
475 while (addr_cnt--) {
476 val = (val << 8) | a3700_spi->tx_buf[0];
477 a3700_spi->tx_buf++;
478 }
479 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
474 } 480 }
475 val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
476 << A3700_SPI_INSTR_CNT_BIT);
477 val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
478 << A3700_SPI_ADDR_CNT_BIT);
479 val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
480 << A3700_SPI_DUMMY_CNT_BIT);
481 } 481 }
482 spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
483
484 /* Update the buffer length to be transferred */
485 a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
486
487 /* Set Instruction */
488 val = 0;
489 while (instr_cnt--) {
490 val = (val << 8) | a3700_spi->tx_buf[0];
491 a3700_spi->tx_buf++;
492 }
493 spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
494
495 /* Set Address */
496 val = 0;
497 while (addr_cnt--) {
498 val = (val << 8) | a3700_spi->tx_buf[0];
499 a3700_spi->tx_buf++;
500 }
501 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
502} 482}
503 483
504static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) 484static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
512static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) 492static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
513{ 493{
514 u32 val; 494 u32 val;
515 int i = 0;
516 495
517 while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { 496 while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
518 val = 0; 497 val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
519 if (a3700_spi->buf_len >= 4) { 498 spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
520 val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); 499 a3700_spi->buf_len -= 4;
521 spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); 500 a3700_spi->tx_buf += 4;
522
523 a3700_spi->buf_len -= 4;
524 a3700_spi->tx_buf += 4;
525 } else {
526 /*
527 * If the remained buffer length is less than 4-bytes,
528 * we should pad the write buffer with all ones. So that
529 * it avoids overwrite the unexpected bytes following
530 * the last one.
531 */
532 val = GENMASK(31, 0);
533 while (a3700_spi->buf_len) {
534 val &= ~(0xff << (8 * i));
535 val |= *a3700_spi->tx_buf++ << (8 * i);
536 i++;
537 a3700_spi->buf_len--;
538
539 spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
540 val);
541 }
542 break;
543 }
544 } 501 }
545 502
546 return 0; 503 return 0;
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master,
645 a3700_spi->rx_buf = xfer->rx_buf; 602 a3700_spi->rx_buf = xfer->rx_buf;
646 a3700_spi->buf_len = xfer->len; 603 a3700_spi->buf_len = xfer->len;
647 604
648 /* SPI transfer headers */
649 a3700_spi_header_set(a3700_spi);
650
651 if (xfer->tx_buf) 605 if (xfer->tx_buf)
652 nbits = xfer->tx_nbits; 606 nbits = xfer->tx_nbits;
653 else if (xfer->rx_buf) 607 else if (xfer->rx_buf)
654 nbits = xfer->rx_nbits; 608 nbits = xfer->rx_nbits;
655 609
656 a3700_spi_pin_mode_set(a3700_spi, nbits); 610 a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
611
612 /* Flush the FIFOs */
613 a3700_spi_fifo_flush(a3700_spi);
614
615 /* Transfer first bytes of data when buffer is not 4-byte aligned */
616 a3700_spi_header_set(a3700_spi);
657 617
658 if (xfer->rx_buf) { 618 if (xfer->rx_buf) {
659 /* Set read data length */ 619 /* Set read data length */
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
733 dev_err(&spi->dev, "wait wfifo empty timed out\n"); 693 dev_err(&spi->dev, "wait wfifo empty timed out\n");
734 return -ETIMEDOUT; 694 return -ETIMEDOUT;
735 } 695 }
736 } else { 696 }
737 /* 697
738 * If the instruction in SPI_INSTR does not require data 698 if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
739 * to be written to the SPI device, wait until SPI_RDY 699 dev_err(&spi->dev, "wait xfer ready timed out\n");
740 * is 1 for the SPI interface to be in idle. 700 return -ETIMEDOUT;
741 */
742 if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
743 dev_err(&spi->dev, "wait xfer ready timed out\n");
744 return -ETIMEDOUT;
745 }
746 } 701 }
747 702
748 val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 703 val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
834 memset(spi, 0, sizeof(struct a3700_spi)); 789 memset(spi, 0, sizeof(struct a3700_spi));
835 790
836 spi->master = master; 791 spi->master = master;
837 spi->instr_cnt = A3700_INSTR_CNT;
838 spi->addr_cnt = A3700_ADDR_CNT;
839 spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
840 A3700_DUMMY_CNT;
841 792
842 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 793 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
843 spi->base = devm_ioremap_resource(dev, res); 794 spi->base = devm_ioremap_resource(dev, res);
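The spi-armada-3700 rework drops the instruction/address/dummy byte bookkeeping. When a TX buffer is not a multiple of four bytes, the leading 1-3 bytes now go out through the header (address count) register, packed MSB-first, and the remaining 4-byte-aligned payload is streamed to the data FIFO as whole words; the old path instead padded a short trailing word with ones. The pin-mode helper also gains a receiving flag so quad-mode reads set the address-pin bit, and the wait-completion helper now returns false on timeout as its callers expect. A standalone model of just the TX split:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t buf[] = { 0xaa, 0xbb, 0xcc, 0x11, 0x22, 0x33, 0x44 };
	size_t len = sizeof(buf), prefix = len % 4, i;
	uint32_t hdr = 0, word;

	for (i = 0; i < prefix; i++)		/* 1-3 unaligned lead bytes */
		hdr = (hdr << 8) | buf[i];
	printf("header word: 0x%08x (%zu-byte prefix)\n", hdr, prefix);

	for (; i < len; i += 4) {		/* aligned remainder        */
		memcpy(&word, buf + i, sizeof(word));
		printf("fifo word:   0x%08x\n", word);
	}
	return 0;
}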
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 6ef6c44f39f5..a172ab299e80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
1250 goto qspi_probe_err; 1250 goto qspi_probe_err;
1251 } 1251 }
1252 } else { 1252 } else {
1253 goto qspi_probe_err; 1253 goto qspi_resource_err;
1254 } 1254 }
1255 1255
1256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); 1256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
1272 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); 1272 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
1273 if (IS_ERR(qspi->base[CHIP_SELECT])) { 1273 if (IS_ERR(qspi->base[CHIP_SELECT])) {
1274 ret = PTR_ERR(qspi->base[CHIP_SELECT]); 1274 ret = PTR_ERR(qspi->base[CHIP_SELECT]);
1275 goto qspi_probe_err; 1275 goto qspi_resource_err;
1276 } 1276 }
1277 } 1277 }
1278 1278
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
1280 GFP_KERNEL); 1280 GFP_KERNEL);
1281 if (!qspi->dev_ids) { 1281 if (!qspi->dev_ids) {
1282 ret = -ENOMEM; 1282 ret = -ENOMEM;
1283 goto qspi_probe_err; 1283 goto qspi_resource_err;
1284 } 1284 }
1285 1285
1286 for (val = 0; val < num_irqs; val++) { 1286 for (val = 0; val < num_irqs; val++) {
@@ -1369,8 +1369,9 @@ qspi_reg_err:
1369 bcm_qspi_hw_uninit(qspi); 1369 bcm_qspi_hw_uninit(qspi);
1370 clk_disable_unprepare(qspi->clk); 1370 clk_disable_unprepare(qspi->clk);
1371qspi_probe_err: 1371qspi_probe_err:
1372 spi_master_put(master);
1373 kfree(qspi->dev_ids); 1372 kfree(qspi->dev_ids);
1373qspi_resource_err:
1374 spi_master_put(master);
1374 return ret; 1375 return ret;
1375} 1376}
1376/* probe function to be called by SoC specific platform driver probe */ 1377/* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 680cdf549506..ba9743fa2326 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
263 * no need to check it there. 263 * no need to check it there.
264 * However, we need to ensure the following calculations. 264 * However, we need to ensure the following calculations.
265 */ 265 */
266 if ((div < SPI_MBR_DIV_MIN) && 266 if (div < SPI_MBR_DIV_MIN ||
267 (div > SPI_MBR_DIV_MAX)) 267 div > SPI_MBR_DIV_MAX)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 /* Determine the first power of 2 greater than or equal to div */ 270 /* Determine the first power of 2 greater than or equal to div */
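The spi-stm32 fix is pure boolean logic: a divider cannot be below the minimum and above the maximum at the same time, so the old && test never rejected anything; || rejects everything outside the valid range. Tiny standalone check with illustrative bounds (not the driver's real SPI_MBR_DIV_MIN/MAX values):

#include <stdio.h>

#define DIV_MIN 2	/* illustrative, not the driver's constants */
#define DIV_MAX 256

int main(void)
{
	unsigned int div = 1024;	/* clearly out of range */

	printf("old AND test rejects: %d\n", div < DIV_MIN && div > DIV_MAX);
	printf("new OR  test rejects: %d\n", div < DIV_MIN || div > DIV_MAX);
	return 0;
}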
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6e65524cbfd9..e8b5a5e21b2e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -45,7 +45,6 @@
45 45
46#define CREATE_TRACE_POINTS 46#define CREATE_TRACE_POINTS
47#include <trace/events/spi.h> 47#include <trace/events/spi.h>
48#define SPI_DYN_FIRST_BUS_NUM 0
49 48
50static DEFINE_IDR(spi_master_idr); 49static DEFINE_IDR(spi_master_idr);
51 50
@@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr)
2086 struct device *dev = ctlr->dev.parent; 2085 struct device *dev = ctlr->dev.parent;
2087 struct boardinfo *bi; 2086 struct boardinfo *bi;
2088 int status = -ENODEV; 2087 int status = -ENODEV;
2089 int id; 2088 int id, first_dynamic;
2090 2089
2091 if (!dev) 2090 if (!dev)
2092 return -ENODEV; 2091 return -ENODEV;
@@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr)
2116 } 2115 }
2117 } 2116 }
2118 if (ctlr->bus_num < 0) { 2117 if (ctlr->bus_num < 0) {
2118 first_dynamic = of_alias_get_highest_id("spi");
2119 if (first_dynamic < 0)
2120 first_dynamic = 0;
2121 else
2122 first_dynamic++;
2123
2119 mutex_lock(&board_lock); 2124 mutex_lock(&board_lock);
2120 id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0, 2125 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2121 GFP_KERNEL); 2126 0, GFP_KERNEL);
2122 mutex_unlock(&board_lock); 2127 mutex_unlock(&board_lock);
2123 if (WARN(id < 0, "couldn't get idr")) 2128 if (WARN(id < 0, "couldn't get idr"))
2124 return id; 2129 return id;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 82360594fa8e..57efbd3b053b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1024 mutex_unlock(&priv->lock); 1024 mutex_unlock(&priv->lock);
1025 1025
1026 if (use_ptemod) { 1026 if (use_ptemod) {
1027 map->pages_vm_start = vma->vm_start;
1027 err = apply_to_page_range(vma->vm_mm, vma->vm_start, 1028 err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1028 vma->vm_end - vma->vm_start, 1029 vma->vm_end - vma->vm_start,
1029 find_grant_ptes, map); 1030 find_grant_ptes, map);
@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1061 set_grant_ptes_as_special, NULL); 1062 set_grant_ptes_as_special, NULL);
1062 } 1063 }
1063#endif 1064#endif
1064 map->pages_vm_start = vma->vm_start;
1065 } 1065 }
1066 1066
1067 return 0; 1067 return 0;
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e89136ab851e..b437fccd4e62 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
57static void watch_target(struct xenbus_watch *watch, 57static void watch_target(struct xenbus_watch *watch,
58 const char *path, const char *token) 58 const char *path, const char *token)
59{ 59{
60 unsigned long long new_target; 60 unsigned long long new_target, static_max;
61 int err; 61 int err;
62 static bool watch_fired; 62 static bool watch_fired;
63 static long target_diff; 63 static long target_diff;
@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
72 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 72 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
73 */ 73 */
74 new_target >>= PAGE_SHIFT - 10; 74 new_target >>= PAGE_SHIFT - 10;
75 if (watch_fired) { 75
76 balloon_set_new_target(new_target - target_diff); 76 if (!watch_fired) {
77 return; 77 watch_fired = true;
78 err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
79 &static_max);
80 if (err != 1)
81 static_max = new_target;
82 else
83 static_max >>= PAGE_SHIFT - 10;
84 target_diff = xen_pv_domain() ? 0
85 : static_max - balloon_stats.target_pages;
78 } 86 }
79 87
80 watch_fired = true; 88 balloon_set_new_target(new_target - target_diff);
81 target_diff = new_target - balloon_stats.target_pages;
82} 89}
83static struct xenbus_watch target_watch = { 90static struct xenbus_watch target_watch = {
84 .node = "memory/target", 91 .node = "memory/target",
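The balloon watch above now computes target_diff only once, on the first event, from the xenstore "static-max" value (falling back to the first target if the read fails), and applies that fixed offset to every later target. A rough sketch of this "latch an offset on first use" pattern; read_static_max() is a made-up stand-in for the xenbus_scanf() call, and the PV-domain special case is omitted:

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for reading "memory/static-max"; returns false on failure. */
static bool read_static_max(unsigned long long *out)
{
	*out = 4096;	/* pretend the toolstack wrote 4096 pages */
	return true;
}

static unsigned long long current_pages = 3800;

/* Mirrors watch_target(): compute the diff once, then reuse it. */
static unsigned long long adjust_target(unsigned long long new_target)
{
	static bool first_fired;
	static long long target_diff;

	if (!first_fired) {
		unsigned long long static_max;

		first_fired = true;
		if (!read_static_max(&static_max))
			static_max = new_target;
		target_diff = (long long)(static_max - current_pages);
	}
	return new_target - target_diff;
}

int main(void)
{
	printf("adjusted: %llu\n", adjust_target(4096));	/* 3800 */
	printf("adjusted: %llu\n", adjust_target(3000));	/* 2704 */
	return 0;
}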
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 115a27d44bac..ff5d32cf9578 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1992,6 +1992,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
1992retry: 1992retry:
1993 spin_lock(&ci->i_ceph_lock); 1993 spin_lock(&ci->i_ceph_lock);
1994 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { 1994 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1995 spin_unlock(&ci->i_ceph_lock);
1995 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); 1996 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1996 goto out; 1997 goto out;
1997 } 1998 }
@@ -2009,8 +2010,10 @@ retry:
2009 mutex_lock(&session->s_mutex); 2010 mutex_lock(&session->s_mutex);
2010 goto retry; 2011 goto retry;
2011 } 2012 }
2012 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) 2013 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
2014 spin_unlock(&ci->i_ceph_lock);
2013 goto out; 2015 goto out;
2016 }
2014 2017
2015 flushing = __mark_caps_flushing(inode, session, true, 2018 flushing = __mark_caps_flushing(inode, session, true,
2016 &flush_tid, &oldest_flush_tid); 2019 &flush_tid, &oldest_flush_tid);
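Both early returns in try_flush_caps() above previously jumped to "out" while still holding i_ceph_lock; the fix drops the spinlock on each early path. A small pthread sketch of the same discipline, keeping the lock balanced on every exit of a goto-style error path (illustrative only, nothing here is ceph code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_flush(int noflush, int session_ready)
{
	int ret = 0;

	pthread_mutex_lock(&lock);

	if (noflush) {
		/* Early exit: release the lock before leaving. */
		pthread_mutex_unlock(&lock);
		goto out;
	}
	if (!session_ready) {
		/* Same rule on the second early exit. */
		pthread_mutex_unlock(&lock);
		goto out;
	}

	ret = 1;			/* pretend the flush was issued */
	pthread_mutex_unlock(&lock);	/* normal path unlocks too */
out:
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", do_flush(1, 1), do_flush(0, 0), do_flush(0, 1));
	return 0;
}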
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f7243617316c..d5b2e12b5d02 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -5,9 +5,14 @@ config CIFS
5 select CRYPTO 5 select CRYPTO
6 select CRYPTO_MD4 6 select CRYPTO_MD4
7 select CRYPTO_MD5 7 select CRYPTO_MD5
8 select CRYPTO_SHA256
9 select CRYPTO_CMAC
8 select CRYPTO_HMAC 10 select CRYPTO_HMAC
9 select CRYPTO_ARC4 11 select CRYPTO_ARC4
12 select CRYPTO_AEAD2
13 select CRYPTO_CCM
10 select CRYPTO_ECB 14 select CRYPTO_ECB
15 select CRYPTO_AES
11 select CRYPTO_DES 16 select CRYPTO_DES
12 help 17 help
13 This is the client VFS module for the SMB3 family of NAS protocols, 18 This is the client VFS module for the SMB3 family of NAS protocols,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de5b2e1fcce5..e185b2853eab 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,7 +661,9 @@ struct TCP_Server_Info {
661#endif 661#endif
662 unsigned int max_read; 662 unsigned int max_read;
663 unsigned int max_write; 663 unsigned int max_write;
664 __u8 preauth_hash[512]; 664#ifdef CONFIG_CIFS_SMB311
665 __u8 preauth_sha_hash[64]; /* save initial negprot hash */
666#endif /* 3.1.1 */
665 struct delayed_work reconnect; /* reconnect workqueue job */ 667 struct delayed_work reconnect; /* reconnect workqueue job */
666 struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ 668 struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
667 unsigned long echo_interval; 669 unsigned long echo_interval;
@@ -849,7 +851,9 @@ struct cifs_ses {
849 __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; 851 __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
850 __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE]; 852 __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
851 __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; 853 __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
852 __u8 preauth_hash[512]; 854#ifdef CONFIG_CIFS_SMB311
855 __u8 preauth_sha_hash[64];
856#endif /* 3.1.1 */
853}; 857};
854 858
855static inline bool 859static inline bool
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e702d48bd023..81ba6e0d88d8 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
204 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 204 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
205 int i; 205 int i;
206 206
207 if (unlikely(direntry->d_name.len > 207 if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
208 direntry->d_name.len >
208 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) 209 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
209 return -ENAMETOOLONG; 210 return -ENAMETOOLONG;
210 211
@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
520 521
521 rc = check_name(direntry, tcon); 522 rc = check_name(direntry, tcon);
522 if (rc) 523 if (rc)
523 goto out_free_xid; 524 goto out;
524 525
525 server = tcon->ses->server; 526 server = tcon->ses->server;
526 527
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 7ca9808a0daa..62c88dfed57b 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
214 {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"}, 214 {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
215 {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"}, 215 {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
216 {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"}, 216 {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
217 {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"}, 217 {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
218 {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"}, 218 {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
219 {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"}, 219 {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
220 {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"}, 220 {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0dafdbae1f8c..bdb963d0ba32 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -522,6 +522,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
522 struct cifs_open_parms oparms; 522 struct cifs_open_parms oparms;
523 struct cifs_fid fid; 523 struct cifs_fid fid;
524 struct smb2_file_full_ea_info *smb2_data; 524 struct smb2_file_full_ea_info *smb2_data;
525 int ea_buf_size = SMB2_MIN_EA_BUF;
525 526
526 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 527 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
527 if (!utf16_path) 528 if (!utf16_path)
@@ -541,14 +542,32 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
541 return rc; 542 return rc;
542 } 543 }
543 544
544 smb2_data = kzalloc(SMB2_MAX_EA_BUF, GFP_KERNEL); 545 while (1) {
545 if (smb2_data == NULL) { 546 smb2_data = kzalloc(ea_buf_size, GFP_KERNEL);
546 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 547 if (smb2_data == NULL) {
547 return -ENOMEM; 548 SMB2_close(xid, tcon, fid.persistent_fid,
549 fid.volatile_fid);
550 return -ENOMEM;
551 }
552
553 rc = SMB2_query_eas(xid, tcon, fid.persistent_fid,
554 fid.volatile_fid,
555 ea_buf_size, smb2_data);
556
557 if (rc != -E2BIG)
558 break;
559
560 kfree(smb2_data);
561 ea_buf_size <<= 1;
562
563 if (ea_buf_size > SMB2_MAX_EA_BUF) {
564 cifs_dbg(VFS, "EA size is too large\n");
565 SMB2_close(xid, tcon, fid.persistent_fid,
566 fid.volatile_fid);
567 return -ENOMEM;
568 }
548 } 569 }
549 570
550 rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, fid.volatile_fid,
551 smb2_data);
552 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 571 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
553 572
554 if (!rc) 573 if (!rc)
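The smb2_query_eas() change above, paired with the smb2maperror change that maps STATUS_BUFFER_OVERFLOW to -E2BIG, retries the query with a doubled buffer whenever the server reports -E2BIG, starting from SMB2_MIN_EA_BUF and giving up once the size would exceed SMB2_MAX_EA_BUF. A standalone sketch of that grow-and-retry loop; fetch_data() is a hypothetical callee that reports E2BIG until the buffer is big enough:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MIN_BUF   2048
#define MAX_BUF  65536

/* Hypothetical query: fails with -E2BIG unless the buffer holds 9000 bytes. */
static int fetch_data(char *buf, size_t buf_size)
{
	const size_t needed = 9000;

	if (buf_size < needed)
		return -E2BIG;
	memset(buf, 'x', needed);
	return (int)needed;
}

static int query_with_retry(void)
{
	size_t size = MIN_BUF;

	for (;;) {
		char *buf = malloc(size);
		int rc;

		if (!buf)
			return -ENOMEM;

		rc = fetch_data(buf, size);
		if (rc != -E2BIG) {
			free(buf);
			return rc;		/* success or a hard error */
		}

		/* Too small: double and try again, up to the hard cap. */
		free(buf);
		size <<= 1;
		if (size > MAX_BUF)
			return -ENOMEM;
	}
}

int main(void)
{
	printf("query returned %d\n", query_with_retry());
	return 0;
}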
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 6f0e6343c15e..5331631386a2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -648,7 +648,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
648{ 648{
649 int rc = 0; 649 int rc = 0;
650 struct validate_negotiate_info_req vneg_inbuf; 650 struct validate_negotiate_info_req vneg_inbuf;
651 struct validate_negotiate_info_rsp *pneg_rsp; 651 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
652 u32 rsplen; 652 u32 rsplen;
653 u32 inbuflen; /* max of 4 dialects */ 653 u32 inbuflen; /* max of 4 dialects */
654 654
@@ -727,8 +727,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
727 rsplen); 727 rsplen);
728 728
729 /* relax check since Mac returns max bufsize allowed on ioctl */ 729 /* relax check since Mac returns max bufsize allowed on ioctl */
730 if (rsplen > CIFSMaxBufSize) 730 if ((rsplen > CIFSMaxBufSize)
731 return -EIO; 731 || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
732 goto err_rsp_free;
732 } 733 }
733 734
734 /* check validate negotiate info response matches what we got earlier */ 735 /* check validate negotiate info response matches what we got earlier */
@@ -747,10 +748,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
747 748
748 /* validate negotiate successful */ 749 /* validate negotiate successful */
749 cifs_dbg(FYI, "validate negotiate info successful\n"); 750 cifs_dbg(FYI, "validate negotiate info successful\n");
751 kfree(pneg_rsp);
750 return 0; 752 return 0;
751 753
752vneg_out: 754vneg_out:
753 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); 755 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
756err_rsp_free:
757 kfree(pneg_rsp);
754 return -EIO; 758 return -EIO;
755} 759}
756 760
@@ -1255,7 +1259,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1255 struct smb2_tree_connect_req *req; 1259 struct smb2_tree_connect_req *req;
1256 struct smb2_tree_connect_rsp *rsp = NULL; 1260 struct smb2_tree_connect_rsp *rsp = NULL;
1257 struct kvec iov[2]; 1261 struct kvec iov[2];
1258 struct kvec rsp_iov; 1262 struct kvec rsp_iov = { NULL, 0 };
1259 int rc = 0; 1263 int rc = 0;
1260 int resp_buftype; 1264 int resp_buftype;
1261 int unc_path_len; 1265 int unc_path_len;
@@ -1372,7 +1376,7 @@ tcon_exit:
1372 return rc; 1376 return rc;
1373 1377
1374tcon_error_exit: 1378tcon_error_exit:
1375 if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { 1379 if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
1376 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); 1380 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
1377 } 1381 }
1378 goto tcon_exit; 1382 goto tcon_exit;
@@ -1975,6 +1979,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1975 } else 1979 } else
1976 iov[0].iov_len = get_rfc1002_length(req) + 4; 1980 iov[0].iov_len = get_rfc1002_length(req) + 4;
1977 1981
1982 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
1983 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
1984 req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
1978 1985
1979 rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); 1986 rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
1980 cifs_small_buf_release(req); 1987 cifs_small_buf_release(req);
@@ -2191,9 +2198,13 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
2191 req->PersistentFileId = persistent_fid; 2198 req->PersistentFileId = persistent_fid;
2192 req->VolatileFileId = volatile_fid; 2199 req->VolatileFileId = volatile_fid;
2193 req->AdditionalInformation = cpu_to_le32(additional_info); 2200 req->AdditionalInformation = cpu_to_le32(additional_info);
2194 /* 4 for rfc1002 length field and 1 for Buffer */ 2201
2195 req->InputBufferOffset = 2202 /*
2196 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); 2203 * We do not use the input buffer (do not send extra byte)
2204 */
2205 req->InputBufferOffset = 0;
2206 inc_rfc1001_len(req, -1);
2207
2197 req->OutputBufferLength = cpu_to_le32(output_len); 2208 req->OutputBufferLength = cpu_to_le32(output_len);
2198 2209
2199 iov[0].iov_base = (char *)req; 2210 iov[0].iov_base = (char *)req;
@@ -2233,12 +2244,12 @@ qinf_exit:
2233} 2244}
2234 2245
2235int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, 2246int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
2236 u64 persistent_fid, u64 volatile_fid, 2247 u64 persistent_fid, u64 volatile_fid,
2237 struct smb2_file_full_ea_info *data) 2248 int ea_buf_size, struct smb2_file_full_ea_info *data)
2238{ 2249{
2239 return query_info(xid, tcon, persistent_fid, volatile_fid, 2250 return query_info(xid, tcon, persistent_fid, volatile_fid,
2240 FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, 2251 FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0,
2241 SMB2_MAX_EA_BUF, 2252 ea_buf_size,
2242 sizeof(struct smb2_file_full_ea_info), 2253 sizeof(struct smb2_file_full_ea_info),
2243 (void **)&data, 2254 (void **)&data,
2244 NULL); 2255 NULL);
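Two of the smb3_validate_negotiate() hunks above share one shape: the response pointer is initialised to NULL and every exit path, including the new err_rsp_free label, frees it exactly once, so the new short-response check can bail out without leaking. A generic sketch of that "NULL early, free on all paths" pattern using plain malloc/free stand-ins, not the cifs buffers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int validate(size_t rsplen)
{
	char *rsp = NULL;	/* NULL so free(rsp) is always safe */
	int ret = -1;

	if (rsplen == 0)
		goto out;	/* nothing received: free(NULL) is a no-op */

	rsp = malloc(rsplen);
	if (!rsp)
		return -1;	/* nothing allocated yet, plain return is fine */
	memset(rsp, 0, rsplen);

	/* Reject answers too small to contain the expected structure. */
	if (rsplen < 16)
		goto out;

	ret = 0;		/* response accepted */
out:
	free(rsp);		/* single release point for every path above */
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", validate(0), validate(8), validate(64));
	return 0;
}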
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6c9653a130c8..c2ec934be968 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -832,7 +832,7 @@ struct smb2_flush_rsp {
832/* Channel field for read and write: exactly one of following flags can be set*/ 832/* Channel field for read and write: exactly one of following flags can be set*/
833#define SMB2_CHANNEL_NONE 0x00000000 833#define SMB2_CHANNEL_NONE 0x00000000
834#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ 834#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */
835#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */ 835#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
836 836
837/* SMB2 read request without RFC1001 length at the beginning */ 837/* SMB2 read request without RFC1001 length at the beginning */
838struct smb2_read_plain_req { 838struct smb2_read_plain_req {
@@ -1178,7 +1178,8 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
1178 char FileName[0]; /* Name to be assigned to new link */ 1178 char FileName[0]; /* Name to be assigned to new link */
1179} __packed; /* level 11 Set */ 1179} __packed; /* level 11 Set */
1180 1180
1181#define SMB2_MAX_EA_BUF 2048 1181#define SMB2_MIN_EA_BUF 2048
1182#define SMB2_MAX_EA_BUF 65536
1182 1183
1183struct smb2_file_full_ea_info { /* encoding of response for level 15 */ 1184struct smb2_file_full_ea_info { /* encoding of response for level 15 */
1184 __le32 next_entry_offset; 1185 __le32 next_entry_offset;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 003217099ef3..e9ab5227e7a8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -134,6 +134,7 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
134 u64 persistent_file_id, u64 volatile_file_id); 134 u64 persistent_file_id, u64 volatile_file_id);
135extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, 135extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
136 u64 persistent_file_id, u64 volatile_file_id, 136 u64 persistent_file_id, u64 volatile_file_id,
137 int ea_buf_size,
137 struct smb2_file_full_ea_info *data); 138 struct smb2_file_full_ea_info *data);
138extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, 139extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
139 u64 persistent_file_id, u64 volatile_file_id, 140 u64 persistent_file_id, u64 volatile_file_id,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 67367cf1f8cd..99493946e2f9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
390 return generate_smb3signingkey(ses, &triplet); 390 return generate_smb3signingkey(ses, &triplet);
391} 391}
392 392
393#ifdef CONFIG_CIFS_SMB311
393int 394int
394generate_smb311signingkey(struct cifs_ses *ses) 395generate_smb311signingkey(struct cifs_ses *ses)
395 396
@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
398 struct derivation *d; 399 struct derivation *d;
399 400
400 d = &triplet.signing; 401 d = &triplet.signing;
401 d->label.iov_base = "SMB2AESCMAC"; 402 d->label.iov_base = "SMBSigningKey";
402 d->label.iov_len = 12; 403 d->label.iov_len = 14;
403 d->context.iov_base = "SmbSign"; 404 d->context.iov_base = ses->preauth_sha_hash;
404 d->context.iov_len = 8; 405 d->context.iov_len = 64;
405 406
406 d = &triplet.encryption; 407 d = &triplet.encryption;
407 d->label.iov_base = "SMB2AESCCM"; 408 d->label.iov_base = "SMBC2SCipherKey";
408 d->label.iov_len = 11; 409 d->label.iov_len = 16;
409 d->context.iov_base = "ServerIn "; 410 d->context.iov_base = ses->preauth_sha_hash;
410 d->context.iov_len = 10; 411 d->context.iov_len = 64;
411 412
412 d = &triplet.decryption; 413 d = &triplet.decryption;
413 d->label.iov_base = "SMB2AESCCM"; 414 d->label.iov_base = "SMBS2CCipherKey";
414 d->label.iov_len = 11; 415 d->label.iov_len = 16;
415 d->context.iov_base = "ServerOut"; 416 d->context.iov_base = ses->preauth_sha_hash;
416 d->context.iov_len = 10; 417 d->context.iov_len = 64;
417 418
418 return generate_smb3signingkey(ses, &triplet); 419 return generate_smb3signingkey(ses, &triplet);
419} 420}
421#endif /* 311 */
420 422
421int 423int
422smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) 424smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 622081b97426..24967382a7b1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1308,7 +1308,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
1308 */ 1308 */
1309 over = !dir_emit(ctx, dirent->name, dirent->namelen, 1309 over = !dir_emit(ctx, dirent->name, dirent->namelen,
1310 dirent->ino, dirent->type); 1310 dirent->ino, dirent->type);
1311 ctx->pos = dirent->off; 1311 if (!over)
1312 ctx->pos = dirent->off;
1312 } 1313 }
1313 1314
1314 buf += reclen; 1315 buf += reclen;
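The fuse fix above stops advancing ctx->pos when dir_emit() reports that the caller's buffer is full, so the unemitted entry is offered again on the next readdir call. A tiny sketch of that rule with an emit callback that can refuse entries; everything here is illustrative, not fuse code:

#include <stdbool.h>
#include <stdio.h>

struct dir_ctx {
	long pos;		/* resume offset for the next batch */
	int space;		/* how many entries the caller can still take */
};

/* Returns false once the caller's buffer is full. */
static bool emit(struct dir_ctx *ctx, const char *name)
{
	if (ctx->space == 0)
		return false;
	ctx->space--;
	printf("emitted %s\n", name);
	return true;
}

static void fill_dir(struct dir_ctx *ctx)
{
	static const char *names[] = { "a", "b", "c", "d" };
	int i;

	for (i = 0; i < 4; i++) {
		bool over = !emit(ctx, names[i]);

		/* Only move the offset past entries actually delivered. */
		if (!over)
			ctx->pos = i + 1;
		else
			break;
	}
}

int main(void)
{
	struct dir_ctx ctx = { .pos = 0, .space = 2 };

	fill_dir(&ctx);
	printf("next readdir resumes at %ld\n", ctx.pos);	/* 2, not 3 */
	return 0;
}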
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a619addecafc..321511ed8c42 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -598,18 +598,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
598 return true; 598 return true;
599} 599}
600 600
601struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry) 601struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
602 struct dentry *index)
602{ 603{
603 struct dentry *lowerdentry = ovl_dentry_lower(dentry); 604 struct dentry *lowerdentry = ovl_dentry_lower(dentry);
604 struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; 605 struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
605 struct inode *inode; 606 struct inode *inode;
607 /* Already indexed or could be indexed on copy up? */
608 bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
609
610 if (WARN_ON(upperdentry && indexed && !lowerdentry))
611 return ERR_PTR(-EIO);
606 612
607 if (!realinode) 613 if (!realinode)
608 realinode = d_inode(lowerdentry); 614 realinode = d_inode(lowerdentry);
609 615
610 if (!S_ISDIR(realinode->i_mode) && 616 /*
611 (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) { 617 * Copy up origin (lower) may exist for non-indexed upper, but we must
612 struct inode *key = d_inode(lowerdentry ?: upperdentry); 618 * not use lower as hash key in that case.
619 * Hash inodes that are or could be indexed by origin inode and
620 * non-indexed upper inodes that could be hard linked by upper inode.
621 */
622 if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
623 struct inode *key = d_inode(indexed ? lowerdentry :
624 upperdentry);
613 unsigned int nlink; 625 unsigned int nlink;
614 626
615 inode = iget5_locked(dentry->d_sb, (unsigned long) key, 627 inode = iget5_locked(dentry->d_sb, (unsigned long) key,
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 654bea1a5ac9..a12dc10bf726 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
405 * be treated as stale (i.e. after unlink of the overlay inode). 405 * be treated as stale (i.e. after unlink of the overlay inode).
406 * We don't know the verification rules for directory and whiteout 406 * We don't know the verification rules for directory and whiteout
407 * index entries, because they have not been implemented yet, so return 407 * index entries, because they have not been implemented yet, so return
408 * EROFS if those entries are found to avoid corrupting an index that 408 * EINVAL if those entries are found to abort the mount to avoid
409 * was created by a newer kernel. 409 * corrupting an index that was created by a newer kernel.
410 */ 410 */
411 err = -EROFS; 411 err = -EINVAL;
412 if (d_is_dir(index) || ovl_is_whiteout(index)) 412 if (d_is_dir(index) || ovl_is_whiteout(index))
413 goto fail; 413 goto fail;
414 414
415 err = -EINVAL;
416 if (index->d_name.len < sizeof(struct ovl_fh)*2) 415 if (index->d_name.len < sizeof(struct ovl_fh)*2)
417 goto fail; 416 goto fail;
418 417
@@ -507,6 +506,10 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
507 index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); 506 index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
508 if (IS_ERR(index)) { 507 if (IS_ERR(index)) {
509 err = PTR_ERR(index); 508 err = PTR_ERR(index);
509 if (err == -ENOENT) {
510 index = NULL;
511 goto out;
512 }
510 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" 513 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
511 "overlayfs: mount with '-o index=off' to disable inodes index.\n", 514 "overlayfs: mount with '-o index=off' to disable inodes index.\n",
512 d_inode(origin)->i_ino, name.len, name.name, 515 d_inode(origin)->i_ino, name.len, name.name,
@@ -516,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
516 519
517 inode = d_inode(index); 520 inode = d_inode(index);
518 if (d_is_negative(index)) { 521 if (d_is_negative(index)) {
519 if (upper && d_inode(origin)->i_nlink > 1) { 522 goto out_dput;
520 pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
521 d_inode(origin)->i_ino);
522 goto fail;
523 }
524
525 dput(index);
526 index = NULL;
527 } else if (upper && d_inode(upper) != inode) { 523 } else if (upper && d_inode(upper) != inode) {
528 pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n", 524 goto out_dput;
529 index, inode->i_ino, d_inode(upper)->i_ino);
530 goto fail;
531 } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) || 525 } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
532 ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) { 526 ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
533 /* 527 /*
@@ -547,6 +541,11 @@ out:
547 kfree(name.name); 541 kfree(name.name);
548 return index; 542 return index;
549 543
544out_dput:
545 dput(index);
546 index = NULL;
547 goto out;
548
550fail: 549fail:
551 dput(index); 550 dput(index);
552 index = ERR_PTR(-EIO); 551 index = ERR_PTR(-EIO);
@@ -635,6 +634,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
635 } 634 }
636 635
637 if (d.redirect) { 636 if (d.redirect) {
637 err = -ENOMEM;
638 upperredirect = kstrdup(d.redirect, GFP_KERNEL); 638 upperredirect = kstrdup(d.redirect, GFP_KERNEL);
639 if (!upperredirect) 639 if (!upperredirect)
640 goto out_put_upper; 640 goto out_put_upper;
@@ -709,7 +709,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
709 upperdentry = dget(index); 709 upperdentry = dget(index);
710 710
711 if (upperdentry || ctr) { 711 if (upperdentry || ctr) {
712 inode = ovl_get_inode(dentry, upperdentry); 712 inode = ovl_get_inode(dentry, upperdentry, index);
713 err = PTR_ERR(inode); 713 err = PTR_ERR(inode);
714 if (IS_ERR(inode)) 714 if (IS_ERR(inode))
715 goto out_free_oe; 715 goto out_free_oe;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index c706a6f99928..d9a0edd4e57e 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -286,7 +286,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
286bool ovl_is_private_xattr(const char *name); 286bool ovl_is_private_xattr(const char *name);
287 287
288struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); 288struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
289struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry); 289struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
290 struct dentry *index);
290static inline void ovl_copyattr(struct inode *from, struct inode *to) 291static inline void ovl_copyattr(struct inode *from, struct inode *to)
291{ 292{
292 to->i_uid = from->i_uid; 293 to->i_uid = from->i_uid;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 0f85ee9c3268..698b74dd750e 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -1021,13 +1021,12 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
1021 break; 1021 break;
1022 } 1022 }
1023 err = ovl_verify_index(index, lowerstack, numlower); 1023 err = ovl_verify_index(index, lowerstack, numlower);
1024 if (err) { 1024 /* Cleanup stale and orphan index entries */
1025 if (err == -EROFS) 1025 if (err && (err == -ESTALE || err == -ENOENT))
1026 break;
1027 err = ovl_cleanup(dir, index); 1026 err = ovl_cleanup(dir, index);
1028 if (err) 1027 if (err)
1029 break; 1028 break;
1030 } 1029
1031 dput(index); 1030 dput(index);
1032 index = NULL; 1031 index = NULL;
1033 } 1032 }
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 092d150643c1..f5738e96a052 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
174{ 174{
175 struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL); 175 struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
176 176
177 if (!oi)
178 return NULL;
179
177 oi->cache = NULL; 180 oi->cache = NULL;
178 oi->redirect = NULL; 181 oi->redirect = NULL;
179 oi->version = 0; 182 oi->version = 0;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 56d0e526870c..6526ef0e2a23 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -237,11 +237,13 @@ xfs_file_dax_read(
237 if (!count) 237 if (!count)
238 return 0; /* skip atime */ 238 return 0; /* skip atime */
239 239
240 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { 240 if (iocb->ki_flags & IOCB_NOWAIT) {
241 if (iocb->ki_flags & IOCB_NOWAIT) 241 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
242 return -EAGAIN; 242 return -EAGAIN;
243 } else {
243 xfs_ilock(ip, XFS_IOLOCK_SHARED); 244 xfs_ilock(ip, XFS_IOLOCK_SHARED);
244 } 245 }
246
245 ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops); 247 ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
246 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 248 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
247 249
@@ -259,9 +261,10 @@ xfs_file_buffered_aio_read(
259 261
260 trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); 262 trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
261 263
262 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { 264 if (iocb->ki_flags & IOCB_NOWAIT) {
263 if (iocb->ki_flags & IOCB_NOWAIT) 265 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
264 return -EAGAIN; 266 return -EAGAIN;
267 } else {
265 xfs_ilock(ip, XFS_IOLOCK_SHARED); 268 xfs_ilock(ip, XFS_IOLOCK_SHARED);
266 } 269 }
267 ret = generic_file_read_iter(iocb, to); 270 ret = generic_file_read_iter(iocb, to);
@@ -552,9 +555,10 @@ xfs_file_dio_aio_write(
552 iolock = XFS_IOLOCK_SHARED; 555 iolock = XFS_IOLOCK_SHARED;
553 } 556 }
554 557
555 if (!xfs_ilock_nowait(ip, iolock)) { 558 if (iocb->ki_flags & IOCB_NOWAIT) {
556 if (iocb->ki_flags & IOCB_NOWAIT) 559 if (!xfs_ilock_nowait(ip, iolock))
557 return -EAGAIN; 560 return -EAGAIN;
561 } else {
558 xfs_ilock(ip, iolock); 562 xfs_ilock(ip, iolock);
559 } 563 }
560 564
@@ -606,9 +610,10 @@ xfs_file_dax_write(
606 size_t count; 610 size_t count;
607 loff_t pos; 611 loff_t pos;
608 612
609 if (!xfs_ilock_nowait(ip, iolock)) { 613 if (iocb->ki_flags & IOCB_NOWAIT) {
610 if (iocb->ki_flags & IOCB_NOWAIT) 614 if (!xfs_ilock_nowait(ip, iolock))
611 return -EAGAIN; 615 return -EAGAIN;
616 } else {
612 xfs_ilock(ip, iolock); 617 xfs_ilock(ip, iolock);
613 } 618 }
614 619
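All four xfs hunks above switch to the same shape: the trylock is only attempted when the caller set IOCB_NOWAIT and really wants -EAGAIN on contention; blocking callers now take the lock directly instead of trying first and then blocking. A compact pthread analogue of that pattern (illustrative, not xfs locking):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the fixed shape: trylock only for non-blocking callers. */
static int start_io(bool nowait)
{
	if (nowait) {
		if (pthread_mutex_trylock(&iolock) != 0)
			return -EAGAIN;		/* caller asked not to sleep */
	} else {
		pthread_mutex_lock(&iolock);	/* blocking callers just wait */
	}

	/* ... perform the I/O under the lock ... */
	pthread_mutex_unlock(&iolock);
	return 0;
}

int main(void)
{
	printf("%d\n", start_io(true));
	printf("%d\n", start_io(false));
	return 0;
}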
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 0d7b56d91308..13edf19ce9fb 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -74,8 +74,8 @@ void tap_del_queues(struct tap_dev *tap);
74int tap_get_minor(dev_t major, struct tap_dev *tap); 74int tap_get_minor(dev_t major, struct tap_dev *tap);
75void tap_free_minor(dev_t major, struct tap_dev *tap); 75void tap_free_minor(dev_t major, struct tap_dev *tap);
76int tap_queue_resize(struct tap_dev *tap); 76int tap_queue_resize(struct tap_dev *tap);
77int tap_create_cdev(struct cdev *tap_cdev, 77int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
78 dev_t *tap_major, const char *device_name); 78 const char *device_name, struct module *module);
79void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); 79void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
80 80
81#endif /*_LINUX_IF_TAP_H_*/ 81#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..c59af8ab753a 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
157int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, 157int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
158 u8 prio, u8 *tc); 158 u8 prio, u8 *tc);
159int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); 159int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
160int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
161 u8 tc, u8 *tc_group);
160int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); 162int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
161int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, 163int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
162 u8 tc, u8 *bw_pct); 164 u8 tc, u8 *bw_pct);
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 82b171e1aa0b..da803dfc7a39 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -231,7 +231,7 @@ struct sctp_datahdr {
231 __be32 tsn; 231 __be32 tsn;
232 __be16 stream; 232 __be16 stream;
233 __be16 ssn; 233 __be16 ssn;
234 __be32 ppid; 234 __u32 ppid;
235 __u8 payload[0]; 235 __u8 payload[0];
236}; 236};
237 237
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
716 716
717struct sctp_strreset_outreq { 717struct sctp_strreset_outreq {
718 struct sctp_paramhdr param_hdr; 718 struct sctp_paramhdr param_hdr;
719 __u32 request_seq; 719 __be32 request_seq;
720 __u32 response_seq; 720 __be32 response_seq;
721 __u32 send_reset_at_tsn; 721 __be32 send_reset_at_tsn;
722 __u16 list_of_streams[0]; 722 __be16 list_of_streams[0];
723}; 723};
724 724
725struct sctp_strreset_inreq { 725struct sctp_strreset_inreq {
726 struct sctp_paramhdr param_hdr; 726 struct sctp_paramhdr param_hdr;
727 __u32 request_seq; 727 __be32 request_seq;
728 __u16 list_of_streams[0]; 728 __be16 list_of_streams[0];
729}; 729};
730 730
731struct sctp_strreset_tsnreq { 731struct sctp_strreset_tsnreq {
732 struct sctp_paramhdr param_hdr; 732 struct sctp_paramhdr param_hdr;
733 __u32 request_seq; 733 __be32 request_seq;
734}; 734};
735 735
736struct sctp_strreset_addstrm { 736struct sctp_strreset_addstrm {
737 struct sctp_paramhdr param_hdr; 737 struct sctp_paramhdr param_hdr;
738 __u32 request_seq; 738 __be32 request_seq;
739 __u16 number_of_streams; 739 __be16 number_of_streams;
740 __u16 reserved; 740 __be16 reserved;
741}; 741};
742 742
743enum { 743enum {
@@ -752,16 +752,16 @@ enum {
752 752
753struct sctp_strreset_resp { 753struct sctp_strreset_resp {
754 struct sctp_paramhdr param_hdr; 754 struct sctp_paramhdr param_hdr;
755 __u32 response_seq; 755 __be32 response_seq;
756 __u32 result; 756 __be32 result;
757}; 757};
758 758
759struct sctp_strreset_resptsn { 759struct sctp_strreset_resptsn {
760 struct sctp_paramhdr param_hdr; 760 struct sctp_paramhdr param_hdr;
761 __u32 response_seq; 761 __be32 response_seq;
762 __u32 result; 762 __be32 result;
763 __u32 senders_next_tsn; 763 __be32 senders_next_tsn;
764 __u32 receivers_next_tsn; 764 __be32 receivers_next_tsn;
765}; 765};
766 766
767#endif /* __LINUX_SCTP_H__ */ 767#endif /* __LINUX_SCTP_H__ */
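The sctp header changes above are annotation fixes: fields that carry wire-format values are declared __be16/__be32 so sparse can flag missing byte-order conversions. The practical rule is that every read of such a field goes through ntohs()/ntohl() and every write through htons()/htonl(). A small userspace illustration of the same point, with a made-up wire structure:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Wire-format view of a (made-up) request: both fields are big endian. */
struct wire_req {
	uint32_t request_seq;	/* big endian on the wire */
	uint16_t num_streams;	/* big endian on the wire */
};

int main(void)
{
	struct wire_req req;

	/* Writing: convert host values to network order before sending. */
	req.request_seq = htonl(7);
	req.num_streams = htons(3);

	/* Reading: convert back before doing arithmetic or comparisons. */
	printf("seq=%u streams=%u\n",
	       ntohl(req.request_seq), ntohs(req.num_streams));
	return 0;
}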
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 29c9eb48ca89..c98aaf677466 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -10,13 +10,16 @@
10/* 10/*
11 * Simple wait queues 11 * Simple wait queues
12 * 12 *
13 * While these are very similar to the other/complex wait queues (wait.h) the 13 * While these are very similar to regular wait queues (wait.h) the most
14 * most important difference is that the simple waitqueue allows for 14 * important difference is that the simple waitqueue allows for deterministic
15 * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold 15 * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
16 * times.
17 * 16 *
18 * In order to make this so, we had to drop a fair number of features of the 17 * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
19 * other waitqueue code; notably: 18 * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
19 * priority task a chance to run.
20 *
21 * Secondly, we had to drop a fair number of features of the other waitqueue
22 * code; notably:
20 * 23 *
21 * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; 24 * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
22 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right 25 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
@@ -25,12 +28,14 @@
25 * - the exclusive mode; because this requires preserving the list order 28 * - the exclusive mode; because this requires preserving the list order
26 * and this is hard. 29 * and this is hard.
27 * 30 *
28 * - custom wake functions; because you cannot give any guarantees about 31 * - custom wake callback functions; because you cannot give any guarantees
29 * random code. 32 * about random code. This also allows swait to be used in RT, such that
30 * 33 * raw spinlock can be used for the swait queue head.
31 * As a side effect of this; the data structures are slimmer.
32 * 34 *
33 * One would recommend using this wait queue where possible. 35 * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
36 * For all the above, note that simple wait queues should _only_ be used under
37 * very specific realtime constraints -- it is best to stick with the regular
38 * wait queues in most cases.
34 */ 39 */
35 40
36struct task_struct; 41struct task_struct;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 4e6131cd3f43..ac1a2317941e 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -146,6 +146,7 @@ static void fq_tin_enqueue(struct fq *fq,
146 fq_flow_get_default_t get_default_func) 146 fq_flow_get_default_t get_default_func)
147{ 147{
148 struct fq_flow *flow; 148 struct fq_flow *flow;
149 bool oom;
149 150
150 lockdep_assert_held(&fq->lock); 151 lockdep_assert_held(&fq->lock);
151 152
@@ -167,8 +168,8 @@ static void fq_tin_enqueue(struct fq *fq,
167 } 168 }
168 169
169 __skb_queue_tail(&flow->queue, skb); 170 __skb_queue_tail(&flow->queue, skb);
170 171 oom = (fq->memory_usage > fq->memory_limit);
171 if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) { 172 while (fq->backlog > fq->limit || oom) {
172 flow = list_first_entry_or_null(&fq->backlogs, 173 flow = list_first_entry_or_null(&fq->backlogs,
173 struct fq_flow, 174 struct fq_flow,
174 backlogchain); 175 backlogchain);
@@ -183,8 +184,10 @@ static void fq_tin_enqueue(struct fq *fq,
183 184
184 flow->tin->overlimit++; 185 flow->tin->overlimit++;
185 fq->overlimit++; 186 fq->overlimit++;
186 if (fq->memory_usage > fq->memory_limit) 187 if (oom) {
187 fq->overmemory++; 188 fq->overmemory++;
189 oom = (fq->memory_usage > fq->memory_limit);
190 }
188 } 191 }
189} 192}
190 193
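The fq_tin_enqueue() hunk above turns the single over-limit drop into a loop that keeps evicting until the queue is back under both the packet and the memory limit, re-evaluating the memory condition after each drop so overmemory is only counted while memory is actually the trigger. A toy sketch of that drain loop over plain integers (no mac80211/fq structures involved):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	int backlog, limit;		/* packets queued vs. packet cap */
	int memory_usage, memory_limit;	/* bytes queued vs. byte cap */
	int dropped;
};

/* Drop one queued packet; a stand-in for evicting from the fattest flow. */
static void drop_one(struct queue *q)
{
	q->backlog -= 1;
	q->memory_usage -= 1500;
	q->dropped++;
}

static void enqueue(struct queue *q)
{
	bool oom;

	q->backlog += 1;
	q->memory_usage += 1500;

	/* Keep evicting until both limits are satisfied again. */
	oom = q->memory_usage > q->memory_limit;
	while (q->backlog > q->limit || oom) {
		drop_one(q);
		if (oom)
			oom = q->memory_usage > q->memory_limit;
	}
}

int main(void)
{
	struct queue q = { .backlog = 10, .limit = 10,
			   .memory_usage = 15000, .memory_limit = 16000 };

	enqueue(&q);
	printf("backlog=%d dropped=%d\n", q.backlog, q.dropped);
	return 0;
}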
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 425752f768d2..db8162dd8c0b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
132 return sk->sk_bound_dev_if; 132 return sk->sk_bound_dev_if;
133} 133}
134 134
135static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
136{
137 return rcu_dereference_check(ireq->ireq_opt,
138 refcount_read(&ireq->req.rsk_refcnt) > 0);
139}
140
135struct inet_cork { 141struct inet_cork {
136 unsigned int flags; 142 unsigned int flags;
137 __be32 addr; 143 __be32 addr;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 13b23f3ed69a..70ca2437740e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -3,6 +3,7 @@
3#define __NET_PKT_CLS_H 3#define __NET_PKT_CLS_H
4 4
5#include <linux/pkt_cls.h> 5#include <linux/pkt_cls.h>
6#include <linux/workqueue.h>
6#include <net/sch_generic.h> 7#include <net/sch_generic.h>
7#include <net/act_api.h> 8#include <net/act_api.h>
8 9
@@ -18,6 +19,8 @@ struct tcf_walker {
18int register_tcf_proto_ops(struct tcf_proto_ops *ops); 19int register_tcf_proto_ops(struct tcf_proto_ops *ops);
19int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); 20int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
20 21
22bool tcf_queue_work(struct work_struct *work);
23
21#ifdef CONFIG_NET_CLS 24#ifdef CONFIG_NET_CLS
22struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 25struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
23 bool create); 26 bool create);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 1b33a6c8b477..236bfe5b2ffe 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -11,6 +11,7 @@
11#include <linux/dynamic_queue_limits.h> 11#include <linux/dynamic_queue_limits.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/refcount.h> 13#include <linux/refcount.h>
14#include <linux/workqueue.h>
14#include <net/gen_stats.h> 15#include <net/gen_stats.h>
15#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
16 17
@@ -272,6 +273,7 @@ struct tcf_chain {
272 273
273struct tcf_block { 274struct tcf_block {
274 struct list_head chain_list; 275 struct list_head chain_list;
276 struct work_struct work;
275}; 277};
276 278
277static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 279static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2db3d3a9ce1d..88233cf8b8d4 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
261 struct sctp_fwdtsn_skip *skiplist); 261 struct sctp_fwdtsn_skip *skiplist);
262struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); 262struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
263struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc, 263struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
264 __u16 stream_num, __u16 *stream_list, 264 __u16 stream_num, __be16 *stream_list,
265 bool out, bool in); 265 bool out, bool in);
266struct sctp_chunk *sctp_make_strreset_tsnreq( 266struct sctp_chunk *sctp_make_strreset_tsnreq(
267 const struct sctp_association *asoc); 267 const struct sctp_association *asoc);
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index b8c86ec1a8f5..231dc42f1da6 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
130 130
131struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( 131struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
132 const struct sctp_association *asoc, __u16 flags, 132 const struct sctp_association *asoc, __u16 flags,
133 __u16 stream_num, __u16 *stream_list, gfp_t gfp); 133 __u16 stream_num, __be16 *stream_list, gfp_t gfp);
134 134
135struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event( 135struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
136 const struct sctp_association *asoc, __u16 flags, 136 const struct sctp_association *asoc, __u16 flags,
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 7dc131d62ad5..d96b59f45eba 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -74,10 +74,9 @@ struct strparser {
74 u32 unrecov_intr : 1; 74 u32 unrecov_intr : 1;
75 75
76 struct sk_buff **skb_nextp; 76 struct sk_buff **skb_nextp;
77 struct timer_list msg_timer;
78 struct sk_buff *skb_head; 77 struct sk_buff *skb_head;
79 unsigned int need_bytes; 78 unsigned int need_bytes;
80 struct delayed_work delayed_work; 79 struct delayed_work msg_timer_work;
81 struct work_struct work; 80 struct work_struct work;
82 struct strp_stats stats; 81 struct strp_stats stats;
83 struct strp_callbacks cb; 82 struct strp_callbacks cb;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b1ef98ebce53..e6d0002a1b0b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -844,6 +844,7 @@ struct tcp_skb_cb {
844 __u32 key; 844 __u32 key;
845 __u32 flags; 845 __u32 flags;
846 struct bpf_map *map; 846 struct bpf_map *map;
847 void *data_end;
847 } bpf; 848 } bpf;
848 }; 849 };
849}; 850};
@@ -1770,12 +1771,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
1770 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); 1771 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1771} 1772}
1772 1773
1773/* Called when old skb is about to be deleted (to be combined with new skb) */ 1774/* Called when old skb is about to be deleted and replaced by new skb */
1774static inline void tcp_highest_sack_combine(struct sock *sk, 1775static inline void tcp_highest_sack_replace(struct sock *sk,
1775 struct sk_buff *old, 1776 struct sk_buff *old,
1776 struct sk_buff *new) 1777 struct sk_buff *new)
1777{ 1778{
1778 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) 1779 if (old == tcp_highest_sack(sk))
1779 tcp_sk(sk)->highest_sack = new; 1780 tcp_sk(sk)->highest_sack = new;
1780} 1781}
1781 1782
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8012b4ff959b..30f2ce76b517 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -576,7 +576,7 @@ union bpf_attr {
576 * @map: pointer to sockmap 576 * @map: pointer to sockmap
577 * @key: key to lookup sock in map 577 * @key: key to lookup sock in map
578 * @flags: reserved for future use 578 * @flags: reserved for future use
579 * Return: SK_REDIRECT 579 * Return: SK_PASS
580 * 580 *
581 * int bpf_sock_map_update(skops, map, key, flags) 581 * int bpf_sock_map_update(skops, map, key, flags)
582 * @skops: pointer to bpf_sock_ops 582 * @skops: pointer to bpf_sock_ops
@@ -787,9 +787,8 @@ struct xdp_md {
787}; 787};
788 788
789enum sk_action { 789enum sk_action {
790 SK_ABORTED = 0, 790 SK_DROP = 0,
791 SK_DROP, 791 SK_PASS,
792 SK_REDIRECT,
793}; 792};
794 793
795#define BPF_TAG_SIZE 8 794#define BPF_TAG_SIZE 8
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 32df53012cbd..cfe971296835 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -377,7 +377,7 @@ struct sctp_remote_error {
377 __u16 sre_type; 377 __u16 sre_type;
378 __u16 sre_flags; 378 __u16 sre_flags;
379 __u32 sre_length; 379 __u32 sre_length;
380 __u16 sre_error; 380 __be16 sre_error;
381 sctp_assoc_t sre_assoc_id; 381 sctp_assoc_t sre_assoc_id;
382 __u8 sre_data[0]; 382 __u8 sre_data[0];
383}; 383};
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index ff0e381e7a77..c4253f0090d8 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -24,6 +24,7 @@
24#define SPIDEV_H 24#define SPIDEV_H
25 25
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/ioctl.h>
27 28
28/* User space versions of kernel symbols for SPI clocking modes, 29/* User space versions of kernel symbols for SPI clocking modes,
29 * matching <linux/spi/spi.h> 30 * matching <linux/spi/spi.h>
diff --git a/init/Kconfig b/init/Kconfig
index 78cb2461012e..3c1faaa2af4a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1033,7 +1033,7 @@ endif
1033 1033
1034choice 1034choice
1035 prompt "Compiler optimization level" 1035 prompt "Compiler optimization level"
1036 default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE 1036 default CC_OPTIMIZE_FOR_PERFORMANCE
1037 1037
1038config CC_OPTIMIZE_FOR_PERFORMANCE 1038config CC_OPTIMIZE_FOR_PERFORMANCE
1039 bool "Optimize for performance" 1039 bool "Optimize for performance"
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 2b6eb35ae5d3..dbd7b322a86b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -93,13 +93,27 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
93 return rcu_dereference_sk_user_data(sk); 93 return rcu_dereference_sk_user_data(sk);
94} 94}
95 95
96/* compute the linear packet data range [data, data_end) for skb when
97 * sk_skb type programs are in use.
98 */
99static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
100{
101 TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
102}
103
104enum __sk_action {
105 __SK_DROP = 0,
106 __SK_PASS,
107 __SK_REDIRECT,
108};
109
96static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) 110static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
97{ 111{
98 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); 112 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
99 int rc; 113 int rc;
100 114
101 if (unlikely(!prog)) 115 if (unlikely(!prog))
102 return SK_DROP; 116 return __SK_DROP;
103 117
104 skb_orphan(skb); 118 skb_orphan(skb);
105 /* We need to ensure that BPF metadata for maps is also cleared 119 /* We need to ensure that BPF metadata for maps is also cleared
@@ -108,13 +122,16 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
108 */ 122 */
109 TCP_SKB_CB(skb)->bpf.map = NULL; 123 TCP_SKB_CB(skb)->bpf.map = NULL;
110 skb->sk = psock->sock; 124 skb->sk = psock->sock;
111 bpf_compute_data_end(skb); 125 bpf_compute_data_end_sk_skb(skb);
112 preempt_disable(); 126 preempt_disable();
113 rc = (*prog->bpf_func)(skb, prog->insnsi); 127 rc = (*prog->bpf_func)(skb, prog->insnsi);
114 preempt_enable(); 128 preempt_enable();
115 skb->sk = NULL; 129 skb->sk = NULL;
116 130
117 return rc; 131 /* Moving return codes from UAPI namespace into internal namespace */
132 return rc == SK_PASS ?
133 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
134 __SK_DROP;
118} 135}
119 136
120static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) 137static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
@@ -124,7 +141,7 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
124 141
125 rc = smap_verdict_func(psock, skb); 142 rc = smap_verdict_func(psock, skb);
126 switch (rc) { 143 switch (rc) {
127 case SK_REDIRECT: 144 case __SK_REDIRECT:
128 sk = do_sk_redirect_map(skb); 145 sk = do_sk_redirect_map(skb);
129 if (likely(sk)) { 146 if (likely(sk)) {
130 struct smap_psock *peer = smap_psock_sk(sk); 147 struct smap_psock *peer = smap_psock_sk(sk);
@@ -140,7 +157,7 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
140 } 157 }
141 } 158 }
142 /* Fall through and free skb otherwise */ 159 /* Fall through and free skb otherwise */
143 case SK_DROP: 160 case __SK_DROP:
144 default: 161 default:
145 kfree_skb(skb); 162 kfree_skb(skb);
146 } 163 }
@@ -368,7 +385,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
368 * any socket yet. 385 * any socket yet.
369 */ 386 */
370 skb->sk = psock->sock; 387 skb->sk = psock->sock;
371 bpf_compute_data_end(skb); 388 bpf_compute_data_end_sk_skb(skb);
372 rc = (*prog->bpf_func)(skb, prog->insnsi); 389 rc = (*prog->bpf_func)(skb, prog->insnsi);
373 skb->sk = NULL; 390 skb->sk = NULL;
374 rcu_read_unlock(); 391 rcu_read_unlock();
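Together with the uapi enum change earlier (SK_DROP = 0, SK_PASS), the sockmap hunks above split the program-visible return codes from the kernel-internal verdicts (__SK_DROP/__SK_PASS/__SK_REDIRECT) and translate at one point: a PASS becomes a redirect only if the program also recorded a redirect map, anything else collapses to drop. A compact sketch of that mapping step with stand-in enums:

#include <stdbool.h>
#include <stdio.h>

/* Values a (hypothetical) user program may return. */
enum uapi_action { ACT_DROP = 0, ACT_PASS };

/* Richer set used internally by the consumer of that verdict. */
enum internal_action { INT_DROP = 0, INT_PASS, INT_REDIRECT };

static enum internal_action map_verdict(enum uapi_action rc,
					bool redirect_requested)
{
	/* Unknown or DROP values collapse to an internal drop. */
	if (rc != ACT_PASS)
		return INT_DROP;
	/* PASS plus a recorded redirect target becomes a redirect. */
	return redirect_requested ? INT_REDIRECT : INT_PASS;
}

int main(void)
{
	printf("%d %d %d\n",
	       map_verdict(ACT_PASS, false),	/* INT_PASS */
	       map_verdict(ACT_PASS, true),	/* INT_REDIRECT */
	       map_verdict(ACT_DROP, true));	/* INT_DROP */
	return 0;
}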
diff --git a/kernel/futex.c b/kernel/futex.c
index 0518a0bfc746..0d638f008bb1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1570,8 +1570,16 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1570 int oldval, ret; 1570 int oldval, ret;
1571 1571
1572 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { 1572 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1573 if (oparg < 0 || oparg > 31) 1573 if (oparg < 0 || oparg > 31) {
1574 return -EINVAL; 1574 char comm[sizeof(current->comm)];
1575 /*
1576 * kill this print and return -EINVAL when userspace
1577 * is sane again
1578 */
1579 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1580 get_task_comm(comm, current), oparg);
1581 oparg &= 31;
1582 }
1575 oparg = 1 << oparg; 1583 oparg = 1 << oparg;
1576 } 1584 }
1577 1585
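The futex hunk above stops rejecting out-of-range FUTEX_OP_OPARG_SHIFT arguments outright and instead masks the shift count into 0..31 with a ratelimited warning, because shifting a 32-bit 1 by 32 or more is undefined behaviour in C. A minimal demonstration of the masking:

#include <stdint.h>
#include <stdio.h>

/* Clamp the count into 0..31 before using it as a 32-bit shift amount. */
static uint32_t safe_bit(int count)
{
	count &= 31;		/* same mask the patch applies to oparg */
	return UINT32_C(1) << count;
}

int main(void)
{
	printf("%u %u %u\n", safe_bit(0), safe_bit(5), safe_bit(37));
	/* 37 & 31 == 5, so the last two values are both 32 */
	return 0;
}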
diff --git a/kernel/signal.c b/kernel/signal.c
index 800a18f77732..8dcd8825b2de 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2698,7 +2698,7 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
2698 [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, 2698 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2699 [SIGBUS] = { NSIGBUS, SIL_FAULT }, 2699 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2700 [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, 2700 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2701#if defined(SIGMET) && defined(NSIGEMT) 2701#if defined(SIGEMT) && defined(NSIGEMT)
2702 [SIGEMT] = { NSIGEMT, SIL_FAULT }, 2702 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2703#endif 2703#endif
2704 [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, 2704 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 64d0edf428f8..a2dccfe1acec 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,7 @@ enum {
68 * attach_mutex to avoid changing binding state while 68 * attach_mutex to avoid changing binding state while
69 * worker_attach_to_pool() is in progress. 69 * worker_attach_to_pool() is in progress.
70 */ 70 */
71 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
71 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 72 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
72 73
73 /* worker flags */ 74 /* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
165 /* L: hash of busy workers */ 166 /* L: hash of busy workers */
166 167
167 /* see manage_workers() for details on the two manager mutexes */ 168 /* see manage_workers() for details on the two manager mutexes */
168 struct mutex manager_arb; /* manager arbitration */
169 struct worker *manager; /* L: purely informational */ 169 struct worker *manager; /* L: purely informational */
170 struct mutex attach_mutex; /* attach/detach exclusion */ 170 struct mutex attach_mutex; /* attach/detach exclusion */
171 struct list_head workers; /* A: attached workers */ 171 struct list_head workers; /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
299 299
300static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ 300static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
301static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 301static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
302static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
302 303
303static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 304static LIST_HEAD(workqueues); /* PR: list of all workqueues */
304static bool workqueue_freezing; /* PL: have wqs started freezing? */ 305static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
801/* Do we have too many workers and should some go away? */ 802/* Do we have too many workers and should some go away? */
802static bool too_many_workers(struct worker_pool *pool) 803static bool too_many_workers(struct worker_pool *pool)
803{ 804{
804 bool managing = mutex_is_locked(&pool->manager_arb); 805 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
805 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 806 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
806 int nr_busy = pool->nr_workers - nr_idle; 807 int nr_busy = pool->nr_workers - nr_idle;
807 808
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
1980{ 1981{
1981 struct worker_pool *pool = worker->pool; 1982 struct worker_pool *pool = worker->pool;
1982 1983
1983 /* 1984 if (pool->flags & POOL_MANAGER_ACTIVE)
1984 * Anyone who successfully grabs manager_arb wins the arbitration
1985 * and becomes the manager. mutex_trylock() on pool->manager_arb
1986 * failure while holding pool->lock reliably indicates that someone
1987 * else is managing the pool and the worker which failed trylock
1988 * can proceed to executing work items. This means that anyone
1989 * grabbing manager_arb is responsible for actually performing
1990 * manager duties. If manager_arb is grabbed and released without
1991 * actual management, the pool may stall indefinitely.
1992 */
1993 if (!mutex_trylock(&pool->manager_arb))
1994 return false; 1985 return false;
1986
1987 pool->flags |= POOL_MANAGER_ACTIVE;
1995 pool->manager = worker; 1988 pool->manager = worker;
1996 1989
1997 maybe_create_worker(pool); 1990 maybe_create_worker(pool);
1998 1991
1999 pool->manager = NULL; 1992 pool->manager = NULL;
2000 mutex_unlock(&pool->manager_arb); 1993 pool->flags &= ~POOL_MANAGER_ACTIVE;
1994 wake_up(&wq_manager_wait);
2001 return true; 1995 return true;
2002} 1996}
2003 1997
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
3248 setup_timer(&pool->mayday_timer, pool_mayday_timeout, 3242 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3249 (unsigned long)pool); 3243 (unsigned long)pool);
3250 3244
3251 mutex_init(&pool->manager_arb);
3252 mutex_init(&pool->attach_mutex); 3245 mutex_init(&pool->attach_mutex);
3253 INIT_LIST_HEAD(&pool->workers); 3246 INIT_LIST_HEAD(&pool->workers);
3254 3247
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
3318 hash_del(&pool->hash_node); 3311 hash_del(&pool->hash_node);
3319 3312
3320 /* 3313 /*
3321 * Become the manager and destroy all workers. Grabbing 3314 * Become the manager and destroy all workers. This prevents
3322 * manager_arb prevents @pool's workers from blocking on 3315 * @pool's workers from blocking on attach_mutex. We're the last
3323 * attach_mutex. 3316 * manager and @pool gets freed with the flag set.
3324 */ 3317 */
3325 mutex_lock(&pool->manager_arb);
3326
3327 spin_lock_irq(&pool->lock); 3318 spin_lock_irq(&pool->lock);
3319 wait_event_lock_irq(wq_manager_wait,
3320 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3321 pool->flags |= POOL_MANAGER_ACTIVE;
3322
3328 while ((worker = first_idle_worker(pool))) 3323 while ((worker = first_idle_worker(pool)))
3329 destroy_worker(worker); 3324 destroy_worker(worker);
3330 WARN_ON(pool->nr_workers || pool->nr_idle); 3325 WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
3338 if (pool->detach_completion) 3333 if (pool->detach_completion)
3339 wait_for_completion(pool->detach_completion); 3334 wait_for_completion(pool->detach_completion);
3340 3335
3341 mutex_unlock(&pool->manager_arb);
3342
3343 /* shut down the timers */ 3336 /* shut down the timers */
3344 del_timer_sync(&pool->idle_timer); 3337 del_timer_sync(&pool->idle_timer);
3345 del_timer_sync(&pool->mayday_timer); 3338 del_timer_sync(&pool->mayday_timer);
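The workqueue change above replaces the manager_arb mutex with a POOL_MANAGER_ACTIVE flag tested under pool->lock plus a wait queue: a worker claims management by setting the flag, and pool teardown waits for the flag to clear before becoming the last manager itself. A rough pthread analogue using a mutex-protected flag and a condition variable (illustrative, not the kernel's locking):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t manager_done = PTHREAD_COND_INITIALIZER;
static bool manager_active;

/* A worker tries to become manager; returns false if someone already is. */
static bool try_manage(void)
{
	pthread_mutex_lock(&pool_lock);
	if (manager_active) {
		pthread_mutex_unlock(&pool_lock);
		return false;
	}
	manager_active = true;
	pthread_mutex_unlock(&pool_lock);

	/* ... create/destroy workers here ... */

	pthread_mutex_lock(&pool_lock);
	manager_active = false;
	pthread_cond_broadcast(&manager_done);	/* wake anyone tearing down */
	pthread_mutex_unlock(&pool_lock);
	return true;
}

/* Teardown waits for any in-flight manager, then claims the role itself. */
static void destroy_pool(void)
{
	pthread_mutex_lock(&pool_lock);
	while (manager_active)
		pthread_cond_wait(&manager_done, &pool_lock);
	manager_active = true;		/* last manager: nobody else can start */
	pthread_mutex_unlock(&pool_lock);
	/* ... destroy idle workers and free the pool ... */
}

int main(void)
{
	printf("managed: %d\n", try_manage());
	destroy_pool();
	printf("pool destroyed\n");
	return 0;
}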
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 0bd8a611eb83..fef5d2e114be 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -284,6 +284,9 @@ next_op:
284 if (unlikely(len > datalen - dp)) 284 if (unlikely(len > datalen - dp))
285 goto data_overrun_error; 285 goto data_overrun_error;
286 } 286 }
287 } else {
288 if (unlikely(len > datalen - dp))
289 goto data_overrun_error;
287 } 290 }
288 291
289 if (flags & FLAG_CONS) { 292 if (flags & FLAG_CONS) {
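The asn1_decoder hunk above adds the same "does the claimed length fit in what is left of the buffer" check to the branch that previously skipped it, closing an out-of-bounds read. The guard itself is just arithmetic on the remaining byte count, as in this standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Accept an element only if its claimed length fits in the bytes that
 * remain after the current parse position. */
static bool length_fits(size_t claimed_len, size_t datalen, size_t pos)
{
	return pos <= datalen && claimed_len <= datalen - pos;
}

int main(void)
{
	printf("%d %d\n",
	       length_fits(10, 100, 95),	/* 0: only 5 bytes remain */
	       length_fits(5, 100, 95));	/* 1: exactly fits */
	return 0;
}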
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 155c55d8db5f..4e53be8bc590 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
598 if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0) 598 if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
599 goto all_leaves_cluster_together; 599 goto all_leaves_cluster_together;
600 600
601 /* Otherwise we can just insert a new node ahead of the old 601 /* Otherwise all the old leaves cluster in the same slot, but
602 * one. 602 * the new leaf wants to go into a different slot - so we
603 * create a new node (n0) to hold the new leaf and a pointer to
604 * a new node (n1) holding all the old leaves.
605 *
606 * This can be done by falling through to the node splitting
607 * path.
603 */ 608 */
604 goto present_leaves_cluster_but_not_new_leaf; 609 pr_devel("present leaves cluster but not new leaf\n");
605 } 610 }
606 611
607split_node: 612split_node:
608 pr_devel("split node\n"); 613 pr_devel("split node\n");
609 614
610 /* We need to split the current node; we know that the node doesn't 615 /* We need to split the current node. The node must contain anything
611 * simply contain a full set of leaves that cluster together (it 616 * from a single leaf (in the one leaf case, this leaf will cluster
612 * contains meta pointers and/or non-clustering leaves). 617 * with the new leaf) and the rest meta-pointers, to all leaves, some
618 * of which may cluster.
619 *
620 * It won't contain the case in which all the current leaves plus the
621 * new leaves want to cluster in the same slot.
613 * 622 *
614 * We need to expel at least two leaves out of a set consisting of the 623 * We need to expel at least two leaves out of a set consisting of the
615 * leaves in the node and the new leaf. 624 * leaves in the node and the new leaf. The current meta pointers can
625 * just be copied as they shouldn't cluster with any of the leaves.
616 * 626 *
617 * We need a new node (n0) to replace the current one and a new node to 627 * We need a new node (n0) to replace the current one and a new node to
618 * take the expelled nodes (n1). 628 * take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
717 pr_devel("<--%s() = ok [split node]\n", __func__); 727 pr_devel("<--%s() = ok [split node]\n", __func__);
718 return true; 728 return true;
719 729
720present_leaves_cluster_but_not_new_leaf:
721 /* All the old leaves cluster in the same slot, but the new leaf wants
722 * to go into a different slot, so we create a new node to hold the new
723 * leaf and a pointer to a new node holding all the old leaves.
724 */
725 pr_devel("present leaves cluster but not new leaf\n");
726
727 new_n0->back_pointer = node->back_pointer;
728 new_n0->parent_slot = node->parent_slot;
729 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
730 new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
731 new_n1->parent_slot = edit->segment_cache[0];
732 new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
733 edit->adjust_count_on = new_n0;
734
735 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
736 new_n1->slots[i] = node->slots[i];
737
738 new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
739 edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
740
741 edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
742 edit->set[0].to = assoc_array_node_to_ptr(new_n0);
743 edit->excised_meta[0] = assoc_array_node_to_ptr(node);
744 pr_devel("<--%s() = ok [insert node before]\n", __func__);
745 return true;
746
747all_leaves_cluster_together: 730all_leaves_cluster_together:
748 /* All the leaves, new and old, want to cluster together in this node 731 /* All the leaves, new and old, want to cluster together in this node
749 * in the same slot, so we have to replace this node with a shortcut to 732 * in the same slot, so we have to replace this node with a shortcut to
diff --git a/lib/ioremap.c b/lib/ioremap.c
index ac7802b0a117..b808a390e4c3 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -162,6 +162,7 @@ int ioremap_page_range(unsigned long addr,
162 unsigned long next; 162 unsigned long next;
163 int err; 163 int err;
164 164
165 might_sleep();
165 BUG_ON(addr >= end); 166 BUG_ON(addr >= end);
166 167
167 start = addr; 168 start = addr;
diff --git a/net/core/filter.c b/net/core/filter.c
index aa0265997f93..6ae94f825f72 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1844,14 +1844,15 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
1844{ 1844{
1845 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 1845 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1846 1846
1847 /* If user passes invalid input drop the packet. */
1847 if (unlikely(flags)) 1848 if (unlikely(flags))
1848 return SK_ABORTED; 1849 return SK_DROP;
1849 1850
1850 tcb->bpf.key = key; 1851 tcb->bpf.key = key;
1851 tcb->bpf.flags = flags; 1852 tcb->bpf.flags = flags;
1852 tcb->bpf.map = map; 1853 tcb->bpf.map = map;
1853 1854
1854 return SK_REDIRECT; 1855 return SK_PASS;
1855} 1856}
1856 1857
1857struct sock *do_sk_redirect_map(struct sk_buff *skb) 1858struct sock *do_sk_redirect_map(struct sk_buff *skb)
@@ -4243,6 +4244,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
4243 return insn - insn_buf; 4244 return insn - insn_buf;
4244} 4245}
4245 4246
4247static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
4248 const struct bpf_insn *si,
4249 struct bpf_insn *insn_buf,
4250 struct bpf_prog *prog, u32 *target_size)
4251{
4252 struct bpf_insn *insn = insn_buf;
4253 int off;
4254
4255 switch (si->off) {
4256 case offsetof(struct __sk_buff, data_end):
4257 off = si->off;
4258 off -= offsetof(struct __sk_buff, data_end);
4259 off += offsetof(struct sk_buff, cb);
4260 off += offsetof(struct tcp_skb_cb, bpf.data_end);
4261 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
4262 si->src_reg, off);
4263 break;
4264 default:
4265 return bpf_convert_ctx_access(type, si, insn_buf, prog,
4266 target_size);
4267 }
4268
4269 return insn - insn_buf;
4270}
4271
4246const struct bpf_verifier_ops sk_filter_prog_ops = { 4272const struct bpf_verifier_ops sk_filter_prog_ops = {
4247 .get_func_proto = sk_filter_func_proto, 4273 .get_func_proto = sk_filter_func_proto,
4248 .is_valid_access = sk_filter_is_valid_access, 4274 .is_valid_access = sk_filter_is_valid_access,
@@ -4301,7 +4327,7 @@ const struct bpf_verifier_ops sock_ops_prog_ops = {
4301const struct bpf_verifier_ops sk_skb_prog_ops = { 4327const struct bpf_verifier_ops sk_skb_prog_ops = {
4302 .get_func_proto = sk_skb_func_proto, 4328 .get_func_proto = sk_skb_func_proto,
4303 .is_valid_access = sk_skb_is_valid_access, 4329 .is_valid_access = sk_skb_is_valid_access,
4304 .convert_ctx_access = bpf_convert_ctx_access, 4330 .convert_ctx_access = sk_skb_convert_ctx_access,
4305 .gen_prologue = sk_skb_prologue, 4331 .gen_prologue = sk_skb_prologue,
4306}; 4332};
4307 4333
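The new sk_skb_convert_ctx_access() rewrites a load of the user-visible __sk_buff data_end field into a load from its real storage, the bpf.data_end value stashed in tcp_skb_cb inside skb->cb. The offset arithmetic is just a chain of offsetof() adjustments; the toy translation unit below sketches the same computation in plain C (user_ctx, real_obj and cb_area are made-up stand-ins, not kernel types):

#include <stddef.h>
#include <stdio.h>

struct user_ctx { unsigned int data; unsigned int data_end; };
struct cb_area  { void *data_end_ptr; };
struct real_obj { char header[16]; struct cb_area cb; };

static size_t translate_off(size_t user_off)
{
        size_t off = user_off;

        off -= offsetof(struct user_ctx, data_end);    /* strip the user-visible field offset */
        off += offsetof(struct real_obj, cb);          /* base of the control-block area      */
        off += offsetof(struct cb_area, data_end_ptr); /* real location of the value          */
        return off;
}

int main(void)
{
        printf("rewritten load offset: %zu\n",
               translate_off(offsetof(struct user_ctx, data_end)));
        return 0;
}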
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 0490916864f9..e65fcb45c3f6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -495,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
495 ireq->ir_rmt_addr); 495 ireq->ir_rmt_addr);
496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
497 ireq->ir_rmt_addr, 497 ireq->ir_rmt_addr,
498 rcu_dereference(ireq->ireq_opt)); 498 ireq_opt_deref(ireq));
499 err = net_xmit_eval(err); 499 err = net_xmit_eval(err);
500 } 500 }
501 501
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 873af0108e24..045d8a176279 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
496 if (!ethernet) 496 if (!ethernet)
497 return -EINVAL; 497 return -EINVAL;
498 ethernet_dev = of_find_net_device_by_node(ethernet); 498 ethernet_dev = of_find_net_device_by_node(ethernet);
499 if (!ethernet_dev)
500 return -EPROBE_DEFER;
499 } else { 501 } else {
500 ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]); 502 ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
503 if (!ethernet_dev)
504 return -EPROBE_DEFER;
501 dev_put(ethernet_dev); 505 dev_put(ethernet_dev);
502 } 506 }
503 507
504 if (!ethernet_dev)
505 return -EPROBE_DEFER;
506
507 if (!dst->cpu_dp) { 508 if (!dst->cpu_dp) {
508 dst->cpu_dp = port; 509 dst->cpu_dp = port;
509 dst->cpu_dp->netdev = ethernet_dev; 510 dst->cpu_dp->netdev = ethernet_dev;
diff --git a/net/ife/ife.c b/net/ife/ife.c
index f360341c72eb..7d1ec76e7f43 100644
--- a/net/ife/ife.c
+++ b/net/ife/ife.c
@@ -137,6 +137,6 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
137EXPORT_SYMBOL_GPL(ife_tlv_meta_encode); 137EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
138 138
139MODULE_AUTHOR("Jamal Hadi Salim <jhs@mojatatu.com>"); 139MODULE_AUTHOR("Jamal Hadi Salim <jhs@mojatatu.com>");
140MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>"); 140MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
141MODULE_DESCRIPTION("Inter-FE LFB action"); 141MODULE_DESCRIPTION("Inter-FE LFB action");
142MODULE_LICENSE("GPL"); 142MODULE_LICENSE("GPL");
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 5ec9136a7c36..b47a59cb3573 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -543,7 +543,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
543 struct ip_options_rcu *opt; 543 struct ip_options_rcu *opt;
544 struct rtable *rt; 544 struct rtable *rt;
545 545
546 opt = rcu_dereference(ireq->ireq_opt); 546 opt = ireq_opt_deref(ireq);
547
547 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 548 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
548 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 549 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
549 sk->sk_protocol, inet_sk_flowi_flags(sk), 550 sk->sk_protocol, inet_sk_flowi_flags(sk),
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index fb1ad22b5e29..cdd627355ed1 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
128 128
129static int ipip_err(struct sk_buff *skb, u32 info) 129static int ipip_err(struct sk_buff *skb, u32 info)
130{ 130{
131 131 /* All the routers (except for Linux) return only
132/* All the routers (except for Linux) return only 132 * 8 bytes of packet payload. It means, that precise relaying of
133 8 bytes of packet payload. It means, that precise relaying of 133 * ICMP in the real Internet is absolutely infeasible.
134 ICMP in the real Internet is absolutely infeasible. 134 */
135 */
136 struct net *net = dev_net(skb->dev); 135 struct net *net = dev_net(skb->dev);
137 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); 136 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
138 const struct iphdr *iph = (const struct iphdr *)skb->data; 137 const struct iphdr *iph = (const struct iphdr *)skb->data;
139 struct ip_tunnel *t;
140 int err;
141 const int type = icmp_hdr(skb)->type; 138 const int type = icmp_hdr(skb)->type;
142 const int code = icmp_hdr(skb)->code; 139 const int code = icmp_hdr(skb)->code;
140 struct ip_tunnel *t;
141 int err = 0;
142
143 switch (type) {
144 case ICMP_DEST_UNREACH:
145 switch (code) {
146 case ICMP_SR_FAILED:
147 /* Impossible event. */
148 goto out;
149 default:
150 /* All others are translated to HOST_UNREACH.
151 * rfc2003 contains "deep thoughts" about NET_UNREACH,
152 * I believe they are just ether pollution. --ANK
153 */
154 break;
155 }
156 break;
157
158 case ICMP_TIME_EXCEEDED:
159 if (code != ICMP_EXC_TTL)
160 goto out;
161 break;
162
163 case ICMP_REDIRECT:
164 break;
165
166 default:
167 goto out;
168 }
143 169
144 err = -ENOENT;
145 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 170 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
146 iph->daddr, iph->saddr, 0); 171 iph->daddr, iph->saddr, 0);
147 if (!t) 172 if (!t) {
173 err = -ENOENT;
148 goto out; 174 goto out;
175 }
149 176
150 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 177 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
151 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 178 ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
152 t->parms.link, 0, iph->protocol, 0); 179 iph->protocol, 0);
153 err = 0;
154 goto out; 180 goto out;
155 } 181 }
156 182
157 if (type == ICMP_REDIRECT) { 183 if (type == ICMP_REDIRECT) {
158 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 184 ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
159 iph->protocol, 0);
160 err = 0;
161 goto out; 185 goto out;
162 } 186 }
163 187
164 if (t->parms.iph.daddr == 0) 188 if (t->parms.iph.daddr == 0) {
189 err = -ENOENT;
165 goto out; 190 goto out;
191 }
166 192
167 err = 0;
168 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 193 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
169 goto out; 194 goto out;
170 195
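The rewritten ipip_err() filters on the ICMP type and code up front, before doing any tunnel lookup, and only afterwards resolves the tunnel and reports -ENOENT where appropriate. The filter itself reduces to a small predicate; the version below is a stand-alone sketch using the userspace constants from <netinet/ip_icmp.h> and is not the handler itself:

#include <netinet/ip_icmp.h>
#include <stdbool.h>

static bool icmp_relevant_to_tunnel(int type, int code)
{
        switch (type) {
        case ICMP_DEST_UNREACH:
                return code != ICMP_SR_FAILED;  /* source-route failure treated as impossible */
        case ICMP_TIME_EXCEEDED:
                return code == ICMP_EXC_TTL;    /* only TTL expiry is interesting */
        case ICMP_REDIRECT:
                return true;
        default:
                return false;                   /* nothing to do, skip the lookup */
        }
}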
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4c43365c374c..5b027c69cbc5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
877 877
878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 ireq->ir_rmt_addr, 879 ireq->ir_rmt_addr,
880 rcu_dereference(ireq->ireq_opt)); 880 ireq_opt_deref(ireq));
881 err = net_xmit_eval(err); 881 err = net_xmit_eval(err);
882 } 882 }
883 883
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0bc9e46a5369..823003eef3a2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk)
739 struct tcp_sock *tp = tcp_sk(sk); 739 struct tcp_sock *tp = tcp_sk(sk);
740 740
741 if (tp->lost_out > tp->retrans_out && 741 if (tp->lost_out > tp->retrans_out &&
742 tp->snd_cwnd > tcp_packets_in_flight(tp)) 742 tp->snd_cwnd > tcp_packets_in_flight(tp)) {
743 tcp_mstamp_refresh(tp);
743 tcp_xmit_retransmit_queue(sk); 744 tcp_xmit_retransmit_queue(sk);
745 }
744 746
745 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, 747 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
746 0, GFP_ATOMIC); 748 0, GFP_ATOMIC);
@@ -2060,6 +2062,7 @@ static int tcp_mtu_probe(struct sock *sk)
2060 nskb->ip_summed = skb->ip_summed; 2062 nskb->ip_summed = skb->ip_summed;
2061 2063
2062 tcp_insert_write_queue_before(nskb, skb, sk); 2064 tcp_insert_write_queue_before(nskb, skb, sk);
2065 tcp_highest_sack_replace(sk, skb, nskb);
2063 2066
2064 len = 0; 2067 len = 0;
2065 tcp_for_write_queue_from_safe(skb, next, sk) { 2068 tcp_for_write_queue_from_safe(skb, next, sk) {
@@ -2237,6 +2240,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2237 2240
2238 sent_pkts = 0; 2241 sent_pkts = 0;
2239 2242
2243 tcp_mstamp_refresh(tp);
2240 if (!push_one) { 2244 if (!push_one) {
2241 /* Do MTU probing. */ 2245 /* Do MTU probing. */
2242 result = tcp_mtu_probe(sk); 2246 result = tcp_mtu_probe(sk);
@@ -2248,7 +2252,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2248 } 2252 }
2249 2253
2250 max_segs = tcp_tso_segs(sk, mss_now); 2254 max_segs = tcp_tso_segs(sk, mss_now);
2251 tcp_mstamp_refresh(tp);
2252 while ((skb = tcp_send_head(sk))) { 2255 while ((skb = tcp_send_head(sk))) {
2253 unsigned int limit; 2256 unsigned int limit;
2254 2257
@@ -2663,7 +2666,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2663 else if (!skb_shift(skb, next_skb, next_skb_size)) 2666 else if (!skb_shift(skb, next_skb, next_skb_size))
2664 return false; 2667 return false;
2665 } 2668 }
2666 tcp_highest_sack_combine(sk, next_skb, skb); 2669 tcp_highest_sack_replace(sk, next_skb, skb);
2667 2670
2668 tcp_unlink_write_queue(next_skb, sk); 2671 tcp_unlink_write_queue(next_skb, sk);
2669 2672
@@ -2841,8 +2844,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2841 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 2844 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2842 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2845 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2843 -ENOBUFS; 2846 -ENOBUFS;
2844 if (!err) 2847 if (!err) {
2845 skb->skb_mstamp = tp->tcp_mstamp; 2848 skb->skb_mstamp = tp->tcp_mstamp;
2849 tcp_rate_skb_sent(sk, skb);
2850 }
2846 } else { 2851 } else {
2847 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2852 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2848 } 2853 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4a96ebbf8eda..8a1c846d3df9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3335,6 +3335,7 @@ static void addrconf_permanent_addr(struct net_device *dev)
3335 if ((ifp->flags & IFA_F_PERMANENT) && 3335 if ((ifp->flags & IFA_F_PERMANENT) &&
3336 fixup_permanent_addr(idev, ifp) < 0) { 3336 fixup_permanent_addr(idev, ifp) < 0) {
3337 write_unlock_bh(&idev->lock); 3337 write_unlock_bh(&idev->lock);
3338 in6_ifa_hold(ifp);
3338 ipv6_del_addr(ifp); 3339 ipv6_del_addr(ifp);
3339 write_lock_bh(&idev->lock); 3340 write_lock_bh(&idev->lock);
3340 3341
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 1602b491b281..59c121b932ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
408 case ICMPV6_DEST_UNREACH: 408 case ICMPV6_DEST_UNREACH:
409 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 409 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
410 t->parms.name); 410 t->parms.name);
411 break; 411 if (code != ICMPV6_PORT_UNREACH)
412 break;
413 return;
412 case ICMPV6_TIME_EXCEED: 414 case ICMPV6_TIME_EXCEED:
413 if (code == ICMPV6_EXC_HOPLIMIT) { 415 if (code == ICMPV6_EXC_HOPLIMIT) {
414 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 416 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
415 t->parms.name); 417 t->parms.name);
418 break;
416 } 419 }
417 break; 420 return;
418 case ICMPV6_PARAMPROB: 421 case ICMPV6_PARAMPROB:
419 teli = 0; 422 teli = 0;
420 if (code == ICMPV6_HDR_FIELD) 423 if (code == ICMPV6_HDR_FIELD)
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
430 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 433 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
431 t->parms.name); 434 t->parms.name);
432 } 435 }
433 break; 436 return;
434 case ICMPV6_PKT_TOOBIG: 437 case ICMPV6_PKT_TOOBIG:
435 mtu = be32_to_cpu(info) - offset - t->tun_hlen; 438 mtu = be32_to_cpu(info) - offset - t->tun_hlen;
436 if (t->dev->type == ARPHRD_ETHER) 439 if (t->dev->type == ARPHRD_ETHER)
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
438 if (mtu < IPV6_MIN_MTU) 441 if (mtu < IPV6_MIN_MTU)
439 mtu = IPV6_MIN_MTU; 442 mtu = IPV6_MIN_MTU;
440 t->dev->mtu = mtu; 443 t->dev->mtu = mtu;
441 break; 444 return;
442 } 445 }
443 446
444 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) 447 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
500 __u32 *pmtu, __be16 proto) 503 __u32 *pmtu, __be16 proto)
501{ 504{
502 struct ip6_tnl *tunnel = netdev_priv(dev); 505 struct ip6_tnl *tunnel = netdev_priv(dev);
503 __be16 protocol = (dev->type == ARPHRD_ETHER) ? 506 struct dst_entry *dst = skb_dst(skb);
504 htons(ETH_P_TEB) : proto; 507 __be16 protocol;
505 508
506 if (dev->type == ARPHRD_ETHER) 509 if (dev->type == ARPHRD_ETHER)
507 IPCB(skb)->flags = 0; 510 IPCB(skb)->flags = 0;
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
515 tunnel->o_seqno++; 518 tunnel->o_seqno++;
516 519
517 /* Push GRE header. */ 520 /* Push GRE header. */
521 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
518 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, 522 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
519 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); 523 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
520 524
525 /* TooBig packet may have updated dst->dev's mtu */
526 if (dst && dst_mtu(dst) > dst->dev->mtu)
527 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
528
521 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 529 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
522 NEXTHDR_GRE); 530 NEXTHDR_GRE);
523} 531}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f50452b919d5..0c2738349442 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -584,6 +584,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
584 u32 tunnel_id, peer_tunnel_id; 584 u32 tunnel_id, peer_tunnel_id;
585 u32 session_id, peer_session_id; 585 u32 session_id, peer_session_id;
586 bool drop_refcnt = false; 586 bool drop_refcnt = false;
587 bool drop_tunnel = false;
587 int ver = 2; 588 int ver = 2;
588 int fd; 589 int fd;
589 590
@@ -652,7 +653,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
652 if (tunnel_id == 0) 653 if (tunnel_id == 0)
653 goto end; 654 goto end;
654 655
655 tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id); 656 tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
657 if (tunnel)
658 drop_tunnel = true;
656 659
657 /* Special case: create tunnel context if session_id and 660 /* Special case: create tunnel context if session_id and
658 * peer_session_id is 0. Otherwise look up tunnel using supplied 661 * peer_session_id is 0. Otherwise look up tunnel using supplied
@@ -781,6 +784,8 @@ out_no_ppp:
781end: 784end:
782 if (drop_refcnt) 785 if (drop_refcnt)
783 l2tp_session_dec_refcount(session); 786 l2tp_session_dec_refcount(session);
787 if (drop_tunnel)
788 l2tp_tunnel_dec_refcount(tunnel);
784 release_sock(sk); 789 release_sock(sk);
785 790
786 return error; 791 return error;
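The connect path now looks the tunnel up with l2tp_tunnel_get(), which returns a referenced object, and records in drop_tunnel whether the shared exit path must release that reference. A minimal stand-alone sketch of that take-and-remember shape, with an illustrative tunnel type and helpers rather than the l2tp API:

#include <stdbool.h>
#include <stdlib.h>

struct tunnel { int refcount; };

static struct tunnel *tunnel_get(struct tunnel *t)
{
        if (t)
                t->refcount++;          /* referenced lookup, may return NULL */
        return t;
}

static void tunnel_put(struct tunnel *t)
{
        if (t && --t->refcount == 0)
                free(t);
}

static int connect_session(struct tunnel *candidate)
{
        bool drop_tunnel = false;
        struct tunnel *tunnel;
        int err = 0;

        tunnel = tunnel_get(candidate);
        if (tunnel)
                drop_tunnel = true;

        /* ... session setup; failures set err and fall through to cleanup ... */

        if (drop_tunnel)                /* common exit: drop the ref exactly once */
                tunnel_put(tunnel);
        return err;
}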
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a354f1939e49..fb15d3b97cb2 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2727 if (!ieee80211_sdata_running(sdata)) 2727 if (!ieee80211_sdata_running(sdata))
2728 return -ENETDOWN; 2728 return -ENETDOWN;
2729 2729
2730 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
2731 ret = drv_set_bitrate_mask(local, sdata, mask);
2732 if (ret)
2733 return ret;
2734 }
2735
2736 /* 2730 /*
2737 * If active validate the setting and reject it if it doesn't leave 2731 * If active validate the setting and reject it if it doesn't leave
2738 * at least one basic rate usable, since we really have to be able 2732 * at least one basic rate usable, since we really have to be able
@@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2748 return -EINVAL; 2742 return -EINVAL;
2749 } 2743 }
2750 2744
2745 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
2746 ret = drv_set_bitrate_mask(local, sdata, mask);
2747 if (ret)
2748 return ret;
2749 }
2750
2751 for (i = 0; i < NUM_NL80211_BANDS; i++) { 2751 for (i = 0; i < NUM_NL80211_BANDS; i++) {
2752 struct ieee80211_supported_band *sband = wiphy->bands[i]; 2752 struct ieee80211_supported_band *sband = wiphy->bands[i];
2753 int j; 2753 int j;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ae995c8480db..938049395f90 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -19,6 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/export.h> 20#include <linux/export.h>
21#include <net/mac80211.h> 21#include <net/mac80211.h>
22#include <crypto/algapi.h>
22#include <asm/unaligned.h> 23#include <asm/unaligned.h>
23#include "ieee80211_i.h" 24#include "ieee80211_i.h"
24#include "driver-ops.h" 25#include "driver-ops.h"
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
609 ieee80211_key_free_common(key); 610 ieee80211_key_free_common(key);
610} 611}
611 612
613static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
614 struct ieee80211_key *old,
615 struct ieee80211_key *new)
616{
617 u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
618 u8 *tk_old, *tk_new;
619
620 if (!old || new->conf.keylen != old->conf.keylen)
621 return false;
622
623 tk_old = old->conf.key;
624 tk_new = new->conf.key;
625
626 /*
627 * In station mode, don't compare the TX MIC key, as it's never used
628 * and offloaded rekeying may not care to send it to the host. This
629 * is the case in iwlwifi, for example.
630 */
631 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
632 new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
633 new->conf.keylen == WLAN_KEY_LEN_TKIP &&
634 !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
635 memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
636 memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
637 memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
638 memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
639 tk_old = tkip_old;
640 tk_new = tkip_new;
641 }
642
643 return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
644}
645
612int ieee80211_key_link(struct ieee80211_key *key, 646int ieee80211_key_link(struct ieee80211_key *key,
613 struct ieee80211_sub_if_data *sdata, 647 struct ieee80211_sub_if_data *sdata,
614 struct sta_info *sta) 648 struct sta_info *sta)
@@ -634,8 +668,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
634 * Silently accept key re-installation without really installing the 668 * Silently accept key re-installation without really installing the
635 * new version of the key to avoid nonce reuse or replay issues. 669 * new version of the key to avoid nonce reuse or replay issues.
636 */ 670 */
637 if (old_key && key->conf.keylen == old_key->conf.keylen && 671 if (ieee80211_key_identical(sdata, old_key, key)) {
638 !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
639 ieee80211_key_free_unused(key); 672 ieee80211_key_free_unused(key);
640 ret = 0; 673 ret = 0;
641 goto out; 674 goto out;
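The new ieee80211_key_identical() compares the candidate key against the installed one with crypto_memneq() instead of memcmp(), so the comparison does not leak timing about the key bytes, and it masks the TKIP TX MIC portion that offloaded rekeying may not report to the host. The core of a constant-time inequality check looks like the stand-alone sketch below; it is illustrative only, the kernel helper is what the patch actually uses, and genuinely constant-time behaviour also depends on the compiler not short-circuiting the loop:

#include <stddef.h>

static int const_time_memneq(const void *a, const void *b, size_t len)
{
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
                diff |= pa[i] ^ pb[i];  /* accumulate differences, no early exit */

        return diff != 0;               /* 1 if the buffers differ, 0 if identical */
}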
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 3a6ad0f438dc..64f95624f219 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -296,6 +296,6 @@ static void __exit psample_module_exit(void)
296module_init(psample_module_init); 296module_init(psample_module_init);
297module_exit(psample_module_exit); 297module_exit(psample_module_exit);
298 298
299MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>"); 299MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
300MODULE_DESCRIPTION("netlink channel for packet sampling"); 300MODULE_DESCRIPTION("netlink channel for packet sampling");
301MODULE_LICENSE("GPL v2"); 301MODULE_LICENSE("GPL v2");
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 6ab39dbcca01..8557a1cae041 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
661 } 661 }
662 } 662 }
663 663
664 rds_ib_set_wr_signal_state(ic, send, 0); 664 rds_ib_set_wr_signal_state(ic, send, false);
665 665
666 /* 666 /*
667 * Always signal the last one if we're stopping due to flow control. 667 * Always signal the last one if we're stopping due to flow control.
668 */ 668 */
669 if (ic->i_flowctl && flow_controlled && i == (work_alloc-1)) 669 if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
670 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 670 rds_ib_set_wr_signal_state(ic, send, true);
671 send->s_wr.send_flags |= IB_SEND_SOLICITED;
672 }
671 673
672 if (send->s_wr.send_flags & IB_SEND_SIGNALED) 674 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
673 nr_sig++; 675 nr_sig++;
@@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
705 if (scat == &rm->data.op_sg[rm->data.op_count]) { 707 if (scat == &rm->data.op_sg[rm->data.op_count]) {
706 prev->s_op = ic->i_data_op; 708 prev->s_op = ic->i_data_op;
707 prev->s_wr.send_flags |= IB_SEND_SOLICITED; 709 prev->s_wr.send_flags |= IB_SEND_SOLICITED;
708 if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) { 710 if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
709 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs; 711 nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
710 prev->s_wr.send_flags |= IB_SEND_SIGNALED;
711 nr_sig++;
712 }
713 ic->i_data_op = NULL; 712 ic->i_data_op = NULL;
714 } 713 }
715 714
@@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
792 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; 791 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
793 send->s_atomic_wr.swap_mask = 0; 792 send->s_atomic_wr.swap_mask = 0;
794 } 793 }
794 send->s_wr.send_flags = 0;
795 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); 795 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
796 send->s_atomic_wr.wr.num_sge = 1; 796 send->s_atomic_wr.wr.num_sge = 1;
797 send->s_atomic_wr.wr.next = NULL; 797 send->s_atomic_wr.wr.next = NULL;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ec986ae52808..8b5abcd2f32f 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -264,12 +264,13 @@ static int __init sample_init_module(void)
264 264
265static void __exit sample_cleanup_module(void) 265static void __exit sample_cleanup_module(void)
266{ 266{
267 rcu_barrier();
267 tcf_unregister_action(&act_sample_ops, &sample_net_ops); 268 tcf_unregister_action(&act_sample_ops, &sample_net_ops);
268} 269}
269 270
270module_init(sample_init_module); 271module_init(sample_init_module);
271module_exit(sample_cleanup_module); 272module_exit(sample_cleanup_module);
272 273
273MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>"); 274MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
274MODULE_DESCRIPTION("Packet sampling action"); 275MODULE_DESCRIPTION("Packet sampling action");
275MODULE_LICENSE("GPL v2"); 276MODULE_LICENSE("GPL v2");
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0b2219adf520..b2d310745487 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -77,6 +77,8 @@ out:
77} 77}
78EXPORT_SYMBOL(register_tcf_proto_ops); 78EXPORT_SYMBOL(register_tcf_proto_ops);
79 79
80static struct workqueue_struct *tc_filter_wq;
81
80int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) 82int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
81{ 83{
82 struct tcf_proto_ops *t; 84 struct tcf_proto_ops *t;
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
86 * tcf_proto_ops's destroy() handler. 88 * tcf_proto_ops's destroy() handler.
87 */ 89 */
88 rcu_barrier(); 90 rcu_barrier();
91 flush_workqueue(tc_filter_wq);
89 92
90 write_lock(&cls_mod_lock); 93 write_lock(&cls_mod_lock);
91 list_for_each_entry(t, &tcf_proto_base, head) { 94 list_for_each_entry(t, &tcf_proto_base, head) {
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
100} 103}
101EXPORT_SYMBOL(unregister_tcf_proto_ops); 104EXPORT_SYMBOL(unregister_tcf_proto_ops);
102 105
106bool tcf_queue_work(struct work_struct *work)
107{
108 return queue_work(tc_filter_wq, work);
109}
110EXPORT_SYMBOL(tcf_queue_work);
111
103/* Select new prio value from the range, managed by kernel. */ 112/* Select new prio value from the range, managed by kernel. */
104 113
105static inline u32 tcf_auto_prio(struct tcf_proto *tp) 114static inline u32 tcf_auto_prio(struct tcf_proto *tp)
@@ -266,6 +275,23 @@ err_chain_create:
266} 275}
267EXPORT_SYMBOL(tcf_block_get); 276EXPORT_SYMBOL(tcf_block_get);
268 277
278static void tcf_block_put_final(struct work_struct *work)
279{
280 struct tcf_block *block = container_of(work, struct tcf_block, work);
281 struct tcf_chain *chain, *tmp;
282
283 rtnl_lock();
284 /* Only chain 0 should be still here. */
285 list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
286 tcf_chain_put(chain);
287 rtnl_unlock();
288 kfree(block);
289}
290
291/* XXX: Standalone actions are not allowed to jump to any chain, and bound
292 * actions should be all removed after flushing. However, filters are now
293 * destroyed in tc filter workqueue with RTNL lock, they can not race here.
294 */
269void tcf_block_put(struct tcf_block *block) 295void tcf_block_put(struct tcf_block *block)
270{ 296{
271 struct tcf_chain *chain, *tmp; 297 struct tcf_chain *chain, *tmp;
@@ -273,32 +299,15 @@ void tcf_block_put(struct tcf_block *block)
273 if (!block) 299 if (!block)
274 return; 300 return;
275 301
276 /* XXX: Standalone actions are not allowed to jump to any chain, and 302 list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
277 * bound actions should be all removed after flushing. However,
278 * filters are destroyed in RCU callbacks, we have to hold the chains
279 * first, otherwise we would always race with RCU callbacks on this list
280 * without proper locking.
281 */
282
283 /* Wait for existing RCU callbacks to cool down. */
284 rcu_barrier();
285
286 /* Hold a refcnt for all chains, except 0, in case they are gone. */
287 list_for_each_entry(chain, &block->chain_list, list)
288 if (chain->index)
289 tcf_chain_hold(chain);
290
291 /* No race on the list, because no chain could be destroyed. */
292 list_for_each_entry(chain, &block->chain_list, list)
293 tcf_chain_flush(chain); 303 tcf_chain_flush(chain);
294 304
295 /* Wait for RCU callbacks to release the reference count. */ 305 INIT_WORK(&block->work, tcf_block_put_final);
306 /* Wait for RCU callbacks to release the reference count and make
307 * sure their works have been queued before this.
308 */
296 rcu_barrier(); 309 rcu_barrier();
297 310 tcf_queue_work(&block->work);
298 /* At this point, all the chains should have refcnt == 1. */
299 list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
300 tcf_chain_put(chain);
301 kfree(block);
302} 311}
303EXPORT_SYMBOL(tcf_block_put); 312EXPORT_SYMBOL(tcf_block_put);
304 313
@@ -879,6 +888,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
879#ifdef CONFIG_NET_CLS_ACT 888#ifdef CONFIG_NET_CLS_ACT
880 LIST_HEAD(actions); 889 LIST_HEAD(actions);
881 890
891 ASSERT_RTNL();
882 tcf_exts_to_list(exts, &actions); 892 tcf_exts_to_list(exts, &actions);
883 tcf_action_destroy(&actions, TCA_ACT_UNBIND); 893 tcf_action_destroy(&actions, TCA_ACT_UNBIND);
884 kfree(exts->actions); 894 kfree(exts->actions);
@@ -1030,6 +1040,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);
1030 1040
1031static int __init tc_filter_init(void) 1041static int __init tc_filter_init(void)
1032{ 1042{
1043 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
1044 if (!tc_filter_wq)
1045 return -ENOMEM;
1046
1033 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); 1047 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
1034 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); 1048 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
1035 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, 1049 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
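tc_filter_wq is an ordered workqueue, tcf_queue_work() is the exported enqueue helper, and tcf_block_put() now funnels final destruction through it after rcu_barrier(). Every cls_* conversion that follows applies the same shape, condensed below as a sketch built only from helpers visible in this patch (the struct and function names are placeholders, and the fragment is not a standalone translation unit): the RCU callback cannot take RTNL, so it only queues work, and the work function takes rtnl_lock() and performs the real teardown. Overlaying the rcu_head and the work_struct in a union is safe because the object is in exactly one of the two phases at any time.

struct some_filter {
        struct tcf_exts exts;
        /* ... match state ... */
        union {
                struct work_struct work;
                struct rcu_head rcu;
        };
};

static void some_filter_delete_work(struct work_struct *work)
{
        struct some_filter *f = container_of(work, struct some_filter, work);

        rtnl_lock();                    /* allowed here, not in the RCU callback */
        tcf_exts_destroy(&f->exts);
        kfree(f);
        rtnl_unlock();
}

static void some_filter_delete_rcu(struct rcu_head *head)
{
        struct some_filter *f = container_of(head, struct some_filter, rcu);

        INIT_WORK(&f->work, some_filter_delete_work);
        tcf_queue_work(&f->work);       /* defer teardown to tc_filter_wq */
}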
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d89ebafd2239..f177649a2419 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -34,7 +34,10 @@ struct basic_filter {
34 struct tcf_result res; 34 struct tcf_result res;
35 struct tcf_proto *tp; 35 struct tcf_proto *tp;
36 struct list_head link; 36 struct list_head link;
37 struct rcu_head rcu; 37 union {
38 struct work_struct work;
39 struct rcu_head rcu;
40 };
38}; 41};
39 42
40static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, 43static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -82,15 +85,26 @@ static int basic_init(struct tcf_proto *tp)
82 return 0; 85 return 0;
83} 86}
84 87
85static void basic_delete_filter(struct rcu_head *head) 88static void basic_delete_filter_work(struct work_struct *work)
86{ 89{
87 struct basic_filter *f = container_of(head, struct basic_filter, rcu); 90 struct basic_filter *f = container_of(work, struct basic_filter, work);
88 91
92 rtnl_lock();
89 tcf_exts_destroy(&f->exts); 93 tcf_exts_destroy(&f->exts);
90 tcf_em_tree_destroy(&f->ematches); 94 tcf_em_tree_destroy(&f->ematches);
95 rtnl_unlock();
96
91 kfree(f); 97 kfree(f);
92} 98}
93 99
100static void basic_delete_filter(struct rcu_head *head)
101{
102 struct basic_filter *f = container_of(head, struct basic_filter, rcu);
103
104 INIT_WORK(&f->work, basic_delete_filter_work);
105 tcf_queue_work(&f->work);
106}
107
94static void basic_destroy(struct tcf_proto *tp) 108static void basic_destroy(struct tcf_proto *tp)
95{ 109{
96 struct basic_head *head = rtnl_dereference(tp->root); 110 struct basic_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 520c5027646a..037a3ae86829 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -49,7 +49,10 @@ struct cls_bpf_prog {
49 struct sock_filter *bpf_ops; 49 struct sock_filter *bpf_ops;
50 const char *bpf_name; 50 const char *bpf_name;
51 struct tcf_proto *tp; 51 struct tcf_proto *tp;
52 struct rcu_head rcu; 52 union {
53 struct work_struct work;
54 struct rcu_head rcu;
55 };
53}; 56};
54 57
55static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { 58static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
257 kfree(prog); 260 kfree(prog);
258} 261}
259 262
263static void cls_bpf_delete_prog_work(struct work_struct *work)
264{
265 struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
266
267 rtnl_lock();
268 __cls_bpf_delete_prog(prog);
269 rtnl_unlock();
270}
271
260static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu) 272static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
261{ 273{
262 __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu)); 274 struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
275
276 INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
277 tcf_queue_work(&prog->work);
263} 278}
264 279
265static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog) 280static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d48452f87975..a97e069bee89 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -23,7 +23,10 @@ struct cls_cgroup_head {
23 struct tcf_exts exts; 23 struct tcf_exts exts;
24 struct tcf_ematch_tree ematches; 24 struct tcf_ematch_tree ematches;
25 struct tcf_proto *tp; 25 struct tcf_proto *tp;
26 struct rcu_head rcu; 26 union {
27 struct work_struct work;
28 struct rcu_head rcu;
29 };
27}; 30};
28 31
29static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, 32static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
57 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 60 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
58}; 61};
59 62
63static void cls_cgroup_destroy_work(struct work_struct *work)
64{
65 struct cls_cgroup_head *head = container_of(work,
66 struct cls_cgroup_head,
67 work);
68 rtnl_lock();
69 tcf_exts_destroy(&head->exts);
70 tcf_em_tree_destroy(&head->ematches);
71 kfree(head);
72 rtnl_unlock();
73}
74
60static void cls_cgroup_destroy_rcu(struct rcu_head *root) 75static void cls_cgroup_destroy_rcu(struct rcu_head *root)
61{ 76{
62 struct cls_cgroup_head *head = container_of(root, 77 struct cls_cgroup_head *head = container_of(root,
63 struct cls_cgroup_head, 78 struct cls_cgroup_head,
64 rcu); 79 rcu);
65 80
66 tcf_exts_destroy(&head->exts); 81 INIT_WORK(&head->work, cls_cgroup_destroy_work);
67 tcf_em_tree_destroy(&head->ematches); 82 tcf_queue_work(&head->work);
68 kfree(head);
69} 83}
70 84
71static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, 85static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 2a3a60ec5b86..67f3a2af6aab 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -57,7 +57,10 @@ struct flow_filter {
57 u32 divisor; 57 u32 divisor;
58 u32 baseclass; 58 u32 baseclass;
59 u32 hashrnd; 59 u32 hashrnd;
60 struct rcu_head rcu; 60 union {
61 struct work_struct work;
62 struct rcu_head rcu;
63 };
61}; 64};
62 65
63static inline u32 addr_fold(void *addr) 66static inline u32 addr_fold(void *addr)
@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
369 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 372 [TCA_FLOW_PERTURB] = { .type = NLA_U32 },
370}; 373};
371 374
372static void flow_destroy_filter(struct rcu_head *head) 375static void flow_destroy_filter_work(struct work_struct *work)
373{ 376{
374 struct flow_filter *f = container_of(head, struct flow_filter, rcu); 377 struct flow_filter *f = container_of(work, struct flow_filter, work);
375 378
379 rtnl_lock();
376 del_timer_sync(&f->perturb_timer); 380 del_timer_sync(&f->perturb_timer);
377 tcf_exts_destroy(&f->exts); 381 tcf_exts_destroy(&f->exts);
378 tcf_em_tree_destroy(&f->ematches); 382 tcf_em_tree_destroy(&f->ematches);
379 kfree(f); 383 kfree(f);
384 rtnl_unlock();
385}
386
387static void flow_destroy_filter(struct rcu_head *head)
388{
389 struct flow_filter *f = container_of(head, struct flow_filter, rcu);
390
391 INIT_WORK(&f->work, flow_destroy_filter_work);
392 tcf_queue_work(&f->work);
380} 393}
381 394
382static int flow_change(struct net *net, struct sk_buff *in_skb, 395static int flow_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index b480d7c792ba..5b5722c8b32c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -87,7 +87,10 @@ struct cls_fl_filter {
87 struct list_head list; 87 struct list_head list;
88 u32 handle; 88 u32 handle;
89 u32 flags; 89 u32 flags;
90 struct rcu_head rcu; 90 union {
91 struct work_struct work;
92 struct rcu_head rcu;
93 };
91 struct net_device *hw_dev; 94 struct net_device *hw_dev;
92}; 95};
93 96
@@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp)
215 return 0; 218 return 0;
216} 219}
217 220
218static void fl_destroy_filter(struct rcu_head *head) 221static void fl_destroy_filter_work(struct work_struct *work)
219{ 222{
220 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu); 223 struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
221 224
225 rtnl_lock();
222 tcf_exts_destroy(&f->exts); 226 tcf_exts_destroy(&f->exts);
223 kfree(f); 227 kfree(f);
228 rtnl_unlock();
229}
230
231static void fl_destroy_filter(struct rcu_head *head)
232{
233 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
234
235 INIT_WORK(&f->work, fl_destroy_filter_work);
236 tcf_queue_work(&f->work);
224} 237}
225 238
226static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) 239static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 941245ad07fd..99183b8621ec 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -46,7 +46,10 @@ struct fw_filter {
46#endif /* CONFIG_NET_CLS_IND */ 46#endif /* CONFIG_NET_CLS_IND */
47 struct tcf_exts exts; 47 struct tcf_exts exts;
48 struct tcf_proto *tp; 48 struct tcf_proto *tp;
49 struct rcu_head rcu; 49 union {
50 struct work_struct work;
51 struct rcu_head rcu;
52 };
50}; 53};
51 54
52static u32 fw_hash(u32 handle) 55static u32 fw_hash(u32 handle)
@@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp)
119 return 0; 122 return 0;
120} 123}
121 124
122static void fw_delete_filter(struct rcu_head *head) 125static void fw_delete_filter_work(struct work_struct *work)
123{ 126{
124 struct fw_filter *f = container_of(head, struct fw_filter, rcu); 127 struct fw_filter *f = container_of(work, struct fw_filter, work);
125 128
129 rtnl_lock();
126 tcf_exts_destroy(&f->exts); 130 tcf_exts_destroy(&f->exts);
127 kfree(f); 131 kfree(f);
132 rtnl_unlock();
133}
134
135static void fw_delete_filter(struct rcu_head *head)
136{
137 struct fw_filter *f = container_of(head, struct fw_filter, rcu);
138
139 INIT_WORK(&f->work, fw_delete_filter_work);
140 tcf_queue_work(&f->work);
128} 141}
129 142
130static void fw_destroy(struct tcf_proto *tp) 143static void fw_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index eeac606c95ab..c33f711b9019 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -21,7 +21,10 @@ struct cls_mall_head {
21 struct tcf_result res; 21 struct tcf_result res;
22 u32 handle; 22 u32 handle;
23 u32 flags; 23 u32 flags;
24 struct rcu_head rcu; 24 union {
25 struct work_struct work;
26 struct rcu_head rcu;
27 };
25}; 28};
26 29
27static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, 30static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
41 return 0; 44 return 0;
42} 45}
43 46
47static void mall_destroy_work(struct work_struct *work)
48{
49 struct cls_mall_head *head = container_of(work, struct cls_mall_head,
50 work);
51 rtnl_lock();
52 tcf_exts_destroy(&head->exts);
53 kfree(head);
54 rtnl_unlock();
55}
56
44static void mall_destroy_rcu(struct rcu_head *rcu) 57static void mall_destroy_rcu(struct rcu_head *rcu)
45{ 58{
46 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head, 59 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
47 rcu); 60 rcu);
48 61
49 tcf_exts_destroy(&head->exts); 62 INIT_WORK(&head->work, mall_destroy_work);
50 kfree(head); 63 tcf_queue_work(&head->work);
51} 64}
52 65
53static int mall_replace_hw_filter(struct tcf_proto *tp, 66static int mall_replace_hw_filter(struct tcf_proto *tp,
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 9ddde65915d2..4b14ccd8b8f2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -57,7 +57,10 @@ struct route4_filter {
57 u32 handle; 57 u32 handle;
58 struct route4_bucket *bkt; 58 struct route4_bucket *bkt;
59 struct tcf_proto *tp; 59 struct tcf_proto *tp;
60 struct rcu_head rcu; 60 union {
61 struct work_struct work;
62 struct rcu_head rcu;
63 };
61}; 64};
62 65
63#define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) 66#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
254 return 0; 257 return 0;
255} 258}
256 259
257static void route4_delete_filter(struct rcu_head *head) 260static void route4_delete_filter_work(struct work_struct *work)
258{ 261{
259 struct route4_filter *f = container_of(head, struct route4_filter, rcu); 262 struct route4_filter *f = container_of(work, struct route4_filter, work);
260 263
264 rtnl_lock();
261 tcf_exts_destroy(&f->exts); 265 tcf_exts_destroy(&f->exts);
262 kfree(f); 266 kfree(f);
267 rtnl_unlock();
268}
269
270static void route4_delete_filter(struct rcu_head *head)
271{
272 struct route4_filter *f = container_of(head, struct route4_filter, rcu);
273
274 INIT_WORK(&f->work, route4_delete_filter_work);
275 tcf_queue_work(&f->work);
263} 276}
264 277
265static void route4_destroy(struct tcf_proto *tp) 278static void route4_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b1f6ed48bc72..bdbc541787f8 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -97,7 +97,10 @@ struct rsvp_filter {
97 97
98 u32 handle; 98 u32 handle;
99 struct rsvp_session *sess; 99 struct rsvp_session *sess;
100 struct rcu_head rcu; 100 union {
101 struct work_struct work;
102 struct rcu_head rcu;
103 };
101}; 104};
102 105
103static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) 106static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
282 return -ENOBUFS; 285 return -ENOBUFS;
283} 286}
284 287
285static void rsvp_delete_filter_rcu(struct rcu_head *head) 288static void rsvp_delete_filter_work(struct work_struct *work)
286{ 289{
287 struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu); 290 struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
288 291
292 rtnl_lock();
289 tcf_exts_destroy(&f->exts); 293 tcf_exts_destroy(&f->exts);
290 kfree(f); 294 kfree(f);
295 rtnl_unlock();
296}
297
298static void rsvp_delete_filter_rcu(struct rcu_head *head)
299{
300 struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
301
302 INIT_WORK(&f->work, rsvp_delete_filter_work);
303 tcf_queue_work(&f->work);
291} 304}
292 305
293static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) 306static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 14a7e08b2fa9..beaa95e09c25 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -27,14 +27,20 @@
27struct tcindex_filter_result { 27struct tcindex_filter_result {
28 struct tcf_exts exts; 28 struct tcf_exts exts;
29 struct tcf_result res; 29 struct tcf_result res;
30 struct rcu_head rcu; 30 union {
31 struct work_struct work;
32 struct rcu_head rcu;
33 };
31}; 34};
32 35
33struct tcindex_filter { 36struct tcindex_filter {
34 u16 key; 37 u16 key;
35 struct tcindex_filter_result result; 38 struct tcindex_filter_result result;
36 struct tcindex_filter __rcu *next; 39 struct tcindex_filter __rcu *next;
37 struct rcu_head rcu; 40 union {
41 struct work_struct work;
42 struct rcu_head rcu;
43 };
38}; 44};
39 45
40 46
@@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp)
133 return 0; 139 return 0;
134} 140}
135 141
142static void tcindex_destroy_rexts_work(struct work_struct *work)
143{
144 struct tcindex_filter_result *r;
145
146 r = container_of(work, struct tcindex_filter_result, work);
147 rtnl_lock();
148 tcf_exts_destroy(&r->exts);
149 rtnl_unlock();
150}
151
136static void tcindex_destroy_rexts(struct rcu_head *head) 152static void tcindex_destroy_rexts(struct rcu_head *head)
137{ 153{
138 struct tcindex_filter_result *r; 154 struct tcindex_filter_result *r;
139 155
140 r = container_of(head, struct tcindex_filter_result, rcu); 156 r = container_of(head, struct tcindex_filter_result, rcu);
141 tcf_exts_destroy(&r->exts); 157 INIT_WORK(&r->work, tcindex_destroy_rexts_work);
158 tcf_queue_work(&r->work);
159}
160
161static void tcindex_destroy_fexts_work(struct work_struct *work)
162{
163 struct tcindex_filter *f = container_of(work, struct tcindex_filter,
164 work);
165
166 rtnl_lock();
167 tcf_exts_destroy(&f->result.exts);
168 kfree(f);
169 rtnl_unlock();
142} 170}
143 171
144static void tcindex_destroy_fexts(struct rcu_head *head) 172static void tcindex_destroy_fexts(struct rcu_head *head)
@@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
146 struct tcindex_filter *f = container_of(head, struct tcindex_filter, 174 struct tcindex_filter *f = container_of(head, struct tcindex_filter,
147 rcu); 175 rcu);
148 176
149 tcf_exts_destroy(&f->result.exts); 177 INIT_WORK(&f->work, tcindex_destroy_fexts_work);
150 kfree(f); 178 tcf_queue_work(&f->work);
151} 179}
152 180
153static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last) 181static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 10b8d851fc6b..dadd1b344497 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -68,7 +68,10 @@ struct tc_u_knode {
68 u32 __percpu *pcpu_success; 68 u32 __percpu *pcpu_success;
69#endif 69#endif
70 struct tcf_proto *tp; 70 struct tcf_proto *tp;
71 struct rcu_head rcu; 71 union {
72 struct work_struct work;
73 struct rcu_head rcu;
74 };
72 /* The 'sel' field MUST be the last field in structure to allow for 75 /* The 'sel' field MUST be the last field in structure to allow for
73 * tc_u32_keys allocated at end of structure. 76 * tc_u32_keys allocated at end of structure.
74 */ 77 */
@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
418 * this the u32_delete_key_rcu variant does not free the percpu 421 * this the u32_delete_key_rcu variant does not free the percpu
419 * statistics. 422 * statistics.
420 */ 423 */
424static void u32_delete_key_work(struct work_struct *work)
425{
426 struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
427
428 rtnl_lock();
429 u32_destroy_key(key->tp, key, false);
430 rtnl_unlock();
431}
432
421static void u32_delete_key_rcu(struct rcu_head *rcu) 433static void u32_delete_key_rcu(struct rcu_head *rcu)
422{ 434{
423 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); 435 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
424 436
425 u32_destroy_key(key->tp, key, false); 437 INIT_WORK(&key->work, u32_delete_key_work);
438 tcf_queue_work(&key->work);
426} 439}
427 440
428/* u32_delete_key_freepf_rcu is the rcu callback variant 441/* u32_delete_key_freepf_rcu is the rcu callback variant
@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
432 * for the variant that should be used with keys return from 445 * for the variant that should be used with keys return from
433 * u32_init_knode() 446 * u32_init_knode()
434 */ 447 */
448static void u32_delete_key_freepf_work(struct work_struct *work)
449{
450 struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
451
452 rtnl_lock();
453 u32_destroy_key(key->tp, key, true);
454 rtnl_unlock();
455}
456
435static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) 457static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
436{ 458{
437 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); 459 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
438 460
439 u32_destroy_key(key->tp, key, true); 461 INIT_WORK(&key->work, u32_delete_key_freepf_work);
462 tcf_queue_work(&key->work);
440} 463}
441 464
442static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) 465static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c6deb74e3d2f..22bc6fc48311 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301{ 301{
302 struct Qdisc *q; 302 struct Qdisc *q;
303 303
304 if (!handle)
305 return NULL;
304 q = qdisc_match_from_root(dev->qdisc, handle); 306 q = qdisc_match_from_root(dev->qdisc, handle);
305 if (q) 307 if (q)
306 goto out; 308 goto out;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 34f10e75f3b9..621b5ca3fd1c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -794,7 +794,7 @@ hit:
794struct sctp_hash_cmp_arg { 794struct sctp_hash_cmp_arg {
795 const union sctp_addr *paddr; 795 const union sctp_addr *paddr;
796 const struct net *net; 796 const struct net *net;
797 u16 lport; 797 __be16 lport;
798}; 798};
799 799
800static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, 800static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -820,37 +820,37 @@ out:
820 return err; 820 return err;
821} 821}
822 822
823static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed) 823static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
824{ 824{
825 const struct sctp_transport *t = data; 825 const struct sctp_transport *t = data;
826 const union sctp_addr *paddr = &t->ipaddr; 826 const union sctp_addr *paddr = &t->ipaddr;
827 const struct net *net = sock_net(t->asoc->base.sk); 827 const struct net *net = sock_net(t->asoc->base.sk);
828 u16 lport = htons(t->asoc->base.bind_addr.port); 828 __be16 lport = htons(t->asoc->base.bind_addr.port);
829 u32 addr; 829 __u32 addr;
830 830
831 if (paddr->sa.sa_family == AF_INET6) 831 if (paddr->sa.sa_family == AF_INET6)
832 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 832 addr = jhash(&paddr->v6.sin6_addr, 16, seed);
833 else 833 else
834 addr = paddr->v4.sin_addr.s_addr; 834 addr = (__force __u32)paddr->v4.sin_addr.s_addr;
835 835
836 return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | 836 return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
837 (__force __u32)lport, net_hash_mix(net), seed); 837 (__force __u32)lport, net_hash_mix(net), seed);
838} 838}
839 839
840static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed) 840static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
841{ 841{
842 const struct sctp_hash_cmp_arg *x = data; 842 const struct sctp_hash_cmp_arg *x = data;
843 const union sctp_addr *paddr = x->paddr; 843 const union sctp_addr *paddr = x->paddr;
844 const struct net *net = x->net; 844 const struct net *net = x->net;
845 u16 lport = x->lport; 845 __be16 lport = x->lport;
846 u32 addr; 846 __u32 addr;
847 847
848 if (paddr->sa.sa_family == AF_INET6) 848 if (paddr->sa.sa_family == AF_INET6)
849 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 849 addr = jhash(&paddr->v6.sin6_addr, 16, seed);
850 else 850 else
851 addr = paddr->v4.sin_addr.s_addr; 851 addr = (__force __u32)paddr->v4.sin_addr.s_addr;
852 852
853 return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | 853 return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
854 (__force __u32)lport, net_hash_mix(net), seed); 854 (__force __u32)lport, net_hash_mix(net), seed);
855} 855}
856 856
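The sctp/input.c changes are endianness annotation fixes: the local port stays __be16 end to end, and every place that feeds mixed byte orders into jhash gets an explicit __force cast so sparse knows the mixing is intentional. A hedged sketch of the same idea with illustrative field names:

#include <linux/jhash.h>
#include <linux/types.h>

struct conn_key {
	__be32 addr;		/* kept in network byte order, as on the wire */
	__be16 dport;
	__be16 sport;
};

static u32 conn_key_hash(const struct conn_key *k, u32 seed)
{
	/* __force only tells sparse the byte-order mixing is intentional;
	 * it does not change the bytes that get hashed */
	return jhash_3words((__force u32)k->addr,
			    ((__force u32)k->dport << 16) |
			    (__force u32)k->sport,
			    0, seed);
}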
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 51c488769590..a6dfa86c0201 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
738/* Was this packet marked by Explicit Congestion Notification? */ 738/* Was this packet marked by Explicit Congestion Notification? */
739static int sctp_v6_is_ce(const struct sk_buff *skb) 739static int sctp_v6_is_ce(const struct sk_buff *skb)
740{ 740{
741 return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); 741 return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
742} 742}
743 743
744/* Dump the v6 addr to the seq file. */ 744/* Dump the v6 addr to the seq file. */
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
882 net = sock_net(&opt->inet.sk); 882 net = sock_net(&opt->inet.sk);
883 rcu_read_lock(); 883 rcu_read_lock();
884 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); 884 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
885 if (!dev || 885 if (!dev || !(opt->inet.freebind ||
886 !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) { 886 net->ipv6.sysctl.ip_nonlocal_bind ||
887 ipv6_chk_addr(net, &addr->v6.sin6_addr,
888 dev, 0))) {
887 rcu_read_unlock(); 889 rcu_read_unlock();
888 return 0; 890 return 0;
889 } 891 }
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ca8f196b6c6c..514465b03829 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2854 addr_param_len = af->to_addr_param(addr, &addr_param); 2854 addr_param_len = af->to_addr_param(addr, &addr_param);
2855 param.param_hdr.type = flags; 2855 param.param_hdr.type = flags;
2856 param.param_hdr.length = htons(paramlen + addr_param_len); 2856 param.param_hdr.length = htons(paramlen + addr_param_len);
2857 param.crr_id = i; 2857 param.crr_id = htonl(i);
2858 2858
2859 sctp_addto_chunk(retval, paramlen, &param); 2859 sctp_addto_chunk(retval, paramlen, &param);
2860 sctp_addto_chunk(retval, addr_param_len, &addr_param); 2860 sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2867 addr_param_len = af->to_addr_param(addr, &addr_param); 2867 addr_param_len = af->to_addr_param(addr, &addr_param);
2868 param.param_hdr.type = SCTP_PARAM_DEL_IP; 2868 param.param_hdr.type = SCTP_PARAM_DEL_IP;
2869 param.param_hdr.length = htons(paramlen + addr_param_len); 2869 param.param_hdr.length = htons(paramlen + addr_param_len);
2870 param.crr_id = i; 2870 param.crr_id = htonl(i);
2871 2871
2872 sctp_addto_chunk(retval, paramlen, &param); 2872 sctp_addto_chunk(retval, paramlen, &param);
2873 sctp_addto_chunk(retval, addr_param_len, &addr_param); 2873 sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
3591 */ 3591 */
3592struct sctp_chunk *sctp_make_strreset_req( 3592struct sctp_chunk *sctp_make_strreset_req(
3593 const struct sctp_association *asoc, 3593 const struct sctp_association *asoc,
3594 __u16 stream_num, __u16 *stream_list, 3594 __u16 stream_num, __be16 *stream_list,
3595 bool out, bool in) 3595 bool out, bool in)
3596{ 3596{
3597 struct sctp_strreset_outreq outreq; 3597 struct sctp_strreset_outreq outreq;
@@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc,
3788{ 3788{
3789 struct sctp_reconf_chunk *hdr; 3789 struct sctp_reconf_chunk *hdr;
3790 union sctp_params param; 3790 union sctp_params param;
3791 __u16 last = 0, cnt = 0; 3791 __be16 last = 0;
3792 __u16 cnt = 0;
3792 3793
3793 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; 3794 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
3794 sctp_walk_params(param, hdr, params) { 3795 sctp_walk_params(param, hdr, params) {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e6a2974e020e..e2d9a4b49c9c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1607,12 +1607,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
1607 break; 1607 break;
1608 1608
1609 case SCTP_CMD_INIT_FAILED: 1609 case SCTP_CMD_INIT_FAILED:
1610 sctp_cmd_init_failed(commands, asoc, cmd->obj.err); 1610 sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
1611 break; 1611 break;
1612 1612
1613 case SCTP_CMD_ASSOC_FAILED: 1613 case SCTP_CMD_ASSOC_FAILED:
1614 sctp_cmd_assoc_failed(commands, asoc, event_type, 1614 sctp_cmd_assoc_failed(commands, asoc, event_type,
1615 subtype, chunk, cmd->obj.err); 1615 subtype, chunk, cmd->obj.u32);
1616 break; 1616 break;
1617 1617
1618 case SCTP_CMD_INIT_COUNTER_INC: 1618 case SCTP_CMD_INIT_COUNTER_INC:
@@ -1680,8 +1680,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
1680 case SCTP_CMD_PROCESS_CTSN: 1680 case SCTP_CMD_PROCESS_CTSN:
1681 /* Dummy up a SACK for processing. */ 1681 /* Dummy up a SACK for processing. */
1682 sackh.cum_tsn_ack = cmd->obj.be32; 1682 sackh.cum_tsn_ack = cmd->obj.be32;
1683 sackh.a_rwnd = asoc->peer.rwnd + 1683 sackh.a_rwnd = htonl(asoc->peer.rwnd +
1684 asoc->outqueue.outstanding_bytes; 1684 asoc->outqueue.outstanding_bytes);
1685 sackh.num_gap_ack_blocks = 0; 1685 sackh.num_gap_ack_blocks = 0;
1686 sackh.num_dup_tsns = 0; 1686 sackh.num_dup_tsns = 0;
1687 chunk->subh.sack_hdr = &sackh; 1687 chunk->subh.sack_hdr = &sackh;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 17841ab30798..6f45d1713452 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -170,6 +170,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
170 sk_mem_charge(sk, chunk->skb->truesize); 170 sk_mem_charge(sk, chunk->skb->truesize);
171} 171}
172 172
173static void sctp_clear_owner_w(struct sctp_chunk *chunk)
174{
175 skb_orphan(chunk->skb);
176}
177
178static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
179 void (*cb)(struct sctp_chunk *))
180
181{
182 struct sctp_outq *q = &asoc->outqueue;
183 struct sctp_transport *t;
184 struct sctp_chunk *chunk;
185
186 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
187 list_for_each_entry(chunk, &t->transmitted, transmitted_list)
188 cb(chunk);
189
190 list_for_each_entry(chunk, &q->retransmit, list)
191 cb(chunk);
192
193 list_for_each_entry(chunk, &q->sacked, list)
194 cb(chunk);
195
196 list_for_each_entry(chunk, &q->abandoned, list)
197 cb(chunk);
198
199 list_for_each_entry(chunk, &q->out_chunk_list, list)
200 cb(chunk);
201}
202
173/* Verify that this is a valid address. */ 203/* Verify that this is a valid address. */
174static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 204static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
175 int len) 205 int len)
@@ -8212,7 +8242,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
8212 * paths won't try to lock it and then oldsk. 8242 * paths won't try to lock it and then oldsk.
8213 */ 8243 */
8214 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 8244 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
8245 sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
8215 sctp_assoc_migrate(assoc, newsk); 8246 sctp_assoc_migrate(assoc, newsk);
8247 sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
8216 8248
8217 /* If the association on the newsk is already closed before accept() 8249 /* If the association on the newsk is already closed before accept()
8218 * is called, set RCV_SHUTDOWN flag. 8250 * is called, set RCV_SHUTDOWN flag.
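The socket.c hunk orphans every queued DATA chunk's skb before the association migrates, then re-charges the skbs to the new socket afterwards, so write-memory accounting follows the association. A rough sketch of that two-phase pattern with core sk_buff helpers; skb_set_owner_w stands in here for SCTP's own owner/charge helper, and the queue layout is illustrative:

#include <linux/skbuff.h>
#include <net/sock.h>

/* phase 1: before migrating, detach every queued skb from the old socket */
static void queue_release_owner(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	skb_queue_walk(queue, skb)
		skb_orphan(skb);	/* runs the destructor, uncharging the old owner */
}

/* phase 2: after migrating, charge the same skbs to the new socket */
static void queue_charge_owner(struct sk_buff_head *queue, struct sock *newsk)
{
	struct sk_buff *skb;

	skb_queue_walk(queue, skb)
		skb_set_owner_w(skb, newsk);	/* sets skb->sk and write-memory accounting */
}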
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 63ea15503714..fa8371ff05c4 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -118,6 +118,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
118 __u16 i, str_nums, *str_list; 118 __u16 i, str_nums, *str_list;
119 struct sctp_chunk *chunk; 119 struct sctp_chunk *chunk;
120 int retval = -EINVAL; 120 int retval = -EINVAL;
121 __be16 *nstr_list;
121 bool out, in; 122 bool out, in;
122 123
123 if (!asoc->peer.reconf_capable || 124 if (!asoc->peer.reconf_capable ||
@@ -148,13 +149,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
148 if (str_list[i] >= stream->incnt) 149 if (str_list[i] >= stream->incnt)
149 goto out; 150 goto out;
150 151
152 nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
153 if (!nstr_list) {
154 retval = -ENOMEM;
155 goto out;
156 }
157
151 for (i = 0; i < str_nums; i++) 158 for (i = 0; i < str_nums; i++)
152 str_list[i] = htons(str_list[i]); 159 nstr_list[i] = htons(str_list[i]);
153 160
154 chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); 161 chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
155 162
156 for (i = 0; i < str_nums; i++) 163 kfree(nstr_list);
157 str_list[i] = ntohs(str_list[i]);
158 164
159 if (!chunk) { 165 if (!chunk) {
160 retval = -ENOMEM; 166 retval = -ENOMEM;
@@ -305,7 +311,7 @@ out:
305} 311}
306 312
307static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param( 313static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
308 struct sctp_association *asoc, __u32 resp_seq, 314 struct sctp_association *asoc, __be32 resp_seq,
309 __be16 type) 315 __be16 type)
310{ 316{
311 struct sctp_chunk *chunk = asoc->strreset_chunk; 317 struct sctp_chunk *chunk = asoc->strreset_chunk;
@@ -345,8 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
345{ 351{
346 struct sctp_strreset_outreq *outreq = param.v; 352 struct sctp_strreset_outreq *outreq = param.v;
347 struct sctp_stream *stream = &asoc->stream; 353 struct sctp_stream *stream = &asoc->stream;
348 __u16 i, nums, flags = 0, *str_p = NULL;
349 __u32 result = SCTP_STRRESET_DENIED; 354 __u32 result = SCTP_STRRESET_DENIED;
355 __u16 i, nums, flags = 0;
356 __be16 *str_p = NULL;
350 __u32 request_seq; 357 __u32 request_seq;
351 358
352 request_seq = ntohl(outreq->request_seq); 359 request_seq = ntohl(outreq->request_seq);
@@ -439,8 +446,9 @@ struct sctp_chunk *sctp_process_strreset_inreq(
439 struct sctp_stream *stream = &asoc->stream; 446 struct sctp_stream *stream = &asoc->stream;
440 __u32 result = SCTP_STRRESET_DENIED; 447 __u32 result = SCTP_STRRESET_DENIED;
441 struct sctp_chunk *chunk = NULL; 448 struct sctp_chunk *chunk = NULL;
442 __u16 i, nums, *str_p;
443 __u32 request_seq; 449 __u32 request_seq;
450 __u16 i, nums;
451 __be16 *str_p;
444 452
445 request_seq = ntohl(inreq->request_seq); 453 request_seq = ntohl(inreq->request_seq);
446 if (TSN_lt(asoc->strreset_inseq, request_seq) || 454 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@@ -769,7 +777,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
769 777
770 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) { 778 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
771 struct sctp_strreset_outreq *outreq; 779 struct sctp_strreset_outreq *outreq;
772 __u16 *str_p; 780 __be16 *str_p;
773 781
774 outreq = (struct sctp_strreset_outreq *)req; 782 outreq = (struct sctp_strreset_outreq *)req;
775 str_p = outreq->list_of_streams; 783 str_p = outreq->list_of_streams;
@@ -794,7 +802,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
794 nums, str_p, GFP_ATOMIC); 802 nums, str_p, GFP_ATOMIC);
795 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) { 803 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
796 struct sctp_strreset_inreq *inreq; 804 struct sctp_strreset_inreq *inreq;
797 __u16 *str_p; 805 __be16 *str_p;
798 806
799 /* if the result is performed, it's impossible for inreq */ 807 /* if the result is performed, it's impossible for inreq */
800 if (result == SCTP_STRRESET_PERFORMED) 808 if (result == SCTP_STRRESET_PERFORMED)
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 67abc0194f30..5447228bf1a0 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
847 847
848struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( 848struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
849 const struct sctp_association *asoc, __u16 flags, __u16 stream_num, 849 const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
850 __u16 *stream_list, gfp_t gfp) 850 __be16 *stream_list, gfp_t gfp)
851{ 851{
852 struct sctp_stream_reset_event *sreset; 852 struct sctp_stream_reset_event *sreset;
853 struct sctp_ulpevent *event; 853 struct sctp_ulpevent *event;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index d4ea46a5f233..c5fda15ba319 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
49{ 49{
50 /* Unrecoverable error in receive */ 50 /* Unrecoverable error in receive */
51 51
52 del_timer(&strp->msg_timer); 52 cancel_delayed_work(&strp->msg_timer_work);
53 53
54 if (strp->stopped) 54 if (strp->stopped)
55 return; 55 return;
@@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
68static void strp_start_timer(struct strparser *strp, long timeo) 68static void strp_start_timer(struct strparser *strp, long timeo)
69{ 69{
70 if (timeo) 70 if (timeo)
71 mod_timer(&strp->msg_timer, timeo); 71 mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
72} 72}
73 73
74/* Lower lock held */ 74/* Lower lock held */
@@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
319 eaten += (cand_len - extra); 319 eaten += (cand_len - extra);
320 320
321 /* Hurray, we have a new message! */ 321 /* Hurray, we have a new message! */
322 del_timer(&strp->msg_timer); 322 cancel_delayed_work(&strp->msg_timer_work);
323 strp->skb_head = NULL; 323 strp->skb_head = NULL;
324 STRP_STATS_INCR(strp->stats.msgs); 324 STRP_STATS_INCR(strp->stats.msgs);
325 325
@@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w)
450 do_strp_work(container_of(w, struct strparser, work)); 450 do_strp_work(container_of(w, struct strparser, work));
451} 451}
452 452
453static void strp_msg_timeout(unsigned long arg) 453static void strp_msg_timeout(struct work_struct *w)
454{ 454{
455 struct strparser *strp = (struct strparser *)arg; 455 struct strparser *strp = container_of(w, struct strparser,
456 msg_timer_work.work);
456 457
457 /* Message assembly timed out */ 458 /* Message assembly timed out */
458 STRP_STATS_INCR(strp->stats.msg_timeouts); 459 STRP_STATS_INCR(strp->stats.msg_timeouts);
@@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
505 strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; 506 strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
506 strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp; 507 strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
507 508
508 setup_timer(&strp->msg_timer, strp_msg_timeout, 509 INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
509 (unsigned long)strp);
510
511 INIT_WORK(&strp->work, strp_work); 510 INIT_WORK(&strp->work, strp_work);
512 511
513 return 0; 512 return 0;
@@ -532,7 +531,7 @@ void strp_done(struct strparser *strp)
532{ 531{
533 WARN_ON(!strp->stopped); 532 WARN_ON(!strp->stopped);
534 533
535 del_timer_sync(&strp->msg_timer); 534 cancel_delayed_work_sync(&strp->msg_timer_work);
536 cancel_work_sync(&strp->work); 535 cancel_work_sync(&strp->work);
537 536
538 if (strp->skb_head) { 537 if (strp->skb_head) {
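The strparser hunks convert the message-assembly timer into a delayed work item, so the timeout handler runs in process context and can be flushed together with the rest of the parser's work. A condensed sketch of such a conversion; the struct, handler, and the use of system_wq are illustrative (strparser queues onto its own workqueue):

#include <linux/printk.h>
#include <linux/workqueue.h>

struct parser {
	struct delayed_work timeout_work;
	/* ... */
};

static void parser_timeout(struct work_struct *w)
{
	struct parser *p = container_of(w, struct parser, timeout_work.work);

	/* process context: may sleep, unlike the old timer callback */
	pr_debug("parser %p: message assembly timed out\n", p);
}

static void parser_init(struct parser *p)
{
	INIT_DELAYED_WORK(&p->timeout_work, parser_timeout);
}

static void parser_arm(struct parser *p, unsigned long timeo)
{
	if (timeo)
		mod_delayed_work(system_wq, &p->timeout_work, timeo);
}

static void parser_stop(struct parser *p)
{
	cancel_delayed_work_sync(&p->timeout_work);
}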
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e741ec2b4d8e..898485e3ece4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1333,7 +1333,7 @@ void xprt_release(struct rpc_task *task)
1333 rpc_count_iostats(task, task->tk_client->cl_metrics); 1333 rpc_count_iostats(task, task->tk_client->cl_metrics);
1334 spin_lock(&xprt->recv_lock); 1334 spin_lock(&xprt->recv_lock);
1335 if (!list_empty(&req->rq_list)) { 1335 if (!list_empty(&req->rq_list)) {
1336 list_del(&req->rq_list); 1336 list_del_init(&req->rq_list);
1337 xprt_wait_on_pinned_rqst(req); 1337 xprt_wait_on_pinned_rqst(req);
1338 } 1338 }
1339 spin_unlock(&xprt->recv_lock); 1339 spin_unlock(&xprt->recv_lock);
@@ -1445,6 +1445,23 @@ out:
1445 return xprt; 1445 return xprt;
1446} 1446}
1447 1447
1448static void xprt_destroy_cb(struct work_struct *work)
1449{
1450 struct rpc_xprt *xprt =
1451 container_of(work, struct rpc_xprt, task_cleanup);
1452
1453 rpc_xprt_debugfs_unregister(xprt);
1454 rpc_destroy_wait_queue(&xprt->binding);
1455 rpc_destroy_wait_queue(&xprt->pending);
1456 rpc_destroy_wait_queue(&xprt->sending);
1457 rpc_destroy_wait_queue(&xprt->backlog);
1458 kfree(xprt->servername);
1459 /*
1460 * Tear down transport state and free the rpc_xprt
1461 */
1462 xprt->ops->destroy(xprt);
1463}
1464
1448/** 1465/**
1449 * xprt_destroy - destroy an RPC transport, killing off all requests. 1466 * xprt_destroy - destroy an RPC transport, killing off all requests.
1450 * @xprt: transport to destroy 1467 * @xprt: transport to destroy
@@ -1454,22 +1471,19 @@ static void xprt_destroy(struct rpc_xprt *xprt)
1454{ 1471{
1455 dprintk("RPC: destroying transport %p\n", xprt); 1472 dprintk("RPC: destroying transport %p\n", xprt);
1456 1473
1457 /* Exclude transport connect/disconnect handlers */ 1474 /*
1475 * Exclude transport connect/disconnect handlers and autoclose
1476 */
1458 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); 1477 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1459 1478
1460 del_timer_sync(&xprt->timer); 1479 del_timer_sync(&xprt->timer);
1461 1480
1462 rpc_xprt_debugfs_unregister(xprt);
1463 rpc_destroy_wait_queue(&xprt->binding);
1464 rpc_destroy_wait_queue(&xprt->pending);
1465 rpc_destroy_wait_queue(&xprt->sending);
1466 rpc_destroy_wait_queue(&xprt->backlog);
1467 cancel_work_sync(&xprt->task_cleanup);
1468 kfree(xprt->servername);
1469 /* 1481 /*
1470 * Tear down transport state and free the rpc_xprt 1482 * Destroy sockets etc from the system workqueue so they can
1483 * safely flush receive work running on rpciod.
1471 */ 1484 */
1472 xprt->ops->destroy(xprt); 1485 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
1486 schedule_work(&xprt->task_cleanup);
1473} 1487}
1474 1488
1475static void xprt_destroy_kref(struct kref *kref) 1489static void xprt_destroy_kref(struct kref *kref)
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 4d9679701a6d..384c84e83462 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
257 err = -ENOENT; 257 err = -ENOENT;
258 if (sk == NULL) 258 if (sk == NULL)
259 goto out_nosk; 259 goto out_nosk;
260 if (!net_eq(sock_net(sk), net))
261 goto out;
260 262
261 err = sock_diag_check_cookie(sk, req->udiag_cookie); 263 err = sock_diag_check_cookie(sk, req->udiag_cookie);
262 if (err) 264 if (err)
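The unix/diag.c check refuses to report a socket that was found by cookie but lives in a different network namespace than the requesting socket. A tiny sketch of that guard; the helper name is illustrative, not part of the sock_diag API:

#include <net/sock.h>

/* Return the socket only if it belongs to the requester's netns;
 * the caller treats NULL as -ENOENT. */
static struct sock *diag_filter_netns(struct net *net, struct sock *sk)
{
	if (sk && !net_eq(sock_net(sk), net))
		return NULL;
	return sk;
}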
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 37cea4d98879..3dd05a08c60a 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -523,11 +523,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
523 return -EOPNOTSUPP; 523 return -EOPNOTSUPP;
524 524
525 if (wdev->current_bss) { 525 if (wdev->current_bss) {
526 if (!prev_bssid)
527 return -EALREADY;
528 if (prev_bssid &&
529 !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
530 return -ENOTCONN;
531 cfg80211_unhold_bss(wdev->current_bss); 526 cfg80211_unhold_bss(wdev->current_bss);
532 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); 527 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
533 wdev->current_bss = NULL; 528 wdev->current_bss = NULL;
@@ -1064,11 +1059,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
1064 1059
1065 ASSERT_WDEV_LOCK(wdev); 1060 ASSERT_WDEV_LOCK(wdev);
1066 1061
1067 if (WARN_ON(wdev->connect_keys)) { 1062 /*
1068 kzfree(wdev->connect_keys); 1063 * If we have an ssid_len, we're trying to connect or are
1069 wdev->connect_keys = NULL; 1064 * already connected, so reject a new SSID unless it's the
1065 * same (which is the case for re-association.)
1066 */
1067 if (wdev->ssid_len &&
1068 (wdev->ssid_len != connect->ssid_len ||
1069 memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
1070 return -EALREADY;
1071
1072 /*
1073 * If connected, reject (re-)association unless prev_bssid
1074 * matches the current BSSID.
1075 */
1076 if (wdev->current_bss) {
1077 if (!prev_bssid)
1078 return -EALREADY;
1079 if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
1080 return -ENOTCONN;
1070 } 1081 }
1071 1082
1083 /*
1084 * Reject if we're in the process of connecting with WEP,
1085 * this case isn't very interesting and trying to handle
1086 * it would make the code much more complex.
1087 */
1088 if (wdev->connect_keys)
1089 return -EINPROGRESS;
1090
1072 cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, 1091 cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
1073 rdev->wiphy.ht_capa_mod_mask); 1092 rdev->wiphy.ht_capa_mod_mask);
1074 1093
@@ -1119,7 +1138,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
1119 1138
1120 if (err) { 1139 if (err) {
1121 wdev->connect_keys = NULL; 1140 wdev->connect_keys = NULL;
1122 wdev->ssid_len = 0; 1141 /*
1142 * This could be reassoc getting refused, don't clear
1143 * ssid_len in that case.
1144 */
1145 if (!wdev->current_bss)
1146 wdev->ssid_len = 0;
1123 return err; 1147 return err;
1124 } 1148 }
1125 1149
@@ -1146,6 +1170,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
1146 else if (wdev->ssid_len) 1170 else if (wdev->ssid_len)
1147 err = rdev_disconnect(rdev, dev, reason); 1171 err = rdev_disconnect(rdev, dev, reason);
1148 1172
1173 /*
1174 * Clear ssid_len unless we actually were fully connected,
1175 * in which case cfg80211_disconnected() will take care of
1176 * this later.
1177 */
1178 if (!wdev->current_bss)
1179 wdev->ssid_len = 0;
1180
1149 return err; 1181 return err;
1150} 1182}
1151 1183
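Taken together, the sme.c hunks order the connect-time checks as: reject a different SSID while a connect is pending or active, require a matching prev_bssid for reassociation, and refuse to interleave with a WEP connect that is still in flight. A hedged sketch of that validation ladder with illustrative request/state structures (not the cfg80211 types):

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/types.h>

struct conn_req {
	const u8 *ssid;
	size_t ssid_len;
	const u8 *prev_bssid;		/* NULL unless userspace asked to reassociate */
};

struct conn_state {
	u8 ssid[32];
	size_t ssid_len;		/* nonzero while connecting or connected */
	bool associated;
	u8 bssid[ETH_ALEN];
	bool wep_connect_pending;
};

static int validate_connect(const struct conn_state *s, const struct conn_req *r)
{
	/* a different SSID while busy is rejected outright */
	if (s->ssid_len &&
	    (s->ssid_len != r->ssid_len ||
	     memcmp(s->ssid, r->ssid, s->ssid_len)))
		return -EALREADY;

	/* reassociation must name the BSS currently in use */
	if (s->associated) {
		if (!r->prev_bssid)
			return -EALREADY;
		if (!ether_addr_equal(r->prev_bssid, s->bssid))
			return -ENOTCONN;
	}

	/* a WEP connect still in flight is not interleaved with another */
	if (s->wep_connect_pending)
		return -EINPROGRESS;

	return 0;
}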
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 31a2e6d34dba..73ad8c8ef344 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -105,6 +105,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
105 if (xfrm_offload(skb)) { 105 if (xfrm_offload(skb)) {
106 x->type_offload->encap(x, skb); 106 x->type_offload->encap(x, skb);
107 } else { 107 } else {
108 /* Inner headers are invalid now. */
109 skb->encapsulation = 0;
110
108 err = x->type->output(x, skb); 111 err = x->type->output(x, skb);
109 if (err == -EINPROGRESS) 112 if (err == -EINPROGRESS)
110 goto out; 113 goto out;
@@ -208,7 +211,6 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
208 int err; 211 int err;
209 212
210 secpath_reset(skb); 213 secpath_reset(skb);
211 skb->encapsulation = 0;
212 214
213 if (xfrm_dev_offload_ok(skb, x)) { 215 if (xfrm_dev_offload_ok(skb, x)) {
214 struct sec_path *sp; 216 struct sec_path *sp;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f06253969972..8cafb3c0a4ac 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1573,6 +1573,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1573 goto put_states; 1573 goto put_states;
1574 } 1574 }
1575 1575
1576 if (!dst_prev)
1577 dst0 = dst1;
1578 else
1579 /* Ref count is taken during xfrm_alloc_dst()
1580 * No need to do dst_clone() on dst1
1581 */
1582 dst_prev->child = dst1;
1583
1576 if (xfrm[i]->sel.family == AF_UNSPEC) { 1584 if (xfrm[i]->sel.family == AF_UNSPEC) {
1577 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1585 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1578 xfrm_af2proto(family)); 1586 xfrm_af2proto(family));
@@ -1584,14 +1592,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1584 } else 1592 } else
1585 inner_mode = xfrm[i]->inner_mode; 1593 inner_mode = xfrm[i]->inner_mode;
1586 1594
1587 if (!dst_prev)
1588 dst0 = dst1;
1589 else
1590 /* Ref count is taken during xfrm_alloc_dst()
1591 * No need to do dst_clone() on dst1
1592 */
1593 dst_prev->child = dst1;
1594
1595 xdst->route = dst; 1595 xdst->route = dst;
1596 dst_copy_metrics(dst1, dst); 1596 dst_copy_metrics(dst1, dst);
1597 1597
@@ -2076,7 +2076,6 @@ make_dummy_bundle:
2076 xdst->num_xfrms = num_xfrms; 2076 xdst->num_xfrms = num_xfrms;
2077 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2077 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2078 2078
2079 dst_hold(&xdst->u.dst);
2080 return xdst; 2079 return xdst;
2081 2080
2082inc_error: 2081inc_error:
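The xfrm_policy.c move links each freshly allocated dst into the bundle before the inner-mode lookup that can fail, so the error path that walks and releases the chain from its head also covers the node just created. A generic sketch of that build-then-free-from-head idiom with illustrative types:

#include <linux/slab.h>

struct node {
	struct node *child;
	/* ... */
};

/* One free_chain(head) on any failure releases every node built so far. */
static void free_chain(struct node *head)
{
	while (head) {
		struct node *next = head->child;

		kfree(head);
		head = next;
	}
}

static struct node *build_chain(int n)
{
	struct node *head = NULL, *prev = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct node *cur = kzalloc(sizeof(*cur), GFP_KERNEL);

		if (!cur)
			goto err;
		if (!prev)
			head = cur;
		else
			prev->child = cur;	/* linked before any later step can fail */
		prev = cur;

		/* ... later per-node steps that may fail and goto err ... */
	}
	return head;
err:
	free_chain(head);
	return NULL;
}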
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 12213477cd3a..1f5cee2269af 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2069,6 +2069,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
2069 if (err >= 0) { 2069 if (err >= 0) {
2070 xfrm_sk_policy_insert(sk, err, pol); 2070 xfrm_sk_policy_insert(sk, err, pol);
2071 xfrm_pol_put(pol); 2071 xfrm_pol_put(pol);
2072 __sk_dst_reset(sk);
2072 err = 0; 2073 err = 0;
2073 } 2074 }
2074 2075
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b997f1395357..e44a0fed48dd 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1693,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1693 1693
1694static int xfrm_dump_policy_done(struct netlink_callback *cb) 1694static int xfrm_dump_policy_done(struct netlink_callback *cb)
1695{ 1695{
1696 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1696 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1697 struct net *net = sock_net(cb->skb->sk); 1697 struct net *net = sock_net(cb->skb->sk);
1698 1698
1699 xfrm_policy_walk_done(walk, net); 1699 xfrm_policy_walk_done(walk, net);
1700 return 0; 1700 return 0;
1701} 1701}
1702 1702
1703static int xfrm_dump_policy_start(struct netlink_callback *cb)
1704{
1705 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1706
1707 BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
1708
1709 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1710 return 0;
1711}
1712
1703static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1713static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1704{ 1714{
1705 struct net *net = sock_net(skb->sk); 1715 struct net *net = sock_net(skb->sk);
1706 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1716 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1707 struct xfrm_dump_info info; 1717 struct xfrm_dump_info info;
1708 1718
1709 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1710 sizeof(cb->args) - sizeof(cb->args[0]));
1711
1712 info.in_skb = cb->skb; 1719 info.in_skb = cb->skb;
1713 info.out_skb = skb; 1720 info.out_skb = skb;
1714 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1721 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1715 info.nlmsg_flags = NLM_F_MULTI; 1722 info.nlmsg_flags = NLM_F_MULTI;
1716 1723
1717 if (!cb->args[0]) {
1718 cb->args[0] = 1;
1719 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1720 }
1721
1722 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); 1724 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1723 1725
1724 return skb->len; 1726 return skb->len;
@@ -2474,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
2474 2476
2475static const struct xfrm_link { 2477static const struct xfrm_link {
2476 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 2478 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2479 int (*start)(struct netlink_callback *);
2477 int (*dump)(struct sk_buff *, struct netlink_callback *); 2480 int (*dump)(struct sk_buff *, struct netlink_callback *);
2478 int (*done)(struct netlink_callback *); 2481 int (*done)(struct netlink_callback *);
2479 const struct nla_policy *nla_pol; 2482 const struct nla_policy *nla_pol;
@@ -2487,6 +2490,7 @@ static const struct xfrm_link {
2487 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 2490 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2488 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 2491 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2489 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 2492 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2493 .start = xfrm_dump_policy_start,
2490 .dump = xfrm_dump_policy, 2494 .dump = xfrm_dump_policy,
2491 .done = xfrm_dump_policy_done }, 2495 .done = xfrm_dump_policy_done },
2492 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 2496 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2539,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
2539 2543
2540 { 2544 {
2541 struct netlink_dump_control c = { 2545 struct netlink_dump_control c = {
2546 .start = link->start,
2542 .dump = link->dump, 2547 .dump = link->dump,
2543 .done = link->done, 2548 .done = link->done,
2544 }; 2549 };
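The xfrm_user changes move the policy-walk setup out of the first .dump call into a .start callback registered through netlink_dump_control, so the walker is initialized exactly once before any dump pass and torn down in .done. A sketch of wiring such a callback; the my_walk state and handlers are illustrative, while the netlink_dump_control fields and netlink_dump_start() are the real interface:

#include <linux/bug.h>
#include <linux/netlink.h>
#include <net/netlink.h>

/* Illustrative per-dump state kept in cb->args, the scratch space that
 * netlink preserves between dump passes. */
struct my_walk {
	long cursor;
};

static int my_dump_start(struct netlink_callback *cb)
{
	struct my_walk *w = (struct my_walk *)cb->args;

	BUILD_BUG_ON(sizeof(*w) > sizeof(cb->args));
	w->cursor = 0;			/* runs once, before the first .dump pass */
	return 0;
}

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_walk *w = (struct my_walk *)cb->args;

	/* emit entries starting at w->cursor and advance it */
	(void)w;
	return skb->len;
}

static int my_dump_done(struct netlink_callback *cb)
{
	/* release anything .start or .dump left behind */
	return 0;
}

/* nlsk is the subsystem's kernel-side netlink socket */
static int my_rcv_dump(struct sock *nlsk, struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.start = my_dump_start,
		.dump  = my_dump,
		.done  = my_dump_done,
	};

	return netlink_dump_start(nlsk, skb, nlh, &c);
}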
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 446beb7ac48d..5522692100ba 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,7 +78,7 @@ static int simple_thread_fn(void *arg)
78} 78}
79 79
80static DEFINE_MUTEX(thread_mutex); 80static DEFINE_MUTEX(thread_mutex);
81static bool simple_thread_cnt; 81static int simple_thread_cnt;
82 82
83int foo_bar_reg(void) 83int foo_bar_reg(void)
84{ 84{
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index b8278b51cd9a..991db7d6e4df 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -98,7 +98,6 @@ vmlinux.o: FORCE
98 $(call cmd,kernel-mod) 98 $(call cmd,kernel-mod)
99 99
100# Declare generated files as targets for modpost 100# Declare generated files as targets for modpost
101$(symverfile): __modpost ;
102$(modules:.ko=.mod.c): __modpost ; 101$(modules:.ko=.mod.c): __modpost ;
103 102
104 103
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index d5b291e94264..9cdec70d72b8 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,6 +1,5 @@
1# 1#
2# Generated include files 2# Generated include files
3# 3#
4net_names.h
5capability_names.h 4capability_names.h
6rlim_names.h 5rlim_names.h
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index e7ff2183532a..9a6b4033d52b 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -5,44 +5,11 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
5 5
6apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ 6apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
7 path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ 7 path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
8 resource.o secid.o file.o policy_ns.o label.o mount.o net.o 8 resource.o secid.o file.o policy_ns.o label.o mount.o
9apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o 9apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
10 10
11clean-files := capability_names.h rlim_names.h net_names.h 11clean-files := capability_names.h rlim_names.h
12 12
13# Build a lower case string table of address family names
14# Transform lines from
15# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
16# #define AF_INET 2 /* Internet IP Protocol */
17# to
18# [1] = "local",
19# [2] = "inet",
20#
21# and build the securityfs entries for the mapping.
22# Transforms lines from
23# #define AF_INET 2 /* Internet IP Protocol */
24# to
25# #define AA_SFS_AF_MASK "local inet"
26quiet_cmd_make-af = GEN $@
27cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
28 sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
29 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
30 echo "};" >> $@ ;\
31 printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\
32 sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
33 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\
34 $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
35
36# Build a lower case string table of sock type names
37# Transform lines from
38# SOCK_STREAM = 1,
39# to
40# [1] = "stream",
41quiet_cmd_make-sock = GEN $@
42cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
43 sed $^ >>$@ -r -n \
44 -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
45 echo "};" >> $@
46 13
47# Build a lower case string table of capability names 14# Build a lower case string table of capability names
48# Transforms lines from 15# Transforms lines from
@@ -95,7 +62,6 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
95 tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ 62 tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
96 63
97$(obj)/capability.o : $(obj)/capability_names.h 64$(obj)/capability.o : $(obj)/capability_names.h
98$(obj)/net.o : $(obj)/net_names.h
99$(obj)/resource.o : $(obj)/rlim_names.h 65$(obj)/resource.o : $(obj)/rlim_names.h
100$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ 66$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
101 $(src)/Makefile 67 $(src)/Makefile
@@ -103,8 +69,3 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
103$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ 69$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
104 $(src)/Makefile 70 $(src)/Makefile
105 $(call cmd,make-rlim) 71 $(call cmd,make-rlim)
106$(obj)/net_names.h : $(srctree)/include/linux/socket.h \
107 $(srctree)/include/linux/net.h \
108 $(src)/Makefile
109 $(call cmd,make-af)
110 $(call cmd,make-sock)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 518d5928661b..caaf51dda648 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2202,7 +2202,6 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
2202 AA_SFS_DIR("policy", aa_sfs_entry_policy), 2202 AA_SFS_DIR("policy", aa_sfs_entry_policy),
2203 AA_SFS_DIR("domain", aa_sfs_entry_domain), 2203 AA_SFS_DIR("domain", aa_sfs_entry_domain),
2204 AA_SFS_DIR("file", aa_sfs_entry_file), 2204 AA_SFS_DIR("file", aa_sfs_entry_file),
2205 AA_SFS_DIR("network", aa_sfs_entry_network),
2206 AA_SFS_DIR("mount", aa_sfs_entry_mount), 2205 AA_SFS_DIR("mount", aa_sfs_entry_mount),
2207 AA_SFS_DIR("namespaces", aa_sfs_entry_ns), 2206 AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
2208 AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), 2207 AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index db80221891c6..3382518b87fa 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -21,7 +21,6 @@
21#include "include/context.h" 21#include "include/context.h"
22#include "include/file.h" 22#include "include/file.h"
23#include "include/match.h" 23#include "include/match.h"
24#include "include/net.h"
25#include "include/path.h" 24#include "include/path.h"
26#include "include/policy.h" 25#include "include/policy.h"
27#include "include/label.h" 26#include "include/label.h"
@@ -567,32 +566,6 @@ static int __file_path_perm(const char *op, struct aa_label *label,
567 return error; 566 return error;
568} 567}
569 568
570static int __file_sock_perm(const char *op, struct aa_label *label,
571 struct aa_label *flabel, struct file *file,
572 u32 request, u32 denied)
573{
574 struct socket *sock = (struct socket *) file->private_data;
575 int error;
576
577 AA_BUG(!sock);
578
579 /* revalidation due to label out of date. No revocation at this time */
580 if (!denied && aa_label_is_subset(flabel, label))
581 return 0;
582
583 /* TODO: improve to skip profiles cached in flabel */
584 error = aa_sock_file_perm(label, op, request, sock);
585 if (denied) {
586 /* TODO: improve to skip profiles checked above */
587 /* check every profile in file label to is cached */
588 last_error(error, aa_sock_file_perm(flabel, op, request, sock));
589 }
590 if (!error)
591 update_file_ctx(file_ctx(file), label, request);
592
593 return error;
594}
595
596/** 569/**
597 * aa_file_perm - do permission revalidation check & audit for @file 570 * aa_file_perm - do permission revalidation check & audit for @file
598 * @op: operation being checked 571 * @op: operation being checked
@@ -637,9 +610,6 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
637 error = __file_path_perm(op, label, flabel, file, request, 610 error = __file_path_perm(op, label, flabel, file, request,
638 denied); 611 denied);
639 612
640 else if (S_ISSOCK(file_inode(file)->i_mode))
641 error = __file_sock_perm(op, label, flabel, file, request,
642 denied);
643done: 613done:
644 rcu_read_unlock(); 614 rcu_read_unlock();
645 615
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index ff4316e1068d..620e81169659 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -121,29 +121,21 @@ struct apparmor_audit_data {
121 /* these entries require a custom callback fn */ 121 /* these entries require a custom callback fn */
122 struct { 122 struct {
123 struct aa_label *peer; 123 struct aa_label *peer;
124 union { 124 struct {
125 struct { 125 const char *target;
126 kuid_t ouid; 126 kuid_t ouid;
127 const char *target; 127 } fs;
128 } fs;
129 struct {
130 int type, protocol;
131 struct sock *peer_sk;
132 void *addr;
133 int addrlen;
134 } net;
135 int signal;
136 struct {
137 int rlim;
138 unsigned long max;
139 } rlim;
140 };
141 }; 128 };
142 struct { 129 struct {
143 struct aa_profile *profile; 130 struct aa_profile *profile;
144 const char *ns; 131 const char *ns;
145 long pos; 132 long pos;
146 } iface; 133 } iface;
134 int signal;
135 struct {
136 int rlim;
137 unsigned long max;
138 } rlim;
147 struct { 139 struct {
148 const char *src_name; 140 const char *src_name;
149 const char *type; 141 const char *type;
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
deleted file mode 100644
index 140c8efcf364..000000000000
--- a/security/apparmor/include/net.h
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * AppArmor security module
3 *
4 * This file contains AppArmor network mediation definitions.
5 *
6 * Copyright (C) 1998-2008 Novell/SUSE
7 * Copyright 2009-2017 Canonical Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation, version 2 of the
12 * License.
13 */
14
15#ifndef __AA_NET_H
16#define __AA_NET_H
17
18#include <net/sock.h>
19#include <linux/path.h>
20
21#include "apparmorfs.h"
22#include "label.h"
23#include "perms.h"
24#include "policy.h"
25
26#define AA_MAY_SEND AA_MAY_WRITE
27#define AA_MAY_RECEIVE AA_MAY_READ
28
29#define AA_MAY_SHUTDOWN AA_MAY_DELETE
30
31#define AA_MAY_CONNECT AA_MAY_OPEN
32#define AA_MAY_ACCEPT 0x00100000
33
34#define AA_MAY_BIND 0x00200000
35#define AA_MAY_LISTEN 0x00400000
36
37#define AA_MAY_SETOPT 0x01000000
38#define AA_MAY_GETOPT 0x02000000
39
40#define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
41 AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \
42 AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \
43 AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT)
44
45#define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
46 AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\
47 AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \
48 AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \
49 AA_MAY_MPROT)
50
51#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
52 AA_MAY_ACCEPT)
53struct aa_sk_ctx {
54 struct aa_label *label;
55 struct aa_label *peer;
56 struct path path;
57};
58
59#define SK_CTX(X) ((X)->sk_security)
60#define SOCK_ctx(X) SOCK_INODE(X)->i_security
61#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
62 struct lsm_network_audit NAME ## _net = { .sk = (SK), \
63 .family = (F)}; \
64 DEFINE_AUDIT_DATA(NAME, \
65 ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \
66 LSM_AUDIT_DATA_NONE, \
67 OP); \
68 NAME.u.net = &(NAME ## _net); \
69 aad(&NAME)->net.type = (T); \
70 aad(&NAME)->net.protocol = (P)
71
72#define DEFINE_AUDIT_SK(NAME, OP, SK) \
73 DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
74 (SK)->sk_protocol)
75
76/* struct aa_net - network confinement data
77 * @allow: basic network families permissions
78 * @audit: which network permissions to force audit
79 * @quiet: which network permissions to quiet rejects
80 */
81struct aa_net {
82 u16 allow[AF_MAX];
83 u16 audit[AF_MAX];
84 u16 quiet[AF_MAX];
85};
86
87
88extern struct aa_sfs_entry aa_sfs_entry_network[];
89
90void audit_net_cb(struct audit_buffer *ab, void *va);
91int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
92 u32 request, u16 family, int type);
93int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
94 int type, int protocol);
95static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
96 struct common_audit_data *sa,
97 u32 request,
98 struct sock *sk)
99{
100 return aa_profile_af_perm(profile, sa, request, sk->sk_family,
101 sk->sk_type);
102}
103int aa_sk_perm(const char *op, u32 request, struct sock *sk);
104
105int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
106 struct socket *sock);
107
108
109static inline void aa_free_net_rules(struct aa_net *new)
110{
111 /* NOP */
112}
113
114#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index af04d5a7d73d..2b27bb79aec4 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -135,10 +135,9 @@ extern struct aa_perms allperms;
135 135
136 136
137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); 137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
138void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, 138void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
139 u32 mask);
140void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, 139void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
141 u32 chrsmask, const char * const *names, u32 namesmask); 140 u32 chrsmask, const char **names, u32 namesmask);
142void aa_apply_modes_to_perms(struct aa_profile *profile, 141void aa_apply_modes_to_perms(struct aa_profile *profile,
143 struct aa_perms *perms); 142 struct aa_perms *perms);
144void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, 143void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 4364088a0b9e..17fe41a9cac3 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -30,7 +30,6 @@
30#include "file.h" 30#include "file.h"
31#include "lib.h" 31#include "lib.h"
32#include "label.h" 32#include "label.h"
33#include "net.h"
34#include "perms.h" 33#include "perms.h"
35#include "resource.h" 34#include "resource.h"
36 35
@@ -112,7 +111,6 @@ struct aa_data {
112 * @policy: general match rules governing policy 111 * @policy: general match rules governing policy
113 * @file: The set of rules governing basic file access and domain transitions 112 * @file: The set of rules governing basic file access and domain transitions
114 * @caps: capabilities for the profile 113 * @caps: capabilities for the profile
115 * @net: network controls for the profile
116 * @rlimits: rlimits for the profile 114 * @rlimits: rlimits for the profile
117 * 115 *
118 * @dents: dentries for the profiles file entries in apparmorfs 116 * @dents: dentries for the profiles file entries in apparmorfs
@@ -150,7 +148,6 @@ struct aa_profile {
150 struct aa_policydb policy; 148 struct aa_policydb policy;
151 struct aa_file_rules file; 149 struct aa_file_rules file;
152 struct aa_caps caps; 150 struct aa_caps caps;
153 struct aa_net net;
154 struct aa_rlimit rlimits; 151 struct aa_rlimit rlimits;
155 152
156 struct aa_loaddata *rawdata; 153 struct aa_loaddata *rawdata;
@@ -223,16 +220,6 @@ static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile,
223 return 0; 220 return 0;
224} 221}
225 222
226static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
227 u16 AF) {
228 unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
229 u16 be_af = cpu_to_be16(AF);
230
231 if (!state)
232 return 0;
233 return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2);
234}
235
236/** 223/**
237 * aa_get_profile - increment refcount on profile @p 224 * aa_get_profile - increment refcount on profile @p
238 * @p: profile (MAYBE NULL) 225 * @p: profile (MAYBE NULL)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 8818621b5d95..08ca26bcca77 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -211,8 +211,7 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask)
211 *str = '\0'; 211 *str = '\0';
212} 212}
213 213
214void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, 214void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
215 u32 mask)
216{ 215{
217 const char *fmt = "%s"; 216 const char *fmt = "%s";
218 unsigned int i, perm = 1; 217 unsigned int i, perm = 1;
@@ -230,7 +229,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
230} 229}
231 230
232void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, 231void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
233 u32 chrsmask, const char * const *names, u32 namesmask) 232 u32 chrsmask, const char **names, u32 namesmask)
234{ 233{
235 char str[33]; 234 char str[33];
236 235
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 72b915dfcaf7..1346ee5be04f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -33,7 +33,6 @@
33#include "include/context.h" 33#include "include/context.h"
34#include "include/file.h" 34#include "include/file.h"
35#include "include/ipc.h" 35#include "include/ipc.h"
36#include "include/net.h"
37#include "include/path.h" 36#include "include/path.h"
38#include "include/label.h" 37#include "include/label.h"
39#include "include/policy.h" 38#include "include/policy.h"
@@ -737,368 +736,6 @@ static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
737 return error; 736 return error;
738} 737}
739 738
740/**
741 * apparmor_sk_alloc_security - allocate and attach the sk_security field
742 */
743static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
744{
745 struct aa_sk_ctx *ctx;
746
747 ctx = kzalloc(sizeof(*ctx), flags);
748 if (!ctx)
749 return -ENOMEM;
750
751 SK_CTX(sk) = ctx;
752
753 return 0;
754}
755
756/**
757 * apparmor_sk_free_security - free the sk_security field
758 */
759static void apparmor_sk_free_security(struct sock *sk)
760{
761 struct aa_sk_ctx *ctx = SK_CTX(sk);
762
763 SK_CTX(sk) = NULL;
764 aa_put_label(ctx->label);
765 aa_put_label(ctx->peer);
766 path_put(&ctx->path);
767 kfree(ctx);
768}
769
770/**
771 * apparmor_clone_security - clone the sk_security field
772 */
773static void apparmor_sk_clone_security(const struct sock *sk,
774 struct sock *newsk)
775{
776 struct aa_sk_ctx *ctx = SK_CTX(sk);
777 struct aa_sk_ctx *new = SK_CTX(newsk);
778
779 new->label = aa_get_label(ctx->label);
780 new->peer = aa_get_label(ctx->peer);
781 new->path = ctx->path;
782 path_get(&new->path);
783}
784
785static int aa_sock_create_perm(struct aa_label *label, int family, int type,
786 int protocol)
787{
788 AA_BUG(!label);
789 AA_BUG(in_interrupt());
790
791 return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type,
792 protocol);
793}
794
795
796/**
797 * apparmor_socket_create - check perms before creating a new socket
798 */
799static int apparmor_socket_create(int family, int type, int protocol, int kern)
800{
801 struct aa_label *label;
802 int error = 0;
803
804 label = begin_current_label_crit_section();
805 if (!(kern || unconfined(label)))
806 error = aa_sock_create_perm(label, family, type, protocol);
807 end_current_label_crit_section(label);
808
809 return error;
810}
811
812/**
813 * apparmor_socket_post_create - setup the per-socket security struct
814 *
815 * Note:
816 * - kernel sockets currently labeled unconfined but we may want to
817 * move to a special kernel label
818 * - socket may not have sk here if created with sock_create_lite or
819 * sock_alloc. These should be accept cases which will be handled in
820 * sock_graft.
821 */
822static int apparmor_socket_post_create(struct socket *sock, int family,
823 int type, int protocol, int kern)
824{
825 struct aa_label *label;
826
827 if (kern) {
828 struct aa_ns *ns = aa_get_current_ns();
829
830 label = aa_get_label(ns_unconfined(ns));
831 aa_put_ns(ns);
832 } else
833 label = aa_get_current_label();
834
835 if (sock->sk) {
836 struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
837
838 aa_put_label(ctx->label);
839 ctx->label = aa_get_label(label);
840 }
841 aa_put_label(label);
842
843 return 0;
844}
845
846/**
847 * apparmor_socket_bind - check perms before bind addr to socket
848 */
849static int apparmor_socket_bind(struct socket *sock,
850 struct sockaddr *address, int addrlen)
851{
852 AA_BUG(!sock);
853 AA_BUG(!sock->sk);
854 AA_BUG(!address);
855 AA_BUG(in_interrupt());
856
857 return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
858}
859
860/**
861 * apparmor_socket_connect - check perms before connecting @sock to @address
862 */
863static int apparmor_socket_connect(struct socket *sock,
864 struct sockaddr *address, int addrlen)
865{
866 AA_BUG(!sock);
867 AA_BUG(!sock->sk);
868 AA_BUG(!address);
869 AA_BUG(in_interrupt());
870
871 return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
872}
873
874/**
875 * apparmor_socket_list - check perms before allowing listen
876 */
877static int apparmor_socket_listen(struct socket *sock, int backlog)
878{
879 AA_BUG(!sock);
880 AA_BUG(!sock->sk);
881 AA_BUG(in_interrupt());
882
883 return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
884}
885
886/**
887 * apparmor_socket_accept - check perms before accepting a new connection.
888 *
889 * Note: while @newsock is created and has some information, the accept
890 * has not been done.
891 */
892static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
893{
894 AA_BUG(!sock);
895 AA_BUG(!sock->sk);
896 AA_BUG(!newsock);
897 AA_BUG(in_interrupt());
898
899 return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
900}
901
902static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
903 struct msghdr *msg, int size)
904{
905 AA_BUG(!sock);
906 AA_BUG(!sock->sk);
907 AA_BUG(!msg);
908 AA_BUG(in_interrupt());
909
910 return aa_sk_perm(op, request, sock->sk);
911}
912
913/**
914 * apparmor_socket_sendmsg - check perms before sending msg to another socket
915 */
916static int apparmor_socket_sendmsg(struct socket *sock,
917 struct msghdr *msg, int size)
918{
919 return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
920}
921
922/**
923 * apparmor_socket_recvmsg - check perms before receiving a message
924 */
925static int apparmor_socket_recvmsg(struct socket *sock,
926 struct msghdr *msg, int size, int flags)
927{
928 return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
929}
930
931/* revaliation, get/set attr, shutdown */
932static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
933{
934 AA_BUG(!sock);
935 AA_BUG(!sock->sk);
936 AA_BUG(in_interrupt());
937
938 return aa_sk_perm(op, request, sock->sk);
939}
940
941/**
942 * apparmor_socket_getsockname - check perms before getting the local address
943 */
944static int apparmor_socket_getsockname(struct socket *sock)
945{
946 return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
947}
948
949/**
950 * apparmor_socket_getpeername - check perms before getting remote address
951 */
952static int apparmor_socket_getpeername(struct socket *sock)
953{
954 return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
955}
956
957/* revaliation, get/set attr, opt */
958static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
959 int level, int optname)
960{
961 AA_BUG(!sock);
962 AA_BUG(!sock->sk);
963 AA_BUG(in_interrupt());
964
965 return aa_sk_perm(op, request, sock->sk);
966}
967
968/**
969 * apparmor_getsockopt - check perms before getting socket options
970 */
971static int apparmor_socket_getsockopt(struct socket *sock, int level,
972 int optname)
973{
974 return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
975 level, optname);
976}
977
978/**
979 * apparmor_setsockopt - check perms before setting socket options
980 */
981static int apparmor_socket_setsockopt(struct socket *sock, int level,
982 int optname)
983{
984 return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
985 level, optname);
986}
987
988/**
989 * apparmor_socket_shutdown - check perms before shutting down @sock conn
990 */
991static int apparmor_socket_shutdown(struct socket *sock, int how)
992{
993 return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
994}
995
996/**
997 * apparmor_socket_sock_recv_skb - check perms before associating skb to sk
998 *
999 * Note: can not sleep may be called with locks held
1000 *
1001 * dont want protocol specific in __skb_recv_datagram()
1002 * to deny an incoming connection socket_sock_rcv_skb()
1003 */
1004static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
1005{
1006 return 0;
1007}
1008
1009
1010static struct aa_label *sk_peer_label(struct sock *sk)
1011{
1012 struct aa_sk_ctx *ctx = SK_CTX(sk);
1013
1014 if (ctx->peer)
1015 return ctx->peer;
1016
1017 return ERR_PTR(-ENOPROTOOPT);
1018}
1019
1020/**
1021 * apparmor_socket_getpeersec_stream - get security context of peer
1022 *
1023 * Note: for tcp only valid if using ipsec or cipso on lan
1024 */
1025static int apparmor_socket_getpeersec_stream(struct socket *sock,
1026 char __user *optval,
1027 int __user *optlen,
1028 unsigned int len)
1029{
1030 char *name;
1031 int slen, error = 0;
1032 struct aa_label *label;
1033 struct aa_label *peer;
1034
1035 label = begin_current_label_crit_section();
1036 peer = sk_peer_label(sock->sk);
1037 if (IS_ERR(peer)) {
1038 error = PTR_ERR(peer);
1039 goto done;
1040 }
1041 slen = aa_label_asxprint(&name, labels_ns(label), peer,
1042 FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
1043 FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
1044 /* don't include terminating \0 in slen, it breaks some apps */
1045 if (slen < 0) {
1046 error = -ENOMEM;
1047 } else {
1048 if (slen > len) {
1049 error = -ERANGE;
1050 } else if (copy_to_user(optval, name, slen)) {
1051 error = -EFAULT;
1052 goto out;
1053 }
1054 if (put_user(slen, optlen))
1055 error = -EFAULT;
1056out:
1057 kfree(name);
1058
1059 }
1060
1061done:
1062 end_current_label_crit_section(label);
1063
1064 return error;
1065}
1066
1067/**
1068 * apparmor_socket_getpeersec_dgram - get security label of packet
1069 * @sock: the peer socket
1070 * @skb: packet data
1071 * @secid: pointer to where to put the secid of the packet
1072 *
1073 * Sets the netlabel socket state on sk from parent
1074 */
1075static int apparmor_socket_getpeersec_dgram(struct socket *sock,
1076 struct sk_buff *skb, u32 *secid)
1077
1078{
1079 /* TODO: requires secid support */
1080 return -ENOPROTOOPT;
1081}
1082
1083/**
1084 * apparmor_sock_graft - Initialize newly created socket
1085 * @sk: child sock
1086 * @parent: parent socket
1087 *
1088 * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
1089 * just set sk security information off of current creating process label
1090 * Labeling of sk for accept case - probably should be sock based
1091 * instead of task, because of the case where an implicitly labeled
1092 * socket is shared by different tasks.
1093 */
1094static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
1095{
1096 struct aa_sk_ctx *ctx = SK_CTX(sk);
1097
1098 if (!ctx->label)
1099 ctx->label = aa_get_current_label();
1100}
1101
1102  739	static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
1103  740		LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
1104  741		LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1133,30 +770,6 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
1133  770		LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
1134  771		LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
1135  772
1136 LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
1137 LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
1138 LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
1139
1140 LSM_HOOK_INIT(socket_create, apparmor_socket_create),
1141 LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
1142 LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
1143 LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
1144 LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
1145 LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
1146 LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
1147 LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
1148 LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
1149 LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
1150 LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
1151 LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
1152 LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
1153 LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
1154 LSM_HOOK_INIT(socket_getpeersec_stream,
1155 apparmor_socket_getpeersec_stream),
1156 LSM_HOOK_INIT(socket_getpeersec_dgram,
1157 apparmor_socket_getpeersec_dgram),
1158 LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
1159
1160  773		LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
1161  774		LSM_HOOK_INIT(cred_free, apparmor_cred_free),
1162  775		LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
deleted file mode 100644
index 33d54435f8d6..000000000000
--- a/security/apparmor/net.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * AppArmor security module
3 *
4 * This file contains AppArmor network mediation
5 *
6 * Copyright (C) 1998-2008 Novell/SUSE
7 * Copyright 2009-2017 Canonical Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation, version 2 of the
12 * License.
13 */
14
15#include "include/apparmor.h"
16#include "include/audit.h"
17#include "include/context.h"
18#include "include/label.h"
19#include "include/net.h"
20#include "include/policy.h"
21
22#include "net_names.h"
23
24
25struct aa_sfs_entry aa_sfs_entry_network[] = {
26 AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
27 { }
28};
29
30static const char * const net_mask_names[] = {
31 "unknown",
32 "send",
33 "receive",
34 "unknown",
35
36 "create",
37 "shutdown",
38 "connect",
39 "unknown",
40
41 "setattr",
42 "getattr",
43 "setcred",
44 "getcred",
45
46 "chmod",
47 "chown",
48 "chgrp",
49 "lock",
50
51 "mmap",
52 "mprot",
53 "unknown",
54 "unknown",
55
56 "accept",
57 "bind",
58 "listen",
59 "unknown",
60
61 "setopt",
62 "getopt",
63 "unknown",
64 "unknown",
65
66 "unknown",
67 "unknown",
68 "unknown",
69 "unknown",
70};
71
72
73/* audit callback for net specific fields */
74void audit_net_cb(struct audit_buffer *ab, void *va)
75{
76 struct common_audit_data *sa = va;
77
78 audit_log_format(ab, " family=");
79 if (address_family_names[sa->u.net->family])
80 audit_log_string(ab, address_family_names[sa->u.net->family]);
81 else
82 audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family);
83 audit_log_format(ab, " sock_type=");
84 if (sock_type_names[aad(sa)->net.type])
85 audit_log_string(ab, sock_type_names[aad(sa)->net.type]);
86 else
87 audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type);
88 audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
89
90 if (aad(sa)->request & NET_PERMS_MASK) {
91 audit_log_format(ab, " requested_mask=");
92 aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
93 net_mask_names, NET_PERMS_MASK);
94
95 if (aad(sa)->denied & NET_PERMS_MASK) {
96 audit_log_format(ab, " denied_mask=");
97 aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
98 net_mask_names, NET_PERMS_MASK);
99 }
100 }
101 if (aad(sa)->peer) {
102 audit_log_format(ab, " peer=");
103 aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
104 FLAGS_NONE, GFP_ATOMIC);
105 }
106}
107
108
109/* Generic af perm */
110int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
111 u32 request, u16 family, int type)
112{
113 struct aa_perms perms = { };
114
115 AA_BUG(family >= AF_MAX);
116 AA_BUG(type < 0 || type >= SOCK_MAX);
117
118 if (profile_unconfined(profile))
119 return 0;
120
121 perms.allow = (profile->net.allow[family] & (1 << type)) ?
122 ALL_PERMS_MASK : 0;
123 perms.audit = (profile->net.audit[family] & (1 << type)) ?
124 ALL_PERMS_MASK : 0;
125 perms.quiet = (profile->net.quiet[family] & (1 << type)) ?
126 ALL_PERMS_MASK : 0;
127 aa_apply_modes_to_perms(profile, &perms);
128
129 return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
130}
131
132int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
133 int type, int protocol)
134{
135 struct aa_profile *profile;
136 DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
137
138 return fn_for_each_confined(label, profile,
139 aa_profile_af_perm(profile, &sa, request, family,
140 type));
141}
142
143static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
144 struct sock *sk)
145{
146 struct aa_profile *profile;
147 DEFINE_AUDIT_SK(sa, op, sk);
148
149 AA_BUG(!label);
150 AA_BUG(!sk);
151
152 if (unconfined(label))
153 return 0;
154
155 return fn_for_each_confined(label, profile,
156 aa_profile_af_sk_perm(profile, &sa, request, sk));
157}
158
159int aa_sk_perm(const char *op, u32 request, struct sock *sk)
160{
161 struct aa_label *label;
162 int error;
163
164 AA_BUG(!sk);
165 AA_BUG(in_interrupt());
166
167 /* TODO: switch to begin_current_label ???? */
168 label = begin_current_label_crit_section();
169 error = aa_label_sk_perm(label, op, request, sk);
170 end_current_label_crit_section(label);
171
172 return error;
173}
174
175
176int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
177 struct socket *sock)
178{
179 AA_BUG(!label);
180 AA_BUG(!sock);
181 AA_BUG(!sock->sk);
182
183 return aa_label_sk_perm(label, op, request, sock->sk);
184}
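
Aside on the mediation logic removed above: aa_profile_af_perm() keys a 16-bit mask per address family, one bit per socket type, for each of the allow/audit/quiet sets. A minimal stand-alone sketch of that bitmask test follows; it is not part of this patch, and MY_SOCK_MAX and af_type_allowed() are illustrative names only.

/* Stand-alone illustration only -- not kernel code from this patch.
 * MY_SOCK_MAX and af_type_allowed() are hypothetical names. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>		/* AF_MAX, AF_INET, SOCK_STREAM, ... */

#define MY_SOCK_MAX 16		/* one bit per socket type in a u16 mask */

static uint16_t allow_mask[AF_MAX];	/* per-family allow mask */

static bool af_type_allowed(int family, int type)
{
	if (family < 0 || family >= AF_MAX || type < 0 || type >= MY_SOCK_MAX)
		return false;
	return allow_mask[family] & (1u << type);
}

int main(void)
{
	allow_mask[AF_INET] = 1u << SOCK_STREAM;	/* permit stream sockets only */
	printf("inet/stream allowed: %d\n", af_type_allowed(AF_INET, SOCK_STREAM));
	printf("inet/dgram  allowed: %d\n", af_type_allowed(AF_INET, SOCK_DGRAM));
	return 0;
}

The removed kernel code applies the same test separately to the allow, audit and quiet masks before passing the result to aa_check_perms().
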
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 5a2aec358322..4ede87c30f8b 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -275,19 +275,6 @@ fail:
275  275		return 0;
276  276	}
277  277
278static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
279{
280 if (unpack_nameX(e, AA_U16, name)) {
281 if (!inbounds(e, sizeof(u16)))
282 return 0;
283 if (data)
284 *data = le16_to_cpu(get_unaligned((__le16 *) e->pos));
285 e->pos += sizeof(u16);
286 return 1;
287 }
288 return 0;
289}
290
291  278	static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
292  279	{
293  280		if (unpack_nameX(e, AA_U32, name)) {
@@ -597,7 +584,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
597  584		struct aa_profile *profile = NULL;
598  585		const char *tmpname, *tmpns = NULL, *name = NULL;
599  586		const char *info = "failed to unpack profile";
600		size_t size = 0, ns_len;
     587		size_t ns_len;
601  588		struct rhashtable_params params = { 0 };
602  589		char *key = NULL;
603  590		struct aa_data *data;
@@ -730,38 +717,6 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
730  717			goto fail;
731  718		}
732  719
733 size = unpack_array(e, "net_allowed_af");
734 if (size) {
735
736 for (i = 0; i < size; i++) {
737 /* discard extraneous rules that this kernel will
738 * never request
739 */
740 if (i >= AF_MAX) {
741 u16 tmp;
742
743 if (!unpack_u16(e, &tmp, NULL) ||
744 !unpack_u16(e, &tmp, NULL) ||
745 !unpack_u16(e, &tmp, NULL))
746 goto fail;
747 continue;
748 }
749 if (!unpack_u16(e, &profile->net.allow[i], NULL))
750 goto fail;
751 if (!unpack_u16(e, &profile->net.audit[i], NULL))
752 goto fail;
753 if (!unpack_u16(e, &profile->net.quiet[i], NULL))
754 goto fail;
755 }
756 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
757 goto fail;
758 }
759 if (VERSION_LT(e->version, v7)) {
760 /* pre v7 policy always allowed these */
761 profile->net.allow[AF_UNIX] = 0xffff;
762 profile->net.allow[AF_NETLINK] = 0xffff;
763 }
764
765  720		if (unpack_nameX(e, AA_STRUCT, "policydb")) {
766  721			/* generic policy dfa - optional and may be NULL */
767  722			info = "failed to unpack policydb";
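
For reference, the unpack_u16() helper removed above reads a possibly unaligned little-endian u16 out of the raw policy blob. A hedged user-space sketch of the same access pattern (byte-wise assembly instead of the kernel's get_unaligned()/le16_to_cpu(); the helper name is illustrative, not from the patch):

/* Illustrative user-space equivalent of the removed unpack_u16() access;
 * not part of this patch. */
#include <stdint.h>
#include <stdio.h>

static uint16_t read_le16_unaligned(const unsigned char *p)
{
	/* assemble byte-wise so alignment and host endianness don't matter */
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	const unsigned char blob[3] = { 0x00, 0x34, 0x12 };	/* u16 0x1234 at an odd offset */
	printf("0x%04x\n", read_le16_unaligned(blob + 1));	/* prints 0x1234 */
	return 0;
}
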
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index a7e51f793867..36f842ec87f0 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -459,34 +459,33 @@ static long keyring_read(const struct key *keyring,
459  459				 char __user *buffer, size_t buflen)
460  460	{
461  461		struct keyring_read_iterator_context ctx;
462		unsigned long nr_keys;
463		int ret;
     462		long ret;
464  463
465  464		kenter("{%d},,%zu", key_serial(keyring), buflen);
466  465
467  466		if (buflen & (sizeof(key_serial_t) - 1))
468  467			return -EINVAL;
469  468
470		nr_keys = keyring->keys.nr_leaves_on_tree;
471		if (nr_keys == 0)
472			return 0;
473
474		/* Calculate how much data we could return */
475		if (!buffer || !buflen)
476			return nr_keys * sizeof(key_serial_t);
477
478		/* Copy the IDs of the subscribed keys into the buffer */
479		ctx.buffer = (key_serial_t __user *)buffer;
480		ctx.buflen = buflen;
481		ctx.count = 0;
482		ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
483		if (ret < 0) {
484			kleave(" = %d [iterate]", ret);
485			return ret;
     469		/* Copy as many key IDs as fit into the buffer */
     470		if (buffer && buflen) {
     471			ctx.buffer = (key_serial_t __user *)buffer;
     472			ctx.buflen = buflen;
     473			ctx.count = 0;
     474			ret = assoc_array_iterate(&keyring->keys,
     475						  keyring_read_iterator, &ctx);
     476			if (ret < 0) {
     477				kleave(" = %ld [iterate]", ret);
     478				return ret;
     479			}
486  480		}
487  481
488		kleave(" = %zu [ok]", ctx.count);
489		return ctx.count;
     482		/* Return the size of the buffer needed */
     483		ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
     484		if (ret <= buflen)
     485			kleave("= %ld [ok]", ret);
     486		else
     487			kleave("= %ld [buffer too small]", ret);
     488		return ret;
490  489	}
491  490
492  491	/*
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index bd85315cbfeb..98aa89ff7bfd 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1147,20 +1147,21 @@ static long trusted_read(const struct key *key, char __user *buffer,
1147 1147		p = dereference_key_locked(key);
1148 1148		if (!p)
1149 1149			return -EINVAL;
1150		if (!buffer || buflen <= 0)
1151			return 2 * p->blob_len;
1152		ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
1153		if (!ascii_buf)
1154			return -ENOMEM;
1155 1150
1156		bufp = ascii_buf;
1157		for (i = 0; i < p->blob_len; i++)
1158			bufp = hex_byte_pack(bufp, p->blob[i]);
1159		if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
     1151		if (buffer && buflen >= 2 * p->blob_len) {
     1152			ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
     1153			if (!ascii_buf)
     1154				return -ENOMEM;
     1155
     1156			bufp = ascii_buf;
     1157			for (i = 0; i < p->blob_len; i++)
     1158				bufp = hex_byte_pack(bufp, p->blob[i]);
     1159			if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
     1160				kzfree(ascii_buf);
     1161				return -EFAULT;
     1162			}
1160 1163		kzfree(ascii_buf);
1161		return -EFAULT;
1162 1164		}
1163		kzfree(ascii_buf);
1164 1165		return 2 * p->blob_len;
1165 1166	}
1166 1167
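
Both keyring_read() and trusted_read() above keep the long-standing KEYCTL_READ contract: when the caller's buffer is missing or too small, the call still returns the size the payload needs. A hedged user-space sketch of the usual probe-then-read pattern, assuming libkeyutils is available (link with -lkeyutils):

/* Illustration of the KEYCTL_READ size-probe pattern; not part of this patch.
 * Requires libkeyutils (keyutils.h, -lkeyutils). */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	key_serial_t keyring = KEY_SPEC_SESSION_KEYRING;
	long need, got;
	char *buf;

	need = keyctl_read(keyring, NULL, 0);	/* probe: returns required size */
	if (need < 0) {
		perror("keyctl_read (probe)");
		return 1;
	}

	buf = malloc(need);
	if (!buf)
		return 1;

	got = keyctl_read(keyring, buf, need);	/* second call copies the payload */
	if (got < 0) {
		perror("keyctl_read");
		free(buf);
		return 1;
	}

	/* for a keyring the payload is an array of key_serial_t IDs */
	printf("payload size: %ld bytes\n", got);
	free(buf);
	return 0;
}

trusted_read() applies the same rule to the hex-encoded blob: it always reports 2 * p->blob_len and only copies when the supplied buffer is large enough.
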
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6c9cba2166d9..d10c780dfd54 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
663  663		if (atomic)
664  664			read_lock(&grp->list_lock);
665  665		else
666		down_read(&grp->list_mutex);
     666		down_read_nested(&grp->list_mutex, hop);
667  667		list_for_each_entry(subs, &grp->list_head, src_list) {
668  668			/* both ports ready? */
669  669			if (atomic_read(&subs->ref_count) != 2)
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 6a437eb66115..59127b6ef39e 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -133,7 +133,8 @@ enum {
133  133	#endif /* CONFIG_X86_X32 */
134  134	};
135  135
136	static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
     136	static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
     137						  unsigned long arg)
137  138	{
138  139		void __user *argp = compat_ptr(arg);
139  140
@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
153  154		case SNDRV_TIMER_IOCTL_PAUSE:
154  155		case SNDRV_TIMER_IOCTL_PAUSE_OLD:
155  156		case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
156			return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
     157			return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
157  158		case SNDRV_TIMER_IOCTL_GPARAMS32:
158  159			return snd_timer_user_gparams_compat(file, argp);
159  160		case SNDRV_TIMER_IOCTL_INFO32:
@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
167  168		}
168  169		return -ENOIOCTLCMD;
169  170	}
171
172static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
173 unsigned long arg)
174{
175 struct snd_timer_user *tu = file->private_data;
176 long ret;
177
178 mutex_lock(&tu->ioctl_lock);
179 ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
180 mutex_unlock(&tu->ioctl_lock);
181 return ret;
182}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0ce71111b4e3..546d515f3c1f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
327 case 0x10ec0215: 327 case 0x10ec0215:
328 case 0x10ec0225: 328 case 0x10ec0225:
329 case 0x10ec0233: 329 case 0x10ec0233:
330 case 0x10ec0236:
330 case 0x10ec0255: 331 case 0x10ec0255:
331 case 0x10ec0256: 332 case 0x10ec0256:
332 case 0x10ec0282: 333 case 0x10ec0282:
@@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
911 { 0x10ec0275, 0x1028, 0, "ALC3260" }, 912 { 0x10ec0275, 0x1028, 0, "ALC3260" },
912 { 0x10ec0899, 0x1028, 0, "ALC3861" }, 913 { 0x10ec0899, 0x1028, 0, "ALC3861" },
913 { 0x10ec0298, 0x1028, 0, "ALC3266" }, 914 { 0x10ec0298, 0x1028, 0, "ALC3266" },
915 { 0x10ec0236, 0x1028, 0, "ALC3204" },
914 { 0x10ec0256, 0x1028, 0, "ALC3246" }, 916 { 0x10ec0256, 0x1028, 0, "ALC3246" },
915 { 0x10ec0225, 0x1028, 0, "ALC3253" }, 917 { 0x10ec0225, 0x1028, 0, "ALC3253" },
916 { 0x10ec0295, 0x1028, 0, "ALC3254" }, 918 { 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3930 alc_process_coef_fw(codec, coef0255_1); 3932 alc_process_coef_fw(codec, coef0255_1);
3931 alc_process_coef_fw(codec, coef0255); 3933 alc_process_coef_fw(codec, coef0255);
3932 break; 3934 break;
3935 case 0x10ec0236:
3933 case 0x10ec0256: 3936 case 0x10ec0256:
3934 alc_process_coef_fw(codec, coef0256); 3937 alc_process_coef_fw(codec, coef0256);
3935 alc_process_coef_fw(codec, coef0255); 3938 alc_process_coef_fw(codec, coef0255);
@@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
4028 }; 4031 };
4029 4032
4030 switch (codec->core.vendor_id) { 4033 switch (codec->core.vendor_id) {
4034 case 0x10ec0236:
4031 case 0x10ec0255: 4035 case 0x10ec0255:
4032 case 0x10ec0256: 4036 case 0x10ec0256:
4033 alc_write_coef_idx(codec, 0x45, 0xc489); 4037 alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
4160 alc_process_coef_fw(codec, alc225_pre_hsmode); 4164 alc_process_coef_fw(codec, alc225_pre_hsmode);
4161 alc_process_coef_fw(codec, coef0225); 4165 alc_process_coef_fw(codec, coef0225);
4162 break; 4166 break;
4167 case 0x10ec0236:
4163 case 0x10ec0255: 4168 case 0x10ec0255:
4164 case 0x10ec0256: 4169 case 0x10ec0256:
4165 alc_process_coef_fw(codec, coef0255); 4170 alc_process_coef_fw(codec, coef0255);
@@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4256 case 0x10ec0255: 4261 case 0x10ec0255:
4257 alc_process_coef_fw(codec, coef0255); 4262 alc_process_coef_fw(codec, coef0255);
4258 break; 4263 break;
4264 case 0x10ec0236:
4259 case 0x10ec0256: 4265 case 0x10ec0256:
4260 alc_process_coef_fw(codec, coef0256); 4266 alc_process_coef_fw(codec, coef0256);
4261 break; 4267 break;
@@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4366 case 0x10ec0255: 4372 case 0x10ec0255:
4367 alc_process_coef_fw(codec, coef0255); 4373 alc_process_coef_fw(codec, coef0255);
4368 break; 4374 break;
4375 case 0x10ec0236:
4369 case 0x10ec0256: 4376 case 0x10ec0256:
4370 alc_process_coef_fw(codec, coef0256); 4377 alc_process_coef_fw(codec, coef0256);
4371 break; 4378 break;
@@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4451 }; 4458 };
4452 4459
4453 switch (codec->core.vendor_id) { 4460 switch (codec->core.vendor_id) {
4461 case 0x10ec0236:
4454 case 0x10ec0255: 4462 case 0x10ec0255:
4455 case 0x10ec0256: 4463 case 0x10ec0256:
4456 alc_process_coef_fw(codec, coef0255); 4464 alc_process_coef_fw(codec, coef0255);
@@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
4705 case 0x10ec0255: 4713 case 0x10ec0255:
4706 alc_process_coef_fw(codec, alc255fw); 4714 alc_process_coef_fw(codec, alc255fw);
4707 break; 4715 break;
4716 case 0x10ec0236:
4708 case 0x10ec0256: 4717 case 0x10ec0256:
4709 alc_process_coef_fw(codec, alc256fw); 4718 alc_process_coef_fw(codec, alc256fw);
4710 break; 4719 break;
@@ -6419,6 +6428,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6419 ALC225_STANDARD_PINS, 6428 ALC225_STANDARD_PINS,
6420 {0x12, 0xb7a60130}, 6429 {0x12, 0xb7a60130},
6421 {0x1b, 0x90170110}), 6430 {0x1b, 0x90170110}),
6431 SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6432 {0x12, 0x90a60140},
6433 {0x14, 0x90170110},
6434 {0x21, 0x02211020}),
6435 SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6436 {0x12, 0x90a60140},
6437 {0x14, 0x90170150},
6438 {0x21, 0x02211020}),
6422 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, 6439 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
6423 {0x14, 0x90170110}, 6440 {0x14, 0x90170110},
6424 {0x21, 0x02211020}), 6441 {0x21, 0x02211020}),
@@ -6806,6 +6823,7 @@ static int patch_alc269(struct hda_codec *codec)
6806 case 0x10ec0255: 6823 case 0x10ec0255:
6807 spec->codec_variant = ALC269_TYPE_ALC255; 6824 spec->codec_variant = ALC269_TYPE_ALC255;
6808 break; 6825 break;
6826 case 0x10ec0236:
6809 case 0x10ec0256: 6827 case 0x10ec0256:
6810 spec->codec_variant = ALC269_TYPE_ALC256; 6828 spec->codec_variant = ALC269_TYPE_ALC256;
6811 spec->shutup = alc256_shutup; 6829 spec->shutup = alc256_shutup;
@@ -7857,6 +7875,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
7857 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), 7875 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
7858 HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269), 7876 HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
7859 HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), 7877 HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
7878 HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
7860 HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), 7879 HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
7861 HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), 7880 HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
7862 HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260), 7881 HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 2c1bd2763864..6758f789b712 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
90   90		return 0;
91   91	}
92   92
93static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
94 struct snd_kcontrol *kcontrol, int event)
95{
96 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
97 struct adau *adau = snd_soc_codec_get_drvdata(codec);
98
99 /*
100 * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
101 * avoid losing SNR (workaround from ADI). This must be done after
102 * the ADC(s) have been enabled. According to the data sheet, it is
103 * normally illegal to set this bit when the sampling rate is 96 kHz,
104 * but according to ADI it is acceptable for this workaround.
105 */
106 regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
107 ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
108 regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
109 ADAU17X1_CONVERTER0_ADOSR, 0);
110
111 return 0;
112}
113
93  114	static const char * const adau17x1_mono_stereo_text[] = {
94  115		"Stereo",
95  116		"Mono Left Channel (L+R)",
@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
121  142		SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
122  143			&adau17x1_dac_mode_mux),
123  144
124		SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
     145		SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
     146			adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
125  147		SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
126  148		SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
127  149		SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index ebae545241a4..eaf8f933bab8 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -130,5 +130,7 @@ bool adau17x1_has_dsp(struct adau *adau);
130  130
131  131	#define ADAU17X1_CONVERTER0_CONVSR_MASK	0x7
132  132
     133	#define ADAU17X1_CONVERTER0_ADOSR	BIT(3)
     134
133  135
134  136	#endif
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index ed6e5373916c..12f2ecf3a4fe 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -145,9 +145,8 @@ done:
145  145		mutex_unlock(&rt5514_dsp->dma_lock);
146  146	}
147  147
148	static irqreturn_t rt5514_spi_irq(int irq, void *data)
     148	static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
149  149	{
150		struct rt5514_dsp *rt5514_dsp = data;
151  150		u8 buf[8];
152  151
153  152		rt5514_dsp->get_size = 0;
@@ -180,6 +179,13 @@ static irqreturn_t rt5514_spi_irq(int irq, void *data)
180  179		if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
181  180			rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
182  181			schedule_delayed_work(&rt5514_dsp->copy_work, 0);
182}
183
184static irqreturn_t rt5514_spi_irq(int irq, void *data)
185{
186 struct rt5514_dsp *rt5514_dsp = data;
187
188 rt5514_schedule_copy(rt5514_dsp);
183  189
184  190		return IRQ_HANDLED;
185  191	}
@@ -199,12 +205,19 @@ static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
199  205		struct rt5514_dsp *rt5514_dsp =
200  206			snd_soc_platform_get_drvdata(rtd->platform);
201  207		int ret;
     208		u8 buf[8];
202  209
203  210		mutex_lock(&rt5514_dsp->dma_lock);
204  211		ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
205  212			params_buffer_bytes(hw_params));
206  213		rt5514_dsp->substream = substream;
207  214		rt5514_dsp->dma_offset = 0;
     215
     216		/* Read IRQ status and schedule copy accordingly. */
     217		rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf));
     218		if (buf[0] & RT5514_IRQ_STATUS_BIT)
     219			rt5514_schedule_copy(rt5514_dsp);
     220
208  221		mutex_unlock(&rt5514_dsp->dma_lock);
209  222
210  223		return ret;
diff --git a/sound/soc/codecs/rt5514-spi.h b/sound/soc/codecs/rt5514-spi.h
index a6434ee6ff03..c1a36647c119 100644
--- a/sound/soc/codecs/rt5514-spi.h
+++ b/sound/soc/codecs/rt5514-spi.h
@@ -20,6 +20,9 @@
20   20	#define RT5514_BUFFER_VOICE_BASE	0x18000200
21   21	#define RT5514_BUFFER_VOICE_LIMIT	0x18000204
22   22	#define RT5514_BUFFER_VOICE_WP		0x1800020c
     23	#define RT5514_IRQ_CTRL			0x18002094
     24
     25	#define RT5514_IRQ_STATUS_BIT		(0x1 << 5)
23   26
24   27	/* SPI Command */
25   28	enum {
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 0945d212b8dc..d7956ababd11 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -338,39 +338,6 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
338 fw = NULL; 338 fw = NULL;
339 } 339 }
340 340
341 if (rt5514->model_buf && rt5514->model_len) {
342#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
343 int ret;
344
345 ret = rt5514_spi_burst_write(0x4ff80000,
346 rt5514->model_buf,
347 ((rt5514->model_len / 8) + 1) * 8);
348 if (ret) {
349 dev_err(codec->dev,
350 "Model load failed %d\n", ret);
351 return ret;
352 }
353#else
354 dev_err(codec->dev,
355 "No SPI driver for loading firmware\n");
356#endif
357 } else {
358 request_firmware(&fw, RT5514_FIRMWARE3,
359 codec->dev);
360 if (fw) {
361#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
362 rt5514_spi_burst_write(0x4ff80000,
363 fw->data,
364 ((fw->size/8)+1)*8);
365#else
366 dev_err(codec->dev,
367 "No SPI driver to load fw\n");
368#endif
369 release_firmware(fw);
370 fw = NULL;
371 }
372 }
373
374 /* DSP run */ 341 /* DSP run */
375 regmap_write(rt5514->i2c_regmap, 0x18002f00, 342 regmap_write(rt5514->i2c_regmap, 0x18002f00,
376 0x00055148); 343 0x00055148);
@@ -385,34 +352,6 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
385 return 0; 352 return 0;
386} 353}
387 354
388static int rt5514_hotword_model_put(struct snd_kcontrol *kcontrol,
389 const unsigned int __user *bytes, unsigned int size)
390{
391 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
392 struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
393 struct snd_soc_codec *codec = rt5514->codec;
394 int ret = 0;
395
396 if (rt5514->model_buf || rt5514->model_len < size) {
397 if (rt5514->model_buf)
398 devm_kfree(codec->dev, rt5514->model_buf);
399 rt5514->model_buf = devm_kmalloc(codec->dev, size, GFP_KERNEL);
400 if (!rt5514->model_buf) {
401 ret = -ENOMEM;
402 goto done;
403 }
404 }
405
406 /* Skips the TLV header. */
407 bytes += 2;
408
409 if (copy_from_user(rt5514->model_buf, bytes, size))
410 ret = -EFAULT;
411done:
412 rt5514->model_len = (ret ? 0 : size);
413 return ret;
414}
415
416static const struct snd_kcontrol_new rt5514_snd_controls[] = { 355static const struct snd_kcontrol_new rt5514_snd_controls[] = {
417 SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST, 356 SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST,
418 RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv), 357 RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv),
@@ -424,8 +363,6 @@ static const struct snd_kcontrol_new rt5514_snd_controls[] = {
424 adc_vol_tlv), 363 adc_vol_tlv),
425 SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0, 364 SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0,
426 rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put), 365 rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put),
427 SND_SOC_BYTES_TLV("Hotword Model", 0x8504,
428 NULL, rt5514_hotword_model_put),
429}; 366};
430 367
431/* ADC Mixer*/ 368/* ADC Mixer*/
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 803311cb7e2a..2dc40e6d8b3f 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -255,7 +255,6 @@
255 255
256#define RT5514_FIRMWARE1 "rt5514_dsp_fw1.bin" 256#define RT5514_FIRMWARE1 "rt5514_dsp_fw1.bin"
257#define RT5514_FIRMWARE2 "rt5514_dsp_fw2.bin" 257#define RT5514_FIRMWARE2 "rt5514_dsp_fw2.bin"
258#define RT5514_FIRMWARE3 "rt5514_dsp_fw3.bin"
259 258
260/* System Clock Source */ 259/* System Clock Source */
261enum { 260enum {
@@ -282,8 +281,6 @@ struct rt5514_priv {
282 int pll_in; 281 int pll_in;
283 int pll_out; 282 int pll_out;
284 int dsp_enabled; 283 int dsp_enabled;
285 u8 *model_buf;
286 unsigned int model_len;
287}; 284};
288 285
289#endif /* __RT5514_H__ */ 286#endif /* __RT5514_H__ */
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c
index c94e94fe8297..0e5f54a9bc7e 100644
--- a/sound/soc/codecs/rt5616.c
+++ b/sound/soc/codecs/rt5616.c
@@ -98,7 +98,7 @@ static const struct reg_default rt5616_reg[] = {
98   98		{ 0x8e, 0x0004 },
99   99		{ 0x8f, 0x1100 },
100 100		{ 0x90, 0x0000 },
101		{ 0x91, 0x0000 },
    101		{ 0x91, 0x0c00 },
102 102		{ 0x92, 0x0000 },
103 103		{ 0x93, 0x2000 },
104 104		{ 0x94, 0x0200 },
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 71216db15eab..fa66b11df8d4 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -2744,7 +2744,8 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
2744 2744			      SND_SOC_DAPM_PRE_PMU),
2745 2745		SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5659_hp_event,
2746 2746			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
2747		SND_SOC_DAPM_PGA("LOUT Amp", SND_SOC_NOPM, 0, 0, NULL, 0),
     2747	SND_SOC_DAPM_PGA_S("LOUT Amp", 1, RT5659_PWR_ANLG_1, RT5659_PWR_LM_BIT,
     2748		0, NULL, 0),
2748 2749
2749 2750		SND_SOC_DAPM_SUPPLY("Charge Pump", SND_SOC_NOPM, 0, 0,
2750 2751			rt5659_charge_pump_event, SND_SOC_DAPM_PRE_PMU |
@@ -3208,6 +3209,7 @@ static const struct snd_soc_dapm_route rt5659_dapm_routes[] = {
3208 { "LOUT R MIX", "OUTVOL R Switch", "OUTVOL R" }, 3209 { "LOUT R MIX", "OUTVOL R Switch", "OUTVOL R" },
3209 { "LOUT Amp", NULL, "LOUT L MIX" }, 3210 { "LOUT Amp", NULL, "LOUT L MIX" },
3210 { "LOUT Amp", NULL, "LOUT R MIX" }, 3211 { "LOUT Amp", NULL, "LOUT R MIX" },
3212 { "LOUT Amp", NULL, "Charge Pump" },
3211 { "LOUT Amp", NULL, "SYS CLK DET" }, 3213 { "LOUT Amp", NULL, "SYS CLK DET" },
3212 { "LOUT L Playback", "Switch", "LOUT Amp" }, 3214 { "LOUT L Playback", "Switch", "LOUT Amp" },
3213 { "LOUT R Playback", "Switch", "LOUT Amp" }, 3215 { "LOUT R Playback", "Switch", "LOUT Amp" },
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index ab9e0ebff5a7..e45b895d8279 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1639,7 +1639,8 @@ static irqreturn_t rt5663_irq(int irq, void *data)
1639 1639	{
1640 1640		struct rt5663_priv *rt5663 = data;
1641 1641
1642		dev_dbg(rt5663->codec->dev, "%s IRQ queue work\n", __func__);
     1642		dev_dbg(regmap_get_device(rt5663->regmap), "%s IRQ queue work\n",
     1643			__func__);
1643 1644
1644 1645		queue_delayed_work(system_wq, &rt5663->jack_detect_work,
1645 1646				   msecs_to_jiffies(250));
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index dd471d2c0266..01a50413c66f 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1301,7 +1301,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
1301 1301		/* validate kcontrol */
1302 1302		if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
1303 1303			SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
1304			return NULL;
     1304			goto err;
1305 1305
1306 1306		se = kzalloc(sizeof(*se), GFP_KERNEL);
1307 1307		if (se == NULL)
@@ -1378,6 +1378,9 @@ err_se:
1378 1378		for (; i >= 0; i--) {
1379 1379			/* free values and texts */
1380 1380			se = (struct soc_enum *)kc[i].private_value;
     1381			if (!se)
     1382				continue;
     1383
1381 1384			kfree(se->dobj.control.dvalues);
1382 1385			for (j = 0; j < ec->items; j++)
1383 1386				kfree(se->dobj.control.dtexts[j]);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 24b35a1fd4d6..01cc7ba39924 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -576,7 +576,7 @@ union bpf_attr {
576  576	 *     @map: pointer to sockmap
577  577	 *     @key: key to lookup sock in map
578  578	 *     @flags: reserved for future use
579	 *     Return: SK_REDIRECT
     579	 *     Return: SK_PASS
580  580	 *
581  581	 * int bpf_sock_map_update(skops, map, key, flags)
582  582	 *     @skops: pointer to bpf_sock_ops
@@ -787,9 +787,8 @@ struct xdp_md {
787  787	};
788  788
789  789	enum sk_action {
790		SK_ABORTED = 0,
791		SK_DROP,
792		SK_REDIRECT,
     790		SK_DROP = 0,
     791		SK_PASS,
793  792	};
794  793
795  794	#define BPF_TAG_SIZE	8
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 4c5a481a850c..d6e1c02ddcfe 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -26,7 +26,7 @@ endif
26   26
27   27	ifneq ($(OUTPUT),)
28   28	# check that the output directory actually exists
29	OUTDIR := $(realpath $(OUTPUT))
     29	OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
30   30	$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
31   31	endif
32   32
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index f5c95d4f56a1..654efd9768fd 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -1,8 +1,8 @@
1    1	# SPDX-License-Identifier: GPL-2.0
2    2	ifneq ($(O),)
3    3	ifeq ($(origin O), command line)
4		ABSOLUTE_O := $(realpath $(O))
5		dummy := $(if $(ABSOLUTE_O),,$(error O=$(O) does not exist))
     4		dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
     5		ABSOLUTE_O := $(shell cd $(O) ; pwd)
6    6		OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
7    7		COMMAND_O := O=$(ABSOLUTE_O)
8    8	ifeq ($(objtree),)
@@ -13,7 +13,7 @@ endif
13   13
14   14	# check that the output directory actually exists
15   15	ifneq ($(OUTPUT),)
16	OUTDIR := $(realpath $(OUTPUT))
     16	OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
17   17	$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
18   18	endif
19   19
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index f65886af7c0c..5bef05d6ba39 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -31,7 +31,11 @@ define RUN_TESTS
31 echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\ 31 echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\
32 echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \ 32 echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \
33 else \ 33 else \
34 cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\ 34 if [ "X$(summary)" != "X" ]; then \
35 cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\
36 else \
37 cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\
38 fi; \
35 fi; \ 39 fi; \
36 done; 40 done;
37endef 41endef
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index c727b96a59b0..5fa02d86b35f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -17,5 +17,26 @@
17 "teardown": [ 17 "teardown": [
18 "$TC qdisc del dev $DEV1 ingress" 18 "$TC qdisc del dev $DEV1 ingress"
19 ] 19 ]
20 },
21 {
22 "id": "d052",
23 "name": "Add 1M filters with the same action",
24 "category": [
25 "filter",
26 "flower"
27 ],
28 "setup": [
29 "$TC qdisc add dev $DEV2 ingress",
30 "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
31 ],
32 "cmdUnderTest": "$TC -b $BATCH_FILE",
33 "expExitCode": "0",
34 "verifyCmd": "$TC actions list action gact",
35 "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
36 "matchCount": "1",
37 "teardown": [
38 "$TC qdisc del dev $DEV2 ingress",
39 "/bin/rm $BATCH_FILE"
40 ]
20   41	    }
21	]
\ No newline at end of file
42	]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index f8ccc7e0ab05..b8462e1b74f9 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -89,7 +89,7 @@ def prepare_env(cmdlist):
89 exit(1) 89 exit(1)
90 90
91 91
92def test_runner(filtered_tests): 92def test_runner(filtered_tests, args):
93 """ 93 """
94 Driver function for the unit tests. 94 Driver function for the unit tests.
95 95
@@ -106,6 +106,8 @@ def test_runner(filtered_tests):
106 for tidx in testlist: 106 for tidx in testlist:
107 result = True 107 result = True
108 tresult = "" 108 tresult = ""
109 if "flower" in tidx["category"] and args.device == None:
110 continue
109 print("Test " + tidx["id"] + ": " + tidx["name"]) 111 print("Test " + tidx["id"] + ": " + tidx["name"])
110 prepare_env(tidx["setup"]) 112 prepare_env(tidx["setup"])
111 (p, procout) = exec_cmd(tidx["cmdUnderTest"]) 113 (p, procout) = exec_cmd(tidx["cmdUnderTest"])
@@ -151,7 +153,11 @@ def ns_create():
151 exec_cmd(cmd, False) 153 exec_cmd(cmd, False)
152 cmd = 'ip link set $DEV0 up' 154 cmd = 'ip link set $DEV0 up'
153 exec_cmd(cmd, False) 155 exec_cmd(cmd, False)
154 cmd = 'ip -s $NS link set $DEV1 up' 156 cmd = 'ip -n $NS link set $DEV1 up'
157 exec_cmd(cmd, False)
158 cmd = 'ip link set $DEV2 netns $NS'
159 exec_cmd(cmd, False)
160 cmd = 'ip -n $NS link set $DEV2 up'
155 exec_cmd(cmd, False) 161 exec_cmd(cmd, False)
156 162
157 163
@@ -212,7 +218,8 @@ def set_args(parser):
212 help='Execute the single test case with specified ID') 218 help='Execute the single test case with specified ID')
213 parser.add_argument('-i', '--id', action='store_true', dest='gen_id', 219 parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
214 help='Generate ID numbers for new test cases') 220 help='Generate ID numbers for new test cases')
215 return parser 221 parser.add_argument('-d', '--device',
222 help='Execute the test case in flower category')
216 return parser 223 return parser
217 224
218 225
@@ -226,6 +233,8 @@ def check_default_settings(args):
226 233
227 if args.path != None: 234 if args.path != None:
228 NAMES['TC'] = args.path 235 NAMES['TC'] = args.path
236 if args.device != None:
237 NAMES['DEV2'] = args.device
229 if not os.path.isfile(NAMES['TC']): 238 if not os.path.isfile(NAMES['TC']):
230 print("The specified tc path " + NAMES['TC'] + " does not exist.") 239 print("The specified tc path " + NAMES['TC'] + " does not exist.")
231 exit(1) 240 exit(1)
@@ -382,14 +391,17 @@ def set_operation_mode(args):
382 if (len(alltests) == 0): 391 if (len(alltests) == 0):
383 print("Cannot find a test case with ID matching " + target_id) 392 print("Cannot find a test case with ID matching " + target_id)
384 exit(1) 393 exit(1)
385 catresults = test_runner(alltests) 394 catresults = test_runner(alltests, args)
386 print("All test results: " + "\n\n" + catresults) 395 print("All test results: " + "\n\n" + catresults)
387 elif (len(target_category) > 0): 396 elif (len(target_category) > 0):
397 if (target_category == "flower") and args.device == None:
398 print("Please specify a NIC device (-d) to run category flower")
399 exit(1)
388 if (target_category not in ucat): 400 if (target_category not in ucat):
389 print("Specified category is not present in this file.") 401 print("Specified category is not present in this file.")
390 exit(1) 402 exit(1)
391 else: 403 else:
392 catresults = test_runner(testcases[target_category]) 404 catresults = test_runner(testcases[target_category], args)
393 print("Category " + target_category + "\n\n" + catresults) 405 print("Category " + target_category + "\n\n" + catresults)
394 406
395 ns_destroy() 407 ns_destroy()
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
new file mode 100755
index 000000000000..707c6bfef689
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -0,0 +1,62 @@
1#!/usr/bin/python3
2
3"""
4tdc_batch.py - a script to generate TC batch file
5
6Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
7"""
8
9import argparse
10
11parser = argparse.ArgumentParser(description='TC batch file generator')
12parser.add_argument("device", help="device name")
13parser.add_argument("file", help="batch file name")
14parser.add_argument("-n", "--number", type=int,
15 help="how many lines in batch file")
16parser.add_argument("-o", "--skip_sw",
17 help="skip_sw (offload), by default skip_hw",
18 action="store_true")
19parser.add_argument("-s", "--share_action",
20 help="all filters share the same action",
21 action="store_true")
22parser.add_argument("-p", "--prio",
23 help="all filters have different prio",
24 action="store_true")
25args = parser.parse_args()
26
27device = args.device
28file = open(args.file, 'w')
29
30number = 1
31if args.number:
32 number = args.number
33
34skip = "skip_hw"
35if args.skip_sw:
36 skip = "skip_sw"
37
38share_action = ""
39if args.share_action:
40 share_action = "index 1"
41
42prio = "prio 1"
43if args.prio:
44 prio = ""
45 if number > 0x4000:
46 number = 0x4000
47
48index = 0
49for i in range(0x100):
50 for j in range(0x100):
51 for k in range(0x100):
52 mac = ("%02x:%02x:%02x" % (i, j, k))
53 src_mac = "e4:11:00:" + mac
54 dst_mac = "e4:12:00:" + mac
55 cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
56 "src_mac %s dst_mac %s action drop %s" %
57 (device, prio, skip, src_mac, dst_mac, share_action))
58 file.write("%s\n" % cmd)
59 index += 1
60 if index >= number:
61 file.close()
62 exit(0)
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index c56269c7cac8..eb188c729dd6 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -13,6 +13,8 @@ NAMES = {
13   13	          # Name of veth devices to be created for the namespace
14   14	          'DEV0': 'v0p0',
15   15	          'DEV1': 'v0p1',
     16	          'DEV2': '',
     17	          'BATCH_FILE': './batch.txt',
16   18	          # Name of the namespace to use
17   19	          'NS': 'tcut'
18   20	          }