author	David S. Miller <davem@davemloft.net>	2019-05-02 22:14:21 -0400
committer	David S. Miller <davem@davemloft.net>	2019-05-02 22:14:21 -0400
commit	ff24e4980a68d83090a02fda081741a410fe8eef (patch)
tree	4d874dfcaf2bb8c3abc2446af9447a983402c0ae
parent	26f146ed971c0e4a264ce525d7a66a71ef73690d (diff)
parent	ea9866793d1e925b4d320eaea409263b2a568f38 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Three trivial overlapping conflicts.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/driver-api/usb/power-management.rst  14
-rw-r--r--  Documentation/networking/ip-sysctl.txt  2
-rw-r--r--  Documentation/networking/netdev-FAQ.rst  2
-rw-r--r--  Documentation/sysctl/vm.txt  16
-rw-r--r--  Makefile  4
-rw-r--r--  arch/arc/boot/dts/hsdk.dts  13
-rw-r--r--  arch/arc/lib/memset-archs.S  4
-rw-r--r--  arch/arc/mm/cache.c  31
-rw-r--r--  arch/arm/Kconfig  2
-rw-r--r--  arch/arm/Kconfig.debug  6
-rw-r--r--  arch/arm/boot/compressed/head.S  16
-rw-r--r--  arch/arm/kernel/head-nommu.S  2
-rw-r--r--  arch/arm64/kernel/ftrace.c  9
-rw-r--r--  arch/arm64/mm/init.c  2
-rw-r--r--  arch/mips/net/ebpf_jit.c  5
-rw-r--r--  arch/powerpc/configs/skiroot_defconfig  1
-rw-r--r--  arch/powerpc/mm/mmu_context_iommu.c  97
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype  2
-rw-r--r--  arch/x86/boot/compressed/misc.c  2
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c  4
-rw-r--r--  arch/x86/events/intel/cstate.c  10
-rw-r--r--  arch/x86/include/asm/pgtable.h  2
-rw-r--r--  arch/x86/mm/init.c  6
-rw-r--r--  crypto/lrw.c  6
-rw-r--r--  crypto/xts.c  6
-rw-r--r--  drivers/acpi/acpica/evgpe.c  6
-rw-r--r--  drivers/block/zram/zram_drv.c  5
-rw-r--r--  drivers/dma/bcm2835-dma.c  2
-rw-r--r--  drivers/dma/mediatek/mtk-cqdma.c  2
-rw-r--r--  drivers/dma/sh/rcar-dmac.c  30
-rw-r--r--  drivers/gpio/gpio-eic-sprd.c  1
-rw-r--r--  drivers/gpio/gpiolib.c  12
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c  16
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  6
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c  2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c  9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c  5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  33
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dp.c  12
-rw-r--r--  drivers/i2c/i2c-core-base.c  2
-rw-r--r--  drivers/infiniband/core/uverbs.h  1
-rw-r--r--  drivers/infiniband/core/uverbs_main.c  52
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c  2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  12
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c  11
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c  17
-rw-r--r--  drivers/input/keyboard/Kconfig  2
-rw-r--r--  drivers/input/rmi4/rmi_driver.c  6
-rw-r--r--  drivers/input/rmi4/rmi_f11.c  2
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c  12
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c  6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  53
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c  2
-rw-r--r--  drivers/net/ieee802154/mcr20a.c  6
-rw-r--r--  drivers/net/phy/marvell.c  6
-rw-r--r--  drivers/net/slip/slhc.c  2
-rw-r--r--  drivers/net/usb/qmi_wwan.c  10
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  2
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c  1
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h  3
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.c  6
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c  4
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c  24
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/5000.c  3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h  15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c  3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c  5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c  4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c  28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c  19
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c  2
-rw-r--r--  drivers/pci/pci.c  19
-rw-r--r--  drivers/pci/pcie/Kconfig  8
-rw-r--r--  drivers/pci/pcie/Makefile  2
-rw-r--r--  drivers/pci/pcie/portdrv.h  4
-rw-r--r--  drivers/pci/pcie/portdrv_core.c  3
-rw-r--r--  drivers/power/supply/cpcap-battery.c  3
-rw-r--r--  drivers/power/supply/power_supply_sysfs.c  6
-rw-r--r--  drivers/usb/core/driver.c  13
-rw-r--r--  drivers/usb/core/message.c  4
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c  19
-rw-r--r--  drivers/usb/misc/yurex.c  1
-rw-r--r--  drivers/usb/storage/realtek_cr.c  13
-rw-r--r--  drivers/usb/usbip/stub_rx.c  12
-rw-r--r--  drivers/usb/usbip/usbip_common.h  7
-rw-r--r--  drivers/w1/masters/ds2490.c  6
-rw-r--r--  fs/block_dev.c  3
-rw-r--r--  fs/btrfs/file-item.c  15
-rw-r--r--  fs/btrfs/inode.c  2
-rw-r--r--  fs/btrfs/ordered-data.c  3
-rw-r--r--  fs/ceph/dir.c  6
-rw-r--r--  fs/ceph/inode.c  16
-rw-r--r--  fs/ceph/mds_client.c  70
-rw-r--r--  fs/ceph/snap.c  7
-rw-r--r--  fs/cifs/file.c  15
-rw-r--r--  fs/cifs/inode.c  4
-rw-r--r--  fs/cifs/misc.c  23
-rw-r--r--  fs/cifs/smb2pdu.c  1
-rw-r--r--  fs/io_uring.c  291
-rw-r--r--  fs/notify/fanotify/fanotify.c  14
-rw-r--r--  fs/notify/mark.c  12
-rw-r--r--  fs/proc/proc_sysctl.c  6
-rw-r--r--  fs/splice.c  4
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h  1
-rw-r--r--  include/linux/bpf.h  2
-rw-r--r--  include/linux/pipe_fs_i.h  1
-rw-r--r--  include/linux/uio.h  2
-rw-r--r--  include/linux/usb.h  2
-rw-r--r--  include/net/sctp/command.h  1
-rw-r--r--  include/net/xfrm.h  20
-rw-r--r--  include/uapi/rdma/mlx5-abi.h  1
-rw-r--r--  kernel/bpf/verifier.c  76
-rw-r--r--  kernel/sched/fair.c  4
-rw-r--r--  kernel/seccomp.c  17
-rw-r--r--  kernel/trace/ring_buffer.c  2
-rw-r--r--  kernel/trace/trace.c  35
-rw-r--r--  lib/Kconfig.debug  1
-rw-r--r--  lib/test_vmalloc.c  6
-rw-r--r--  mm/memory_hotplug.c  1
-rw-r--r--  mm/page_alloc.c  27
-rw-r--r--  net/appletalk/ddp.c  1
-rw-r--r--  net/ipv4/esp4.c  20
-rw-r--r--  net/ipv4/esp4_offload.c  8
-rw-r--r--  net/ipv4/ip_output.c  1
-rw-r--r--  net/ipv4/ip_vti.c  9
-rw-r--r--  net/ipv4/tcp_ipv4.c  13
-rw-r--r--  net/ipv4/udp_offload.c  16
-rw-r--r--  net/ipv6/esp6_offload.c  8
-rw-r--r--  net/ipv6/ip6_fib.c  4
-rw-r--r--  net/ipv6/ip6_flowlabel.c  22
-rw-r--r--  net/ipv6/route.c  70
-rw-r--r--  net/ipv6/xfrm6_tunnel.c  6
-rw-r--r--  net/key/af_key.c  4
-rw-r--r--  net/l2tp/l2tp_core.c  10
-rw-r--r--  net/mac80211/debugfs_netdev.c  2
-rw-r--r--  net/mac80211/ht.c  5
-rw-r--r--  net/mac80211/iface.c  3
-rw-r--r--  net/netlink/genetlink.c  4
-rw-r--r--  net/packet/af_packet.c  37
-rw-r--r--  net/rds/ib_recv.c  8
-rw-r--r--  net/rxrpc/call_object.c  32
-rw-r--r--  net/sctp/sm_sideeffect.c  29
-rw-r--r--  net/sctp/sm_statefuns.c  35
-rw-r--r--  net/tls/tls_device.c  39
-rw-r--r--  net/tls/tls_device_fallback.c  3
-rw-r--r--  net/wireless/reg.c  5
-rw-r--r--  net/xfrm/xfrm_interface.c  17
-rw-r--r--  net/xfrm/xfrm_policy.c  2
-rw-r--r--  net/xfrm/xfrm_state.c  2
-rw-r--r--  net/xfrm/xfrm_user.c  16
-rw-r--r--  scripts/selinux/genheaders/genheaders.c  1
-rw-r--r--  scripts/selinux/mdp/mdp.c  1
-rw-r--r--  security/selinux/include/classmap.h  1
-rw-r--r--  tools/bpf/bpftool/map.c  3
-rw-r--r--  tools/lib/bpf/.gitignore  1
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c  25
-rw-r--r--  tools/testing/selftests/bpf/verifier/direct_packet_access.c  22
-rwxr-xr-x  tools/testing/selftests/net/fib_rule_tests.sh  10
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c  34
167 files changed, 1373 insertions, 725 deletions
diff --git a/Documentation/driver-api/usb/power-management.rst b/Documentation/driver-api/usb/power-management.rst
index 79beb807996b..4a74cf6f2797 100644
--- a/Documentation/driver-api/usb/power-management.rst
+++ b/Documentation/driver-api/usb/power-management.rst
@@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface. As a corollary, drivers must not call
-any of the ``usb_autopm_*`` functions after their ``disconnect``
-routine has returned.
+Drivers must be careful to balance their overall changes to the usage
+counter. Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again. On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
 
 Drivers using the async routines are responsible for their own
 synchronization and mutual exclusion.
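
A minimal sketch of the pattern the new text permits -- balancing the usage counter from a work item after ``disconnect`` has returned, while holding a reference on the interface. This is illustrative only, not code from this merge; the ``mydrv_*`` names and the assumption that the driver owes exactly one put from an earlier ``usb_autopm_get_interface`` are hypothetical.

#include <linux/usb.h>
#include <linux/workqueue.h>

/* Hypothetical driver state; pm_work is INIT_WORK()ed at probe time. */
struct mydrv {
	struct usb_interface	*intf;
	struct work_struct	pm_work;
};

/* May run after disconnect() has already returned; this is legal
 * because we still hold a reference on the interface. */
static void mydrv_pm_work(struct work_struct *work)
{
	struct mydrv *drv = container_of(work, struct mydrv, pm_work);

	usb_autopm_put_interface(drv->intf);	/* balance the earlier get */
	usb_put_intf(drv->intf);		/* drop our reference */
}

static void mydrv_disconnect(struct usb_interface *intf)
{
	struct mydrv *drv = usb_get_intfdata(intf);

	drv->intf = usb_get_intf(intf);		/* keep the interface alive */
	schedule_work(&drv->pm_work);
}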
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c9538a30ef7e..725b8bea58a7 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1342,6 +1342,7 @@ tag - INTEGER
 	Default value is 0.
 
 xfrm4_gc_thresh - INTEGER
+	(Obsolete since linux-4.14)
 	The threshold at which we will start garbage collecting for IPv4
 	destination cache entries.  At twice this value the system will
 	refuse new allocations.
@@ -1950,6 +1951,7 @@ echo_ignore_anycast - BOOLEAN
 	Default: 0
 
 xfrm6_gc_thresh - INTEGER
+	(Obsolete since linux-4.14)
 	The threshold at which we will start garbage collecting for IPv6
 	destination cache entries.  At twice this value the system will
 	refuse new allocations.
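
To make the threshold semantics above concrete: with xfrm4_gc_thresh set to 1024 (an illustrative value, not one from this patch), garbage collection of IPv4 destination cache entries would begin once 1024 entries exist, and new allocations would be refused at 2048, i.e. at twice the threshold; xfrm6_gc_thresh behaves the same way for IPv6. Both knobs are now documented as obsolete since linux-4.14.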
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 8c7a713cf657..642fa963be3c 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -132,7 +132,7 @@ version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
 Q: I made changes to only a few patches in a patch series should I resend only those changed?
---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
 A: No, please resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
 that can be applied.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6af24cdb25cc..3f13d8599337 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
 
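Spelled out, the boost described above is computed, in effect, as high_wmark * watermark_boost_factor / 10000 pages: the default of 15,000 gives a maximum boost of 1.5 * high_wmark, a value of 10,000 would mean exactly 100% of the high watermark, and 0 turns the mechanism off.
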
diff --git a/Makefile b/Makefile
index b6e7ee4f1fc4..e1bb7345cdd1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -679,6 +679,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os
@@ -720,7 +721,6 @@ ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 # Quiet clang warning: comparison of unsigned expression < 0 is always false
 KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
 # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 69bc1c9e8e50..7425bb0f2d1b 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -18,8 +18,8 @@
 	model = "snps,hsdk";
 	compatible = "snps,hsdk";
 
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
 
 	chosen {
 		bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
@@ -105,7 +105,7 @@
 		#size-cells = <1>;
 		interrupt-parent = <&idu_intc>;
 
-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
 
 		cgu_rst: reset-controller@8a0 {
 			compatible = "snps,hsdk-reset";
@@ -269,9 +269,10 @@
 	};
 
 	memory@80000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
 		device_type = "memory";
-		reg = <0x80000000 0x40000000>; /* 1 GiB */
+		reg = <0x0 0x80000000 0x0 0x40000000>; /* 1 GB lowmem */
+		/* 0x1 0x00000000 0x0 0x40000000>; 1 GB highmem */
 	};
 };
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index f230bb7092fd..b3373f5c88e0 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -30,10 +30,10 @@
 
 #else
 
-.macro PREALLOC_INSTR
+.macro PREALLOC_INSTR	reg, off
 .endm
 
-.macro PREFETCHW_INSTR
+.macro PREFETCHW_INSTR	reg, off
 .endm
 
 #endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 4135abec3fb0..63e6e6504699 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
 	}
 
 	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
-	if (cbcr.c)
+	if (cbcr.c) {
 		ioc_exists = 1;
-	else
+
+		/*
+		 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+		 * simultaneously. This happens because as of today IOC aperture covers
+		 * only ZONE_NORMAL (low mem) and any dma transactions outside this
+		 * region won't be HW coherent.
+		 * If we want to use both IOC and ZONE_HIGHMEM we can use
+		 * bounce_buffer to handle dma transactions to HIGHMEM.
+		 * Also it is possible to modify dma_direct cache ops or increase IOC
+		 * aperture size if we are planning to use HIGHMEM without PAE.
+		 */
+		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
+			ioc_enable = 0;
+	} else {
 		ioc_enable = 0;
+	}
 
 	/* HS 2.0 didn't have AUX_VOL */
 	if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
 	if (!ioc_enable)
 		return;
 
-	/*
-	 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
-	 * simultaneously. This happens because as of today IOC aperture covers
-	 * only ZONE_NORMAL (low mem) and any dma transactions outside this
-	 * region won't be HW coherent.
-	 * If we want to use both IOC and ZONE_HIGHMEM we can use
-	 * bounce_buffer to handle dma transactions to HIGHMEM.
-	 * Also it is possible to modify dma_direct cache ops or increase IOC
-	 * aperture size if we are planning to use HIGHMEM without PAE.
-	 */
-	if (IS_ENABLED(CONFIG_HIGHMEM))
-		panic("IOC and HIGHMEM can't be used simultaneously");
-
 	/* Flush + invalidate + disable L1 dcache */
 	__dc_disable();
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 850b4805e2d1..9aed25a6019b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -73,7 +73,7 @@ config ARM
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 6d6e0330930b..e388af4594a6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -47,8 +47,8 @@ config DEBUG_WX
 
 choice
 	prompt "Choose kernel unwinder"
-	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
-	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+	default UNWINDER_ARM if AEABI
+	default UNWINDER_FRAME_POINTER if !AEABI
 	help
 	  This determines which method will be used for unwinding kernel stack
 	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
 
 config UNWINDER_ARM
 	bool "ARM EABI stack unwinder"
-	depends on AEABI
+	depends on AEABI && !FUNCTION_GRAPH_TRACER
 	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6c7ccb428c07..7135820f76d4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f	@ v7+ isb	)
+ THUMB(		isb					)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index c08d2d890f7b..b38bbd011b35 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -133,9 +133,9 @@ __secondary_data:
  */
 	.text
 __after_proc_init:
-#ifdef CONFIG_ARM_MPU
 M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
 M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
 M_CLASS(ldr	r3, [r12, 0x50])
 AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)		@ Read ID_MMFR0
 	and	r3, r3, #(MMFR0_PMSA)		@ PMSA field
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 07b298120182..65a51331088e 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -103,10 +103,15 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	 * to be revisited if support for multiple ftrace entry points
 	 * is added in the future, but for now, the pr_err() below
 	 * deals with a theoretical issue only.
+	 *
+	 * Note that PLTs are place relative, and plt_entries_equal()
+	 * checks whether they point to the same target. Here, we need
+	 * to check if the actual opcodes are in fact identical,
+	 * regardless of the offset in memory so use memcmp() instead.
 	 */
 	trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-	if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-			       &trampoline)) {
+	if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+		   sizeof(trampoline))) {
 		if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
 			pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 			return -EINVAL;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 6bc135042f5e..7cae155e81a5 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -363,7 +363,7 @@ void __init arm64_memblock_init(void)
 		 * Otherwise, this is a no-op
 		 */
 		u64 base = phys_initrd_start & PAGE_MASK;
-		u64 size = PAGE_ALIGN(phys_initrd_size);
+		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
 		/*
 		 * We can only add back the initrd memory if we don't end up
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 0effd3cba9a7..98bf0c222b5f 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -186,8 +186,9 @@ enum which_ebpf_reg {
  * separate frame pointer, so BPF_REG_10 relative accesses are
  * adjusted to be $sp relative.
  */
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
-		     enum which_ebpf_reg w)
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+			    const struct bpf_insn *insn,
+			    enum which_ebpf_reg w)
 {
 	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
 		insn->src_reg : insn->dst_reg;
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 5ba131c30f6b..1bcd468ab422 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e7a9c4f6bfca..8330f135294f 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		unsigned long entries, unsigned long dev_hpa,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
-	struct mm_iommu_table_group_mem_t *mem;
-	long i, ret, locked_entries = 0;
+	struct mm_iommu_table_group_mem_t *mem, *mem2;
+	long i, ret, locked_entries = 0, pinned = 0;
 	unsigned int pageshift;
-
-	mutex_lock(&mem_list_mutex);
-
-	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-			next) {
-		/* Overlap? */
-		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-				(ua < (mem->ua +
-				       (mem->entries << PAGE_SHIFT)))) {
-			ret = -EINVAL;
-			goto unlock_exit;
-		}
-
-	}
+	unsigned long entry, chunk;
 
 	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
 		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 		if (ret)
-			goto unlock_exit;
+			return ret;
 
 		locked_entries = entries;
 	}
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 	down_read(&mm->mmap_sem);
-	ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+			sizeof(struct vm_area_struct *);
+	chunk = min(chunk, entries);
+	for (entry = 0; entry < entries; entry += chunk) {
+		unsigned long n = min(entries - entry, chunk);
+
+		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+				FOLL_WRITE, mem->hpages + entry, NULL);
+		if (ret == n) {
+			pinned += n;
+			continue;
+		}
+		if (ret > 0)
+			pinned += ret;
+		break;
+	}
 	up_read(&mm->mmap_sem);
-	if (ret != entries) {
-		/* free the reference taken */
-		for (i = 0; i < ret; i++)
-			put_page(mem->hpages[i]);
-
-		vfree(mem->hpas);
-		kfree(mem);
-		ret = -EFAULT;
-		goto unlock_exit;
+	if (pinned != entries) {
+		if (!ret)
+			ret = -EFAULT;
+		goto free_exit;
 	}
 
 	pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 good_exit:
-	ret = 0;
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
 	mem->ua = ua;
 	mem->entries = entries;
-	*pmem = mem;
 
-	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+	mutex_lock(&mem_list_mutex);
 
-unlock_exit:
-	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+		/* Overlap? */
+		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+				(ua < (mem2->ua +
+				       (mem2->entries << PAGE_SHIFT)))) {
+			ret = -EINVAL;
+			mutex_unlock(&mem_list_mutex);
+			goto free_exit;
+		}
+	}
+
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 	mutex_unlock(&mem_list_mutex);
 
+	*pmem = mem;
+
+	return 0;
+
+free_exit:
+	/* free the reference taken */
+	for (i = 0; i < pinned; i++)
+		put_page(mem->hpages[i]);
+
+	vfree(mem->hpas);
+	kfree(mem);
+
+unlock_exit:
+	mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
 	return ret;
 }
 
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
-	unsigned long entries, dev_hpa;
+	unsigned long unlock_entries = 0;
 
 	mutex_lock(&mem_list_mutex);
 
@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 		goto unlock_exit;
 	}
 
+	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+		unlock_entries = mem->entries;
+
 	/* @mapped became 0 so now mappings are disabled, release the region */
-	entries = mem->entries;
-	dev_hpa = mem->dev_hpa;
 	mm_iommu_release(mem);
 
-	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-		mm_iommu_adjust_locked_vm(mm, entries, false);
-
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
+	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 842b2c7e156a..50cd09b4e05d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
-	depends on PPC_BOOK3S_64
+	depends on PPC_BOOK3S_64 && HUGETLB_PAGE
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	default y
 	help
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index c0d6c560df69..5a237e8dbf8d 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -352,7 +352,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 	boot_params->hdr.loadflags &= ~KASLR_FLAG;
 
 	/* Save RSDP address for later use. */
-	boot_params->acpi_rsdp_addr = get_rsdp_addr();
+	/* boot_params->acpi_rsdp_addr = get_rsdp_addr(); */
 
 	sanitize_boot_params(boot_params);
 
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 007b3fe9d727..98c7d12b945c 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
 #ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
 	__attribute__((visibility("hidden")));
 #endif
 
 #ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
 	__attribute__((visibility("hidden")));
 #endif
 
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 94a4b7fc75d0..d41de9af7a39 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -76,15 +76,15 @@
  *	Scope: Package (physical package)
  *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *			       perf code: 0x04
- *			       Available model: HSW ULT,CNL
+ *			       Available model: HSW ULT,KBL,CNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *			       perf code: 0x05
- *			       Available model: HSW ULT,CNL
+ *			       Available model: HSW ULT,KBL,CNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *			       perf code: 0x06
- *			       Available model: HSW ULT,GLM,CNL
+ *			       Available model: HSW ULT,KBL,GLM,CNL
  *			       Scope: Package (physical package)
  *
  */
@@ -566,8 +566,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 2779ace16d23..50b3e2d963c9 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -46,7 +46,7 @@ void ptdump_walk_user_pgd_level_checkwx(void);
  */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 	__visible;
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f905a2371080..8dacdb96899e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -5,6 +5,7 @@
 #include <linux/memblock.h>
 #include <linux/swapfile.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
@@ -766,6 +767,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 	if (debug_pagealloc_enabled()) {
 		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
 			begin, end - 1);
+		/*
+		 * Inform kmemleak about the hole in the memory since the
+		 * corresponding pages will be unmapped.
+		 */
+		kmemleak_free_part((void *)begin, end - begin);
 		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 	} else {
 		/*
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 0430ccd08728..08a0e458bc3e 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
-	if (!err)
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 		err = xor_tweak_post(req);
+	}
 
 	skcipher_request_complete(req, err);
 }
diff --git a/crypto/xts.c b/crypto/xts.c
index 847f54f76789..2f948328cabb 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
-	if (!err)
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 		err = xor_tweak_post(req);
+	}
 
 	skcipher_request_complete(req, err);
 }
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 5e9d7348c16f..62d3aa74277b 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,12 +81,8 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-	/* Clear the GPE status */
-	status = acpi_hw_clear_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status))
-		return_ACPI_STATUS(status);
-
 	/* Enable the requested GPE */
+
 	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 399cad7daae7..d58a359a6622 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -774,18 +774,18 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
+	struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-	struct bio_vec bvec;
 	struct zram_work *zw = container_of(work, struct zram_work, work);
 	struct zram *zram = zw->zram;
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &bvec, entry, bio);
+	read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
 	struct zram_work work;
 
+	work.bvec = *bvec;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index ec8a291d62ba..54093ffd0aef 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
 	d = bcm2835_dma_create_cb_chain(chan, direction, false,
 					info, extra,
 					frames, src, dst, 0, 0,
-					GFP_KERNEL);
+					GFP_NOWAIT);
 	if (!d)
 		return NULL;
 
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 131f3974740d..814853842e29 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 	mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
 #else
-	mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+	mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
 #endif
 
 	/* setup the length */
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b4f25698169..e2a5398f89b5 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	enum dma_status status;
 	unsigned int residue = 0;
 	unsigned int dptr = 0;
+	unsigned int chcrb;
+	unsigned int tcrb;
+	unsigned int i;
 
 	if (!desc)
 		return 0;
@@ -1330,14 +1333,31 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	}
 
 	/*
+	 * We need to read two registers.
+	 * Make sure the control register does not skip to next chunk
+	 * while reading the counter.
+	 * Trying it 3 times should be enough: Initial read, retry, retry
+	 * for the paranoid.
+	 */
+	for (i = 0; i < 3; i++) {
+		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+						RCAR_DMACHCRB_DPTR_MASK;
+		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+		/* Still the same? */
+		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			      RCAR_DMACHCRB_DPTR_MASK))
+			break;
+	}
+	WARN_ONCE(i >= 3, "residue might be not continuous!");
+
+	/*
 	 * In descriptor mode the descriptor running pointer is not maintained
 	 * by the interrupt handler, find the running descriptor from the
 	 * descriptor pointer field in the CHCRB register. In non-descriptor
 	 * mode just use the running descriptor pointer.
 	 */
 	if (desc->hwdescs.use) {
-		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
 		if (dptr == 0)
 			dptr = desc->nchunks;
 		dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	}
 
 	/* Add the residue for the current chunk. */
-	residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+	residue += tcrb << desc->xfer_shift;
 
 	return residue;
 }
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 	enum dma_status status;
 	unsigned long flags;
 	unsigned int residue;
+	bool cyclic;
 
 	status = dma_cookie_status(chan, cookie, txstate);
 	if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&rchan->lock, flags);
 	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
 	spin_unlock_irqrestore(&rchan->lock, flags);
 
 	/* if there's no residue, the cookie is complete */
-	if (!residue)
+	if (!residue && !cyclic)
 		return DMA_COMPLETE;
 
 	dma_set_residue(txstate, residue);
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f0223cee9774..77092268ee95 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
 		irq_set_handler_locked(data, handle_edge_irq);
 		break;
 	case IRQ_TYPE_EDGE_BOTH:
+		sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
 		sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
 		irq_set_handler_locked(data, handle_edge_irq);
 		break;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0495bf1d480a..bca3e7740ef6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
 	status = gpiochip_add_irqchip(chip, lock_key, request_key);
 	if (status)
-		goto err_remove_chip;
+		goto err_free_gpiochip_mask;
 
 	status = of_gpiochip_add(chip);
 	if (status)
@@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
 	status = gpiochip_init_valid_mask(chip);
 	if (status)
-		goto err_remove_chip;
+		goto err_remove_of_chip;
 
 	for (i = 0; i < chip->ngpio; i++) {
 		struct gpio_desc *desc = &gdev->descs[i];
@@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (gpiolib_initialized) {
 		status = gpiochip_setup_dev(gdev);
 		if (status)
-			goto err_remove_chip;
+			goto err_remove_acpi_chip;
 	}
 	return 0;
 
-err_remove_chip:
+err_remove_acpi_chip:
 	acpi_gpiochip_remove(chip);
+err_remove_of_chip:
 	gpiochip_free_hogs(chip);
 	of_gpiochip_remove(chip);
+err_remove_chip:
+	gpiochip_irqchip_remove(chip);
+err_free_gpiochip_mask:
 	gpiochip_free_valid_mask(chip);
 err_remove_irqchip_mask:
 	gpiochip_irqchip_free_valid_mask(chip);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index db761329a1e3..ab7968c8f6a2 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1046,6 +1046,10 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
 	if (hdmi->version < 0x200a)
 		return false;
 
+	/* Disable if no DDC bus */
+	if (!hdmi->ddc)
+		return false;
+
 	/* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
 	if (!display->hdmi.scdc.supported ||
 	    !display->hdmi.scdc.scrambling.supported)
@@ -1684,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			 * Source Devices compliant shall set the
 			 * Source Version = 1.
 			 */
-			drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+			drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
 				       &bytes);
-			drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+			drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
 				min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
 			/* Enabled Scrambling in the Sink */
-			drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+			drm_scdc_set_scrambling(hdmi->ddc, 1);
 
 			/*
 			 * To activate the scrambler feature, you must ensure
@@ -1706,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
 			hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
 				    HDMI_MC_SWRSTZ);
-			drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+			drm_scdc_set_scrambling(hdmi->ddc, 0);
 		}
 	}
 
@@ -1800,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 	 * iteration for others.
 	 * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
 	 * the workaround with a single iteration.
+	 * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+	 * been identified as needing the workaround with a single iteration.
 	 */
 
 	switch (hdmi->version) {
@@ -1808,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 		break;
 	case 0x131a:
 	case 0x132a:
+	case 0x200a:
 	case 0x201a:
+	case 0x211a:
 	case 0x212a:
 		count = 1;
 		break;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ab4e60dfd6a3..98cea1f4b3bf 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3862,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 		ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
 	else
 		ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+	if (ret)
+		return ret;
 
-	if (IS_GEN9_LP(dev_priv) && ret)
+	if (IS_GEN9_LP(dev_priv))
 		pipe_config->lane_lat_optim_mask =
 			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
 
 	intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-	return ret;
+	return 0;
 
 }
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8891f29a8c7f..48da4a969a0a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1886,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 	int pipe_bpp;
 	int ret;
 
+	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+		intel_dp_supports_fec(intel_dp, pipe_config);
+
 	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
 		return -EINVAL;
 
@@ -2116,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return -EINVAL;
 
-	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
-		intel_dp_supports_fec(intel_dp, pipe_config);
-
 	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index ec3602ebbc1c..54011df8c2e8 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
71 if (disable_partial) 71 if (disable_partial)
72 ipu_plane_disable(ipu_crtc->plane[1], true); 72 ipu_plane_disable(ipu_crtc->plane[1], true);
73 if (disable_full) 73 if (disable_full)
74 ipu_plane_disable(ipu_crtc->plane[0], false); 74 ipu_plane_disable(ipu_crtc->plane[0], true);
75} 75}
76 76
77static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, 77static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 19fc601c9eeb..a1bec2779e76 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
 /**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
  *
  * @sched: scheduler instance
- * @bad: bad scheduler job
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 3ebd9f5e2719..29258b404e54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -16,6 +16,7 @@
16#include <linux/of_reserved_mem.h> 16#include <linux/of_reserved_mem.h>
17 17
18#include <drm/drmP.h> 18#include <drm/drmP.h>
19#include <drm/drm_atomic_helper.h>
19#include <drm/drm_fb_cma_helper.h> 20#include <drm/drm_fb_cma_helper.h>
20#include <drm/drm_fb_helper.h> 21#include <drm/drm_fb_helper.h>
21#include <drm/drm_gem_cma_helper.h> 22#include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
85 ret = -ENOMEM; 86 ret = -ENOMEM;
86 goto free_drm; 87 goto free_drm;
87 } 88 }
89
90 dev_set_drvdata(dev, drm);
88 drm->dev_private = drv; 91 drm->dev_private = drv;
89 INIT_LIST_HEAD(&drv->frontend_list); 92 INIT_LIST_HEAD(&drv->frontend_list);
90 INIT_LIST_HEAD(&drv->engine_list); 93 INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
144 147
145 drm_dev_unregister(drm); 148 drm_dev_unregister(drm);
146 drm_kms_helper_poll_fini(drm); 149 drm_kms_helper_poll_fini(drm);
150 drm_atomic_helper_shutdown(drm);
147 drm_mode_config_cleanup(drm); 151 drm_mode_config_cleanup(drm);
152
153 component_unbind_all(dev, NULL);
148 of_reserved_mem_device_release(dev); 154 of_reserved_mem_device_release(dev);
155
149 drm_dev_put(drm); 156 drm_dev_put(drm);
150} 157}
151 158
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
395 402
396static int sun4i_drv_remove(struct platform_device *pdev) 403static int sun4i_drv_remove(struct platform_device *pdev)
397{ 404{
405 component_master_del(&pdev->dev, &sun4i_drv_master_ops);
406
398 return 0; 407 return 0;
399} 408}
400 409
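
The three sun4i hunks pair each setup step with a teardown: bind now stores the drvdata that unbind reads back, unbind shuts down the atomic state before mode-config cleanup and unbinds the components bound at bind time, and remove deletes the component master added at probe. A sketch of that symmetry; component_*() and drm_*() are the real helpers, while make_drm() and drv_master_ops are stand-ins.

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>

extern const struct component_master_ops drv_master_ops;
struct drm_device *make_drm(struct device *dev);   /* hypothetical */

static int drv_bind(struct device *dev)
{
        struct drm_device *drm = make_drm(dev);

        dev_set_drvdata(dev, drm);      /* unbind retrieves it from here */
        return component_bind_all(dev, drm);
}

static void drv_unbind(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
        drm_atomic_helper_shutdown(drm);  /* quiesce before mode_config_cleanup */
        drm_mode_config_cleanup(drm);
        component_unbind_all(dev, NULL);  /* mirror of component_bind_all() */
        drm_dev_put(drm);
}

static int drv_remove(struct platform_device *pdev)
{
        /* mirror of the component_master_add_with_match() done at probe */
        component_master_del(&pdev->dev, &drv_master_ops);
        return 0;
}
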
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0fa5034b9f9e..1a01669b159a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
49 * ttm_global_mutex - protecting the global BO state 49 * ttm_global_mutex - protecting the global BO state
50 */ 50 */
51DEFINE_MUTEX(ttm_global_mutex); 51DEFINE_MUTEX(ttm_global_mutex);
52struct ttm_bo_global ttm_bo_glob = { 52unsigned ttm_bo_glob_use_count;
53 .use_count = 0 53struct ttm_bo_global ttm_bo_glob;
54};
55 54
56static struct attribute ttm_bo_count = { 55static struct attribute ttm_bo_count = {
57 .name = "bo_count", 56 .name = "bo_count",
@@ -1531,12 +1530,13 @@ static void ttm_bo_global_release(void)
1531 struct ttm_bo_global *glob = &ttm_bo_glob; 1530 struct ttm_bo_global *glob = &ttm_bo_glob;
1532 1531
1533 mutex_lock(&ttm_global_mutex); 1532 mutex_lock(&ttm_global_mutex);
1534 if (--glob->use_count > 0) 1533 if (--ttm_bo_glob_use_count > 0)
1535 goto out; 1534 goto out;
1536 1535
1537 kobject_del(&glob->kobj); 1536 kobject_del(&glob->kobj);
1538 kobject_put(&glob->kobj); 1537 kobject_put(&glob->kobj);
1539 ttm_mem_global_release(&ttm_mem_glob); 1538 ttm_mem_global_release(&ttm_mem_glob);
1539 memset(glob, 0, sizeof(*glob));
1540out: 1540out:
1541 mutex_unlock(&ttm_global_mutex); 1541 mutex_unlock(&ttm_global_mutex);
1542} 1542}
@@ -1548,7 +1548,7 @@ static int ttm_bo_global_init(void)
1548 unsigned i; 1548 unsigned i;
1549 1549
1550 mutex_lock(&ttm_global_mutex); 1550 mutex_lock(&ttm_global_mutex);
1551 if (++glob->use_count > 1) 1551 if (++ttm_bo_glob_use_count > 1)
1552 goto out; 1552 goto out;
1553 1553
1554 ret = ttm_mem_global_init(&ttm_mem_glob); 1554 ret = ttm_mem_global_init(&ttm_mem_glob);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index f1567c353b54..9a0909decb36 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -461,8 +461,8 @@ out_no_zone:
461 461
462void ttm_mem_global_release(struct ttm_mem_global *glob) 462void ttm_mem_global_release(struct ttm_mem_global *glob)
463{ 463{
464 unsigned int i;
465 struct ttm_mem_zone *zone; 464 struct ttm_mem_zone *zone;
465 unsigned int i;
466 466
467 /* let the page allocator first stop the shrink work. */ 467 /* let the page allocator first stop the shrink work. */
468 ttm_page_alloc_fini(); 468 ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
475 zone = glob->zones[i]; 475 zone = glob->zones[i];
476 kobject_del(&zone->kobj); 476 kobject_del(&zone->kobj);
477 kobject_put(&zone->kobj); 477 kobject_put(&zone->kobj);
478 } 478 }
479 kobject_del(&glob->kobj); 479 kobject_del(&glob->kobj);
480 kobject_put(&glob->kobj); 480 kobject_put(&glob->kobj);
481 memset(glob, 0, sizeof(*glob));
481} 482}
482 483
483static void ttm_check_swapping(struct ttm_mem_global *glob) 484static void ttm_check_swapping(struct ttm_mem_global *glob)
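
Both TTM hunks above converge on one lifetime pattern: the reference count moves out of the global object into its own variable, and the final release memset()s the object, so a later re-init starts from exactly the zeroed state the very first init saw. A hedged sketch of that pattern; the glob_* names are hypothetical.

#include <linux/mutex.h>
#include <linux/string.h>

struct glob_state { int initialized; };

int glob_init(struct glob_state *g);
void glob_fini(struct glob_state *g);

static DEFINE_MUTEX(glob_mutex);
static unsigned int glob_use_count;
static struct glob_state glob;          /* BSS: starts zeroed */

int glob_get(void)
{
        int ret = 0;

        mutex_lock(&glob_mutex);
        if (++glob_use_count > 1)
                goto out;               /* already initialized */
        ret = glob_init(&glob);
        if (ret)
                --glob_use_count;
out:
        mutex_unlock(&glob_mutex);
        return ret;
}

void glob_put(void)
{
        mutex_lock(&glob_mutex);
        if (--glob_use_count > 0)
                goto out;
        glob_fini(&glob);
        memset(&glob, 0, sizeof(glob)); /* next glob_get() sees pristine state */
out:
        mutex_unlock(&glob_mutex);
}
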
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 730008d3da76..1baa10e94484 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -1042,7 +1042,7 @@ static void
1042vc4_crtc_reset(struct drm_crtc *crtc) 1042vc4_crtc_reset(struct drm_crtc *crtc)
1043{ 1043{
1044 if (crtc->state) 1044 if (crtc->state)
1045 __drm_atomic_helper_crtc_destroy_state(crtc->state); 1045 vc4_crtc_destroy_state(crtc, crtc->state);
1046 1046
1047 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); 1047 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
1048 if (crtc->state) 1048 if (crtc->state)
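
The vc4 one-liner matters because vc4_crtc_reset() destroys a subclassed state: __drm_atomic_helper_crtc_destroy_state() only releases what the embedded base struct holds and frees nothing, while the driver's destroy hook also cleans up the driver-private part and frees the containing allocation. A sketch of the general wrapper pattern, with foo_* as hypothetical names.

#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>

struct foo_crtc_state {
        struct drm_crtc_state base;
        void *private_resource;         /* extra per-state allocation */
};

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
                                   struct drm_crtc_state *state)
{
        struct foo_crtc_state *foo =
                container_of(state, struct foo_crtc_state, base);

        kfree(foo->private_resource);   /* wrapper-owned cleanup */
        __drm_atomic_helper_crtc_destroy_state(state);
        kfree(foo);                     /* the base helper never frees this */
}

static void foo_crtc_reset(struct drm_crtc *crtc)
{
        if (crtc->state)
                foo_crtc_destroy_state(crtc, crtc->state); /* not the base helper */

        crtc->state = kzalloc(sizeof(struct foo_crtc_state), GFP_KERNEL);
        if (crtc->state)
                crtc->state->crtc = crtc;
}
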
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6165fe2c4504..1bfa353d995c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -546,29 +546,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
546} 546}
547 547
548/** 548/**
549 * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
550 * taking place.
551 * @dev: Pointer to the struct drm_device.
552 *
553 * Return: true if iommu present, false otherwise.
554 */
555static bool vmw_assume_iommu(struct drm_device *dev)
556{
557 const struct dma_map_ops *ops = get_dma_ops(dev->dev);
558
559 return !dma_is_direct(ops) && ops &&
560 ops->map_page != dma_direct_map_page;
561}
562
563/**
564 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 549 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
565 * system. 550 * system.
566 * 551 *
567 * @dev_priv: Pointer to a struct vmw_private 552 * @dev_priv: Pointer to a struct vmw_private
568 * 553 *
569 * This function tries to determine the IOMMU setup and what actions 554 * This function tries to determine what actions need to be taken by the
570 * need to be taken by the driver to make system pages visible to the 555 * driver to make system pages visible to the device.
571 * device.
572 * If this function decides that DMA is not possible, it returns -EINVAL. 556 * If this function decides that DMA is not possible, it returns -EINVAL.
573 * The driver may then try to disable features of the device that require 557 * The driver may then try to disable features of the device that require
574 * DMA. 558 * DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
578 static const char *names[vmw_dma_map_max] = { 562 static const char *names[vmw_dma_map_max] = {
579 [vmw_dma_phys] = "Using physical TTM page addresses.", 563 [vmw_dma_phys] = "Using physical TTM page addresses.",
580 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 564 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
581 [vmw_dma_map_populate] = "Keeping DMA mappings.", 565 [vmw_dma_map_populate] = "Caching DMA mappings.",
582 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 566 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
583 567
584 if (vmw_force_coherent) 568 if (vmw_force_coherent)
585 dev_priv->map_mode = vmw_dma_alloc_coherent; 569 dev_priv->map_mode = vmw_dma_alloc_coherent;
586 else if (vmw_assume_iommu(dev_priv->dev)) 570 else if (vmw_restrict_iommu)
587 dev_priv->map_mode = vmw_dma_map_populate; 571 dev_priv->map_mode = vmw_dma_map_bind;
588 else if (!vmw_force_iommu)
589 dev_priv->map_mode = vmw_dma_phys;
590 else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
591 dev_priv->map_mode = vmw_dma_alloc_coherent;
592 else 572 else
593 dev_priv->map_mode = vmw_dma_map_populate; 573 dev_priv->map_mode = vmw_dma_map_populate;
594 574
595 if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
596 dev_priv->map_mode = vmw_dma_map_bind;
597
598 /* No TTM coherent page pool? FIXME: Ask TTM instead! */ 575 /* No TTM coherent page pool? FIXME: Ask TTM instead! */
599 if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && 576 if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
600 (dev_priv->map_mode == vmw_dma_alloc_coherent)) 577 (dev_priv->map_mode == vmw_dma_alloc_coherent))
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 9b2b3fa479c4..5e44ff1f2085 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
195 ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs, 195 ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
196 DP_COM_CONF_CSC_DEF_BOTH); 196 DP_COM_CONF_CSC_DEF_BOTH);
197 } else { 197 } else {
198 if (flow->foreground.in_cs == flow->out_cs) 198 if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
199 flow->foreground.in_cs == flow->out_cs)
199 /* 200 /*
200 * foreground identical to output, apply color 201 * foreground identical to output, apply color
201 * conversion on background 202 * conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
261 struct ipu_dp_priv *priv = flow->priv; 262 struct ipu_dp_priv *priv = flow->priv;
262 u32 reg, csc; 263 u32 reg, csc;
263 264
265 dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
266
264 if (!dp->foreground) 267 if (!dp->foreground)
265 return; 268 return;
266 269
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
268 271
269 reg = readl(flow->base + DP_COM_CONF); 272 reg = readl(flow->base + DP_COM_CONF);
270 csc = reg & DP_COM_CONF_CSC_DEF_MASK; 273 csc = reg & DP_COM_CONF_CSC_DEF_MASK;
271 if (csc == DP_COM_CONF_CSC_DEF_FG) 274 reg &= ~DP_COM_CONF_CSC_DEF_MASK;
272 reg &= ~DP_COM_CONF_CSC_DEF_MASK; 275 if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
276 reg |= DP_COM_CONF_CSC_DEF_BG;
273 277
274 reg &= ~DP_COM_CONF_FG_EN; 278 reg &= ~DP_COM_CONF_FG_EN;
275 writel(reg, flow->base + DP_COM_CONF); 279 writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
347 mutex_init(&priv->mutex); 351 mutex_init(&priv->mutex);
348 352
349 for (i = 0; i < IPUV3_NUM_FLOWS; i++) { 353 for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
354 priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
355 priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
350 priv->flow[i].foreground.foreground = true; 356 priv->flow[i].foreground.foreground = true;
351 priv->flow[i].base = priv->base + ipu_dp_flow_base[i]; 357 priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
352 priv->flow[i].priv = priv; 358 priv->flow[i].priv = priv;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 38af18645133..c480ca385ffb 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
185int i2c_generic_scl_recovery(struct i2c_adapter *adap) 185int i2c_generic_scl_recovery(struct i2c_adapter *adap)
186{ 186{
187 struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; 187 struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
188 int i = 0, scl = 1, ret; 188 int i = 0, scl = 1, ret = 0;
189 189
190 if (bri->prepare_recovery) 190 if (bri->prepare_recovery)
191 bri->prepare_recovery(adap); 191 bri->prepare_recovery(adap);
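
The i2c one-liner is an uninitialized-variable fix: in the recovery loop, ret is only assigned on some branches, so a path that succeeds without writing it returned stack garbage. The shape of the bug, reduced to a sketch with hypothetical names:

int do_recover(void);

int recover(int needs_work)
{
        int ret = 0;    /* was uninitialized: the early-success path
                         * returned whatever happened to be on the stack */

        if (needs_work)
                ret = do_recover();

        return ret;
}
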
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index ea0bc6885517..32cc8fe7902f 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -160,6 +160,7 @@ struct ib_uverbs_file {
160 160
161 struct mutex umap_lock; 161 struct mutex umap_lock;
162 struct list_head umaps; 162 struct list_head umaps;
163 struct page *disassociate_page;
163 164
164 struct idr idr; 165 struct idr idr;
165 /* spinlock protects write access to idr */ 166 /* spinlock protects write access to idr */
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index f2e7ffe6fc54..c489f545baae 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
208 kref_put(&file->async_file->ref, 208 kref_put(&file->async_file->ref,
209 ib_uverbs_release_async_event_file); 209 ib_uverbs_release_async_event_file);
210 put_device(&file->device->dev); 210 put_device(&file->device->dev);
211
212 if (file->disassociate_page)
213 __free_pages(file->disassociate_page, 0);
211 kfree(file); 214 kfree(file);
212} 215}
213 216
@@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
877 kfree(priv); 880 kfree(priv);
878} 881}
879 882
883/*
884 * Once zap_vma_ptes() has been called, touches to the VMA will come here
885 * and we return a dummy writable zero page for all the pfns.
886 */
887static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
888{
889 struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
890 struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
891 vm_fault_t ret = 0;
892
893 if (!priv)
894 return VM_FAULT_SIGBUS;
895
896 /* Read-only pages can just use the system zero page. */
897 if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
898 vmf->page = ZERO_PAGE(vmf->address);
899 get_page(vmf->page);
900 return 0;
901 }
902
903 mutex_lock(&ufile->umap_lock);
904 if (!ufile->disassociate_page)
905 ufile->disassociate_page =
906 alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
907
908 if (ufile->disassociate_page) {
909 /*
910 * This VMA is forced to always be shared so this doesn't have
911 * to worry about COW.
912 */
913 vmf->page = ufile->disassociate_page;
914 get_page(vmf->page);
915 } else {
916 ret = VM_FAULT_SIGBUS;
917 }
918 mutex_unlock(&ufile->umap_lock);
919
920 return ret;
921}
922
880static const struct vm_operations_struct rdma_umap_ops = { 923static const struct vm_operations_struct rdma_umap_ops = {
881 .open = rdma_umap_open, 924 .open = rdma_umap_open,
882 .close = rdma_umap_close, 925 .close = rdma_umap_close,
926 .fault = rdma_umap_fault,
883}; 927};
884 928
885static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext, 929static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
@@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
889 struct ib_uverbs_file *ufile = ucontext->ufile; 933 struct ib_uverbs_file *ufile = ucontext->ufile;
890 struct rdma_umap_priv *priv; 934 struct rdma_umap_priv *priv;
891 935
936 if (!(vma->vm_flags & VM_SHARED))
937 return ERR_PTR(-EINVAL);
938
892 if (vma->vm_end - vma->vm_start != size) 939 if (vma->vm_end - vma->vm_start != size)
893 return ERR_PTR(-EINVAL); 940 return ERR_PTR(-EINVAL);
894 941
@@ -992,7 +1039,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
992 * at a time to get the lock ordering right. Typically there 1039 * at a time to get the lock ordering right. Typically there
993 * will only be one mm, so no big deal. 1040 * will only be one mm, so no big deal.
994 */ 1041 */
995 down_write(&mm->mmap_sem); 1042 down_read(&mm->mmap_sem);
996 if (!mmget_still_valid(mm)) 1043 if (!mmget_still_valid(mm))
997 goto skip_mm; 1044 goto skip_mm;
998 mutex_lock(&ufile->umap_lock); 1045 mutex_lock(&ufile->umap_lock);
@@ -1006,11 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
1006 1053
1007 zap_vma_ptes(vma, vma->vm_start, 1054 zap_vma_ptes(vma, vma->vm_start,
1008 vma->vm_end - vma->vm_start); 1055 vma->vm_end - vma->vm_start);
1009 vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
1010 } 1056 }
1011 mutex_unlock(&ufile->umap_lock); 1057 mutex_unlock(&ufile->umap_lock);
1012 skip_mm: 1058 skip_mm:
1013 up_write(&mm->mmap_sem); 1059 up_read(&mm->mmap_sem);
1014 mmput(mm); 1060 mmput(mm);
1015 } 1061 }
1016} 1062}
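
The uverbs hunks change how mappings survive device disassociation: mmaps must now be VM_SHARED up front, zap_vma_ptes() clears the PTEs without stripping VMA flags (hence mmap_sem is taken for read rather than write), and later faults land in rdma_umap_fault(), which answers with the global zero page for read-only VMAs or a per-file zeroed dummy page for writable ones. A generic sketch of such a fault handler; file_state and its dummy_page are hypothetical, while ZERO_PAGE(), get_page() and the vm_fault contract are the real kernel API.

#include <linux/mm.h>

struct file_state {
        struct page *dummy_page;        /* allocated zeroed, per open file */
};

static vm_fault_t dummy_fault(struct vm_fault *vmf)
{
        struct file_state *st = vmf->vma->vm_file->private_data;

        /* read-only mappings can all share the global zero page */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }

        if (!st->dummy_page)
                return VM_FAULT_SIGBUS;

        /* the mapping is forced VM_SHARED, so handing one writable
         * page to every faulting VMA involves no COW bookkeeping */
        vmf->page = st->dummy_page;
        get_page(vmf->page);
        return 0;
}
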
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 66cdf625534f..60cf9f03e941 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -533,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
533 533
534static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) 534static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
535{ 535{
536 if (attr->qp_type == IB_QPT_XRC_TGT) 536 if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
537 return 0; 537 return 0;
538 538
539 return 1; 539 return 1;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0845e95d2d11..347e3cac254e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1119,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1119 if (MLX5_CAP_GEN(mdev, qp_packet_based)) 1119 if (MLX5_CAP_GEN(mdev, qp_packet_based))
1120 resp.flags |= 1120 resp.flags |=
1121 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE; 1121 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1122
1123 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1122 } 1124 }
1123 1125
1124 if (field_avail(typeof(resp), sw_parsing_caps, 1126 if (field_avail(typeof(resp), sw_parsing_caps,
@@ -2066,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2066 2068
2067 if (vma->vm_flags & VM_WRITE) 2069 if (vma->vm_flags & VM_WRITE)
2068 return -EPERM; 2070 return -EPERM;
2071 vma->vm_flags &= ~VM_MAYWRITE;
2069 2072
2070 if (!dev->mdev->clock_info_page) 2073 if (!dev->mdev->clock_info_page)
2071 return -EOPNOTSUPP; 2074 return -EOPNOTSUPP;
@@ -2231,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
2231 2234
2232 if (vma->vm_flags & VM_WRITE) 2235 if (vma->vm_flags & VM_WRITE)
2233 return -EPERM; 2236 return -EPERM;
2237 vma->vm_flags &= ~VM_MAYWRITE;
2234 2238
2235 /* Don't expose to user-space information it shouldn't have */ 2239 /* Don't expose to user-space information it shouldn't have */
2236 if (PAGE_SIZE > 4096) 2240 if (PAGE_SIZE > 4096)
2237 return -EOPNOTSUPP; 2241 return -EOPNOTSUPP;
2238 2242
2239 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2240 pfn = (dev->mdev->iseg_base + 2243 pfn = (dev->mdev->iseg_base +
2241 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 2244 offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2242 PAGE_SHIFT; 2245 PAGE_SHIFT;
2243 if (io_remap_pfn_range(vma, vma->vm_start, pfn, 2246 return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2244 PAGE_SIZE, vma->vm_page_prot)) 2247 PAGE_SIZE,
2245 return -EAGAIN; 2248 pgprot_noncached(vma->vm_page_prot));
2246 break;
2247 case MLX5_IB_MMAP_CLOCK_INFO: 2249 case MLX5_IB_MMAP_CLOCK_INFO:
2248 return mlx5_ib_mmap_clock_info_page(dev, vma, context); 2250 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2249 2251
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ef7d69269a88..fc67d78ca959 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1818,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
1818 1818
1819 rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq); 1819 rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
1820 1820
1821 if (rcqe_sz == 128) { 1821 if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
1822 MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); 1822 if (rcqe_sz == 128)
1823 MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
1824
1823 return; 1825 return;
1824 } 1826 }
1825 1827
1826 if (init_attr->qp_type != MLX5_IB_QPT_DCT) 1828 MLX5_SET(qpc, qpc, cs_res,
1827 MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE); 1829 rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
1830 MLX5_RES_SCAT_DATA32_CQE);
1828} 1831}
1829 1832
1830static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, 1833static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 728795043496..0bb6e39dd03a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -608,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
608 if (unlikely(mapped_segs == mr->mr.max_segs)) 608 if (unlikely(mapped_segs == mr->mr.max_segs))
609 return -ENOMEM; 609 return -ENOMEM;
610 610
611 if (mr->mr.length == 0) {
612 mr->mr.user_base = addr;
613 mr->mr.iova = addr;
614 }
615
616 m = mapped_segs / RVT_SEGSZ; 611 m = mapped_segs / RVT_SEGSZ;
617 n = mapped_segs % RVT_SEGSZ; 612 n = mapped_segs % RVT_SEGSZ;
618 mr->mr.map[m]->segs[n].vaddr = (void *)addr; 613 mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -630,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
630 * @sg_nents: number of entries in sg 625 * @sg_nents: number of entries in sg
631 * @sg_offset: offset in bytes into sg 626 * @sg_offset: offset in bytes into sg
632 * 627 *
628 * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
629 *
633 * Return: number of sg elements mapped to the memory region 630 * Return: number of sg elements mapped to the memory region
634 */ 631 */
635int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, 632int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
636 int sg_nents, unsigned int *sg_offset) 633 int sg_nents, unsigned int *sg_offset)
637{ 634{
638 struct rvt_mr *mr = to_imr(ibmr); 635 struct rvt_mr *mr = to_imr(ibmr);
636 int ret;
639 637
640 mr->mr.length = 0; 638 mr->mr.length = 0;
641 mr->mr.page_shift = PAGE_SHIFT; 639 mr->mr.page_shift = PAGE_SHIFT;
642 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 640 ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
643 rvt_set_page); 641 mr->mr.user_base = ibmr->iova;
642 mr->mr.iova = ibmr->iova;
643 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
644 mr->mr.length = (size_t)ibmr->length;
645 return ret;
644} 646}
645 647
646/** 648/**
@@ -671,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
671 ibmr->rkey = key; 673 ibmr->rkey = key;
672 mr->mr.lkey = key; 674 mr->mr.lkey = key;
673 mr->mr.access_flags = access; 675 mr->mr.access_flags = access;
676 mr->mr.iova = ibmr->iova;
674 atomic_set(&mr->mr.lkey_invalid, 0); 677 atomic_set(&mr->mr.lkey_invalid, 0);
675 678
676 return 0; 679 return 0;
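
The rdmavt hunks stop recording user_base/iova from whatever address the first rvt_set_page() call happened to see, and instead copy them from the ib_mr after ib_sg_to_pages() returns, since that is where the mapped iova and length actually live; rvt_fast_reg_mr() mirrors iova the same way. A sketch of the resulting flow; my_mr and my_set_page are hypothetical, ib_sg_to_pages() is the real verbs helper.

#include <rdma/ib_verbs.h>

struct my_mr {
        struct ib_mr ibmr;
        u64 user_base;
        u64 iova;
        size_t length;
};

int my_set_page(struct ib_mr *ibmr, u64 addr);

int my_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                 int sg_nents, unsigned int *sg_offset)
{
        struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
        int ret;

        mr->length = 0;
        ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_set_page);

        /* ib_sg_to_pages() computed ibmr->iova/length; mirror those
         * rather than trusting the first set_page() address */
        mr->user_base = ibmr->iova;
        mr->iova = ibmr->iova;
        mr->length = (size_t)ibmr->length;
        return ret;
}
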
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index a878351f1643..52d7f55fca32 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
420 420
421config KEYBOARD_SNVS_PWRKEY 421config KEYBOARD_SNVS_PWRKEY
422 tristate "IMX SNVS Power Key Driver" 422 tristate "IMX SNVS Power Key Driver"
423 depends on SOC_IMX6SX || SOC_IMX7D 423 depends on ARCH_MXC || COMPILE_TEST
424 depends on OF 424 depends on OF
425 help 425 help
426 This is the snvs powerkey driver for the Freescale i.MX application 426 This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index fc3ab93b7aea..7fb358f96195 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
860 860
861 error = rmi_register_function(fn); 861 error = rmi_register_function(fn);
862 if (error) 862 if (error)
863 goto err_put_fn; 863 return error;
864 864
865 if (pdt->function_number == 0x01) 865 if (pdt->function_number == 0x01)
866 data->f01_container = fn; 866 data->f01_container = fn;
@@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
870 list_add_tail(&fn->node, &data->function_list); 870 list_add_tail(&fn->node, &data->function_list);
871 871
872 return RMI_SCAN_CONTINUE; 872 return RMI_SCAN_CONTINUE;
873
874err_put_fn:
875 put_device(&fn->dev);
876 return error;
877} 873}
878 874
879void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake) 875void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
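
The rmi_driver hunk removes a double-put: per this fix, rmi_register_function() already drops the device reference when it fails, so the caller's extra put_device() on the error path underflowed the refcount. Reduced to its ownership rule, with register_fn and my_fn as hypothetical stand-ins:

#include <linux/device.h>

struct my_fn { struct device dev; };

/* assumed to release its reference to fn->dev itself on failure */
int register_fn(struct my_fn *fn);

static int create_fn(struct my_fn *fn)
{
        int error = register_fn(fn);

        if (error)
                return error;   /* no put_device(&fn->dev) here: that
                                 * would drop the reference twice */

        return 0;
}
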
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index df64d6aed4f7..93901ebd122a 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
1230 } 1230 }
1231 1231
1232 rc = f11_write_control_regs(fn, &f11->sens_query, 1232 rc = f11_write_control_regs(fn, &f11->sens_query,
1233 &f11->dev_controls, fn->fd.query_base_addr); 1233 &f11->dev_controls, fn->fd.control_base_addr);
1234 if (rc) 1234 if (rc)
1235 dev_warn(&fn->dev, "Failed to write control registers\n"); 1235 dev_warn(&fn->dev, "Failed to write control registers\n");
1236 1236
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index f38e5c1b87e4..d984538980e2 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
722 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); 722 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
723 u32 ndcr_generic; 723 u32 ndcr_generic;
724 724
725 if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
726 return;
727
728 writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
729 writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
730
731 /* 725 /*
732 * Reset the NDCR register to a clean state for this particular chip, 726 * Reset the NDCR register to a clean state for this particular chip,
733 * also clear ND_RUN bit. 727 * also clear ND_RUN bit.
@@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
739 /* Also reset the interrupt status register */ 733 /* Also reset the interrupt status register */
740 marvell_nfc_clear_int(nfc, NDCR_ALL_INT); 734 marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
741 735
736 if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
737 return;
738
739 writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
740 writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
741
742 nfc->selected_chip = chip; 742 nfc->selected_chip = chip;
743 marvell_nand->selected_die = die_nr; 743 marvell_nand->selected_die = die_nr;
744} 744}
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e6234d209787..4212bc4a5f31 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
886 fs->m_ext.data[1])) 886 fs->m_ext.data[1]))
887 return -EINVAL; 887 return -EINVAL;
888 888
889 if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
890 return -EINVAL;
891
889 if (fs->location != RX_CLS_LOC_ANY && 892 if (fs->location != RX_CLS_LOC_ANY &&
890 test_bit(fs->location, priv->cfp.used)) 893 test_bit(fs->location, priv->cfp.used))
891 return -EBUSY; 894 return -EBUSY;
@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
974 struct cfp_rule *rule; 977 struct cfp_rule *rule;
975 int ret; 978 int ret;
976 979
980 if (loc >= CFP_NUM_RULES)
981 return -EINVAL;
982
977 /* Refuse deleting unused rules, and those that are not unique since 983 /* Refuse deleting unused rules, and those that are not unique since
978 * that could leave IPv6 rules with one of the chained rules in the 984 * that could leave IPv6 rules with one of the chained rules in the
979 * table. 985 * table.
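
Both bcm_sf2 hunks add the same guard: fs->location and loc arrive from ethtool, i.e. from user space, and are used as indexes into a CFP_NUM_RULES-sized bitmap, so they must be range-checked before test_bit() or any rule lookup. A self-contained sketch of that guard; the size 256 is purely illustrative.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define NUM_RULES 256                   /* illustrative bound */

static DECLARE_BITMAP(used_rules, NUM_RULES);

static int rule_del(u32 loc)
{
        if (loc >= NUM_RULES)           /* reject before touching the map */
                return -EINVAL;
        if (!test_bit(loc, used_rules))
                return -ENOENT;
        clear_bit(loc, used_rules);
        return 0;
}
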
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 526f36dcb204..a0de3c368f4a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1625,7 +1625,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1625 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); 1625 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1626 bnxt_sched_reset(bp, rxr); 1626 bnxt_sched_reset(bp, rxr);
1627 } 1627 }
1628 goto next_rx; 1628 goto next_rx_no_len;
1629 } 1629 }
1630 1630
1631 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 1631 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1706,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1706 rc = 1; 1706 rc = 1;
1707 1707
1708next_rx: 1708next_rx:
1709 rxr->rx_prod = NEXT_RX(prod);
1710 rxr->rx_next_cons = NEXT_RX(cons);
1711
1712 cpr->rx_packets += 1; 1709 cpr->rx_packets += 1;
1713 cpr->rx_bytes += len; 1710 cpr->rx_bytes += len;
1714 1711
1712next_rx_no_len:
1713 rxr->rx_prod = NEXT_RX(prod);
1714 rxr->rx_next_cons = NEXT_RX(cons);
1715
1715next_rx_no_prod_no_len: 1716next_rx_no_prod_no_len:
1716 *raw_cons = tmp_raw_cons; 1717 *raw_cons = tmp_raw_cons;
1717 1718
@@ -5135,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5135 for (i = 0; i < bp->tx_nr_rings; i++) { 5136 for (i = 0; i < bp->tx_nr_rings; i++) {
5136 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5137 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5137 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 5138 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5138 u32 cmpl_ring_id;
5139 5139
5140 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5141 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5140 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5141 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5142
5142 hwrm_ring_free_send_msg(bp, ring, 5143 hwrm_ring_free_send_msg(bp, ring,
5143 RING_FREE_REQ_RING_TYPE_TX, 5144 RING_FREE_REQ_RING_TYPE_TX,
5144 close_path ? cmpl_ring_id : 5145 close_path ? cmpl_ring_id :
@@ -5151,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5151 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5152 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5152 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 5153 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5153 u32 grp_idx = rxr->bnapi->index; 5154 u32 grp_idx = rxr->bnapi->index;
5154 u32 cmpl_ring_id;
5155 5155
5156 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5157 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5156 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5157 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5158
5158 hwrm_ring_free_send_msg(bp, ring, 5159 hwrm_ring_free_send_msg(bp, ring,
5159 RING_FREE_REQ_RING_TYPE_RX, 5160 RING_FREE_REQ_RING_TYPE_RX,
5160 close_path ? cmpl_ring_id : 5161 close_path ? cmpl_ring_id :
@@ -5173,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5173 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5174 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5174 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 5175 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5175 u32 grp_idx = rxr->bnapi->index; 5176 u32 grp_idx = rxr->bnapi->index;
5176 u32 cmpl_ring_id;
5177 5177
5178 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5179 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 5178 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5179 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5180
5180 hwrm_ring_free_send_msg(bp, ring, type, 5181 hwrm_ring_free_send_msg(bp, ring, type,
5181 close_path ? cmpl_ring_id : 5182 close_path ? cmpl_ring_id :
5182 INVALID_HW_RING_ID); 5183 INVALID_HW_RING_ID);
@@ -5315,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5315 req->num_tx_rings = cpu_to_le16(tx_rings); 5316 req->num_tx_rings = cpu_to_le16(tx_rings);
5316 if (BNXT_NEW_RM(bp)) { 5317 if (BNXT_NEW_RM(bp)) {
5317 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 5318 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5319 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5318 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5320 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5319 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 5321 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5320 enables |= tx_rings + ring_grps ? 5322 enables |= tx_rings + ring_grps ?
5321 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5323 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5322 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5323 enables |= rx_rings ? 5324 enables |= rx_rings ?
5324 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5325 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5325 } else { 5326 } else {
5326 enables |= cp_rings ? 5327 enables |= cp_rings ?
5327 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5328 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5328 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5329 enables |= ring_grps ? 5329 enables |= ring_grps ?
5330 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | 5330 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5331 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5331 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5365,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5365 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 5365 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5366 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 5366 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5367 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 5367 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5368 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5368 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5369 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5369 enables |= tx_rings + ring_grps ? 5370 enables |= tx_rings + ring_grps ?
5370 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5371 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5371 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5372 } else { 5372 } else {
5373 enables |= cp_rings ? 5373 enables |= cp_rings ?
5374 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 5374 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5375 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5376 enables |= ring_grps ? 5375 enables |= ring_grps ?
5377 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 5376 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5378 } 5377 }
@@ -6753,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6753 struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; 6752 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
6754 struct hwrm_port_qstats_ext_input req = {0}; 6753 struct hwrm_port_qstats_ext_input req = {0};
6755 struct bnxt_pf_info *pf = &bp->pf; 6754 struct bnxt_pf_info *pf = &bp->pf;
6755 u32 tx_stat_size;
6756 int rc; 6756 int rc;
6757 6757
6758 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 6758 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6762,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6762 req.port_id = cpu_to_le16(pf->port_id); 6762 req.port_id = cpu_to_le16(pf->port_id);
6763 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 6763 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6764 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); 6764 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
6765 req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext)); 6765 tx_stat_size = bp->hw_tx_port_stats_ext ?
6766 sizeof(*bp->hw_tx_port_stats_ext) : 0;
6767 req.tx_stat_size = cpu_to_le16(tx_stat_size);
6766 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); 6768 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6767 mutex_lock(&bp->hwrm_cmd_lock); 6769 mutex_lock(&bp->hwrm_cmd_lock);
6768 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 6770 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6769 if (!rc) { 6771 if (!rc) {
6770 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; 6772 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6771 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8; 6773 bp->fw_tx_stats_ext_size = tx_stat_size ?
6774 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
6772 } else { 6775 } else {
6773 bp->fw_rx_stats_ext_size = 0; 6776 bp->fw_rx_stats_ext_size = 0;
6774 bp->fw_tx_stats_ext_size = 0; 6777 bp->fw_tx_stats_ext_size = 0;
@@ -8961,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
8961 8964
8962skip_uc: 8965skip_uc:
8963 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 8966 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8967 if (rc && vnic->mc_list_count) {
8968 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
8969 rc);
8970 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8971 vnic->mc_list_count = 0;
8972 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8973 }
8964 if (rc) 8974 if (rc)
8965 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", 8975 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
8966 rc); 8976 rc);
8967 8977
8968 return rc; 8978 return rc;
@@ -10699,6 +10709,7 @@ init_err_cleanup_tc:
10699 bnxt_clear_int_mode(bp); 10709 bnxt_clear_int_mode(bp);
10700 10710
10701init_err_pci_clean: 10711init_err_pci_clean:
10712 bnxt_free_hwrm_short_cmd_req(bp);
10702 bnxt_free_hwrm_resources(bp); 10713 bnxt_free_hwrm_resources(bp);
10703 bnxt_free_ctx_mem(bp); 10714 bnxt_free_ctx_mem(bp);
10704 kfree(bp->ctx); 10715 kfree(bp->ctx);
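
The first bnxt hunk is the subtle one: on an RX buffer error the completion's length field was never read, yet the old goto next_rx still executed cpr->rx_bytes += len with a stale value; the new next_rx_no_len label keeps the ring advancing while skipping the counters. The fixed shape, genericized onto a hypothetical ring:

struct rx_ring {
        int error, completion_len;
        unsigned int prod, cons;
        unsigned long pkts, bytes;
};

#define NEXT(x) ((x) + 1)               /* stand-in for ring-index wrap */

static int poll_one(struct rx_ring *r)
{
        int len;

        if (r->error)
                goto next_rx_no_len;    /* len is garbage for bad buffers */

        len = r->completion_len;
        /* deliver the packet here ... */
        r->pkts += 1;
        r->bytes += len;                /* only counted when len is valid */

next_rx_no_len:
        r->prod = NEXT(r->prod);        /* the ring must advance either way */
        r->cons = NEXT(r->cons);
        return 0;
}
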
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 062a600fa5a7..21428537e231 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
333 */ 333 */
334 dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev, 334 dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
335 "stm32_pwr_wakeup"); 335 "stm32_pwr_wakeup");
336 if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
337 return -EPROBE_DEFER;
338
336 if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) { 339 if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
337 err = device_init_wakeup(&pdev->dev, true); 340 err = device_init_wakeup(&pdev->dev, true);
338 if (err) { 341 if (err) {
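
The dwmac-stm32 hunk propagates -EPROBE_DEFER from an optional wakeup IRQ: without it, a deferral from platform_get_irq_byname() was folded into the "no IRQ" case and the device probed without wakeup support for good. A sketch of the rule; enable_wakeup() is hypothetical, platform_get_irq_byname() is the real API.

#include <linux/errno.h>
#include <linux/platform_device.h>

int enable_wakeup(struct platform_device *pdev, int irq);

static int parse_wakeup_irq(struct platform_device *pdev)
{
        int irq = platform_get_irq_byname(pdev, "stm32_pwr_wakeup");

        if (irq == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider may appear later; retry */
        if (irq >= 0)
                return enable_wakeup(pdev, irq);

        return 0;                       /* other errors: IRQ treated as absent */
}
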
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index cc1e887e47b5..26db6aa002d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -160,7 +160,7 @@ static const struct dmi_system_id quark_pci_dmi[] = {
160 .driver_data = (void *)&galileo_stmmac_dmi_data, 160 .driver_data = (void *)&galileo_stmmac_dmi_data,
161 }, 161 },
162 /* 162 /*
163 * There are 2 types of SIMATIC IOT2000: IOT20202 and IOT2040. 163 * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
164 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which 164 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
165 * has only one pci network device while other asset tags are 165 * has only one pci network device while other asset tags are
166 * for IOT2040 which has two. 166 * for IOT2040 which has two.
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index c589f5ae75bb..8bb53ec8d9cf 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
533 dev_dbg(printdev(lp), "no slotted operation\n"); 533 dev_dbg(printdev(lp), "no slotted operation\n");
534 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, 534 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
535 DAR_PHY_CTRL1_SLOTTED, 0x0); 535 DAR_PHY_CTRL1_SLOTTED, 0x0);
536 if (ret < 0)
537 return ret;
536 538
537 /* enable irq */ 539 /* enable irq */
538 enable_irq(lp->spi->irq); 540 enable_irq(lp->spi->irq);
@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
540 /* Unmask SEQ interrupt */ 542 /* Unmask SEQ interrupt */
541 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2, 543 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
542 DAR_PHY_CTRL2_SEQMSK, 0x0); 544 DAR_PHY_CTRL2_SEQMSK, 0x0);
545 if (ret < 0)
546 return ret;
543 547
544 /* Start the RX sequence */ 548 /* Start the RX sequence */
545 dev_dbg(printdev(lp), "start the RX sequence\n"); 549 dev_dbg(printdev(lp), "start the RX sequence\n");
546 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1, 550 ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
547 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX); 551 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
552 if (ret < 0)
553 return ret;
548 554
549 return 0; 555 return 0;
550} 556}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a7e8c8113d97..a7796134e3be 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1597,9 +1597,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
1597 1597
1598static void marvell_get_strings(struct phy_device *phydev, u8 *data) 1598static void marvell_get_strings(struct phy_device *phydev, u8 *data)
1599{ 1599{
1600 int count = marvell_get_sset_count(phydev);
1600 int i; 1601 int i;
1601 1602
1602 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { 1603 for (i = 0; i < count; i++) {
1603 strlcpy(data + i * ETH_GSTRING_LEN, 1604 strlcpy(data + i * ETH_GSTRING_LEN,
1604 marvell_hw_stats[i].string, ETH_GSTRING_LEN); 1605 marvell_hw_stats[i].string, ETH_GSTRING_LEN);
1605 } 1606 }
@@ -1627,9 +1628,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
1627static void marvell_get_stats(struct phy_device *phydev, 1628static void marvell_get_stats(struct phy_device *phydev,
1628 struct ethtool_stats *stats, u64 *data) 1629 struct ethtool_stats *stats, u64 *data)
1629{ 1630{
1631 int count = marvell_get_sset_count(phydev);
1630 int i; 1632 int i;
1631 1633
1632 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) 1634 for (i = 0; i < count; i++)
1633 data[i] = marvell_get_stat(phydev, i); 1635 data[i] = marvell_get_stat(phydev, i);
1634} 1636}
1635 1637
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index f4e93f5fc204..ea90db3c7705 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@ out_fail:
153void 153void
154slhc_free(struct slcompress *comp) 154slhc_free(struct slcompress *comp)
155{ 155{
156 if ( comp == NULLSLCOMPR ) 156 if ( IS_ERR_OR_NULL(comp) )
157 return; 157 return;
158 158
159 if ( comp->tstate != NULLSLSTATE ) 159 if ( comp->tstate != NULLSLSTATE )
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 18c4e5d17b05..5c3ac97519b7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1131,9 +1131,16 @@ static const struct usb_device_id products[] = {
1131 {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ 1131 {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
1132 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1132 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1133 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1133 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1134 {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
1135 {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
1136 {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
1137 {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
1138 {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
1134 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ 1139 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1135 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ 1140 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
1136 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ 1141 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
1142 {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
1143 {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
1137 {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ 1144 {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
1138 {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */ 1145 {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
1139 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 1146 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -1189,6 +1196,7 @@ static const struct usb_device_id products[] = {
1189 {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */ 1196 {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
1190 {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ 1197 {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
1191 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ 1198 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
1199 {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
1192 {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */ 1200 {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
1193 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ 1201 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
1194 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ 1202 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
@@ -1209,7 +1217,9 @@ static const struct usb_device_id products[] = {
1209 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, 1217 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
1210 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 1218 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
1211 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 1219 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
1220 {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
1212 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1221 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1222 {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
1213 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1223 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1214 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1224 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1215 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ 1225 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 24b983edb357..eca87f7c5b6c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
1855 struct ath10k_ce_crash_data ce_data; 1855 struct ath10k_ce_crash_data ce_data;
1856 u32 addr, id; 1856 u32 addr, id;
1857 1857
1858 lockdep_assert_held(&ar->data_lock); 1858 lockdep_assert_held(&ar->dump_mutex);
1859 1859
1860 ath10k_err(ar, "Copy Engine register dump:\n"); 1860 ath10k_err(ar, "Copy Engine register dump:\n");
1861 1861
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 835b8de92d55..aff585658fc0 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
3119 goto err_free_wq; 3119 goto err_free_wq;
3120 3120
3121 mutex_init(&ar->conf_mutex); 3121 mutex_init(&ar->conf_mutex);
3122 mutex_init(&ar->dump_mutex);
3122 spin_lock_init(&ar->data_lock); 3123 spin_lock_init(&ar->data_lock);
3123 3124
3124 INIT_LIST_HEAD(&ar->peers); 3125 INIT_LIST_HEAD(&ar->peers);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e08a17b01e03..e35aae5146f1 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1063,6 +1063,9 @@ struct ath10k {
1063 /* prevents concurrent FW reconfiguration */ 1063 /* prevents concurrent FW reconfiguration */
1064 struct mutex conf_mutex; 1064 struct mutex conf_mutex;
1065 1065
1066 /* protects coredump data */
1067 struct mutex dump_mutex;
1068
1066 /* protects shared structure data */ 1069 /* protects shared structure data */
1067 spinlock_t data_lock; 1070 spinlock_t data_lock;
1068 1071
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 33838d9c1cb6..45a355fb62b9 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
1102{ 1102{
1103 struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; 1103 struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
1104 1104
1105 lockdep_assert_held(&ar->data_lock); 1105 lockdep_assert_held(&ar->dump_mutex);
1106 1106
1107 if (ath10k_coredump_mask == 0) 1107 if (ath10k_coredump_mask == 0)
1108 /* coredump disabled */ 1108 /* coredump disabled */
@@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
1146 if (!buf) 1146 if (!buf)
1147 return NULL; 1147 return NULL;
1148 1148
1149 spin_lock_bh(&ar->data_lock); 1149 mutex_lock(&ar->dump_mutex);
1150 1150
1151 dump_data = (struct ath10k_dump_file_data *)(buf); 1151 dump_data = (struct ath10k_dump_file_data *)(buf);
1152 strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", 1152 strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
@@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
1213 sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; 1213 sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
1214 } 1214 }
1215 1215
1216 spin_unlock_bh(&ar->data_lock); 1216 mutex_unlock(&ar->dump_mutex);
1217 1217
1218 return dump_data; 1218 return dump_data;
1219} 1219}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 41e89db244d2..9c703d287333 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5774 } 5774 }
5775 5775
5776 if (changed & BSS_CHANGED_MCAST_RATE && 5776 if (changed & BSS_CHANGED_MCAST_RATE &&
5777 !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) { 5777 !ath10k_mac_vif_chan(arvif->vif, &def)) {
5778 band = def.chan->band; 5778 band = def.chan->band;
5779 rateidx = vif->bss_conf.mcast_rate[band] - 1; 5779 rateidx = vif->bss_conf.mcast_rate[band] - 1;
5780 5780
@@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5812 } 5812 }
5813 5813
5814 if (changed & BSS_CHANGED_BASIC_RATES) { 5814 if (changed & BSS_CHANGED_BASIC_RATES) {
5815 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) { 5815 if (ath10k_mac_vif_chan(vif, &def)) {
5816 mutex_unlock(&ar->conf_mutex); 5816 mutex_unlock(&ar->conf_mutex);
5817 return; 5817 return;
5818 } 5818 }
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 271f92c24d44..2c27f407a851 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
1441 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1441 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1442 int i, ret; 1442 int i, ret;
1443 1443
1444 lockdep_assert_held(&ar->data_lock); 1444 lockdep_assert_held(&ar->dump_mutex);
1445 1445
1446 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], 1446 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1447 hi_failure_state, 1447 hi_failure_state,
@@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
1656 int ret, i; 1656 int ret, i;
1657 u8 *buf; 1657 u8 *buf;
1658 1658
1659 lockdep_assert_held(&ar->data_lock); 1659 lockdep_assert_held(&ar->dump_mutex);
1660 1660
1661 if (!crash_data) 1661 if (!crash_data)
1662 return; 1662 return;
@@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
1734 } 1734 }
1735} 1735}
1736 1736
1737static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) 1737static void ath10k_pci_fw_dump_work(struct work_struct *work)
1738{ 1738{
1739 struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1740 dump_work);
1739 struct ath10k_fw_crash_data *crash_data; 1741 struct ath10k_fw_crash_data *crash_data;
1742 struct ath10k *ar = ar_pci->ar;
1740 char guid[UUID_STRING_LEN + 1]; 1743 char guid[UUID_STRING_LEN + 1];
1741 1744
1742 spin_lock_bh(&ar->data_lock); 1745 mutex_lock(&ar->dump_mutex);
1743 1746
1747 spin_lock_bh(&ar->data_lock);
1744 ar->stats.fw_crash_counter++; 1748 ar->stats.fw_crash_counter++;
1749 spin_unlock_bh(&ar->data_lock);
1745 1750
1746 crash_data = ath10k_coredump_new(ar); 1751 crash_data = ath10k_coredump_new(ar);
1747 1752
@@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1756 ath10k_ce_dump_registers(ar, crash_data); 1761 ath10k_ce_dump_registers(ar, crash_data);
1757 ath10k_pci_dump_memory(ar, crash_data); 1762 ath10k_pci_dump_memory(ar, crash_data);
1758 1763
1759 spin_unlock_bh(&ar->data_lock); 1764 mutex_unlock(&ar->dump_mutex);
1760 1765
1761 queue_work(ar->workqueue, &ar->restart_work); 1766 queue_work(ar->workqueue, &ar->restart_work);
1762} 1767}
1763 1768
1769static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1770{
1771 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1772
1773 queue_work(ar->workqueue, &ar_pci->dump_work);
1774}
1775
1764void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1776void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1765 int force) 1777 int force)
1766{ 1778{
@@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
3442 spin_lock_init(&ar_pci->ps_lock); 3454 spin_lock_init(&ar_pci->ps_lock);
3443 mutex_init(&ar_pci->ce_diag_mutex); 3455 mutex_init(&ar_pci->ce_diag_mutex);
3444 3456
3457 INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3458
3445 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); 3459 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3446 3460
3447 if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) 3461 if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 3773c79f322f..4455ed6c5275 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -121,6 +121,8 @@ struct ath10k_pci {
121 /* For protecting ce_diag */ 121 /* For protecting ce_diag */
122 struct mutex ce_diag_mutex; 122 struct mutex ce_diag_mutex;
123 123
124 struct work_struct dump_work;
125
124 struct ath10k_ce ce; 126 struct ath10k_ce ce;
125 struct timer_list rx_post_retry; 127 struct timer_list rx_post_retry;
126 128
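
Taken together, the ath10k hunks move firmware coredump collection from under the data_lock spinlock to a dedicated dump_mutex plus a work item: the crash hook now only queues work, and the slow register/memory dump runs in process context where sleeping is allowed. A hedged sketch of that split; the foo_* names are hypothetical.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo {
        struct mutex dump_mutex;        /* serializes all coredump users */
        struct work_struct dump_work;   /* INIT_WORK()'d at setup time */
        struct workqueue_struct *wq;
};

void foo_collect_coredump(struct foo *f);       /* slow, may sleep */

static void foo_dump_work(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, dump_work);

        mutex_lock(&f->dump_mutex);     /* sleeping is fine in a work item */
        foo_collect_coredump(f);
        mutex_unlock(&f->dump_mutex);
}

/* safe from the IRQ/crash path: nothing here can sleep */
static void foo_fw_crashed(struct foo *f)
{
        queue_work(f->wq, &f->dump_work);
}
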
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index fc915ecfb06e..17b34f6e4515 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -207,7 +207,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
207#define IWL_DEVICE_AX210 \ 207#define IWL_DEVICE_AX210 \
208 IWL_DEVICE_AX200_COMMON, \ 208 IWL_DEVICE_AX200_COMMON, \
209 .device_family = IWL_DEVICE_FAMILY_AX210, \ 209 .device_family = IWL_DEVICE_FAMILY_AX210, \
210 .base_params = &iwl_22000_base_params, \ 210 .base_params = &iwl_22560_base_params, \
211 .csr = &iwl_csr_v1, \ 211 .csr = &iwl_csr_v1, \
212 .min_txq_size = 128, \ 212 .min_txq_size = 128, \
213 .gp2_reg_addr = 0xd02c68, \ 213 .gp2_reg_addr = 0xd02c68, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 575a7022d045..3846064d51a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 	.ht_params = &iwl5000_ht_params,
 	.led_mode = IWL_LED_BLINK,
 	.internal_wimax_coex = true,
+	.csr = &iwl_csr_v1,
 };
 
 #define IWL_DEVICE_5150	\
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index abfdcabdcbf7..cd622af90077 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
 	} u;
 };
 
-#define IWL_UCODE_INI_TLV_GROUP	BIT(24)
+#define IWL_UCODE_INI_TLV_GROUP	0x1000000
 
 /*
  * new TLV uCode file layout
@@ -148,11 +148,14 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_UMAC_DEBUG_ADDRS	= 54,
 	IWL_UCODE_TLV_LMAC_DEBUG_ADDRS	= 55,
 	IWL_UCODE_TLV_FW_RECOVERY_INFO	= 57,
-	IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION	= IWL_UCODE_INI_TLV_GROUP | 0x1,
-	IWL_UCODE_TLV_TYPE_HCMD			= IWL_UCODE_INI_TLV_GROUP | 0x2,
-	IWL_UCODE_TLV_TYPE_REGIONS		= IWL_UCODE_INI_TLV_GROUP | 0x3,
-	IWL_UCODE_TLV_TYPE_TRIGGERS		= IWL_UCODE_INI_TLV_GROUP | 0x4,
-	IWL_UCODE_TLV_TYPE_DEBUG_FLOW		= IWL_UCODE_INI_TLV_GROUP | 0x5,
+
+	IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION	= IWL_UCODE_INI_TLV_GROUP + 0x1,
+	IWL_UCODE_TLV_DEBUG_BASE	= IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
+	IWL_UCODE_TLV_TYPE_HCMD		= IWL_UCODE_INI_TLV_GROUP + 0x2,
+	IWL_UCODE_TLV_TYPE_REGIONS	= IWL_UCODE_INI_TLV_GROUP + 0x3,
+	IWL_UCODE_TLV_TYPE_TRIGGERS	= IWL_UCODE_INI_TLV_GROUP + 0x4,
+	IWL_UCODE_TLV_TYPE_DEBUG_FLOW	= IWL_UCODE_INI_TLV_GROUP + 0x5,
+	IWL_UCODE_TLV_DEBUG_MAX	= IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
 
 	/* TLVs 0x1000-0x2000 are for internal driver usage */
 	IWL_UCODE_TLV_FW_DBG_DUMP_LST	= 0x1000,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 9107302cc444..0e8664375298 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -129,7 +129,8 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
 		len -= ALIGN(tlv_len, 4);
 		data += sizeof(*tlv) + ALIGN(tlv_len, 4);
 
-		if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
+		if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE ||
+		    tlv_type > IWL_UCODE_TLV_DEBUG_MAX)
 			continue;
 
 		hdr = (void *)&tlv->data[0];
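The two iwlwifi hunks above convert the INI debug TLV IDs from a BIT(24) flag into arithmetic offsets, which turns the IDs into a contiguous [IWL_UCODE_TLV_DEBUG_BASE, IWL_UCODE_TLV_DEBUG_MAX] range that the parser can bounds-check instead of bit-testing. A small userspace illustration of the same encoding, using hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define TLV_DEBUG_GROUP	0x1000000u

enum tlv_type {
	TLV_BUF_ALLOC	= TLV_DEBUG_GROUP + 0x1,
	TLV_DEBUG_BASE	= TLV_BUF_ALLOC,	/* first debug ID */
	TLV_HCMD	= TLV_DEBUG_GROUP + 0x2,
	TLV_DEBUG_FLOW	= TLV_DEBUG_GROUP + 0x5,
	TLV_DEBUG_MAX	= TLV_DEBUG_FLOW,	/* last debug ID */
};

/* Two compares instead of a bit test: unrelated TLV values above the
 * group base but outside the known range are rejected, not accepted. */
static bool tlv_is_debug(uint32_t type)
{
	return type >= TLV_DEBUG_BASE && type <= TLV_DEBUG_MAX;
}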
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 9bf2407c9b4b..f043eefabb4e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -773,6 +773,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		return;
 
 	mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+	if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
+		IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+			dbgfs_dir);
+		return;
+	}
 
 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
 	    ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 00a47f6f1d81..ab68b5d53ec9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	ret = iwl_mvm_load_rt_fw(mvm);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
-		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
+		if (ret != -ERFKILL)
+			iwl_fw_dbg_error_collect(&mvm->fwrt,
+						 FW_DBG_TRIGGER_DRIVER);
 		goto error;
 	}
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 55d399899d1c..8da9e5572fcf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	mutex_lock(&mvm->mutex);
 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
 	err = iwl_run_init_mvm_ucode(mvm, true);
-	if (err)
+	if (err && err != -ERFKILL)
 		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
 	if (!iwlmvm_mod_params.init_dbg || !err)
 		iwl_mvm_stop_device(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 0b1b208de767..1824566d08fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -169,9 +169,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 
 /* iwl_mvm_create_skb Adds the rxb to a new skb */
-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
-			       u16 len, u8 crypt_len,
-			       struct iwl_rx_cmd_buffer *rxb)
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+			      struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+			      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
@@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 	 * present before copying packet data.
 	 */
 	hdrlen += crypt_len;
+
+	if (WARN_ONCE(headlen < hdrlen,
+		      "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+		      hdrlen, len, crypt_len)) {
+		/*
+		 * We warn and trace because we want to be able to see
+		 * it in trace-cmd as well.
+		 */
+		IWL_DEBUG_RX(mvm,
+			     "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+			     hdrlen, len, crypt_len);
+		return -EINVAL;
+	}
+
 	skb_put_data(skb, hdr, hdrlen);
 	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
 
@@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
 		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
 				fraglen, rxb->truesize);
 	}
+
+	return 0;
 }
 
 static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
@@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->boottime_ns = ktime_get_boot_ns();
 	}
 
-	iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
+	if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+		kfree_skb(skb);
+		goto out;
+	}
+
 	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
 		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
 						sta, csi);
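The rxmq.c change makes iwl_mvm_create_skb() reject frames whose reported header length exceeds the head length before anything is copied: headlen - hdrlen is unsigned, so an inconsistent device-reported length would otherwise underflow into an enormous copy. A userspace sketch of the same validate-before-copy rule, with hypothetical names:

#include <errno.h>
#include <stdint.h>
#include <string.h>

static int copy_frame(uint8_t *dst, const uint8_t *src,
		      uint16_t headlen, uint16_t hdrlen)
{
	if (headlen < hdrlen)	/* headlen - hdrlen would underflow below */
		return -EINVAL;

	memcpy(dst, src, hdrlen);			/* header */
	memcpy(dst + hdrlen, src + hdrlen,		/* payload head */
	       headlen - hdrlen);
	return 0;
}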
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index c5baaae8d38e..cccb8bbd7ea7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3654,20 +3654,27 @@ out_no_pci:
 
 void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+	u32 inta_addr, sw_err_bit;
+
+	if (trans_pcie->msix_enabled) {
+		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+		sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+	} else {
+		inta_addr = CSR_INT;
+		sw_err_bit = CSR_INT_BIT_SW_ERR;
+	}
 
 	iwl_disable_interrupts(trans);
 	iwl_force_nmi(trans);
 	while (time_after(timeout, jiffies)) {
-		u32 inta_hw = iwl_read32(trans,
-					 CSR_MSIX_HW_INT_CAUSES_AD);
+		u32 inta_hw = iwl_read32(trans, inta_addr);
 
 		/* Error detected by uCode */
-		if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
+		if (inta_hw & sw_err_bit) {
 			/* Clear causes register */
-			iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
-				    inta_hw &
-				    MSIX_HW_INT_CAUSES_REG_SW_ERR);
+			iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
 			break;
 		}
 
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index a85648342d15..d5a70340a945 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
 
 	adapter = card->adapter;
 
-	if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+	if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
 		mwifiex_dbg(adapter, WARN,
 			    "device already resumed\n");
 		return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7c1b362f599a..766f5779db92 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6262,8 +6262,7 @@ static int __init pci_setup(char *str)
 	} else if (!strncmp(str, "pcie_scan_all", 13)) {
 		pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 	} else if (!strncmp(str, "disable_acs_redir=", 18)) {
-		disable_acs_redir_param =
-			kstrdup(str + 18, GFP_KERNEL);
+		disable_acs_redir_param = str + 18;
 	} else {
 		printk(KERN_ERR "PCI: Unknown option `%s'\n",
 				str);
@@ -6274,3 +6273,19 @@ static int __init pci_setup(char *str)
 	return 0;
 }
 early_param("pci", pci_setup);
+
+/*
+ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
+ * to data in the __initdata section which will be freed after the init
+ * sequence is complete. We can't allocate memory in pci_setup() because some
+ * architectures do not have any memory allocation service available during
+ * an early_param() call. So we allocate memory and copy the variable here
+ * before the init section is freed.
+ */
+static int __init pci_realloc_setup_params(void)
+{
+	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+
+	return 0;
+}
+pure_initcall(pci_realloc_setup_params);
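The pci.c hunks above avoid allocating inside pci_setup(): the early_param handler only stashes the __initdata pointer, and a pure_initcall duplicates the string once the allocators are up but before the init sections are freed. A generic sketch of that lifetime pattern, with hypothetical names:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

static const char *my_param;	/* may point into __initdata at first */

static int __init my_setup(char *str)
{
	my_param = str;		/* no allocator is guaranteed this early */
	return 0;
}
early_param("myopt", my_setup);

/* Runs after slab is ready but before __initdata is discarded. */
static int __init my_realloc_params(void)
{
	my_param = kstrdup(my_param, GFP_KERNEL);
	return 0;
}
pure_initcall(my_realloc_params);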
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 5cbdbca904ac..362eb8cfa53b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -142,3 +142,11 @@ config PCIE_PTM
 
 	  This is only useful if you have devices that support PTM, but it
 	  is safe to enable even if you don't.
+
+config PCIE_BW
+	bool "PCI Express Bandwidth Change Notification"
+	depends on PCIEPORTBUS
+	help
+	  This enables PCI Express Bandwidth Change Notification.  If
+	  you know link width or rate changes occur only to correct
+	  unreliable links, you may answer Y.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index f1d7bc1e5efa..efb9d2e71e9e 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -3,7 +3,6 @@
 # Makefile for PCI Express features and port driver
 
 pcieportdrv-y			:= portdrv_core.o portdrv_pci.o err.o
-pcieportdrv-y			+= bw_notification.o
 
 obj-$(CONFIG_PCIEPORTBUS)	+= pcieportdrv.o
 
@@ -13,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
 obj-$(CONFIG_PCIE_PME)	+= pme.o
 obj-$(CONFIG_PCIE_DPC)	+= dpc.o
 obj-$(CONFIG_PCIE_PTM)	+= ptm.o
+obj-$(CONFIG_PCIE_BW)	+= bw_notification.o
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1d50dc58ac40..944827a8c7d3 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -49,7 +49,11 @@ int pcie_dpc_init(void);
 static inline int pcie_dpc_init(void) { return 0; }
 #endif
 
+#ifdef CONFIG_PCIE_BW
 int pcie_bandwidth_notification_init(void);
+#else
+static inline int pcie_bandwidth_notification_init(void) { return 0; }
+#endif
 
 /* Port Type */
 #define PCIE_ANY_PORT	(~0)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 7d04f9d087a6..1b330129089f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
 	 * 7.8.2, 7.10.10, 7.31.2.
 	 */
 
-	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
+		    PCIE_PORT_SERVICE_BWNOTIF)) {
 		pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
 		*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
 		nvec = *pme + 1;
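The four PCIe hunks above move bandwidth notification behind a new CONFIG_PCIE_BW option; the header keeps callers free of #ifdefs by supplying an inline stub when the option is off, so the object file is simply not linked and the call compiles away. A sketch of that convention with a hypothetical option name:

/* In a header; callers just call my_feature_init() unconditionally. */
#ifdef CONFIG_MY_FEATURE
int my_feature_init(void);
#else
static inline int my_feature_init(void) { return 0; }
#endif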
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 08d5037fd052..6887870ba32c 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
 	int avg_current;
 	u32 cc_lsb;
 
+	if (!divider)
+		return 0;
+
 	sample &= 0xffffff;		/* 24-bits, unsigned */
 	offset &= 0x7ff;		/* 10-bits, signed */
 
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index dce24f596160..5358a80d854f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
 	char *prop_buf;
 	char *attrname;
 
-	dev_dbg(dev, "uevent\n");
-
 	if (!psy || !psy->desc) {
 		dev_dbg(dev, "No power supply yet\n");
 		return ret;
 	}
 
-	dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
 	ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
 	if (ret)
 		return ret;
@@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
 		goto out;
 	}
 
-	dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
 	ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
 	kfree(attrname);
 	if (ret)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 8987cec9549d..ebcadaad89d1 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
 	pm_runtime_disable(dev);
 	pm_runtime_set_suspended(dev);
 
-	/* Undo any residual pm_autopm_get_interface_* calls */
-	for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
-		usb_autopm_put_interface_no_suspend(intf);
-	atomic_set(&intf->pm_usage_cnt, 0);
-
 	if (!error)
 		usb_autosuspend_device(udev);
 
@@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
 	int	status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put_sync(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
 	int	status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	pm_runtime_put_noidle(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
 	status = pm_runtime_get_sync(&intf->dev);
 	if (status < 0)
 		pm_runtime_put_sync(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
 	status = pm_runtime_get(&intf->dev);
 	if (status < 0 && status != -EINPROGRESS)
 		pm_runtime_put_noidle(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_inc(&intf->pm_usage_cnt);
 	pm_runtime_get_noresume(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
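With the driver-private pm_usage_cnt counter deleted throughout driver.c, USB drivers rely solely on the runtime-PM usage count in intf->dev.power, so the invariant is simply that every successful autopm get is matched by exactly one put. A hedged sketch of that balance in a hypothetical driver function (the autopm calls themselves are the real API):

#include <linux/usb.h>

static int my_driver_do_io(struct usb_interface *intf)
{
	int ret = usb_autopm_get_interface(intf);

	if (ret)
		return ret;	/* no usage reference taken on failure */

	/* ... submit URBs; the device is guaranteed resumed here ... */

	usb_autopm_put_interface(intf);	/* one put per successful get */
	return 0;
}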
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 82239f27c4cc..e844bb7b5676 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
 
 	if (dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
-	if (size <= 0 || !buf || !index)
+	if (size <= 0 || !buf)
 		return -EINVAL;
 	buf[0] = 0;
+	if (index <= 0 || index >= 256)
+		return -EINVAL;
 	tbuf = kmalloc(256, GFP_NOIO);
 	if (!tbuf)
 		return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index baf72f95f0f1..213b52508621 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g,
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
-	if (driver->max_speed == USB_SPEED_UNKNOWN)
+	switch (g->speed) {
+	/* All the speeds we support */
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+		break;
+	default:
+		dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+			driver->max_speed);
 		return -EINVAL;
+	}
 
 	/*
 	 * SLAVE side init ... the layer above hardware, which
@@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t)
 		/* Bus speed is 500000 bytes/ms, so use a little less */
 		total = 490000;
 		break;
-	default:
+	default:	/* Can't happen */
 		dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
-		return;
+		total = 0;
+		break;
 	}
 
 	/* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1828,7 +1839,7 @@ restart:
 
 		/* Used up this frame's bandwidth? */
 		if (total <= 0)
-			break;
+			continue;
 
 		/* find the gadget's ep for this request (if configured) */
 		address = usb_pipeendpoint (urb->pipe);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6d9fd5f64903..7b306aa22d25 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
 	usb_deregister_dev(interface, &yurex_class);
 
 	/* prevent more I/O from starting */
+	usb_poison_urb(dev->urb);
 	mutex_lock(&dev->io_mutex);
 	dev->interface = NULL;
 	mutex_unlock(&dev->io_mutex);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 31b024441938..cc794e25a0b6 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
 		break;
 	case RTS51X_STAT_IDLE:
 	case RTS51X_STAT_SS:
-		usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
 			usb_stor_dbg(us, "Ready to enter SS state\n");
 			rts51x_set_stat(chip, RTS51X_STAT_SS);
 			/* ignore mass storage interface's children */
 			pm_suspend_ignore_children(&us->pusb_intf->dev, true);
 			usb_autopm_put_interface_async(us->pusb_intf);
-			usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
-				     atomic_read(&us->pusb_intf->pm_usage_cnt),
+			usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
 				     atomic_read(&us->pusb_intf->dev.power.usage_count));
 		}
 		break;
@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
 	int ret;
 
 	if (working_scsi(srb)) {
-		usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "working scsi, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
 			ret = usb_autopm_get_interface(us->pusb_intf);
 			usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
 		}
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 97b09a42a10c..dbfb2f24d71e 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
-		/* validate packet size and number of packets */
-		unsigned int maxp, packets, bytes;
-
-		maxp = usb_endpoint_maxp(epd);
-		maxp *= usb_endpoint_maxp_mult(epd);
-		bytes = pdu->u.cmd_submit.transfer_buffer_length;
-		packets = DIV_ROUND_UP(bytes, maxp);
-
+		/* validate number of packets */
 		if (pdu->u.cmd_submit.number_of_packets < 0 ||
-		    pdu->u.cmd_submit.number_of_packets > packets) {
+		    pdu->u.cmd_submit.number_of_packets >
+		    USBIP_MAX_ISO_PACKETS) {
 			dev_err(&sdev->udev->dev,
 				"CMD_SUBMIT: isoc invalid num packets %d\n",
 				pdu->u.cmd_submit.number_of_packets);
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index bf8afe9b5883..8be857a4fa13 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
 #define USBIP_DIR_OUT	0x00
 #define USBIP_DIR_IN	0x01
 
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * compare for example the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
 /**
  * struct usbip_header_basic - data pertinent to every request
  * @command: the usbip request type
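The usbip hunks replace a bound derived from the (attacker-influenced) endpoint descriptor with the hard USBIP_MAX_ISO_PACKETS constant, so a malicious peer can no longer inflate the packet count. A userspace sketch of clamping an untrusted count before it feeds an allocation, with hypothetical names:

#include <stdlib.h>

#define MAX_ISO_PACKETS 1024	/* hard limit, independent of peer data */

static void *alloc_iso_frames(int number_of_packets, size_t frame_size)
{
	if (number_of_packets < 0 || number_of_packets > MAX_ISO_PACKETS)
		return NULL;	/* reject before sizing anything */
	return calloc((size_t)number_of_packets, frame_size);
}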
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 0f4ecfcdb549..a9fb77585272 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
 	/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
 	alt = 3;
 	err = usb_set_interface(dev->udev,
-		intf->altsetting[alt].desc.bInterfaceNumber, alt);
+		intf->cur_altsetting->desc.bInterfaceNumber, alt);
 	if (err) {
 		dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
 			"for %d interface: err=%d.\n", alt,
-			intf->altsetting[alt].desc.bInterfaceNumber, err);
+			intf->cur_altsetting->desc.bInterfaceNumber, err);
 		goto err_out_clear;
 	}
 
-	iface_desc = &intf->altsetting[alt];
+	iface_desc = intf->cur_altsetting;
 	if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
 		pr_info("Num endpoints=%d. It is not DS9490R.\n",
 			iface_desc->desc.bNumEndpoints);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 24615c76c1d0..bb28e2ead679 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -264,7 +264,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	bio_for_each_segment_all(bvec, &bio, i, iter_all) {
 		if (should_dirty && !PageCompound(bvec->bv_page))
 			set_page_dirty_lock(bvec->bv_page);
-		put_page(bvec->bv_page);
+		if (!bio_flagged(&bio, BIO_NO_PAGE_REF))
+			put_page(bvec->bv_page);
 	}
 
 	if (unlikely(bio.bi_status))
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 920bf3b4b0ef..cccc75d15970 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	unsigned long this_sum_bytes = 0;
 	int i;
 	u64 offset;
+	unsigned nofs_flag;
+
+	nofs_flag = memalloc_nofs_save();
+	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
+		       GFP_KERNEL);
+	memalloc_nofs_restore(nofs_flag);
 
-	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
-		       GFP_NOFS);
 	if (!sums)
 		return BLK_STS_RESOURCE;
 
@@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 
 			bytes_left = bio->bi_iter.bi_size - total_bytes;
 
-			sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
-				       GFP_NOFS);
+			nofs_flag = memalloc_nofs_save();
+			sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
+					      bytes_left), GFP_KERNEL);
+			memalloc_nofs_restore(nofs_flag);
 			BUG_ON(!sums); /* -ENOMEM */
 			sums->len = bytes_left;
 			ordered = btrfs_lookup_ordered_extent(inode,
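The btrfs hunks switch the checksum array to kvzalloc(GFP_KERNEL) inside a memalloc_nofs_save()/restore() section (paired with kvfree() in the ordered-data.c hunk below): kvmalloc's vmalloc fallback only works reliably with GFP_KERNEL, while the scope marker still keeps every allocation in the section from recursing into the filesystem. A sketch of the pattern, not the btrfs code itself:

#include <linux/mm.h>
#include <linux/sched/mm.h>

static void *alloc_sums(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	/* every allocation below behaves as GFP_NOFS */
	nofs_flag = memalloc_nofs_save();
	p = kvzalloc(size, GFP_KERNEL);	/* may fall back to vmalloc */
	memalloc_nofs_restore(nofs_flag);

	return p;	/* release with kvfree(), never plain kfree() */
}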
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 82fdda8ff5ab..2973608824ec 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6783,7 +6783,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 	u64 extent_start = 0;
 	u64 extent_end = 0;
 	u64 objectid = btrfs_ino(inode);
-	u8 extent_type;
+	int extent_type = -1;
 	struct btrfs_path *path = NULL;
 	struct btrfs_root *root = inode->root;
 	struct btrfs_file_extent_item *item;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 6fde2b2741ef..45e3cfd1198b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
@@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 			cur = entry->list.next;
 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 			list_del(&sum->list);
-			kfree(sum);
+			kvfree(sum);
 		}
 		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 	}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index a8f429882249..0637149fb9f9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1766,6 +1766,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
 	struct ceph_inode_info *dci = ceph_inode(dir);
+	unsigned hash;
 
 	switch (dci->i_dir_layout.dl_dir_hash) {
 	case 0:	/* for backward compat */
@@ -1773,8 +1774,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 		return dn->d_name.hash;
 
 	default:
-		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+		spin_lock(&dn->d_lock);
+		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
 				     dn->d_name.name, dn->d_name.len);
+		spin_unlock(&dn->d_lock);
+		return hash;
 	}
 }
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 2d61ddda9bf5..c2feb310ac1e 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1163,6 +1163,19 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
 	return 0;
 }
 
+static int d_name_cmp(struct dentry *dentry, const char *name, size_t len)
+{
+	int ret;
+
+	/* take d_lock to ensure dentry->d_name stability */
+	spin_lock(&dentry->d_lock);
+	ret = dentry->d_name.len - len;
+	if (!ret)
+		ret = memcmp(dentry->d_name.name, name, len);
+	spin_unlock(&dentry->d_lock);
+	return ret;
+}
+
 /*
  * Incorporate results into the local cache. This is either just
  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
@@ -1412,7 +1425,8 @@ retry_lookup:
 		err = splice_dentry(&req->r_dentry, in);
 		if (err < 0)
 			goto done;
-	} else if (rinfo->head->is_dentry) {
+	} else if (rinfo->head->is_dentry &&
+		   !d_name_cmp(req->r_dentry, rinfo->dname, rinfo->dname_len)) {
 		struct ceph_vino *ptvino = NULL;
 
 		if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) ||
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 21c33ed048ed..9049c2a3e972 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1414,6 +1414,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
 			ci->i_prealloc_cap_flush = NULL;
 		}
+
+		if (drop &&
+		    ci->i_wrbuffer_ref_head == 0 &&
+		    ci->i_wr_ref == 0 &&
+		    ci->i_dirty_caps == 0 &&
+		    ci->i_flushing_caps == 0) {
+			ceph_put_snap_context(ci->i_head_snapc);
+			ci->i_head_snapc = NULL;
+		}
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	while (!list_empty(&to_remove)) {
@@ -2161,10 +2170,39 @@ retry:
 	return path;
 }
 
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+			     int *ppathlen)
+{
+	u32 len;
+	char *name;
+
+retry:
+	len = READ_ONCE(dentry->d_name.len);
+	name = kmalloc(len + 1, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
+	spin_lock(&dentry->d_lock);
+	if (dentry->d_name.len != len) {
+		spin_unlock(&dentry->d_lock);
+		kfree(name);
+		goto retry;
+	}
+	memcpy(name, dentry->d_name.name, len);
+	spin_unlock(&dentry->d_lock);
+
+	name[len] = '\0';
+	*ppath = name;
+	*ppathlen = len;
+	return 0;
+}
+
 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 			     const char **ppath, int *ppathlen, u64 *pino,
-			     int *pfreepath)
+			     bool *pfreepath, bool parent_locked)
 {
+	int ret;
 	char *path;
 
 	rcu_read_lock();
@@ -2173,8 +2211,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 	if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
 		*pino = ceph_ino(dir);
 		rcu_read_unlock();
-		*ppath = dentry->d_name.name;
-		*ppathlen = dentry->d_name.len;
+		if (parent_locked) {
+			*ppath = dentry->d_name.name;
+			*ppathlen = dentry->d_name.len;
+		} else {
+			ret = clone_dentry_name(dentry, ppath, ppathlen);
+			if (ret)
+				return ret;
+			*pfreepath = true;
+		}
 		return 0;
 	}
 	rcu_read_unlock();
@@ -2182,13 +2227,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	*ppath = path;
-	*pfreepath = 1;
+	*pfreepath = true;
 	return 0;
 }
 
 static int build_inode_path(struct inode *inode,
 			    const char **ppath, int *ppathlen, u64 *pino,
-			    int *pfreepath)
+			    bool *pfreepath)
 {
 	struct dentry *dentry;
 	char *path;
@@ -2204,7 +2249,7 @@ static int build_inode_path(struct inode *inode,
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	*ppath = path;
-	*pfreepath = 1;
+	*pfreepath = true;
 	return 0;
 }
 
@@ -2215,7 +2260,7 @@ static int build_inode_path(struct inode *inode,
 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
 				  struct inode *rdiri, const char *rpath,
 				  u64 rino, const char **ppath, int *pathlen,
-				  u64 *ino, int *freepath)
+				  u64 *ino, bool *freepath, bool parent_locked)
 {
 	int r = 0;
 
@@ -2225,7 +2270,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
 			     ceph_snap(rinode));
 	} else if (rdentry) {
 		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
-				      freepath);
+				      freepath, parent_locked);
 		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
 		     *ppath);
 	} else if (rpath || rino) {
@@ -2251,7 +2296,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 	const char *path2 = NULL;
 	u64 ino1 = 0, ino2 = 0;
 	int pathlen1 = 0, pathlen2 = 0;
-	int freepath1 = 0, freepath2 = 0;
+	bool freepath1 = false, freepath2 = false;
 	int len;
 	u16 releases;
 	void *p, *end;
@@ -2259,16 +2304,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 
 	ret = set_request_path_attr(req->r_inode, req->r_dentry,
 			      req->r_parent, req->r_path1, req->r_ino1.ino,
-			      &path1, &pathlen1, &ino1, &freepath1);
+			      &path1, &pathlen1, &ino1, &freepath1,
+			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
+					&req->r_req_flags));
 	if (ret < 0) {
 		msg = ERR_PTR(ret);
 		goto out;
 	}
 
+	/* If r_old_dentry is set, then assume that its parent is locked */
 	ret = set_request_path_attr(NULL, req->r_old_dentry,
 			      req->r_old_dentry_dir,
 			      req->r_path2, req->r_ino2.ino,
-			      &path2, &pathlen2, &ino2, &freepath2);
+			      &path2, &pathlen2, &ino2, &freepath2, true);
 	if (ret < 0) {
 		msg = ERR_PTR(ret);
 		goto out_free1;
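clone_dentry_name() above snapshots a dentry name without holding d_lock across the allocation: read the length optimistically, allocate, then re-check the length under the lock and retry if a rename raced in. A generic sketch of that optimistic-read-then-verify scheme, with hypothetical types:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct named {
	spinlock_t lock;
	unsigned int len;
	const char *name;
};

static char *clone_name(struct named *n)
{
	unsigned int len;
	char *copy;

retry:
	len = READ_ONCE(n->len);		/* unlocked snapshot */
	copy = kmalloc(len + 1, GFP_KERNEL);	/* may sleep, so no lock yet */
	if (!copy)
		return NULL;

	spin_lock(&n->lock);
	if (n->len != len) {			/* raced with an update */
		spin_unlock(&n->lock);
		kfree(copy);
		goto retry;			/* re-size and try again */
	}
	memcpy(copy, n->name, len);
	spin_unlock(&n->lock);

	copy[len] = '\0';
	return copy;
}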
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 89aa37fa0f84..b26e12cd8ec3 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -572,7 +572,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	old_snapc = NULL;
 
 update_snapc:
-	if (ci->i_head_snapc) {
+	if (ci->i_wrbuffer_ref_head == 0 &&
+	    ci->i_wr_ref == 0 &&
+	    ci->i_dirty_caps == 0 &&
+	    ci->i_flushing_caps == 0) {
+		ci->i_head_snapc = NULL;
+	} else {
 		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
 		dout(" new snapc is %p\n", new_snapc);
 	}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9c0ccc06d172..7037a137fa53 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2877,7 +2877,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
 	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb;
 	struct dentry *dentry = ctx->cfile->dentry;
-	unsigned int i;
 	int rc;
 
 	tcon = tlink_tcon(ctx->cfile->tlink);
@@ -2941,10 +2940,6 @@ restart_loop:
 		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
 	}
 
-	if (!ctx->direct_io)
-		for (i = 0; i < ctx->npages; i++)
-			put_page(ctx->bv[i].bv_page);
-
 	cifs_stats_bytes_written(tcon, ctx->total_len);
 	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
 
@@ -3582,7 +3577,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
 	struct iov_iter *to = &ctx->iter;
 	struct cifs_sb_info *cifs_sb;
 	struct cifs_tcon *tcon;
-	unsigned int i;
 	int rc;
 
 	tcon = tlink_tcon(ctx->cfile->tlink);
@@ -3666,15 +3660,8 @@ again:
 		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
 	}
 
-	if (!ctx->direct_io) {
-		for (i = 0; i < ctx->npages; i++) {
-			if (ctx->should_dirty)
-				set_page_dirty(ctx->bv[i].bv_page);
-			put_page(ctx->bv[i].bv_page);
-		}
-
+	if (!ctx->direct_io)
 		ctx->total_len = ctx->len - iov_iter_count(to);
-	}
 
 	/* mask nodata case */
 	if (rc == -ENODATA)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 53fdb5df0d2e..538fd7d807e4 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
 	if (rc == 0 || rc != -EBUSY)
 		goto do_rename_exit;
 
+	/* Don't fall back to using SMB on SMB 2+ mount */
+	if (server->vals->protocol_id != 0)
+		goto do_rename_exit;
+
 	/* open-file renames don't work across directories */
 	if (to_dentry->d_parent != from_dentry->d_parent)
 		goto do_rename_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 1e1626a2cfc3..0dc6f08020ac 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -789,6 +789,11 @@ cifs_aio_ctx_alloc(void)
 {
 	struct cifs_aio_ctx *ctx;
 
+	/*
+	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
+	 * to false so that we know when we have to unreference pages within
+	 * cifs_aio_ctx_release()
+	 */
 	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
 	if (!ctx)
 		return NULL;
@@ -807,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
 			struct cifs_aio_ctx, refcount);
 
 	cifsFileInfo_put(ctx->cfile);
-	kvfree(ctx->bv);
+
+	/*
+	 * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
+	 * which means that iov_iter_get_pages() was a success and thus that
+	 * we have taken reference on pages.
+	 */
+	if (ctx->bv) {
+		unsigned i;
+
+		for (i = 0; i < ctx->npages; i++) {
+			if (ctx->should_dirty)
+				set_page_dirty(ctx->bv[i].bv_page);
+			put_page(ctx->bv[i].bv_page);
+		}
+		kvfree(ctx->bv);
+	}
+
 	kfree(ctx);
 }
 
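The cifs hunks move the page-reference drop from the two collect_uncached_*_data() paths into cifs_aio_ctx_release(), so the last kref_put() becomes the single owner of the pages and no completion or error path can drop them twice or leak them. A sketch of that ownership shape with a hypothetical context struct:

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct io_ctx {
	struct kref refcount;
	struct page **pages;	/* references taken at setup time */
	unsigned int npages;
	bool should_dirty;
};

/* The release callback is the only place page references are dropped. */
static void io_ctx_release(struct kref *ref)
{
	struct io_ctx *ctx = container_of(ref, struct io_ctx, refcount);
	unsigned int i;

	for (i = 0; i < ctx->npages; i++) {
		if (ctx->should_dirty)
			set_page_dirty(ctx->pages[i]);
		put_page(ctx->pages[i]);
	}
	kfree(ctx->pages);
	kfree(ctx);
}

/* every completion path just does: kref_put(&ctx->refcount, io_ctx_release); */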
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b8f7262ac354..a37774a55f3a 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3466,6 +3466,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 					   io_parms->tcon->tid, ses->Suid,
 					   io_parms->offset, 0);
 			free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+			cifs_small_buf_release(req);
 			return rc == -ENODATA ? 0 : rc;
 		} else
 			trace_smb3_read_done(xid, req->PersistentFileId,
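The io_uring diff that follows replaces the old blanket smp_wmb()/smp_rmb() advice with precise acquire/release pairings between the kernel and the application. As a companion, a userspace sketch of the CQ side of that pairing (hypothetical ring layout, not the actual ABI structs): the acquire on the tail load pairs with the kernel's release store of the tail, and the release store of head keeps the entry reads from being reordered past the point where the kernel may reuse the slots.

#include <stdatomic.h>
#include <stdint.h>

struct cqe { uint64_t user_data; int32_t res; uint32_t flags; };
struct cq {
	_Atomic uint32_t head, tail;
	uint32_t mask;		/* ring_entries - 1 */
	struct cqe *cqes;
};

static unsigned reap(struct cq *cq, void (*handle)(struct cqe *))
{
	uint32_t head = atomic_load_explicit(&cq->head, memory_order_relaxed);
	uint32_t tail = atomic_load_explicit(&cq->tail, memory_order_acquire);
	unsigned n = 0;

	while (head != tail) {
		handle(&cq->cqes[head & cq->mask]);	/* safe: after acquire */
		head++;
		n++;
	}
	/* order entry loads before publishing the new head */
	atomic_store_explicit(&cq->head, head, memory_order_release);
	return n;
}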
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f65f85d89217..84efb8956734 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4,15 +4,28 @@
  * supporting fast/efficient IO.
  *
  * A note on the read/write ordering memory barriers that are matched between
- * the application and kernel side. When the application reads the CQ ring
- * tail, it must use an appropriate smp_rmb() to order with the smp_wmb()
- * the kernel uses after writing the tail. Failure to do so could cause a
- * delay in when the application notices that completion events available.
- * This isn't a fatal condition. Likewise, the application must use an
- * appropriate smp_wmb() both before writing the SQ tail, and after writing
- * the SQ tail. The first one orders the sqe writes with the tail write, and
- * the latter is paired with the smp_rmb() the kernel will issue before
- * reading the SQ tail on submission.
+ * the application and kernel side.
+ *
+ * After the application reads the CQ ring tail, it must use an
+ * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
+ * before writing the tail (using smp_load_acquire to read the tail will
+ * do). It also needs a smp_mb() before updating CQ head (ordering the
+ * entry load(s) with the head store), pairing with an implicit barrier
+ * through a control-dependency in io_get_cqring (smp_store_release to
+ * store head will do). Failure to do so could lead to reading invalid
+ * CQ entries.
+ *
+ * Likewise, the application must use an appropriate smp_wmb() before
+ * writing the SQ tail (ordering SQ entry stores with the tail store),
+ * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
+ * to store the tail will do). And it needs a barrier ordering the SQ
+ * head load before writing new SQ entries (smp_load_acquire to read
+ * head will do).
+ *
+ * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
+ * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
+ * updating the SQ tail; a full memory barrier smp_mb() is needed
+ * between.
  *
  * Also see the examples in the liburing library:
  *
@@ -70,20 +83,108 @@ struct io_uring {
 	u32 tail ____cacheline_aligned_in_smp;
 };
 
+/*
+ * This data is shared with the application through the mmap at offset
+ * IORING_OFF_SQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
 struct io_sq_ring {
+	/*
+	 * Head and tail offsets into the ring; the offsets need to be
+	 * masked to get valid indices.
+	 *
+	 * The kernel controls head and the application controls tail.
+	 */
 	struct io_uring		r;
+	/*
+	 * Bitmask to apply to head and tail offsets (constant, equals
+	 * ring_entries - 1)
+	 */
 	u32			ring_mask;
+	/* Ring size (constant, power of 2) */
 	u32			ring_entries;
+	/*
+	 * Number of invalid entries dropped by the kernel due to
+	 * invalid index stored in array
+	 *
+	 * Written by the kernel, shouldn't be modified by the
+	 * application (i.e. get number of "new events" by comparing to
+	 * cached value).
+	 *
+	 * After a new SQ head value was read by the application this
+	 * counter includes all submissions that were dropped reaching
+	 * the new SQ head (and possibly more).
+	 */
 	u32			dropped;
+	/*
+	 * Runtime flags
+	 *
+	 * Written by the kernel, shouldn't be modified by the
+	 * application.
+	 *
+	 * The application needs a full memory barrier before checking
+	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+	 */
 	u32			flags;
+	/*
+	 * Ring buffer of indices into array of io_uring_sqe, which is
+	 * mmapped by the application using the IORING_OFF_SQES offset.
+	 *
+	 * This indirection could e.g. be used to assign fixed
+	 * io_uring_sqe entries to operations and only submit them to
+	 * the queue when needed.
+	 *
+	 * The kernel modifies neither the indices array nor the entries
+	 * array.
+	 */
 	u32			array[];
 };
 
+/*
+ * This data is shared with the application through the mmap at offset
+ * IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_cqring_offsets when calling io_uring_setup.
+ */
 struct io_cq_ring {
+	/*
+	 * Head and tail offsets into the ring; the offsets need to be
+	 * masked to get valid indices.
+	 *
+	 * The application controls head and the kernel tail.
+	 */
 	struct io_uring		r;
+	/*
+	 * Bitmask to apply to head and tail offsets (constant, equals
+	 * ring_entries - 1)
+	 */
 	u32			ring_mask;
+	/* Ring size (constant, power of 2) */
 	u32			ring_entries;
+	/*
+	 * Number of completion events lost because the queue was full;
+	 * this should be avoided by the application by making sure
+	 * there are not more requests pending thatn there is space in
+	 * the completion queue.
+	 *
+	 * Written by the kernel, shouldn't be modified by the
+	 * application (i.e. get number of "new events" by comparing to
+	 * cached value).
+	 *
+	 * As completion events come in out of order this counter is not
+	 * ordered with any other data.
+	 */
 	u32			overflow;
+	/*
+	 * Ring buffer of completion events.
+	 *
+	 * The kernel writes completion events fresh every time they are
+	 * produced, so the application is allowed to modify pending
+	 * entries.
+	 */
 	struct io_uring_cqe	cqes[];
 };
 
@@ -221,7 +322,7 @@ struct io_kiocb {
 	struct list_head	list;
 	unsigned int		flags;
 	refcount_t		refs;
-#define REQ_F_FORCE_NONBLOCK	1	/* inline submission attempt */
+#define REQ_F_NOWAIT		1	/* must not punt to workers */
 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
 #define REQ_F_SEQ_PREV		8	/* sequential with previous */
@@ -317,12 +418,6 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 	/* order cqe stores with ring update */
 	smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
 
-	/*
-	 * Write sider barrier of tail update, app has read side. See
-	 * comment at the top of this file.
-	 */
-	smp_wmb();
-
 	if (wq_has_sleeper(&ctx->cq_wait)) {
 		wake_up_interruptible(&ctx->cq_wait);
 		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
@@ -336,8 +431,11 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 	unsigned tail;
 
 	tail = ctx->cached_cq_tail;
-	/* See comment at the top of the file */
-	smp_rmb();
+	/*
+	 * writes to the cq entry need to come after reading head; the
+	 * control dependency is enough as we're using WRITE_ONCE to
+	 * fill the cq entry
+	 */
 	if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
 		return NULL;
 
@@ -740,7 +838,7 @@ static bool io_file_supports_async(struct file *file)
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-		      bool force_nonblock, struct io_submit_state *state)
+		      bool force_nonblock)
 {
 	const struct io_uring_sqe *sqe = s->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -774,10 +872,14 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
 	if (unlikely(ret))
 		return ret;
-	if (force_nonblock) {
+
+	/* don't allow async punt if RWF_NOWAIT was requested */
+	if (kiocb->ki_flags & IOCB_NOWAIT)
+		req->flags |= REQ_F_NOWAIT;
+
+	if (force_nonblock)
 		kiocb->ki_flags |= IOCB_NOWAIT;
-		req->flags |= REQ_F_FORCE_NONBLOCK;
-	}
+
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
 		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
 		    !kiocb->ki_filp->f_op->iopoll)
@@ -938,7 +1040,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 }
 
 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
-		   bool force_nonblock, struct io_submit_state *state)
+		   bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -947,7 +1049,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	int ret;
 
-	ret = io_prep_rw(req, s, force_nonblock, state);
+	ret = io_prep_rw(req, s, force_nonblock);
 	if (ret)
 		return ret;
 	file = kiocb->ki_filp;
@@ -985,7 +1087,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 }
 
 static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
-		    bool force_nonblock, struct io_submit_state *state)
+		    bool force_nonblock)
989{ 1091{
990 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1092 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
991 struct kiocb *kiocb = &req->rw; 1093 struct kiocb *kiocb = &req->rw;
@@ -994,7 +1096,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
994 size_t iov_count; 1096 size_t iov_count;
995 int ret; 1097 int ret;
996 1098
997 ret = io_prep_rw(req, s, force_nonblock, state); 1099 ret = io_prep_rw(req, s, force_nonblock);
998 if (ret) 1100 if (ret)
999 return ret; 1101 return ret;
1000 1102
@@ -1336,8 +1438,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1336} 1438}
1337 1439
1338static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, 1440static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1339 const struct sqe_submit *s, bool force_nonblock, 1441 const struct sqe_submit *s, bool force_nonblock)
1340 struct io_submit_state *state)
1341{ 1442{
1342 int ret, opcode; 1443 int ret, opcode;
1343 1444
@@ -1353,18 +1454,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1353 case IORING_OP_READV: 1454 case IORING_OP_READV:
1354 if (unlikely(s->sqe->buf_index)) 1455 if (unlikely(s->sqe->buf_index))
1355 return -EINVAL; 1456 return -EINVAL;
1356 ret = io_read(req, s, force_nonblock, state); 1457 ret = io_read(req, s, force_nonblock);
1357 break; 1458 break;
1358 case IORING_OP_WRITEV: 1459 case IORING_OP_WRITEV:
1359 if (unlikely(s->sqe->buf_index)) 1460 if (unlikely(s->sqe->buf_index))
1360 return -EINVAL; 1461 return -EINVAL;
1361 ret = io_write(req, s, force_nonblock, state); 1462 ret = io_write(req, s, force_nonblock);
1362 break; 1463 break;
1363 case IORING_OP_READ_FIXED: 1464 case IORING_OP_READ_FIXED:
1364 ret = io_read(req, s, force_nonblock, state); 1465 ret = io_read(req, s, force_nonblock);
1365 break; 1466 break;
1366 case IORING_OP_WRITE_FIXED: 1467 case IORING_OP_WRITE_FIXED:
1367 ret = io_write(req, s, force_nonblock, state); 1468 ret = io_write(req, s, force_nonblock);
1368 break; 1469 break;
1369 case IORING_OP_FSYNC: 1470 case IORING_OP_FSYNC:
1370 ret = io_fsync(req, s->sqe, force_nonblock); 1471 ret = io_fsync(req, s->sqe, force_nonblock);
@@ -1437,8 +1538,7 @@ restart:
1437 struct sqe_submit *s = &req->submit; 1538 struct sqe_submit *s = &req->submit;
1438 const struct io_uring_sqe *sqe = s->sqe; 1539 const struct io_uring_sqe *sqe = s->sqe;
1439 1540
1440 /* Ensure we clear previously set forced non-block flag */ 1541 /* Ensure we clear previously set non-block flag */
1441 req->flags &= ~REQ_F_FORCE_NONBLOCK;
1442 req->rw.ki_flags &= ~IOCB_NOWAIT; 1542 req->rw.ki_flags &= ~IOCB_NOWAIT;
1443 1543
1444 ret = 0; 1544 ret = 0;
@@ -1457,7 +1557,7 @@ restart:
1457 s->has_user = cur_mm != NULL; 1557 s->has_user = cur_mm != NULL;
1458 s->needs_lock = true; 1558 s->needs_lock = true;
1459 do { 1559 do {
1460 ret = __io_submit_sqe(ctx, req, s, false, NULL); 1560 ret = __io_submit_sqe(ctx, req, s, false);
1461 /* 1561 /*
1462 * We can get EAGAIN for polled IO even though 1562 * We can get EAGAIN for polled IO even though
1463 * we're forcing a sync submission from here, 1563 * we're forcing a sync submission from here,
@@ -1468,10 +1568,11 @@ restart:
1468 break; 1568 break;
1469 cond_resched(); 1569 cond_resched();
1470 } while (1); 1570 } while (1);
1471
1472 /* drop submission reference */
1473 io_put_req(req);
1474 } 1571 }
1572
1573 /* drop submission reference */
1574 io_put_req(req);
1575
1475 if (ret) { 1576 if (ret) {
1476 io_cqring_add_event(ctx, sqe->user_data, ret, 0); 1577 io_cqring_add_event(ctx, sqe->user_data, ret, 0);
1477 io_put_req(req); 1578 io_put_req(req);
@@ -1623,8 +1724,8 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
1623 if (unlikely(ret)) 1724 if (unlikely(ret))
1624 goto out; 1725 goto out;
1625 1726
1626 ret = __io_submit_sqe(ctx, req, s, true, state); 1727 ret = __io_submit_sqe(ctx, req, s, true);
1627 if (ret == -EAGAIN) { 1728 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
1628 struct io_uring_sqe *sqe_copy; 1729 struct io_uring_sqe *sqe_copy;
1629 1730
1630 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); 1731 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -1698,24 +1799,10 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
1698 * write new data to them. 1799 * write new data to them.
1699 */ 1800 */
1700 smp_store_release(&ring->r.head, ctx->cached_sq_head); 1801 smp_store_release(&ring->r.head, ctx->cached_sq_head);
1701
1702 /*
1703 * write side barrier of head update, app has read side. See
1704 * comment at the top of this file
1705 */
1706 smp_wmb();
1707 } 1802 }
1708} 1803}
1709 1804
1710/* 1805/*
1711 * Undo last io_get_sqring()
1712 */
1713static void io_drop_sqring(struct io_ring_ctx *ctx)
1714{
1715 ctx->cached_sq_head--;
1716}
1717
1718/*
1719 * Fetch an sqe, if one is available. Note that s->sqe will point to memory 1806 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
1720 * that is mapped by userspace. This means that care needs to be taken to 1807 * that is mapped by userspace. This means that care needs to be taken to
1721 * ensure that reads are stable, as we cannot rely on userspace always 1808 * ensure that reads are stable, as we cannot rely on userspace always
@@ -1737,9 +1824,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
1737 * though the application is the one updating it. 1824 * though the application is the one updating it.
1738 */ 1825 */
1739 head = ctx->cached_sq_head; 1826 head = ctx->cached_sq_head;
1740 /* See comment at the top of this file */ 1827 /* make sure SQ entry isn't read before tail */
1741 smp_rmb(); 1828 if (head == smp_load_acquire(&ring->r.tail))
1742 if (head == READ_ONCE(ring->r.tail))
1743 return false; 1829 return false;
1744 1830
1745 head = READ_ONCE(ring->array[head & ctx->sq_mask]); 1831 head = READ_ONCE(ring->array[head & ctx->sq_mask]);
@@ -1753,8 +1839,6 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
1753 /* drop invalid entries */ 1839 /* drop invalid entries */
1754 ctx->cached_sq_head++; 1840 ctx->cached_sq_head++;
1755 ring->dropped++; 1841 ring->dropped++;
1756 /* See comment at the top of this file */
1757 smp_wmb();
1758 return false; 1842 return false;
1759} 1843}
1760 1844
@@ -1864,7 +1948,8 @@ static int io_sq_thread(void *data)
1864 1948
1865 /* Tell userspace we may need a wakeup call */ 1949 /* Tell userspace we may need a wakeup call */
1866 ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP; 1950 ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
1867 smp_wmb(); 1951 /* make sure to read SQ tail after writing flags */
1952 smp_mb();
1868 1953
1869 if (!io_get_sqring(ctx, &sqes[0])) { 1954 if (!io_get_sqring(ctx, &sqes[0])) {
1870 if (kthread_should_stop()) { 1955 if (kthread_should_stop()) {
@@ -1877,13 +1962,11 @@ static int io_sq_thread(void *data)
1877 finish_wait(&ctx->sqo_wait, &wait); 1962 finish_wait(&ctx->sqo_wait, &wait);
1878 1963
1879 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP; 1964 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
1880 smp_wmb();
1881 continue; 1965 continue;
1882 } 1966 }
1883 finish_wait(&ctx->sqo_wait, &wait); 1967 finish_wait(&ctx->sqo_wait, &wait);
1884 1968
1885 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP; 1969 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
1886 smp_wmb();
1887 } 1970 }
1888 1971
1889 i = 0; 1972 i = 0;
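The smp_mb() added in the hunk above is the classic store-load case: the poll thread stores the NEED_WAKEUP flag and must then re-read the SQ tail, while the submitter stores the tail and then reads the flags; without a full barrier on both sides, each can miss the other's store and the poller sleeps on a non-empty ring. A hedged user-space analogue in C11 atomics (all names illustrative):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t sq_tail;
static _Atomic uint32_t sq_flags;	/* bit 0 plays IORING_SQ_NEED_WAKEUP */

/* Submitter: publish the new tail, then check whether the poller asked
 * to be woken. The fence orders the store before the load. */
static int needs_wakeup(uint32_t new_tail)
{
	atomic_store_explicit(&sq_tail, new_tail, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&sq_flags, memory_order_relaxed) & 1;
}

/* Poller: advertise that it is about to sleep, then re-check the tail. */
static int work_pending(uint32_t cached_head)
{
	atomic_fetch_or_explicit(&sq_flags, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&sq_tail, memory_order_relaxed) != cached_head;
}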
@@ -1928,7 +2011,7 @@ static int io_sq_thread(void *data)
1928static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit) 2011static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
1929{ 2012{
1930 struct io_submit_state state, *statep = NULL; 2013 struct io_submit_state state, *statep = NULL;
1931 int i, ret = 0, submit = 0; 2014 int i, submit = 0;
1932 2015
1933 if (to_submit > IO_PLUG_THRESHOLD) { 2016 if (to_submit > IO_PLUG_THRESHOLD) {
1934 io_submit_state_start(&state, ctx, to_submit); 2017 io_submit_state_start(&state, ctx, to_submit);
@@ -1937,6 +2020,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
1937 2020
1938 for (i = 0; i < to_submit; i++) { 2021 for (i = 0; i < to_submit; i++) {
1939 struct sqe_submit s; 2022 struct sqe_submit s;
2023 int ret;
1940 2024
1941 if (!io_get_sqring(ctx, &s)) 2025 if (!io_get_sqring(ctx, &s))
1942 break; 2026 break;
@@ -1944,21 +2028,18 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
1944 s.has_user = true; 2028 s.has_user = true;
1945 s.needs_lock = false; 2029 s.needs_lock = false;
1946 s.needs_fixed_file = false; 2030 s.needs_fixed_file = false;
2031 submit++;
1947 2032
1948 ret = io_submit_sqe(ctx, &s, statep); 2033 ret = io_submit_sqe(ctx, &s, statep);
1949 if (ret) { 2034 if (ret)
1950 io_drop_sqring(ctx); 2035 io_cqring_add_event(ctx, s.sqe->user_data, ret, 0);
1951 break;
1952 }
1953
1954 submit++;
1955 } 2036 }
1956 io_commit_sqring(ctx); 2037 io_commit_sqring(ctx);
1957 2038
1958 if (statep) 2039 if (statep)
1959 io_submit_state_end(statep); 2040 io_submit_state_end(statep);
1960 2041
1961 return submit ? submit : ret; 2042 return submit;
1962} 2043}
1963 2044
1964static unsigned io_cqring_events(struct io_cq_ring *ring) 2045static unsigned io_cqring_events(struct io_cq_ring *ring)
@@ -2239,10 +2320,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
2239 mmgrab(current->mm); 2320 mmgrab(current->mm);
2240 ctx->sqo_mm = current->mm; 2321 ctx->sqo_mm = current->mm;
2241 2322
2242 ret = -EINVAL;
2243 if (!cpu_possible(p->sq_thread_cpu))
2244 goto err;
2245
2246 if (ctx->flags & IORING_SETUP_SQPOLL) { 2323 if (ctx->flags & IORING_SETUP_SQPOLL) {
2247 ret = -EPERM; 2324 ret = -EPERM;
2248 if (!capable(CAP_SYS_ADMIN)) 2325 if (!capable(CAP_SYS_ADMIN))
@@ -2253,11 +2330,11 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
2253 ctx->sq_thread_idle = HZ; 2330 ctx->sq_thread_idle = HZ;
2254 2331
2255 if (p->flags & IORING_SETUP_SQ_AFF) { 2332 if (p->flags & IORING_SETUP_SQ_AFF) {
2256 int cpu; 2333 int cpu = array_index_nospec(p->sq_thread_cpu,
2334 nr_cpu_ids);
2257 2335
2258 cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS);
2259 ret = -EINVAL; 2336 ret = -EINVAL;
2260 if (!cpu_possible(p->sq_thread_cpu)) 2337 if (!cpu_possible(cpu))
2261 goto err; 2338 goto err;
2262 2339
2263 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread, 2340 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
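The hunk above both tightens the Spectre-v1 clamp (nr_cpu_ids, the real table bound, instead of NR_CPUS) and checks the clamped value rather than the raw user input. A hedged user-space sketch of the underlying mask trick, following the same arithmetic as the kernel's generic array_index_mask_nospec() and relying on arithmetic right shift as the kernel does (simplified; real kernels have per-arch versions):

#include <stdio.h>

/* All-ones when index < size, all-zeroes otherwise, computed without a
 * conditional branch the CPU could speculate past. */
static unsigned long mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (8 * sizeof(long) - 1);
}

int main(void)
{
	unsigned long table_size = 8, user_idx = 12;
	unsigned long safe = user_idx & mask_nospec(user_idx, table_size);

	/* the architectural range check is still required; the mask only
	 * bounds what a mis-speculated load could touch */
	printf("clamped index: %lu\n", safe);	/* prints 0 */
	return 0;
}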
@@ -2320,8 +2397,12 @@ static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
2320 2397
2321static void io_mem_free(void *ptr) 2398static void io_mem_free(void *ptr)
2322{ 2399{
2323 struct page *page = virt_to_head_page(ptr); 2400 struct page *page;
2401
2402 if (!ptr)
2403 return;
2324 2404
2405 page = virt_to_head_page(ptr);
2325 if (put_page_testzero(page)) 2406 if (put_page_testzero(page))
2326 free_compound_page(page); 2407 free_compound_page(page);
2327} 2408}
@@ -2362,7 +2443,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
2362 2443
2363 if (ctx->account_mem) 2444 if (ctx->account_mem)
2364 io_unaccount_mem(ctx->user, imu->nr_bvecs); 2445 io_unaccount_mem(ctx->user, imu->nr_bvecs);
2365 kfree(imu->bvec); 2446 kvfree(imu->bvec);
2366 imu->nr_bvecs = 0; 2447 imu->nr_bvecs = 0;
2367 } 2448 }
2368 2449
@@ -2454,9 +2535,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2454 if (!pages || nr_pages > got_pages) { 2535 if (!pages || nr_pages > got_pages) {
2455 kfree(vmas); 2536 kfree(vmas);
2456 kfree(pages); 2537 kfree(pages);
2457 pages = kmalloc_array(nr_pages, sizeof(struct page *), 2538 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
2458 GFP_KERNEL); 2539 GFP_KERNEL);
2459 vmas = kmalloc_array(nr_pages, 2540 vmas = kvmalloc_array(nr_pages,
2460 sizeof(struct vm_area_struct *), 2541 sizeof(struct vm_area_struct *),
2461 GFP_KERNEL); 2542 GFP_KERNEL);
2462 if (!pages || !vmas) { 2543 if (!pages || !vmas) {
@@ -2468,7 +2549,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2468 got_pages = nr_pages; 2549 got_pages = nr_pages;
2469 } 2550 }
2470 2551
2471 imu->bvec = kmalloc_array(nr_pages, sizeof(struct bio_vec), 2552 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
2472 GFP_KERNEL); 2553 GFP_KERNEL);
2473 ret = -ENOMEM; 2554 ret = -ENOMEM;
2474 if (!imu->bvec) { 2555 if (!imu->bvec) {
@@ -2507,6 +2588,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2507 } 2588 }
2508 if (ctx->account_mem) 2589 if (ctx->account_mem)
2509 io_unaccount_mem(ctx->user, nr_pages); 2590 io_unaccount_mem(ctx->user, nr_pages);
2591 kvfree(imu->bvec);
2510 goto err; 2592 goto err;
2511 } 2593 }
2512 2594
@@ -2529,12 +2611,12 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2529 2611
2530 ctx->nr_user_bufs++; 2612 ctx->nr_user_bufs++;
2531 } 2613 }
2532 kfree(pages); 2614 kvfree(pages);
2533 kfree(vmas); 2615 kvfree(vmas);
2534 return 0; 2616 return 0;
2535err: 2617err:
2536 kfree(pages); 2618 kvfree(pages);
2537 kfree(vmas); 2619 kvfree(vmas);
2538 io_sqe_buffer_unregister(ctx); 2620 io_sqe_buffer_unregister(ctx);
2539 return ret; 2621 return ret;
2540} 2622}
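The allocation conversions above all follow one rule: a buffer obtained with kvmalloc()/kvmalloc_array() may be backed by either kmalloc or vmalloc, so every path that releases it, including the unwind paths this hunk adds, must use kvfree(). A kernel-style sketch of the pairing (illustrative helpers, not the full registration code):

#include <linux/bvec.h>
#include <linux/mm.h>

static struct bio_vec *alloc_bvec_table(unsigned int nr_pages)
{
	/* falls back to vmalloc when the table is too large for kmalloc */
	return kvmalloc_array(nr_pages, sizeof(struct bio_vec), GFP_KERNEL);
}

static void free_bvec_table(struct bio_vec *bvec)
{
	/* handles both backings; plain kfree() would be a bug whenever
	 * the vmalloc fallback had been taken */
	kvfree(bvec);
}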
@@ -2572,9 +2654,13 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2572 __poll_t mask = 0; 2654 __poll_t mask = 0;
2573 2655
2574 poll_wait(file, &ctx->cq_wait, wait); 2656 poll_wait(file, &ctx->cq_wait, wait);
2575 /* See comment at the top of this file */ 2657 /*
2658 * synchronizes with barrier from wq_has_sleeper call in
2659 * io_commit_cqring
2660 */
2576 smp_rmb(); 2661 smp_rmb();
2577 if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head) 2662 if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
2663 ctx->sq_ring->ring_entries)
2578 mask |= EPOLLOUT | EPOLLWRNORM; 2664 mask |= EPOLLOUT | EPOLLWRNORM;
2579 if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail) 2665 if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
2580 mask |= EPOLLIN | EPOLLRDNORM; 2666 mask |= EPOLLIN | EPOLLRDNORM;
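The poll fix above switches to the standard free-running-counter idiom: with u32 head/tail that are never masked, the ring is full exactly when tail - head == ring_entries, and unsigned arithmetic keeps that true across the 2^32 wraparound, which the old tail + 1 comparison did not. A self-contained illustration with assumed values (not kernel code):

#include <assert.h>
#include <stdint.h>

/* Full when the unsigned difference equals the ring size. */
static int sq_full(uint32_t tail, uint32_t head, uint32_t ring_entries)
{
	return tail - head == ring_entries;
}

int main(void)
{
	assert( sq_full(258u, 250u, 8u));		/* plainly full */
	assert( sq_full(6u, 0xFFFFFFFEu, 8u));		/* full across wrap */
	assert(!sq_full(5u, 0xFFFFFFFEu, 8u));		/* one slot free */
	return 0;
}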
@@ -2685,24 +2771,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
2685 mutex_lock(&ctx->uring_lock); 2771 mutex_lock(&ctx->uring_lock);
2686 submitted = io_ring_submit(ctx, to_submit); 2772 submitted = io_ring_submit(ctx, to_submit);
2687 mutex_unlock(&ctx->uring_lock); 2773 mutex_unlock(&ctx->uring_lock);
2688
2689 if (submitted < 0)
2690 goto out_ctx;
2691 } 2774 }
2692 if (flags & IORING_ENTER_GETEVENTS) { 2775 if (flags & IORING_ENTER_GETEVENTS) {
2693 unsigned nr_events = 0; 2776 unsigned nr_events = 0;
2694 2777
2695 min_complete = min(min_complete, ctx->cq_entries); 2778 min_complete = min(min_complete, ctx->cq_entries);
2696 2779
2697 /*
2698 * The application could have included the 'to_submit' count
2699 * in how many events it wanted to wait for. If we failed to
2700 * submit the desired count, we may need to adjust the number
2701 * of events to poll/wait for.
2702 */
2703 if (submitted < to_submit)
2704 min_complete = min_t(unsigned, submitted, min_complete);
2705
2706 if (ctx->flags & IORING_SETUP_IOPOLL) { 2780 if (ctx->flags & IORING_SETUP_IOPOLL) {
2707 mutex_lock(&ctx->uring_lock); 2781 mutex_lock(&ctx->uring_lock);
2708 ret = io_iopoll_check(ctx, &nr_events, min_complete); 2782 ret = io_iopoll_check(ctx, &nr_events, min_complete);
@@ -2748,17 +2822,12 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
2748 return -EOVERFLOW; 2822 return -EOVERFLOW;
2749 2823
2750 ctx->sq_sqes = io_mem_alloc(size); 2824 ctx->sq_sqes = io_mem_alloc(size);
2751 if (!ctx->sq_sqes) { 2825 if (!ctx->sq_sqes)
2752 io_mem_free(ctx->sq_ring);
2753 return -ENOMEM; 2826 return -ENOMEM;
2754 }
2755 2827
2756 cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries)); 2828 cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
2757 if (!cq_ring) { 2829 if (!cq_ring)
2758 io_mem_free(ctx->sq_ring);
2759 io_mem_free(ctx->sq_sqes);
2760 return -ENOMEM; 2830 return -ENOMEM;
2761 }
2762 2831
2763 ctx->cq_ring = cq_ring; 2832 ctx->cq_ring = cq_ring;
2764 cq_ring->ring_mask = p->cq_entries - 1; 2833 cq_ring->ring_mask = p->cq_entries - 1;
@@ -2934,6 +3003,14 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
2934{ 3003{
2935 int ret; 3004 int ret;
2936 3005
3006 /*
3007 * We're inside the ring mutex, if the ref is already dying, then
3008 * someone else killed the ctx or is already going through
3009 * io_uring_register().
3010 */
3011 if (percpu_ref_is_dying(&ctx->refs))
3012 return -ENXIO;
3013
2937 percpu_ref_kill(&ctx->refs); 3014 percpu_ref_kill(&ctx->refs);
2938 3015
2939 /* 3016 /*
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 6b9c27548997..63c6bb1f8c4d 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -346,10 +346,16 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
346 __kernel_fsid_t fsid = {}; 346 __kernel_fsid_t fsid = {};
347 347
348 fsnotify_foreach_obj_type(type) { 348 fsnotify_foreach_obj_type(type) {
349 struct fsnotify_mark_connector *conn;
350
349 if (!fsnotify_iter_should_report_type(iter_info, type)) 351 if (!fsnotify_iter_should_report_type(iter_info, type))
350 continue; 352 continue;
351 353
352 fsid = iter_info->marks[type]->connector->fsid; 354 conn = READ_ONCE(iter_info->marks[type]->connector);
355 /* Mark is just getting destroyed or created? */
356 if (!conn)
357 continue;
358 fsid = conn->fsid;
353 if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1])) 359 if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
354 continue; 360 continue;
355 return fsid; 361 return fsid;
@@ -408,8 +414,12 @@ static int fanotify_handle_event(struct fsnotify_group *group,
408 return 0; 414 return 0;
409 } 415 }
410 416
411 if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) 417 if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
412 fsid = fanotify_get_fsid(iter_info); 418 fsid = fanotify_get_fsid(iter_info);
419 /* Racing with mark destruction or creation? */
420 if (!fsid.val[0] && !fsid.val[1])
421 return 0;
422 }
413 423
414 event = fanotify_alloc_event(group, inode, mask, data, data_type, 424 event = fanotify_alloc_event(group, inode, mask, data, data_type,
415 &fsid); 425 &fsid);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d593d4269561..22acb0a79b53 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -239,13 +239,13 @@ static void fsnotify_drop_object(unsigned int type, void *objp)
239 239
240void fsnotify_put_mark(struct fsnotify_mark *mark) 240void fsnotify_put_mark(struct fsnotify_mark *mark)
241{ 241{
242 struct fsnotify_mark_connector *conn; 242 struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector);
243 void *objp = NULL; 243 void *objp = NULL;
244 unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED; 244 unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
245 bool free_conn = false; 245 bool free_conn = false;
246 246
247 /* Catch marks that were actually never attached to object */ 247 /* Catch marks that were actually never attached to object */
248 if (!mark->connector) { 248 if (!conn) {
249 if (refcount_dec_and_test(&mark->refcnt)) 249 if (refcount_dec_and_test(&mark->refcnt))
250 fsnotify_final_mark_destroy(mark); 250 fsnotify_final_mark_destroy(mark);
251 return; 251 return;
@@ -255,10 +255,9 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
255 * We have to be careful so that traversals of obj_list under lock can 255 * We have to be careful so that traversals of obj_list under lock can
256 * safely grab mark reference. 256 * safely grab mark reference.
257 */ 257 */
258 if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock)) 258 if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))
259 return; 259 return;
260 260
261 conn = mark->connector;
262 hlist_del_init_rcu(&mark->obj_list); 261 hlist_del_init_rcu(&mark->obj_list);
263 if (hlist_empty(&conn->list)) { 262 if (hlist_empty(&conn->list)) {
264 objp = fsnotify_detach_connector_from_object(conn, &type); 263 objp = fsnotify_detach_connector_from_object(conn, &type);
@@ -266,7 +265,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
266 } else { 265 } else {
267 __fsnotify_recalc_mask(conn); 266 __fsnotify_recalc_mask(conn);
268 } 267 }
269 mark->connector = NULL; 268 WRITE_ONCE(mark->connector, NULL);
270 spin_unlock(&conn->lock); 269 spin_unlock(&conn->lock);
271 270
272 fsnotify_drop_object(type, objp); 271 fsnotify_drop_object(type, objp);
@@ -620,7 +619,7 @@ restart:
620 /* mark should be the last entry. last is the current last entry */ 619 /* mark should be the last entry. last is the current last entry */
621 hlist_add_behind_rcu(&mark->obj_list, &last->obj_list); 620 hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
622added: 621added:
623 mark->connector = conn; 622 WRITE_ONCE(mark->connector, conn);
624out_err: 623out_err:
625 spin_unlock(&conn->lock); 624 spin_unlock(&conn->lock);
626 spin_unlock(&mark->lock); 625 spin_unlock(&mark->lock);
@@ -808,6 +807,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
808 refcount_set(&mark->refcnt, 1); 807 refcount_set(&mark->refcnt, 1);
809 fsnotify_get_group(group); 808 fsnotify_get_group(group);
810 mark->group = group; 809 mark->group = group;
810 WRITE_ONCE(mark->connector, NULL);
811} 811}
812 812
813/* 813/*
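The fanotify and mark.c changes above implement one lockless pattern: mark->connector may flip to or from NULL while readers hold no lock, so every access is marked with READ_ONCE()/WRITE_ONCE(), and readers load the pointer once into a local and use only that snapshot. A hedged C11 analogue (names and the fsid payload are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct connector { int fsid[2]; };

static _Atomic(struct connector *) mark_connector;

/* writer side (done under the object lock in the kernel code) */
static void publish(struct connector *conn)
{
	atomic_store_explicit(&mark_connector, conn, memory_order_release);
}

/* reader side: one marked load, then use the snapshot only; re-reading
 * the shared pointer could return NULL between two uses */
static int read_fsid_first_word(void)
{
	struct connector *conn =
		atomic_load_explicit(&mark_connector, memory_order_acquire);

	if (!conn)
		return 0;	/* mark being created or destroyed: skip it */
	return conn->fsid[0];
}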
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 2d61e5e8c863..c74570736b24 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1643,9 +1643,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
1643 if (--header->nreg) 1643 if (--header->nreg)
1644 return; 1644 return;
1645 1645
1646 if (parent) 1646 if (parent) {
1647 put_links(header); 1647 put_links(header);
1648 start_unregistering(header); 1648 start_unregistering(header);
1649 }
1650
1649 if (!--header->count) 1651 if (!--header->count)
1650 kfree_rcu(header, rcu); 1652 kfree_rcu(header, rcu);
1651 1653
diff --git a/fs/splice.c b/fs/splice.c
index 98943d9b219c..25212dcca2df 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -330,8 +330,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
330 .get = generic_pipe_buf_get, 330 .get = generic_pipe_buf_get,
331}; 331};
332 332
333static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe, 333int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
334 struct pipe_buffer *buf) 334 struct pipe_buffer *buf)
335{ 335{
336 return 1; 336 return 1;
337} 337}
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cbf3180cb612..668ad971cd7b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -420,7 +420,6 @@ extern struct ttm_bo_global {
420 /** 420 /**
421 * Protected by ttm_global_mutex. 421 * Protected by ttm_global_mutex.
422 */ 422 */
423 unsigned int use_count;
424 struct list_head device_list; 423 struct list_head device_list;
425 424
426 /** 425 /**
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9a21848fdb07..59631dd0777c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -546,7 +546,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
546 } \ 546 } \
547_out: \ 547_out: \
548 rcu_read_unlock(); \ 548 rcu_read_unlock(); \
549 preempt_enable_no_resched(); \ 549 preempt_enable(); \
550 _ret; \ 550 _ret; \
551 }) 551 })
552 552
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index abb2dac3da9b..5c626fdc10db 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -176,6 +176,7 @@ void free_pipe_info(struct pipe_inode_info *);
176bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); 176bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
177int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); 177int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
178int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); 178int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
179int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
179void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); 180void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
180void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); 181void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
181 182
diff --git a/include/linux/uio.h b/include/linux/uio.h
index f184af1999a8..2d0131ad4604 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -60,7 +60,7 @@ struct iov_iter {
60 60
61static inline enum iter_type iov_iter_type(const struct iov_iter *i) 61static inline enum iter_type iov_iter_type(const struct iov_iter *i)
62{ 62{
63 return i->type & ~(READ | WRITE); 63 return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
64} 64}
65 65
66static inline bool iter_is_iovec(const struct iov_iter *i) 66static inline bool iter_is_iovec(const struct iov_iter *i)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5e49e82c4368..ff010d1fd1c7 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
200 * @dev: driver model's view of this device 200 * @dev: driver model's view of this device
201 * @usb_dev: if an interface is bound to the USB major, this will point 201 * @usb_dev: if an interface is bound to the USB major, this will point
202 * to the sysfs representation for that device. 202 * to the sysfs representation for that device.
203 * @pm_usage_cnt: PM usage counter for this interface
204 * @reset_ws: Used for scheduling resets from atomic context. 203 * @reset_ws: Used for scheduling resets from atomic context.
205 * @resetting_device: USB core reset the device, so use alt setting 0 as 204 * @resetting_device: USB core reset the device, so use alt setting 0 as
206 * current; needs bandwidth alloc after reset. 205 * current; needs bandwidth alloc after reset.
@@ -257,7 +256,6 @@ struct usb_interface {
257 256
258 struct device dev; /* interface specific device info */ 257 struct device dev; /* interface specific device info */
259 struct device *usb_dev; 258 struct device *usb_dev;
260 atomic_t pm_usage_cnt; /* usage counter for autosuspend */
261 struct work_struct reset_ws; /* for resets in atomic context */ 259 struct work_struct reset_ws; /* for resets in atomic context */
262}; 260};
263#define to_usb_interface(d) container_of(d, struct usb_interface, dev) 261#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6640f84fe536..6d5beac29bc1 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -105,7 +105,6 @@ enum sctp_verb {
105 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ 105 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
106 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ 106 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
107 SCTP_CMD_SEND_MSG, /* Send the whole user message */ 107 SCTP_CMD_SEND_MSG, /* Send the whole user message */
108 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
109 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ 108 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
110 SCTP_CMD_SET_ASOC, /* Restore association context */ 109 SCTP_CMD_SET_ASOC, /* Restore association context */
111 SCTP_CMD_LAST 110 SCTP_CMD_LAST
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index eb5018b1cf9c..debcc5198e33 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -306,7 +306,8 @@ struct xfrm_replay {
306}; 306};
307 307
308struct xfrm_if_cb { 308struct xfrm_if_cb {
309 struct xfrm_if *(*decode_session)(struct sk_buff *skb); 309 struct xfrm_if *(*decode_session)(struct sk_buff *skb,
310 unsigned short family);
310}; 311};
311 312
312void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb); 313void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -1335,6 +1336,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
1335 return atomic_read(&x->tunnel_users); 1336 return atomic_read(&x->tunnel_users);
1336} 1337}
1337 1338
1339static inline bool xfrm_id_proto_valid(u8 proto)
1340{
1341 switch (proto) {
1342 case IPPROTO_AH:
1343 case IPPROTO_ESP:
1344 case IPPROTO_COMP:
1345#if IS_ENABLED(CONFIG_IPV6)
1346 case IPPROTO_ROUTING:
1347 case IPPROTO_DSTOPTS:
1348#endif
1349 return true;
1350 default:
1351 return false;
1352 }
1353}
1354
1355/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1338static inline int xfrm_id_proto_match(u8 proto, u8 userproto) 1356static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1339{ 1357{
1340 return (!userproto || proto == userproto || 1358 return (!userproto || proto == userproto ||
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 87b3198f4b5d..f4d4010b7e3e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -238,6 +238,7 @@ enum mlx5_ib_query_dev_resp_flags {
238 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, 238 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
239 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, 239 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
240 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2, 240 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
241 MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
241}; 242};
242 243
243enum mlx5_ib_tunnel_offloads { 244enum mlx5_ib_tunnel_offloads {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 271717246af3..7b05e8938d5c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4349,15 +4349,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
4349 return 0; 4349 return 0;
4350} 4350}
4351 4351
4352static void __find_good_pkt_pointers(struct bpf_func_state *state,
4353 struct bpf_reg_state *dst_reg,
4354 enum bpf_reg_type type, u16 new_range)
4355{
4356 struct bpf_reg_state *reg;
4357 int i;
4358
4359 for (i = 0; i < MAX_BPF_REG; i++) {
4360 reg = &state->regs[i];
4361 if (reg->type == type && reg->id == dst_reg->id)
4362 /* keep the maximum range already checked */
4363 reg->range = max(reg->range, new_range);
4364 }
4365
4366 bpf_for_each_spilled_reg(i, state, reg) {
4367 if (!reg)
4368 continue;
4369 if (reg->type == type && reg->id == dst_reg->id)
4370 reg->range = max(reg->range, new_range);
4371 }
4372}
4373
4352static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 4374static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
4353 struct bpf_reg_state *dst_reg, 4375 struct bpf_reg_state *dst_reg,
4354 enum bpf_reg_type type, 4376 enum bpf_reg_type type,
4355 bool range_right_open) 4377 bool range_right_open)
4356{ 4378{
4357 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4358 struct bpf_reg_state *regs = state->regs, *reg;
4359 u16 new_range; 4379 u16 new_range;
4360 int i, j; 4380 int i;
4361 4381
4362 if (dst_reg->off < 0 || 4382 if (dst_reg->off < 0 ||
4363 (dst_reg->off == 0 && range_right_open)) 4383 (dst_reg->off == 0 && range_right_open))
@@ -4422,20 +4442,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
4422 * the range won't allow anything. 4442 * the range won't allow anything.
4423 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 4443 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
4424 */ 4444 */
4425 for (i = 0; i < MAX_BPF_REG; i++) 4445 for (i = 0; i <= vstate->curframe; i++)
4426 if (regs[i].type == type && regs[i].id == dst_reg->id) 4446 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
4427 /* keep the maximum range already checked */ 4447 new_range);
4428 regs[i].range = max(regs[i].range, new_range);
4429
4430 for (j = 0; j <= vstate->curframe; j++) {
4431 state = vstate->frame[j];
4432 bpf_for_each_spilled_reg(i, state, reg) {
4433 if (!reg)
4434 continue;
4435 if (reg->type == type && reg->id == dst_reg->id)
4436 reg->range = max(reg->range, new_range);
4437 }
4438 }
4439} 4448}
4440 4449
4441/* compute branch direction of the expression "if (reg opcode val) goto target;" 4450/* compute branch direction of the expression "if (reg opcode val) goto target;"
@@ -4909,6 +4918,22 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
4909 } 4918 }
4910} 4919}
4911 4920
4921static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
4922 bool is_null)
4923{
4924 struct bpf_reg_state *reg;
4925 int i;
4926
4927 for (i = 0; i < MAX_BPF_REG; i++)
4928 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
4929
4930 bpf_for_each_spilled_reg(i, state, reg) {
4931 if (!reg)
4932 continue;
4933 mark_ptr_or_null_reg(state, reg, id, is_null);
4934 }
4935}
4936
4912/* The logic is similar to find_good_pkt_pointers(), both could eventually 4937/* The logic is similar to find_good_pkt_pointers(), both could eventually
4913 * be folded together at some point. 4938 * be folded together at some point.
4914 */ 4939 */
@@ -4916,10 +4941,10 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
4916 bool is_null) 4941 bool is_null)
4917{ 4942{
4918 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4943 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4919 struct bpf_reg_state *reg, *regs = state->regs; 4944 struct bpf_reg_state *regs = state->regs;
4920 u32 ref_obj_id = regs[regno].ref_obj_id; 4945 u32 ref_obj_id = regs[regno].ref_obj_id;
4921 u32 id = regs[regno].id; 4946 u32 id = regs[regno].id;
4922 int i, j; 4947 int i;
4923 4948
4924 if (ref_obj_id && ref_obj_id == id && is_null) 4949 if (ref_obj_id && ref_obj_id == id && is_null)
4925 /* regs[regno] is in the " == NULL" branch. 4950 /* regs[regno] is in the " == NULL" branch.
@@ -4928,17 +4953,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
4928 */ 4953 */
4929 WARN_ON_ONCE(release_reference_state(state, id)); 4954 WARN_ON_ONCE(release_reference_state(state, id));
4930 4955
4931 for (i = 0; i < MAX_BPF_REG; i++) 4956 for (i = 0; i <= vstate->curframe; i++)
4932 mark_ptr_or_null_reg(state, &regs[i], id, is_null); 4957 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
4933
4934 for (j = 0; j <= vstate->curframe; j++) {
4935 state = vstate->frame[j];
4936 bpf_for_each_spilled_reg(i, state, reg) {
4937 if (!reg)
4938 continue;
4939 mark_ptr_or_null_reg(state, reg, id, is_null);
4940 }
4941 }
4942} 4958}
4943 4959
4944static bool try_match_pkt_pointers(const struct bpf_insn *insn, 4960static bool try_match_pkt_pointers(const struct bpf_insn *insn,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a4d9e14bf138..35f3ea375084 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2007,6 +2007,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2007 if (p->last_task_numa_placement) { 2007 if (p->last_task_numa_placement) {
2008 delta = runtime - p->last_sum_exec_runtime; 2008 delta = runtime - p->last_sum_exec_runtime;
2009 *period = now - p->last_task_numa_placement; 2009 *period = now - p->last_task_numa_placement;
2010
2011 /* Avoid time going backwards, prevent potential divide error: */
2012 if (unlikely((s64)*period < 0))
2013 *period = 0;
2010 } else { 2014 } else {
2011 delta = p->se.avg.load_sum; 2015 delta = p->se.avg.load_sum;
2012 *period = LOAD_AVG_MAX; 2016 *period = LOAD_AVG_MAX;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index df27e499956a..3582eeb59893 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -502,7 +502,10 @@ out:
502 * 502 *
503 * Caller must be holding current->sighand->siglock lock. 503 * Caller must be holding current->sighand->siglock lock.
504 * 504 *
505 * Returns 0 on success, -ve on error. 505 * Returns 0 on success, -ve on error, or
506 * - in TSYNC mode: the pid of a thread which was either not in the correct
507 * seccomp mode or did not have an ancestral seccomp filter
508 * - in NEW_LISTENER mode: the fd of the new listener
506 */ 509 */
507static long seccomp_attach_filter(unsigned int flags, 510static long seccomp_attach_filter(unsigned int flags,
508 struct seccomp_filter *filter) 511 struct seccomp_filter *filter)
@@ -1258,6 +1261,16 @@ static long seccomp_set_mode_filter(unsigned int flags,
1258 if (flags & ~SECCOMP_FILTER_FLAG_MASK) 1261 if (flags & ~SECCOMP_FILTER_FLAG_MASK)
1259 return -EINVAL; 1262 return -EINVAL;
1260 1263
1264 /*
1265 * In the successful case, NEW_LISTENER returns the new listener fd.
1266 * But in the failure case, TSYNC returns the thread that died. If you
1267 * combine these two flags, there's no way to tell whether something
1268 * succeeded or failed. So, let's disallow this combination.
1269 */
1270 if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
1271 (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
1272 return -EINVAL;
1273
1261 /* Prepare the new filter before holding any locks. */ 1274 /* Prepare the new filter before holding any locks. */
1262 prepared = seccomp_prepare_user_filter(filter); 1275 prepared = seccomp_prepare_user_filter(filter);
1263 if (IS_ERR(prepared)) 1276 if (IS_ERR(prepared))
@@ -1304,7 +1317,7 @@ out:
1304 mutex_unlock(&current->signal->cred_guard_mutex); 1317 mutex_unlock(&current->signal->cred_guard_mutex);
1305out_put_fd: 1318out_put_fd:
1306 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { 1319 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1307 if (ret < 0) { 1320 if (ret) {
1308 listener_f->private_data = NULL; 1321 listener_f->private_data = NULL;
1309 fput(listener_f); 1322 fput(listener_f);
1310 put_unused_fd(listener); 1323 put_unused_fd(listener);
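A quick user-space probe of the new rule, as a hedged sketch: the flag values below mirror the current seccomp UAPI constants, but treat them as assumptions if your headers differ. Because the combination check runs before the filter pointer is dereferenced, a NULL filter is enough to demonstrate the -EINVAL path.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SET_MODE_FILTER   1		/* SECCOMP_SET_MODE_FILTER */
#define FLAG_TSYNC        (1UL << 0)	/* SECCOMP_FILTER_FLAG_TSYNC */
#define FLAG_NEW_LISTENER (1UL << 3)	/* SECCOMP_FILTER_FLAG_NEW_LISTENER */

int main(void)
{
	long ret = syscall(SYS_seccomp, SET_MODE_FILTER,
			   FLAG_TSYNC | FLAG_NEW_LISTENER, NULL);

	if (ret == -1 && errno == EINVAL)
		printf("TSYNC|NEW_LISTENER rejected, as intended\n");
	return 0;
}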
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41b6f96e5366..4ee8d8aa3d0f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -762,7 +762,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
762 762
763 preempt_disable_notrace(); 763 preempt_disable_notrace();
764 time = rb_time_stamp(buffer); 764 time = rb_time_stamp(buffer);
765 preempt_enable_no_resched_notrace(); 765 preempt_enable_notrace();
766 766
767 return time; 767 return time;
768} 768}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6c24755655c7..ca1ee656d6d8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
496 * not modified. 496 * not modified.
497 */ 497 */
498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); 498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
499 if (!pid_list) 499 if (!pid_list) {
500 trace_parser_put(&parser);
500 return -ENOMEM; 501 return -ENOMEM;
502 }
501 503
502 pid_list->pid_max = READ_ONCE(pid_max); 504 pid_list->pid_max = READ_ONCE(pid_max);
503 505
@@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
507 509
508 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); 510 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
509 if (!pid_list->pids) { 511 if (!pid_list->pids) {
512 trace_parser_put(&parser);
510 kfree(pid_list); 513 kfree(pid_list);
511 return -ENOMEM; 514 return -ENOMEM;
512 } 515 }
@@ -7025,19 +7028,23 @@ struct buffer_ref {
7025 struct ring_buffer *buffer; 7028 struct ring_buffer *buffer;
7026 void *page; 7029 void *page;
7027 int cpu; 7030 int cpu;
7028 int ref; 7031 refcount_t refcount;
7029}; 7032};
7030 7033
7034static void buffer_ref_release(struct buffer_ref *ref)
7035{
7036 if (!refcount_dec_and_test(&ref->refcount))
7037 return;
7038 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7039 kfree(ref);
7040}
7041
7031static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 7042static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7032 struct pipe_buffer *buf) 7043 struct pipe_buffer *buf)
7033{ 7044{
7034 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 7045 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7035 7046
7036 if (--ref->ref) 7047 buffer_ref_release(ref);
7037 return;
7038
7039 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7040 kfree(ref);
7041 buf->private = 0; 7048 buf->private = 0;
7042} 7049}
7043 7050
@@ -7046,10 +7053,10 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7046{ 7053{
7047 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 7054 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7048 7055
7049 if (ref->ref > INT_MAX/2) 7056 if (refcount_read(&ref->refcount) > INT_MAX/2)
7050 return false; 7057 return false;
7051 7058
7052 ref->ref++; 7059 refcount_inc(&ref->refcount);
7053 return true; 7060 return true;
7054} 7061}
7055 7062
@@ -7057,7 +7064,7 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7057static const struct pipe_buf_operations buffer_pipe_buf_ops = { 7064static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7058 .confirm = generic_pipe_buf_confirm, 7065 .confirm = generic_pipe_buf_confirm,
7059 .release = buffer_pipe_buf_release, 7066 .release = buffer_pipe_buf_release,
7060 .steal = generic_pipe_buf_steal, 7067 .steal = generic_pipe_buf_nosteal,
7061 .get = buffer_pipe_buf_get, 7068 .get = buffer_pipe_buf_get,
7062}; 7069};
7063 7070
@@ -7070,11 +7077,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7070 struct buffer_ref *ref = 7077 struct buffer_ref *ref =
7071 (struct buffer_ref *)spd->partial[i].private; 7078 (struct buffer_ref *)spd->partial[i].private;
7072 7079
7073 if (--ref->ref) 7080 buffer_ref_release(ref);
7074 return;
7075
7076 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7077 kfree(ref);
7078 spd->partial[i].private = 0; 7081 spd->partial[i].private = 0;
7079} 7082}
7080 7083
@@ -7129,7 +7132,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7129 break; 7132 break;
7130 } 7133 }
7131 7134
7132 ref->ref = 1; 7135 refcount_set(&ref->refcount, 1);
7133 ref->buffer = iter->trace_buffer->buffer; 7136 ref->buffer = iter->trace_buffer->buffer;
7134 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 7137 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7135 if (IS_ERR(ref->page)) { 7138 if (IS_ERR(ref->page)) {
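The conversion above swaps a bare int reference count for refcount_t, which is atomic and saturates with a WARN instead of overflowing or going negative, and centralizes the release in buffer_ref_release(). The resulting lifecycle, as a kernel-style sketch (illustrative struct, not the trace code itself):

#include <linux/refcount.h>
#include <linux/slab.h>

struct buf_ref {
	refcount_t refcount;
};

static void buf_ref_init(struct buf_ref *r)
{
	refcount_set(&r->refcount, 1);		/* creator's reference */
}

static void buf_ref_get(struct buf_ref *r)
{
	refcount_inc(&r->refcount);		/* saturates + WARNs on overflow */
}

static void buf_ref_put(struct buf_ref *r)
{
	if (refcount_dec_and_test(&r->refcount))
		kfree(r);			/* last reference dropped */
}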
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7117ac61174e..8ed7d276fe7d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1937,6 +1937,7 @@ config TEST_KMOD
1937 depends on m 1937 depends on m
1938 depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS 1938 depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
1939 depends on NETDEVICES && NET_CORE && INET # for TUN 1939 depends on NETDEVICES && NET_CORE && INET # for TUN
1940 depends on BLOCK
1940 select TEST_LKM 1941 select TEST_LKM
1941 select XFS_FS 1942 select XFS_FS
1942 select TUN 1943 select TUN
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 83cdcaa82bf6..f832b095afba 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
383static int test_func(void *private) 383static int test_func(void *private)
384{ 384{
385 struct test_driver *t = private; 385 struct test_driver *t = private;
386 cpumask_t newmask = CPU_MASK_NONE;
387 int random_array[ARRAY_SIZE(test_case_array)]; 386 int random_array[ARRAY_SIZE(test_case_array)];
388 int index, i, j, ret; 387 int index, i, j, ret;
389 ktime_t kt; 388 ktime_t kt;
390 u64 delta; 389 u64 delta;
391 390
392 cpumask_set_cpu(t->cpu, &newmask); 391 ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
393 set_cpus_allowed_ptr(current, &newmask); 392 if (ret < 0)
393 pr_err("Failed to set affinity to %d CPU\n", t->cpu);
394 394
395 for (i = 0; i < ARRAY_SIZE(test_case_array); i++) 395 for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
396 random_array[i] = i; 396 random_array[i] = i;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0082d699be94..b236069ff0d8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -874,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
874 */ 874 */
875 mem = find_memory_block(__pfn_to_section(pfn)); 875 mem = find_memory_block(__pfn_to_section(pfn));
876 nid = mem->nid; 876 nid = mem->nid;
877 put_device(&mem->dev);
877 878
878 /* associate pfn range with the zone */ 879 /* associate pfn range with the zone */
879 zone = move_pfn_range(online_type, nid, pfn, nr_pages); 880 zone = move_pfn_range(online_type, nid, pfn, nr_pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6ce20aaf80b..c02cff1ed56e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
266 266
267int min_free_kbytes = 1024; 267int min_free_kbytes = 1024;
268int user_min_free_kbytes = -1; 268int user_min_free_kbytes = -1;
269#ifdef CONFIG_DISCONTIGMEM
270/*
271 * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
272 * are not on separate NUMA nodes. Functionally this works but with
273 * watermark_boost_factor, it can reclaim prematurely as the ranges can be
274 * quite small. By default, do not boost watermarks on discontigmem as in
275 * many cases very high-order allocations like THP are likely to be
276 * unsupported and the premature reclaim offsets the advantage of long-term
277 * fragmentation avoidance.
278 */
279int watermark_boost_factor __read_mostly;
280#else
269int watermark_boost_factor __read_mostly = 15000; 281int watermark_boost_factor __read_mostly = 15000;
282#endif
270int watermark_scale_factor = 10; 283int watermark_scale_factor = 10;
271 284
272static unsigned long nr_kernel_pages __initdata; 285static unsigned long nr_kernel_pages __initdata;
@@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3419 alloc_flags |= ALLOC_KSWAPD; 3432 alloc_flags |= ALLOC_KSWAPD;
3420 3433
3421#ifdef CONFIG_ZONE_DMA32 3434#ifdef CONFIG_ZONE_DMA32
3435 if (!zone)
3436 return alloc_flags;
3437
3422 if (zone_idx(zone) != ZONE_NORMAL) 3438 if (zone_idx(zone) != ZONE_NORMAL)
3423 goto out; 3439 return alloc_flags;
3424 3440
3425 /* 3441 /*
3426 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3442 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3429 */ 3445 */
3430 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3446 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3431 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3447 if (nr_online_nodes > 1 && !populated_zone(--zone))
3432 goto out; 3448 return alloc_flags;
3433 3449
3434out: 3450 alloc_flags |= ALLOC_NOFRAGMENT;
3435#endif /* CONFIG_ZONE_DMA32 */ 3451#endif /* CONFIG_ZONE_DMA32 */
3436 return alloc_flags; 3452 return alloc_flags;
3437} 3453}
@@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3773 memalloc_noreclaim_restore(noreclaim_flag); 3789 memalloc_noreclaim_restore(noreclaim_flag);
3774 psi_memstall_leave(&pflags); 3790 psi_memstall_leave(&pflags);
3775 3791
3776 if (*compact_result <= COMPACT_INACTIVE) {
3777 WARN_ON_ONCE(page);
3778 return NULL;
3779 }
3780
3781 /* 3792 /*
3782 * At least in one zone compaction wasn't deferred or skipped, so let's 3793 * At least in one zone compaction wasn't deferred or skipped, so let's
3783 * count a compaction stall 3794 * count a compaction stall
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index e2511027d19b..a2555023c654 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1915,6 +1915,7 @@ static int __init atalk_init(void)
1915 ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv); 1915 ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
1916 if (!ddp_dl) { 1916 if (!ddp_dl) {
1917 pr_crit("Unable to register DDP with SNAP.\n"); 1917 pr_crit("Unable to register DDP with SNAP.\n");
1918 rc = -ENOMEM;
1918 goto out_sock; 1919 goto out_sock;
1919 } 1920 }
1920 1921
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 10e809b296ec..fb065a8937ea 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
226 tail[plen - 1] = proto; 226 tail[plen - 1] = proto;
227} 227}
228 228
229static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) 229static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
230{ 230{
231 int encap_type; 231 int encap_type;
232 struct udphdr *uh; 232 struct udphdr *uh;
@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
234 __be16 sport, dport; 234 __be16 sport, dport;
235 struct xfrm_encap_tmpl *encap = x->encap; 235 struct xfrm_encap_tmpl *encap = x->encap;
236 struct ip_esp_hdr *esph = esp->esph; 236 struct ip_esp_hdr *esph = esp->esph;
237 unsigned int len;
237 238
238 spin_lock_bh(&x->lock); 239 spin_lock_bh(&x->lock);
239 sport = encap->encap_sport; 240 sport = encap->encap_sport;
@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
241 encap_type = encap->encap_type; 242 encap_type = encap->encap_type;
242 spin_unlock_bh(&x->lock); 243 spin_unlock_bh(&x->lock);
243 244
245 len = skb->len + esp->tailen - skb_transport_offset(skb);
246 if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
247 return -EMSGSIZE;
248
244 uh = (struct udphdr *)esph; 249 uh = (struct udphdr *)esph;
245 uh->source = sport; 250 uh->source = sport;
246 uh->dest = dport; 251 uh->dest = dport;
247 uh->len = htons(skb->len + esp->tailen 252 uh->len = htons(len);
248 - skb_transport_offset(skb));
249 uh->check = 0; 253 uh->check = 0;
250 254
251 switch (encap_type) { 255 switch (encap_type) {
@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
262 266
263 *skb_mac_header(skb) = IPPROTO_UDP; 267 *skb_mac_header(skb) = IPPROTO_UDP;
264 esp->esph = esph; 268 esp->esph = esph;
269
270 return 0;
265} 271}
266 272
267int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) 273int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
275 int tailen = esp->tailen; 281 int tailen = esp->tailen;
276 282
277 /* this is non-NULL only with UDP Encapsulation */ 283 /* this is non-NULL only with UDP Encapsulation */
278 if (x->encap) 284 if (x->encap) {
279 esp_output_udp_encap(x, skb, esp); 285 int err = esp_output_udp_encap(x, skb, esp);
286
287 if (err < 0)
288 return err;
289 }
280 290
281 if (!skb_cloned(skb)) { 291 if (!skb_cloned(skb)) {
282 if (tailen <= skb_tailroom(skb)) { 292 if (tailen <= skb_tailroom(skb)) {
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index b61a8ff558f9..8edcfa66d1e5 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
52 goto out; 52 goto out;
53 53
54 if (sp->len == XFRM_MAX_DEPTH) 54 if (sp->len == XFRM_MAX_DEPTH)
55 goto out; 55 goto out_reset;
56 56
57 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, 57 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
58 (xfrm_address_t *)&ip_hdr(skb)->daddr, 58 (xfrm_address_t *)&ip_hdr(skb)->daddr,
59 spi, IPPROTO_ESP, AF_INET); 59 spi, IPPROTO_ESP, AF_INET);
60 if (!x) 60 if (!x)
61 goto out; 61 goto out_reset;
62 62
63 sp->xvec[sp->len++] = x; 63 sp->xvec[sp->len++] = x;
64 sp->olen++; 64 sp->olen++;
@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
66 xo = xfrm_offload(skb); 66 xo = xfrm_offload(skb);
67 if (!xo) { 67 if (!xo) {
68 xfrm_state_put(x); 68 xfrm_state_put(x);
69 goto out; 69 goto out_reset;
70 } 70 }
71 } 71 }
72 72
@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
82 xfrm_input(skb, IPPROTO_ESP, spi, -2); 82 xfrm_input(skb, IPPROTO_ESP, spi, -2);
83 83
84 return ERR_PTR(-EINPROGRESS); 84 return ERR_PTR(-EINPROGRESS);
85out_reset:
86 secpath_reset(skb);
85out: 87out:
86 skb_push(skb, offset); 88 skb_push(skb, offset);
87 NAPI_GRO_CB(skb)->same_flow = 0; 89 NAPI_GRO_CB(skb)->same_flow = 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4e42c1974ba2..ac880beda8a7 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -516,6 +516,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
516 to->pkt_type = from->pkt_type; 516 to->pkt_type = from->pkt_type;
517 to->priority = from->priority; 517 to->priority = from->priority;
518 to->protocol = from->protocol; 518 to->protocol = from->protocol;
519 to->skb_iif = from->skb_iif;
519 skb_dst_drop(to); 520 skb_dst_drop(to);
520 skb_dst_copy(to, from); 521 skb_dst_copy(to, from);
521 to->dev = from->dev; 522 to->dev = from->dev;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index cc5d9c0a8a10..254a42e83ff9 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -630,10 +630,8 @@ static int __init vti_init(void)
630 630
631 msg = "ipip tunnel"; 631 msg = "ipip tunnel";
632 err = xfrm4_tunnel_register(&ipip_handler, AF_INET); 632 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
633 if (err < 0) { 633 if (err < 0)
634 pr_info("%s: cant't register tunnel\n",__func__);
635 goto xfrm_tunnel_failed; 634 goto xfrm_tunnel_failed;
636 }
637 635
638 msg = "netlink interface"; 636 msg = "netlink interface";
639 err = rtnl_link_register(&vti_link_ops); 637 err = rtnl_link_register(&vti_link_ops);
@@ -643,9 +641,9 @@ static int __init vti_init(void)
643 return err; 641 return err;
644 642
645rtnl_link_failed: 643rtnl_link_failed:
646 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
647xfrm_tunnel_failed:
648 xfrm4_tunnel_deregister(&ipip_handler, AF_INET); 644 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
645xfrm_tunnel_failed:
646 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
649xfrm_proto_comp_failed: 647xfrm_proto_comp_failed:
650 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); 648 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
651xfrm_proto_ah_failed: 649xfrm_proto_ah_failed:
@@ -660,6 +658,7 @@ pernet_dev_failed:
660static void __exit vti_fini(void) 658static void __exit vti_fini(void)
661{ 659{
662 rtnl_link_unregister(&vti_link_ops); 660 rtnl_link_unregister(&vti_link_ops);
661 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
663 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); 662 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
664 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); 663 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
665 xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); 664 xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
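
The vti_init() fix swaps two unwind labels: the label reached when step N fails must undo steps N-1 down to 1, in reverse order of acquisition, or a failure path deregisters something that was never registered. The matching vti_fini() hunk adds the tunnel deregistration that module exit was missing. A compilable skeleton of the corrected shape, with the registration calls stubbed out:

    #include <stdio.h>

    static int register_a(void) { return 0; }
    static int register_b(void) { return 0; }
    static int register_c(void) { return -1; } /* pretend step C fails */

    static int init(void)
    {
        if (register_a() < 0)
            goto fail_a;
        if (register_b() < 0)
            goto fail_b;
        if (register_c() < 0)
            goto fail_c;
        return 0;

    fail_c:                     /* C failed: undo B, then A */
        puts("unregister B");
    fail_b:                     /* B failed: undo A only */
        puts("unregister A");
    fail_a:
        return -1;
    }

    int main(void)
    {
        return init() == -1 ? 0 : 1;
    }
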
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index faa6fa619f59..af81e4a6a8d8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1673 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq || 1673 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1674 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield || 1674 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1675 ((TCP_SKB_CB(tail)->tcp_flags | 1675 ((TCP_SKB_CB(tail)->tcp_flags |
1676 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) || 1676 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1677 !((TCP_SKB_CB(tail)->tcp_flags &
1678 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1677 ((TCP_SKB_CB(tail)->tcp_flags ^ 1679 ((TCP_SKB_CB(tail)->tcp_flags ^
1678 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) || 1680 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1679#ifdef CONFIG_TLS_DEVICE 1681#ifdef CONFIG_TLS_DEVICE
@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1692 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq)) 1694 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1693 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq; 1695 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1694 1696
1697 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1698 * thtail->fin, so that the fast path in tcp_rcv_established()
1699 * is not entered if we append a packet with a FIN.
1700 * SYN, RST, URG are not present.
1701 * ACK is set on both packets.
1702 * PSH : we do not really care in TCP stack,
1703 * at least for 'GRO' packets.
1704 */
1705 thtail->fin |= th->fin;
1695 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1706 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1696 1707
1697 if (TCP_SKB_CB(skb)->has_rxtstamp) { 1708 if (TCP_SKB_CB(skb)->has_rxtstamp) {
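
The tcp_add_backlog() hunks tighten when two queued segments may be coalesced: refuse if either carries SYN, RST or URG, require ACK on both, and OR the flags into the head afterwards so an appended FIN is not lost. A userspace predicate modelling those checks; the flag values mirror the TCP header bit layout:

    #include <stdbool.h>
    #include <stdio.h>

    #define F_FIN 0x01
    #define F_SYN 0x02
    #define F_RST 0x04
    #define F_ACK 0x10
    #define F_URG 0x20

    /* Hedged sketch of the coalescing test: merging must not change
     * protocol semantics, so SYN/RST/URG must be absent and ACK set on
     * both segments. */
    static bool can_coalesce(unsigned tail_flags, unsigned skb_flags)
    {
        if ((tail_flags | skb_flags) & (F_SYN | F_RST | F_URG))
            return false;
        if (!((tail_flags & skb_flags) & F_ACK))
            return false;
        return true;
    }

    int main(void)
    {
        unsigned tail = F_ACK, seg = F_ACK | F_FIN;

        if (can_coalesce(tail, seg))
            tail |= seg;            /* FIN survives into the head */
        printf("merged flags: %#x\n", tail);
        return 0;
    }
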
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 64f9715173ac..065334b41d57 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
352 struct sk_buff *pp = NULL; 352 struct sk_buff *pp = NULL;
353 struct udphdr *uh2; 353 struct udphdr *uh2;
354 struct sk_buff *p; 354 struct sk_buff *p;
355 unsigned int ulen;
355 356
356 /* requires non zero csum, for symmetry with GSO */ 357 /* requires non zero csum, for symmetry with GSO */
357 if (!uh->check) { 358 if (!uh->check) {
@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
359 return NULL; 360 return NULL;
360 } 361 }
361 362
363 /* Do not deal with padded or malicious packets, sorry ! */
364 ulen = ntohs(uh->len);
365 if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
366 NAPI_GRO_CB(skb)->flush = 1;
367 return NULL;
368 }
362 /* pull encapsulating udp header */ 369 /* pull encapsulating udp header */
363 skb_gro_pull(skb, sizeof(struct udphdr)); 370 skb_gro_pull(skb, sizeof(struct udphdr));
364 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); 371 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
377 384
378 /* Terminate the flow on len mismatch or if it grows "too much". 385 /* Terminate the flow on len mismatch or if it grows "too much".
379 * Under small packet flood GRO count could elsewhere grow a lot 386 * Under small packet flood GRO count could elsewhere grow a lot
380 * leading to execessive truesize values 387 * leading to excessive truesize values.
388 * On len mismatch merge the first packet shorter than gso_size,
389 * otherwise complete the GRO packet.
381 */ 390 */
382 if (!skb_gro_receive(p, skb) && 391 if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
392 ulen != ntohs(uh2->len) ||
383 NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX) 393 NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
384 pp = p; 394 pp = p;
385 else if (uh->len != uh2->len)
386 pp = p;
387 395
388 return pp; 396 return pp;
389 } 397 }
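
The udp_offload.c hunk adds an up-front sanity check: the UDP header length must cover more than the header itself and must equal the length actually received, otherwise padded or forged packets could corrupt the GRO aggregate. A sketch of the predicate with illustrative values:

    #include <arpa/inet.h>   /* ntohs, htons */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* uh_len_be is the on-wire (big-endian) UDP length field; gro_len
     * is what the stack actually holds for this packet. */
    static bool udp_len_sane(uint16_t uh_len_be, unsigned int gro_len)
    {
        unsigned int ulen = ntohs(uh_len_be);

        return ulen > 8 /* sizeof(struct udphdr) */ && ulen == gro_len;
    }

    int main(void)
    {
        printf("good=%d padded=%d\n",
               udp_len_sane(htons(100), 100),  /* matches wire length */
               udp_len_sane(htons(100), 120)); /* 20 bytes of padding */
        return 0;
    }
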
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index bff83279d76f..d453cf417b03 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
74 goto out; 74 goto out;
75 75
76 if (sp->len == XFRM_MAX_DEPTH) 76 if (sp->len == XFRM_MAX_DEPTH)
77 goto out; 77 goto out_reset;
78 78
79 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, 79 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
80 (xfrm_address_t *)&ipv6_hdr(skb)->daddr, 80 (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
81 spi, IPPROTO_ESP, AF_INET6); 81 spi, IPPROTO_ESP, AF_INET6);
82 if (!x) 82 if (!x)
83 goto out; 83 goto out_reset;
84 84
85 sp->xvec[sp->len++] = x; 85 sp->xvec[sp->len++] = x;
86 sp->olen++; 86 sp->olen++;
@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
88 xo = xfrm_offload(skb); 88 xo = xfrm_offload(skb);
89 if (!xo) { 89 if (!xo) {
90 xfrm_state_put(x); 90 xfrm_state_put(x);
91 goto out; 91 goto out_reset;
92 } 92 }
93 } 93 }
94 94
@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
109 xfrm_input(skb, IPPROTO_ESP, spi, -2); 109 xfrm_input(skb, IPPROTO_ESP, spi, -2);
110 110
111 return ERR_PTR(-EINPROGRESS); 111 return ERR_PTR(-EINPROGRESS);
112out_reset:
113 secpath_reset(skb);
112out: 114out:
113 skb_push(skb, offset); 115 skb_push(skb, offset);
114 NAPI_GRO_CB(skb)->same_flow = 0; 116 NAPI_GRO_CB(skb)->same_flow = 0;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index a8919c217cc2..08e0390e001c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -916,9 +916,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
916 if (pcpu_rt) { 916 if (pcpu_rt) {
917 struct fib6_info *from; 917 struct fib6_info *from;
918 918
919 from = rcu_dereference_protected(pcpu_rt->from, 919 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
920 lockdep_is_held(&table->tb6_lock));
921 rcu_assign_pointer(pcpu_rt->from, NULL);
922 fib6_info_release(from); 920 fib6_info_release(from);
923 } 921 }
924 } 922 }
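
This hunk, like the matching ones in net/ipv6/route.c below, collapses "read the pointer, then store NULL" into one atomic exchange, so two racing teardown paths cannot both observe the old value and release it twice. A C11 userspace analogue of the pointer-steal pattern:

    #include <stdatomic.h>
    #include <stdlib.h>

    static _Atomic(long *) from;

    static void release(long *p)
    {
        free(p);    /* stand-in for fib6_info_release() */
    }

    static void teardown(void)
    {
        /* Only one caller can win the exchange and see the old value. */
        long *old = atomic_exchange(&from, (long *)NULL);

        if (old)
            release(old);
    }

    int main(void)
    {
        atomic_store(&from, malloc(sizeof(long)));
        teardown();
        teardown();     /* second call sees NULL and does nothing */
        return 0;
    }
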
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index cb54a8a3c273..be5f3d7ceb96 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
94 return fl; 94 return fl;
95} 95}
96 96
97static void fl_free_rcu(struct rcu_head *head)
98{
99 struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
100
101 if (fl->share == IPV6_FL_S_PROCESS)
102 put_pid(fl->owner.pid);
103 kfree(fl->opt);
104 kfree(fl);
105}
106
97 107
98static void fl_free(struct ip6_flowlabel *fl) 108static void fl_free(struct ip6_flowlabel *fl)
99{ 109{
100 if (fl) { 110 if (fl)
101 if (fl->share == IPV6_FL_S_PROCESS) 111 call_rcu(&fl->rcu, fl_free_rcu);
102 put_pid(fl->owner.pid);
103 kfree(fl->opt);
104 kfree_rcu(fl, rcu);
105 }
106} 112}
107 113
108static void fl_release(struct ip6_flowlabel *fl) 114static void fl_release(struct ip6_flowlabel *fl)
@@ -633,9 +639,9 @@ recheck:
633 if (fl1->share == IPV6_FL_S_EXCL || 639 if (fl1->share == IPV6_FL_S_EXCL ||
634 fl1->share != fl->share || 640 fl1->share != fl->share ||
635 ((fl1->share == IPV6_FL_S_PROCESS) && 641 ((fl1->share == IPV6_FL_S_PROCESS) &&
636 (fl1->owner.pid == fl->owner.pid)) || 642 (fl1->owner.pid != fl->owner.pid)) ||
637 ((fl1->share == IPV6_FL_S_USER) && 643 ((fl1->share == IPV6_FL_S_USER) &&
638 uid_eq(fl1->owner.uid, fl->owner.uid))) 644 !uid_eq(fl1->owner.uid, fl->owner.uid)))
639 goto release; 645 goto release;
640 646
641 err = -ENOMEM; 647 err = -ENOMEM;
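
kfree_rcu() can only free the object itself; once teardown also has to drop a pid reference and free ->opt, that work must move into a call_rcu() callback that runs after the grace period, since doing it immediately in fl_free() left RCU readers with dangling pointers. (The second hunk in this file fixes an inverted comparison: reuse of a shared label must be refused when the owner does not match.) A compilable sketch with a toy call_rcu() that invokes the callback at once instead of deferring it:

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { void (*func)(struct rcu_head *); };

    struct flowlabel {
        void *opt;
        struct rcu_head rcu;
    };

    /* All teardown work lives in the callback, after the grace period. */
    static void fl_free_rcu(struct rcu_head *head)
    {
        struct flowlabel *fl = container_of(head, struct flowlabel, rcu);

        /* put_pid(fl->owner.pid) would go here in the kernel */
        free(fl->opt);
        free(fl);
    }

    /* Toy call_rcu(): runs the callback immediately. A real RCU
     * implementation defers it until all readers have finished. */
    static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
    {
        func(head);
    }

    int main(void)
    {
        struct flowlabel *fl = calloc(1, sizeof(*fl));

        fl->opt = malloc(8);
        call_rcu(&fl->rcu, fl_free_rcu);
        return 0;
    }
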
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b18e85cd7587..23a20d62daac 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -380,11 +380,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
380 in6_dev_put(idev); 380 in6_dev_put(idev);
381 } 381 }
382 382
383 rcu_read_lock(); 383 from = xchg((__force struct fib6_info **)&rt->from, NULL);
384 from = rcu_dereference(rt->from);
385 rcu_assign_pointer(rt->from, NULL);
386 fib6_info_release(from); 384 fib6_info_release(from);
387 rcu_read_unlock();
388} 385}
389 386
390static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, 387static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -1323,9 +1320,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1323 /* purge completely the exception to allow releasing the held resources: 1320 /* purge completely the exception to allow releasing the held resources:
1324 * some [sk] cache may keep the dst around for unlimited time 1321 * some [sk] cache may keep the dst around for unlimited time
1325 */ 1322 */
1326 from = rcu_dereference_protected(rt6_ex->rt6i->from, 1323 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1327 lockdep_is_held(&rt6_exception_lock));
1328 rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
1329 fib6_info_release(from); 1324 fib6_info_release(from);
1330 dst_dev_put(&rt6_ex->rt6i->dst); 1325 dst_dev_put(&rt6_ex->rt6i->dst);
1331 1326
@@ -3495,11 +3490,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
3495 3490
3496 rcu_read_lock(); 3491 rcu_read_lock();
3497 res.f6i = rcu_dereference(rt->from); 3492 res.f6i = rcu_dereference(rt->from);
3498 /* This fib6_info_hold() is safe here because we hold reference to rt 3493 if (!res.f6i)
3499 * and rt already holds reference to fib6_info. 3494 goto out;
3500 */
3501 fib6_info_hold(res.f6i);
3502 rcu_read_unlock();
3503 3495
3504 res.nh = &res.f6i->fib6_nh; 3496 res.nh = &res.f6i->fib6_nh;
3505 res.fib6_flags = res.f6i->fib6_flags; 3497 res.fib6_flags = res.f6i->fib6_flags;
@@ -3514,10 +3506,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
3514 3506
3515 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 3507 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3516 3508
3517 /* No need to remove rt from the exception table if rt is 3509 /* rt6_insert_exception() will take care of duplicated exceptions */
3518 * a cached route because rt6_insert_exception() will
3519 * takes care of it
3520 */
3521 if (rt6_insert_exception(nrt, &res)) { 3510 if (rt6_insert_exception(nrt, &res)) {
3522 dst_release_immediate(&nrt->dst); 3511 dst_release_immediate(&nrt->dst);
3523 goto out; 3512 goto out;
@@ -3530,7 +3519,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
3530 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 3519 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3531 3520
3532out: 3521out:
3533 fib6_info_release(res.f6i); 3522 rcu_read_unlock();
3534 neigh_release(neigh); 3523 neigh_release(neigh);
3535} 3524}
3536 3525
@@ -3772,23 +3761,34 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3772 3761
3773static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) 3762static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3774{ 3763{
3775 int type;
3776 struct dst_entry *dst = skb_dst(skb); 3764 struct dst_entry *dst = skb_dst(skb);
3765 struct net *net = dev_net(dst->dev);
3766 struct inet6_dev *idev;
3767 int type;
3768
3769 if (netif_is_l3_master(skb->dev) &&
3770 dst->dev == net->loopback_dev)
3771 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
3772 else
3773 idev = ip6_dst_idev(dst);
3774
3777 switch (ipstats_mib_noroutes) { 3775 switch (ipstats_mib_noroutes) {
3778 case IPSTATS_MIB_INNOROUTES: 3776 case IPSTATS_MIB_INNOROUTES:
3779 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 3777 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3780 if (type == IPV6_ADDR_ANY) { 3778 if (type == IPV6_ADDR_ANY) {
3781 IP6_INC_STATS(dev_net(dst->dev), 3779 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
3782 __in6_dev_get_safely(skb->dev),
3783 IPSTATS_MIB_INADDRERRORS);
3784 break; 3780 break;
3785 } 3781 }
3786 /* FALLTHROUGH */ 3782 /* FALLTHROUGH */
3787 case IPSTATS_MIB_OUTNOROUTES: 3783 case IPSTATS_MIB_OUTNOROUTES:
3788 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 3784 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
3789 ipstats_mib_noroutes);
3790 break; 3785 break;
3791 } 3786 }
3787
3788 /* Start over by dropping the dst for l3mdev case */
3789 if (netif_is_l3_master(skb->dev))
3790 skb_dst_drop(skb);
3791
3792 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0); 3792 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3793 kfree_skb(skb); 3793 kfree_skb(skb);
3794 return 0; 3794 return 0;
@@ -5056,16 +5056,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5056 5056
5057 rcu_read_lock(); 5057 rcu_read_lock();
5058 from = rcu_dereference(rt->from); 5058 from = rcu_dereference(rt->from);
5059 5059 if (from) {
5060 if (fibmatch) 5060 if (fibmatch)
5061 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif, 5061 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5062 RTM_NEWROUTE, NETLINK_CB(in_skb).portid, 5062 iif, RTM_NEWROUTE,
5063 nlh->nlmsg_seq, 0); 5063 NETLINK_CB(in_skb).portid,
5064 else 5064 nlh->nlmsg_seq, 0);
5065 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr, 5065 else
5066 &fl6.saddr, iif, RTM_NEWROUTE, 5066 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5067 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 5067 &fl6.saddr, iif, RTM_NEWROUTE,
5068 0); 5068 NETLINK_CB(in_skb).portid,
5069 nlh->nlmsg_seq, 0);
5070 } else {
5071 err = -ENETUNREACH;
5072 }
5069 rcu_read_unlock(); 5073 rcu_read_unlock();
5070 5074
5071 if (err < 0) { 5075 if (err < 0) {
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index bc65db782bfb..d9e5f6808811 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
345 unsigned int i; 345 unsigned int i;
346 346
347 xfrm_flush_gc(); 347 xfrm_flush_gc();
348 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); 348 xfrm_state_flush(net, 0, false, true);
349 349
350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) 350 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); 351 WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
402 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 402 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
403 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 403 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
404 unregister_pernet_subsys(&xfrm6_tunnel_net_ops); 404 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
405 /* Someone may still hold an xfrm6_tunnel_spi object,
406 * so wait for pending RCU callbacks before destroying the cache.
407 */
408 rcu_barrier();
405 kmem_cache_destroy(xfrm6_tunnel_spi_kmem); 409 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
406} 410}
407 411
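
The rcu_barrier() added above encodes a shutdown-order rule: any call_rcu() callback that frees objects back into a kmem_cache must have run before kmem_cache_destroy() is called. A userspace analogue, where a deferred-free queue stands in for pending RCU callbacks and a live-object counter for the cache:

    #include <stdio.h>
    #include <stdlib.h>

    #define QMAX 16
    static void *pending[QMAX];
    static int npending, live_objects;

    static void defer_free(void *p) { pending[npending++] = p; }

    static void barrier_flush(void)            /* ~ rcu_barrier() */
    {
        while (npending) {
            free(pending[--npending]);
            live_objects--;
        }
    }

    static void pool_destroy(void)             /* ~ kmem_cache_destroy() */
    {
        if (live_objects)
            fprintf(stderr, "BUG: destroying pool with %d live objects\n",
                    live_objects);
    }

    int main(void)
    {
        defer_free(malloc(32));
        live_objects = 1;
        barrier_flush();    /* without this, pool_destroy() complains */
        pool_destroy();
        return 0;
    }
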
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5651c29cb5bd..4af1e1d60b9f 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1951 1951
1952 if (rq->sadb_x_ipsecrequest_mode == 0) 1952 if (rq->sadb_x_ipsecrequest_mode == 0)
1953 return -EINVAL; 1953 return -EINVAL;
1954 if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
1955 return -EINVAL;
1954 1956
1955 t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ 1957 t->id.proto = rq->sadb_x_ipsecrequest_proto;
1956 if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) 1958 if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
1957 return -EINVAL; 1959 return -EINVAL;
1958 t->mode = mode; 1960 t->mode = mode;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index fed6becc5daf..52b5a2797c0c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
169 169
170 rcu_read_lock_bh(); 170 rcu_read_lock_bh();
171 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 171 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
172 if (tunnel->tunnel_id == tunnel_id) { 172 if (tunnel->tunnel_id == tunnel_id &&
173 l2tp_tunnel_inc_refcount(tunnel); 173 refcount_inc_not_zero(&tunnel->ref_count)) {
174 rcu_read_unlock_bh(); 174 rcu_read_unlock_bh();
175 175
176 return tunnel; 176 return tunnel;
@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
190 190
191 rcu_read_lock_bh(); 191 rcu_read_lock_bh();
192 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 192 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
193 if (++count > nth) { 193 if (++count > nth &&
194 l2tp_tunnel_inc_refcount(tunnel); 194 refcount_inc_not_zero(&tunnel->ref_count)) {
195 rcu_read_unlock_bh(); 195 rcu_read_unlock_bh();
196 return tunnel; 196 return tunnel;
197 } 197 }
@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
909{ 909{
910 struct l2tp_tunnel *tunnel; 910 struct l2tp_tunnel *tunnel;
911 911
912 tunnel = l2tp_tunnel(sk); 912 tunnel = rcu_dereference_sk_user_data(sk);
913 if (tunnel == NULL) 913 if (tunnel == NULL)
914 goto pass_up; 914 goto pass_up;
915 915
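
The l2tp lookups now take a reference only if the object is still alive: a plain increment under RCU could resurrect a tunnel whose refcount already hit zero and is mid-free. A compare-and-swap sketch of the refcount_inc_not_zero() semantics using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct tunnel { atomic_int refs; };

    static bool get_not_zero(struct tunnel *t)
    {
        int old = atomic_load(&t->refs);

        while (old != 0) {
            /* On failure, old is reloaded and the zero check repeats. */
            if (atomic_compare_exchange_weak(&t->refs, &old, old + 1))
                return true;    /* reference taken on a live object */
        }
        return false;           /* object already dying; do not touch */
    }

    int main(void)
    {
        struct tunnel live = { 1 }, dying = { 0 };

        printf("live=%d dying=%d\n",
               get_not_zero(&live), get_not_zero(&dying));
        return 0;
    }
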
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cff0fb3578c9..deb3faf08337 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
841 841
842 dir = sdata->vif.debugfs_dir; 842 dir = sdata->vif.debugfs_dir;
843 843
844 if (!dir) 844 if (IS_ERR_OR_NULL(dir))
845 return; 845 return;
846 846
847 sprintf(buf, "netdev:%s", sdata->name); 847 sprintf(buf, "netdev:%s", sdata->name);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index e03c46ac8e4d..c62101857b9b 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -112,8 +112,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
112 IEEE80211_HT_CAP_TX_STBC); 112 IEEE80211_HT_CAP_TX_STBC);
113 113
114 /* Allow user to configure RX STBC bits */ 114 /* Allow user to configure RX STBC bits */
115 if (ht_capa_mask->cap_info & IEEE80211_HT_CAP_RX_STBC) 115 if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
116 ht_cap->cap |= ht_capa->cap_info & IEEE80211_HT_CAP_RX_STBC; 116 ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
117 IEEE80211_HT_CAP_RX_STBC;
117 118
118 /* Allow user to decrease AMPDU factor */ 119 /* Allow user to decrease AMPDU factor */
119 if (ht_capa_mask->ampdu_params_info & 120 if (ht_capa_mask->ampdu_params_info &
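
The ht.c fix is an endianness bug: cap_info is a little-endian on-wire field, so the mask must be converted before testing and the value converted to host order before use. A sketch of the corrected comparison; htole16/le16toh stand in for the kernel's cpu_to_le16/le16_to_cpu and assume glibc's <endian.h>:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CAP_RX_STBC 0x0300      /* host-order capability mask */

    static uint16_t apply_override(uint16_t cap_info_le, uint16_t mask_le,
                                   uint16_t host_caps)
    {
        /* Compare like with like: convert the constant to LE for the
         * mask test, and the field to host order before ORing it in. */
        if (mask_le & htole16(CAP_RX_STBC))
            host_caps |= le16toh(cap_info_le) & CAP_RX_STBC;
        return host_caps;
    }

    int main(void)
    {
        printf("%#x\n",
               apply_override(htole16(0x0100), htole16(CAP_RX_STBC), 0));
        return 0;
    }
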
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 94459b2b3d2a..410685d38c46 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1907,6 +1907,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
1907 list_del_rcu(&sdata->list); 1907 list_del_rcu(&sdata->list);
1908 mutex_unlock(&sdata->local->iflist_mtx); 1908 mutex_unlock(&sdata->local->iflist_mtx);
1909 1909
1910 if (sdata->vif.txq)
1911 ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
1912
1910 synchronize_rcu(); 1913 synchronize_rcu();
1911 1914
1912 if (sdata->dev) { 1915 if (sdata->dev) {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 72668759cd2b..79cfa031dc7d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -362,8 +362,8 @@ int genl_register_family(struct genl_family *family)
362 } else 362 } else
363 family->attrbuf = NULL; 363 family->attrbuf = NULL;
364 364
365 family->id = idr_alloc(&genl_fam_idr, family, 365 family->id = idr_alloc_cyclic(&genl_fam_idr, family,
366 start, end + 1, GFP_KERNEL); 366 start, end + 1, GFP_KERNEL);
367 if (family->id < 0) { 367 if (family->id < 0) {
368 err = family->id; 368 err = family->id;
369 goto errout_free; 369 goto errout_free;
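
Why idr_alloc_cyclic(): plain allocation hands back the lowest free ID, so a just-released genl family ID can be reused immediately and stale userspace references then hit the wrong family. Cyclic allocation keeps advancing a cursor instead. A minimal model of the difference:

    #include <stdbool.h>
    #include <stdio.h>

    #define NIDS 8
    static bool used[NIDS];
    static int cursor;

    static int alloc_cyclic(void)
    {
        for (int i = 0; i < NIDS; i++) {
            int id = (cursor + i) % NIDS;

            if (!used[id]) {
                used[id] = true;
                cursor = id + 1;    /* next search starts past this ID */
                return id;
            }
        }
        return -1;
    }

    int main(void)
    {
        int a = alloc_cyclic();

        used[a] = false;            /* free it again */
        printf("first=%d next=%d\n", a, alloc_cyclic()); /* not reused */
        return 0;
    }
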
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5c4a118d6f96..90d4e3ce00e5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2600,8 +2600,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2600 void *ph; 2600 void *ph;
2601 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); 2601 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2602 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); 2602 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2603 unsigned char *addr = NULL;
2603 int tp_len, size_max; 2604 int tp_len, size_max;
2604 unsigned char *addr;
2605 void *data; 2605 void *data;
2606 int len_sum = 0; 2606 int len_sum = 0;
2607 int status = TP_STATUS_AVAILABLE; 2607 int status = TP_STATUS_AVAILABLE;
@@ -2612,7 +2612,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2612 if (likely(saddr == NULL)) { 2612 if (likely(saddr == NULL)) {
2613 dev = packet_cached_dev_get(po); 2613 dev = packet_cached_dev_get(po);
2614 proto = po->num; 2614 proto = po->num;
2615 addr = NULL;
2616 } else { 2615 } else {
2617 err = -EINVAL; 2616 err = -EINVAL;
2618 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2617 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2622,10 +2621,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2622 sll_addr))) 2621 sll_addr)))
2623 goto out; 2622 goto out;
2624 proto = saddr->sll_protocol; 2623 proto = saddr->sll_protocol;
2625 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
2626 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2624 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2627 if (addr && dev && saddr->sll_halen < dev->addr_len) 2625 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2628 goto out_put; 2626 if (dev && msg->msg_namelen < dev->addr_len +
2627 offsetof(struct sockaddr_ll, sll_addr))
2628 goto out_put;
2629 addr = saddr->sll_addr;
2630 }
2629 } 2631 }
2630 2632
2631 err = -ENXIO; 2633 err = -ENXIO;
@@ -2797,7 +2799,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2797 struct sk_buff *skb; 2799 struct sk_buff *skb;
2798 struct net_device *dev; 2800 struct net_device *dev;
2799 __be16 proto; 2801 __be16 proto;
2800 unsigned char *addr; 2802 unsigned char *addr = NULL;
2801 int err, reserve = 0; 2803 int err, reserve = 0;
2802 struct sockcm_cookie sockc; 2804 struct sockcm_cookie sockc;
2803 struct virtio_net_hdr vnet_hdr = { 0 }; 2805 struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2814,7 +2816,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2814 if (likely(saddr == NULL)) { 2816 if (likely(saddr == NULL)) {
2815 dev = packet_cached_dev_get(po); 2817 dev = packet_cached_dev_get(po);
2816 proto = po->num; 2818 proto = po->num;
2817 addr = NULL;
2818 } else { 2819 } else {
2819 err = -EINVAL; 2820 err = -EINVAL;
2820 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) 2821 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2822,10 +2823,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2822 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 2823 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2823 goto out; 2824 goto out;
2824 proto = saddr->sll_protocol; 2825 proto = saddr->sll_protocol;
2825 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
2826 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2826 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2827 if (addr && dev && saddr->sll_halen < dev->addr_len) 2827 if (sock->type == SOCK_DGRAM) {
2828 goto out_unlock; 2828 if (dev && msg->msg_namelen < dev->addr_len +
2829 offsetof(struct sockaddr_ll, sll_addr))
2830 goto out_unlock;
2831 addr = saddr->sll_addr;
2832 }
2829 } 2833 }
2830 2834
2831 err = -ENXIO; 2835 err = -ENXIO;
@@ -3342,20 +3346,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3342 sock_recv_ts_and_drops(msg, sk, skb); 3346 sock_recv_ts_and_drops(msg, sk, skb);
3343 3347
3344 if (msg->msg_name) { 3348 if (msg->msg_name) {
3349 int copy_len;
3350
3345 /* If the address length field is there to be filled 3351 /* If the address length field is there to be filled
3346 * in, we fill it in now. 3352 * in, we fill it in now.
3347 */ 3353 */
3348 if (sock->type == SOCK_PACKET) { 3354 if (sock->type == SOCK_PACKET) {
3349 __sockaddr_check_size(sizeof(struct sockaddr_pkt)); 3355 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3350 msg->msg_namelen = sizeof(struct sockaddr_pkt); 3356 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3357 copy_len = msg->msg_namelen;
3351 } else { 3358 } else {
3352 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3359 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3353 3360
3354 msg->msg_namelen = sll->sll_halen + 3361 msg->msg_namelen = sll->sll_halen +
3355 offsetof(struct sockaddr_ll, sll_addr); 3362 offsetof(struct sockaddr_ll, sll_addr);
3363 copy_len = msg->msg_namelen;
3364 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3365 memset(msg->msg_name +
3366 offsetof(struct sockaddr_ll, sll_addr),
3367 0, sizeof(sll->sll_addr));
3368 msg->msg_namelen = sizeof(struct sockaddr_ll);
3369 }
3356 } 3370 }
3357 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, 3371 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3358 msg->msg_namelen);
3359 } 3372 }
3360 3373
3361 if (pkt_sk(sk)->auxdata) { 3374 if (pkt_sk(sk)->auxdata) {
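
The af_packet send-path hunks stop trusting the user-supplied sll_halen and instead require, for SOCK_DGRAM only, that msg_namelen covers the device's real address length past the sll_addr offset; the recvmsg hunk zero-pads short addresses so no uninitialized bytes leak to userspace. A hedged predicate for the length rule, with a trimmed stand-in for sockaddr_ll:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct sockaddr_ll_sketch {         /* trimmed stand-in */
        unsigned short family, halen;
        unsigned char  addr[8];
    };

    /* The name must be long enough to hold the device's full hardware
     * address, regardless of what the caller claimed in halen. */
    static bool addr_ok(size_t msg_namelen, size_t dev_addr_len)
    {
        return msg_namelen >= dev_addr_len +
                              offsetof(struct sockaddr_ll_sketch, addr);
    }

    int main(void)
    {
        size_t base = offsetof(struct sockaddr_ll_sketch, addr);

        printf("short=%d ok=%d\n",
               addr_ok(base + 2, 6), addr_ok(base + 6, 6));
        return 0;
    }
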
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 70559854837e..8946c89d7392 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -772,7 +772,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
772 unsigned long frag_off; 772 unsigned long frag_off;
773 unsigned long to_copy; 773 unsigned long to_copy;
774 unsigned long copied; 774 unsigned long copied;
775 uint64_t uncongested = 0; 775 __le64 uncongested = 0;
776 void *addr; 776 void *addr;
777 777
778 /* catch completely corrupt packets */ 778 /* catch completely corrupt packets */
@@ -789,7 +789,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
789 copied = 0; 789 copied = 0;
790 790
791 while (copied < RDS_CONG_MAP_BYTES) { 791 while (copied < RDS_CONG_MAP_BYTES) {
792 uint64_t *src, *dst; 792 __le64 *src, *dst;
793 unsigned int k; 793 unsigned int k;
794 794
795 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); 795 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
@@ -824,9 +824,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
824 } 824 }
825 825
826 /* the congestion map is in little endian order */ 826 /* the congestion map is in little endian order */
827 uncongested = le64_to_cpu(uncongested); 827 rds_cong_map_updated(map, le64_to_cpu(uncongested));
828
829 rds_cong_map_updated(map, uncongested);
830} 828}
831 829
832static void rds_ib_process_recv(struct rds_connection *conn, 830static void rds_ib_process_recv(struct rds_connection *conn,
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8aa2937b069f..fe96881a334d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
604 604
605 _enter(""); 605 _enter("");
606 606
607 if (list_empty(&rxnet->calls)) 607 if (!list_empty(&rxnet->calls)) {
608 return; 608 write_lock(&rxnet->call_lock);
609 609
610 write_lock(&rxnet->call_lock); 610 while (!list_empty(&rxnet->calls)) {
611 call = list_entry(rxnet->calls.next,
612 struct rxrpc_call, link);
613 _debug("Zapping call %p", call);
611 614
612 while (!list_empty(&rxnet->calls)) { 615 rxrpc_see_call(call);
613 call = list_entry(rxnet->calls.next, struct rxrpc_call, link); 616 list_del_init(&call->link);
614 _debug("Zapping call %p", call);
615 617
616 rxrpc_see_call(call); 618 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
617 list_del_init(&call->link); 619 call, atomic_read(&call->usage),
620 rxrpc_call_states[call->state],
621 call->flags, call->events);
618 622
619 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", 623 write_unlock(&rxnet->call_lock);
620 call, atomic_read(&call->usage), 624 cond_resched();
621 rxrpc_call_states[call->state], 625 write_lock(&rxnet->call_lock);
622 call->flags, call->events); 626 }
623 627
624 write_unlock(&rxnet->call_lock); 628 write_unlock(&rxnet->call_lock);
625 cond_resched();
626 write_lock(&rxnet->call_lock);
627 } 629 }
628 630
629 write_unlock(&rxnet->call_lock);
630
631 atomic_dec(&rxnet->nr_calls); 631 atomic_dec(&rxnet->nr_calls);
632 wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls)); 632 wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
633} 633}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1d143bc3f73d..4aa03588f87b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
1112} 1112}
1113 1113
1114 1114
1115/* Sent the next ASCONF packet currently stored in the association.
1116 * This happens after the ASCONF_ACK was succeffully processed.
1117 */
1118static void sctp_cmd_send_asconf(struct sctp_association *asoc)
1119{
1120 struct net *net = sock_net(asoc->base.sk);
1121
1122 /* Send the next asconf chunk from the addip chunk
1123 * queue.
1124 */
1125 if (!list_empty(&asoc->addip_chunk_list)) {
1126 struct list_head *entry = asoc->addip_chunk_list.next;
1127 struct sctp_chunk *asconf = list_entry(entry,
1128 struct sctp_chunk, list);
1129 list_del_init(entry);
1130
1131 /* Hold the chunk until an ASCONF_ACK is received. */
1132 sctp_chunk_hold(asconf);
1133 if (sctp_primitive_ASCONF(net, asoc, asconf))
1134 sctp_chunk_free(asconf);
1135 else
1136 asoc->addip_last_asconf = asconf;
1137 }
1138}
1139
1140
1141/* These three macros allow us to pull the debugging code out of the 1115/* These three macros allow us to pull the debugging code out of the
1142 * main flow of sctp_do_sm() to keep attention focused on the real 1116 * main flow of sctp_do_sm() to keep attention focused on the real
1143 * functionality there. 1117 * functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
1783 } 1757 }
1784 sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp); 1758 sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
1785 break; 1759 break;
1786 case SCTP_CMD_SEND_NEXT_ASCONF:
1787 sctp_cmd_send_asconf(asoc);
1788 break;
1789 case SCTP_CMD_PURGE_ASCONF_QUEUE: 1760 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1790 sctp_asconf_queue_teardown(asoc); 1761 sctp_asconf_queue_teardown(asoc);
1791 break; 1762 break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7dfc34b28f4f..e3f4abe6134e 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
3824 return SCTP_DISPOSITION_CONSUME; 3824 return SCTP_DISPOSITION_CONSUME;
3825} 3825}
3826 3826
3827static enum sctp_disposition sctp_send_next_asconf(
3828 struct net *net,
3829 const struct sctp_endpoint *ep,
3830 struct sctp_association *asoc,
3831 const union sctp_subtype type,
3832 struct sctp_cmd_seq *commands)
3833{
3834 struct sctp_chunk *asconf;
3835 struct list_head *entry;
3836
3837 if (list_empty(&asoc->addip_chunk_list))
3838 return SCTP_DISPOSITION_CONSUME;
3839
3840 entry = asoc->addip_chunk_list.next;
3841 asconf = list_entry(entry, struct sctp_chunk, list);
3842
3843 list_del_init(entry);
3844 sctp_chunk_hold(asconf);
3845 asoc->addip_last_asconf = asconf;
3846
3847 return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
3848}
3849
3827/* 3850/*
3828 * ADDIP Section 4.3 General rules for address manipulation 3851 * ADDIP Section 4.3 General rules for address manipulation
3829 * When building TLV parameters for the ASCONF Chunk that will add or 3852 * When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
3915 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 3938 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3916 3939
3917 if (!sctp_process_asconf_ack((struct sctp_association *)asoc, 3940 if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
3918 asconf_ack)) { 3941 asconf_ack))
3919 /* Successfully processed ASCONF_ACK. We can 3942 return sctp_send_next_asconf(net, ep,
3920 * release the next asconf if we have one. 3943 (struct sctp_association *)asoc,
3921 */ 3944 type, commands);
3922 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
3923 SCTP_NULL());
3924 return SCTP_DISPOSITION_CONSUME;
3925 }
3926 3945
3927 abort = sctp_make_abort(asoc, asconf_ack, 3946 abort = sctp_make_abort(asoc, asconf_ack,
3928 sizeof(struct sctp_errhdr)); 3947 sizeof(struct sctp_errhdr));
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 26f26e71ef3f..e225c81e6b35 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -580,7 +580,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
580static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb) 580static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
581{ 581{
582 struct strp_msg *rxm = strp_msg(skb); 582 struct strp_msg *rxm = strp_msg(skb);
583 int err = 0, offset = rxm->offset, copy, nsg; 583 int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
584 struct sk_buff *skb_iter, *unused; 584 struct sk_buff *skb_iter, *unused;
585 struct scatterlist sg[1]; 585 struct scatterlist sg[1];
586 char *orig_buf, *buf; 586 char *orig_buf, *buf;
@@ -611,25 +611,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
611 else 611 else
612 err = 0; 612 err = 0;
613 613
614 copy = min_t(int, skb_pagelen(skb) - offset, 614 data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
615 rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
616 615
617 if (skb->decrypted) 616 if (skb_pagelen(skb) > offset) {
618 skb_store_bits(skb, offset, buf, copy); 617 copy = min_t(int, skb_pagelen(skb) - offset, data_len);
619 618
620 offset += copy; 619 if (skb->decrypted)
621 buf += copy; 620 skb_store_bits(skb, offset, buf, copy);
622 621
622 offset += copy;
623 buf += copy;
624 }
625
626 pos = skb_pagelen(skb);
623 skb_walk_frags(skb, skb_iter) { 627 skb_walk_frags(skb, skb_iter) {
624 copy = min_t(int, skb_iter->len, 628 int frag_pos;
625 rxm->full_len - offset + rxm->offset - 629
626 TLS_CIPHER_AES_GCM_128_TAG_SIZE); 630 /* Practically all frags must belong to msg if reencrypt
631 * is needed with current strparser and coalescing logic,
632 * but strparser may "get optimized", so let's be safe.
633 */
634 if (pos + skb_iter->len <= offset)
635 goto done_with_frag;
636 if (pos >= data_len + rxm->offset)
637 break;
638
639 frag_pos = offset - pos;
640 copy = min_t(int, skb_iter->len - frag_pos,
641 data_len + rxm->offset - offset);
627 642
628 if (skb_iter->decrypted) 643 if (skb_iter->decrypted)
629 skb_store_bits(skb_iter, offset, buf, copy); 644 skb_store_bits(skb_iter, frag_pos, buf, copy);
630 645
631 offset += copy; 646 offset += copy;
632 buf += copy; 647 buf += copy;
648done_with_frag:
649 pos += skb_iter->len;
633 } 650 }
634 651
635free_buf: 652free_buf:
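
The tls_device_reencrypt() rewrite keeps a running position while walking the fragment list: fragments that end before the copy offset are skipped, the walk stops once past the payload, and copies happen at fragment-relative offsets rather than absolute ones. A userspace sketch of that bounded walk, with plain arrays standing in for the skb frag list:

    #include <stdio.h>
    #include <string.h>

    static void copy_range(char *frags[], int flen[], int nfrags,
                           int offset, int data_len, const char *src)
    {
        int pos = 0, copied = 0;

        for (int i = 0; i < nfrags; i++) {
            int frag_pos, copy;

            if (pos + flen[i] <= offset)       /* frag entirely before range */
                goto next;
            if (pos >= offset + data_len)      /* past the payload: done */
                break;

            frag_pos = offset + copied - pos;  /* offset inside this frag */
            copy = flen[i] - frag_pos;
            if (copy > data_len - copied)
                copy = data_len - copied;

            memcpy(frags[i] + frag_pos, src + copied, copy);
            copied += copy;
    next:
            pos += flen[i];
        }
    }

    int main(void)
    {
        char a[4] = "....", b[4] = "....";
        char *frags[] = { a, b };
        int flen[] = { 4, 4 };

        copy_range(frags, flen, 2, 2, 4, "XXXX");
        printf("%.4s%.4s\n", a, b);     /* ..XXXX.. */
        return 0;
    }
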
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index a3ebd4b02714..c3a5fe624b4e 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -201,13 +201,14 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
201 201
202 skb_put(nskb, skb->len); 202 skb_put(nskb, skb->len);
203 memcpy(nskb->data, skb->data, headln); 203 memcpy(nskb->data, skb->data, headln);
204 update_chksum(nskb, headln);
205 204
206 nskb->destructor = skb->destructor; 205 nskb->destructor = skb->destructor;
207 nskb->sk = sk; 206 nskb->sk = sk;
208 skb->destructor = NULL; 207 skb->destructor = NULL;
209 skb->sk = NULL; 208 skb->sk = NULL;
210 209
210 update_chksum(nskb, headln);
211
211 delta = nskb->truesize - skb->truesize; 212 delta = nskb->truesize - skb->truesize;
212 if (likely(delta < 0)) 213 if (likely(delta < 0))
213 WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc)); 214 WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 816425ffe05a..4831ad745f91 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3769,10 +3769,9 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
3769 /* 3769 /*
3770 * The last request may have been received before this 3770 * The last request may have been received before this
3771 * registration call. Call the driver notifier if 3771 * registration call. Call the driver notifier if
3772 * initiator is USER and user type is CELL_BASE. 3772 * initiator is USER.
3773 */ 3773 */
3774 if (lr->initiator == NL80211_REGDOM_SET_BY_USER && 3774 if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
3775 lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
3776 reg_call_notifier(wiphy, lr); 3775 reg_call_notifier(wiphy, lr);
3777 } 3776 }
3778 3777
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index b9f118530db6..ad3a2555c517 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
70 return NULL; 70 return NULL;
71} 71}
72 72
73static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb) 73static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
74 unsigned short family)
74{ 75{
75 struct xfrmi_net *xfrmn; 76 struct xfrmi_net *xfrmn;
76 int ifindex;
77 struct xfrm_if *xi; 77 struct xfrm_if *xi;
78 int ifindex = 0;
78 79
79 if (!secpath_exists(skb) || !skb->dev) 80 if (!secpath_exists(skb) || !skb->dev)
80 return NULL; 81 return NULL;
81 82
83 switch (family) {
84 case AF_INET6:
85 ifindex = inet6_sdif(skb);
86 break;
87 case AF_INET:
88 ifindex = inet_sdif(skb);
89 break;
90 }
91 if (!ifindex)
92 ifindex = skb->dev->ifindex;
93
82 xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id); 94 xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
83 ifindex = skb->dev->ifindex;
84 95
85 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { 96 for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
86 if (ifindex == xi->dev->ifindex && 97 if (ifindex == xi->dev->ifindex &&
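
The xfrmi_decode_session() change picks the lookup key per family: for traffic received through an L3 master (VRF) device, the recorded slave ifindex (sdif) identifies the real interface, and the receiving device is only a fallback when no sdif exists. A small sketch of that selection:

    #include <stdio.h>

    enum fam { FAM_INET, FAM_INET6 };

    static int pick_ifindex(enum fam family, int sdif4, int sdif6,
                            int dev_ifindex)
    {
        int ifindex = 0;

        switch (family) {
        case FAM_INET6: ifindex = sdif6; break;
        case FAM_INET:  ifindex = sdif4; break;
        }
        return ifindex ? ifindex : dev_ifindex;  /* fall back to skb->dev */
    }

    int main(void)
    {
        printf("vrf=%d plain=%d\n",
               pick_ifindex(FAM_INET, 7, 0, 3),
               pick_ifindex(FAM_INET, 0, 0, 3));
        return 0;
    }
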
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 03b6bf85d70b..410233c5681e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3519,7 +3519,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3519 ifcb = xfrm_if_get_cb(); 3519 ifcb = xfrm_if_get_cb();
3520 3520
3521 if (ifcb) { 3521 if (ifcb) {
3522 xi = ifcb->decode_session(skb); 3522 xi = ifcb->decode_session(skb, family);
3523 if (xi) { 3523 if (xi) {
3524 if_id = xi->p.if_id; 3524 if_id = xi->p.if_id;
3525 net = xi->net; 3525 net = xi->net;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index ed25eb81aabe..3edbf4b26116 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2340,7 +2340,7 @@ void xfrm_state_fini(struct net *net)
2340 2340
2341 flush_work(&net->xfrm.state_hash_work); 2341 flush_work(&net->xfrm.state_hash_work);
2342 flush_work(&xfrm_state_gc_work); 2342 flush_work(&xfrm_state_gc_work);
2343 xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); 2343 xfrm_state_flush(net, 0, false, true);
2344 2344
2345 WARN_ON(!list_empty(&net->xfrm.state_all)); 2345 WARN_ON(!list_empty(&net->xfrm.state_all));
2346 2346
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d7cb16f0df5b..eb8d14389601 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1424 ret = verify_policy_dir(p->dir); 1424 ret = verify_policy_dir(p->dir);
1425 if (ret) 1425 if (ret)
1426 return ret; 1426 return ret;
1427 if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir)) 1427 if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
1428 return -EINVAL; 1428 return -EINVAL;
1429 1429
1430 return 0; 1430 return 0;
@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1513 return -EINVAL; 1513 return -EINVAL;
1514 } 1514 }
1515 1515
1516 switch (ut[i].id.proto) { 1516 if (!xfrm_id_proto_valid(ut[i].id.proto))
1517 case IPPROTO_AH:
1518 case IPPROTO_ESP:
1519 case IPPROTO_COMP:
1520#if IS_ENABLED(CONFIG_IPV6)
1521 case IPPROTO_ROUTING:
1522 case IPPROTO_DSTOPTS:
1523#endif
1524 case IPSEC_PROTO_ANY:
1525 break;
1526 default:
1527 return -EINVAL; 1517 return -EINVAL;
1528 }
1529
1530 } 1518 }
1531 1519
1532 return 0; 1520 return 0;
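
Both the af_key hunk earlier and this validate_tmpl() hunk funnel protocol checks through one xfrm_id_proto_valid() helper instead of duplicating open-coded switches; the verify_newpolicy_info() hunk similarly replaces a hand-rolled bitmask with the existing xfrm_policy_id2dir() helper. A sketch of what such a predicate looks like; the numeric values mirror the IPPROTO_* constants, and the IPv6-only cases are compile-time conditional in the kernel:

    #include <stdbool.h>
    #include <stdio.h>

    static bool id_proto_valid(unsigned char proto)
    {
        switch (proto) {
        case 50:    /* IPPROTO_ESP */
        case 51:    /* IPPROTO_AH */
        case 108:   /* IPPROTO_COMP */
        case 43:    /* IPPROTO_ROUTING, IPv6 builds only */
        case 60:    /* IPPROTO_DSTOPTS, IPv6 builds only */
        case 255:   /* IPSEC_PROTO_ANY */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("esp=%d tcp=%d\n", id_proto_valid(50), id_proto_valid(6));
        return 0;
    }
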
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
index 1ceedea847dd..544ca126a8a8 100644
--- a/scripts/selinux/genheaders/genheaders.c
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -9,7 +9,6 @@
9#include <string.h> 9#include <string.h>
10#include <errno.h> 10#include <errno.h>
11#include <ctype.h> 11#include <ctype.h>
12#include <sys/socket.h>
13 12
14struct security_class_mapping { 13struct security_class_mapping {
15 const char *name; 14 const char *name;
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index 073fe7537f6c..6d51b74bc679 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -32,7 +32,6 @@
32#include <stdlib.h> 32#include <stdlib.h>
33#include <unistd.h> 33#include <unistd.h>
34#include <string.h> 34#include <string.h>
35#include <sys/socket.h>
36 35
37static void usage(char *name) 36static void usage(char *name)
38{ 37{
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index bd5fe0d3204a..201f7e588a29 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -1,5 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#include <linux/capability.h> 2#include <linux/capability.h>
3#include <linux/socket.h>
3 4
4#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ 5#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
5 "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" 6 "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e951d45c0131..3ec82904ccec 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1163,6 +1163,9 @@ static int do_create(int argc, char **argv)
1163 return -1; 1163 return -1;
1164 } 1164 }
1165 NEXT_ARG(); 1165 NEXT_ARG();
1166 } else {
1167 p_err("unknown arg %s", *argv);
1168 return -1;
1166 } 1169 }
1167 } 1170 }
1168 1171
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index 7d9e182a1f51..d9e9dec04605 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -2,3 +2,4 @@ libbpf_version.h
2libbpf.pc 2libbpf.pc
3FEATURE-DUMP.libbpf 3FEATURE-DUMP.libbpf
4test_libbpf 4test_libbpf
5libbpf.so.*
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index fb11240b758b..9093a8f64dc6 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -375,6 +375,31 @@
375 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 375 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
376}, 376},
377{ 377{
378 "calls: ptr null check in subprog",
379 .insns = {
380 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
381 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
383 BPF_LD_MAP_FD(BPF_REG_1, 0),
384 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
385 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
386 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
388 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
389 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
390 BPF_EXIT_INSN(),
391 BPF_MOV64_IMM(BPF_REG_0, 0),
392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
393 BPF_MOV64_IMM(BPF_REG_0, 1),
394 BPF_EXIT_INSN(),
395 },
396 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
397 .fixup_map_hash_48b = { 3 },
398 .result_unpriv = REJECT,
399 .result = ACCEPT,
400 .retval = 0,
401},
402{
378 "calls: two calls with args", 403 "calls: two calls with args",
379 .insns = { 404 .insns = {
380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), 405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index e3fc22e672c2..d5c596fdc4b9 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -631,3 +631,25 @@
631 .errstr = "invalid access to packet", 631 .errstr = "invalid access to packet",
632 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 632 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
633}, 633},
634{
635 "direct packet access: test29 (reg > pkt_end in subprog)",
636 .insns = {
637 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
638 offsetof(struct __sk_buff, data)),
639 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
640 offsetof(struct __sk_buff, data_end)),
641 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
643 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
645 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
646 BPF_MOV64_IMM(BPF_REG_0, 0),
647 BPF_EXIT_INSN(),
648 BPF_MOV64_IMM(BPF_REG_0, 0),
649 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
650 BPF_MOV64_IMM(BPF_REG_0, 1),
651 BPF_EXIT_INSN(),
652 },
653 .result = ACCEPT,
654 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
655},
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index d4cfb6a7a086..4b7e107865bf 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -27,6 +27,7 @@ log_test()
27 nsuccess=$((nsuccess+1)) 27 nsuccess=$((nsuccess+1))
28 printf "\n TEST: %-50s [ OK ]\n" "${msg}" 28 printf "\n TEST: %-50s [ OK ]\n" "${msg}"
29 else 29 else
30 ret=1
30 nfail=$((nfail+1)) 31 nfail=$((nfail+1))
31 printf "\n TEST: %-50s [FAIL]\n" "${msg}" 32 printf "\n TEST: %-50s [FAIL]\n" "${msg}"
32 if [ "${PAUSE_ON_FAIL}" = "yes" ]; then 33 if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
@@ -147,8 +148,8 @@ fib_rule6_test()
147 148
148 fib_check_iproute_support "ipproto" "ipproto" 149 fib_check_iproute_support "ipproto" "ipproto"
149 if [ $? -eq 0 ]; then 150 if [ $? -eq 0 ]; then
150 match="ipproto icmp" 151 match="ipproto ipv6-icmp"
151 fib_rule6_test_match_n_redirect "$match" "$match" "ipproto icmp match" 152 fib_rule6_test_match_n_redirect "$match" "$match" "ipproto ipv6-icmp match"
152 fi 153 fi
153} 154}
154 155
@@ -245,4 +246,9 @@ setup
245run_fibrule_tests 246run_fibrule_tests
246cleanup 247cleanup
247 248
249if [ "$TESTS" != "none" ]; then
250 printf "\nTests passed: %3d\n" ${nsuccess}
251 printf "Tests failed: %3d\n" ${nfail}
252fi
253
248exit $ret 254exit $ret
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index f69d2ee29742..5019cdae5d0b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -2166,11 +2166,14 @@ TEST(detect_seccomp_filter_flags)
2166 SECCOMP_FILTER_FLAG_LOG, 2166 SECCOMP_FILTER_FLAG_LOG,
2167 SECCOMP_FILTER_FLAG_SPEC_ALLOW, 2167 SECCOMP_FILTER_FLAG_SPEC_ALLOW,
2168 SECCOMP_FILTER_FLAG_NEW_LISTENER }; 2168 SECCOMP_FILTER_FLAG_NEW_LISTENER };
2169 unsigned int flag, all_flags; 2169 unsigned int exclusive[] = {
2170 SECCOMP_FILTER_FLAG_TSYNC,
2171 SECCOMP_FILTER_FLAG_NEW_LISTENER };
2172 unsigned int flag, all_flags, exclusive_mask;
2170 int i; 2173 int i;
2171 long ret; 2174 long ret;
2172 2175
2173 /* Test detection of known-good filter flags */ 2176 /* Test detection of individual known-good filter flags */
2174 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { 2177 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
2175 int bits = 0; 2178 int bits = 0;
2176 2179
@@ -2197,16 +2200,29 @@ TEST(detect_seccomp_filter_flags)
2197 all_flags |= flag; 2200 all_flags |= flag;
2198 } 2201 }
2199 2202
2200 /* Test detection of all known-good filter flags */ 2203 /*
2201 ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL); 2204 * Test detection of all known-good filter flags combined. But
2202 EXPECT_EQ(-1, ret); 2205 * for the exclusive flags we need to mask them out and try them
2203 EXPECT_EQ(EFAULT, errno) { 2206 * individually for the "all flags" testing.
2204 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!", 2207 */
2205 all_flags); 2208 exclusive_mask = 0;
2209 for (i = 0; i < ARRAY_SIZE(exclusive); i++)
2210 exclusive_mask |= exclusive[i];
2211 for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
2212 flag = all_flags & ~exclusive_mask;
2213 flag |= exclusive[i];
2214
2215 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2216 EXPECT_EQ(-1, ret);
2217 EXPECT_EQ(EFAULT, errno) {
2218 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
2219 flag);
2220 }
2206 } 2221 }
2207 2222
2208 /* Test detection of an unknown filter flag */ 2223 /* Test detection of an unknown filter flag, without exclusives. */
2209 flag = -1; 2224 flag = -1;
2225 flag &= ~exclusive_mask;
2210 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2226 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2211 EXPECT_EQ(-1, ret); 2227 EXPECT_EQ(-1, ret);
2212 EXPECT_EQ(EINVAL, errno) { 2228 EXPECT_EQ(EINVAL, errno) {