 Documentation/virtual/kvm/cpuid.txt | 6
 MAINTAINERS | 2
 arch/arm/include/asm/kvm_mmu.h | 16
 arch/arm64/include/asm/kvm_mmu.h | 16
 arch/s390/configs/debug_defconfig | 9
 arch/s390/configs/performance_defconfig | 8
 arch/s390/crypto/crc32be-vx.S | 5
 arch/s390/crypto/crc32le-vx.S | 4
 arch/s390/include/asm/nospec-insn.h | 196
 arch/s390/include/asm/purgatory.h | 6
 arch/s390/kernel/Makefile | 1
 arch/s390/kernel/asm-offsets.c | 1
 arch/s390/kernel/base.S | 24
 arch/s390/kernel/entry.S | 105
 arch/s390/kernel/irq.c | 5
 arch/s390/kernel/mcount.S | 14
 arch/s390/kernel/nospec-branch.c | 44
 arch/s390/kernel/nospec-sysfs.c | 21
 arch/s390/kernel/perf_cpum_sf.c | 4
 arch/s390/kernel/reipl.S | 7
 arch/s390/kernel/swsusp.S | 10
 arch/s390/lib/mem.S | 19
 arch/s390/net/bpf_jit.S | 16
 arch/s390/net/bpf_jit_comp.c | 63
 arch/x86/include/uapi/asm/kvm_para.h | 2
 arch/x86/kernel/amd_nb.c | 6
 arch/x86/kernel/kvm.c | 8
 arch/x86/kvm/hyperv.c | 6
 arch/x86/kvm/vmx.c | 28
 arch/x86/kvm/x86.c | 26
 arch/x86/xen/mmu.c | 4
 arch/x86/xen/mmu_pv.c | 4
 drivers/cpufreq/Kconfig.arm | 2
 drivers/gpu/drm/drm_drv.c | 2
 drivers/gpu/drm/drm_dumb_buffers.c | 7
 drivers/gpu/drm/drm_file.c | 1
 drivers/gpu/drm/i915/i915_gem_userptr.c | 3
 drivers/gpu/drm/i915/i915_reg.h | 3
 drivers/gpu/drm/i915/intel_engine_cs.c | 4
 drivers/gpu/drm/i915/intel_lrc.c | 1
 drivers/gpu/drm/vc4/vc4_drv.c | 1
 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
 drivers/hwmon/Kconfig | 2
 drivers/hwmon/k10temp.c | 51
 drivers/md/bcache/debug.c | 4
 drivers/mtd/nand/raw/marvell_nand.c | 8
 drivers/s390/cio/qdio_setup.c | 12
 drivers/s390/cio/vfio_ccw_cp.c | 13
 drivers/scsi/aacraid/commsup.c | 8
 drivers/scsi/vmw_pvscsi.c | 2
 drivers/spi/spi-bcm-qspi.c | 28
 drivers/spi/spi-bcm2835aux.c | 5
 drivers/spi/spi-cadence.c | 8
 drivers/spi/spi-imx.c | 2
 drivers/spi/spi-pxa2xx.h | 2
 drivers/spi/spi-sh-msiof.c | 1
 drivers/usb/host/xhci-hub.c | 2
 drivers/usb/musb/musb_host.c | 5
 drivers/usb/musb/musb_host.h | 7
 drivers/usb/musb/musb_virthub.c | 25
 drivers/usb/usbip/stub.h | 2
 drivers/usb/usbip/stub_dev.c | 43
 drivers/usb/usbip/stub_main.c | 105
 fs/afs/addr_list.c | 25
 fs/afs/callback.c | 84
 fs/afs/cmservice.c | 67
 fs/afs/dir.c | 54
 fs/afs/file.c | 2
 fs/afs/flock.c | 6
 fs/afs/fsclient.c | 31
 fs/afs/inode.c | 19
 fs/afs/internal.h | 25
 fs/afs/rotate.c | 20
 fs/afs/rxrpc.c | 18
 fs/afs/security.c | 7
 fs/afs/server.c | 21
 fs/afs/server_list.c | 7
 fs/afs/super.c | 4
 fs/afs/write.c | 2
 fs/proc/base.c | 8
 include/linux/kvm_host.h | 8
 include/linux/mm.h | 1
 include/linux/mtd/map.h | 2
 include/linux/mtd/rawnand.h | 16
 include/trace/events/afs.h | 42
 include/trace/events/xen.h | 16
 lib/vsprintf.c | 26
 mm/gup.c | 3
 security/selinux/hooks.c | 50
 sound/core/control_compat.c | 3
 sound/pci/hda/hda_intel.c | 2
 sound/pci/hda/patch_realtek.c | 1
 sound/usb/mixer.c | 8
 sound/usb/stream.c | 9
 tools/testing/selftests/kvm/Makefile | 2
 tools/testing/selftests/kvm/include/test_util.h | 1
 tools/testing/selftests/kvm/lib/kvm_util.c | 16
 tools/testing/selftests/kvm/sync_regs_test.c | 40
 tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | 4
 virt/kvm/arm/vgic/vgic-debug.c | 5
 virt/kvm/arm/vgic/vgic-its.c | 34
 virt/kvm/arm/vgic/vgic-v3.c | 4
 virt/kvm/arm/vgic/vgic.c | 22
103 files changed, 1186 insertions, 576 deletions
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index d4f33eb805dd..ab022dcd0911 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
 
 flag || value || meaning
 ==================================================================================
-KVM_HINTS_DEDICATED || 0 || guest checks this feature bit to
- || || determine if there is vCPU pinning
- || || and there is no vCPU over-commitment,
+KVM_HINTS_REALTIME || 0 || guest checks this feature bit to
+ || || determine that vCPUs are never
+ || || preempted for an unlimited time,
 || || allowing optimizations
 ----------------------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 58b9861ccf99..92e47b5b0480 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12220,7 +12220,7 @@ F: Documentation/s390/vfio-ccw.txt
 F:	include/uapi/linux/vfio_ccw.h
 
 S390 ZCRYPT DRIVER
-M:	Harald Freudenberger <freude@de.ibm.com>
+M:	Harald Freudenberger <freude@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 707a1f06dc5d..f675162663f0 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 082110993647..6128992c2ded 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 6176fe9795ca..941d8cc6c9f5 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -261,9 +261,9 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_ARP=y
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -284,7 +284,7 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -305,7 +305,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
@@ -604,7 +604,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index c105bcc6d7a6..eb6f75f24208 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -259,9 +259,9 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_ARP=y
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -282,7 +282,7 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -303,7 +303,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index e8077f0971f8..2bf01ba44107 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -67,6 +68,8 @@
 
 .previous
 
+	GEN_BR_THUNK %r14
+
 .text
 /*
  * The CRC-32 function(s) use these calling conventions:
@@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
 
 .Ldone:
 	VLGVF %r2,%v2,3
-	br %r14
+	BR_EX %r14
 
 .previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index d8c67a58c0c5..7d6f568bd3ad 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -14,6 +14,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -76,6 +77,7 @@
 
 .previous
 
+	GEN_BR_THUNK %r14
 
 .text
 
@@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
 
 .Ldone:
 	VLGVF %r2,%v2,2
-	br %r14
+	BR_EX %r14
 
 .previous
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644
index 000000000000..a01f81186e86
--- /dev/null
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_NOSPEC_ASM_H
+#define _ASM_S390_NOSPEC_ASM_H
+
+#include <asm/alternative-asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/dwarf.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EXPOLINE
+
+_LC_BR_R1 = __LC_BR_R1
+
+/*
+ * The expoline macros are used to create thunks in the same format
+ * as gcc generates them. The 'comdat' section flag makes sure that
+ * the various thunks are merged into a single copy.
+ */
+	.macro __THUNK_PROLOG_NAME name
+	.pushsection .text.\name,"axG",@progbits,\name,comdat
+	.globl \name
+	.hidden \name
+	.type \name,@function
+\name:
+	CFI_STARTPROC
+	.endm
+
+	.macro __THUNK_EPILOG
+	CFI_ENDPROC
+	.popsection
+	.endm
+
+	.macro __THUNK_PROLOG_BR r1,r2
+	__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+	.endm
+
+	.macro __THUNK_PROLOG_BC d0,r1,r2
+	__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+	.endm
+
+	.macro __THUNK_BR r1,r2
+	jg __s390x_indirect_jump_r\r2\()use_r\r1
+	.endm
+
+	.macro __THUNK_BC d0,r1,r2
+	jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+	.endm
+
+	.macro __THUNK_BRASL r1,r2,r3
+	brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+	.endm
+
+	.macro __DECODE_RR expand,reg,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \reg,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r2
+	\expand \r1,\r2
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_RR failed"
+	.endif
+	.endm
+
+	.macro __DECODE_RRR expand,rsave,rtarget,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \rsave,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \rtarget,%r\r2
+	.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r3
+	\expand \r1,\r2,\r3
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_RRR failed"
+	.endif
+	.endm
+
+	.macro __DECODE_DRR expand,disp,reg,ruse
+	.set __decode_fail,1
+	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \reg,%r\r1
+	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+	.ifc \ruse,%r\r2
+	\expand \disp,\r1,\r2
+	.set __decode_fail,0
+	.endif
+	.endr
+	.endif
+	.endr
+	.if __decode_fail == 1
+	.error "__DECODE_DRR failed"
+	.endif
+	.endm
+
+	.macro __THUNK_EX_BR reg,ruse
+	# Be very careful when adding instructions to this macro!
+	# The ALTERNATIVE replacement code has a .+10 which targets
+	# the "br \reg" after the code has been patched.
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl 0,555f
+	j .
+#else
+	.ifc \reg,%r1
+	ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
+	j .
+	.else
+	larl \ruse,555f
+	ex 0,0(\ruse)
+	j .
+	.endif
+#endif
+555:	br \reg
+	.endm
+
+	.macro __THUNK_EX_BC disp,reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl 0,556f
+	j .
+#else
+	larl \ruse,556f
+	ex 0,0(\ruse)
+	j .
+#endif
+556:	b \disp(\reg)
+	.endm
+
+	.macro GEN_BR_THUNK reg,ruse=%r1
+	__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+	__THUNK_EX_BR \reg,\ruse
+	__THUNK_EPILOG
+	.endm
+
+	.macro GEN_B_THUNK disp,reg,ruse=%r1
+	__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
+	__THUNK_EX_BC \disp,\reg,\ruse
+	__THUNK_EPILOG
+	.endm
+
+	.macro BR_EX reg,ruse=%r1
+557:	__DECODE_RR __THUNK_BR,\reg,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long 557b-.
+	.popsection
+	.endm
+
+	.macro B_EX disp,reg,ruse=%r1
+558:	__DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long 558b-.
+	.popsection
+	.endm
+
+	.macro BASR_EX rsave,rtarget,ruse=%r1
+559:	__DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long 559b-.
+	.popsection
+	.endm
+
+#else
+	.macro GEN_BR_THUNK reg,ruse=%r1
+	.endm
+
+	.macro GEN_B_THUNK disp,reg,ruse=%r1
+	.endm
+
+	.macro BR_EX reg,ruse=%r1
+	br \reg
+	.endm
+
+	.macro B_EX disp,reg,ruse=%r1
+	b \disp(\reg)
+	.endm
+
+	.macro BASR_EX rsave,rtarget,ruse=%r1
+	basr \rsave,\rtarget
+	.endm
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h
index e297bcfc476f..6090670df51f 100644
--- a/arch/s390/include/asm/purgatory.h
+++ b/arch/s390/include/asm/purgatory.h
@@ -13,5 +13,11 @@
 
 int verify_sha256_digest(void);
 
+extern u64 kernel_entry;
+extern u64 kernel_type;
+
+extern u64 crash_start;
+extern u64 crash_size;
+
 #endif	/* __ASSEMBLY__ */
 #endif	/* _S390_PURGATORY_H_ */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 84ea6225efb4..f92dd8ed3884 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -65,6 +65,7 @@ obj-y += nospec-branch.o
 
 extra-y += head.o head64.o vmlinux.lds
 
+obj-$(CONFIG_SYSFS) += nospec-sysfs.o
 CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
 
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index eb2a5c0443cd..11aea745a2a6 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -181,6 +181,7 @@ int main(void)
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
 	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
+	OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f6c56009e822..b65874b0b412 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -9,18 +9,22 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/sigp.h>
 
+	GEN_BR_THUNK %r9
+	GEN_BR_THUNK %r14
+
 ENTRY(s390_base_mcck_handler)
 	basr %r13,0
 0:	lg %r15,__LC_PANIC_STACK	# load panic stack
 	aghi %r15,-STACK_FRAME_OVERHEAD
 	larl %r1,s390_base_mcck_handler_fn
-	lg %r1,0(%r1)
-	ltgr %r1,%r1
+	lg %r9,0(%r1)
+	ltgr %r9,%r9
 	jz 1f
-	basr %r14,%r1
+	BASR_EX %r14,%r9
 1:	la %r1,4095
 	lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
 	lpswe __LC_MCK_OLD_PSW
@@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
 	basr %r13,0
 0:	aghi %r15,-STACK_FRAME_OVERHEAD
 	larl %r1,s390_base_ext_handler_fn
-	lg %r1,0(%r1)
-	ltgr %r1,%r1
+	lg %r9,0(%r1)
+	ltgr %r9,%r9
 	jz 1f
-	basr %r14,%r1
+	BASR_EX %r14,%r9
 1:	lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni __LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpswe __LC_EXT_OLD_PSW
@@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
 	basr %r13,0
 0:	aghi %r15,-STACK_FRAME_OVERHEAD
 	larl %r1,s390_base_pgm_handler_fn
-	lg %r1,0(%r1)
-	ltgr %r1,%r1
+	lg %r9,0(%r1)
+	ltgr %r9,%r9
 	jz 1f
-	basr %r14,%r1
+	BASR_EX %r14,%r9
 	lmg %r0,%r15,__LC_SAVE_AREA_SYNC
 	lpswe __LC_PGM_OLD_PSW
 1:	lpswe disabled_wait_psw-0b(%r13)
@@ -117,7 +121,7 @@ ENTRY(diag308_reset)
 	larl %r4,.Lcontinue_psw	# Restore PSW flags
 	lpswe 0(%r4)
 .Lcontinue:
-	br %r14
+	BR_EX %r14
 .align 16
 .Lrestart_psw:
 	.long 0x00080000,0x80000000 + .Lrestart_part2
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3f22f139a041..f03402efab4b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -28,6 +28,7 @@
 #include <asm/setup.h>
 #include <asm/nmi.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
 
 __PT_R0 = __PT_GPRS
 __PT_R1 = __PT_GPRS + 8
@@ -183,67 +184,9 @@ _LPP_OFFSET = __LC_LPP
 		"jnz .+8; .long 0xb2e8d000", 82
 	.endm
 
-#ifdef CONFIG_EXPOLINE
-
-	.macro GEN_BR_THUNK name,reg,tmp
-	.section .text.\name,"axG",@progbits,\name,comdat
-	.globl \name
-	.hidden \name
-	.type \name,@function
-\name:
-	CFI_STARTPROC
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-	exrl 0,0f
-#else
-	larl \tmp,0f
-	ex 0,0(\tmp)
-#endif
-	j .
-0:	br \reg
-	CFI_ENDPROC
-	.endm
-
-	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
-	GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
-	GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
-
-	.macro BASR_R14_R9
-0:	brasl %r14,__s390x_indirect_jump_r1use_r9
-	.pushsection .s390_indirect_branches,"a",@progbits
-	.long 0b-.
-	.popsection
-	.endm
-
-	.macro BR_R1USE_R14
-0:	jg __s390x_indirect_jump_r1use_r14
-	.pushsection .s390_indirect_branches,"a",@progbits
-	.long 0b-.
-	.popsection
-	.endm
-
-	.macro BR_R11USE_R14
-0:	jg __s390x_indirect_jump_r11use_r14
-	.pushsection .s390_indirect_branches,"a",@progbits
-	.long 0b-.
-	.popsection
-	.endm
-
-#else /* CONFIG_EXPOLINE */
-
-	.macro BASR_R14_R9
-	basr %r14,%r9
-	.endm
-
-	.macro BR_R1USE_R14
-	br %r14
-	.endm
-
-	.macro BR_R11USE_R14
-	br %r14
-	.endm
-
-#endif /* CONFIG_EXPOLINE */
-
+	GEN_BR_THUNK %r9
+	GEN_BR_THUNK %r14
+	GEN_BR_THUNK %r14,%r11
 
 	.section .kprobes.text, "ax"
 .Ldummy:
@@ -260,7 +203,7 @@ _LPP_OFFSET = __LC_LPP
 ENTRY(__bpon)
 	.globl __bpon
 	BPON
-	BR_R1USE_R14
+	BR_EX %r14
 
 /*
  * Scheduler resume function, called by switch_to
@@ -284,7 +227,7 @@ ENTRY(__switch_to)
 	mvc __LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
 	lmg %r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
-	BR_R1USE_R14
+	BR_EX %r14
 
 .L__critical_start:
 
@@ -351,7 +294,7 @@ sie_exit:
 	xgr %r5,%r5
 	lmg %r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 	lg %r2,__SF_SIE_REASON(%r15)	# return exit reason code
-	BR_R1USE_R14
+	BR_EX %r14
 .Lsie_fault:
 	lghi %r14,-EFAULT
 	stg %r14,__SF_SIE_REASON(%r15)	# set exit reason code
@@ -410,7 +353,7 @@ ENTRY(system_call)
 	lgf %r9,0(%r8,%r10)	# get system call add.
 	TSTMSK __TI_flags(%r12),_TIF_TRACE
 	jnz .Lsysc_tracesys
-	BASR_R14_R9		# call sys_xxxx
+	BASR_EX %r14,%r9	# call sys_xxxx
 	stg %r2,__PT_R2(%r11)	# store return value
 
 .Lsysc_return:
@@ -595,7 +538,7 @@ ENTRY(system_call)
 	lmg %r3,%r7,__PT_R3(%r11)
 	stg %r7,STACK_FRAME_OVERHEAD(%r15)
 	lg %r2,__PT_ORIG_GPR2(%r11)
-	BASR_R14_R9		# call sys_xxx
+	BASR_EX %r14,%r9	# call sys_xxx
 	stg %r2,__PT_R2(%r11)	# store return value
 .Lsysc_tracenogo:
 	TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -619,7 +562,7 @@ ENTRY(ret_from_fork)
 	lmg %r9,%r10,__PT_R9(%r11)	# load gprs
 ENTRY(kernel_thread_starter)
 	la %r2,0(%r10)
-	BASR_R14_R9
+	BASR_EX %r14,%r9
 	j .Lsysc_tracenogo
 
 /*
@@ -701,7 +644,7 @@ ENTRY(pgm_check_handler)
 	je .Lpgm_return
 	lgf %r9,0(%r10,%r1)	# load address of handler routine
 	lgr %r2,%r11		# pass pointer to pt_regs
-	BASR_R14_R9		# branch to interrupt-handler
+	BASR_EX %r14,%r9	# branch to interrupt-handler
 .Lpgm_return:
 	LOCKDEP_SYS_EXIT
 	tm __PT_PSW+1(%r11),0x01	# returning to user ?
@@ -1019,7 +962,7 @@ ENTRY(psw_idle)
 	stpt __TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
 	lpswe __SF_EMPTY(%r15)
-	BR_R1USE_R14
+	BR_EX %r14
 .Lpsw_idle_end:
 
 /*
@@ -1061,7 +1004,7 @@ ENTRY(save_fpu_regs)
 .Lsave_fpu_regs_done:
 	oi __LC_CPU_FLAGS+7,_CIF_FPU
 .Lsave_fpu_regs_exit:
-	BR_R1USE_R14
+	BR_EX %r14
 .Lsave_fpu_regs_end:
 EXPORT_SYMBOL(save_fpu_regs)
 
@@ -1107,7 +1050,7 @@ load_fpu_regs:
 .Lload_fpu_regs_done:
 	ni __LC_CPU_FLAGS+7,255-_CIF_FPU
 .Lload_fpu_regs_exit:
-	BR_R1USE_R14
+	BR_EX %r14
 .Lload_fpu_regs_end:
 
 .L__critical_end:
@@ -1322,7 +1265,7 @@ cleanup_critical:
 	jl 0f
 	clg %r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
 	jl .Lcleanup_load_fpu_regs
-0:	BR_R11USE_R14
+0:	BR_EX %r14
 
 	.align 8
 .Lcleanup_table:
@@ -1358,7 +1301,7 @@ cleanup_critical:
 	ni __SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
 	lctlg %c1,%c1,__LC_USER_ASCE	# load primary asce
 	larl %r9,sie_exit		# skip forward to sie_exit
-	BR_R11USE_R14
+	BR_EX %r14
 #endif
 
 .Lcleanup_system_call:
@@ -1412,7 +1355,7 @@ cleanup_critical:
 	stg %r15,56(%r11)		# r15 stack pointer
 	# set new psw address and exit
 	larl %r9,.Lsysc_do_svc
-	BR_R11USE_R14
+	BR_EX %r14,%r11
.Lcleanup_system_call_insn:
 	.quad system_call
 	.quad .Lsysc_stmg
@@ -1424,7 +1367,7 @@ cleanup_critical:
 
 .Lcleanup_sysc_tif:
 	larl %r9,.Lsysc_tif
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 
 .Lcleanup_sysc_restore:
 	# check if stpt has been executed
@@ -1441,14 +1384,14 @@ cleanup_critical:
 	mvc 0(64,%r11),__PT_R8(%r9)
 	lmg %r0,%r7,__PT_R0(%r9)
 1:	lmg %r8,%r9,__LC_RETURN_PSW
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 .Lcleanup_sysc_restore_insn:
 	.quad .Lsysc_exit_timer
 	.quad .Lsysc_done - 4
 
 .Lcleanup_io_tif:
 	larl %r9,.Lio_tif
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 
 .Lcleanup_io_restore:
 	# check if stpt has been executed
@@ -1462,7 +1405,7 @@ cleanup_critical:
 	mvc 0(64,%r11),__PT_R8(%r9)
 	lmg %r0,%r7,__PT_R0(%r9)
 1:	lmg %r8,%r9,__LC_RETURN_PSW
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 .Lcleanup_io_restore_insn:
 	.quad .Lio_exit_timer
 	.quad .Lio_done - 4
@@ -1515,17 +1458,17 @@ cleanup_critical:
 	# prepare return psw
 	nihh %r8,0xfcfd		# clear irq & wait state bits
 	lg %r9,48(%r11)		# return from psw_idle
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 .Lcleanup_idle_insn:
 	.quad .Lpsw_idle_lpsw
 
 .Lcleanup_save_fpu_regs:
 	larl %r9,save_fpu_regs
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 
 .Lcleanup_load_fpu_regs:
 	larl %r9,load_fpu_regs
-	BR_R11USE_R14
+	BR_EX %r14,%r11
 
 /*
  * Integer constants
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 94f2099bceb0..3d17c41074ca 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
 		asm volatile("	la	15,0(%0)\n"
-			     "	basr	14,%2\n"
+			     "	brasl	14,__do_softirq\n"
 			     "	la	15,0(%1)\n"
-			     : : "a" (new), "a" (old),
-			         "a" (__do_softirq)
+			     : : "a" (new), "a" (old)
 			     : "0", "1", "2", "3", "4", "5", "14",
 			       "cc", "memory" );
 	} else {
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 82df7d80fab2..27110f3294ed 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -9,13 +9,17 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
 
+	GEN_BR_THUNK %r1
+	GEN_BR_THUNK %r14
+
 	.section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
-	br %r14
+	BR_EX %r14
 
 #define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
 #define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
@@ -23,7 +27,7 @@ ENTRY(ftrace_stub)
 #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
 
 ENTRY(_mcount)
-	br %r14
+	BR_EX %r14
 
 EXPORT_SYMBOL(_mcount)
 
@@ -53,7 +57,7 @@ ENTRY(ftrace_caller)
 #endif
 	lgr %r3,%r14
 	la %r5,STACK_PTREGS(%r15)
-	basr %r14,%r1
+	BASR_EX %r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller.
@@ -68,7 +72,7 @@ ftrace_graph_caller_end:
 #endif
 	lg %r1,(STACK_PTREGS_PSW+8)(%r15)
 	lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
-	br %r1
+	BR_EX %r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -81,6 +85,6 @@ ENTRY(return_to_handler)
 	aghi %r15,STACK_FRAME_OVERHEAD
 	lgr %r14,%r2
 	lmg %r2,%r5,32(%r15)
-	br %r14
+	BR_EX %r14
 
 #endif
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 46d49a11663f..8ad6a7128b3a 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/cpu.h>
 #include <asm/nospec-branch.h>
 
 static int __init nobp_setup_early(char *str)
@@ -44,24 +43,6 @@ static int __init nospec_report(void)
 }
 arch_initcall(nospec_report);
 
-#ifdef CONFIG_SYSFS
-ssize_t cpu_show_spectre_v1(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
-		return sprintf(buf, "Mitigation: execute trampolines\n");
-	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
-		return sprintf(buf, "Mitigation: limited branch prediction.\n");
-	return sprintf(buf, "Vulnerable\n");
-}
-#endif
-
 #ifdef CONFIG_EXPOLINE
 
 int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
@@ -112,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
 	s32 *epo;
 
 	/* Second part of the instruction replace is always a nop */
-	memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
 	for (epo = start; epo < end; epo++) {
 		instr = (u8 *) epo + *epo;
 		if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
@@ -133,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
 			br = thunk + (*(int *)(thunk + 2)) * 2;
 		else
 			continue;
-		if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+		/* Check for unconditional branch 0x07f? or 0x47f???? */
+		if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
 			continue;
+
+		memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
 		switch (type) {
 		case BRCL_EXPOLINE:
-			/* brcl to thunk, replace with br + nop */
 			insnbuf[0] = br[0];
 			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			if (br[0] == 0x47) {
+				/* brcl to b, replace with bc + nopr */
+				insnbuf[2] = br[2];
+				insnbuf[3] = br[3];
+			} else {
+				/* brcl to br, replace with bcr + nop */
+			}
 			break;
 		case BRASL_EXPOLINE:
-			/* brasl to thunk, replace with basr + nop */
-			insnbuf[0] = 0x0d;
 			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			if (br[0] == 0x47) {
+				/* brasl to b, replace with bas + nopr */
+				insnbuf[0] = 0x4d;
+				insnbuf[2] = br[2];
+				insnbuf[3] = br[3];
+			} else {
+				/* brasl to br, replace with basr + nop */
+				insnbuf[0] = 0x0d;
+			}
 			break;
 		}
 
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
new file mode 100644
index 000000000000..8affad5f18cb
--- /dev/null
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+		return sprintf(buf, "Mitigation: execute trampolines\n");
+	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+		return sprintf(buf, "Mitigation: limited branch prediction\n");
+	return sprintf(buf, "Vulnerable\n");
+}
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 1c9ddd7aa5ec..0292d68e7dde 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 */
 	rate = 0;
 	if (attr->freq) {
+		if (!attr->sample_freq) {
+			err = -EINVAL;
+			goto out;
+		}
 		rate = freq_to_sample_rate(&si, attr->sample_freq);
 		rate = hw_limit_rate(&si, rate);
 		attr->freq = 0;
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 73cc3750f0d3..7f14adf512c6 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -7,8 +7,11 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
+	GEN_BR_THUNK %r9
+
 #
 # Issue "store status" for the current CPU to its prefix page
 # and call passed function afterwards
@@ -67,9 +70,9 @@ ENTRY(store_status)
 	st %r4,0(%r1)
 	st %r5,4(%r1)
 	stg %r2,8(%r1)
-	lgr %r1,%r2
+	lgr %r9,%r2
 	lgr %r2,%r3
-	br %r1
+	BR_EX %r9
 
 	.section .bss
 	.align 8
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index e99187149f17..a049a7b9d6e8 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -13,6 +13,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
 /*
@@ -24,6 +25,8 @@
  * (see below) in the resume process.
  * This function runs with disabled interrupts.
  */
+	GEN_BR_THUNK %r14
+
 	.section .text
 ENTRY(swsusp_arch_suspend)
 	stmg %r6,%r15,__SF_GPRS(%r15)
@@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend)
 	spx 0x318(%r1)
 	lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi %r2,0
-	br %r14
+	BR_EX %r14
 
 /*
  * Restore saved memory image to correct place and restore register context.
@@ -197,11 +200,10 @@ pgm_check_entry:
 	larl %r15,init_thread_union
 	ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
 	larl %r2,.Lpanic_string
-	larl %r3,sclp_early_printk
 	lghi %r1,0
 	sam31
 	sigp %r1,%r0,SIGP_SET_ARCHITECTURE
-	basr %r14,%r3
+	brasl %r14,sclp_early_printk
 	larl %r3,.Ldisabled_wait_31
 	lpsw 0(%r3)
 4:
@@ -267,7 +269,7 @@ restore_registers:
 	/* Return 0 */
 	lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi %r2,0
-	br %r14
+	BR_EX %r14
 
 	.section .data..nosave,"aw",@progbits
 	.align 8
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 495c9c4bacc7..2311f15be9cf 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -7,6 +7,9 @@
 
 #include <linux/linkage.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
+
+	GEN_BR_THUNK %r14
 
 /*
  * void *memmove(void *dest, const void *src, size_t n)
@@ -33,14 +36,14 @@ ENTRY(memmove)
 .Lmemmove_forward_remainder:
 	larl %r5,.Lmemmove_mvc
 	ex %r4,0(%r5)
-	br %r14
+	BR_EX %r14
 .Lmemmove_reverse:
 	ic %r0,0(%r4,%r3)
 	stc %r0,0(%r4,%r1)
 	brctg %r4,.Lmemmove_reverse
 	ic %r0,0(%r4,%r3)
 	stc %r0,0(%r4,%r1)
-	br %r14
+	BR_EX %r14
 .Lmemmove_mvc:
 	mvc 0(1,%r1),0(%r3)
 EXPORT_SYMBOL(memmove)
@@ -77,7 +80,7 @@ ENTRY(memset)
 .Lmemset_clear_remainder:
 	larl %r3,.Lmemset_xc
 	ex %r4,0(%r3)
-	br %r14
+	BR_EX %r14
 .Lmemset_fill:
 	cghi %r4,1
 	lgr %r1,%r2
@@ -95,10 +98,10 @@ ENTRY(memset)
 	stc %r3,0(%r1)
 	larl %r5,.Lmemset_mvc
 	ex %r4,0(%r5)
-	br %r14
+	BR_EX %r14
 .Lmemset_fill_exit:
 	stc %r3,0(%r1)
-	br %r14
+	BR_EX %r14
 .Lmemset_xc:
 	xc 0(1,%r1),0(%r1)
 .Lmemset_mvc:
@@ -121,7 +124,7 @@ ENTRY(memcpy)
 .Lmemcpy_remainder:
 	larl %r5,.Lmemcpy_mvc
 	ex %r4,0(%r5)
-	br %r14
+	BR_EX %r14
 .Lmemcpy_loop:
 	mvc 0(256,%r1),0(%r3)
 	la %r1,256(%r1)
@@ -159,10 +162,10 @@ ENTRY(__memset\bits)
 	\insn %r3,0(%r1)
 	larl %r5,.L__memset_mvc\bits
 	ex %r4,0(%r5)
-	br %r14
+	BR_EX %r14
 .L__memset_exit\bits:
 	\insn %r3,0(%r2)
-	br %r14
+	BR_EX %r14
 .L__memset_mvc\bits:
 	mvc \bytes(1,%r1),0(%r1)
 .endm
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index 25bb4643c4f4..9f794869c1b0 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -9,6 +9,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include "bpf_jit.h"
 
 /*
@@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \
 	clg %r3,STK_OFF_HLEN(%r15);	/* Offset + SIZE > hlen? */	\
 	jh sk_load_##NAME##_slow;					\
 	LOAD %r14,-SIZE(%r3,%r12);	/* Get data from skb */		\
-	b OFF_OK(%r6);			/* Return */			\
+	B_EX OFF_OK,%r6;		/* Return */			\
 									\
 sk_load_##NAME##_slow:;							\
 	lgr %r2,%r7;			/* Arg1 = skb pointer */	\
@@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \
 	brasl %r14,skb_copy_bits;	/* Get data from skb */		\
 	LOAD %r14,STK_OFF_TMP(%r15);	/* Load from temp bufffer */	\
 	ltgr %r2,%r2;			/* Set cc to (%r2 != 0) */	\
-	br %r6;				/* Return */
+	BR_EX %r6;			/* Return */
 
 sk_load_common(word, 4, llgf)	/* r14 = *(u32 *) (skb->data+offset) */
 sk_load_common(half, 2, llgh)	/* r14 = *(u16 *) (skb->data+offset) */
 
+	GEN_BR_THUNK %r6
+	GEN_B_THUNK OFF_OK,%r6
+
 /*
  * Load 1 byte from SKB (optimized version)
  */
@@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
 	clg %r3,STK_OFF_HLEN(%r15)	# Offset >= hlen?
 	jnl sk_load_byte_slow
 	llgc %r14,0(%r3,%r12)		# Get byte from skb
-	b OFF_OK(%r6)			# Return OK
+	B_EX OFF_OK,%r6			# Return OK
 
 sk_load_byte_slow:
 	lgr %r2,%r7			# Arg1 = skb pointer
@@ -90,7 +94,7 @@ sk_load_byte_slow:
 	brasl %r14,skb_copy_bits	# Get data from skb
 	llgc %r14,STK_OFF_TMP(%r15)	# Load result from temp buffer
 	ltgr %r2,%r2			# Set cc to (%r2 != 0)
-	br %r6				# Return cc
+	BR_EX %r6			# Return cc
 
 #define sk_negative_common(NAME, SIZE, LOAD) \
 sk_load_##NAME##_slow_neg:;						\
@@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \
 	jz bpf_error;							\
 	LOAD %r14,0(%r2);		/* Get data from pointer */	\
 	xr %r3,%r3;			/* Set cc to zero */		\
-	br %r6;				/* Return cc */
+	BR_EX %r6;			/* Return cc */
 
 sk_negative_common(word, 4, llgf)
 sk_negative_common(half, 2, llgh)
@@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
 bpf_error:
 # force a return 0 from jit handler
 	ltgr %r15,%r15	# Set condition code
-	br %r6
+	BR_EX %r6
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 78a19c93b380..dd2bcf0e7d00 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -25,6 +25,8 @@
 #include <linux/bpf.h>
 #include <asm/cacheflush.h>
 #include <asm/dis.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include "bpf_jit.h"
 
@@ -41,6 +43,8 @@ struct bpf_jit {
 	int base_ip;		/* Base address for literal pool */
 	int ret0_ip;		/* Address of return 0 */
 	int exit_ip;		/* Address of exit */
+	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
+	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
 	int tail_call_start;	/* Tail call start offset */
 	int labels[1];		/* Labels for local jumps */
 };
@@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 	REG_SET_SEEN(b2);					\
 })
 
+#define EMIT6_PCREL_RILB(op, b, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);	\
+	REG_SET_SEEN(b);					\
+})
+
+#define EMIT6_PCREL_RIL(op, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | rel >> 16, rel & 0xffff);			\
+})
+
 #define _EMIT6_IMM(op, imm)					\
 ({								\
 	unsigned int __imm = (imm);				\
@@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 	/* Restore registers */
 	save_restore_regs(jit, REGS_RESTORE, stack_depth);
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+		jit->r14_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r14 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,0(%r1) */
+			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
+		}
+		/* j . */
+		EMIT4_PCREL(0xa7f40000, 0);
+	}
 	/* br %r14 */
 	_EMIT2(0x07fe);
+
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+	    (jit->seen & SEEN_FUNC)) {
+		jit->r1_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r1 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+			/* br %r1 */
+			_EMIT2(0x07f1);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,S390_lowcore.br_r1_tampoline */
+			EMIT4_DISP(0x44000000, REG_0, REG_0,
+				   offsetof(struct lowcore, br_r1_trampoline));
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+		}
+	}
 }
 
 /*
@@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/* lg %w1,<d(imm)>(%l) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
 			      EMIT_CONST_U64(func));
-		/* basr %r14,%w1 */
-		EMIT2(0x0d00, REG_14, REG_W1);
+		if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+			/* brasl %r14,__s390_indirect_jump_r1 */
+			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+		} else {
+			/* basr %r14,%w1 */
+			EMIT2(0x0d00, REG_14, REG_W1);
+		}
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 		if ((jit->seen & SEEN_SKB) &&
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 4c851ebb3ceb..0ede697c3961 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -29,7 +29,7 @@
 #define KVM_FEATURE_PV_TLB_FLUSH	9
 #define KVM_FEATURE_ASYNC_PF_VMEXIT	10
 
-#define KVM_HINTS_DEDICATED	0
+#define KVM_HINTS_REALTIME	0
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index c88e0b127810..b481b95bd8f6 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -14,8 +14,11 @@
14#include <asm/amd_nb.h> 14#include <asm/amd_nb.h>
15 15
16#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 16#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
17#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
17#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 18#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
18#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 19#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
20#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
21#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
19 22
20/* Protect the PCI config register pairs used for SMN and DF indirect access. */ 23/* Protect the PCI config register pairs used for SMN and DF indirect access. */
21static DEFINE_MUTEX(smn_mutex); 24static DEFINE_MUTEX(smn_mutex);
@@ -24,6 +27,7 @@ static u32 *flush_words;
24 27
25static const struct pci_device_id amd_root_ids[] = { 28static const struct pci_device_id amd_root_ids[] = {
26 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, 29 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
30 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
27 {} 31 {}
28}; 32};
29 33
@@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
39 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 43 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
40 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 44 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
41 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 45 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
46 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
42 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, 47 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
43 {} 48 {}
44}; 49};
@@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
51 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 56 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
52 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, 57 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
53 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, 58 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
59 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
54 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, 60 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
55 {} 61 {}
56}; 62};
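
The new Family 17h model 10h root and data-fabric IDs are added to the northbridge tables so amd_smn_read() users (k10temp further down) can reach the SMU on those parts. A minimal sketch of matching one of these IDs against a pci_dev with a local table (example_m10h_ids and example_is_m10h_df_f3() are hypothetical):

    #include <linux/pci.h>

    #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb   /* value from the hunk above */

    static const struct pci_device_id example_m10h_ids[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
            { }
    };

    static bool example_is_m10h_df_f3(struct pci_dev *pdev)
    {
            return pci_match_id(example_m10h_ids, pdev) != NULL;
    }
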
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 7867417cfaff..5b2300b818af 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void)
457static void __init kvm_smp_prepare_cpus(unsigned int max_cpus) 457static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
458{ 458{
459 native_smp_prepare_cpus(max_cpus); 459 native_smp_prepare_cpus(max_cpus);
460 if (kvm_para_has_hint(KVM_HINTS_DEDICATED)) 460 if (kvm_para_has_hint(KVM_HINTS_REALTIME))
461 static_branch_disable(&virt_spin_lock_key); 461 static_branch_disable(&virt_spin_lock_key);
462} 462}
463 463
@@ -553,7 +553,7 @@ static void __init kvm_guest_init(void)
553 } 553 }
554 554
555 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && 555 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
556 !kvm_para_has_hint(KVM_HINTS_DEDICATED) && 556 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
557 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) 557 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
558 pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; 558 pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
559 559
@@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
649 int cpu; 649 int cpu;
650 650
651 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && 651 if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
652 !kvm_para_has_hint(KVM_HINTS_DEDICATED) && 652 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
653 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 653 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
654 for_each_possible_cpu(cpu) { 654 for_each_possible_cpu(cpu) {
655 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), 655 zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
@@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void)
745 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 745 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
746 return; 746 return;
747 747
748 if (kvm_para_has_hint(KVM_HINTS_DEDICATED)) 748 if (kvm_para_has_hint(KVM_HINTS_REALTIME))
749 return; 749 return;
750 750
751 __pv_init_lock_hash(); 751 __pv_init_lock_hash();
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 98618e397342..5708e951a5c6 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1265,7 +1265,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
1265 struct kvm_run *run = vcpu->run; 1265 struct kvm_run *run = vcpu->run;
1266 1266
1267 kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); 1267 kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
1268 return 1; 1268 return kvm_skip_emulated_instruction(vcpu);
1269} 1269}
1270 1270
1271static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) 1271static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
@@ -1296,8 +1296,10 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
1296 if (param & ~KVM_HYPERV_CONN_ID_MASK) 1296 if (param & ~KVM_HYPERV_CONN_ID_MASK)
1297 return HV_STATUS_INVALID_HYPERCALL_INPUT; 1297 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1298 1298
1299 /* conn_to_evt is protected by vcpu->kvm->srcu */ 1299 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
1300 rcu_read_lock();
1300 eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); 1301 eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
1302 rcu_read_unlock();
1301 if (!eventfd) 1303 if (!eventfd)
1302 return HV_STATUS_INVALID_PORT_ID; 1304 return HV_STATUS_INVALID_PORT_ID;
1303 1305
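
The conn_to_evt IDR is not itself protected by SRCU, so the lookup now runs inside an RCU read-side critical section. A minimal sketch of the lookup pattern, assuming the IDR entries are only freed after an RCU grace period (example_lookup() is hypothetical):

    #include <linux/eventfd.h>
    #include <linux/idr.h>
    #include <linux/rcupdate.h>

    static struct eventfd_ctx *example_lookup(struct idr *conn_to_evt, u32 conn_id)
    {
            struct eventfd_ctx *eventfd;

            rcu_read_lock();
            eventfd = idr_find(conn_to_evt, conn_id);
            rcu_read_unlock();

            /* The eventfd itself must be kept alive by something else
             * (here: vcpu->kvm->srcu) once the read lock is dropped. */
            return eventfd;
    }
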
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c7668806163f..3f1696570b41 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
1494 SECONDARY_EXEC_ENABLE_VMFUNC; 1494 SECONDARY_EXEC_ENABLE_VMFUNC;
1495} 1495}
1496 1496
1497static bool vmx_umip_emulated(void)
1498{
1499 return vmcs_config.cpu_based_2nd_exec_ctrl &
1500 SECONDARY_EXEC_DESC;
1501}
1502
1497static inline bool report_flexpriority(void) 1503static inline bool report_flexpriority(void)
1498{ 1504{
1499 return flexpriority_enabled; 1505 return flexpriority_enabled;
@@ -4761,14 +4767,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
4761 else 4767 else
4762 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; 4768 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
4763 4769
4764 if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) { 4770 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
4765 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 4771 if (cr4 & X86_CR4_UMIP) {
4766 SECONDARY_EXEC_DESC); 4772 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
4767 hw_cr4 &= ~X86_CR4_UMIP;
4768 } else if (!is_guest_mode(vcpu) ||
4769 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
4770 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
4771 SECONDARY_EXEC_DESC); 4773 SECONDARY_EXEC_DESC);
4774 hw_cr4 &= ~X86_CR4_UMIP;
4775 } else if (!is_guest_mode(vcpu) ||
4776 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
4777 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
4778 SECONDARY_EXEC_DESC);
4779 }
4772 4780
4773 if (cr4 & X86_CR4_VMXE) { 4781 if (cr4 & X86_CR4_VMXE) {
4774 /* 4782 /*
@@ -9497,12 +9505,6 @@ static bool vmx_xsaves_supported(void)
9497 SECONDARY_EXEC_XSAVES; 9505 SECONDARY_EXEC_XSAVES;
9498} 9506}
9499 9507
9500static bool vmx_umip_emulated(void)
9501{
9502 return vmcs_config.cpu_based_2nd_exec_ctrl &
9503 SECONDARY_EXEC_DESC;
9504}
9505
9506static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 9508static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
9507{ 9509{
9508 u32 exit_intr_info; 9510 u32 exit_intr_info;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51ecd381793b..59371de5d722 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
114static bool __read_mostly report_ignored_msrs = true; 114static bool __read_mostly report_ignored_msrs = true;
115module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); 115module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
116 116
117unsigned int min_timer_period_us = 500; 117unsigned int min_timer_period_us = 200;
118module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); 118module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
119 119
120static bool __read_mostly kvmclock_periodic_sync = true; 120static bool __read_mostly kvmclock_periodic_sync = true;
@@ -843,7 +843,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
843int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 843int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
844{ 844{
845#ifdef CONFIG_X86_64 845#ifdef CONFIG_X86_64
846 cr3 &= ~CR3_PCID_INVD; 846 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
847
848 if (pcid_enabled)
849 cr3 &= ~CR3_PCID_INVD;
847#endif 850#endif
848 851
849 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { 852 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
@@ -6671,12 +6674,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
6671int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 6674int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6672{ 6675{
6673 unsigned long nr, a0, a1, a2, a3, ret; 6676 unsigned long nr, a0, a1, a2, a3, ret;
6674 int op_64_bit, r; 6677 int op_64_bit;
6675
6676 r = kvm_skip_emulated_instruction(vcpu);
6677 6678
6678 if (kvm_hv_hypercall_enabled(vcpu->kvm)) 6679 if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
6679 return kvm_hv_hypercall(vcpu); 6680 if (!kvm_hv_hypercall(vcpu))
6681 return 0;
6682 goto out;
6683 }
6680 6684
6681 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 6685 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
6682 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 6686 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6697,7 +6701,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6697 6701
6698 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 6702 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
6699 ret = -KVM_EPERM; 6703 ret = -KVM_EPERM;
6700 goto out; 6704 goto out_error;
6701 } 6705 }
6702 6706
6703 switch (nr) { 6707 switch (nr) {
@@ -6717,12 +6721,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6717 ret = -KVM_ENOSYS; 6721 ret = -KVM_ENOSYS;
6718 break; 6722 break;
6719 } 6723 }
6720out: 6724out_error:
6721 if (!op_64_bit) 6725 if (!op_64_bit)
6722 ret = (u32)ret; 6726 ret = (u32)ret;
6723 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 6727 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
6728
6729out:
6724 ++vcpu->stat.hypercalls; 6730 ++vcpu->stat.hypercalls;
6725 return r; 6731 return kvm_skip_emulated_instruction(vcpu);
6726} 6732}
6727EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 6733EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
6728 6734
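
In the kvm_set_cr3() hunk above, the PCID no-flush bit (bit 63) is now stripped only when CR4.PCIDE is set; with PCIDs disabled, a CR3 value with that bit set must keep it so the later validity check can fault the write. A standalone sketch of that masking under simplified names (sanitize_cr3() and CR3_PCID_NOFLUSH are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR3_PCID_NOFLUSH (1ULL << 63)   /* CR3_PCID_INVD in the kernel */

    static uint64_t sanitize_cr3(uint64_t cr3, bool pcid_enabled)
    {
            /* Only a guest running with CR4.PCIDE=1 may legitimately set bit 63. */
            if (pcid_enabled)
                    cr3 &= ~CR3_PCID_NOFLUSH;
            return cr3;
    }

    int main(void)
    {
            uint64_t cr3 = CR3_PCID_NOFLUSH | 0x1000;

            printf("PCIDE=1: %#llx\n", (unsigned long long)sanitize_cr3(cr3, true));
            printf("PCIDE=0: %#llx\n", (unsigned long long)sanitize_cr3(cr3, false));
            return 0;
    }
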
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d33e7dbe3129..2d76106788a3 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
42} 42}
43EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); 43EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
44 44
45static void xen_flush_tlb_all(void) 45static noinline void xen_flush_tlb_all(void)
46{ 46{
47 struct mmuext_op *op; 47 struct mmuext_op *op;
48 struct multicall_space mcs; 48 struct multicall_space mcs;
49 49
50 trace_xen_mmu_flush_tlb_all(0);
51
52 preempt_disable(); 50 preempt_disable();
53 51
54 mcs = xen_mc_entry(sizeof(*op)); 52 mcs = xen_mc_entry(sizeof(*op));
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 486c0a34d00b..2c30cabfda90 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1310,13 +1310,11 @@ unsigned long xen_read_cr2_direct(void)
1310 return this_cpu_read(xen_vcpu_info.arch.cr2); 1310 return this_cpu_read(xen_vcpu_info.arch.cr2);
1311} 1311}
1312 1312
1313static void xen_flush_tlb(void) 1313static noinline void xen_flush_tlb(void)
1314{ 1314{
1315 struct mmuext_op *op; 1315 struct mmuext_op *op;
1316 struct multicall_space mcs; 1316 struct multicall_space mcs;
1317 1317
1318 trace_xen_mmu_flush_tlb(0);
1319
1320 preempt_disable(); 1318 preempt_disable();
1321 1319
1322 mcs = xen_mc_entry(sizeof(*op)); 1320 mcs = xen_mc_entry(sizeof(*op));
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index de55c7d57438..96b35b8b3606 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
20 20
21config ARM_ARMADA_37XX_CPUFREQ 21config ARM_ARMADA_37XX_CPUFREQ
22 tristate "Armada 37xx CPUFreq support" 22 tristate "Armada 37xx CPUFreq support"
23 depends on ARCH_MVEBU 23 depends on ARCH_MVEBU && CPUFREQ_DT
24 help 24 help
25 This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. 25 This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
26 The Armada 37xx PMU supports 4 frequency and VDD levels. 26 The Armada 37xx PMU supports 4 frequency and VDD levels.
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a1b9338736e3..c2c21d839727 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev)
716 if (!minor) 716 if (!minor)
717 return; 717 return;
718 718
719 name = kasprintf(GFP_KERNEL, "controlD%d", minor->index); 719 name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
720 if (!name) 720 if (!name)
721 return; 721 return;
722 722
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 39ac15ce4702..9e2ae02f31e0 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
65 return -EINVAL; 65 return -EINVAL;
66 66
67 /* overflow checks for 32bit size calculations */ 67 /* overflow checks for 32bit size calculations */
68 /* NOTE: DIV_ROUND_UP() can overflow */ 68 if (args->bpp > U32_MAX - 8)
69 return -EINVAL;
69 cpp = DIV_ROUND_UP(args->bpp, 8); 70 cpp = DIV_ROUND_UP(args->bpp, 8);
70 if (!cpp || cpp > 0xffffffffU / args->width) 71 if (cpp > U32_MAX / args->width)
71 return -EINVAL; 72 return -EINVAL;
72 stride = cpp * args->width; 73 stride = cpp * args->width;
73 if (args->height > 0xffffffffU / stride) 74 if (args->height > U32_MAX / stride)
74 return -EINVAL; 75 return -EINVAL;
75 76
76 /* test for wrap-around */ 77 /* test for wrap-around */
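
The reworked checks make sure DIV_ROUND_UP(bpp, 8) itself cannot wrap before the cpp * width and stride * height products are range-checked. A standalone sketch of the same check chain (compute_stride() is illustrative; DIV_ROUND_UP mirrors the kernel macro):

    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int compute_stride(uint32_t bpp, uint32_t width, uint32_t height,
                              uint32_t *stride_out)
    {
            uint32_t cpp, stride;

            if (!bpp || !width || !height)
                    return -1;
            if (bpp > UINT32_MAX - 8)           /* DIV_ROUND_UP() would wrap */
                    return -1;
            cpp = DIV_ROUND_UP(bpp, 8);
            if (cpp > UINT32_MAX / width)       /* cpp * width would wrap */
                    return -1;
            stride = cpp * width;
            if (height > UINT32_MAX / stride)   /* stride * height would wrap */
                    return -1;
            *stride_out = stride;
            return 0;
    }
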
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index e394799979a6..6d9b9453707c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
212 return -ENOMEM; 212 return -ENOMEM;
213 213
214 filp->private_data = priv; 214 filp->private_data = priv;
215 filp->f_mode |= FMODE_UNSIGNED_OFFSET;
215 priv->filp = filp; 216 priv->filp = filp;
216 priv->pid = get_pid(task_pid(current)); 217 priv->pid = get_pid(task_pid(current));
217 priv->minor = minor; 218 priv->minor = minor;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d596a8302ca3..854bd51b9478 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
778 I915_USERPTR_UNSYNCHRONIZED)) 778 I915_USERPTR_UNSYNCHRONIZED))
779 return -EINVAL; 779 return -EINVAL;
780 780
781 if (!args->user_size)
782 return -EINVAL;
783
781 if (offset_in_page(args->user_ptr | args->user_size)) 784 if (offset_in_page(args->user_ptr | args->user_size))
782 return -EINVAL; 785 return -EINVAL;
783 786
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e6a8c0ee7df1..8a69a9275e28 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7326,6 +7326,9 @@ enum {
7326#define SLICE_ECO_CHICKEN0 _MMIO(0x7308) 7326#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
7327#define PIXEL_MASK_CAMMING_DISABLE (1 << 14) 7327#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
7328 7328
7329#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
7330#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
7331
7329/* WaCatErrorRejectionIssue */ 7332/* WaCatErrorRejectionIssue */
7330#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) 7333#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
7331#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 7334#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4ba139c27fba..f7c25828d3bb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1149,6 +1149,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
1149 WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, 1149 WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
1150 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); 1150 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
1151 1151
1152 /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
1153 if (IS_GEN9_LP(dev_priv))
1154 WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
1155
1152 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ 1156 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1153 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); 1157 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
1154 if (ret) 1158 if (ret)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e3a5f673ff67..8704f7f8d072 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -884,6 +884,7 @@ static void execlists_submission_tasklet(unsigned long data)
884 884
885 head = execlists->csb_head; 885 head = execlists->csb_head;
886 tail = READ_ONCE(buf[write_idx]); 886 tail = READ_ONCE(buf[write_idx]);
887 rmb(); /* Hopefully paired with a wmb() in HW */
887 } 888 }
888 GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", 889 GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
889 engine->name, 890 engine->name,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 94b99c90425a..7c95ed5c5cac 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -130,6 +130,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
130 struct vc4_file *vc4file = file->driver_priv; 130 struct vc4_file *vc4file = file->driver_priv;
131 131
132 vc4_perfmon_close_file(vc4file); 132 vc4_perfmon_close_file(vc4file);
133 kfree(vc4file);
133} 134}
134 135
135static const struct vm_operations_struct vc4_vm_ops = { 136static const struct vm_operations_struct vc4_vm_ops = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 648f8127f65a..3d667e903beb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
482 return ret; 482 return ret;
483 } 483 }
484 484
485 vps->dmabuf_size = size;
486
485 /* 487 /*
486 * TTM already thinks the buffer is pinned, but make sure the 488 * TTM already thinks the buffer is pinned, but make sure the
487 * pin_count is upped. 489 * pin_count is upped.
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f249a4428458..6ec307c93ece 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -272,7 +272,7 @@ config SENSORS_K8TEMP
272 272
273config SENSORS_K10TEMP 273config SENSORS_K10TEMP
274 tristate "AMD Family 10h+ temperature sensor" 274 tristate "AMD Family 10h+ temperature sensor"
275 depends on X86 && PCI 275 depends on X86 && PCI && AMD_NB
276 help 276 help
277 If you say yes here you get support for the temperature 277 If you say yes here you get support for the temperature
278 sensor(s) inside your CPU. Supported are later revisions of 278 sensor(s) inside your CPU. Supported are later revisions of
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d2cc55e21374..3b73dee6fdc6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <asm/amd_nb.h>
26#include <asm/processor.h> 27#include <asm/processor.h>
27 28
28MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); 29MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 41#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
41#endif 42#endif
42 43
43#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB 44#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
44#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0 45#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
45#endif 46#endif
46 47
47/* CPUID function 0x80000001, ebx */ 48/* CPUID function 0x80000001, ebx */
@@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
63#define NB_CAP_HTC 0x00000400 64#define NB_CAP_HTC 0x00000400
64 65
65/* 66/*
66 * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE 67 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
67 * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature 68 * and REG_REPORTED_TEMPERATURE have been moved to
68 * Control] 69 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
70 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
69 */ 71 */
72#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
70#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 73#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
71 74
72/* F17h M01h Access through SMN */ 75/* F17h M01h Access through SMN */
@@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
74 77
75struct k10temp_data { 78struct k10temp_data {
76 struct pci_dev *pdev; 79 struct pci_dev *pdev;
80 void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
77 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); 81 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
78 int temp_offset; 82 int temp_offset;
79 u32 temp_adjust_mask; 83 u32 temp_adjust_mask;
@@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = {
98 { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, 102 { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
99}; 103};
100 104
105static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
106{
107 pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
108}
109
101static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval) 110static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
102{ 111{
103 pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval); 112 pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
@@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
114 mutex_unlock(&nb_smu_ind_mutex); 123 mutex_unlock(&nb_smu_ind_mutex);
115} 124}
116 125
126static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
127{
128 amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
129 F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
130}
131
117static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) 132static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
118{ 133{
119 amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, 134 amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
@@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
122 137
123static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) 138static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
124{ 139{
125 amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60, 140 amd_smn_read(amd_pci_dev_to_node_id(pdev),
126 F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); 141 F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
127} 142}
128 143
129static ssize_t temp1_input_show(struct device *dev, 144static ssize_t temp1_input_show(struct device *dev,
@@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev,
160 u32 regval; 175 u32 regval;
161 int value; 176 int value;
162 177
163 pci_read_config_dword(data->pdev, 178 data->read_htcreg(data->pdev, &regval);
164 REG_HARDWARE_THERMAL_CONTROL, &regval);
165 value = ((regval >> 16) & 0x7f) * 500 + 52000; 179 value = ((regval >> 16) & 0x7f) * 500 + 52000;
166 if (show_hyst) 180 if (show_hyst)
167 value -= ((regval >> 24) & 0xf) * 500; 181 value -= ((regval >> 24) & 0xf) * 500;
@@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
181 struct pci_dev *pdev = data->pdev; 195 struct pci_dev *pdev = data->pdev;
182 196
183 if (index >= 2) { 197 if (index >= 2) {
184 u32 reg_caps, reg_htc; 198 u32 reg;
199
200 if (!data->read_htcreg)
201 return 0;
185 202
186 pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, 203 pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
187 &reg_caps); 204 &reg);
188 pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, 205 if (!(reg & NB_CAP_HTC))
189 &reg_htc); 206 return 0;
190 if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE)) 207
208 data->read_htcreg(data->pdev, &reg);
209 if (!(reg & HTC_ENABLE))
191 return 0; 210 return 0;
192 } 211 }
193 return attr->mode; 212 return attr->mode;
@@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev,
268 287
269 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || 288 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
270 boot_cpu_data.x86_model == 0x70)) { 289 boot_cpu_data.x86_model == 0x70)) {
290 data->read_htcreg = read_htcreg_nb_f15;
271 data->read_tempreg = read_tempreg_nb_f15; 291 data->read_tempreg = read_tempreg_nb_f15;
272 } else if (boot_cpu_data.x86 == 0x17) { 292 } else if (boot_cpu_data.x86 == 0x17) {
273 data->temp_adjust_mask = 0x80000; 293 data->temp_adjust_mask = 0x80000;
274 data->read_tempreg = read_tempreg_nb_f17; 294 data->read_tempreg = read_tempreg_nb_f17;
275 } else { 295 } else {
296 data->read_htcreg = read_htcreg_pci;
276 data->read_tempreg = read_tempreg_pci; 297 data->read_tempreg = read_tempreg_pci;
277 } 298 }
278 299
@@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = {
302 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 323 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
303 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 324 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
304 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 325 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
305 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) }, 326 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
306 {} 327 {}
307}; 328};
308MODULE_DEVICE_TABLE(pci, k10temp_id_table); 329MODULE_DEVICE_TABLE(pci, k10temp_id_table);
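
Register access is now dispatched through per-generation function pointers chosen at probe time, and the HTC-based critical-temperature attributes are hidden when no read_htcreg callback is set (Family 17h). A standalone sketch of that dispatch with stubbed-out register reads (all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct k10temp_like {
            void (*read_htcreg)(uint32_t *regval);   /* NULL: no HTC registers */
            void (*read_tempreg)(uint32_t *regval);
    };

    static void read_htcreg_stub(uint32_t *regval)  { *regval = 0x00400000; }
    static void read_tempreg_stub(uint32_t *regval) { *regval = 0x3c000000; }

    static int temp_crit_millicelsius(const struct k10temp_like *data)
    {
            uint32_t regval;

            if (!data->read_htcreg)   /* attribute would be hidden */
                    return -1;
            data->read_htcreg(&regval);
            return ((regval >> 16) & 0x7f) * 500 + 52000;
    }

    int main(void)
    {
            struct k10temp_like data = {
                    .read_htcreg  = read_htcreg_stub,
                    .read_tempreg = read_tempreg_stub,
            };

            printf("temp1_crit = %d millidegrees C\n", temp_crit_millicelsius(&data));
            return 0;
    }
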
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 4e63c6f6c04d..d030ce3025a6 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -250,7 +250,9 @@ void bch_debug_exit(void)
250 250
251int __init bch_debug_init(struct kobject *kobj) 251int __init bch_debug_init(struct kobject *kobj)
252{ 252{
253 bcache_debug = debugfs_create_dir("bcache", NULL); 253 if (!IS_ENABLED(CONFIG_DEBUG_FS))
254 return 0;
254 255
256 bcache_debug = debugfs_create_dir("bcache", NULL);
255 return IS_ERR_OR_NULL(bcache_debug); 257 return IS_ERR_OR_NULL(bcache_debug);
256} 258}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index db5ec4e8bde9..ebb1d141b900 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
1194 NDCB0_CMD2(NAND_CMD_READSTART); 1194 NDCB0_CMD2(NAND_CMD_READSTART);
1195 1195
1196 /* 1196 /*
1197 * Trigger the naked read operation only on the last chunk. 1197 * Trigger the monolithic read on the first chunk, then naked read on
1198 * Otherwise, use monolithic read. 1198 * intermediate chunks and finally a last naked read on the last chunk.
1199 */ 1199 */
1200 if (lt->nchunks == 1 || (chunk < lt->nchunks - 1)) 1200 if (chunk == 0)
1201 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); 1201 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
1202 else if (chunk < lt->nchunks - 1)
1203 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
1202 else 1204 else
1203 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); 1205 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1204 1206
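
With this fix, a multi-chunk BCH read issues a monolithic command for the first chunk, naked reads for the middle chunks and a last-naked read for the final chunk (a single-chunk page still gets the monolithic form). A standalone sketch of the selection (the enum mirrors the driver's XTYPE defines but is redeclared here):

    enum xtype { XTYPE_MONOLITHIC_RW, XTYPE_NAKED_RW, XTYPE_LAST_NAKED_RW };

    static enum xtype chunk_xtype(int chunk, int nchunks)
    {
            if (chunk == 0)
                    return XTYPE_MONOLITHIC_RW;   /* full command cycle */
            if (chunk < nchunks - 1)
                    return XTYPE_NAKED_RW;        /* data phase only */
            return XTYPE_LAST_NAKED_RW;           /* data + closing handshake */
    }
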
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 439991d71b14..4c14ce428e92 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
141 int i; 141 int i;
142 142
143 for (i = 0; i < nr_queues; i++) { 143 for (i = 0; i < nr_queues; i++) {
144 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 144 q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
145 if (!q) 145 if (!q)
146 return -ENOMEM; 146 return -ENOMEM;
147 147
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
456{ 456{
457 struct ciw *ciw; 457 struct ciw *ciw;
458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
459 int rc;
460 459
461 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); 460 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
462 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); 461 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
493 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); 492 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
494 if (!ciw) { 493 if (!ciw) {
495 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); 494 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
496 rc = -EINVAL; 495 return -EINVAL;
497 goto out_err;
498 } 496 }
499 irq_ptr->equeue = *ciw; 497 irq_ptr->equeue = *ciw;
500 498
501 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); 499 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
502 if (!ciw) { 500 if (!ciw) {
503 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); 501 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
504 rc = -EINVAL; 502 return -EINVAL;
505 goto out_err;
506 } 503 }
507 irq_ptr->aqueue = *ciw; 504 irq_ptr->aqueue = *ciw;
508 505
@@ -512,9 +509,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
512 init_data->cdev->handler = qdio_int_handler; 509 init_data->cdev->handler = qdio_int_handler;
513 spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); 510 spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
514 return 0; 511 return 0;
515out_err:
516 qdio_release_memory(irq_ptr);
517 return rc;
518} 512}
519 513
520void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, 514void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 2c7550797ec2..dce92b2a895d 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
715 * and stores the result to ccwchain list. @cp must have been 715 * and stores the result to ccwchain list. @cp must have been
716 * initialized by a previous call with cp_init(). Otherwise, undefined 716 * initialized by a previous call with cp_init(). Otherwise, undefined
717 * behavior occurs. 717 * behavior occurs.
718 * For each chain composing the channel program:
719 * - On entry ch_len holds the count of CCWs to be translated.
720 * - On exit ch_len is adjusted to the count of successfully translated CCWs.
721 * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
718 * 722 *
719 * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced 723 * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
720 * as helpers to do ccw chain translation inside the kernel. Basically 724 * as helpers to do ccw chain translation inside the kernel. Basically
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
749 for (idx = 0; idx < len; idx++) { 753 for (idx = 0; idx < len; idx++) {
750 ret = ccwchain_fetch_one(chain, idx, cp); 754 ret = ccwchain_fetch_one(chain, idx, cp);
751 if (ret) 755 if (ret)
752 return ret; 756 goto out_err;
753 } 757 }
754 } 758 }
755 759
756 return 0; 760 return 0;
761out_err:
762 /* Only cleanup the chain elements that were actually translated. */
763 chain->ch_len = idx;
764 list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
765 chain->ch_len = 0;
766 }
767 return ret;
757} 768}
758 769
759/** 770/**
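
On a translation failure, cp_prefetch() now trims ch_len of the failing chain to the number of CCWs actually translated and zeroes ch_len of the chains that were never reached, so cp_free() releases exactly what was built. A sketch of that cleanup idiom over a kernel list, assuming a simplified chain type (chain_like and mark_partial_failure() are hypothetical):

    #include <linux/list.h>

    struct chain_like {
            struct list_head next;
            int ch_len;   /* CCWs translated so far */
    };

    static void mark_partial_failure(struct chain_like *failed, int translated,
                                     struct list_head *all_chains)
    {
            failed->ch_len = translated;
            /* Chains after the failing one were never touched. */
            list_for_each_entry_continue(failed, all_chains, next)
                    failed->ch_len = 0;
    }
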
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0156c9623c35..d62ddd63f4fe 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
724 int wait; 724 int wait;
725 unsigned long flags = 0; 725 unsigned long flags = 0;
726 unsigned long mflags = 0; 726 unsigned long mflags = 0;
727 struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
728 fibptr->hw_fib_va;
727 729
728 fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); 730 fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
729 if (callback) { 731 if (callback) {
@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
734 wait = 1; 736 wait = 1;
735 737
736 738
737 if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { 739 hbacmd->iu_type = command;
738 struct aac_hba_cmd_req *hbacmd =
739 (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
740 740
741 hbacmd->iu_type = command; 741 if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
742 /* bit1 of request_id must be 0 */ 742 /* bit1 of request_id must be 0 */
743 hbacmd->request_id = 743 hbacmd->request_id =
744 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); 744 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c374e3b5c678..777e5f1e52d1 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
609 break; 609 break;
610 610
611 case BTSTAT_ABORTQUEUE: 611 case BTSTAT_ABORTQUEUE:
612 cmd->result = (DID_ABORT << 16); 612 cmd->result = (DID_BUS_BUSY << 16);
613 break; 613 break;
614 614
615 case BTSTAT_SCSIPARITY: 615 case BTSTAT_SCSIPARITY:
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 1596d35498c5..6573152ce893 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
490 490
491static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) 491static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
492{ 492{
493 if (!has_bspi(qspi) || (qspi->bspi_enabled)) 493 if (!has_bspi(qspi))
494 return; 494 return;
495 495
496 qspi->bspi_enabled = 1; 496 qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
505 505
506static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) 506static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
507{ 507{
508 if (!has_bspi(qspi) || (!qspi->bspi_enabled)) 508 if (!has_bspi(qspi))
509 return; 509 return;
510 510
511 qspi->bspi_enabled = 0; 511 qspi->bspi_enabled = 0;
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
519 519
520static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) 520static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
521{ 521{
522 u32 data = 0; 522 u32 rd = 0;
523 u32 wr = 0;
523 524
524 if (qspi->curr_cs == cs)
525 return;
526 if (qspi->base[CHIP_SELECT]) { 525 if (qspi->base[CHIP_SELECT]) {
527 data = bcm_qspi_read(qspi, CHIP_SELECT, 0); 526 rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
528 data = (data & ~0xff) | (1 << cs); 527 wr = (rd & ~0xff) | (1 << cs);
529 bcm_qspi_write(qspi, CHIP_SELECT, 0, data); 528 if (rd == wr)
529 return;
530 bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
530 usleep_range(10, 20); 531 usleep_range(10, 20);
531 } 532 }
533
534 dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
532 qspi->curr_cs = cs; 535 qspi->curr_cs = cs;
533} 536}
534 537
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
755 dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); 758 dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
756 } 759 }
757 mspi_cdram = MSPI_CDRAM_CONT_BIT; 760 mspi_cdram = MSPI_CDRAM_CONT_BIT;
758 mspi_cdram |= (~(1 << spi->chip_select) & 761
759 MSPI_CDRAM_PCS); 762 if (has_bspi(qspi))
763 mspi_cdram &= ~1;
764 else
765 mspi_cdram |= (~(1 << spi->chip_select) &
766 MSPI_CDRAM_PCS);
767
760 mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : 768 mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
761 MSPI_CDRAM_BITSE_BIT); 769 MSPI_CDRAM_BITSE_BIT);
762 770
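
In the bcm_qspi_chip_select() hunk above, the update now compares the freshly read register value against the value to be written and skips both the write and the settling delay when nothing changes, instead of trusting a cached curr_cs. A standalone sketch of that read-modify-write-if-changed idiom (select_cs() is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static bool select_cs(uint32_t *cs_reg, int cs)
    {
            uint32_t rd = *cs_reg;                    /* stands in for bcm_qspi_read() */
            uint32_t wr = (rd & ~0xffu) | (1u << cs);

            if (rd == wr)
                    return false;                     /* already selected: skip write + delay */
            *cs_reg = wr;                             /* stands in for bcm_qspi_write() */
            return true;
    }
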
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 1431cb98fe40..3094d818cf06 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
184 struct bcm2835aux_spi *bs = spi_master_get_devdata(master); 184 struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
185 irqreturn_t ret = IRQ_NONE; 185 irqreturn_t ret = IRQ_NONE;
186 186
187 /* IRQ may be shared, so return if our interrupts are disabled */
188 if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
189 (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
190 return ret;
191
187 /* check if we have data to read */ 192 /* check if we have data to read */
188 while (bs->rx_len && 193 while (bs->rx_len &&
189 (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & 194 (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
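
Because the aux SPI interrupt line can be shared, the handler now returns IRQ_NONE up front when its own interrupt enables in CNTL1 are clear, rather than touching the FIFOs. A sketch of the shared-IRQ early-out pattern with a placeholder device layout (example_dev and example_isr() are hypothetical, not the real BCM2835 registers):

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct example_dev {
            void __iomem *status_reg;
            u32 irq_mask;   /* bits this device can actually raise */
    };

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            struct example_dev *ed = dev_id;
            u32 status = readl(ed->status_reg);

            if (!(status & ed->irq_mask))
                    return IRQ_NONE;   /* shared line: not our interrupt */

            /* ... drain the FIFOs, complete the transfer ... */
            return IRQ_HANDLED;
    }
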
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 5c9516ae4942..4a001634023e 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
313 313
314 while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && 314 while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
315 (xspi->tx_bytes > 0)) { 315 (xspi->tx_bytes > 0)) {
316
317 /* When xspi is busy, a byte may fail to send and the SPI
318 * controller then does not work correctly, so add a one-byte delay
319 */
320 if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
321 CDNS_SPI_IXR_TXFULL)
322 usleep_range(10, 20);
323
316 if (xspi->txbuf) 324 if (xspi->txbuf)
317 cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); 325 cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
318 else 326 else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6f57592a7f95..a056ee88a960 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1701,7 +1701,7 @@ static struct platform_driver spi_imx_driver = {
1701}; 1701};
1702module_platform_driver(spi_imx_driver); 1702module_platform_driver(spi_imx_driver);
1703 1703
1704MODULE_DESCRIPTION("SPI Master Controller driver"); 1704MODULE_DESCRIPTION("SPI Controller driver");
1705MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 1705MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1706MODULE_LICENSE("GPL"); 1706MODULE_LICENSE("GPL");
1707MODULE_ALIAS("platform:" DRIVER_NAME); 1707MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 513ec6c6e25b..0ae7defd3492 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
38 38
39 /* SSP register addresses */ 39 /* SSP register addresses */
40 void __iomem *ioaddr; 40 void __iomem *ioaddr;
41 u32 ssdr_physical; 41 phys_addr_t ssdr_physical;
42 42
43 /* SSP masks*/ 43 /* SSP masks*/
44 u32 dma_cr1; 44 u32 dma_cr1;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index ae086aab57d5..8171eedbfc90 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
283 } 283 }
284 284
285 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); 285 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
286 brps = min_t(int, brps, 32);
286 287
287 scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); 288 scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
288 sh_msiof_write(p, TSCR, scr); 289 sh_msiof_write(p, TSCR, scr);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 72ebbc908e19..32cd52ca8318 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
354 354
355 slot_id = 0; 355 slot_id = 0;
356 for (i = 0; i < MAX_HC_SLOTS; i++) { 356 for (i = 0; i < MAX_HC_SLOTS; i++) {
357 if (!xhci->devs[i]) 357 if (!xhci->devs[i] || !xhci->devs[i]->udev)
358 continue; 358 continue;
359 speed = xhci->devs[i]->udev->speed; 359 speed = xhci->devs[i]->udev->speed;
360 if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) 360 if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e7f99d55922a..15a42cee0a9c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2524,8 +2524,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
2524{ 2524{
2525 struct musb *musb = hcd_to_musb(hcd); 2525 struct musb *musb = hcd_to_musb(hcd);
2526 u8 devctl; 2526 u8 devctl;
2527 int ret;
2527 2528
2528 musb_port_suspend(musb, true); 2529 ret = musb_port_suspend(musb, true);
2530 if (ret)
2531 return ret;
2529 2532
2530 if (!is_host_active(musb)) 2533 if (!is_host_active(musb))
2531 return 0; 2534 return 0;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 72392bbcd0a4..2999845632ce 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8);
67extern void musb_root_disconnect(struct musb *musb); 67extern void musb_root_disconnect(struct musb *musb);
68extern void musb_host_resume_root_hub(struct musb *musb); 68extern void musb_host_resume_root_hub(struct musb *musb);
69extern void musb_host_poke_root_hub(struct musb *musb); 69extern void musb_host_poke_root_hub(struct musb *musb);
70extern void musb_port_suspend(struct musb *musb, bool do_suspend); 70extern int musb_port_suspend(struct musb *musb, bool do_suspend);
71extern void musb_port_reset(struct musb *musb, bool do_reset); 71extern void musb_port_reset(struct musb *musb, bool do_reset);
72extern void musb_host_finish_resume(struct work_struct *work); 72extern void musb_host_finish_resume(struct work_struct *work);
73#else 73#else
@@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
99static inline void musb_host_resume_root_hub(struct musb *musb) {} 99static inline void musb_host_resume_root_hub(struct musb *musb) {}
100static inline void musb_host_poll_rh_status(struct musb *musb) {} 100static inline void musb_host_poll_rh_status(struct musb *musb) {}
101static inline void musb_host_poke_root_hub(struct musb *musb) {} 101static inline void musb_host_poke_root_hub(struct musb *musb) {}
102static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {} 102static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
103{
104 return 0;
105}
103static inline void musb_port_reset(struct musb *musb, bool do_reset) {} 106static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
104static inline void musb_host_finish_resume(struct work_struct *work) {} 107static inline void musb_host_finish_resume(struct work_struct *work) {}
105#endif 108#endif
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 5165d2b07ade..2f8dd9826e94 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work)
48 spin_unlock_irqrestore(&musb->lock, flags); 48 spin_unlock_irqrestore(&musb->lock, flags);
49} 49}
50 50
51void musb_port_suspend(struct musb *musb, bool do_suspend) 51int musb_port_suspend(struct musb *musb, bool do_suspend)
52{ 52{
53 struct usb_otg *otg = musb->xceiv->otg; 53 struct usb_otg *otg = musb->xceiv->otg;
54 u8 power; 54 u8 power;
55 void __iomem *mbase = musb->mregs; 55 void __iomem *mbase = musb->mregs;
56 56
57 if (!is_host_active(musb)) 57 if (!is_host_active(musb))
58 return; 58 return 0;
59 59
60 /* NOTE: this doesn't necessarily put PHY into low power mode, 60 /* NOTE: this doesn't necessarily put PHY into low power mode,
61 * turning off its clock; that's a function of PHY integration and 61 * turning off its clock; that's a function of PHY integration and
@@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
66 if (do_suspend) { 66 if (do_suspend) {
67 int retries = 10000; 67 int retries = 10000;
68 68
69 power &= ~MUSB_POWER_RESUME; 69 if (power & MUSB_POWER_RESUME)
70 power |= MUSB_POWER_SUSPENDM; 70 return -EBUSY;
71 musb_writeb(mbase, MUSB_POWER, power);
72 71
73 /* Needed for OPT A tests */ 72 if (!(power & MUSB_POWER_SUSPENDM)) {
74 power = musb_readb(mbase, MUSB_POWER); 73 power |= MUSB_POWER_SUSPENDM;
75 while (power & MUSB_POWER_SUSPENDM) { 74 musb_writeb(mbase, MUSB_POWER, power);
75
76 /* Needed for OPT A tests */
76 power = musb_readb(mbase, MUSB_POWER); 77 power = musb_readb(mbase, MUSB_POWER);
77 if (retries-- < 1) 78 while (power & MUSB_POWER_SUSPENDM) {
78 break; 79 power = musb_readb(mbase, MUSB_POWER);
80 if (retries-- < 1)
81 break;
82 }
79 } 83 }
80 84
81 musb_dbg(musb, "Root port suspended, power %02x", power); 85 musb_dbg(musb, "Root port suspended, power %02x", power);
@@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
111 schedule_delayed_work(&musb->finish_resume_work, 115 schedule_delayed_work(&musb->finish_resume_work,
112 msecs_to_jiffies(USB_RESUME_TIMEOUT)); 116 msecs_to_jiffies(USB_RESUME_TIMEOUT));
113 } 117 }
118 return 0;
114} 119}
115 120
116void musb_port_reset(struct musb *musb, bool do_reset) 121void musb_port_reset(struct musb *musb, bool do_reset)
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 14a72357800a..35618ceb2791 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -73,6 +73,7 @@ struct bus_id_priv {
73 struct stub_device *sdev; 73 struct stub_device *sdev;
74 struct usb_device *udev; 74 struct usb_device *udev;
75 char shutdown_busid; 75 char shutdown_busid;
76 spinlock_t busid_lock;
76}; 77};
77 78
78/* stub_priv is allocated from stub_priv_cache */ 79/* stub_priv is allocated from stub_priv_cache */
@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver;
83 84
84/* stub_main.c */ 85/* stub_main.c */
85struct bus_id_priv *get_busid_priv(const char *busid); 86struct bus_id_priv *get_busid_priv(const char *busid);
87void put_busid_priv(struct bus_id_priv *bid);
86int del_match_busid(char *busid); 88int del_match_busid(char *busid);
87void stub_device_cleanup_urbs(struct stub_device *sdev); 89void stub_device_cleanup_urbs(struct stub_device *sdev);
88 90
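
get_busid_priv() now returns with the entry's new busid_lock held, so every caller must release it through the new put_busid_priv() on all exit paths, as the stub_dev.c hunks below do. A sketch of the pairing, assuming this header is in scope (example_claim() is hypothetical):

    #include <linux/errno.h>

    #include "stub.h"

    static int example_claim(const char *busid)
    {
            struct bus_id_priv *bid = get_busid_priv(busid);   /* returns locked */

            if (!bid)
                    return -ENODEV;

            /* ... inspect or update bid->status / bid->sdev under busid_lock ... */

            put_busid_priv(bid);                               /* drops busid_lock */
            return 0;
    }
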
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index dd8ef36ab10e..c0d6ff1baa72 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev)
300 struct stub_device *sdev = NULL; 300 struct stub_device *sdev = NULL;
301 const char *udev_busid = dev_name(&udev->dev); 301 const char *udev_busid = dev_name(&udev->dev);
302 struct bus_id_priv *busid_priv; 302 struct bus_id_priv *busid_priv;
303 int rc; 303 int rc = 0;
304 304
305 dev_dbg(&udev->dev, "Enter\n"); 305 dev_dbg(&udev->dev, "Enter probe\n");
306 306
307 /* check we should claim or not by busid_table */ 307 /* check we should claim or not by busid_table */
308 busid_priv = get_busid_priv(udev_busid); 308 busid_priv = get_busid_priv(udev_busid);
@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev)
317 * other matched drivers by the driver core. 317 * other matched drivers by the driver core.
318 * See driver_probe_device() in driver/base/dd.c 318 * See driver_probe_device() in driver/base/dd.c
319 */ 319 */
320 return -ENODEV; 320 rc = -ENODEV;
321 goto call_put_busid_priv;
321 } 322 }
322 323
323 if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { 324 if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
324 dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", 325 dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
325 udev_busid); 326 udev_busid);
326 return -ENODEV; 327 rc = -ENODEV;
328 goto call_put_busid_priv;
327 } 329 }
328 330
329 if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { 331 if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev)
331 "%s is attached on vhci_hcd... skip!\n", 333 "%s is attached on vhci_hcd... skip!\n",
332 udev_busid); 334 udev_busid);
333 335
334 return -ENODEV; 336 rc = -ENODEV;
337 goto call_put_busid_priv;
335 } 338 }
336 339
337 /* ok, this is my device */ 340 /* ok, this is my device */
338 sdev = stub_device_alloc(udev); 341 sdev = stub_device_alloc(udev);
339 if (!sdev) 342 if (!sdev) {
340 return -ENOMEM; 343 rc = -ENOMEM;
344 goto call_put_busid_priv;
345 }
341 346
342 dev_info(&udev->dev, 347 dev_info(&udev->dev,
343 "usbip-host: register new device (bus %u dev %u)\n", 348 "usbip-host: register new device (bus %u dev %u)\n",
@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev)
369 } 374 }
370 busid_priv->status = STUB_BUSID_ALLOC; 375 busid_priv->status = STUB_BUSID_ALLOC;
371 376
372 return 0; 377 rc = 0;
378 goto call_put_busid_priv;
379
373err_files: 380err_files:
374 usb_hub_release_port(udev->parent, udev->portnum, 381 usb_hub_release_port(udev->parent, udev->portnum,
375 (struct usb_dev_state *) udev); 382 (struct usb_dev_state *) udev);
@@ -379,6 +386,9 @@ err_port:
379 386
380 busid_priv->sdev = NULL; 387 busid_priv->sdev = NULL;
381 stub_device_free(sdev); 388 stub_device_free(sdev);
389
390call_put_busid_priv:
391 put_busid_priv(busid_priv);
382 return rc; 392 return rc;
383} 393}
384 394
@@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev)
404 struct bus_id_priv *busid_priv; 414 struct bus_id_priv *busid_priv;
405 int rc; 415 int rc;
406 416
407 dev_dbg(&udev->dev, "Enter\n"); 417 dev_dbg(&udev->dev, "Enter disconnect\n");
408 418
409 busid_priv = get_busid_priv(udev_busid); 419 busid_priv = get_busid_priv(udev_busid);
410 if (!busid_priv) { 420 if (!busid_priv) {
@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev)
417 /* get stub_device */ 427 /* get stub_device */
418 if (!sdev) { 428 if (!sdev) {
419 dev_err(&udev->dev, "could not get device"); 429 dev_err(&udev->dev, "could not get device");
420 return; 430 goto call_put_busid_priv;
421 } 431 }
422 432
423 dev_set_drvdata(&udev->dev, NULL); 433 dev_set_drvdata(&udev->dev, NULL);
@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev)
432 (struct usb_dev_state *) udev); 442 (struct usb_dev_state *) udev);
433 if (rc) { 443 if (rc) {
434 dev_dbg(&udev->dev, "unable to release port\n"); 444 dev_dbg(&udev->dev, "unable to release port\n");
435 return; 445 goto call_put_busid_priv;
436 } 446 }
437 447
438 /* If usb reset is called from event handler */ 448 /* If usb reset is called from event handler */
439 if (usbip_in_eh(current)) 449 if (usbip_in_eh(current))
440 return; 450 goto call_put_busid_priv;
441 451
442 /* shutdown the current connection */ 452 /* shutdown the current connection */
443 shutdown_busid(busid_priv); 453 shutdown_busid(busid_priv);
@@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev)
448 busid_priv->sdev = NULL; 458 busid_priv->sdev = NULL;
449 stub_device_free(sdev); 459 stub_device_free(sdev);
450 460
451 if (busid_priv->status == STUB_BUSID_ALLOC) { 461 if (busid_priv->status == STUB_BUSID_ALLOC)
452 busid_priv->status = STUB_BUSID_ADDED; 462 busid_priv->status = STUB_BUSID_ADDED;
453 } else { 463
454 busid_priv->status = STUB_BUSID_OTHER; 464call_put_busid_priv:
455 del_match_busid((char *)udev_busid); 465 put_busid_priv(busid_priv);
456 }
457} 466}
458 467
459#ifdef CONFIG_PM 468#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index d41d0cdeec0f..bf8a5feb0ee9 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -14,6 +14,7 @@
14#define DRIVER_DESC "USB/IP Host Driver" 14#define DRIVER_DESC "USB/IP Host Driver"
15 15
16struct kmem_cache *stub_priv_cache; 16struct kmem_cache *stub_priv_cache;
17
17/* 18/*
18 * busid_tables defines matching busids that usbip can grab. A user can change 19 * busid_tables defines matching busids that usbip can grab. A user can change
19 * dynamically what device is locally used and what device is exported to a 20 * dynamically what device is locally used and what device is exported to a
@@ -25,6 +26,8 @@ static spinlock_t busid_table_lock;
25 26
26static void init_busid_table(void) 27static void init_busid_table(void)
27{ 28{
29 int i;
30
28 /* 31 /*
29 * This also sets the bus_table[i].status to 32 * This also sets the bus_table[i].status to
30 * STUB_BUSID_OTHER, which is 0. 33 * STUB_BUSID_OTHER, which is 0.
@@ -32,6 +35,9 @@ static void init_busid_table(void)
32 memset(busid_table, 0, sizeof(busid_table)); 35 memset(busid_table, 0, sizeof(busid_table));
33 36
34 spin_lock_init(&busid_table_lock); 37 spin_lock_init(&busid_table_lock);
38
39 for (i = 0; i < MAX_BUSID; i++)
40 spin_lock_init(&busid_table[i].busid_lock);
35} 41}
36 42
37/* 43/*
@@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid)
43 int i; 49 int i;
44 int idx = -1; 50 int idx = -1;
45 51
46 for (i = 0; i < MAX_BUSID; i++) 52 for (i = 0; i < MAX_BUSID; i++) {
53 spin_lock(&busid_table[i].busid_lock);
47 if (busid_table[i].name[0]) 54 if (busid_table[i].name[0])
48 if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { 55 if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
49 idx = i; 56 idx = i;
57 spin_unlock(&busid_table[i].busid_lock);
50 break; 58 break;
51 } 59 }
60 spin_unlock(&busid_table[i].busid_lock);
61 }
52 return idx; 62 return idx;
53} 63}
54 64
65/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
55struct bus_id_priv *get_busid_priv(const char *busid) 66struct bus_id_priv *get_busid_priv(const char *busid)
56{ 67{
57 int idx; 68 int idx;
@@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
59 70
60 spin_lock(&busid_table_lock); 71 spin_lock(&busid_table_lock);
61 idx = get_busid_idx(busid); 72 idx = get_busid_idx(busid);
62 if (idx >= 0) 73 if (idx >= 0) {
63 bid = &(busid_table[idx]); 74 bid = &(busid_table[idx]);
75 /* get busid_lock before returning */
76 spin_lock(&bid->busid_lock);
77 }
64 spin_unlock(&busid_table_lock); 78 spin_unlock(&busid_table_lock);
65 79
66 return bid; 80 return bid;
67} 81}
68 82
83void put_busid_priv(struct bus_id_priv *bid)
84{
85 if (bid)
86 spin_unlock(&bid->busid_lock);
87}
88
69static int add_match_busid(char *busid) 89static int add_match_busid(char *busid)
70{ 90{
71 int i; 91 int i;
@@ -78,15 +98,19 @@ static int add_match_busid(char *busid)
78 goto out; 98 goto out;
79 } 99 }
80 100
81 for (i = 0; i < MAX_BUSID; i++) 101 for (i = 0; i < MAX_BUSID; i++) {
102 spin_lock(&busid_table[i].busid_lock);
82 if (!busid_table[i].name[0]) { 103 if (!busid_table[i].name[0]) {
83 strlcpy(busid_table[i].name, busid, BUSID_SIZE); 104 strlcpy(busid_table[i].name, busid, BUSID_SIZE);
84 if ((busid_table[i].status != STUB_BUSID_ALLOC) && 105 if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
85 (busid_table[i].status != STUB_BUSID_REMOV)) 106 (busid_table[i].status != STUB_BUSID_REMOV))
86 busid_table[i].status = STUB_BUSID_ADDED; 107 busid_table[i].status = STUB_BUSID_ADDED;
87 ret = 0; 108 ret = 0;
109 spin_unlock(&busid_table[i].busid_lock);
88 break; 110 break;
89 } 111 }
112 spin_unlock(&busid_table[i].busid_lock);
113 }
90 114
91out: 115out:
92 spin_unlock(&busid_table_lock); 116 spin_unlock(&busid_table_lock);
@@ -107,6 +131,8 @@ int del_match_busid(char *busid)
107 /* found */ 131 /* found */
108 ret = 0; 132 ret = 0;
109 133
134 spin_lock(&busid_table[idx].busid_lock);
135
110 if (busid_table[idx].status == STUB_BUSID_OTHER) 136 if (busid_table[idx].status == STUB_BUSID_OTHER)
111 memset(busid_table[idx].name, 0, BUSID_SIZE); 137 memset(busid_table[idx].name, 0, BUSID_SIZE);
112 138
@@ -114,6 +140,7 @@ int del_match_busid(char *busid)
114 (busid_table[idx].status != STUB_BUSID_ADDED)) 140 (busid_table[idx].status != STUB_BUSID_ADDED))
115 busid_table[idx].status = STUB_BUSID_REMOV; 141 busid_table[idx].status = STUB_BUSID_REMOV;
116 142
143 spin_unlock(&busid_table[idx].busid_lock);
117out: 144out:
118 spin_unlock(&busid_table_lock); 145 spin_unlock(&busid_table_lock);
119 146
@@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
126 char *out = buf; 153 char *out = buf;
127 154
128 spin_lock(&busid_table_lock); 155 spin_lock(&busid_table_lock);
129 for (i = 0; i < MAX_BUSID; i++) 156 for (i = 0; i < MAX_BUSID; i++) {
157 spin_lock(&busid_table[i].busid_lock);
130 if (busid_table[i].name[0]) 158 if (busid_table[i].name[0])
131 out += sprintf(out, "%s ", busid_table[i].name); 159 out += sprintf(out, "%s ", busid_table[i].name);
160 spin_unlock(&busid_table[i].busid_lock);
161 }
132 spin_unlock(&busid_table_lock); 162 spin_unlock(&busid_table_lock);
133 out += sprintf(out, "\n"); 163 out += sprintf(out, "\n");
134 164
@@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
169} 199}
170static DRIVER_ATTR_RW(match_busid); 200static DRIVER_ATTR_RW(match_busid);
171 201
202static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
203{
204 int ret;
205
206 /* device_attach() callers should hold parent lock for USB */
207 if (busid_priv->udev->dev.parent)
208 device_lock(busid_priv->udev->dev.parent);
209 ret = device_attach(&busid_priv->udev->dev);
210 if (busid_priv->udev->dev.parent)
211 device_unlock(busid_priv->udev->dev.parent);
212 if (ret < 0) {
213 dev_err(&busid_priv->udev->dev, "rebind failed\n");
214 return ret;
215 }
216 return 0;
217}
218
219static void stub_device_rebind(void)
220{
221#if IS_MODULE(CONFIG_USBIP_HOST)
222 struct bus_id_priv *busid_priv;
223 int i;
224
225 /* update status to STUB_BUSID_OTHER so probe ignores the device */
226 spin_lock(&busid_table_lock);
227 for (i = 0; i < MAX_BUSID; i++) {
228 if (busid_table[i].name[0] &&
229 busid_table[i].shutdown_busid) {
230 busid_priv = &(busid_table[i]);
231 busid_priv->status = STUB_BUSID_OTHER;
232 }
233 }
234 spin_unlock(&busid_table_lock);
235
236 /* now run rebind - no need to hold locks. driver files are removed */
237 for (i = 0; i < MAX_BUSID; i++) {
238 if (busid_table[i].name[0] &&
239 busid_table[i].shutdown_busid) {
240 busid_priv = &(busid_table[i]);
241 do_rebind(busid_table[i].name, busid_priv);
242 }
243 }
244#endif
245}
246
172static ssize_t rebind_store(struct device_driver *dev, const char *buf, 247static ssize_t rebind_store(struct device_driver *dev, const char *buf,
173 size_t count) 248 size_t count)
174{ 249{
@@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
186 if (!bid) 261 if (!bid)
187 return -ENODEV; 262 return -ENODEV;
188 263
189 /* device_attach() callers should hold parent lock for USB */ 264 /* mark the device for deletion so probe ignores it during rescan */
190 if (bid->udev->dev.parent) 265 bid->status = STUB_BUSID_OTHER;
191 device_lock(bid->udev->dev.parent); 266 /* release the busid lock */
192 ret = device_attach(&bid->udev->dev); 267 put_busid_priv(bid);
193 if (bid->udev->dev.parent) 268
194 device_unlock(bid->udev->dev.parent); 269 ret = do_rebind((char *) buf, bid);
195 if (ret < 0) { 270 if (ret < 0)
196 dev_err(&bid->udev->dev, "rebind failed\n");
197 return ret; 271 return ret;
198 } 272
273 /* delete device from busid_table */
274 del_match_busid((char *) buf);
199 275
200 return count; 276 return count;
201} 277}
@@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void)
317 */ 393 */
318 usb_deregister_device_driver(&stub_driver); 394 usb_deregister_device_driver(&stub_driver);
319 395
396 /* initiate scan to attach devices */
397 stub_device_rebind();
398
320 kmem_cache_destroy(stub_priv_cache); 399 kmem_cache_destroy(stub_priv_cache);
321} 400}
322 401
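The stub_main.c hunks above split the locking in two: busid_table_lock keeps guarding table membership, while the new per-entry busid_lock guards an entry's fields, and get_busid_priv() now hands its result back with that per-entry lock still held, so every caller must pair it with the new put_busid_priv(). A minimal sketch of that caller contract (example_use_busid is a hypothetical caller, not a function in the driver):

static int example_use_busid(char *busid)
{
	struct bus_id_priv *bid;

	bid = get_busid_priv(busid);	/* returns with bid->busid_lock held */
	if (!bid)
		return -ENODEV;

	/* ... read or update fields guarded by bid->busid_lock ... */

	put_busid_priv(bid);		/* releases bid->busid_lock */
	return 0;
}

The rebind_store() hunk above follows exactly this shape: it releases the entry lock with put_busid_priv() before calling do_rebind(), since device_attach() can sleep.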
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 3bedfed608a2..7587fb665ff1 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -121,7 +121,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
121 p = text; 121 p = text;
122 do { 122 do {
123 struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs]; 123 struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs];
124 char tdelim = delim; 124 const char *q, *stop;
125 125
126 if (*p == delim) { 126 if (*p == delim) {
127 p++; 127 p++;
@@ -130,28 +130,33 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
130 130
131 if (*p == '[') { 131 if (*p == '[') {
132 p++; 132 p++;
133 tdelim = ']'; 133 q = memchr(p, ']', end - p);
134 } else {
135 for (q = p; q < end; q++)
136 if (*q == '+' || *q == delim)
137 break;
134 } 138 }
135 139
136 if (in4_pton(p, end - p, 140 if (in4_pton(p, q - p,
137 (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3], 141 (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3],
138 tdelim, &p)) { 142 -1, &stop)) {
139 srx->transport.sin6.sin6_addr.s6_addr32[0] = 0; 143 srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
140 srx->transport.sin6.sin6_addr.s6_addr32[1] = 0; 144 srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
141 srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 145 srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
142 } else if (in6_pton(p, end - p, 146 } else if (in6_pton(p, q - p,
143 srx->transport.sin6.sin6_addr.s6_addr, 147 srx->transport.sin6.sin6_addr.s6_addr,
144 tdelim, &p)) { 148 -1, &stop)) {
145 /* Nothing to do */ 149 /* Nothing to do */
146 } else { 150 } else {
147 goto bad_address; 151 goto bad_address;
148 } 152 }
149 153
150 if (tdelim == ']') { 154 if (stop != q)
151 if (p == end || *p != ']') 155 goto bad_address;
152 goto bad_address; 156
157 p = q;
158 if (q < end && *q == ']')
153 p++; 159 p++;
154 }
155 160
156 if (p < end) { 161 if (p < end) {
157 if (*p == '+') { 162 if (*p == '+') {
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index abd9a84f4e88..571437dcb252 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -23,36 +23,55 @@
23/* 23/*
24 * Set up an interest-in-callbacks record for a volume on a server and 24 * Set up an interest-in-callbacks record for a volume on a server and
25 * register it with the server. 25 * register it with the server.
26 * - Called with volume->server_sem held. 26 * - Called with vnode->io_lock held.
27 */ 27 */
28int afs_register_server_cb_interest(struct afs_vnode *vnode, 28int afs_register_server_cb_interest(struct afs_vnode *vnode,
29 struct afs_server_entry *entry) 29 struct afs_server_list *slist,
30 unsigned int index)
30{ 31{
31 struct afs_cb_interest *cbi = entry->cb_interest, *vcbi, *new, *x; 32 struct afs_server_entry *entry = &slist->servers[index];
33 struct afs_cb_interest *cbi, *vcbi, *new, *old;
32 struct afs_server *server = entry->server; 34 struct afs_server *server = entry->server;
33 35
34again: 36again:
37 if (vnode->cb_interest &&
38 likely(vnode->cb_interest == entry->cb_interest))
39 return 0;
40
41 read_lock(&slist->lock);
42 cbi = afs_get_cb_interest(entry->cb_interest);
43 read_unlock(&slist->lock);
44
35 vcbi = vnode->cb_interest; 45 vcbi = vnode->cb_interest;
36 if (vcbi) { 46 if (vcbi) {
37 if (vcbi == cbi) 47 if (vcbi == cbi) {
48 afs_put_cb_interest(afs_v2net(vnode), cbi);
38 return 0; 49 return 0;
50 }
39 51
52 /* Use a new interest in the server list for the same server
53 * rather than an old one that's still attached to a vnode.
54 */
40 if (cbi && vcbi->server == cbi->server) { 55 if (cbi && vcbi->server == cbi->server) {
41 write_seqlock(&vnode->cb_lock); 56 write_seqlock(&vnode->cb_lock);
42 vnode->cb_interest = afs_get_cb_interest(cbi); 57 old = vnode->cb_interest;
58 vnode->cb_interest = cbi;
43 write_sequnlock(&vnode->cb_lock); 59 write_sequnlock(&vnode->cb_lock);
44 afs_put_cb_interest(afs_v2net(vnode), cbi); 60 afs_put_cb_interest(afs_v2net(vnode), old);
45 return 0; 61 return 0;
46 } 62 }
47 63
64 /* Re-use the one attached to the vnode. */
48 if (!cbi && vcbi->server == server) { 65 if (!cbi && vcbi->server == server) {
49 afs_get_cb_interest(vcbi); 66 write_lock(&slist->lock);
50 x = cmpxchg(&entry->cb_interest, cbi, vcbi); 67 if (entry->cb_interest) {
51 if (x != cbi) { 68 write_unlock(&slist->lock);
52 cbi = x; 69 afs_put_cb_interest(afs_v2net(vnode), cbi);
53 afs_put_cb_interest(afs_v2net(vnode), vcbi);
54 goto again; 70 goto again;
55 } 71 }
72
73 entry->cb_interest = cbi;
74 write_unlock(&slist->lock);
56 return 0; 75 return 0;
57 } 76 }
58 } 77 }
@@ -72,13 +91,16 @@ again:
72 list_add_tail(&new->cb_link, &server->cb_interests); 91 list_add_tail(&new->cb_link, &server->cb_interests);
73 write_unlock(&server->cb_break_lock); 92 write_unlock(&server->cb_break_lock);
74 93
75 x = cmpxchg(&entry->cb_interest, cbi, new); 94 write_lock(&slist->lock);
76 if (x == cbi) { 95 if (!entry->cb_interest) {
96 entry->cb_interest = afs_get_cb_interest(new);
77 cbi = new; 97 cbi = new;
98 new = NULL;
78 } else { 99 } else {
79 cbi = x; 100 cbi = afs_get_cb_interest(entry->cb_interest);
80 afs_put_cb_interest(afs_v2net(vnode), new);
81 } 101 }
102 write_unlock(&slist->lock);
103 afs_put_cb_interest(afs_v2net(vnode), new);
82 } 104 }
83 105
84 ASSERT(cbi); 106 ASSERT(cbi);
@@ -88,11 +110,14 @@ again:
88 */ 110 */
89 write_seqlock(&vnode->cb_lock); 111 write_seqlock(&vnode->cb_lock);
90 112
91 vnode->cb_interest = afs_get_cb_interest(cbi); 113 old = vnode->cb_interest;
114 vnode->cb_interest = cbi;
92 vnode->cb_s_break = cbi->server->cb_s_break; 115 vnode->cb_s_break = cbi->server->cb_s_break;
116 vnode->cb_v_break = vnode->volume->cb_v_break;
93 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); 117 clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
94 118
95 write_sequnlock(&vnode->cb_lock); 119 write_sequnlock(&vnode->cb_lock);
120 afs_put_cb_interest(afs_v2net(vnode), old);
96 return 0; 121 return 0;
97} 122}
98 123
@@ -171,13 +196,24 @@ static void afs_break_one_callback(struct afs_server *server,
171 if (cbi->vid != fid->vid) 196 if (cbi->vid != fid->vid)
172 continue; 197 continue;
173 198
174 data.volume = NULL; 199 if (fid->vnode == 0 && fid->unique == 0) {
175 data.fid = *fid; 200 /* The callback break applies to an entire volume. */
176 inode = ilookup5_nowait(cbi->sb, fid->vnode, afs_iget5_test, &data); 201 struct afs_super_info *as = AFS_FS_S(cbi->sb);
177 if (inode) { 202 struct afs_volume *volume = as->volume;
178 vnode = AFS_FS_I(inode); 203
179 afs_break_callback(vnode); 204 write_lock(&volume->cb_break_lock);
180 iput(inode); 205 volume->cb_v_break++;
206 write_unlock(&volume->cb_break_lock);
207 } else {
208 data.volume = NULL;
209 data.fid = *fid;
210 inode = ilookup5_nowait(cbi->sb, fid->vnode,
211 afs_iget5_test, &data);
212 if (inode) {
213 vnode = AFS_FS_I(inode);
214 afs_break_callback(vnode);
215 iput(inode);
216 }
181 } 217 }
182 } 218 }
183 219
@@ -195,6 +231,8 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
195 ASSERT(server != NULL); 231 ASSERT(server != NULL);
196 ASSERTCMP(count, <=, AFSCBMAX); 232 ASSERTCMP(count, <=, AFSCBMAX);
197 233
234 /* TODO: Sort the callback break list by volume ID */
235
198 for (; count > 0; callbacks++, count--) { 236 for (; count > 0; callbacks++, count--) {
199 _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", 237 _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
200 callbacks->fid.vid, 238 callbacks->fid.vid,
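Several of the callback.c hunks above share one shape: publish the replacement cb_interest under vnode->cb_lock, but drop the displaced reference only after the seqlock has been released, because afs_put_cb_interest() may free it. Distilled into a hypothetical helper (change_cb_interest does not exist in the patch; it only isolates the pattern):

static void change_cb_interest(struct afs_vnode *vnode,
			       struct afs_cb_interest *cbi)
{
	struct afs_cb_interest *old;

	write_seqlock(&vnode->cb_lock);
	old = vnode->cb_interest;
	vnode->cb_interest = cbi;	/* cbi's reference moves to the vnode */
	write_sequnlock(&vnode->cb_lock);

	/* may free old, so it must happen outside the seqlock */
	afs_put_cb_interest(afs_v2net(vnode), old);
}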
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 357de908df3a..c332c95a6940 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -133,21 +133,10 @@ bool afs_cm_incoming_call(struct afs_call *call)
133} 133}
134 134
135/* 135/*
136 * clean up a cache manager call 136 * Clean up a cache manager call.
137 */ 137 */
138static void afs_cm_destructor(struct afs_call *call) 138static void afs_cm_destructor(struct afs_call *call)
139{ 139{
140 _enter("");
141
142 /* Break the callbacks here so that we do it after the final ACK is
143 * received. The step number here must match the final number in
144 * afs_deliver_cb_callback().
145 */
146 if (call->unmarshall == 5) {
147 ASSERT(call->cm_server && call->count && call->request);
148 afs_break_callbacks(call->cm_server, call->count, call->request);
149 }
150
151 kfree(call->buffer); 140 kfree(call->buffer);
152 call->buffer = NULL; 141 call->buffer = NULL;
153} 142}
@@ -161,14 +150,14 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
161 150
162 _enter(""); 151 _enter("");
163 152
164 /* be sure to send the reply *before* attempting to spam the AFS server 153 /* We need to break the callbacks before sending the reply as the
165 * with FSFetchStatus requests on the vnodes with broken callbacks lest 154 * server holds up change visibility till it receives our reply so as
166 * the AFS server get into a vicious cycle of trying to break further 155 * to maintain cache coherency.
167 * callbacks because it hadn't received completion of the CBCallBack op 156 */
168 * yet */ 157 if (call->cm_server)
169 afs_send_empty_reply(call); 158 afs_break_callbacks(call->cm_server, call->count, call->request);
170 159
171 afs_break_callbacks(call->cm_server, call->count, call->request); 160 afs_send_empty_reply(call);
172 afs_put_call(call); 161 afs_put_call(call);
173 _leave(""); 162 _leave("");
174} 163}
@@ -180,7 +169,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
180{ 169{
181 struct afs_callback_break *cb; 170 struct afs_callback_break *cb;
182 struct sockaddr_rxrpc srx; 171 struct sockaddr_rxrpc srx;
183 struct afs_server *server;
184 __be32 *bp; 172 __be32 *bp;
185 int ret, loop; 173 int ret, loop;
186 174
@@ -267,15 +255,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
267 255
268 call->offset = 0; 256 call->offset = 0;
269 call->unmarshall++; 257 call->unmarshall++;
270
271 /* Record that the message was unmarshalled successfully so
272 * that the call destructor can know do the callback breaking
273 * work, even if the final ACK isn't received.
274 *
275 * If the step number changes, then afs_cm_destructor() must be
276 * updated also.
277 */
278 call->unmarshall++;
279 case 5: 258 case 5:
280 break; 259 break;
281 } 260 }
@@ -286,10 +265,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
286 /* we'll need the file server record as that tells us which set of 265 /* we'll need the file server record as that tells us which set of
287 * vnodes to operate upon */ 266 * vnodes to operate upon */
288 rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); 267 rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
289 server = afs_find_server(call->net, &srx); 268 call->cm_server = afs_find_server(call->net, &srx);
290 if (!server) 269 if (!call->cm_server)
291 return -ENOTCONN; 270 trace_afs_cm_no_server(call, &srx);
292 call->cm_server = server;
293 271
294 return afs_queue_call_work(call); 272 return afs_queue_call_work(call);
295} 273}
@@ -303,7 +281,8 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
303 281
304 _enter("{%p}", call->cm_server); 282 _enter("{%p}", call->cm_server);
305 283
306 afs_init_callback_state(call->cm_server); 284 if (call->cm_server)
285 afs_init_callback_state(call->cm_server);
307 afs_send_empty_reply(call); 286 afs_send_empty_reply(call);
308 afs_put_call(call); 287 afs_put_call(call);
309 _leave(""); 288 _leave("");
@@ -315,7 +294,6 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
315static int afs_deliver_cb_init_call_back_state(struct afs_call *call) 294static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
316{ 295{
317 struct sockaddr_rxrpc srx; 296 struct sockaddr_rxrpc srx;
318 struct afs_server *server;
319 int ret; 297 int ret;
320 298
321 _enter(""); 299 _enter("");
@@ -328,10 +306,9 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
328 306
329 /* we'll need the file server record as that tells us which set of 307 /* we'll need the file server record as that tells us which set of
330 * vnodes to operate upon */ 308 * vnodes to operate upon */
331 server = afs_find_server(call->net, &srx); 309 call->cm_server = afs_find_server(call->net, &srx);
332 if (!server) 310 if (!call->cm_server)
333 return -ENOTCONN; 311 trace_afs_cm_no_server(call, &srx);
334 call->cm_server = server;
335 312
336 return afs_queue_call_work(call); 313 return afs_queue_call_work(call);
337} 314}
@@ -341,8 +318,6 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
341 */ 318 */
342static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) 319static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
343{ 320{
344 struct sockaddr_rxrpc srx;
345 struct afs_server *server;
346 struct afs_uuid *r; 321 struct afs_uuid *r;
347 unsigned loop; 322 unsigned loop;
348 __be32 *b; 323 __be32 *b;
@@ -398,11 +373,11 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
398 373
399 /* we'll need the file server record as that tells us which set of 374 /* we'll need the file server record as that tells us which set of
400 * vnodes to operate upon */ 375 * vnodes to operate upon */
401 rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); 376 rcu_read_lock();
402 server = afs_find_server(call->net, &srx); 377 call->cm_server = afs_find_server_by_uuid(call->net, call->request);
403 if (!server) 378 rcu_read_unlock();
404 return -ENOTCONN; 379 if (!call->cm_server)
405 call->cm_server = server; 380 trace_afs_cm_no_server_u(call, call->request);
406 381
407 return afs_queue_call_work(call); 382 return afs_queue_call_work(call);
408} 383}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 5889f70d4d27..7d623008157f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -180,6 +180,7 @@ static int afs_dir_open(struct inode *inode, struct file *file)
180 * get reclaimed during the iteration. 180 * get reclaimed during the iteration.
181 */ 181 */
182static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) 182static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
183 __acquires(&dvnode->validate_lock)
183{ 184{
184 struct afs_read *req; 185 struct afs_read *req;
185 loff_t i_size; 186 loff_t i_size;
@@ -261,18 +262,21 @@ retry:
261 /* If we're going to reload, we need to lock all the pages to prevent 262 /* If we're going to reload, we need to lock all the pages to prevent
262 * races. 263 * races.
263 */ 264 */
264 if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { 265 ret = -ERESTARTSYS;
265 ret = -ERESTARTSYS; 266 if (down_read_killable(&dvnode->validate_lock) < 0)
266 for (i = 0; i < req->nr_pages; i++) 267 goto error;
267 if (lock_page_killable(req->pages[i]) < 0)
268 goto error_unlock;
269 268
270 if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) 269 if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
271 goto success; 270 goto success;
271
272 up_read(&dvnode->validate_lock);
273 if (down_write_killable(&dvnode->validate_lock) < 0)
274 goto error;
272 275
276 if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
273 ret = afs_fetch_data(dvnode, key, req); 277 ret = afs_fetch_data(dvnode, key, req);
274 if (ret < 0) 278 if (ret < 0)
275 goto error_unlock_all; 279 goto error_unlock;
276 280
277 task_io_account_read(PAGE_SIZE * req->nr_pages); 281 task_io_account_read(PAGE_SIZE * req->nr_pages);
278 282
@@ -284,33 +288,26 @@ retry:
284 for (i = 0; i < req->nr_pages; i++) 288 for (i = 0; i < req->nr_pages; i++)
285 if (!afs_dir_check_page(dvnode, req->pages[i], 289 if (!afs_dir_check_page(dvnode, req->pages[i],
286 req->actual_len)) 290 req->actual_len))
287 goto error_unlock_all; 291 goto error_unlock;
288 292
289 // TODO: Trim excess pages 293 // TODO: Trim excess pages
290 294
291 set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags); 295 set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
292 } 296 }
293 297
298 downgrade_write(&dvnode->validate_lock);
294success: 299success:
295 i = req->nr_pages;
296 while (i > 0)
297 unlock_page(req->pages[--i]);
298 return req; 300 return req;
299 301
300error_unlock_all:
301 i = req->nr_pages;
302error_unlock: 302error_unlock:
303 while (i > 0) 303 up_write(&dvnode->validate_lock);
304 unlock_page(req->pages[--i]);
305error: 304error:
306 afs_put_read(req); 305 afs_put_read(req);
307 _leave(" = %d", ret); 306 _leave(" = %d", ret);
308 return ERR_PTR(ret); 307 return ERR_PTR(ret);
309 308
310content_has_grown: 309content_has_grown:
311 i = req->nr_pages; 310 up_write(&dvnode->validate_lock);
312 while (i > 0)
313 unlock_page(req->pages[--i]);
314 afs_put_read(req); 311 afs_put_read(req);
315 goto retry; 312 goto retry;
316} 313}
@@ -473,6 +470,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
473 } 470 }
474 471
475out: 472out:
473 up_read(&dvnode->validate_lock);
476 afs_put_read(req); 474 afs_put_read(req);
477 _leave(" = %d", ret); 475 _leave(" = %d", ret);
478 return ret; 476 return ret;
@@ -1143,7 +1141,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1143 ret = -ERESTARTSYS; 1141 ret = -ERESTARTSYS;
1144 if (afs_begin_vnode_operation(&fc, dvnode, key)) { 1142 if (afs_begin_vnode_operation(&fc, dvnode, key)) {
1145 while (afs_select_fileserver(&fc)) { 1143 while (afs_select_fileserver(&fc)) {
1146 fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; 1144 fc.cb_break = afs_calc_vnode_cb_break(dvnode);
1147 afs_fs_create(&fc, dentry->d_name.name, mode, data_version, 1145 afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
1148 &newfid, &newstatus, &newcb); 1146 &newfid, &newstatus, &newcb);
1149 } 1147 }
@@ -1213,7 +1211,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
1213 ret = -ERESTARTSYS; 1211 ret = -ERESTARTSYS;
1214 if (afs_begin_vnode_operation(&fc, dvnode, key)) { 1212 if (afs_begin_vnode_operation(&fc, dvnode, key)) {
1215 while (afs_select_fileserver(&fc)) { 1213 while (afs_select_fileserver(&fc)) {
1216 fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; 1214 fc.cb_break = afs_calc_vnode_cb_break(dvnode);
1217 afs_fs_remove(&fc, dentry->d_name.name, true, 1215 afs_fs_remove(&fc, dentry->d_name.name, true,
1218 data_version); 1216 data_version);
1219 } 1217 }
@@ -1316,7 +1314,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
1316 ret = -ERESTARTSYS; 1314 ret = -ERESTARTSYS;
1317 if (afs_begin_vnode_operation(&fc, dvnode, key)) { 1315 if (afs_begin_vnode_operation(&fc, dvnode, key)) {
1318 while (afs_select_fileserver(&fc)) { 1316 while (afs_select_fileserver(&fc)) {
1319 fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; 1317 fc.cb_break = afs_calc_vnode_cb_break(dvnode);
1320 afs_fs_remove(&fc, dentry->d_name.name, false, 1318 afs_fs_remove(&fc, dentry->d_name.name, false,
1321 data_version); 1319 data_version);
1322 } 1320 }
@@ -1373,7 +1371,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1373 ret = -ERESTARTSYS; 1371 ret = -ERESTARTSYS;
1374 if (afs_begin_vnode_operation(&fc, dvnode, key)) { 1372 if (afs_begin_vnode_operation(&fc, dvnode, key)) {
1375 while (afs_select_fileserver(&fc)) { 1373 while (afs_select_fileserver(&fc)) {
1376 fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; 1374 fc.cb_break = afs_calc_vnode_cb_break(dvnode);
1377 afs_fs_create(&fc, dentry->d_name.name, mode, data_version, 1375 afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
1378 &newfid, &newstatus, &newcb); 1376 &newfid, &newstatus, &newcb);
1379 } 1377 }
@@ -1443,8 +1441,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
1443 } 1441 }
1444 1442
1445 while (afs_select_fileserver(&fc)) { 1443 while (afs_select_fileserver(&fc)) {
1446 fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; 1444 fc.cb_break = afs_calc_vnode_cb_break(dvnode);
1447 fc.cb_break_2 = vnode->cb_break + vnode->cb_s_break; 1445 fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
1448 afs_fs_link(&fc, vnode, dentry->d_name.name, data_version); 1446 afs_fs_link(&fc, vnode, dentry->d_name.name, data_version);
1449 } 1447 }
1450 1448
@@ -1512,7 +1510,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
1512 ret = -ERESTARTSYS; 1510 ret = -ERESTARTSYS;
1513 if (afs_begin_vnode_operation(&fc, dvnode, key)) { 1511 if (afs_begin_vnode_operation(&fc, dvnode, key)) {
1514 while (afs_select_fileserver(&fc)) { 1512 while (afs_select_fileserver(&fc)) {
1515 fc.cb_break = dvnode->cb_break + dvnode->cb_s_break; 1513 fc.cb_break = afs_calc_vnode_cb_break(dvnode);
1516 afs_fs_symlink(&fc, dentry->d_name.name, 1514 afs_fs_symlink(&fc, dentry->d_name.name,
1517 content, data_version, 1515 content, data_version,
1518 &newfid, &newstatus); 1516 &newfid, &newstatus);
@@ -1588,8 +1586,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1588 } 1586 }
1589 } 1587 }
1590 while (afs_select_fileserver(&fc)) { 1588 while (afs_select_fileserver(&fc)) {
1591 fc.cb_break = orig_dvnode->cb_break + orig_dvnode->cb_s_break; 1589 fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode);
1592 fc.cb_break_2 = new_dvnode->cb_break + new_dvnode->cb_s_break; 1590 fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
1593 afs_fs_rename(&fc, old_dentry->d_name.name, 1591 afs_fs_rename(&fc, old_dentry->d_name.name,
1594 new_dvnode, new_dentry->d_name.name, 1592 new_dvnode, new_dentry->d_name.name,
1595 orig_data_version, new_data_version); 1593 orig_data_version, new_data_version);
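The afs_read_dir() rework above trades the page-by-page locking for a single rw_semaphore: take validate_lock shared for the common already-valid case, and only retake it exclusively - retesting AFS_VNODE_DIR_VALID, since rwsems cannot be upgraded in place - when the contents must be refetched, then downgrade_write() so the caller continues under the shared lock. The lock choreography on its own, as a sketch (lock_dir_validate is a hypothetical helper; the real function also allocates and fills the afs_read request):

/* Returns 0 with dvnode->validate_lock held for read, or -ERESTARTSYS. */
static int lock_dir_validate(struct afs_vnode *dvnode)
{
	if (down_read_killable(&dvnode->validate_lock) < 0)
		return -ERESTARTSYS;
	if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
		return 0;

	up_read(&dvnode->validate_lock);	/* cannot upgrade in place... */
	if (down_write_killable(&dvnode->validate_lock) < 0)
		return -ERESTARTSYS;

	if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
		/* ...so retest, then fetch and check the directory here */
		set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
	}
	downgrade_write(&dvnode->validate_lock);	/* exclusive -> shared */
	return 0;
}

The matching up_read() is the one the later afs_dir_iterate() hunk adds at its out label.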
diff --git a/fs/afs/file.c b/fs/afs/file.c
index c24c08016dd9..7d4f26198573 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -238,7 +238,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
238 ret = -ERESTARTSYS; 238 ret = -ERESTARTSYS;
239 if (afs_begin_vnode_operation(&fc, vnode, key)) { 239 if (afs_begin_vnode_operation(&fc, vnode, key)) {
240 while (afs_select_fileserver(&fc)) { 240 while (afs_select_fileserver(&fc)) {
241 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 241 fc.cb_break = afs_calc_vnode_cb_break(vnode);
242 afs_fs_fetch_data(&fc, desc); 242 afs_fs_fetch_data(&fc, desc);
243 } 243 }
244 244
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 7a0e017070ec..dc62d15a964b 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -86,7 +86,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
86 ret = -ERESTARTSYS; 86 ret = -ERESTARTSYS;
87 if (afs_begin_vnode_operation(&fc, vnode, key)) { 87 if (afs_begin_vnode_operation(&fc, vnode, key)) {
88 while (afs_select_fileserver(&fc)) { 88 while (afs_select_fileserver(&fc)) {
89 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 89 fc.cb_break = afs_calc_vnode_cb_break(vnode);
90 afs_fs_set_lock(&fc, type); 90 afs_fs_set_lock(&fc, type);
91 } 91 }
92 92
@@ -117,7 +117,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
117 ret = -ERESTARTSYS; 117 ret = -ERESTARTSYS;
118 if (afs_begin_vnode_operation(&fc, vnode, key)) { 118 if (afs_begin_vnode_operation(&fc, vnode, key)) {
119 while (afs_select_current_fileserver(&fc)) { 119 while (afs_select_current_fileserver(&fc)) {
120 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 120 fc.cb_break = afs_calc_vnode_cb_break(vnode);
121 afs_fs_extend_lock(&fc); 121 afs_fs_extend_lock(&fc);
122 } 122 }
123 123
@@ -148,7 +148,7 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
148 ret = -ERESTARTSYS; 148 ret = -ERESTARTSYS;
149 if (afs_begin_vnode_operation(&fc, vnode, key)) { 149 if (afs_begin_vnode_operation(&fc, vnode, key)) {
150 while (afs_select_current_fileserver(&fc)) { 150 while (afs_select_current_fileserver(&fc)) {
151 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 151 fc.cb_break = afs_calc_vnode_cb_break(vnode);
152 afs_fs_release_lock(&fc); 152 afs_fs_release_lock(&fc);
153 } 153 }
154 154
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index efacdb7c1dee..b273e1d60478 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -134,6 +134,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
134 struct afs_read *read_req) 134 struct afs_read *read_req)
135{ 135{
136 const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; 136 const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp;
137 bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
137 u64 data_version, size; 138 u64 data_version, size;
138 u32 type, abort_code; 139 u32 type, abort_code;
139 u8 flags = 0; 140 u8 flags = 0;
@@ -142,13 +143,32 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
142 if (vnode) 143 if (vnode)
143 write_seqlock(&vnode->cb_lock); 144 write_seqlock(&vnode->cb_lock);
144 145
146 abort_code = ntohl(xdr->abort_code);
147
145 if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) { 148 if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) {
149 if (xdr->if_version == htonl(0) &&
150 abort_code != 0 &&
151 inline_error) {
152 /* The OpenAFS fileserver has a bug in FS.InlineBulkStatus
153 * whereby it doesn't set the interface version in the error
154 * case.
155 */
156 status->abort_code = abort_code;
157 ret = 0;
158 goto out;
159 }
160
146 pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); 161 pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
147 goto bad; 162 goto bad;
148 } 163 }
149 164
165 if (abort_code != 0 && inline_error) {
166 status->abort_code = abort_code;
167 ret = 0;
168 goto out;
169 }
170
150 type = ntohl(xdr->type); 171 type = ntohl(xdr->type);
151 abort_code = ntohl(xdr->abort_code);
152 switch (type) { 172 switch (type) {
153 case AFS_FTYPE_FILE: 173 case AFS_FTYPE_FILE:
154 case AFS_FTYPE_DIR: 174 case AFS_FTYPE_DIR:
@@ -165,13 +185,6 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
165 } 185 }
166 status->type = type; 186 status->type = type;
167 break; 187 break;
168 case AFS_FTYPE_INVALID:
169 if (abort_code != 0) {
170 status->abort_code = abort_code;
171 ret = 0;
172 goto out;
173 }
174 /* Fall through */
175 default: 188 default:
176 goto bad; 189 goto bad;
177 } 190 }
@@ -248,7 +261,7 @@ static void xdr_decode_AFSCallBack(struct afs_call *call,
248 261
249 write_seqlock(&vnode->cb_lock); 262 write_seqlock(&vnode->cb_lock);
250 263
251 if (call->cb_break == (vnode->cb_break + cbi->server->cb_s_break)) { 264 if (call->cb_break == afs_cb_break_sum(vnode, cbi)) {
252 vnode->cb_version = ntohl(*bp++); 265 vnode->cb_version = ntohl(*bp++);
253 cb_expiry = ntohl(*bp++); 266 cb_expiry = ntohl(*bp++);
254 vnode->cb_type = ntohl(*bp++); 267 vnode->cb_type = ntohl(*bp++);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 06194cfe9724..479b7fdda124 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -108,7 +108,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
108 ret = -ERESTARTSYS; 108 ret = -ERESTARTSYS;
109 if (afs_begin_vnode_operation(&fc, vnode, key)) { 109 if (afs_begin_vnode_operation(&fc, vnode, key)) {
110 while (afs_select_fileserver(&fc)) { 110 while (afs_select_fileserver(&fc)) {
111 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 111 fc.cb_break = afs_calc_vnode_cb_break(vnode);
112 afs_fs_fetch_file_status(&fc, NULL, new_inode); 112 afs_fs_fetch_file_status(&fc, NULL, new_inode);
113 } 113 }
114 114
@@ -393,15 +393,18 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
393 read_seqlock_excl(&vnode->cb_lock); 393 read_seqlock_excl(&vnode->cb_lock);
394 394
395 if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { 395 if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
396 if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break) { 396 if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break ||
397 vnode->cb_v_break != vnode->volume->cb_v_break) {
397 vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; 398 vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
399 vnode->cb_v_break = vnode->volume->cb_v_break;
400 valid = false;
398 } else if (vnode->status.type == AFS_FTYPE_DIR && 401 } else if (vnode->status.type == AFS_FTYPE_DIR &&
399 test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) && 402 test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
400 vnode->cb_expires_at - 10 > now) { 403 vnode->cb_expires_at - 10 > now) {
401 valid = true; 404 valid = true;
402 } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && 405 } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
403 vnode->cb_expires_at - 10 > now) { 406 vnode->cb_expires_at - 10 > now) {
404 valid = true; 407 valid = true;
405 } 408 }
406 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 409 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
407 valid = true; 410 valid = true;
@@ -415,7 +418,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
415 if (valid) 418 if (valid)
416 goto valid; 419 goto valid;
417 420
418 mutex_lock(&vnode->validate_lock); 421 down_write(&vnode->validate_lock);
419 422
420 /* if the promise has expired, we need to check the server again to get 423 /* if the promise has expired, we need to check the server again to get
421 * a new promise - note that if the (parent) directory's metadata was 424 * a new promise - note that if the (parent) directory's metadata was
@@ -444,13 +447,13 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
444 * different */ 447 * different */
445 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) 448 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
446 afs_zap_data(vnode); 449 afs_zap_data(vnode);
447 mutex_unlock(&vnode->validate_lock); 450 up_write(&vnode->validate_lock);
448valid: 451valid:
449 _leave(" = 0"); 452 _leave(" = 0");
450 return 0; 453 return 0;
451 454
452error_unlock: 455error_unlock:
453 mutex_unlock(&vnode->validate_lock); 456 up_write(&vnode->validate_lock);
454 _leave(" = %d", ret); 457 _leave(" = %d", ret);
455 return ret; 458 return ret;
456} 459}
@@ -574,7 +577,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
574 ret = -ERESTARTSYS; 577 ret = -ERESTARTSYS;
575 if (afs_begin_vnode_operation(&fc, vnode, key)) { 578 if (afs_begin_vnode_operation(&fc, vnode, key)) {
576 while (afs_select_fileserver(&fc)) { 579 while (afs_select_fileserver(&fc)) {
577 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 580 fc.cb_break = afs_calc_vnode_cb_break(vnode);
578 afs_fs_setattr(&fc, attr); 581 afs_fs_setattr(&fc, attr);
579 } 582 }
580 583
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index f8086ec95e24..e3f8a46663db 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -396,6 +396,7 @@ struct afs_server {
396#define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */ 396#define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */
397#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */ 397#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */
398#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */ 398#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */
399#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
399 atomic_t usage; 400 atomic_t usage;
400 u32 addr_version; /* Address list version */ 401 u32 addr_version; /* Address list version */
401 402
@@ -433,6 +434,7 @@ struct afs_server_list {
433 unsigned short index; /* Server currently in use */ 434 unsigned short index; /* Server currently in use */
434 unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */ 435 unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */
435 unsigned int seq; /* Set to ->servers_seq when installed */ 436 unsigned int seq; /* Set to ->servers_seq when installed */
437 rwlock_t lock;
436 struct afs_server_entry servers[]; 438 struct afs_server_entry servers[];
437}; 439};
438 440
@@ -459,6 +461,9 @@ struct afs_volume {
459 rwlock_t servers_lock; /* Lock for ->servers */ 461 rwlock_t servers_lock; /* Lock for ->servers */
460 unsigned int servers_seq; /* Incremented each time ->servers changes */ 462 unsigned int servers_seq; /* Incremented each time ->servers changes */
461 463
464 unsigned cb_v_break; /* Break-everything counter. */
465 rwlock_t cb_break_lock;
466
462 afs_voltype_t type; /* type of volume */ 467 afs_voltype_t type; /* type of volume */
463 short error; 468 short error;
464 char type_force; /* force volume type (suppress R/O -> R/W) */ 469 char type_force; /* force volume type (suppress R/O -> R/W) */
@@ -494,7 +499,7 @@ struct afs_vnode {
494#endif 499#endif
495 struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */ 500 struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
496 struct mutex io_lock; /* Lock for serialising I/O on this mutex */ 501 struct mutex io_lock; /* Lock for serialising I/O on this mutex */
497 struct mutex validate_lock; /* lock for validating this vnode */ 502 struct rw_semaphore validate_lock; /* lock for validating this vnode */
498 spinlock_t wb_lock; /* lock for wb_keys */ 503 spinlock_t wb_lock; /* lock for wb_keys */
499 spinlock_t lock; /* waitqueue/flags lock */ 504 spinlock_t lock; /* waitqueue/flags lock */
500 unsigned long flags; 505 unsigned long flags;
@@ -519,6 +524,7 @@ struct afs_vnode {
519 /* outstanding callback notification on this file */ 524 /* outstanding callback notification on this file */
520 struct afs_cb_interest *cb_interest; /* Server on which this resides */ 525 struct afs_cb_interest *cb_interest; /* Server on which this resides */
521 unsigned int cb_s_break; /* Mass break counter on ->server */ 526 unsigned int cb_s_break; /* Mass break counter on ->server */
527 unsigned int cb_v_break; /* Mass break counter on ->volume */
522 unsigned int cb_break; /* Break counter on vnode */ 528 unsigned int cb_break; /* Break counter on vnode */
523 seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */ 529 seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */
524 530
@@ -648,16 +654,29 @@ extern void afs_init_callback_state(struct afs_server *);
648extern void afs_break_callback(struct afs_vnode *); 654extern void afs_break_callback(struct afs_vnode *);
649extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*); 655extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*);
650 656
651extern int afs_register_server_cb_interest(struct afs_vnode *, struct afs_server_entry *); 657extern int afs_register_server_cb_interest(struct afs_vnode *,
658 struct afs_server_list *, unsigned int);
652extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *); 659extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *);
653extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *); 660extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *);
654 661
655static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi) 662static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi)
656{ 663{
657 refcount_inc(&cbi->usage); 664 if (cbi)
665 refcount_inc(&cbi->usage);
658 return cbi; 666 return cbi;
659} 667}
660 668
669static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
670{
671 return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
672}
673
674static inline unsigned int afs_cb_break_sum(struct afs_vnode *vnode,
675 struct afs_cb_interest *cbi)
676{
677 return vnode->cb_break + cbi->server->cb_s_break + vnode->volume->cb_v_break;
678}
679
661/* 680/*
662 * cell.c 681 * cell.c
663 */ 682 */
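The two inline helpers added above are what the fc.cb_break assignments throughout this diff switch to: a caller snapshots the combined vnode + server + volume counter with afs_calc_vnode_cb_break() before issuing an RPC, and afterwards only trusts the returned callback promise if afs_cb_break_sum() still matches, so a break arriving at any of the three levels invalidates it. A sketch of that compare step, assuming vnode->cb_interest is set (note_new_promise is a hypothetical helper, not code from the patch):

static void note_new_promise(struct afs_vnode *vnode, unsigned int cb_break)
{
	/* cb_break was taken with afs_calc_vnode_cb_break() before the RPC */
	write_seqlock(&vnode->cb_lock);
	if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest))
		set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
	write_sequnlock(&vnode->cb_lock);
}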
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index ac0feac9d746..e065bc0768e6 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -179,7 +179,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
179 */ 179 */
180 if (fc->flags & AFS_FS_CURSOR_VNOVOL) { 180 if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
181 fc->ac.error = -EREMOTEIO; 181 fc->ac.error = -EREMOTEIO;
182 goto failed; 182 goto next_server;
183 } 183 }
184 184
185 write_lock(&vnode->volume->servers_lock); 185 write_lock(&vnode->volume->servers_lock);
@@ -201,7 +201,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
201 */ 201 */
202 if (vnode->volume->servers == fc->server_list) { 202 if (vnode->volume->servers == fc->server_list) {
203 fc->ac.error = -EREMOTEIO; 203 fc->ac.error = -EREMOTEIO;
204 goto failed; 204 goto next_server;
205 } 205 }
206 206
207 /* Try again */ 207 /* Try again */
@@ -350,8 +350,8 @@ use_server:
350 * break request before we've finished decoding the reply and 350 * break request before we've finished decoding the reply and
351 * installing the vnode. 351 * installing the vnode.
352 */ 352 */
353 fc->ac.error = afs_register_server_cb_interest( 353 fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list,
354 vnode, &fc->server_list->servers[fc->index]); 354 fc->index);
355 if (fc->ac.error < 0) 355 if (fc->ac.error < 0)
356 goto failed; 356 goto failed;
357 357
@@ -369,8 +369,16 @@ use_server:
369 if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) { 369 if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
370 fc->ac.alist = afs_get_addrlist(alist); 370 fc->ac.alist = afs_get_addrlist(alist);
371 371
372 if (!afs_probe_fileserver(fc)) 372 if (!afs_probe_fileserver(fc)) {
373 goto failed; 373 switch (fc->ac.error) {
374 case -ENOMEM:
375 case -ERESTARTSYS:
376 case -EINTR:
377 goto failed;
378 default:
379 goto next_server;
380 }
381 }
374 } 382 }
375 383
376 if (!fc->ac.alist) 384 if (!fc->ac.alist)
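The rotate.c changes above separate errors that should abort the whole rotation from errors that should only skip the current server: the VNOVOL cases and probe failures now fall through to next_server unless the failure is local to the client. The probe branch can be read as the following predicate (a paraphrase of the switch in the hunk, not a helper that exists in the patch):

static bool probe_error_is_fatal(int error)
{
	switch (error) {
	case -ENOMEM:
	case -ERESTARTSYS:
	case -EINTR:
		return true;	/* the client's problem: stop rotating */
	default:
		return false;	/* the server's problem: try the next one */
	}
}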
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 5c6263972ec9..08735948f15d 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -41,6 +41,7 @@ int afs_open_socket(struct afs_net *net)
41{ 41{
42 struct sockaddr_rxrpc srx; 42 struct sockaddr_rxrpc srx;
43 struct socket *socket; 43 struct socket *socket;
44 unsigned int min_level;
44 int ret; 45 int ret;
45 46
46 _enter(""); 47 _enter("");
@@ -60,6 +61,12 @@ int afs_open_socket(struct afs_net *net)
60 srx.transport.sin6.sin6_family = AF_INET6; 61 srx.transport.sin6.sin6_family = AF_INET6;
61 srx.transport.sin6.sin6_port = htons(AFS_CM_PORT); 62 srx.transport.sin6.sin6_port = htons(AFS_CM_PORT);
62 63
64 min_level = RXRPC_SECURITY_ENCRYPT;
65 ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
66 (void *)&min_level, sizeof(min_level));
67 if (ret < 0)
68 goto error_2;
69
63 ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); 70 ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
64 if (ret == -EADDRINUSE) { 71 if (ret == -EADDRINUSE) {
65 srx.transport.sin6.sin6_port = 0; 72 srx.transport.sin6.sin6_port = 0;
@@ -482,8 +489,12 @@ static void afs_deliver_to_call(struct afs_call *call)
482 state = READ_ONCE(call->state); 489 state = READ_ONCE(call->state);
483 switch (ret) { 490 switch (ret) {
484 case 0: 491 case 0:
485 if (state == AFS_CALL_CL_PROC_REPLY) 492 if (state == AFS_CALL_CL_PROC_REPLY) {
493 if (call->cbi)
494 set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
495 &call->cbi->server->flags);
486 goto call_complete; 496 goto call_complete;
497 }
487 ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY); 498 ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY);
488 goto done; 499 goto done;
489 case -EINPROGRESS: 500 case -EINPROGRESS:
@@ -493,11 +504,6 @@ static void afs_deliver_to_call(struct afs_call *call)
493 case -ECONNABORTED: 504 case -ECONNABORTED:
494 ASSERTCMP(state, ==, AFS_CALL_COMPLETE); 505 ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
495 goto done; 506 goto done;
496 case -ENOTCONN:
497 abort_code = RX_CALL_DEAD;
498 rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
499 abort_code, ret, "KNC");
500 goto local_abort;
501 case -ENOTSUPP: 507 case -ENOTSUPP:
502 abort_code = RXGEN_OPCODE; 508 abort_code = RXGEN_OPCODE;
503 rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 509 rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
diff --git a/fs/afs/security.c b/fs/afs/security.c
index cea2fff313dc..1992b0ffa543 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -147,8 +147,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
147 break; 147 break;
148 } 148 }
149 149
150 if (cb_break != (vnode->cb_break + 150 if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) {
151 vnode->cb_interest->server->cb_s_break)) {
152 changed = true; 151 changed = true;
153 break; 152 break;
154 } 153 }
@@ -178,7 +177,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
178 } 177 }
179 } 178 }
180 179
181 if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break)) 180 if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest))
182 goto someone_else_changed_it; 181 goto someone_else_changed_it;
183 182
184 /* We need a ref on any permits list we want to copy as we'll have to 183 /* We need a ref on any permits list we want to copy as we'll have to
@@ -257,7 +256,7 @@ found:
257 256
258 spin_lock(&vnode->lock); 257 spin_lock(&vnode->lock);
259 zap = rcu_access_pointer(vnode->permit_cache); 258 zap = rcu_access_pointer(vnode->permit_cache);
260 if (cb_break == (vnode->cb_break + vnode->cb_interest->server->cb_s_break) && 259 if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest) &&
261 zap == permits) 260 zap == permits)
262 rcu_assign_pointer(vnode->permit_cache, replacement); 261 rcu_assign_pointer(vnode->permit_cache, replacement);
263 else 262 else
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 629c74986cff..3af4625e2f8c 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -67,12 +67,6 @@ struct afs_server *afs_find_server(struct afs_net *net,
67 sizeof(struct in6_addr)); 67 sizeof(struct in6_addr));
68 if (diff == 0) 68 if (diff == 0)
69 goto found; 69 goto found;
70 if (diff < 0) {
71 // TODO: Sort the list
72 //if (i == alist->nr_ipv4)
73 // goto not_found;
74 break;
75 }
76 } 70 }
77 } 71 }
78 } else { 72 } else {
@@ -87,17 +81,10 @@ struct afs_server *afs_find_server(struct afs_net *net,
87 (u32 __force)b->sin6_addr.s6_addr32[3]); 81 (u32 __force)b->sin6_addr.s6_addr32[3]);
88 if (diff == 0) 82 if (diff == 0)
89 goto found; 83 goto found;
90 if (diff < 0) {
91 // TODO: Sort the list
92 //if (i == 0)
93 // goto not_found;
94 break;
95 }
96 } 84 }
97 } 85 }
98 } 86 }
99 87
100 //not_found:
101 server = NULL; 88 server = NULL;
102 found: 89 found:
103 if (server && !atomic_inc_not_zero(&server->usage)) 90 if (server && !atomic_inc_not_zero(&server->usage))
@@ -395,14 +382,16 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
395 struct afs_addr_list *alist = rcu_access_pointer(server->addresses); 382 struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
396 struct afs_addr_cursor ac = { 383 struct afs_addr_cursor ac = {
397 .alist = alist, 384 .alist = alist,
398 .addr = &alist->addrs[0],
399 .start = alist->index, 385 .start = alist->index,
400 .index = alist->index, 386 .index = 0,
387 .addr = &alist->addrs[alist->index],
401 .error = 0, 388 .error = 0,
402 }; 389 };
403 _enter("%p", server); 390 _enter("%p", server);
404 391
405 afs_fs_give_up_all_callbacks(net, server, &ac, NULL); 392 if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
393 afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
394
406 call_rcu(&server->rcu, afs_server_rcu); 395 call_rcu(&server->rcu, afs_server_rcu);
407 afs_dec_servers_outstanding(net); 396 afs_dec_servers_outstanding(net);
408} 397}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 0f8dc4c8f07c..8a5760aa5832 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -49,6 +49,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
49 goto error; 49 goto error;
50 50
51 refcount_set(&slist->usage, 1); 51 refcount_set(&slist->usage, 1);
52 rwlock_init(&slist->lock);
52 53
53 /* Make sure a records exists for each server in the list. */ 54 /* Make sure a records exists for each server in the list. */
54 for (i = 0; i < vldb->nr_servers; i++) { 55 for (i = 0; i < vldb->nr_servers; i++) {
@@ -64,9 +65,11 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
64 goto error_2; 65 goto error_2;
65 } 66 }
66 67
67 /* Insertion-sort by server pointer */ 68 /* Insertion-sort by UUID */
68 for (j = 0; j < slist->nr_servers; j++) 69 for (j = 0; j < slist->nr_servers; j++)
69 if (slist->servers[j].server >= server) 70 if (memcmp(&slist->servers[j].server->uuid,
71 &server->uuid,
72 sizeof(server->uuid)) >= 0)
70 break; 73 break;
71 if (j < slist->nr_servers) { 74 if (j < slist->nr_servers) {
72 if (slist->servers[j].server == server) { 75 if (slist->servers[j].server == server) {
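Sorting the server list by UUID rather than by afs_server pointer makes the order reproducible instead of dependent on where each record happened to be allocated. A runnable userspace model of the new insertion rule (plain memcmp() over the raw UUID bytes, as in the hunk):

#include <stdio.h>
#include <string.h>

struct entry { unsigned char uuid[16]; };

/* First slot whose UUID sorts greater than or equal to the new one. */
static size_t insert_pos(const struct entry *list, size_t n,
			 const unsigned char *uuid)
{
	size_t j;

	for (j = 0; j < n; j++)
		if (memcmp(list[j].uuid, uuid, 16) >= 0)
			break;
	return j;
}

int main(void)
{
	struct entry list[2] = { { { 0x10 } }, { { 0x30 } } };
	unsigned char uuid[16] = { 0x20 };

	printf("insert at %zu\n", insert_pos(list, 2, uuid));	/* prints 1 */
	return 0;
}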
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 65081ec3c36e..9e5d7966621c 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -590,7 +590,7 @@ static void afs_i_init_once(void *_vnode)
590 memset(vnode, 0, sizeof(*vnode)); 590 memset(vnode, 0, sizeof(*vnode));
591 inode_init_once(&vnode->vfs_inode); 591 inode_init_once(&vnode->vfs_inode);
592 mutex_init(&vnode->io_lock); 592 mutex_init(&vnode->io_lock);
593 mutex_init(&vnode->validate_lock); 593 init_rwsem(&vnode->validate_lock);
594 spin_lock_init(&vnode->wb_lock); 594 spin_lock_init(&vnode->wb_lock);
595 spin_lock_init(&vnode->lock); 595 spin_lock_init(&vnode->lock);
596 INIT_LIST_HEAD(&vnode->wb_keys); 596 INIT_LIST_HEAD(&vnode->wb_keys);
@@ -688,7 +688,7 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
688 if (afs_begin_vnode_operation(&fc, vnode, key)) { 688 if (afs_begin_vnode_operation(&fc, vnode, key)) {
689 fc.flags |= AFS_FS_CURSOR_NO_VSLEEP; 689 fc.flags |= AFS_FS_CURSOR_NO_VSLEEP;
690 while (afs_select_fileserver(&fc)) { 690 while (afs_select_fileserver(&fc)) {
691 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 691 fc.cb_break = afs_calc_vnode_cb_break(vnode);
692 afs_fs_get_volume_status(&fc, &vs); 692 afs_fs_get_volume_status(&fc, &vs);
693 } 693 }
694 694
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c164698dc304..8b39e6ebb40b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -351,7 +351,7 @@ found_key:
351 ret = -ERESTARTSYS; 351 ret = -ERESTARTSYS;
352 if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) { 352 if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
353 while (afs_select_fileserver(&fc)) { 353 while (afs_select_fileserver(&fc)) {
354 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 354 fc.cb_break = afs_calc_vnode_cb_break(vnode);
355 afs_fs_store_data(&fc, mapping, first, last, offset, to); 355 afs_fs_store_data(&fc, mapping, first, last, offset, to);
356 } 356 }
357 357
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1b2ede6abcdf..1a76d751cf3c 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -261,7 +261,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
261 * Inherently racy -- command line shares address space 261 * Inherently racy -- command line shares address space
262 * with code and data. 262 * with code and data.
263 */ 263 */
264 rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); 264 rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
265 if (rv <= 0) 265 if (rv <= 0)
266 goto out_free_page; 266 goto out_free_page;
267 267
@@ -279,7 +279,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
279 int nr_read; 279 int nr_read;
280 280
281 _count = min3(count, len, PAGE_SIZE); 281 _count = min3(count, len, PAGE_SIZE);
282 nr_read = access_remote_vm(mm, p, page, _count, 0); 282 nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
283 if (nr_read < 0) 283 if (nr_read < 0)
284 rv = nr_read; 284 rv = nr_read;
285 if (nr_read <= 0) 285 if (nr_read <= 0)
@@ -325,7 +325,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
325 bool final; 325 bool final;
326 326
327 _count = min3(count, len, PAGE_SIZE); 327 _count = min3(count, len, PAGE_SIZE);
328 nr_read = access_remote_vm(mm, p, page, _count, 0); 328 nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
329 if (nr_read < 0) 329 if (nr_read < 0)
330 rv = nr_read; 330 rv = nr_read;
331 if (nr_read <= 0) 331 if (nr_read <= 0)
@@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
946 max_len = min_t(size_t, PAGE_SIZE, count); 946 max_len = min_t(size_t, PAGE_SIZE, count);
947 this_len = min(max_len, this_len); 947 this_len = min(max_len, this_len);
948 948
949 retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); 949 retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
950 950
951 if (retval <= 0) { 951 if (retval <= 0) {
952 ret = retval; 952 ret = retval;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6930c63126c7..6d6e79c59e68 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1045,13 +1045,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
1045 1045
1046#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 1046#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
1047 1047
1048#ifdef CONFIG_S390 1048#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
1049#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
1050#elif defined(CONFIG_ARM64)
1051#define KVM_MAX_IRQ_ROUTES 4096
1052#else
1053#define KVM_MAX_IRQ_ROUTES 1024
1054#endif
1055 1049
1056bool kvm_arch_can_set_irq_routing(struct kvm *kvm); 1050bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
1057int kvm_set_irq_routing(struct kvm *kvm, 1051int kvm_set_irq_routing(struct kvm *kvm,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1ac1f06a4be6..c080af584ddd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2493,6 +2493,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
2493#define FOLL_MLOCK 0x1000 /* lock present pages */ 2493#define FOLL_MLOCK 0x1000 /* lock present pages */
2494#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2494#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2495#define FOLL_COW 0x4000 /* internal GUP flag */ 2495#define FOLL_COW 0x4000 /* internal GUP flag */
2496#define FOLL_ANON 0x8000 /* don't do file mappings */
2496 2497
2497static inline int vm_fault_to_errno(int vm_fault, int foll_flags) 2498static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
2498{ 2499{
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index b5b43f94f311..01b990e4b228 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
312({ \ 312({ \
313 int i, ret = 1; \ 313 int i, ret = 1; \
314 for (i = 0; i < map_words(map); i++) { \ 314 for (i = 0; i < map_words(map); i++) { \
315 if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ 315 if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
316 ret = 0; \ 316 ret = 0; \
317 break; \ 317 break; \
318 } \ 318 } \
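The one-character map.h fix is easier to see with the macro reduced to a scalar: map_word_andequal() is meant to test whether the bits selected by val2 match the pattern val3, and the old comparison against val2 was only right when val2 happened to equal val3. A runnable plain-C model (not the kernel macro, which loops over the map words):

#include <stdbool.h>
#include <stdio.h>

/* Intent of the macro: true iff (val1 & val2) == val3. */
static bool word_andequal(unsigned long val1, unsigned long val2,
			  unsigned long val3)
{
	return (val1 & val2) == val3;	/* the old code compared against val2 */
}

int main(void)
{
	/* val2 selects the bits of interest, val3 is the expected pattern */
	printf("%d\n", word_andequal(0xff0f, 0x00ff, 0x000f));	/* 1 */
	printf("%d\n", word_andequal(0xff0f, 0x00ff, 0x00ff));	/* 0 */
	return 0;
}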
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 5dad59b31244..17c919436f48 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -867,12 +867,18 @@ struct nand_op_instr {
867 * tBERS (during an erase) which all of them are u64 values that cannot be 867 * tBERS (during an erase) which all of them are u64 values that cannot be
868 * divided by usual kernel macros and must be handled with the special 868 * divided by usual kernel macros and must be handled with the special
869 * DIV_ROUND_UP_ULL() macro. 869 * DIV_ROUND_UP_ULL() macro.
870 *
871 * Cast to type of dividend is needed here to guarantee that the result won't
872 * be an unsigned long long when the dividend is an unsigned long (or smaller),
873 * which is what the compiler does when it sees ternary operator with 2
874 * different return types (picks the largest type to make sure there's no
875 * loss).
870 */ 876 */
871#define __DIVIDE(dividend, divisor) ({ \ 877#define __DIVIDE(dividend, divisor) ({ \
872 sizeof(dividend) == sizeof(u32) ? \ 878 (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
873 DIV_ROUND_UP(dividend, divisor) : \ 879 DIV_ROUND_UP(dividend, divisor) : \
874 DIV_ROUND_UP_ULL(dividend, divisor); \ 880 DIV_ROUND_UP_ULL(dividend, divisor)); \
875 }) 881 })
876#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) 882#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
877#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) 883#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
878 884
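The comment added to rawnand.h above deserves a concrete demonstration: without the cast, the ternary takes the type of its wider arm, so dividing a 32-bit timing value produced an unsigned long long. The runnable model below simplifies both arms to the same round-up helper (the kernel uses DIV_ROUND_UP vs DIV_ROUND_UP_ULL) and relies on the GCC __typeof__ extension exactly as the header does; on a typical 64-bit build it prints 8 bytes for the old form and 4 for the new one:

#include <stdio.h>

#define DIV_RU(n, d)	(((n) + (d) - 1) / (d))

#define OLD_DIVIDE(x, d)						\
	(sizeof(x) == sizeof(unsigned int) ?				\
		DIV_RU(x, d) : DIV_RU((unsigned long long)(x), d))

#define NEW_DIVIDE(x, d)						\
	((__typeof__(x))(sizeof(x) <= sizeof(unsigned long) ?		\
		DIV_RU(x, d) : DIV_RU((unsigned long long)(x), d)))

int main(void)
{
	unsigned int psecs = 1500;

	/* the ternary's result is ULL even when the 32-bit arm is chosen */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(OLD_DIVIDE(psecs, 1000)),
	       sizeof(NEW_DIVIDE(psecs, 1000)));
	return 0;
}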
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index f0820554caa9..d0a341bc4540 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -575,6 +575,48 @@ TRACE_EVENT(afs_protocol_error,
575 __entry->call, __entry->error, __entry->where) 575 __entry->call, __entry->error, __entry->where)
576 ); 576 );
577 577
578TRACE_EVENT(afs_cm_no_server,
579 TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
580
581 TP_ARGS(call, srx),
582
583 TP_STRUCT__entry(
584 __field(unsigned int, call )
585 __field(unsigned int, op_id )
586 __field_struct(struct sockaddr_rxrpc, srx )
587 ),
588
589 TP_fast_assign(
590 __entry->call = call->debug_id;
591 __entry->op_id = call->operation_ID;
592 memcpy(&__entry->srx, srx, sizeof(__entry->srx));
593 ),
594
595 TP_printk("c=%08x op=%u %pISpc",
596 __entry->call, __entry->op_id, &__entry->srx.transport)
597 );
598
599TRACE_EVENT(afs_cm_no_server_u,
600 TP_PROTO(struct afs_call *call, const uuid_t *uuid),
601
602 TP_ARGS(call, uuid),
603
604 TP_STRUCT__entry(
605 __field(unsigned int, call )
606 __field(unsigned int, op_id )
607 __field_struct(uuid_t, uuid )
608 ),
609
610 TP_fast_assign(
611 __entry->call = call->debug_id;
612 __entry->op_id = call->operation_ID;
613 memcpy(&__entry->uuid, uuid, sizeof(__entry->uuid));
614 ),
615
616 TP_printk("c=%08x op=%u %pU",
617 __entry->call, __entry->op_id, &__entry->uuid)
618 );
619
578#endif /* _TRACE_AFS_H */ 620#endif /* _TRACE_AFS_H */
579 621
580/* This part must be outside protection */ 622/* This part must be outside protection */
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 7dd8f34c37df..fdcf88bcf0ea 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
352DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); 352DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
353DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); 353DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
354 354
355TRACE_EVENT(xen_mmu_flush_tlb_all,
356 TP_PROTO(int x),
357 TP_ARGS(x),
358 TP_STRUCT__entry(__array(char, x, 0)),
359 TP_fast_assign((void)x),
360 TP_printk("%s", "")
361 );
362
363TRACE_EVENT(xen_mmu_flush_tlb,
364 TP_PROTO(int x),
365 TP_ARGS(x),
366 TP_STRUCT__entry(__array(char, x, 0)),
367 TP_fast_assign((void)x),
368 TP_printk("%s", "")
369 );
370
371TRACE_EVENT(xen_mmu_flush_tlb_one_user, 355TRACE_EVENT(xen_mmu_flush_tlb_one_user,
372 TP_PROTO(unsigned long addr), 356 TP_PROTO(unsigned long addr),
373 TP_ARGS(addr), 357 TP_ARGS(addr),
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 30c0cb8cc9bc..23920c5ff728 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
1669 return number(buf, end, (unsigned long int)ptr, spec); 1669 return number(buf, end, (unsigned long int)ptr, spec);
1670} 1670}
1671 1671
1672static bool have_filled_random_ptr_key __read_mostly; 1672static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
1673static siphash_key_t ptr_key __read_mostly; 1673static siphash_key_t ptr_key __read_mostly;
1674 1674
1675static void fill_random_ptr_key(struct random_ready_callback *unused) 1675static void enable_ptr_key_workfn(struct work_struct *work)
1676{ 1676{
1677 get_random_bytes(&ptr_key, sizeof(ptr_key)); 1677 get_random_bytes(&ptr_key, sizeof(ptr_key));
1678 /* 1678 /* Needs to run from preemptible context */
1679 * have_filled_random_ptr_key==true is dependent on get_random_bytes(). 1679 static_branch_disable(&not_filled_random_ptr_key);
1680 * ptr_to_id() needs to see have_filled_random_ptr_key==true 1680}
1681 * after get_random_bytes() returns. 1681
1682 */ 1682static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
1683 smp_mb(); 1683
1684 WRITE_ONCE(have_filled_random_ptr_key, true); 1684static void fill_random_ptr_key(struct random_ready_callback *unused)
1685{
1686 /* This may be in an interrupt handler. */
1687 queue_work(system_unbound_wq, &enable_ptr_key_work);
1685} 1688}
1686 1689
1687static struct random_ready_callback random_ready = { 1690static struct random_ready_callback random_ready = {
@@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void)
1695 if (!ret) { 1698 if (!ret) {
1696 return 0; 1699 return 0;
1697 } else if (ret == -EALREADY) { 1700 } else if (ret == -EALREADY) {
1698 fill_random_ptr_key(&random_ready); 1701 /* This is in preemptible context */
1702 enable_ptr_key_workfn(&enable_ptr_key_work);
1699 return 0; 1703 return 0;
1700 } 1704 }
1701 1705
@@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
1709 unsigned long hashval; 1713 unsigned long hashval;
1710 const int default_width = 2 * sizeof(ptr); 1714 const int default_width = 2 * sizeof(ptr);
1711 1715
1712 if (unlikely(!have_filled_random_ptr_key)) { 1716 if (static_branch_unlikely(&not_filled_random_ptr_key)) {
1713 spec.field_width = default_width; 1717 spec.field_width = default_width;
1714 /* string length must be less than default_width */ 1718 /* string length must be less than default_width */
1715 return string(buf, end, "(ptrval)", spec); 1719 return string(buf, end, "(ptrval)", spec);
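
The vsprintf change replaces a bool guarded by smp_mb() with a static key, which keeps the not-yet-seeded check out of the %p hashing fast path. Because static_branch_disable() patches code and must run from preemptible context, while the random-ready callback can fire from an interrupt handler, the callback now only queues a work item on system_unbound_wq and the key is flipped from the work function. A minimal sketch of this general pattern with hypothetical names (not part of lib/vsprintf.c):

#include <linux/jump_label.h>
#include <linux/workqueue.h>

static DEFINE_STATIC_KEY_TRUE(feature_not_ready);

static void feature_enable_workfn(struct work_struct *work)
{
	/* Runs on the unbound workqueue, i.e. preemptible context,
	 * which is what static_branch_disable() requires. */
	static_branch_disable(&feature_not_ready);
}
static DECLARE_WORK(feature_enable_work, feature_enable_workfn);

void feature_ready_notifier(void)
{
	/* May be called from hard-irq context, so only queue the work. */
	queue_work(system_unbound_wq, &feature_enable_work);
}

bool feature_fast_path(void)
{
	/* Compiles to a patched jump; taken only until the key is flipped. */
	if (static_branch_unlikely(&feature_not_ready))
		return false;
	return true;
}
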
diff --git a/mm/gup.c b/mm/gup.c
index 76af4cfeaf68..541904a7c60f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
544 if (vm_flags & (VM_IO | VM_PFNMAP)) 544 if (vm_flags & (VM_IO | VM_PFNMAP))
545 return -EFAULT; 545 return -EFAULT;
546 546
547 if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
548 return -EFAULT;
549
547 if (write) { 550 if (write) {
548 if (!(vm_flags & VM_WRITE)) { 551 if (!(vm_flags & VM_WRITE)) {
549 if (!(gup_flags & FOLL_FORCE)) 552 if (!(gup_flags & FOLL_FORCE))
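
The new check makes get_user_pages() and friends fail with -EFAULT when the caller passes FOLL_ANON and the target VMA is not purely anonymous (the flag itself is the one-line include/linux/mm.h addition in the diffstat). The fs/proc/base.c change in the same series uses it so that reading another task's cmdline or environ cannot force page-ins of file-backed mappings. A hedged sketch of such a caller; the wrapper name is hypothetical, only the flag usage mirrors the real callers:

/* Illustrative wrapper: restrict a remote read to anonymous memory. */
static int read_remote_anon_only(struct mm_struct *mm, unsigned long addr,
				 void *buf, int len)
{
	/* With FOLL_ANON, gup returns -EFAULT instead of touching
	 * file-backed or VM_IO/VM_PFNMAP mappings. */
	return access_remote_vm(mm, addr, buf, len, FOLL_ANON);
}
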
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4cafe6a19167..be5817df0a9d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4576,6 +4576,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
4576static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) 4576static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
4577{ 4577{
4578 struct sock *sk = sock->sk; 4578 struct sock *sk = sock->sk;
4579 struct sk_security_struct *sksec = sk->sk_security;
4579 u16 family; 4580 u16 family;
4580 int err; 4581 int err;
4581 4582
@@ -4587,11 +4588,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4587 family = sk->sk_family; 4588 family = sk->sk_family;
4588 if (family == PF_INET || family == PF_INET6) { 4589 if (family == PF_INET || family == PF_INET6) {
4589 char *addrp; 4590 char *addrp;
4590 struct sk_security_struct *sksec = sk->sk_security;
4591 struct common_audit_data ad; 4591 struct common_audit_data ad;
4592 struct lsm_network_audit net = {0,}; 4592 struct lsm_network_audit net = {0,};
4593 struct sockaddr_in *addr4 = NULL; 4593 struct sockaddr_in *addr4 = NULL;
4594 struct sockaddr_in6 *addr6 = NULL; 4594 struct sockaddr_in6 *addr6 = NULL;
4595 u16 family_sa = address->sa_family;
4595 unsigned short snum; 4596 unsigned short snum;
4596 u32 sid, node_perm; 4597 u32 sid, node_perm;
4597 4598
@@ -4601,11 +4602,20 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4601 * need to check address->sa_family as it is possible to have 4602 * need to check address->sa_family as it is possible to have
4602 * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET. 4603 * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET.
4603 */ 4604 */
4604 switch (address->sa_family) { 4605 switch (family_sa) {
4606 case AF_UNSPEC:
4605 case AF_INET: 4607 case AF_INET:
4606 if (addrlen < sizeof(struct sockaddr_in)) 4608 if (addrlen < sizeof(struct sockaddr_in))
4607 return -EINVAL; 4609 return -EINVAL;
4608 addr4 = (struct sockaddr_in *)address; 4610 addr4 = (struct sockaddr_in *)address;
4611 if (family_sa == AF_UNSPEC) {
4612 /* see __inet_bind(), we only want to allow
4613 * AF_UNSPEC if the address is INADDR_ANY
4614 */
4615 if (addr4->sin_addr.s_addr != htonl(INADDR_ANY))
4616 goto err_af;
4617 family_sa = AF_INET;
4618 }
4609 snum = ntohs(addr4->sin_port); 4619 snum = ntohs(addr4->sin_port);
4610 addrp = (char *)&addr4->sin_addr.s_addr; 4620 addrp = (char *)&addr4->sin_addr.s_addr;
4611 break; 4621 break;
@@ -4617,15 +4627,14 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4617 addrp = (char *)&addr6->sin6_addr.s6_addr; 4627 addrp = (char *)&addr6->sin6_addr.s6_addr;
4618 break; 4628 break;
4619 default: 4629 default:
4620 /* Note that SCTP services expect -EINVAL, whereas 4630 goto err_af;
4621 * others expect -EAFNOSUPPORT.
4622 */
4623 if (sksec->sclass == SECCLASS_SCTP_SOCKET)
4624 return -EINVAL;
4625 else
4626 return -EAFNOSUPPORT;
4627 } 4631 }
4628 4632
4633 ad.type = LSM_AUDIT_DATA_NET;
4634 ad.u.net = &net;
4635 ad.u.net->sport = htons(snum);
4636 ad.u.net->family = family_sa;
4637
4629 if (snum) { 4638 if (snum) {
4630 int low, high; 4639 int low, high;
4631 4640
@@ -4637,10 +4646,6 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4637 snum, &sid); 4646 snum, &sid);
4638 if (err) 4647 if (err)
4639 goto out; 4648 goto out;
4640 ad.type = LSM_AUDIT_DATA_NET;
4641 ad.u.net = &net;
4642 ad.u.net->sport = htons(snum);
4643 ad.u.net->family = family;
4644 err = avc_has_perm(&selinux_state, 4649 err = avc_has_perm(&selinux_state,
4645 sksec->sid, sid, 4650 sksec->sid, sid,
4646 sksec->sclass, 4651 sksec->sclass,
@@ -4672,16 +4677,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4672 break; 4677 break;
4673 } 4678 }
4674 4679
4675 err = sel_netnode_sid(addrp, family, &sid); 4680 err = sel_netnode_sid(addrp, family_sa, &sid);
4676 if (err) 4681 if (err)
4677 goto out; 4682 goto out;
4678 4683
4679 ad.type = LSM_AUDIT_DATA_NET; 4684 if (family_sa == AF_INET)
4680 ad.u.net = &net;
4681 ad.u.net->sport = htons(snum);
4682 ad.u.net->family = family;
4683
4684 if (address->sa_family == AF_INET)
4685 ad.u.net->v4info.saddr = addr4->sin_addr.s_addr; 4685 ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
4686 else 4686 else
4687 ad.u.net->v6info.saddr = addr6->sin6_addr; 4687 ad.u.net->v6info.saddr = addr6->sin6_addr;
@@ -4694,6 +4694,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
4694 } 4694 }
4695out: 4695out:
4696 return err; 4696 return err;
4697err_af:
4698 /* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */
4699 if (sksec->sclass == SECCLASS_SCTP_SOCKET)
4700 return -EINVAL;
4701 return -EAFNOSUPPORT;
4697} 4702}
4698 4703
4699/* This supports connect(2) and SCTP connect services such as sctp_connectx(3) 4704/* This supports connect(2) and SCTP connect services such as sctp_connectx(3)
@@ -4771,7 +4776,7 @@ static int selinux_socket_connect_helper(struct socket *sock,
4771 ad.type = LSM_AUDIT_DATA_NET; 4776 ad.type = LSM_AUDIT_DATA_NET;
4772 ad.u.net = &net; 4777 ad.u.net = &net;
4773 ad.u.net->dport = htons(snum); 4778 ad.u.net->dport = htons(snum);
4774 ad.u.net->family = sk->sk_family; 4779 ad.u.net->family = address->sa_family;
4775 err = avc_has_perm(&selinux_state, 4780 err = avc_has_perm(&selinux_state,
4776 sksec->sid, sid, sksec->sclass, perm, &ad); 4781 sksec->sid, sid, sksec->sclass, perm, &ad);
4777 if (err) 4782 if (err)
@@ -5272,6 +5277,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
5272 while (walk_size < addrlen) { 5277 while (walk_size < addrlen) {
5273 addr = addr_buf; 5278 addr = addr_buf;
5274 switch (addr->sa_family) { 5279 switch (addr->sa_family) {
5280 case AF_UNSPEC:
5275 case AF_INET: 5281 case AF_INET:
5276 len = sizeof(struct sockaddr_in); 5282 len = sizeof(struct sockaddr_in);
5277 break; 5283 break;
@@ -5279,7 +5285,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
5279 len = sizeof(struct sockaddr_in6); 5285 len = sizeof(struct sockaddr_in6);
5280 break; 5286 break;
5281 default: 5287 default:
5282 return -EAFNOSUPPORT; 5288 return -EINVAL;
5283 } 5289 }
5284 5290
5285 err = -EINVAL; 5291 err = -EINVAL;
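
Taken together, the SELinux bind hook now mirrors __inet_bind(): an AF_UNSPEC sockaddr is accepted only when the address is INADDR_ANY (and is then handled as AF_INET for the port and node checks), while any other family mismatch takes the new err_af path, returning -EINVAL for SCTP sockets and -EAFNOSUPPORT otherwise. That lets a plain TCP bind() using AF_UNSPEC as an "any address" alias pass the hook, which previously rejected it in the default case. A small userspace illustration of that call, assuming an SELinux policy that otherwise permits the bind:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_UNSPEC;              /* alias accepted by __inet_bind() */
	sin.sin_addr.s_addr = htonl(INADDR_ANY); /* only this address may use it */
	sin.sin_port = 0;

	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("bind");                  /* hook rejected this before the change */
	else
		puts("AF_UNSPEC/INADDR_ANY bind accepted");

	close(fd);
	return 0;
}
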
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index a848836a5de0..507fd5210c1c 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
396 if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || 396 if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
397 copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) 397 copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
398 goto error; 398 goto error;
399 if (get_user(data->owner, &data32->owner) || 399 if (get_user(data->owner, &data32->owner))
400 get_user(data->type, &data32->type))
401 goto error; 400 goto error;
402 switch (data->type) { 401 switch (data->type) {
403 case SNDRV_CTL_ELEM_TYPE_BOOLEAN: 402 case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b0c8c79848a9..a0c93b9c9a28 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2210,6 +2210,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
2210 SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), 2210 SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
2211 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2211 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2212 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), 2212 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
2213 /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
2214 SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
2213 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ 2215 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
2214 SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), 2216 SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
2215 {} 2217 {}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2dd34dd77447..01a6643fc7d4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2363 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 2363 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2364 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), 2364 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
2365 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), 2365 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
2366 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
2366 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2367 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2367 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), 2368 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
2368 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), 2369 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 344d7b069d59..bb5ab7a7dfa5 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -967,6 +967,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
967 } 967 }
968 break; 968 break;
969 969
970 case USB_ID(0x0d8c, 0x0103):
971 if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
972 usb_audio_info(chip,
973 "set volume quirk for CM102-A+/102S+\n");
974 cval->min = -256;
975 }
976 break;
977
970 case USB_ID(0x0471, 0x0101): 978 case USB_ID(0x0471, 0x0101):
971 case USB_ID(0x0471, 0x0104): 979 case USB_ID(0x0471, 0x0104):
972 case USB_ID(0x0471, 0x0105): 980 case USB_ID(0x0471, 0x0105):
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 956be9f7c72a..5ed334575fc7 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -576,7 +576,7 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
576 576
577 if (protocol == UAC_VERSION_1) { 577 if (protocol == UAC_VERSION_1) {
578 attributes = csep->bmAttributes; 578 attributes = csep->bmAttributes;
579 } else { 579 } else if (protocol == UAC_VERSION_2) {
580 struct uac2_iso_endpoint_descriptor *csep2 = 580 struct uac2_iso_endpoint_descriptor *csep2 =
581 (struct uac2_iso_endpoint_descriptor *) csep; 581 (struct uac2_iso_endpoint_descriptor *) csep;
582 582
@@ -585,6 +585,13 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
585 /* emulate the endpoint attributes of a v1 device */ 585 /* emulate the endpoint attributes of a v1 device */
586 if (csep2->bmControls & UAC2_CONTROL_PITCH) 586 if (csep2->bmControls & UAC2_CONTROL_PITCH)
587 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; 587 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
588 } else { /* UAC_VERSION_3 */
589 struct uac3_iso_endpoint_descriptor *csep3 =
590 (struct uac3_iso_endpoint_descriptor *) csep;
591
592 /* emulate the endpoint attributes of a v1 device */
593 if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
594 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
588 } 595 }
589 596
590 return attributes; 597 return attributes;
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 2ddcc96ae456..d9d00319b07c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -15,7 +15,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
15 15
16INSTALL_HDR_PATH = $(top_srcdir)/usr 16INSTALL_HDR_PATH = $(top_srcdir)/usr
17LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ 17LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
18CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) 18CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
19 19
20# After inclusion, $(OUTPUT) is defined and 20# After inclusion, $(OUTPUT) is defined and
21# $(TEST_GEN_PROGS) starts with $(OUTPUT)/ 21# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 7ab98e41324f..ac53730b30aa 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -19,6 +19,7 @@
19#include <errno.h> 19#include <errno.h>
20#include <unistd.h> 20#include <unistd.h>
21#include <fcntl.h> 21#include <fcntl.h>
22#include "kselftest.h"
22 23
23ssize_t test_write(int fd, const void *buf, size_t count); 24ssize_t test_write(int fd, const void *buf, size_t count);
24ssize_t test_read(int fd, void *buf, size_t count); 25ssize_t test_read(int fd, void *buf, size_t count);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 2cedfda181d4..37e2a787d2fc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -50,8 +50,8 @@ int kvm_check_cap(long cap)
50 int kvm_fd; 50 int kvm_fd;
51 51
52 kvm_fd = open(KVM_DEV_PATH, O_RDONLY); 52 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
53 TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", 53 if (kvm_fd < 0)
54 KVM_DEV_PATH, kvm_fd, errno); 54 exit(KSFT_SKIP);
55 55
56 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); 56 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
57 TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" 57 TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
@@ -91,8 +91,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
91 91
92 vm->mode = mode; 92 vm->mode = mode;
93 kvm_fd = open(KVM_DEV_PATH, perm); 93 kvm_fd = open(KVM_DEV_PATH, perm);
94 TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", 94 if (kvm_fd < 0)
95 KVM_DEV_PATH, kvm_fd, errno); 95 exit(KSFT_SKIP);
96 96
97 /* Create VM. */ 97 /* Create VM. */
98 vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL); 98 vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL);
@@ -418,8 +418,8 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
418 418
419 cpuid = allocate_kvm_cpuid2(); 419 cpuid = allocate_kvm_cpuid2();
420 kvm_fd = open(KVM_DEV_PATH, O_RDONLY); 420 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
421 TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", 421 if (kvm_fd < 0)
422 KVM_DEV_PATH, kvm_fd, errno); 422 exit(KSFT_SKIP);
423 423
424 ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); 424 ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
425 TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", 425 TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
@@ -675,8 +675,8 @@ static int vcpu_mmap_sz(void)
675 int dev_fd, ret; 675 int dev_fd, ret;
676 676
677 dev_fd = open(KVM_DEV_PATH, O_RDONLY); 677 dev_fd = open(KVM_DEV_PATH, O_RDONLY);
678 TEST_ASSERT(dev_fd >= 0, "%s open %s failed, rc: %i errno: %i", 678 if (dev_fd < 0)
679 __func__, KVM_DEV_PATH, dev_fd, errno); 679 exit(KSFT_SKIP);
680 680
681 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); 681 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
682 TEST_ASSERT(ret >= sizeof(struct kvm_run), 682 TEST_ASSERT(ret >= sizeof(struct kvm_run),
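
Across kvm_util.c the hard TEST_ASSERT() on opening /dev/kvm is replaced with exit(KSFT_SKIP), so hosts without KVM report the selftests as skipped rather than failed; KSFT_SKIP comes from kselftest.h, which the test_util.h hunk above adds to every test. A minimal standalone sketch of the open-or-skip pattern (helper name hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "kselftest.h"	/* provides KSFT_SKIP */

static int open_kvm_or_skip(void)
{
	int fd = open("/dev/kvm", O_RDONLY);

	if (fd < 0) {
		fprintf(stderr, "/dev/kvm not available, skipping\n");
		exit(KSFT_SKIP);	/* harness reports SKIP, not FAIL */
	}
	return fd;
}

int main(void)
{
	int kvm_fd = open_kvm_or_skip();

	/* ... KVM_CHECK_EXTENSION / KVM_CREATE_VM work would go here ... */
	close(kvm_fd);
	return 0;
}
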
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/sync_regs_test.c
index 428e9473f5e2..eae1ece3c31b 100644
--- a/tools/testing/selftests/kvm/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/sync_regs_test.c
@@ -85,6 +85,9 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
85{ 85{
86} 86}
87 87
88#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
89#define INVALID_SYNC_FIELD 0x80000000
90
88int main(int argc, char *argv[]) 91int main(int argc, char *argv[])
89{ 92{
90 struct kvm_vm *vm; 93 struct kvm_vm *vm;
@@ -98,9 +101,14 @@ int main(int argc, char *argv[])
98 setbuf(stdout, NULL); 101 setbuf(stdout, NULL);
99 102
100 cap = kvm_check_cap(KVM_CAP_SYNC_REGS); 103 cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
101 TEST_ASSERT((unsigned long)cap == KVM_SYNC_X86_VALID_FIELDS, 104 if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
102 "KVM_CAP_SYNC_REGS (0x%x) != KVM_SYNC_X86_VALID_FIELDS (0x%lx)\n", 105 fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n");
103 cap, KVM_SYNC_X86_VALID_FIELDS); 106 exit(KSFT_SKIP);
107 }
108 if ((cap & INVALID_SYNC_FIELD) != 0) {
109 fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n");
110 exit(KSFT_SKIP);
111 }
104 112
105 /* Create VM */ 113 /* Create VM */
106 vm = vm_create_default(VCPU_ID, guest_code); 114 vm = vm_create_default(VCPU_ID, guest_code);
@@ -108,7 +116,14 @@ int main(int argc, char *argv[])
108 run = vcpu_state(vm, VCPU_ID); 116 run = vcpu_state(vm, VCPU_ID);
109 117
110 /* Request reading invalid register set from VCPU. */ 118 /* Request reading invalid register set from VCPU. */
111 run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS << 1; 119 run->kvm_valid_regs = INVALID_SYNC_FIELD;
120 rv = _vcpu_run(vm, VCPU_ID);
121 TEST_ASSERT(rv < 0 && errno == EINVAL,
122 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
123 rv);
124 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
125
126 run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
112 rv = _vcpu_run(vm, VCPU_ID); 127 rv = _vcpu_run(vm, VCPU_ID);
113 TEST_ASSERT(rv < 0 && errno == EINVAL, 128 TEST_ASSERT(rv < 0 && errno == EINVAL,
114 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", 129 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
@@ -116,7 +131,14 @@ int main(int argc, char *argv[])
116 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; 131 vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
117 132
118 /* Request setting invalid register set into VCPU. */ 133 /* Request setting invalid register set into VCPU. */
119 run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS << 1; 134 run->kvm_dirty_regs = INVALID_SYNC_FIELD;
135 rv = _vcpu_run(vm, VCPU_ID);
136 TEST_ASSERT(rv < 0 && errno == EINVAL,
137 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
138 rv);
139 vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
140
141 run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
120 rv = _vcpu_run(vm, VCPU_ID); 142 rv = _vcpu_run(vm, VCPU_ID);
121 TEST_ASSERT(rv < 0 && errno == EINVAL, 143 TEST_ASSERT(rv < 0 && errno == EINVAL,
122 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", 144 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
@@ -125,7 +147,7 @@ int main(int argc, char *argv[])
125 147
126 /* Request and verify all valid register sets. */ 148 /* Request and verify all valid register sets. */
127 /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ 149 /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
128 run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; 150 run->kvm_valid_regs = TEST_SYNC_FIELDS;
129 rv = _vcpu_run(vm, VCPU_ID); 151 rv = _vcpu_run(vm, VCPU_ID);
130 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 152 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
131 "Unexpected exit reason: %u (%s),\n", 153 "Unexpected exit reason: %u (%s),\n",
@@ -146,7 +168,7 @@ int main(int argc, char *argv[])
146 run->s.regs.sregs.apic_base = 1 << 11; 168 run->s.regs.sregs.apic_base = 1 << 11;
147 /* TODO run->s.regs.events.XYZ = ABC; */ 169 /* TODO run->s.regs.events.XYZ = ABC; */
148 170
149 run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; 171 run->kvm_valid_regs = TEST_SYNC_FIELDS;
150 run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; 172 run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
151 rv = _vcpu_run(vm, VCPU_ID); 173 rv = _vcpu_run(vm, VCPU_ID);
152 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 174 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -172,7 +194,7 @@ int main(int argc, char *argv[])
172 /* Clear kvm_dirty_regs bits, verify new s.regs values are 194 /* Clear kvm_dirty_regs bits, verify new s.regs values are
173 * overwritten with existing guest values. 195 * overwritten with existing guest values.
174 */ 196 */
175 run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; 197 run->kvm_valid_regs = TEST_SYNC_FIELDS;
176 run->kvm_dirty_regs = 0; 198 run->kvm_dirty_regs = 0;
177 run->s.regs.regs.r11 = 0xDEADBEEF; 199 run->s.regs.regs.r11 = 0xDEADBEEF;
178 rv = _vcpu_run(vm, VCPU_ID); 200 rv = _vcpu_run(vm, VCPU_ID);
@@ -211,7 +233,7 @@ int main(int argc, char *argv[])
211 * with kvm_sync_regs values. 233 * with kvm_sync_regs values.
212 */ 234 */
213 run->kvm_valid_regs = 0; 235 run->kvm_valid_regs = 0;
214 run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS; 236 run->kvm_dirty_regs = TEST_SYNC_FIELDS;
215 run->s.regs.regs.r11 = 0xBBBB; 237 run->s.regs.regs.r11 = 0xBBBB;
216 rv = _vcpu_run(vm, VCPU_ID); 238 rv = _vcpu_run(vm, VCPU_ID);
217 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 239 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
index 8f7f62093add..aaa633263b2c 100644
--- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
@@ -189,8 +189,8 @@ int main(int argc, char *argv[])
189 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 189 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
190 190
191 if (!(entry->ecx & CPUID_VMX)) { 191 if (!(entry->ecx & CPUID_VMX)) {
192 printf("nested VMX not enabled, skipping test"); 192 fprintf(stderr, "nested VMX not enabled, skipping test\n");
193 return 0; 193 exit(KSFT_SKIP);
194 } 194 }
195 195
196 vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); 196 vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 10b38178cff2..4ffc0b5e6105 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
211 struct vgic_state_iter *iter = (struct vgic_state_iter *)v; 211 struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
212 struct vgic_irq *irq; 212 struct vgic_irq *irq;
213 struct kvm_vcpu *vcpu = NULL; 213 struct kvm_vcpu *vcpu = NULL;
214 unsigned long flags;
214 215
215 if (iter->dist_id == 0) { 216 if (iter->dist_id == 0) {
216 print_dist_state(s, &kvm->arch.vgic); 217 print_dist_state(s, &kvm->arch.vgic);
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
227 irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS]; 228 irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
228 } 229 }
229 230
230 spin_lock(&irq->irq_lock); 231 spin_lock_irqsave(&irq->irq_lock, flags);
231 print_irq_state(s, irq, vcpu); 232 print_irq_state(s, irq, vcpu);
232 spin_unlock(&irq->irq_lock); 233 spin_unlock_irqrestore(&irq->irq_lock, flags);
233 234
234 return 0; 235 return 0;
235} 236}
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index a8f07243aa9f..4ed79c939fb4 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
52{ 52{
53 struct vgic_dist *dist = &kvm->arch.vgic; 53 struct vgic_dist *dist = &kvm->arch.vgic;
54 struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq; 54 struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
55 unsigned long flags;
55 int ret; 56 int ret;
56 57
57 /* In this case there is no put, since we keep the reference. */ 58 /* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
71 irq->intid = intid; 72 irq->intid = intid;
72 irq->target_vcpu = vcpu; 73 irq->target_vcpu = vcpu;
73 74
74 spin_lock(&dist->lpi_list_lock); 75 spin_lock_irqsave(&dist->lpi_list_lock, flags);
75 76
76 /* 77 /*
77 * There could be a race with another vgic_add_lpi(), so we need to 78 * There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
99 dist->lpi_list_count++; 100 dist->lpi_list_count++;
100 101
101out_unlock: 102out_unlock:
102 spin_unlock(&dist->lpi_list_lock); 103 spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
103 104
104 /* 105 /*
105 * We "cache" the configuration table entries in our struct vgic_irq's. 106 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
280 int ret; 281 int ret;
281 unsigned long flags; 282 unsigned long flags;
282 283
283 ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, 284 ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
284 &prop, 1); 285 &prop, 1);
285 286
286 if (ret) 287 if (ret)
287 return ret; 288 return ret;
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
315{ 316{
316 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 317 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
317 struct vgic_irq *irq; 318 struct vgic_irq *irq;
319 unsigned long flags;
318 u32 *intids; 320 u32 *intids;
319 int irq_count, i = 0; 321 int irq_count, i = 0;
320 322
@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
330 if (!intids) 332 if (!intids)
331 return -ENOMEM; 333 return -ENOMEM;
332 334
333 spin_lock(&dist->lpi_list_lock); 335 spin_lock_irqsave(&dist->lpi_list_lock, flags);
334 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
335 if (i == irq_count) 337 if (i == irq_count)
336 break; 338 break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
339 continue; 341 continue;
340 intids[i++] = irq->intid; 342 intids[i++] = irq->intid;
341 } 343 }
342 spin_unlock(&dist->lpi_list_lock); 344 spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
343 345
344 *intid_ptr = intids; 346 *intid_ptr = intids;
345 return i; 347 return i;
@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
348static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) 350static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
349{ 351{
350 int ret = 0; 352 int ret = 0;
353 unsigned long flags;
351 354
352 spin_lock(&irq->irq_lock); 355 spin_lock_irqsave(&irq->irq_lock, flags);
353 irq->target_vcpu = vcpu; 356 irq->target_vcpu = vcpu;
354 spin_unlock(&irq->irq_lock); 357 spin_unlock_irqrestore(&irq->irq_lock, flags);
355 358
356 if (irq->hw) { 359 if (irq->hw) {
357 struct its_vlpi_map map; 360 struct its_vlpi_map map;
@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
441 * this very same byte in the last iteration. Reuse that. 444 * this very same byte in the last iteration. Reuse that.
442 */ 445 */
443 if (byte_offset != last_byte_offset) { 446 if (byte_offset != last_byte_offset) {
444 ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset, 447 ret = kvm_read_guest_lock(vcpu->kvm,
445 &pendmask, 1); 448 pendbase + byte_offset,
449 &pendmask, 1);
446 if (ret) { 450 if (ret) {
447 kfree(intids); 451 kfree(intids);
448 return ret; 452 return ret;
@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
786 return false; 790 return false;
787 791
788 /* Each 1st level entry is represented by a 64-bit value. */ 792 /* Each 1st level entry is represented by a 64-bit value. */
789 if (kvm_read_guest(its->dev->kvm, 793 if (kvm_read_guest_lock(its->dev->kvm,
790 BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), 794 BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
791 &indirect_ptr, sizeof(indirect_ptr))) 795 &indirect_ptr, sizeof(indirect_ptr)))
792 return false; 796 return false;
@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1367 cbaser = CBASER_ADDRESS(its->cbaser); 1371 cbaser = CBASER_ADDRESS(its->cbaser);
1368 1372
1369 while (its->cwriter != its->creadr) { 1373 while (its->cwriter != its->creadr) {
1370 int ret = kvm_read_guest(kvm, cbaser + its->creadr, 1374 int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1371 cmd_buf, ITS_CMD_SIZE); 1375 cmd_buf, ITS_CMD_SIZE);
1372 /* 1376 /*
1373 * If kvm_read_guest() fails, this could be due to the guest 1377 * If kvm_read_guest() fails, this could be due to the guest
1374 * programming a bogus value in CBASER or something else going 1378 * programming a bogus value in CBASER or something else going
@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
1893 int next_offset; 1897 int next_offset;
1894 size_t byte_offset; 1898 size_t byte_offset;
1895 1899
1896 ret = kvm_read_guest(kvm, gpa, entry, esz); 1900 ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
1897 if (ret) 1901 if (ret)
1898 return ret; 1902 return ret;
1899 1903
@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2263 int ret; 2267 int ret;
2264 2268
2265 BUG_ON(esz > sizeof(val)); 2269 BUG_ON(esz > sizeof(val));
2266 ret = kvm_read_guest(kvm, gpa, &val, esz); 2270 ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2267 if (ret) 2271 if (ret)
2268 return ret; 2272 return ret;
2269 val = le64_to_cpu(val); 2273 val = le64_to_cpu(val);
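
Every guest table access in the vgic code (here and in the vgic-v3.c hunk that follows) now goes through kvm_read_guest_lock() instead of kvm_read_guest(). The wrapper itself is the include/linux/kvm_host.h change that appears only in the diffstat; the sketch below is written on the assumption that it simply brackets kvm_read_guest() with the kvm->srcu read lock so the memslot array cannot change under the access:

/* Assumed shape of the kvm_read_guest_lock() helper; not copied from the
 * kvm_host.h hunk, which is not included in this section. */
static inline int kvm_read_guest_lock(struct kvm *kvm, gpa_t gpa,
				      void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}
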
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index c7423f3768e5..bdcf8e7a6161 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -344,7 +344,7 @@ retry:
344 bit_nr = irq->intid % BITS_PER_BYTE; 344 bit_nr = irq->intid % BITS_PER_BYTE;
345 ptr = pendbase + byte_offset; 345 ptr = pendbase + byte_offset;
346 346
347 ret = kvm_read_guest(kvm, ptr, &val, 1); 347 ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
348 if (ret) 348 if (ret)
349 return ret; 349 return ret;
350 350
@@ -397,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
397 ptr = pendbase + byte_offset; 397 ptr = pendbase + byte_offset;
398 398
399 if (byte_offset != last_byte_offset) { 399 if (byte_offset != last_byte_offset) {
400 ret = kvm_read_guest(kvm, ptr, &val, 1); 400 ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
401 if (ret) 401 if (ret)
402 return ret; 402 return ret;
403 last_byte_offset = byte_offset; 403 last_byte_offset = byte_offset;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 97bfba8d9a59..33c8325c8f35 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
43 * kvm->lock (mutex) 43 * kvm->lock (mutex)
44 * its->cmd_lock (mutex) 44 * its->cmd_lock (mutex)
45 * its->its_lock (mutex) 45 * its->its_lock (mutex)
46 * vgic_cpu->ap_list_lock 46 * vgic_cpu->ap_list_lock must be taken with IRQs disabled
47 * kvm->lpi_list_lock 47 * kvm->lpi_list_lock must be taken with IRQs disabled
48 * vgic_irq->irq_lock 48 * vgic_irq->irq_lock must be taken with IRQs disabled
49 *
50 * As the ap_list_lock might be taken from the timer interrupt handler,
51 * we have to disable IRQs before taking this lock and everything lower
52 * than it.
49 * 53 *
50 * If you need to take multiple locks, always take the upper lock first, 54 * If you need to take multiple locks, always take the upper lock first,
51 * then the lower ones, e.g. first take the its_lock, then the irq_lock. 55 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
@@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
72{ 76{
73 struct vgic_dist *dist = &kvm->arch.vgic; 77 struct vgic_dist *dist = &kvm->arch.vgic;
74 struct vgic_irq *irq = NULL; 78 struct vgic_irq *irq = NULL;
79 unsigned long flags;
75 80
76 spin_lock(&dist->lpi_list_lock); 81 spin_lock_irqsave(&dist->lpi_list_lock, flags);
77 82
78 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 83 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
79 if (irq->intid != intid) 84 if (irq->intid != intid)
@@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
89 irq = NULL; 94 irq = NULL;
90 95
91out_unlock: 96out_unlock:
92 spin_unlock(&dist->lpi_list_lock); 97 spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
93 98
94 return irq; 99 return irq;
95} 100}
@@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref)
134void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) 139void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
135{ 140{
136 struct vgic_dist *dist = &kvm->arch.vgic; 141 struct vgic_dist *dist = &kvm->arch.vgic;
142 unsigned long flags;
137 143
138 if (irq->intid < VGIC_MIN_LPI) 144 if (irq->intid < VGIC_MIN_LPI)
139 return; 145 return;
140 146
141 spin_lock(&dist->lpi_list_lock); 147 spin_lock_irqsave(&dist->lpi_list_lock, flags);
142 if (!kref_put(&irq->refcount, vgic_irq_release)) { 148 if (!kref_put(&irq->refcount, vgic_irq_release)) {
143 spin_unlock(&dist->lpi_list_lock); 149 spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
144 return; 150 return;
145 }; 151 };
146 152
147 list_del(&irq->lpi_list); 153 list_del(&irq->lpi_list);
148 dist->lpi_list_count--; 154 dist->lpi_list_count--;
149 spin_unlock(&dist->lpi_list_lock); 155 spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 156
151 kfree(irq); 157 kfree(irq);
152} 158}
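
The updated locking comment is the rationale for the whole vgic part of this diff: because the ap_list_lock can be taken from the timer interrupt handler, it and every lock below it in the hierarchy must be taken with interrupts disabled, otherwise an IRQ arriving on the same CPU while the lock is held in process context would spin on it forever. A compressed illustration of the two paths that must not interleave; the lock and state names are hypothetical, only the spin_lock_irqsave() pattern mirrors the conversion above:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state;

static void process_context_path(void)
{
	unsigned long flags;

	/* A plain spin_lock() here could be interrupted by the hard-irq
	 * path below while the lock is held: a self-deadlock on one CPU. */
	spin_lock_irqsave(&example_lock, flags);
	example_state++;
	spin_unlock_irqrestore(&example_lock, flags);
}

static void hard_irq_path(void)	/* e.g. invoked from the arch timer handler */
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_state++;
	spin_unlock_irqrestore(&example_lock, flags);
}
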