 Documentation/virtual/kvm/api.txt  | 15
 arch/arm/include/asm/kvm_arm.h     |  3
 arch/arm/include/asm/kvm_host.h    |  5
 arch/arm64/include/asm/kvm_arm.h   |  3
 arch/arm64/include/asm/kvm_host.h  |  1
 arch/arm64/kvm/debug.c             | 21
 arch/arm64/kvm/handle_exit.c       | 57
 arch/arm64/kvm/hyp/switch.c        | 37
 arch/s390/kvm/Makefile             |  5
 arch/s390/kvm/diag.c               |  5
 arch/s390/kvm/gaccess.h            |  5
 arch/s390/kvm/guestdbg.c           |  5
 arch/s390/kvm/intercept.c          |  5
 arch/s390/kvm/interrupt.c          |  5
 arch/s390/kvm/irq.h                |  5
 arch/s390/kvm/kvm-s390.c           | 11
 arch/s390/kvm/kvm-s390.h           |  5
 arch/s390/kvm/priv.c               | 16
 arch/s390/kvm/sigp.c               |  5
 arch/s390/kvm/vsie.c               |  5
 arch/x86/include/asm/kvm_emulate.h |  2
 arch/x86/include/asm/kvm_host.h    | 16
 arch/x86/kvm/emulate.c             | 24
 arch/x86/kvm/vmx.c                 |  6
 arch/x86/kvm/x86.c                 | 63
 include/kvm/arm_arch_timer.h       |  3
 include/linux/kvm_host.h           |  2
 include/uapi/linux/kvm.h           |  4
 virt/kvm/arm/arch_timer.c          | 11
 virt/kvm/arm/arm.c                 |  7
 virt/kvm/arm/hyp/timer-sr.c        | 48
 virt/kvm/arm/hyp/vgic-v2-sr.c      |  4
 virt/kvm/arm/vgic/vgic-irqfd.c     |  3
 virt/kvm/arm/vgic/vgic-its.c       |  4
 virt/kvm/arm/vgic/vgic-v3.c        |  2
 virt/kvm/arm/vgic/vgic-v4.c        |  6
 virt/kvm/arm/vgic/vgic.c           |  8
 virt/kvm/kvm_main.c                |  8
 38 files changed, 239 insertions(+), 201 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index f670e4b9e7f3..57d3ee9e4bde 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2901,14 +2901,19 @@ userspace buffer and its length:
 
 struct kvm_s390_irq_state {
 	__u64 buf;
-	__u32 flags;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 reserved[4];
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 Userspace passes in the above struct and for each pending interrupt a
 struct kvm_s390_irq is copied to the provided buffer.
 
+The structure contains a flags and a reserved field for future extensions. As
+the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
+reserved, these fields cannot be used in the future without breaking
+compatibility.
+
 If -ENOBUFS is returned the buffer provided was too small and userspace
 may retry with a bigger buffer.
 
@@ -2932,10 +2937,14 @@ containing a struct kvm_s390_irq_state:
 
 struct kvm_s390_irq_state {
 	__u64 buf;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 pad;
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
+The restrictions for flags and reserved apply as well.
+(see KVM_S390_GET_IRQ_STATE)
+
 The userspace memory referenced by buf contains a struct kvm_s390_irq
 for each interrupt to be injected into the guest.
 If one of the interrupts could not be injected for some reason the
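
The documentation change above pins down a subtle ABI point: because the kernel
never rejected non-zero flags and existing QEMUs never zeroed flags/reserved,
those fields are frozen forever. For the save side, here is a minimal userspace
sketch, assuming an open vcpu fd; the helper name and buffer sizing are
illustrative, only the ioctl and the struct come from the ABI:

    #include <errno.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Hypothetical migration helper: fetch pending irqs of one vcpu. */
    static int save_irq_state(int vcpu_fd, void *buf, unsigned int buf_len)
    {
            struct kvm_s390_irq_state irq_state = {
                    .buf = (unsigned long)buf,
                    .len = buf_len,
                    /* flags/reserved left alone: the kernel ignores them */
            };
            int r = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);

            if (r < 0 && errno == ENOBUFS)
                    return -ENOBUFS;   /* retry with a bigger buffer */
            return r;                  /* see api.txt for the full return convention */
    }
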
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index c8781450905b..3ab8b3781bfe 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
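
This hunk and its arm64 twin below fix the same off-by-one: shifting the mask
by VTTBR_X - 1 instead of VTTBR_X both covered a must-be-zero bit below the
base address and dropped its top bit. A self-contained check, using an
illustrative VTTBR_X of 14 (the real value depends on KVM_T0SZ and the granule
size):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const int X = 14;   /* example VTTBR_X value, for illustration only */
            uint64_t old_mask = ((1ULL << (40 - X)) - 1) << (X - 1);
            uint64_t new_mask = ((1ULL << (40 - X)) - 1) << X;

            assert(!(old_mask & (1ULL << 39)));  /* old mask loses the top baddr bit */
            assert(old_mask & (1ULL << 13));     /* ...and leaks into bit X-1 */
            assert(new_mask == (((1ULL << 26) - 1) << 14));  /* bits [39:14] */
            return 0;
    }
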
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 242151ea6908..a9f7d3f47134 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+					     struct kvm_run *run)
+{
+	return false;
+}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7f069ff37f06..715d395ef45b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS			(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X				(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 674912d7a571..ea6cb5b24258 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index dbadfaf850a7..fa63b28c65e0 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
 		}
 	}
 }
+
+
+/*
+ * After successfully emulating an instruction, we might want to
+ * return to user space with a KVM_EXIT_DEBUG. We can only do this
+ * once the emulation is complete, though, so for userspace emulations
+ * we have to wait until we have re-entered KVM before calling this
+ * helper.
+ *
+ * Return true (and set exit_reason) to return to userspace or false
+ * if no further action is required.
+ */
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		run->exit_reason = KVM_EXIT_DEBUG;
+		run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
+		return true;
+	}
+	return false;
+}
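
The new helper gives the in-kernel and userspace emulation paths one place
that turns a completed step into KVM_EXIT_DEBUG. On the VMM side the
counterpart is unchanged; a rough sketch follows, where everything except the
two ioctls, the exit reason and the hsr field is illustrative:

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* Hypothetical helper: run until the next single-step stop.
     * run points at the kvm_run structure mmap'ed from the vcpu fd. */
    static int step_vcpu(int vcpu_fd, struct kvm_run *run)
    {
            struct kvm_guest_debug dbg = {
                    .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
            };

            if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
                    return -1;

            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                            return -1;
                    if (run->exit_reason == KVM_EXIT_DEBUG) {
                            /* hsr carries ESR_ELx_EC_SOFTSTP_LOW in its EC field */
                            printf("step stop, hsr=0x%x\n", run->debug.arch.hsr);
                            return 0;
                    }
                    /* other exit reasons would be handled here */
            }
    }
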
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b71247995469..304203fa9e33 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -28,6 +28,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -187,14 +188,46 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	int handled;
+
+	/*
+	 * See ARM ARM B1.14.1: "Hyp traps on instructions
+	 * that fail their condition code check"
+	 */
+	if (!kvm_condition_valid(vcpu)) {
+		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		handled = 1;
+	} else {
+		exit_handle_fn exit_handler;
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+		handled = exit_handler(vcpu, run);
+	}
+
+	/*
+	 * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
+	 * structure if we need to return to userspace.
+	 */
+	if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
+		handled = 0;
+
+	return handled;
+}
+
+/*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
  */
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index)
 {
-	exit_handle_fn exit_handler;
-
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return 1;
 	case ARM_EXCEPTION_EL1_SERROR:
 		kvm_inject_vabt(vcpu);
-		return 1;
+		/* We may still need to return for single-step */
+		if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+			&& kvm_arm_handle_step_debug(vcpu, run))
+			return 0;
+		else
+			return 1;
 	case ARM_EXCEPTION_TRAP:
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-			return 1;
-		}
-
-		exit_handler = kvm_get_exit_handler(vcpu);
-
-		return exit_handler(vcpu, run);
+		return handle_trap_exceptions(vcpu, run);
 	case ARM_EXCEPTION_HYP_GONE:
 		/*
 		 * EL2 has been reset to the hyp-stub. This happens when a guest
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 525c01f48867..f7c651f3a8c0 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,6 +22,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+#include <asm/debug-monitors.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Skip an instruction which has been emulated. Returns true if
+ * execution can continue or false if we need to exit hyp mode because
+ * single-step was in effect.
+ */
+static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
 
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 	}
 
 	write_sysreg_el2(*vcpu_pc(vcpu), elr);
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		vcpu->arch.fault.esr_el2 =
+			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+		return false;
+	} else {
+		return true;
+	}
 }
 
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 
 		if (ret == -1) {
-			/* Promote an illegal access to an SError */
-			__skip_instr(vcpu);
+			/* Promote an illegal access to an
+			 * SError. If we would be returning
+			 * due to single-step, clear the SS
+			 * bit so handle_exit knows what to
+			 * do after dealing with the error.
+			 */
+			if (!__skip_instr(vcpu))
+				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 			exit_code = ARM_EXCEPTION_EL1_SERROR;
 		}
 
@@ -363,8 +384,10 @@ again:
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 
 		/* 0 falls through to be handled out of EL2 */
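
A note on the magic 0x22 ORed into the synthetic ESR above: it is the ISS
fault status code for a debug exception, so handle_exit() sees a value shaped
like a genuine software-step trap from a lower EL. A quick decode, with the
constants copied by hand from arch/arm64/include/asm/esr.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed constants, mirroring arch/arm64/include/asm/esr.h */
    #define ESR_ELx_EC_SHIFT        26
    #define ESR_ELx_EC_SOFTSTP_LOW  0x32
    #define ESR_ELx_FSC             0x3f

    int main(void)
    {
            uint32_t esr = ((uint32_t)ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;

            printf("EC=0x%x FSC=0x%x\n",
                   esr >> ESR_ELx_EC_SHIFT, esr & ESR_ELx_FSC);
            /* prints EC=0x32 (soft step, lower EL) and FSC=0x22 (debug) */
            return 0;
    }
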
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 6048b1c6e580..05ee90a5ea08 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -1,10 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kernel virtual machines on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
 common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index d93a2c0474bf..89aa114a2cba 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling diagnose instructions
  *
  * Copyright IBM Corp. 2008, 2011
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  */
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index bec42b852246..f4c51756c462 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * access guest memory
  *
  * Copyright IBM Corp. 2008, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index bcbd86621d01..b5f3e82006d0 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm guest debug support
  *
  * Copyright IBM Corp. 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
 #include <linux/kvm_host.h>
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 8fe034beb623..9c7d70715862 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * in-kernel handling for sie intercepts
  *
  * Copyright IBM Corp. 2008, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  */
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fa557372d600..024ad8bcc516 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling kvm guest interrupts
  *
  * Copyright IBM Corp. 2008, 2015
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
diff --git a/arch/s390/kvm/irq.h b/arch/s390/kvm/irq.h
index d98e4159643d..484608c71dd0 100644
--- a/arch/s390/kvm/irq.h
+++ b/arch/s390/kvm/irq.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * s390 irqchip routines
  *
  * Copyright IBM Corp. 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_IRQ_H
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9614aea5839b..ec8b68e97d3c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * hosting zSeries kernel virtual machines
+ * hosting IBM Z kernel virtual machines (s390x)
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
+ * Copyright IBM Corp. 2008, 2017
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
@@ -3808,6 +3805,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 			r = -EINVAL;
 			break;
 		}
+		/* do not use irq_state.flags, it will break old QEMUs */
 		r = kvm_s390_set_irq_state(vcpu,
 					   (void __user *) irq_state.buf,
 					   irq_state.len);
@@ -3823,6 +3821,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 			r = -EINVAL;
 			break;
 		}
+		/* do not use irq_state.flags, it will break old QEMUs */
 		r = kvm_s390_get_irq_state(vcpu,
 					   (__u8 __user *) irq_state.buf,
 					   irq_state.len);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 10d65dfbc306..5e46ba429bcb 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kvm on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  *            Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c954ac49eee4..572496c688cc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling privileged instructions
  *
  * Copyright IBM Corp. 2008, 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  */
@@ -235,8 +232,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 		return -EAGAIN;
 	}
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	return 0;
 }
 
242 237
@@ -247,6 +242,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
@@ -276,6 +274,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
@@ -311,6 +312,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 9d592ef4104b..c1f5cde2c878 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling interprocessor communication
  *
  * Copyright IBM Corp. 2008, 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  *            Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a311938b63b3..5d6ae0326d9e 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm nested virtualization support for s390x
  *
  * Copyright IBM Corp. 2016
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
 #include <linux/vmalloc.h>
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 034caa1a084e..b24b1c8b3979 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -214,8 +214,6 @@ struct x86_emulate_ops {
 	void (*halt)(struct x86_emulate_ctxt *ctxt);
 	void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
 	int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
-	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
-	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
 	int (*intercept)(struct x86_emulate_ctxt *ctxt,
 			 struct x86_instruction_info *info,
 			 enum x86_intercept_stage stage);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 977de5fb968b..516798431328 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -536,7 +536,20 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
+	/*
+	 * QEMU userspace and the guest each have their own FPU state.
+	 * In vcpu_run, we switch between the user and guest FPU contexts.
+	 * While running a VCPU, the VCPU thread will have the guest FPU
+	 * context.
+	 *
+	 * Note that while the PKRU state lives inside the fpu registers,
+	 * it is switched out separately at VMENTER and VMEXIT time. The
+	 * "guest_fpu" state here contains the guest FPU context, with the
+	 * host PKRU bits.
+	 */
+	struct fpu user_fpu;
 	struct fpu guest_fpu;
+
 	u64 xcr0;
 	u64 guest_supported_xcr0;
 	u32 guest_xstate_size;
@@ -1435,4 +1448,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                          \
 	*(type *)((buf) + (offset) - 0x7e00) = val
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e7d04d0c8008..abe74f779f9d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1046,7 +1046,6 @@ static void fetch_register_operand(struct operand *op)
 
 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1068,13 +1067,11 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 #endif
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 			  int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1096,12 +1093,10 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 #endif
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1113,12 +1108,10 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1130,7 +1123,6 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1138,9 +1130,7 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	ctxt->ops->get_fpu(ctxt);
 	asm volatile("fninit");
-	ctxt->ops->put_fpu(ctxt);
 	return X86EMUL_CONTINUE;
 }
 
1146 1136
@@ -1151,9 +1141,7 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	ctxt->ops->get_fpu(ctxt);
 	asm volatile("fnstcw %0": "+m"(fcw));
-	ctxt->ops->put_fpu(ctxt);
 
 	ctxt->dst.val = fcw;
 
@@ -1167,9 +1155,7 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	ctxt->ops->get_fpu(ctxt);
 	asm volatile("fnstsw %0": "+m"(fsw));
-	ctxt->ops->put_fpu(ctxt);
 
 	ctxt->dst.val = fsw;
 
@@ -4001,12 +3987,8 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	ctxt->ops->get_fpu(ctxt);
-
 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 
-	ctxt->ops->put_fpu(ctxt);
-
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -4049,8 +4031,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	ctxt->ops->get_fpu(ctxt);
-
 	if (size < __fxstate_size(16)) {
 		rc = fxregs_fixup(&fx_state, size);
 		if (rc != X86EMUL_CONTINUE)
@@ -4066,8 +4046,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
-	ctxt->ops->put_fpu(ctxt);
-
 	return rc;
 }
 
@@ -5317,9 +5295,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 
-	ctxt->ops->get_fpu(ctxt);
 	rc = asm_safe("fwait");
-	ctxt->ops->put_fpu(ctxt);
 
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4704aaf6d19e..8eba631c4dbd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6751,16 +6751,10 @@ static __init int hardware_setup(void)
 		goto out;
 	}
 
-	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-	/*
-	 * Allow direct access to the PC debug port (it is often used for I/O
-	 * delays, but the vmexits simply slow things down).
-	 */
 	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-	clear_bit(0x80, vmx_io_bitmap_a);
 
 	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eee8e7faf1af..faf843c9b916 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2937,7 +2937,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	pagefault_enable();
 	kvm_x86_ops->vcpu_put(vcpu);
-	kvm_put_guest_fpu(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 }
 
@@ -5252,17 +5251,6 @@ static void emulator_halt(struct x86_emulate_ctxt *ctxt)
 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
 }
 
-static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
-{
-	preempt_disable();
-	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
-}
-
-static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
-{
-	preempt_enable();
-}
-
 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
 			      struct x86_instruction_info *info,
 			      enum x86_intercept_stage stage)
@@ -5340,8 +5328,6 @@ static const struct x86_emulate_ops emulate_ops = {
 	.halt                = emulator_halt,
 	.wbinvd              = emulator_wbinvd,
 	.fix_hypercall       = emulator_fix_hypercall,
-	.get_fpu             = emulator_get_fpu,
-	.put_fpu             = emulator_put_fpu,
 	.intercept           = emulator_intercept,
 	.get_cpuid           = emulator_get_cpuid,
 	.set_nmi_mask        = emulator_set_nmi_mask,
@@ -6778,6 +6764,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+	unsigned long apic_address;
+
+	/*
+	 * The physical address of apic access page is stored in the VMCS.
+	 * Update it when it becomes invalid.
+	 */
+	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (start <= apic_address && apic_address < end)
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	struct page *page = NULL;
@@ -6952,7 +6952,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	preempt_disable();
 
 	kvm_x86_ops->prepare_guest_switch(vcpu);
-	kvm_load_guest_fpu(vcpu);
 
 	/*
 	 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
@@ -7297,12 +7296,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 	}
 
+	kvm_load_guest_fpu(vcpu);
+
 	if (unlikely(vcpu->arch.complete_userspace_io)) {
 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
 		vcpu->arch.complete_userspace_io = NULL;
 		r = cui(vcpu);
 		if (r <= 0)
-			goto out;
+			goto out_fpu;
 	} else
 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
@@ -7311,6 +7312,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	else
 		r = vcpu_run(vcpu);
 
+out_fpu:
+	kvm_put_guest_fpu(vcpu);
 out:
 	post_kvm_run_save(vcpu);
 	kvm_sigset_deactivate(vcpu);
@@ -7704,32 +7707,25 @@ static void fx_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
+/* Swap (qemu) user FPU context for the guest FPU context. */
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->guest_fpu_loaded)
-		return;
-
-	/*
-	 * Restore all possible states in the guest,
-	 * and assume host would use all available bits.
-	 * Guest xcr0 would be loaded later.
-	 */
-	vcpu->guest_fpu_loaded = 1;
-	__kernel_fpu_begin();
+	preempt_disable();
+	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
 	/* PKRU is separately restored in kvm_x86_ops->run. */
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
 				~XFEATURE_MASK_PKRU);
+	preempt_enable();
 	trace_kvm_fpu(1);
 }
 
+/* When vcpu_run ends, restore user space FPU context. */
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->guest_fpu_loaded)
-		return;
-
-	vcpu->guest_fpu_loaded = 0;
+	preempt_disable();
 	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
-	__kernel_fpu_end();
+	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+	preempt_enable();
 	++vcpu->stat.fpu_reload;
 	trace_kvm_fpu(0);
 }
@@ -7846,7 +7842,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	 * To avoid having the INIT path from kvm_apic_has_events() be
 	 * called with loaded FPU and not let userspace fix the state.
 	 */
-	kvm_put_guest_fpu(vcpu);
+	if (init_event)
+		kvm_put_guest_fpu(vcpu);
 	mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
 			XFEATURE_MASK_BNDREGS);
 	if (mpx_state_buffer)
@@ -7855,6 +7852,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 			XFEATURE_MASK_BNDCSR);
 	if (mpx_state_buffer)
 		memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
+	if (init_event)
+		kvm_load_guest_fpu(vcpu);
 	}
 
 	if (!init_event) {
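
Taken together, the x86 hunks move FPU swapping from the vcpu_load/vcpu_put
and vcpu_enter_guest paths out to the edges of KVM_RUN: the guest FPU is
loaded once per ioctl and put once on the way out, so the emulator may touch
FPU registers at any point in between, which is why its get_fpu/put_fpu hooks
become dead code. Roughly, the ownership over one ioctl now looks like this
(a schematic, not literal kernel code):

    /*
     * kvm_arch_vcpu_ioctl_run()
     *   kvm_load_guest_fpu()    -> user_fpu saved, guest_fpu in registers
     *     vcpu_run() ... in-kernel emulation uses FPU regs directly ...
     *   kvm_put_guest_fpu()     -> guest_fpu saved, user_fpu in registers
     *
     * If the vcpu thread is preempted in between, the normal task FPU
     * switch saves and restores whatever is live in the registers, while
     * user_fpu keeps the userspace contents safe until the final put.
     */
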
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 01ee473517e2..6e45608b2399 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
 #define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.ptimer)
 
-void enable_el1_phys_timer_access(void);
-void disable_el1_phys_timer_access(void);
-
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 893d6d606cd0..6bdd4b9f6611 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -232,7 +232,7 @@ struct kvm_vcpu {
 	struct mutex mutex;
 	struct kvm_run *run;
 
-	int guest_fpu_loaded, guest_xcr0_loaded;
+	int guest_xcr0_loaded;
 	struct swait_queue_head wq;
 	struct pid __rcu *pid;
 	int sigset_active;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 282d7613fce8..496e59a2738b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
 
 struct kvm_s390_irq_state {
 	__u64 buf;
-	__u32 flags;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 reserved[4];
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4151250ce8da..f9555b1e7f15 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -479,9 +479,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 
 	vtimer_restore_state(vcpu);
 
-	if (has_vhe())
-		disable_el1_phys_timer_access();
-
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 }
@@ -510,9 +507,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (has_vhe())
-		enable_el1_phys_timer_access();
-
 	vtimer_save_state(vcpu);
 
 	/*
@@ -841,7 +835,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	kvm_timer_vcpu_load_vgic(vcpu);
+	if (!irqchip_in_kernel(vcpu->kvm))
+		kvm_timer_vcpu_load_user(vcpu);
+	else
+		kvm_timer_vcpu_load_vgic(vcpu);
 	preempt_enable();
 
 	return 0;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a67c106d73f5..6b60c98a6e22 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 			kvm->vcpus[i] = NULL;
 		}
 	}
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
-	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -627,6 +627,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
 			return ret;
+		if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+			return 0;
+
 	}
 
 	if (run->immediate_exit)
@@ -1502,7 +1505,7 @@ int kvm_arch_init(void *opaque)
 	bool in_hyp_mode;
 
 	if (!is_hyp_mode_available()) {
-		kvm_err("HYP mode not available\n");
+		kvm_info("HYP mode not available\n");
 		return -ENODEV;
 	}
 
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index f39861639f08..f24404b3c8df 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
 	write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We don't need to do this for VHE since the host kernel runs in EL2
 	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
 	 */
-	if (!has_vhe())
-		enable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-	if (!has_vhe())
-		disable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f18d362366..d7fd46fe9efb 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b7baf581611a..99e026d2dade 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1f761a9991e7..8e633bd9cc1e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	u32 *intids;
 	int nr_irqs, i;
 	unsigned long flags;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
 	if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 2f05f732d3fd..f47e8481fa45 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	int last_byte_offset = -1;
 	struct vgic_irq *irq;
 	int ret;
+	u8 val;
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
-		u8 val;
 
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 53c324aa44ef..4a37292855bc 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 		goto out;
 
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
-	irq->hw = false;
-	ret = its_unmap_vlpi(virq);
+	if (irq->hw) {
+		irq->hw = false;
+		ret = its_unmap_vlpi(virq);
+	}
 
 out:
 	mutex_unlock(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index b168a328a9e0..ecb8e25f5fe5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
 
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_irq *irq;
 	bool map_is_active;
 	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
 
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
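
The spin_lock to spin_lock_irqsave conversion in kvm_vgic_set_owner() follows
the vgic locking rule that irq_lock may also be taken from hard interrupt
context (e.g. when an interrupt is injected from a hardirq handler), so a
process-context taker must disable local interrupts to avoid self-deadlock.
Schematically:

    /* Deadlock pattern avoided by spin_lock_irqsave (schematic):
     *
     *   CPU0, process context              CPU0, hard irq
     *   spin_lock(&irq->irq_lock);
     *      <interrupt fires>
     *                                      spin_lock(&irq->irq_lock);
     *                                      ... spins forever: the lock
     *                                      holder sits underneath us ...
     *
     * Disabling local interrupts while the lock is held makes this
     * interleaving impossible.
     */
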
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c422c10cd1dd..210bf820385a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
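
The generic side uses the __weak default pattern: kvm_main.c provides an empty
definition that any architecture may override with a strong symbol at link
time, which is how the x86 version above takes effect without touching other
architectures. The same pattern in miniature (illustrative names, two
translation units):

    /* generic.c -- default that any arch may override */
    __attribute__((weak)) void arch_hook(unsigned long start, unsigned long end)
    {
            /* intentionally empty: most arches need nothing here */
    }

    /* arch.c -- a strong definition here wins at link time */
    void arch_hook(unsigned long start, unsigned long end)
    {
            /* arch-specific invalidation work goes here */
    }
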