author	Radim Krčmář <rkrcmar@redhat.com>	2017-12-05 12:02:03 -0500
committer	Radim Krčmář <rkrcmar@redhat.com>	2017-12-05 12:02:03 -0500
commit	609b7002705ae72a6ca45b633b7ff1a09a7a0d86
tree	515db242be433c34cd45c522dcf295580f864837
parent	ae64f9bd1d3621b5e60d7363bc20afb46aede215
parent	fc396e066318c0a02208c1d3f0b62950a7714999
Merge tag 'kvm-arm-fixes-for-v4.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
KVM/ARM fixes for v4.15:

 - A number of issues in the vgic discovered using SMATCH
 - An off-by-one bug in our stage-2 base address mask (32-bit and 64-bit)
 - Fixes to single-step debugging of instructions that trap for other
   reasons, such as MMIO aborts
 - Unavailable hyp mode was printed as an error; demote it to info
 - A potential spinlock deadlock in the vgic
 - Avoid calling the vgic vcpu free path more than once
 - A broken bit calculation for big-endian systems
-rw-r--r--	arch/arm/include/asm/kvm_arm.h		 3
-rw-r--r--	arch/arm/include/asm/kvm_host.h		 5
-rw-r--r--	arch/arm64/include/asm/kvm_arm.h	 3
-rw-r--r--	arch/arm64/include/asm/kvm_host.h	 1
-rw-r--r--	arch/arm64/kvm/debug.c			21
-rw-r--r--	arch/arm64/kvm/handle_exit.c		57
-rw-r--r--	arch/arm64/kvm/hyp/switch.c		37
-rw-r--r--	include/kvm/arm_arch_timer.h		 3
-rw-r--r--	virt/kvm/arm/arch_timer.c		11
-rw-r--r--	virt/kvm/arm/arm.c			 7
-rw-r--r--	virt/kvm/arm/hyp/timer-sr.c		48
-rw-r--r--	virt/kvm/arm/hyp/vgic-v2-sr.c		 4
-rw-r--r--	virt/kvm/arm/vgic/vgic-irqfd.c		 3
-rw-r--r--	virt/kvm/arm/vgic/vgic-its.c		 4
-rw-r--r--	virt/kvm/arm/vgic/vgic-v3.c		 2
-rw-r--r--	virt/kvm/arm/vgic/vgic-v4.c		 6
-rw-r--r--	virt/kvm/arm/vgic/vgic.c		 8
17 files changed, 144 insertions(+), 79 deletions(-)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index c8781450905b..3ab8b3781bfe 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT	(VTTBR_X - 1)
-#define VTTBR_BADDR_MASK	(((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK	(((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT	_AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
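
Note: the off-by-one is easiest to see with concrete numbers. A standalone sketch (plain userspace C; VTTBR_X is fixed at an illustrative 14 here, whereas in the header it depends on KVM_T0SZ):

#include <stdio.h>
#include <stdint.h>

#define VTTBR_X 14ULL			/* illustrative value only */

int main(void)
{
	/* mask built with the old, buggy shift of (VTTBR_X - 1) */
	uint64_t old_mask = ((1ULL << (40 - VTTBR_X)) - 1) << (VTTBR_X - 1);
	/* fixed mask, aligned to VTTBR_X */
	uint64_t new_mask = ((1ULL << (40 - VTTBR_X)) - 1) << VTTBR_X;

	/* the old mask covers bits [13..38], silently dropping bit 39 of
	 * the stage-2 base address and including reserved bit 13; the
	 * fixed mask covers bits [14..39] */
	printf("old: %#llx\nnew: %#llx\n",
	       (unsigned long long)old_mask, (unsigned long long)new_mask);
	return 0;
}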
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 242151ea6908..a9f7d3f47134 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+					     struct kvm_run *run)
+{
+	return false;
+}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7f069ff37f06..715d395ef45b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS		(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X			(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT	(VTTBR_X - 1)
-#define VTTBR_BADDR_MASK	(((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK	(((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT	(UL(48))
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 674912d7a571..ea6cb5b24258 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index dbadfaf850a7..fa63b28c65e0 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
 		}
 	}
 }
+
+
+/*
+ * After successfully emulating an instruction, we might want to
+ * return to user space with a KVM_EXIT_DEBUG. We can only do this
+ * once the emulation is complete, though, so for userspace emulations
+ * we have to wait until we have re-entered KVM before calling this
+ * helper.
+ *
+ * Return true (and set exit_reason) to return to userspace or false
+ * if no further action is required.
+ */
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		run->exit_reason = KVM_EXIT_DEBUG;
+		run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
+		return true;
+	}
+	return false;
+}
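
For context, a minimal sketch of the userspace side that consumes these exits; vcpu_fd and the mmap'ed run structure are assumed to have been set up by the VMM already, and error handling is elided:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Re-enter the guest once and report whether a single-step debug exit
 * was delivered (hypothetical VMM helper). */
static int handle_one_exit(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_DEBUG) {
		/* run->debug.arch.hsr carries the ESR synthesized above:
		 * EC = software step from a lower exception level */
		printf("single-step trap, hsr=%#x\n", run->debug.arch.hsr);
		return 1;
	}
	return 0;
}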
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b71247995469..304203fa9e33 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -28,6 +28,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -187,14 +188,46 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	int handled;
+
+	/*
+	 * See ARM ARM B1.14.1: "Hyp traps on instructions
+	 * that fail their condition code check"
+	 */
+	if (!kvm_condition_valid(vcpu)) {
+		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		handled = 1;
+	} else {
+		exit_handle_fn exit_handler;
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+		handled = exit_handler(vcpu, run);
+	}
+
+	/*
+	 * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
+	 * structure if we need to return to userspace.
+	 */
+	if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
+		handled = 0;
+
+	return handled;
+}
+
+/*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
  */
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
-	exit_handle_fn exit_handler;
-
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return 1;
 	case ARM_EXCEPTION_EL1_SERROR:
 		kvm_inject_vabt(vcpu);
-		return 1;
-	case ARM_EXCEPTION_TRAP:
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		/* We may still need to return for single-step */
+		if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+			&& kvm_arm_handle_step_debug(vcpu, run))
+			return 0;
+		else
 			return 1;
-		}
-
-		exit_handler = kvm_get_exit_handler(vcpu);
-
-		return exit_handler(vcpu, run);
+	case ARM_EXCEPTION_TRAP:
+		return handle_trap_exceptions(vcpu, run);
 	case ARM_EXCEPTION_HYP_GONE:
 		/*
 		 * EL2 has been reset to the hyp-stub. This happens when a guest
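
The return-value protocol described by the comment on handle_exit() (> 0 re-enter the guest, 0 exit to userspace with exit_reason set, < 0 error), restated as a caller-side sketch; world_switch() is a hypothetical stand-in for the real entry path in kvm_arch_vcpu_ioctl_run():

/* Hypothetical caller loop; the real one lives in virt/kvm/arm/arm.c. */
static int run_guest_loop(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;

	while (ret > 0) {
		int exception_index = world_switch(vcpu);	/* assumed helper */

		ret = handle_exit(vcpu, run, exception_index);
	}
	return ret;	/* 0: run->exit_reason is set; < 0: error */
}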
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 525c01f48867..f7c651f3a8c0 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,6 +22,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+#include <asm/debug-monitors.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Skip an instruction which has been emulated. Returns true if
+ * execution can continue or false if we need to exit hyp mode because
+ * single-step was in effect.
+ */
+static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
 
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 	}
 
 	write_sysreg_el2(*vcpu_pc(vcpu), elr);
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		vcpu->arch.fault.esr_el2 =
+			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+		return false;
+	} else {
+		return true;
+	}
 }
 
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 
 		if (ret == -1) {
-			/* Promote an illegal access to an SError */
-			__skip_instr(vcpu);
+			/* Promote an illegal access to an
+			 * SError. If we would be returning
+			 * due to single-step clear the SS
+			 * bit so handle_exit knows what to
+			 * do after dealing with the error.
+			 */
+			if (!__skip_instr(vcpu))
+				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 			exit_code = ARM_EXCEPTION_EL1_SERROR;
 		}
 
@@ -363,8 +384,10 @@ again:
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 
 		/* 0 falls through to be handled out of EL2 */
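
As a quick sanity check on the ESR synthesized in __skip_instr(), the EC field decodes back as expected (userspace sketch; the constants mirror the arm64 definitions, and the 0x22 ISS payload is copied verbatim from the patch):

#include <assert.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_SOFTSTP_LOW	0x32u	/* software step, lower EL */

int main(void)
{
	uint32_t esr = (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;

	/* handle_exit() routes the exit by this EC field */
	assert((esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_SOFTSTP_LOW);
	return 0;
}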
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 01ee473517e2..6e45608b2399 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
 #define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.ptimer)
 
-void enable_el1_phys_timer_access(void);
-void disable_el1_phys_timer_access(void);
-
 #endif
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4151250ce8da..f9555b1e7f15 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -479,9 +479,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 
 	vtimer_restore_state(vcpu);
 
-	if (has_vhe())
-		disable_el1_phys_timer_access();
-
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 }
@@ -510,9 +507,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (has_vhe())
-		enable_el1_phys_timer_access();
-
 	vtimer_save_state(vcpu);
 
 	/*
@@ -841,7 +835,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	kvm_timer_vcpu_load_vgic(vcpu);
+	if (!irqchip_in_kernel(vcpu->kvm))
+		kvm_timer_vcpu_load_user(vcpu);
+	else
+		kvm_timer_vcpu_load_vgic(vcpu);
 	preempt_enable();
 
 	return 0;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a67c106d73f5..6b60c98a6e22 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 			kvm->vcpus[i] = NULL;
 		}
 	}
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
-	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -627,6 +627,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
 			return ret;
+		if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+			return 0;
+
 	}
 
 	if (run->immediate_exit)
@@ -1502,7 +1505,7 @@ int kvm_arch_init(void *opaque)
 	bool in_hyp_mode;
 
 	if (!is_hyp_mode_available()) {
-		kvm_err("HYP mode not available\n");
+		kvm_info("HYP mode not available\n");
 		return -ENODEV;
 	}
 
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index f39861639f08..f24404b3c8df 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
 	write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We don't need to do this for VHE since the host kernel runs in EL2
 	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
 	 */
-	if (!has_vhe())
-		enable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-	if (!has_vhe())
-		disable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
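
For reference, the intent of the two cnthctl_el2 bits toggled above, restated as pure functions (bit positions as defined in kvm_arm.h for the non-VHE layout; this is an illustrative sketch, not kernel code):

#include <stdint.h>

#define CNTHCTL_EL1PCTEN	(1u << 0)	/* EL1 physical counter access */
#define CNTHCTL_EL1PCEN		(1u << 1)	/* EL1 physical timer access */

static inline uint32_t cnthctl_for_host(uint32_t val)
{
	/* switching to the host: stop trapping counter and timer */
	return val | CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
}

static inline uint32_t cnthctl_for_guest(uint32_t val)
{
	/* switching to the guest: trap the timer, allow the counter */
	return (val & ~CNTHCTL_EL1PCEN) | CNTHCTL_EL1PCTEN;
}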
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f18d362366..d7fd46fe9efb 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
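
The removed #ifdef double-swapped on big-endian hosts: readl_relaxed() already returns MMIO register contents in CPU byte order on either endianness, so ELRSR1 is always the high word. A sketch of the now-unconditional combine:

#include <stdint.h>

/* elrsr0/elrsr1 as read back with readl_relaxed(), i.e. already in
 * CPU byte order regardless of endianness */
static inline uint64_t combine_elrsr(uint32_t elrsr0, uint32_t elrsr1)
{
	return ((uint64_t)elrsr1 << 32) | elrsr0;
}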
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b7baf581611a..99e026d2dade 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1f761a9991e7..8e633bd9cc1e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	u32 *intids;
 	int nr_irqs, i;
 	unsigned long flags;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
 	if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 2f05f732d3fd..f47e8481fa45 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	int last_byte_offset = -1;
 	struct vgic_irq *irq;
 	int ret;
+	u8 val;
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
-		u8 val;
 
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 53c324aa44ef..4a37292855bc 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 		goto out;
 
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
-	irq->hw = false;
-	ret = its_unmap_vlpi(virq);
+	if (irq->hw) {
+		irq->hw = false;
+		ret = its_unmap_vlpi(virq);
+	}
 
 out:
 	mutex_unlock(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index b168a328a9e0..ecb8e25f5fe5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
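
The irqsave conversion matters because irq_lock can also be taken from contexts running with interrupts disabled; taking it plain in process context risks a same-CPU deadlock if an interrupt arrives while the lock is held. A sketch of the rule the fix enforces, with illustrative names standing in for the vgic's:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stands in for irq->irq_lock */

static void process_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* masks local interrupts */
	/* ... touch shared state ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void hardirq_path(void)
{
	/* interrupts are already off on this CPU in hard-irq context */
	spin_lock(&demo_lock);
	/* ... touch shared state ... */
	spin_unlock(&demo_lock);
}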
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
 
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_irq *irq;
 	bool map_is_active;
 	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
 
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
 	spin_unlock_irqrestore(&irq->irq_lock, flags);