author	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-27 18:51:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-27 18:51:50 -0400
commit	af56ff27eba54fceee5f5643e79bf6531f2e1739 (patch)
tree	99d683c0c7698f472ce66f6973ce78efddc6500e
parent	5e608a027082ae426e100a582031e0ff40becc83 (diff)
parent	ba913e4f72fc9cfd03dad968dfb110eb49211d80 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "ARM:
   - fixes for ITS init issues, error handling, IRQ leakage, race
     conditions
   - an erratum workaround for timers
   - some removal of misleading use of errors and comments
   - a fix for GICv3 on 32-bit guests

  MIPS:
   - fix for where the guest could wrongly map the first page of
     physical memory

  x86:
   - nested virtualization fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  MIPS: KVM: Check for pfn noslot case
  kvm: nVMX: fix nested tsc scaling
  KVM: nVMX: postpone VMCS changes on MSR_IA32_APICBASE write
  KVM: nVMX: fix msr bitmaps to prevent L2 from accessing L0 x2APIC
  arm64: KVM: report configured SRE value to 32-bit world
  arm64: KVM: remove misleading comment on pmu status
  KVM: arm/arm64: timer: Workaround misconfigured timer interrupt
  arm64: Document workaround for Cortex-A72 erratum #853709
  KVM: arm/arm64: Change misleading use of is_error_pfn
  KVM: arm64: ITS: avoid re-mapping LPIs
  KVM: arm64: check for ITS device on MSI injection
  KVM: arm64: ITS: move ITS registration into first VCPU run
  KVM: arm64: vgic-its: Make updates to propbaser/pendbaser atomic
  KVM: arm64: vgic-its: Plug race in vgic_put_irq
  KVM: arm64: vgic-its: Handle errors from vgic_add_lpi
  KVM: arm64: ITS: return 1 on successful MSI injection
-rw-r--r--	Documentation/arm64/silicon-errata.txt	1
-rw-r--r--	arch/arm/kvm/mmu.c	2
-rw-r--r--	arch/arm64/kvm/hyp/switch.c	2
-rw-r--r--	arch/arm64/kvm/sys_regs.c	10
-rw-r--r--	arch/mips/kvm/mmu.c	2
-rw-r--r--	arch/x86/kvm/vmx.c	136
-rw-r--r--	include/linux/irqchip/arm-gic-v3.h	1
-rw-r--r--	virt/kvm/arm/arch_timer.c	11
-rw-r--r--	virt/kvm/arm/vgic/vgic-its.c	158
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio-v3.c	26
-rw-r--r--	virt/kvm/arm/vgic/vgic-v3.c	8
-rw-r--r--	virt/kvm/arm/vgic/vgic.c	10
-rw-r--r--	virt/kvm/arm/vgic/vgic.h	6
13 files changed, 234 insertions(+), 139 deletions(-)
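
One of the vgic-its fixes pulled here, "KVM: arm64: vgic-its: Make updates to propbaser/pendbaser atomic", replaces a plain read-modify-write of a 64-bit register shadow with a cmpxchg64() retry loop. As a rough, self-contained illustration of that idiom (C11 atomics standing in for the kernel's cmpxchg64(), and a made-up sanitise step, so this is only a sketch of the pattern, not the KVM code itself):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the guest-visible 64-bit PROPBASER shadow. */
static _Atomic uint64_t propbaser;

/* Placeholder sanitiser: the real code masks reserved/unsupported bits. */
static uint64_t sanitise(uint64_t val)
{
	return val & ~0xf000ULL;	/* clear some made-up reserved bits */
}

/*
 * Merge a partial (e.g. 32-bit) MMIO write into the 64-bit value without
 * losing a concurrent update: re-read, recompute, and retry until the
 * compare-and-swap succeeds, mirroring the cmpxchg64() loop in the patch.
 */
static void write_reg(uint64_t mask, uint64_t bits)
{
	uint64_t old, new;

	do {
		old = atomic_load(&propbaser);
		new = sanitise((old & ~mask) | (bits & mask));
	} while (!atomic_compare_exchange_weak(&propbaser, &old, new));
}

int main(void)
{
	write_reg(0xffffffffULL, 0x1234f234ULL);	 /* low word */
	write_reg(0xffffffffULL << 32, 0xabcdULL << 32); /* high word */
	printf("propbaser = %#llx\n",
	       (unsigned long long)atomic_load(&propbaser));
	return 0;
}

The retry loop is what makes two racing partial writes compose instead of one silently overwriting the other.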
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 4da60b463995..ccc60324e738 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -53,6 +53,7 @@ stable kernels.
 | ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
 | ARM | Cortex-A57 | #852523 | N/A |
 | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+| ARM | Cortex-A72 | #853709 | N/A |
 | ARM | MMU-500 | #841119,#826419 | N/A |
 | | | | |
 | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index bda27b6b1aa2..29d0b23af2a9 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	smp_rmb();

 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
-	if (is_error_pfn(pfn))
+	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;

 	if (kvm_is_device_pfn(pfn)) {
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f16ec2..5a84b4562603 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)

 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to Cortex-A57 erratum #852523.
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 	 */
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_guest_state(guest_ctxt);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b0b225ceca18..e51367d159d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

 	/* ICC_SRE */
-	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 6cfdcf55572d..121008c0fcc9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	pfn = gfn_to_pfn(kvm, gfn);

-	if (is_error_pfn(pfn)) {
+	if (is_error_noslot_pfn(pfn)) {
 		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
 		err = -EFAULT;
 		goto out;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a45d8580f91e..5cede40e2552 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -422,6 +422,7 @@ struct nested_vmx {
 	struct list_head vmcs02_pool;
 	int vmcs02_num;
 	u64 vmcs01_tsc_offset;
+	bool change_vmcs01_virtual_x2apic_mode;
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;
 	/*
@@ -435,6 +436,8 @@ struct nested_vmx {
 	bool pi_pending;
 	u16 posted_intr_nv;

+	unsigned long *msr_bitmap;
+
 	struct hrtimer preemption_timer;
 	bool preemption_timer_expired;

@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
 static unsigned long *vmx_vmread_bitmap;
 static unsigned long *vmx_vmwrite_bitmap;

@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 			new.control) != old.control);
 }

+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

 	/* Setup TSC multiplier */
 	if (kvm_has_tsc_control &&
-	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
-		vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-		vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-	}
+	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+		decache_tsc_multiplier(vmx);

 	vmx_vcpu_pi_load(vcpu, cpu);
 	vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 	unsigned long *msr_bitmap;

 	if (is_guest_mode(vcpu))
-		msr_bitmap = vmx_msr_bitmap_nested;
+		msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
 	else if (cpu_has_secondary_exec_ctrls() &&
 		 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
 		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
 	if (!vmx_msr_bitmap_longmode_x2apic)
 		goto out4;

-	if (nested) {
-		vmx_msr_bitmap_nested =
-			(unsigned long *)__get_free_page(GFP_KERNEL);
-		if (!vmx_msr_bitmap_nested)
-			goto out5;
-	}
-
 	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_vmread_bitmap)
 		goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)

 	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
 	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-	if (nested)
-		memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);

 	if (setup_vmcs_config(&vmcs_config) < 0) {
 		r = -EIO;
@@ -6529,9 +6526,6 @@ out8:
 out7:
 	free_page((unsigned long)vmx_vmread_bitmap);
 out6:
-	if (nested)
-		free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
 	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 out4:
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
 	free_page((unsigned long)vmx_io_bitmap_a);
 	free_page((unsigned long)vmx_vmwrite_bitmap);
 	free_page((unsigned long)vmx_vmread_bitmap);
-	if (nested)
-		free_page((unsigned long)vmx_msr_bitmap_nested);

 	free_kvm_area();
 }
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}

+	if (cpu_has_vmx_msr_bitmap()) {
+		vmx->nested.msr_bitmap =
+				(unsigned long *)__get_free_page(GFP_KERNEL);
+		if (!vmx->nested.msr_bitmap)
+			goto out_msr_bitmap;
+	}
+
 	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_vmcs12)
-		return -ENOMEM;
+		goto out_cached_vmcs12;

 	if (enable_shadow_vmcs) {
 		shadow_vmcs = alloc_vmcs();
-		if (!shadow_vmcs) {
-			kfree(vmx->nested.cached_vmcs12);
-			return -ENOMEM;
-		}
+		if (!shadow_vmcs)
+			goto out_shadow_vmcs;
 		/* mark vmcs as shadow */
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
 	return 1;
+
+out_shadow_vmcs:
+	kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+	free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+	return -ENOMEM;
 }

 /*
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
 	vmx->nested.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
 	nested_release_vmcs12(vmx);
+	if (vmx->nested.msr_bitmap) {
+		free_page((unsigned long)vmx->nested.msr_bitmap);
+		vmx->nested.msr_bitmap = NULL;
+	}
 	if (enable_shadow_vmcs)
 		free_vmcs(vmx->nested.current_shadow_vmcs);
 	kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 {
 	u32 sec_exec_control;

+	/* Postpone execution until vmcs01 is the current VMCS. */
+	if (is_guest_mode(vcpu)) {
+		to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+		return;
+	}
+
 	/*
 	 * There is not point to enable virtualize x2apic without enable
 	 * apicv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 {
 	int msr;
 	struct page *page;
-	unsigned long *msr_bitmap;
+	unsigned long *msr_bitmap_l1;
+	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;

+	/* This shortcut is ok because we support only x2APIC MSRs so far. */
 	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
 		return false;

@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 		WARN_ON(1);
 		return false;
 	}
-	msr_bitmap = (unsigned long *)kmap(page);
-	if (!msr_bitmap) {
+	msr_bitmap_l1 = (unsigned long *)kmap(page);
+	if (!msr_bitmap_l1) {
 		nested_release_page_clean(page);
 		WARN_ON(1);
 		return false;
 	}

+	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
 	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
 		if (nested_cpu_has_apic_reg_virt(vmcs12))
 			for (msr = 0x800; msr <= 0x8ff; msr++)
 				nested_vmx_disable_intercept_for_msr(
-					msr_bitmap,
-					vmx_msr_bitmap_nested,
+					msr_bitmap_l1, msr_bitmap_l0,
 					msr, MSR_TYPE_R);
-		/* TPR is allowed */
-		nested_vmx_disable_intercept_for_msr(msr_bitmap,
-				vmx_msr_bitmap_nested,
+
+		nested_vmx_disable_intercept_for_msr(
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
 				MSR_TYPE_R | MSR_TYPE_W);
+
 		if (nested_cpu_has_vid(vmcs12)) {
-			/* EOI and self-IPI are allowed */
 			nested_vmx_disable_intercept_for_msr(
-				msr_bitmap,
-				vmx_msr_bitmap_nested,
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_EOI >> 4),
 				MSR_TYPE_W);
 			nested_vmx_disable_intercept_for_msr(
-				msr_bitmap,
-				vmx_msr_bitmap_nested,
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
 				MSR_TYPE_W);
 		}
-	} else {
-		/*
-		 * Enable reading intercept of all the x2apic
-		 * MSRs. We should not rely on vmcs12 to do any
-		 * optimizations here, it may have been modified
-		 * by L1.
-		 */
-		for (msr = 0x800; msr <= 0x8ff; msr++)
-			__vmx_enable_intercept_for_msr(
-					vmx_msr_bitmap_nested,
-					msr,
-					MSR_TYPE_R);
-
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-				MSR_TYPE_W);
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_EOI >> 4),
-				MSR_TYPE_W);
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
-				MSR_TYPE_W);
 	}
 	kunmap(page);
 	nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	}

 	if (cpu_has_vmx_msr_bitmap() &&
-	    exec_control & CPU_BASED_USE_MSR_BITMAPS) {
-		nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
-		/* MSR_BITMAP will be set by following vmx_set_efer. */
-	} else
+	    exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+		; /* MSR_BITMAP will be set by following vmx_set_efer. */
+	else
 		exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;

 	/*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
 	else
 		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+	if (kvm_has_tsc_control)
+		decache_tsc_multiplier(vmx);

 	if (enable_vpid) {
 		/*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	else
 		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
 			      PIN_BASED_VMX_PREEMPTION_TIMER);
+	if (kvm_has_tsc_control)
+		decache_tsc_multiplier(vmx);
+
+	if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+		vmx_set_virtual_x2apic_mode(vcpu,
+				vcpu->arch.apic_base & X2APIC_ENABLE);
+	}

 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
 	vmx->host_rsp = 0;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 56b0b7ec66aa..99ac022edc60 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -337,6 +337,7 @@
  */
 #define E_ITS_MOVI_UNMAPPED_INTERRUPT		0x010107
 #define E_ITS_MOVI_UNMAPPED_COLLECTION		0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT		0x010307
 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT		0x010507
 #define E_ITS_MAPD_DEVICE_OOR			0x010801
 #define E_ITS_MAPC_PROCNUM_OOR			0x010902
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4fde8c7dfcfe..77e6ccf14901 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -33,6 +33,7 @@
 static struct timecounter *timecounter;
 static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
+static u32 host_vtimer_irq_flags;

 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)

 static void kvm_timer_init_interrupt(void *info)
 {
-	enable_percpu_irq(host_vtimer_irq, 0);
+	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }

 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void)
 	}
 	host_vtimer_irq = info->virtual_irq;

+	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
+	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
+	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
+		kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+			host_vtimer_irq);
+		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+	}
+
 	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
 				 "kvm guest timer", kvm_get_running_vcpus());
 	if (err) {
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 07411cf967b9..4660a7d04eea 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)

 	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
 	if (!irq)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
  * Find the target VCPU and the LPI number for a given devid/eventid pair
  * and make this IRQ pending, possibly injecting it.
  * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
  */
-static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 				u32 devid, u32 eventid)
 {
+	struct kvm_vcpu *vcpu;
 	struct its_itte *itte;

 	if (!its->enabled)
-		return;
+		return -EBUSY;

 	itte = find_itte(its, devid, eventid);
-	/* Triggering an unmapped IRQ gets silently dropped. */
-	if (itte && its_is_collection_mapped(itte->collection)) {
-		struct kvm_vcpu *vcpu;
-
-		vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
-		if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
-			spin_lock(&itte->irq->irq_lock);
-			itte->irq->pending = true;
-			vgic_queue_irq_unlock(kvm, itte->irq);
-		}
-	}
+	if (!itte || !its_is_collection_mapped(itte->collection))
+		return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+	if (!vcpu)
+		return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+	if (!vcpu->arch.vgic_cpu.lpis_enabled)
+		return -EBUSY;
+
+	spin_lock(&itte->irq->irq_lock);
+	itte->irq->pending = true;
+	vgic_queue_irq_unlock(kvm, itte->irq);
+
+	return 0;
+}
+
+static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+{
+	struct vgic_io_device *iodev;
+
+	if (dev->ops != &kvm_io_gic_ops)
+		return NULL;
+
+	iodev = container_of(dev, struct vgic_io_device, dev);
+
+	if (iodev->iodev_type != IODEV_ITS)
+		return NULL;
+
+	return iodev;
 }

 /*
  * Queries the KVM IO bus framework to get the ITS pointer from the given
  * doorbell address.
  * We then call vgic_its_trigger_msi() with the decoded data.
+ * According to the KVM_SIGNAL_MSI API description returns 1 on success.
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
 	u64 address;
 	struct kvm_io_device *kvm_io_dev;
 	struct vgic_io_device *iodev;
+	int ret;

 	if (!vgic_has_its(kvm))
 		return -ENODEV;
@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)

 	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
 	if (!kvm_io_dev)
-		return -ENODEV;
+		return -EINVAL;

-	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+	iodev = vgic_get_its_iodev(kvm_io_dev);
+	if (!iodev)
+		return -EINVAL;

 	mutex_lock(&iodev->its->its_lock);
-	vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
 	mutex_unlock(&iodev->its->its_lock);

-	return 0;
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
+	 * if the guest has blocked the MSI. So we map any LPI mapping
+	 * related error to that.
+	 */
+	if (ret)
+		return 0;
+	else
+		return 1;
 }

 /* Requires the its_lock to be held. */
@@ -502,7 +539,8 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
 	list_del(&itte->itte_list);

 	/* This put matches the get in vgic_add_lpi. */
-	vgic_put_irq(kvm, itte->irq);
+	if (itte->irq)
+		vgic_put_irq(kvm, itte->irq);

 	kfree(itte);
 }
@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	struct its_device *device;
 	struct its_collection *collection, *new_coll = NULL;
 	int lpi_nr;
+	struct vgic_irq *irq;

 	device = find_its_device(its, device_id);
 	if (!device)
@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
 		return E_ITS_MAPTI_PHYSICALID_OOR;

+	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
+	if (find_itte(its, device_id, event_id))
+		return 0;
+
 	collection = find_collection(its, coll_id);
 	if (!collection) {
 		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
@@ -718,22 +761,28 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 		new_coll = collection;
 	}

-	itte = find_itte(its, device_id, event_id);
+	itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
 	if (!itte) {
-		itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
-		if (!itte) {
-			if (new_coll)
-				vgic_its_free_collection(its, coll_id);
-			return -ENOMEM;
-		}
-
-		itte->event_id = event_id;
-		list_add_tail(&itte->itte_list, &device->itt_head);
+		if (new_coll)
+			vgic_its_free_collection(its, coll_id);
+		return -ENOMEM;
 	}

+	itte->event_id = event_id;
+	list_add_tail(&itte->itte_list, &device->itt_head);
+
 	itte->collection = collection;
 	itte->lpi = lpi_nr;
-	itte->irq = vgic_add_lpi(kvm, lpi_nr);
+
+	irq = vgic_add_lpi(kvm, lpi_nr);
+	if (IS_ERR(irq)) {
+		if (new_coll)
+			vgic_its_free_collection(its, coll_id);
+		its_free_itte(kvm, itte);
+		return PTR_ERR(irq);
+	}
+	itte->irq = irq;
+
 	update_affinity_itte(kvm, itte);

 	/*
@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
 	u32 msi_data = its_cmd_get_id(its_cmd);
 	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

-	vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
-
-	return 0;
+	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
 }

 /*
@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu)
 	its_sync_lpi_pending_table(vcpu);
 }

-static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
 {
 	struct vgic_io_device *iodev = &its->iodev;
 	int ret;

-	if (its->initialized)
-		return 0;
+	if (!its->initialized)
+		return -EBUSY;

 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
 		return -ENXIO;
@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
 				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
 	mutex_unlock(&kvm->slots_lock);

-	if (!ret)
-		its->initialized = true;
-
 	return ret;
 }

@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev,
 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
 			return -ENODEV;

-		if (its->initialized)
-			return -EBUSY;
-
 		if (copy_from_user(&addr, uaddr, sizeof(addr)))
 			return -EFAULT;

@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev,
 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
 		switch (attr->attr) {
 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
-			return vgic_its_init_its(dev->kvm, its);
+			its->initialized = true;
+
+			return 0;
 		}
 		break;
 	}
@@ -1498,3 +1541,30 @@ int kvm_vgic_register_its_device(void)
 	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
 				       KVM_DEV_TYPE_ARM_VGIC_ITS);
 }
+
+/*
+ * Registers all ITSes with the kvm_io_bus framework.
+ * To follow the existing VGIC initialization sequence, this has to be
+ * done as late as possible, just before the first VCPU runs.
+ */
+int vgic_register_its_iodevs(struct kvm *kvm)
+{
+	struct kvm_device *dev;
+	int ret = 0;
+
+	list_for_each_entry(dev, &kvm->devices, vm_node) {
+		if (dev->ops != &kvm_arm_vgic_its_ops)
+			continue;
+
+		ret = vgic_register_its_iodev(kvm, dev->private);
+		if (ret)
+			return ret;
+		/*
+		 * We don't need to care about tearing down previously
+		 * registered ITSes, as the kvm_io_bus framework removes
+		 * them for us if the VM gets destroyed.
+		 */
+	}
+
+	return ret;
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index ff668e0dd586..90d81811fdda 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -306,16 +306,19 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	u64 propbaser = dist->propbaser;
+	u64 old_propbaser, propbaser;

 	/* Storing a value with LPIs already enabled is undefined */
 	if (vgic_cpu->lpis_enabled)
 		return;

-	propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
-	propbaser = vgic_sanitise_propbaser(propbaser);
-
-	dist->propbaser = propbaser;
+	do {
+		old_propbaser = dist->propbaser;
+		propbaser = old_propbaser;
+		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+		propbaser = vgic_sanitise_propbaser(propbaser);
+	} while (cmpxchg64(&dist->propbaser, old_propbaser,
+			   propbaser) != old_propbaser);
 }

 static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
@@ -331,16 +334,19 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
 				     unsigned long val)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	u64 pendbaser = vgic_cpu->pendbaser;
+	u64 old_pendbaser, pendbaser;

 	/* Storing a value with LPIs already enabled is undefined */
 	if (vgic_cpu->lpis_enabled)
 		return;

-	pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
-	pendbaser = vgic_sanitise_pendbaser(pendbaser);
-
-	vgic_cpu->pendbaser = pendbaser;
+	do {
+		old_pendbaser = vgic_cpu->pendbaser;
+		pendbaser = old_pendbaser;
+		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+		pendbaser = vgic_sanitise_pendbaser(pendbaser);
+	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
+			   pendbaser) != old_pendbaser);
 }

 /*
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 0506543df38a..9f0dae397d9c 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -289,6 +289,14 @@ int vgic_v3_map_resources(struct kvm *kvm)
 		goto out;
 	}

+	if (vgic_has_its(kvm)) {
+		ret = vgic_register_its_iodevs(kvm);
+		if (ret) {
+			kvm_err("Unable to register VGIC ITS MMIO regions\n");
+			goto out;
+		}
+	}
+
 	dist->ready = true;

 out:
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e7aeac719e09..e83b7fe4baae 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -117,17 +117,17 @@ static void vgic_irq_release(struct kref *ref)

 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
-	struct vgic_dist *dist;
+	struct vgic_dist *dist = &kvm->arch.vgic;

 	if (irq->intid < VGIC_MIN_LPI)
 		return;

-	if (!kref_put(&irq->refcount, vgic_irq_release))
+	spin_lock(&dist->lpi_list_lock);
+	if (!kref_put(&irq->refcount, vgic_irq_release)) {
+		spin_unlock(&dist->lpi_list_lock);
 		return;
+	};

-	dist = &kvm->arch.vgic;
-
-	spin_lock(&dist->lpi_list_lock);
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
 	spin_unlock(&dist->lpi_list_lock);
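
The vgic_put_irq() change above takes the lpi_list_lock before the final kref_put(), so dropping the last reference and unlinking the LPI happen under the same lock. A rough userspace sketch of that put-under-the-list-lock pattern follows; a pthread mutex and a plain integer stand in for the kernel's spinlock and kref, so treat it as an illustration of the locking order only, not the KVM code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy object standing in for struct vgic_irq: refcounted and list-linked. */
struct obj {
	int refcount;
	struct obj *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *list_head;

/*
 * Drop a reference. The decrement and the list removal happen under the
 * same lock, so a concurrent lookup either sees the object with a non-zero
 * refcount or does not see it at all, which is the race being plugged.
 */
static void obj_put(struct obj *obj)
{
	struct obj **pp;

	pthread_mutex_lock(&list_lock);
	if (--obj->refcount > 0) {
		pthread_mutex_unlock(&list_lock);
		return;
	}

	/* Last reference: unlink before anyone else can find the object. */
	for (pp = &list_head; *pp; pp = &(*pp)->next) {
		if (*pp == obj) {
			*pp = obj->next;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	free(obj);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 2;
	o->next = list_head;
	list_head = o;

	obj_put(o);		/* still referenced, stays on the list */
	printf("list empty: %s\n", list_head ? "no" : "yes");
	obj_put(o);		/* final put: unlink under the lock, then free */
	printf("list empty: %s\n", list_head ? "no" : "yes");
	return 0;
}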
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 1d8e21d5c13f..6c4625c46368 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -84,6 +84,7 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
 int vgic_v3_probe(const struct gic_kvm_info *info);
 int vgic_v3_map_resources(struct kvm *kvm);
 int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+int vgic_register_its_iodevs(struct kvm *kvm);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
@@ -140,6 +141,11 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm,
 	return -ENODEV;
 }

+static inline int vgic_register_its_iodevs(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
 static inline bool vgic_has_its(struct kvm *kvm)
 {
 	return false;