summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarc Zyngier <maz@kernel.org>2019-08-02 05:28:32 -0400
committerMarc Zyngier <maz@kernel.org>2019-08-05 10:36:46 -0400
commit5eeaf10eec394b28fad2c58f1f5c3a5da0e87d1c (patch)
treef7e6c05a259c908bae74fa2852b6d80d811e822d
parentcdb2d3ee0436d74fa9092f2df46aaa6f9e03c969 (diff)
KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block
Since commit 328e56647944 ("KVM: arm/arm64: vgic: Defer touching GICH_VMCR to vcpu_load/put"), we leave ICH_VMCR_EL2 (or its GICv2 equivalent) loaded as long as we can, only syncing it back when we're scheduled out. There is a small snag with that though: kvm_vgic_vcpu_pending_irq(), which is indirectly called from kvm_vcpu_check_block(), needs to evaluate the guest's view of ICC_PMR_EL1. At the point where we call kvm_vcpu_check_block(), the vcpu is still loaded, and whatever changes to PMR are not visible in memory until we do a vcpu_put(). Things go really south if the guest does the following: mov x0, #0 // or any small value masking interrupts msr ICC_PMR_EL1, x0 [vcpu preempted, then rescheduled, VMCR sampled] mov x0, #ff // allow all interrupts msr ICC_PMR_EL1, x0 wfi // traps to EL2, so sampling of VMCR [interrupt arrives just after WFI] Here, the hypervisor's view of PMR is zero, while the guest has enabled its interrupts. kvm_vgic_vcpu_pending_irq() will then say that no interrupts are pending (despite an interrupt being received) and we'll block for no reason. If the guest doesn't have a periodic interrupt firing once it has blocked, it will stay there forever. To avoid this unfortunate situation, let's resync VMCR from kvm_arch_vcpu_blocking(), ensuring that a following kvm_vcpu_check_block() will observe the latest value of PMR. This has been found by booting an arm64 Linux guest with the pseudo NMI feature, and thus using interrupt priorities to mask interrupts instead of the usual PSTATE masking. Cc: stable@vger.kernel.org # 4.12 Fixes: 328e56647944 ("KVM: arm/arm64: vgic: Defer touching GICH_VMCR to vcpu_load/put") Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--virt/kvm/arm/arm.c11
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c9
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c7
-rw-r--r--virt/kvm/arm/vgic/vgic.c11
-rw-r--r--virt/kvm/arm/vgic/vgic.h2
6 files changed, 39 insertions, 2 deletions
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 46bbc949c20a..7a30524a80ee 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
350 350
351void kvm_vgic_load(struct kvm_vcpu *vcpu); 351void kvm_vgic_load(struct kvm_vcpu *vcpu);
352void kvm_vgic_put(struct kvm_vcpu *vcpu); 352void kvm_vgic_put(struct kvm_vcpu *vcpu);
353void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
353 354
354#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 355#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
355#define vgic_initialized(k) ((k)->arch.vgic.initialized) 356#define vgic_initialized(k) ((k)->arch.vgic.initialized)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index c704fa696184..482b20256fa8 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -323,6 +323,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
323 323
324void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) 324void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
325{ 325{
326 /*
327 * If we're about to block (most likely because we've just hit a
328 * WFI), we need to sync back the state of the GIC CPU interface
329 * so that we have the latest PMR and group enables. This ensures
330 * that kvm_arch_vcpu_runnable has up-to-date data to decide
331 * whether we have pending interrupts.
332 */
333 preempt_disable();
334 kvm_vgic_vmcr_sync(vcpu);
335 preempt_enable();
336
326 kvm_vgic_v4_enable_doorbell(vcpu); 337 kvm_vgic_v4_enable_doorbell(vcpu);
327} 338}
328 339
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 6dd5ad706c92..96aab77d0471 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
484 kvm_vgic_global_state.vctrl_base + GICH_APR); 484 kvm_vgic_global_state.vctrl_base + GICH_APR);
485} 485}
486 486
487void vgic_v2_put(struct kvm_vcpu *vcpu) 487void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
488{ 488{
489 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; 489 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
490 490
491 cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR); 491 cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
492}
493
494void vgic_v2_put(struct kvm_vcpu *vcpu)
495{
496 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
497
498 vgic_v2_vmcr_sync(vcpu);
492 cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR); 499 cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
493} 500}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index c2c9ce009f63..0c653a1e5215 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
662 __vgic_v3_activate_traps(vcpu); 662 __vgic_v3_activate_traps(vcpu);
663} 663}
664 664
665void vgic_v3_put(struct kvm_vcpu *vcpu) 665void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
666{ 666{
667 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 667 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
668 668
669 if (likely(cpu_if->vgic_sre)) 669 if (likely(cpu_if->vgic_sre))
670 cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); 670 cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
671}
672
673void vgic_v3_put(struct kvm_vcpu *vcpu)
674{
675 vgic_v3_vmcr_sync(vcpu);
671 676
672 kvm_call_hyp(__vgic_v3_save_aprs, vcpu); 677 kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
673 678
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 04786c8ec77e..13d4b38a94ec 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
919 vgic_v3_put(vcpu); 919 vgic_v3_put(vcpu);
920} 920}
921 921
922void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
923{
924 if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
925 return;
926
927 if (kvm_vgic_global_state.type == VGIC_V2)
928 vgic_v2_vmcr_sync(vcpu);
929 else
930 vgic_v3_vmcr_sync(vcpu);
931}
932
922int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) 933int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
923{ 934{
924 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 935 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 57205beaa981..11adbdac1d56 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
193void vgic_v2_init_lrs(void); 193void vgic_v2_init_lrs(void);
194void vgic_v2_load(struct kvm_vcpu *vcpu); 194void vgic_v2_load(struct kvm_vcpu *vcpu);
195void vgic_v2_put(struct kvm_vcpu *vcpu); 195void vgic_v2_put(struct kvm_vcpu *vcpu);
196void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
196 197
197void vgic_v2_save_state(struct kvm_vcpu *vcpu); 198void vgic_v2_save_state(struct kvm_vcpu *vcpu);
198void vgic_v2_restore_state(struct kvm_vcpu *vcpu); 199void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
223 224
224void vgic_v3_load(struct kvm_vcpu *vcpu); 225void vgic_v3_load(struct kvm_vcpu *vcpu);
225void vgic_v3_put(struct kvm_vcpu *vcpu); 226void vgic_v3_put(struct kvm_vcpu *vcpu);
227void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
226 228
227bool vgic_has_its(struct kvm *kvm); 229bool vgic_has_its(struct kvm *kvm);
228int kvm_vgic_register_its_device(void); 230int kvm_vgic_register_its_device(void);