diff options
-rw-r--r-- | include/kvm/arm_vgic.h | 1 | ||||
-rw-r--r-- | virt/kvm/arm/arm.c | 11 | ||||
-rw-r--r-- | virt/kvm/arm/vgic/vgic-v2.c | 9 | ||||
-rw-r--r-- | virt/kvm/arm/vgic/vgic-v3.c | 7 | ||||
-rw-r--r-- | virt/kvm/arm/vgic/vgic.c | 11 | ||||
-rw-r--r-- | virt/kvm/arm/vgic/vgic.h | 2 |
6 files changed, 39 insertions, 2 deletions
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 46bbc949c20a..7a30524a80ee 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); | |||
350 | 350 | ||
351 | void kvm_vgic_load(struct kvm_vcpu *vcpu); | 351 | void kvm_vgic_load(struct kvm_vcpu *vcpu); |
352 | void kvm_vgic_put(struct kvm_vcpu *vcpu); | 352 | void kvm_vgic_put(struct kvm_vcpu *vcpu); |
353 | void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); | ||
353 | 354 | ||
354 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) | 355 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) |
355 | #define vgic_initialized(k) ((k)->arch.vgic.initialized) | 356 | #define vgic_initialized(k) ((k)->arch.vgic.initialized) |
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c704fa696184..482b20256fa8 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -323,6 +323,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
323 | 323 | ||
324 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) | 324 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) |
325 | { | 325 | { |
326 | /* | ||
327 | * If we're about to block (most likely because we've just hit a | ||
328 | * WFI), we need to sync back the state of the GIC CPU interface | ||
329 | * so that we have the latest PMR and group enables. This ensures | ||
330 | * that kvm_arch_vcpu_runnable has up-to-date data to decide | ||
331 | * whether we have pending interrupts. | ||
332 | */ | ||
333 | preempt_disable(); | ||
334 | kvm_vgic_vmcr_sync(vcpu); | ||
335 | preempt_enable(); | ||
336 | |||
326 | kvm_vgic_v4_enable_doorbell(vcpu); | 337 | kvm_vgic_v4_enable_doorbell(vcpu); |
327 | } | 338 | } |
328 | 339 | ||
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 6dd5ad706c92..96aab77d0471 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu) | |||
484 | kvm_vgic_global_state.vctrl_base + GICH_APR); | 484 | kvm_vgic_global_state.vctrl_base + GICH_APR); |
485 | } | 485 | } |
486 | 486 | ||
487 | void vgic_v2_put(struct kvm_vcpu *vcpu) | 487 | void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu) |
488 | { | 488 | { |
489 | struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; | 489 | struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; |
490 | 490 | ||
491 | cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR); | 491 | cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR); |
492 | } | ||
493 | |||
494 | void vgic_v2_put(struct kvm_vcpu *vcpu) | ||
495 | { | ||
496 | struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; | ||
497 | |||
498 | vgic_v2_vmcr_sync(vcpu); | ||
492 | cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR); | 499 | cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR); |
493 | } | 500 | } |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index c2c9ce009f63..0c653a1e5215 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) | |||
662 | __vgic_v3_activate_traps(vcpu); | 662 | __vgic_v3_activate_traps(vcpu); |
663 | } | 663 | } |
664 | 664 | ||
665 | void vgic_v3_put(struct kvm_vcpu *vcpu) | 665 | void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu) |
666 | { | 666 | { |
667 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; | 667 | struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; |
668 | 668 | ||
669 | if (likely(cpu_if->vgic_sre)) | 669 | if (likely(cpu_if->vgic_sre)) |
670 | cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); | 670 | cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); |
671 | } | ||
672 | |||
673 | void vgic_v3_put(struct kvm_vcpu *vcpu) | ||
674 | { | ||
675 | vgic_v3_vmcr_sync(vcpu); | ||
671 | 676 | ||
672 | kvm_call_hyp(__vgic_v3_save_aprs, vcpu); | 677 | kvm_call_hyp(__vgic_v3_save_aprs, vcpu); |
673 | 678 | ||
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 04786c8ec77e..13d4b38a94ec 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu) | |||
919 | vgic_v3_put(vcpu); | 919 | vgic_v3_put(vcpu); |
920 | } | 920 | } |
921 | 921 | ||
922 | void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu) | ||
923 | { | ||
924 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | ||
925 | return; | ||
926 | |||
927 | if (kvm_vgic_global_state.type == VGIC_V2) | ||
928 | vgic_v2_vmcr_sync(vcpu); | ||
929 | else | ||
930 | vgic_v3_vmcr_sync(vcpu); | ||
931 | } | ||
932 | |||
922 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | 933 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) |
923 | { | 934 | { |
924 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 935 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 57205beaa981..11adbdac1d56 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h | |||
@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, | |||
193 | void vgic_v2_init_lrs(void); | 193 | void vgic_v2_init_lrs(void); |
194 | void vgic_v2_load(struct kvm_vcpu *vcpu); | 194 | void vgic_v2_load(struct kvm_vcpu *vcpu); |
195 | void vgic_v2_put(struct kvm_vcpu *vcpu); | 195 | void vgic_v2_put(struct kvm_vcpu *vcpu); |
196 | void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu); | ||
196 | 197 | ||
197 | void vgic_v2_save_state(struct kvm_vcpu *vcpu); | 198 | void vgic_v2_save_state(struct kvm_vcpu *vcpu); |
198 | void vgic_v2_restore_state(struct kvm_vcpu *vcpu); | 199 | void vgic_v2_restore_state(struct kvm_vcpu *vcpu); |
@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm); | |||
223 | 224 | ||
224 | void vgic_v3_load(struct kvm_vcpu *vcpu); | 225 | void vgic_v3_load(struct kvm_vcpu *vcpu); |
225 | void vgic_v3_put(struct kvm_vcpu *vcpu); | 226 | void vgic_v3_put(struct kvm_vcpu *vcpu); |
227 | void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu); | ||
226 | 228 | ||
227 | bool vgic_has_its(struct kvm *kvm); | 229 | bool vgic_has_its(struct kvm *kvm); |
228 | int kvm_vgic_register_its_device(void); | 230 | int kvm_vgic_register_its_device(void); |