 virt/kvm/arm/hyp/vgic-v3-sr.c |  4 ++--
 virt/kvm/arm/vgic/vgic.c      | 14 ++++++++++----
 2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 264d92da3240..370bd6c5e6cb 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		int i;
 		u32 elrsr;
 
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	int i;
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
 		for (i = 0; i < used_lrs; i++)
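[Annotation, not part of the patch] The hunk above makes the GICv3 save/restore paths run even when no list registers are in use, provided the vcpu has a GICv4 vPE attached (cpu_if->its_vpe.its_vm non-NULL). A minimal sketch of that condition as a standalone predicate, assuming the kernel's struct vgic_v3_cpu_if and its embedded struct its_vpe; the helper name is hypothetical and does not exist in the tree:

/*
 * Hypothetical helper, merely restating the condition added above:
 * save/restore is needed either when list registers are in use, or
 * when the vcpu is backed by a GICv4 vPE (its_vm set), since with
 * GICv4 interrupts can be delivered directly without using the LRs.
 */
static inline bool vgic_v3_need_save_restore(u64 used_lrs,
					     struct vgic_v3_cpu_if *cpu_if)
{
	return used_lrs || cpu_if->its_vpe.its_vm;
}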
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index abd9c7352677..3af69f2a3866 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * either observe the new interrupt before or after doing this check,
 	 * and introducing additional synchronization mechanism doesn't change
 	 * this.
+	 *
+	 * Note that we still need to go through the whole thing if anything
+	 * can be directly injected (GICv4).
 	 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+	    !vgic_supports_direct_msis(vcpu->kvm))
 		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-	vgic_flush_lr_state(vcpu);
-	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+		vgic_flush_lr_state(vcpu);
+		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	}
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
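[Annotation, not part of the patch] The reshaped early exit in kvm_vgic_flush_hwstate() can be read as the predicate below. This is only a sketch restating the hunk above, using a hypothetical helper name and assuming vgic_supports_direct_msis() and the ap_list_head/ap_list_lock fields exactly as they appear in the diff:

/*
 * Hypothetical helper, restating the new early-return check: the
 * flush may only be skipped when the AP list is empty *and* the VM
 * cannot inject interrupts directly (no GICv4 direct MSIs); otherwise
 * the rest of the function still has to run.
 */
static inline bool vgic_can_skip_flush(struct kvm_vcpu *vcpu)
{
	return list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	       !vgic_supports_direct_msis(vcpu->kvm);
}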
