author     Marc Zyngier <marc.zyngier@arm.com>  2013-02-01 13:28:30 -0500
committer  Marc Zyngier <marc.zyngier@arm.com>  2013-02-22 08:29:38 -0500
commit     33c83cb3c1d84b76c8270abe5487e77f83a81b22 (patch)
tree       923e491a5533d96e906a1b67c174c5b152eb680a /arch/arm/kvm
parent     75da01e127f7db3b23effa6118336d303e7572a7 (diff)
ARM: KVM: vgic: take distributor lock on sync_hwstate path
Now that the maintenance interrupt handling has been moved out of the
handler itself, the code becomes quite racy: we can get preempted
while we process the state.

Taking the distributor lock around this code ensures that we're not
preempted and keeps the path relatively race-free.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
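To make the race concrete, here is a minimal user-space sketch of the pattern, using a pthread mutex in place of the kernel's spinlock. Every symbol in it (dist_lock, lr_state, sync_hwstate()) is an illustrative stand-in, not the actual KVM/vgic code. The sync path is a multi-step read-modify-write over shared state; once it runs in preemptible context, holding the lock across the whole sequence is what closes the window.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-ins only -- not the real KVM/vgic symbols. */
    static pthread_mutex_t dist_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int lr_state = 0x3;    /* shared "distributor" state */

    /*
     * The sync path: a multi-step read-modify-write over shared state.
     * If this thread were preempted between the read and the write-back,
     * a concurrent writer's update could be silently lost -- the race
     * the commit message describes.
     */
    static void sync_hwstate(unsigned int eoi_mask)
    {
            pthread_mutex_lock(&dist_lock);  /* the fix, in miniature */
            unsigned int tmp = lr_state;     /* read ...              */
            tmp &= ~eoi_mask;                /* ... modify ...        */
            lr_state = tmp;                  /* ... write back        */
            pthread_mutex_unlock(&dist_lock);
    }

    static void *writer(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&dist_lock);
            lr_state |= 0x4;                 /* e.g. a newly pending irq */
            pthread_mutex_unlock(&dist_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, writer, NULL);
            sync_hwstate(0x1);
            pthread_join(t, NULL);

            /* With the lock held across the whole sequence the result
             * is always 0x6, whatever the interleaving; without it,
             * the writer's bit could be lost. */
            printf("lr_state = %#x\n", lr_state);
            return 0;
    }

In the patch below, spin_lock(&dist->lock) plays the role of the mutex and __kvm_vgic_sync_hwstate() is the read-modify-write sequence.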
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/vgic.c  24
1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 76ea1aa5e7d2..0e4cfe123b38 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -1016,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
 	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
 
-	/*
-	 * We do not need to take the distributor lock here, since the only
-	 * action we perform is clearing the irq_active_bit for an EOIed
-	 * level interrupt. There is a potential race with
-	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-	 * check if the interrupt is already active. Two possibilities:
-	 *
-	 * - The queuing is occurring on the same vcpu: cannot happen,
-	 *   as we're already in the context of this vcpu, and
-	 *   executing the handler
-	 * - The interrupt has been migrated to another vcpu, and we
-	 *   ignore this interrupt for this run. Big deal. It is still
-	 *   pending though, and will get considered when this vcpu
-	 *   exits.
-	 */
 	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
@@ -1069,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
  */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1117,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
+	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
+	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
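Note the shape of the fix in the last hunk: the lock is taken in the outer entry point, kvm_vgic_sync_hwstate(), rather than inside __kvm_vgic_sync_hwstate() itself. The inner helper stays a pure state-processing function, and the locking is visible at the same call boundary as the irqchip_in_kernel() check.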