diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2016-05-25 10:26:35 -0400 |
---|---|---|
committer | Christoffer Dall <christoffer.dall@linaro.org> | 2016-05-31 10:12:15 -0400 |
commit | df7942d17e1623d7358fe895377293637de5521b (patch) | |
tree | 7fe65e36f85673413127aac9bcf24f1c98d73c88 | |
parent | fa89c77e891917b5913f9be080f9131a9457bb3e (diff) |
KVM: arm/arm64: vgic-v2: Always resample level interrupts
When reading back from the list registers, we need to perform
two actions for level interrupts:
1) clear the soft-pending bit if the interrupt is not pending
anymore *in the list register*
2) resample the line level and propagate it to the pending state
But these two actions shouldn't be linked, and we should *always*
resample the line level, no matter what state is in the list
register. Otherwise, we may end up injecting spurious interrupts
that have been already retired.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
-rw-r--r-- | virt/kvm/arm/vgic/vgic-v2.c | 14 |
1 file changed, 9 insertions, 5 deletions
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 8ad42c217770..e31405ee5515 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Clear soft pending state when level IRQs have been acked */ | 115 | /* |
116 | if (irq->config == VGIC_CONFIG_LEVEL && | 116 | * Clear soft pending state when level irqs have been acked. |
117 | !(val & GICH_LR_PENDING_BIT)) { | 117 | * Always regenerate the pending state. |
118 | irq->soft_pending = false; | 118 | */ |
119 | irq->pending = irq->line_level; | 119 | if (irq->config == VGIC_CONFIG_LEVEL) { |
120 | if (!(val & GICH_LR_PENDING_BIT)) | ||
121 | irq->soft_pending = false; | ||
122 | |||
123 | irq->pending = irq->line_level || irq->soft_pending; | ||
120 | } | 124 | } |
121 | 125 | ||
122 | spin_unlock(&irq->irq_lock); | 126 | spin_unlock(&irq->irq_lock); |