aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoffer Dall <christoffer.dall@linaro.org>2018-01-25 08:20:19 -0500
committerChristoffer Dall <christoffer.dall@linaro.org>2018-01-31 04:10:17 -0500
commit13e59ece5b30f39e4e1e1fac2b2ddc7ed527f3cc (patch)
treea64576430832223e0b152170f62dd9ef17692b47
parentb276f1b3b1eb52697f6645bb98f7d32ffb89df69 (diff)
KVM: arm/arm64: Fix incorrect timer_is_pending logic
After the recently introduced support for level-triggered mapped interrupts, I accidentally left the VCPU thread busily going back and forth between the guest and the hypervisor whenever the guest was blocking, because I would always incorrectly report that a timer interrupt was pending. This is because the timer->irq.level field is not valid for mapped interrupts, where we offload the level state to the hardware, and as a result this field is always true. Luckily the problem can be relatively easily solved by not checking the cached signal state of either timer in kvm_timer_should_fire() but instead computing the timer state on the fly, which we do already if the cached signal state wasn't high. In fact, the only reason for checking the cached signal state was a tiny optimization which would only be potentially faster when the polling loop detects a pending timer interrupt, which is quite unlikely. Instead of duplicating the logic from kvm_arch_timer_handler(), we enlighten kvm_timer_should_fire() to report something valid when the timer state is loaded onto the hardware. We can then call this from kvm_arch_timer_handler() as well and avoid the call to __timer_snapshot_state() in kvm_arch_timer_get_input_level(). Reported-by: Tomasz Nowicki <tn@semihalf.com> Tested-by: Tomasz Nowicki <tn@semihalf.com> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
-rw-r--r--virt/kvm/arm/arch_timer.c36
1 file changed, 17 insertions, 19 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index cfcd0323deab..63cf828f3c4f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -97,10 +97,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
97 pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n"); 97 pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
98 return IRQ_NONE; 98 return IRQ_NONE;
99 } 99 }
100 vtimer = vcpu_vtimer(vcpu);
101 100
102 vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); 101 vtimer = vcpu_vtimer(vcpu);
103 if (kvm_timer_irq_can_fire(vtimer)) 102 if (kvm_timer_should_fire(vtimer))
104 kvm_timer_update_irq(vcpu, true, vtimer); 103 kvm_timer_update_irq(vcpu, true, vtimer);
105 104
106 if (static_branch_unlikely(&userspace_irqchip_in_use) && 105 if (static_branch_unlikely(&userspace_irqchip_in_use) &&
@@ -230,6 +229,16 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
230{ 229{
231 u64 cval, now; 230 u64 cval, now;
232 231
232 if (timer_ctx->loaded) {
233 u32 cnt_ctl;
234
235 /* Only the virtual timer can be loaded so far */
236 cnt_ctl = read_sysreg_el0(cntv_ctl);
237 return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
238 (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
239 !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
240 }
241
233 if (!kvm_timer_irq_can_fire(timer_ctx)) 242 if (!kvm_timer_irq_can_fire(timer_ctx))
234 return false; 243 return false;
235 244
@@ -244,15 +253,7 @@ bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
244 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 253 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
245 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 254 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
246 255
247 if (vtimer->irq.level || ptimer->irq.level) 256 if (kvm_timer_should_fire(vtimer))
248 return true;
249
250 /*
251 * When this is called from withing the wait loop of kvm_vcpu_block(),
252 * the software view of the timer state is up to date (timer->loaded
253 * is false), and so we can simply check if the timer should fire now.
254 */
255 if (!vtimer->loaded && kvm_timer_should_fire(vtimer))
256 return true; 257 return true;
257 258
258 return kvm_timer_should_fire(ptimer); 259 return kvm_timer_should_fire(ptimer);
@@ -270,9 +271,9 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
270 /* Populate the device bitmap with the timer states */ 271 /* Populate the device bitmap with the timer states */
271 regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER | 272 regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
272 KVM_ARM_DEV_EL1_PTIMER); 273 KVM_ARM_DEV_EL1_PTIMER);
273 if (vtimer->irq.level) 274 if (kvm_timer_should_fire(vtimer))
274 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER; 275 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
275 if (ptimer->irq.level) 276 if (kvm_timer_should_fire(ptimer))
276 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER; 277 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
277} 278}
278 279
@@ -507,8 +508,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
507 vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER; 508 vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
508 plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER; 509 plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
509 510
510 return vtimer->irq.level != vlevel || 511 return kvm_timer_should_fire(vtimer) != vlevel ||
511 ptimer->irq.level != plevel; 512 kvm_timer_should_fire(ptimer) != plevel;
512} 513}
513 514
514void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) 515void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
@@ -801,9 +802,6 @@ bool kvm_arch_timer_get_input_level(int vintid)
801 else 802 else
802 BUG(); /* We only map the vtimer so far */ 803 BUG(); /* We only map the vtimer so far */
803 804
804 if (timer->loaded)
805 __timer_snapshot_state(timer);
806
807 return kvm_timer_should_fire(timer); 805 return kvm_timer_should_fire(timer);
808} 806}
809 807