about | summary | refs | log | tree | commit | diff | stats
path: root/virt/kvm/arm/arch_timer.c
diff options
context:
space:
mode:
Diffstat (limited to 'virt/kvm/arm/arch_timer.c')
-rw-r--r--	virt/kvm/arm/arch_timer.c | 116 lines changed
1 file changed, 64 insertions(+), 52 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 70268c0bec79..70f4c30918eb 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq	= 30,
 	.level	= 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 	cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress. Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);
 
-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);
 
 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);
 
-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 	phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;
 
-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
 
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress. Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);
 
 	set_cntvoff(vtimer->cntvoff);
 
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);