path: root/virt/kvm
author    Christoffer Dall <christoffer.dall@arm.com>  2019-01-04 07:31:22 -0500
committer Marc Zyngier <marc.zyngier@arm.com>          2019-02-19 16:05:43 -0500
commit    bee038a67487598ebbe995f85bf60c3a5b2e9099 (patch)
tree      c0756ab3d7bc4822adff585a482c940135f4db6e /virt/kvm
parent    9e01dc76be6a3b5768cb02130d2ff0055a68809a (diff)
KVM: arm/arm64: Rework the timer code to use a timer_map
We are currently emulating two timers in two different ways. When we add
support for nested virtualization in the future, we are going to be
emulating either two timers in two different ways, or four timers in a
single way.

We need a unified data structure to keep track of how we map virtual
state to physical state, and we need to clean up some of the timer code
to operate more independently on a struct arch_timer_context instead of
trying to consider the global state of the VCPU and recomputing all
state.

Co-written with Marc Zyngier <marc.zyngier@arm.com>

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
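The timer_map structure itself is declared outside virt/kvm (presumably in include/kvm/arm_arch_timer.h, which the trace.h hunk below starts including), so its definition is not part of this diffstat. Judging from how get_timer_map() populates it in arch_timer.c, it is a small struct of three context pointers, roughly as sketched here (an assumption based on this patch, not the literal header change):

/*
 * Sketch of the assumed timer_map layout; the real definition is not
 * shown in this diff. NULL means "no timer plays this role here".
 */
struct timer_map {
	struct arch_timer_context *direct_vtimer; /* hardware-backed vtimer, always present */
	struct arch_timer_context *direct_ptimer; /* hardware-backed ptimer (VHE), else NULL */
	struct arch_timer_context *emul_ptimer;   /* hrtimer-emulated ptimer (non-VHE), else NULL */
};

With this mapping, paths such as kvm_timer_vcpu_load() and kvm_timer_vcpu_put() below no longer branch on has_vhe() directly; they simply act on whichever pointers in the map are non-NULL.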
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/arch_timer.c  295
-rw-r--r--  virt/kvm/arm/trace.h       105
2 files changed, 265 insertions(+), 135 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 10c15151c87e..17f9de73cc8a 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -68,6 +68,21 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
+{
+	if (has_vhe()) {
+		map->direct_vtimer = vcpu_vtimer(vcpu);
+		map->direct_ptimer = vcpu_ptimer(vcpu);
+		map->emul_ptimer = NULL;
+	} else {
+		map->direct_vtimer = vcpu_vtimer(vcpu);
+		map->direct_ptimer = NULL;
+		map->emul_ptimer = vcpu_ptimer(vcpu);
+	}
+
+	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
+}
+
 static inline bool userspace_irqchip(struct kvm *kvm)
 {
 	return static_branch_unlikely(&userspace_irqchip_in_use) &&
@@ -89,6 +104,7 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
 	struct arch_timer_context *ctx;
+	struct timer_map map;
 
 	/*
 	 * We may see a timer interrupt after vcpu_put() has been called which
@@ -99,10 +115,12 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (!vcpu)
 		return IRQ_HANDLED;
 
+	get_timer_map(vcpu, &map);
+
 	if (irq == host_vtimer_irq)
-		ctx = vcpu_vtimer(vcpu);
+		ctx = map.direct_vtimer;
 	else
-		ctx = vcpu_ptimer(vcpu);
+		ctx = map.direct_ptimer;
 
 	if (kvm_timer_should_fire(ctx))
 		kvm_timer_update_irq(vcpu, true, ctx);
@@ -136,7 +154,9 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 
 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 {
-	return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+	WARN_ON(timer_ctx && timer_ctx->loaded);
+	return timer_ctx &&
+	       !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
 	       (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
 }
 
@@ -146,21 +166,22 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
  */
 static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
 {
-	u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	u64 min_delta = ULLONG_MAX;
+	int i;
 
-	if (kvm_timer_irq_can_fire(vtimer))
-		min_virt = kvm_timer_compute_delta(vtimer);
+	for (i = 0; i < NR_KVM_TIMERS; i++) {
+		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
 
-	if (kvm_timer_irq_can_fire(ptimer))
-		min_phys = kvm_timer_compute_delta(ptimer);
+		WARN(ctx->loaded, "timer %d loaded\n", i);
+		if (kvm_timer_irq_can_fire(ctx))
+			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
+	}
 
 	/* If none of timers can fire, then return 0 */
-	if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
+	if (min_delta == ULLONG_MAX)
 		return 0;
 
-	return min(min_virt, min_phys);
+	return min_delta;
 }
 
 static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
@@ -187,37 +208,45 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
 	return HRTIMER_NORESTART;
 }
 
-static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
+static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
 {
-	struct arch_timer_context *ptimer;
+	struct arch_timer_context *ctx;
 	struct kvm_vcpu *vcpu;
 	u64 ns;
 
-	ptimer = container_of(hrt, struct arch_timer_context, hrtimer);
-	vcpu = ptimer->vcpu;
+	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
+	vcpu = ctx->vcpu;
+
+	trace_kvm_timer_hrtimer_expire(ctx);
 
 	/*
 	 * Check that the timer has really expired from the guest's
 	 * PoV (NTP on the host may have forced it to expire
 	 * early). If not ready, schedule for a later time.
 	 */
-	ns = kvm_timer_compute_delta(ptimer);
+	ns = kvm_timer_compute_delta(ctx);
 	if (unlikely(ns)) {
 		hrtimer_forward_now(hrt, ns_to_ktime(ns));
 		return HRTIMER_RESTART;
 	}
 
-	kvm_timer_update_irq(vcpu, true, ptimer);
+	kvm_timer_update_irq(vcpu, true, ctx);
 	return HRTIMER_NORESTART;
 }
 
 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 {
-	struct arch_timer_cpu *timer = vcpu_timer(timer_ctx->vcpu);
-	enum kvm_arch_timers index = arch_timer_ctx_index(timer_ctx);
+	struct arch_timer_cpu *timer;
+	enum kvm_arch_timers index;
 	u64 cval, now;
 
-	if (timer->loaded) {
+	if (!timer_ctx)
+		return false;
+
+	timer = vcpu_timer(timer_ctx->vcpu);
+	index = arch_timer_ctx_index(timer_ctx);
+
+	if (timer_ctx->loaded) {
 		u32 cnt_ctl = 0;
 
 		switch (index) {
@@ -249,13 +278,13 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 
 bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	struct timer_map map;
 
-	if (kvm_timer_should_fire(vtimer))
-		return true;
+	get_timer_map(vcpu, &map);
 
-	return kvm_timer_should_fire(ptimer);
+	return kvm_timer_should_fire(map.direct_vtimer) ||
+	       kvm_timer_should_fire(map.direct_ptimer) ||
+	       kvm_timer_should_fire(map.emul_ptimer);
 }
 
 /*
@@ -294,60 +323,28 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	}
 }
 
-/* Schedule the background timer for the emulated timer. */
-static void phys_timer_emulate(struct kvm_vcpu *vcpu)
+static void timer_emulate(struct arch_timer_context *ctx)
 {
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	bool should_fire = kvm_timer_should_fire(ctx);
 
-	/*
-	 * If the timer can fire now, we don't need to have a soft timer
-	 * scheduled for the future. If the timer cannot fire at all,
-	 * then we also don't need a soft timer.
-	 */
-	if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
-		soft_timer_cancel(&ptimer->hrtimer);
-		return;
-	}
-
-	soft_timer_start(&ptimer->hrtimer, kvm_timer_compute_delta(ptimer));
-}
-
-/*
- * Check if there was a change in the timer state, so that we should either
- * raise or lower the line level to the GIC or schedule a background timer to
- * emulate the physical timer.
- */
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-	bool level;
+	trace_kvm_timer_emulate(ctx, should_fire);
 
-	if (unlikely(!timer->enabled))
+	if (should_fire) {
+		kvm_timer_update_irq(ctx->vcpu, true, ctx);
 		return;
+	}
 
 	/*
-	 * If the timer virtual interrupt is a 'mapped' interrupt, part
-	 * of its lifecycle is offloaded to the hardware, and we therefore may
-	 * not have lowered the irq.level value before having to signal a new
-	 * interrupt, but have to signal an interrupt every time the level is
-	 * asserted.
+	 * If the timer can fire now, we don't need to have a soft timer
+	 * scheduled for the future. If the timer cannot fire at all,
+	 * then we also don't need a soft timer.
 	 */
-	level = kvm_timer_should_fire(vtimer);
-	kvm_timer_update_irq(vcpu, level, vtimer);
-
-	if (has_vhe()) {
-		level = kvm_timer_should_fire(ptimer);
-		kvm_timer_update_irq(vcpu, level, ptimer);
-
+	if (!kvm_timer_irq_can_fire(ctx)) {
+		soft_timer_cancel(&ctx->hrtimer);
 		return;
 	}
 
-	phys_timer_emulate(vcpu);
-
-	if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
-		kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
+	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
 }
 
 static void timer_save_state(struct arch_timer_context *ctx)
@@ -361,7 +358,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
 
 	local_irq_save(flags);
 
-	if (!timer->loaded)
+	if (!ctx->loaded)
 		goto out;
 
 	switch (index) {
@@ -384,10 +381,12 @@ static void timer_save_state(struct arch_timer_context *ctx)
 
 		break;
 	case NR_KVM_TIMERS:
-		break; /* GCC is braindead */
+		BUG();
 	}
 
-	timer->loaded = false;
+	trace_kvm_timer_save_state(ctx);
+
+	ctx->loaded = false;
 out:
 	local_irq_restore(flags);
 }
@@ -400,14 +399,17 @@ out:
 static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	struct timer_map map;
+
+	get_timer_map(vcpu, &map);
 
 	/*
-	 * If both timers are not capable of raising interrupts (disabled or
+	 * If no timers are capable of raising interrupts (disabled or
 	 * masked), then there's no more work for us to do.
 	 */
-	if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
+	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
+	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
+	    !kvm_timer_irq_can_fire(map.emul_ptimer))
 		return;
 
 	/*
@@ -435,7 +437,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 
 	local_irq_save(flags);
 
-	if (timer->loaded)
+	if (ctx->loaded)
 		goto out;
 
 	switch (index) {
@@ -450,10 +452,12 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 		write_sysreg_el0(ctx->cnt_ctl, cntp_ctl);
 		break;
 	case NR_KVM_TIMERS:
-		break; /* GCC is braindead */
+		BUG();
 	}
 
-	timer->loaded = true;
+	trace_kvm_timer_restore_state(ctx);
+
+	ctx->loaded = true;
 out:
 	local_irq_restore(flags);
 }
@@ -515,37 +519,31 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	struct timer_map map;
 
 	if (unlikely(!timer->enabled))
 		return;
 
+	get_timer_map(vcpu, &map);
+
 	if (static_branch_likely(&has_gic_active_state)) {
-		kvm_timer_vcpu_load_gic(vtimer);
-		if (has_vhe())
-			kvm_timer_vcpu_load_gic(ptimer);
+		kvm_timer_vcpu_load_gic(map.direct_vtimer);
+		if (map.direct_ptimer)
+			kvm_timer_vcpu_load_gic(map.direct_ptimer);
 	} else {
 		kvm_timer_vcpu_load_nogic(vcpu);
 	}
 
-	set_cntvoff(vtimer->cntvoff);
-
-	timer_restore_state(vtimer);
-
-	if (has_vhe()) {
-		timer_restore_state(ptimer);
-		return;
-	}
-
-	/* Set the background timer for the physical timer emulation. */
-	phys_timer_emulate(vcpu);
+	set_cntvoff(map.direct_vtimer->cntvoff);
 
 	kvm_timer_unblocking(vcpu);
 
-	/* If the timer fired while we weren't running, inject it now */
-	if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
-		kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
+	timer_restore_state(map.direct_vtimer);
+	if (map.direct_ptimer)
+		timer_restore_state(map.direct_ptimer);
+
+	if (map.emul_ptimer)
+		timer_emulate(map.emul_ptimer);
 }
 
 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
@@ -568,20 +566,19 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	struct timer_map map;
 
 	if (unlikely(!timer->enabled))
 		return;
 
-	timer_save_state(vtimer);
-	if (has_vhe()) {
-		timer_save_state(ptimer);
-		return;
-	}
+	get_timer_map(vcpu, &map);
+
+	timer_save_state(map.direct_vtimer);
+	if (map.direct_ptimer)
+		timer_save_state(map.direct_ptimer);
 
 	/*
-	 * Cancel the physical timer emulation, because the only case where we
+	 * Cancel soft timer emulation, because the only case where we
 	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
 	 * in that case we already factor in the deadline for the physical
 	 * timer when scheduling the bg_timer.
@@ -589,7 +586,8 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	 * In any case, we re-schedule the hrtimer for the physical timer when
 	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
 	 */
-	soft_timer_cancel(&ptimer->hrtimer);
+	if (map.emul_ptimer)
+		soft_timer_cancel(&map.emul_ptimer->hrtimer);
 
 	if (swait_active(kvm_arch_vcpu_wq(vcpu)))
 		kvm_timer_blocking(vcpu);
@@ -636,8 +634,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	struct timer_map map;
+
+	get_timer_map(vcpu, &map);
 
 	/*
 	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
@@ -645,12 +644,22 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	 * resets the timer to be disabled and unmasked and is compliant with
 	 * the ARMv7 architecture.
 	 */
-	vtimer->cnt_ctl = 0;
-	ptimer->cnt_ctl = 0;
-	kvm_timer_update_state(vcpu);
+	vcpu_vtimer(vcpu)->cnt_ctl = 0;
+	vcpu_ptimer(vcpu)->cnt_ctl = 0;
+
+	if (timer->enabled) {
+		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
+		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
+
+		if (irqchip_in_kernel(vcpu->kvm)) {
+			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
+			if (map.direct_ptimer)
+				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
+		}
+	}
 
-	if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
-		kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+	if (map.emul_ptimer)
+		soft_timer_cancel(&map.emul_ptimer->hrtimer);
 
 	return 0;
 }
@@ -687,15 +696,18 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	timer->bg_timer.function = kvm_bg_timer_expire;
 
+	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	ptimer->hrtimer.function = kvm_phys_timer_expire;
+	vtimer->hrtimer.function = kvm_hrtimer_expire;
+	ptimer->hrtimer.function = kvm_hrtimer_expire;
 
 	vtimer->irq.irq = default_vtimer_irq.irq;
-	vtimer->host_timer_irq = host_vtimer_irq;
-	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
-
 	ptimer->irq.irq = default_ptimer_irq.irq;
+
+	vtimer->host_timer_irq = host_vtimer_irq;
 	ptimer->host_timer_irq = host_ptimer_irq;
+
+	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
 	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
 
 	vtimer->vcpu = vcpu;
@@ -710,32 +722,39 @@ static void kvm_timer_init_interrupt(void *info)
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 {
+	struct arch_timer_context *timer;
+	bool level;
+
 	switch (regid) {
 	case KVM_REG_ARM_TIMER_CTL:
-		kvm_arm_timer_write(vcpu,
-				    vcpu_vtimer(vcpu), TIMER_REG_CTL, value);
+		timer = vcpu_vtimer(vcpu);
+		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
 		break;
 	case KVM_REG_ARM_TIMER_CNT:
+		timer = vcpu_vtimer(vcpu);
 		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
 		break;
 	case KVM_REG_ARM_TIMER_CVAL:
-		kvm_arm_timer_write(vcpu,
-				    vcpu_vtimer(vcpu), TIMER_REG_CVAL, value);
+		timer = vcpu_vtimer(vcpu);
+		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
 		break;
 	case KVM_REG_ARM_PTIMER_CTL:
-		kvm_arm_timer_write(vcpu,
-				    vcpu_ptimer(vcpu), TIMER_REG_CTL, value);
+		timer = vcpu_ptimer(vcpu);
+		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
 		break;
	case KVM_REG_ARM_PTIMER_CVAL:
-		kvm_arm_timer_write(vcpu,
-				    vcpu_ptimer(vcpu), TIMER_REG_CVAL, value);
+		timer = vcpu_ptimer(vcpu);
+		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
 		break;
 
 	default:
 		return -1;
 	}
 
-	kvm_timer_update_state(vcpu);
+	level = kvm_timer_should_fire(timer);
+	kvm_timer_update_irq(vcpu, level, timer);
+	timer_emulate(timer);
+
 	return 0;
 }
 
@@ -1020,8 +1039,7 @@ bool kvm_arch_timer_get_input_level(int vintid)
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+	struct timer_map map;
 	int ret;
 
 	if (timer->enabled)
@@ -1039,18 +1057,25 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
+	get_timer_map(vcpu, &map);
+
+	ret = kvm_vgic_map_phys_irq(vcpu,
+				    map.direct_vtimer->host_timer_irq,
+				    map.direct_vtimer->irq.irq,
 				    kvm_arch_timer_get_input_level);
 	if (ret)
 		return ret;
 
-	if (has_vhe()) {
-		ret = kvm_vgic_map_phys_irq(vcpu, host_ptimer_irq, ptimer->irq.irq,
+	if (map.direct_ptimer) {
+		ret = kvm_vgic_map_phys_irq(vcpu,
+					    map.direct_ptimer->host_timer_irq,
+					    map.direct_ptimer->irq.irq,
 					    kvm_arch_timer_get_input_level);
-		if (ret)
-			return ret;
 	}
 
+	if (ret)
+		return ret;
+
 no_vgic:
 	timer->enabled = 1;
 	return 0;
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index 3828beab93f2..54bb059243b9 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -2,6 +2,7 @@
 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KVM_H
 
+#include <kvm/arm_arch_timer.h>
 #include <linux/tracepoint.h>
 
 #undef TRACE_SYSTEM
@@ -262,6 +263,110 @@ TRACE_EVENT(kvm_timer_update_irq,
 		  __entry->vcpu_id, __entry->irq, __entry->level)
 );
 
+TRACE_EVENT(kvm_get_timer_map,
+	TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
+	TP_ARGS(vcpu_id, map),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,		vcpu_id		)
+		__field(	int,			direct_vtimer	)
+		__field(	int,			direct_ptimer	)
+		__field(	int,			emul_ptimer	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id	= vcpu_id;
+		__entry->direct_vtimer	= arch_timer_ctx_index(map->direct_vtimer);
+		__entry->direct_ptimer =
+			(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
+		__entry->emul_ptimer =
+			(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
+	),
+
+	TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d",
+		  __entry->vcpu_id,
+		  __entry->direct_vtimer,
+		  __entry->direct_ptimer,
+		  __entry->emul_ptimer)
+);
+
+TRACE_EVENT(kvm_timer_save_state,
+	TP_PROTO(struct arch_timer_context *ctx),
+	TP_ARGS(ctx),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,		ctl		)
+		__field(	unsigned long long,	cval		)
+		__field(	int,			timer_idx	)
+	),
+
+	TP_fast_assign(
+		__entry->ctl		= ctx->cnt_ctl;
+		__entry->cval		= ctx->cnt_cval;
+		__entry->timer_idx	= arch_timer_ctx_index(ctx);
+	),
+
+	TP_printk(" CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
+		  __entry->ctl,
+		  __entry->cval,
+		  __entry->timer_idx)
+);
+
+TRACE_EVENT(kvm_timer_restore_state,
+	TP_PROTO(struct arch_timer_context *ctx),
+	TP_ARGS(ctx),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,		ctl		)
+		__field(	unsigned long long,	cval		)
+		__field(	int,			timer_idx	)
+	),
+
+	TP_fast_assign(
+		__entry->ctl		= ctx->cnt_ctl;
+		__entry->cval		= ctx->cnt_cval;
+		__entry->timer_idx	= arch_timer_ctx_index(ctx);
+	),
+
+	TP_printk("CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
+		  __entry->ctl,
+		  __entry->cval,
+		  __entry->timer_idx)
+);
+
+TRACE_EVENT(kvm_timer_hrtimer_expire,
+	TP_PROTO(struct arch_timer_context *ctx),
+	TP_ARGS(ctx),
+
+	TP_STRUCT__entry(
+		__field(	int,			timer_idx	)
+	),
+
+	TP_fast_assign(
+		__entry->timer_idx	= arch_timer_ctx_index(ctx);
+	),
+
+	TP_printk("arch_timer_ctx_index: %d", __entry->timer_idx)
+);
+
+TRACE_EVENT(kvm_timer_emulate,
+	TP_PROTO(struct arch_timer_context *ctx, bool should_fire),
+	TP_ARGS(ctx, should_fire),
+
+	TP_STRUCT__entry(
+		__field(	int,			timer_idx	)
+		__field(	bool,			should_fire	)
+	),
+
+	TP_fast_assign(
+		__entry->timer_idx	= arch_timer_ctx_index(ctx);
+		__entry->should_fire	= should_fire;
+	),
+
+	TP_printk("arch_timer_ctx_index: %d (should_fire: %d)",
+		  __entry->timer_idx, __entry->should_fire)
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH