aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm
diff options
context:
space:
mode:
authorChristoffer Dall <christoffer.dall@arm.com>2019-02-19 08:04:30 -0500
committerMarc Zyngier <marc.zyngier@arm.com>2019-02-19 16:05:42 -0500
commit9e01dc76be6a3b5768cb02130d2ff0055a68809a (patch)
tree6763096554b5c4143f49ee0653faf5a1d7761fca /virt/kvm
parente604dd5d45c75c2112424dec74853efb708f4fa6 (diff)
KVM: arm/arm64: arch_timer: Assign the phys timer on VHE systems
VHE systems don't have to emulate the physical timer, we can simply assign the EL1 physical timer directly to the VM as the host always uses the EL2 timers. In order to minimize the amount of cruft, AArch32 gets definitions for the physical timer too, but it should be generally unused on this architecture. Co-written with Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--virt/kvm/arm/arch_timer.c219
1 file changed, 170 insertions, 49 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 471f9fd004c9..10c15151c87e 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -35,7 +35,9 @@
35 35
36static struct timecounter *timecounter; 36static struct timecounter *timecounter;
37static unsigned int host_vtimer_irq; 37static unsigned int host_vtimer_irq;
38static unsigned int host_ptimer_irq;
38static u32 host_vtimer_irq_flags; 39static u32 host_vtimer_irq_flags;
40static u32 host_ptimer_irq_flags;
39 41
40static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); 42static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
41 43
@@ -86,20 +88,24 @@ static void soft_timer_cancel(struct hrtimer *hrt)
86static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) 88static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
87{ 89{
88 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; 90 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
89 struct arch_timer_context *vtimer; 91 struct arch_timer_context *ctx;
90 92
91 /* 93 /*
92 * We may see a timer interrupt after vcpu_put() has been called which 94 * We may see a timer interrupt after vcpu_put() has been called which
93 * sets the CPU's vcpu pointer to NULL, because even though the timer 95 * sets the CPU's vcpu pointer to NULL, because even though the timer
94 * has been disabled in vtimer_save_state(), the hardware interrupt 96 * has been disabled in timer_save_state(), the hardware interrupt
95 * signal may not have been retired from the interrupt controller yet. 97 * signal may not have been retired from the interrupt controller yet.
96 */ 98 */
97 if (!vcpu) 99 if (!vcpu)
98 return IRQ_HANDLED; 100 return IRQ_HANDLED;
99 101
100 vtimer = vcpu_vtimer(vcpu); 102 if (irq == host_vtimer_irq)
101 if (kvm_timer_should_fire(vtimer)) 103 ctx = vcpu_vtimer(vcpu);
102 kvm_timer_update_irq(vcpu, true, vtimer); 104 else
105 ctx = vcpu_ptimer(vcpu);
106
107 if (kvm_timer_should_fire(ctx))
108 kvm_timer_update_irq(vcpu, true, ctx);
103 109
104 if (userspace_irqchip(vcpu->kvm) && 110 if (userspace_irqchip(vcpu->kvm) &&
105 !static_branch_unlikely(&has_gic_active_state)) 111 !static_branch_unlikely(&has_gic_active_state))
@@ -208,13 +214,25 @@ static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
208static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) 214static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
209{ 215{
210 struct arch_timer_cpu *timer = vcpu_timer(timer_ctx->vcpu); 216 struct arch_timer_cpu *timer = vcpu_timer(timer_ctx->vcpu);
217 enum kvm_arch_timers index = arch_timer_ctx_index(timer_ctx);
211 u64 cval, now; 218 u64 cval, now;
212 219
213 if (timer->loaded) { 220 if (timer->loaded) {
214 u32 cnt_ctl; 221 u32 cnt_ctl = 0;
222
223 switch (index) {
224 case TIMER_VTIMER:
225 cnt_ctl = read_sysreg_el0(cntv_ctl);
226 break;
227 case TIMER_PTIMER:
228 cnt_ctl = read_sysreg_el0(cntp_ctl);
229 break;
230 case NR_KVM_TIMERS:
231 /* GCC is braindead */
232 cnt_ctl = 0;
233 break;
234 }
215 235
216 /* Only the virtual timer can be loaded so far */
217 cnt_ctl = read_sysreg_el0(cntv_ctl);
218 return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) && 236 return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
219 (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) && 237 (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
220 !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK); 238 !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
@@ -310,7 +328,7 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
310 return; 328 return;
311 329
312 /* 330 /*
313 * The vtimer virtual interrupt is a 'mapped' interrupt, meaning part 331 * If the timer virtual interrupt is a 'mapped' interrupt, part
314 * of its lifecycle is offloaded to the hardware, and we therefore may 332 * of its lifecycle is offloaded to the hardware, and we therefore may
315 * not have lowered the irq.level value before having to signal a new 333 * not have lowered the irq.level value before having to signal a new
316 * interrupt, but have to signal an interrupt every time the level is 334 * interrupt, but have to signal an interrupt every time the level is
@@ -319,31 +337,55 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
319 level = kvm_timer_should_fire(vtimer); 337 level = kvm_timer_should_fire(vtimer);
320 kvm_timer_update_irq(vcpu, level, vtimer); 338 kvm_timer_update_irq(vcpu, level, vtimer);
321 339
340 if (has_vhe()) {
341 level = kvm_timer_should_fire(ptimer);
342 kvm_timer_update_irq(vcpu, level, ptimer);
343
344 return;
345 }
346
322 phys_timer_emulate(vcpu); 347 phys_timer_emulate(vcpu);
323 348
324 if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) 349 if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
325 kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); 350 kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
326} 351}
327 352
328static void vtimer_save_state(struct kvm_vcpu *vcpu) 353static void timer_save_state(struct arch_timer_context *ctx)
329{ 354{
330 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 355 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
331 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 356 enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
332 unsigned long flags; 357 unsigned long flags;
333 358
359 if (!timer->enabled)
360 return;
361
334 local_irq_save(flags); 362 local_irq_save(flags);
335 363
336 if (!timer->loaded) 364 if (!timer->loaded)
337 goto out; 365 goto out;
338 366
339 if (timer->enabled) { 367 switch (index) {
340 vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); 368 case TIMER_VTIMER:
341 vtimer->cnt_cval = read_sysreg_el0(cntv_cval); 369 ctx->cnt_ctl = read_sysreg_el0(cntv_ctl);
342 } 370 ctx->cnt_cval = read_sysreg_el0(cntv_cval);
343 371
344 /* Disable the virtual timer */ 372 /* Disable the timer */
345 write_sysreg_el0(0, cntv_ctl); 373 write_sysreg_el0(0, cntv_ctl);
346 isb(); 374 isb();
375
376 break;
377 case TIMER_PTIMER:
378 ctx->cnt_ctl = read_sysreg_el0(cntp_ctl);
379 ctx->cnt_cval = read_sysreg_el0(cntp_cval);
380
381 /* Disable the timer */
382 write_sysreg_el0(0, cntp_ctl);
383 isb();
384
385 break;
386 case NR_KVM_TIMERS:
387 break; /* GCC is braindead */
388 }
347 389
348 timer->loaded = false; 390 timer->loaded = false;
349out: 391out:
@@ -382,21 +424,33 @@ static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
382 soft_timer_cancel(&timer->bg_timer); 424 soft_timer_cancel(&timer->bg_timer);
383} 425}
384 426
385static void vtimer_restore_state(struct kvm_vcpu *vcpu) 427static void timer_restore_state(struct arch_timer_context *ctx)
386{ 428{
387 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 429 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
388 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 430 enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
389 unsigned long flags; 431 unsigned long flags;
390 432
433 if (!timer->enabled)
434 return;
435
391 local_irq_save(flags); 436 local_irq_save(flags);
392 437
393 if (timer->loaded) 438 if (timer->loaded)
394 goto out; 439 goto out;
395 440
396 if (timer->enabled) { 441 switch (index) {
397 write_sysreg_el0(vtimer->cnt_cval, cntv_cval); 442 case TIMER_VTIMER:
443 write_sysreg_el0(ctx->cnt_cval, cntv_cval);
444 isb();
445 write_sysreg_el0(ctx->cnt_ctl, cntv_ctl);
446 break;
447 case TIMER_PTIMER:
448 write_sysreg_el0(ctx->cnt_cval, cntp_cval);
398 isb(); 449 isb();
399 write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl); 450 write_sysreg_el0(ctx->cnt_ctl, cntp_ctl);
451 break;
452 case NR_KVM_TIMERS:
453 break; /* GCC is braindead */
400 } 454 }
401 455
402 timer->loaded = true; 456 timer->loaded = true;
@@ -419,23 +473,23 @@ static void set_cntvoff(u64 cntvoff)
419 kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); 473 kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
420} 474}
421 475
422static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active) 476static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
423{ 477{
424 int r; 478 int r;
425 r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active); 479 r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
426 WARN_ON(r); 480 WARN_ON(r);
427} 481}
428 482
429static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu) 483static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
430{ 484{
431 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 485 struct kvm_vcpu *vcpu = ctx->vcpu;
432 bool phys_active; 486 bool phys_active;
433 487
434 if (irqchip_in_kernel(vcpu->kvm)) 488 if (irqchip_in_kernel(vcpu->kvm))
435 phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); 489 phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
436 else 490 else
437 phys_active = vtimer->irq.level; 491 phys_active = ctx->irq.level;
438 set_vtimer_irq_phys_active(vcpu, phys_active); 492 set_timer_irq_phys_active(ctx, phys_active);
439} 493}
440 494
441static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) 495static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
@@ -467,14 +521,22 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
467 if (unlikely(!timer->enabled)) 521 if (unlikely(!timer->enabled))
468 return; 522 return;
469 523
470 if (static_branch_likely(&has_gic_active_state)) 524 if (static_branch_likely(&has_gic_active_state)) {
471 kvm_timer_vcpu_load_gic(vcpu); 525 kvm_timer_vcpu_load_gic(vtimer);
472 else 526 if (has_vhe())
527 kvm_timer_vcpu_load_gic(ptimer);
528 } else {
473 kvm_timer_vcpu_load_nogic(vcpu); 529 kvm_timer_vcpu_load_nogic(vcpu);
530 }
474 531
475 set_cntvoff(vtimer->cntvoff); 532 set_cntvoff(vtimer->cntvoff);
476 533
477 vtimer_restore_state(vcpu); 534 timer_restore_state(vtimer);
535
536 if (has_vhe()) {
537 timer_restore_state(ptimer);
538 return;
539 }
478 540
479 /* Set the background timer for the physical timer emulation. */ 541 /* Set the background timer for the physical timer emulation. */
480 phys_timer_emulate(vcpu); 542 phys_timer_emulate(vcpu);
@@ -506,12 +568,17 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
506void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) 568void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
507{ 569{
508 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 570 struct arch_timer_cpu *timer = vcpu_timer(vcpu);
571 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
509 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 572 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
510 573
511 if (unlikely(!timer->enabled)) 574 if (unlikely(!timer->enabled))
512 return; 575 return;
513 576
514 vtimer_save_state(vcpu); 577 timer_save_state(vtimer);
578 if (has_vhe()) {
579 timer_save_state(ptimer);
580 return;
581 }
515 582
516 /* 583 /*
517 * Cancel the physical timer emulation, because the only case where we 584 * Cancel the physical timer emulation, because the only case where we
@@ -534,8 +601,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
534 * counter of non-VHE case. For VHE, the virtual counter uses a fixed 601 * counter of non-VHE case. For VHE, the virtual counter uses a fixed
535 * virtual offset of zero, so no need to zero CNTVOFF_EL2 register. 602 * virtual offset of zero, so no need to zero CNTVOFF_EL2 register.
536 */ 603 */
537 if (!has_vhe()) 604 set_cntvoff(0);
538 set_cntvoff(0);
539} 605}
540 606
541/* 607/*
@@ -550,7 +616,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
550 if (!kvm_timer_should_fire(vtimer)) { 616 if (!kvm_timer_should_fire(vtimer)) {
551 kvm_timer_update_irq(vcpu, false, vtimer); 617 kvm_timer_update_irq(vcpu, false, vtimer);
552 if (static_branch_likely(&has_gic_active_state)) 618 if (static_branch_likely(&has_gic_active_state))
553 set_vtimer_irq_phys_active(vcpu, false); 619 set_timer_irq_phys_active(vtimer, false);
554 else 620 else
555 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); 621 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
556 } 622 }
@@ -625,7 +691,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
625 ptimer->hrtimer.function = kvm_phys_timer_expire; 691 ptimer->hrtimer.function = kvm_phys_timer_expire;
626 692
627 vtimer->irq.irq = default_vtimer_irq.irq; 693 vtimer->irq.irq = default_vtimer_irq.irq;
694 vtimer->host_timer_irq = host_vtimer_irq;
695 vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
696
628 ptimer->irq.irq = default_ptimer_irq.irq; 697 ptimer->irq.irq = default_ptimer_irq.irq;
698 ptimer->host_timer_irq = host_ptimer_irq;
699 ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
629 700
630 vtimer->vcpu = vcpu; 701 vtimer->vcpu = vcpu;
631 ptimer->vcpu = vcpu; 702 ptimer->vcpu = vcpu;
@@ -634,6 +705,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
634static void kvm_timer_init_interrupt(void *info) 705static void kvm_timer_init_interrupt(void *info)
635{ 706{
636 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); 707 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
708 enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
637} 709}
638 710
639int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) 711int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
@@ -815,6 +887,8 @@ int kvm_timer_hyp_init(bool has_gic)
815 return -ENODEV; 887 return -ENODEV;
816 } 888 }
817 889
890 /* First, do the virtual EL1 timer irq */
891
818 if (info->virtual_irq <= 0) { 892 if (info->virtual_irq <= 0) {
819 kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n", 893 kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
820 info->virtual_irq); 894 info->virtual_irq);
@@ -825,15 +899,15 @@ int kvm_timer_hyp_init(bool has_gic)
825 host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq); 899 host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
826 if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH && 900 if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
827 host_vtimer_irq_flags != IRQF_TRIGGER_LOW) { 901 host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
828 kvm_err("Invalid trigger for IRQ%d, assuming level low\n", 902 kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
829 host_vtimer_irq); 903 host_vtimer_irq);
830 host_vtimer_irq_flags = IRQF_TRIGGER_LOW; 904 host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
831 } 905 }
832 906
833 err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler, 907 err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
834 "kvm guest timer", kvm_get_running_vcpus()); 908 "kvm guest vtimer", kvm_get_running_vcpus());
835 if (err) { 909 if (err) {
836 kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n", 910 kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
837 host_vtimer_irq, err); 911 host_vtimer_irq, err);
838 return err; 912 return err;
839 } 913 }
@@ -851,6 +925,43 @@ int kvm_timer_hyp_init(bool has_gic)
851 925
852 kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq); 926 kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
853 927
928 /* Now let's do the physical EL1 timer irq */
929
930 if (info->physical_irq > 0) {
931 host_ptimer_irq = info->physical_irq;
932 host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
933 if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
934 host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
935 kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
936 host_ptimer_irq);
937 host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
938 }
939
940 err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
941 "kvm guest ptimer", kvm_get_running_vcpus());
942 if (err) {
943 kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
944 host_ptimer_irq, err);
945 return err;
946 }
947
948 if (has_gic) {
949 err = irq_set_vcpu_affinity(host_ptimer_irq,
950 kvm_get_running_vcpus());
951 if (err) {
952 kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
953 goto out_free_irq;
954 }
955 }
956
957 kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
958 } else if (has_vhe()) {
959 kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
960 info->physical_irq);
961 err = -ENODEV;
962 goto out_free_irq;
963 }
964
854 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, 965 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
855 "kvm/arm/timer:starting", kvm_timer_starting_cpu, 966 "kvm/arm/timer:starting", kvm_timer_starting_cpu,
856 kvm_timer_dying_cpu); 967 kvm_timer_dying_cpu);
@@ -898,8 +1009,10 @@ bool kvm_arch_timer_get_input_level(int vintid)
898 1009
899 if (vintid == vcpu_vtimer(vcpu)->irq.irq) 1010 if (vintid == vcpu_vtimer(vcpu)->irq.irq)
900 timer = vcpu_vtimer(vcpu); 1011 timer = vcpu_vtimer(vcpu);
1012 else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
1013 timer = vcpu_ptimer(vcpu);
901 else 1014 else
902 BUG(); /* We only map the vtimer so far */ 1015 BUG();
903 1016
904 return kvm_timer_should_fire(timer); 1017 return kvm_timer_should_fire(timer);
905} 1018}
@@ -908,6 +1021,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
908{ 1021{
909 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 1022 struct arch_timer_cpu *timer = vcpu_timer(vcpu);
910 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 1023 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
1024 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
911 int ret; 1025 int ret;
912 1026
913 if (timer->enabled) 1027 if (timer->enabled)
@@ -930,14 +1044,21 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
930 if (ret) 1044 if (ret)
931 return ret; 1045 return ret;
932 1046
1047 if (has_vhe()) {
1048 ret = kvm_vgic_map_phys_irq(vcpu, host_ptimer_irq, ptimer->irq.irq,
1049 kvm_arch_timer_get_input_level);
1050 if (ret)
1051 return ret;
1052 }
1053
933no_vgic: 1054no_vgic:
934 timer->enabled = 1; 1055 timer->enabled = 1;
935 return 0; 1056 return 0;
936} 1057}
937 1058
938/* 1059/*
939 * On VHE system, we only need to configure trap on physical timer and counter 1060 * On VHE system, we only need to configure the EL2 timer trap register once,
940 * accesses in EL0 and EL1 once, not for every world switch. 1061 * not for every world switch.
941 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, 1062 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
942 * and this makes those bits have no effect for the host kernel execution. 1063 * and this makes those bits have no effect for the host kernel execution.
943 */ 1064 */
@@ -948,11 +1069,11 @@ void kvm_timer_init_vhe(void)
948 u64 val; 1069 u64 val;
949 1070
950 /* 1071 /*
951 * Disallow physical timer access for the guest. 1072 * VHE systems allow the guest direct access to the EL1 physical
952 * Physical counter access is allowed. 1073 * timer/counter.
953 */ 1074 */
954 val = read_sysreg(cnthctl_el2); 1075 val = read_sysreg(cnthctl_el2);
955 val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift); 1076 val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
956 val |= (CNTHCTL_EL1PCTEN << cnthctl_shift); 1077 val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
957 write_sysreg(val, cnthctl_el2); 1078 write_sysreg(val, cnthctl_el2);
958} 1079}