author		Christoffer Dall <christoffer.dall@arm.com>	2018-09-18 13:08:18 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2019-02-19 16:05:41 -0500
commit		e604dd5d45c75c2112424dec74853efb708f4fa6 (patch)
tree		0898f81bdaa7ab3e90338ffaf7e8bd34ec2dc914 /virt/kvm
parent		84135d3d18da2ff17d3ad1a609b2818cc3049552 (diff)
KVM: arm/arm64: timer: Rework data structures for multiple timers
Prepare for having 4 timer data structures (2 for now).
Move the 'loaded' field to the cpu data structure rather than the individual
timer structure, in preparation for assigning the EL1 phys timer as well.
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/arm/arch_timer.c	58
1 file changed, 30 insertions(+), 28 deletions(-)
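The structure definitions themselves live in include/kvm/arm_arch_timer.h, outside this diffstat, so the layout below is only a sketch reconstructed from the uses in the diff that follows, not the verbatim header: each timer context gains its own hrtimer and a vcpu back-pointer, while 'loaded' moves up into the per-vcpu arch_timer_cpu.

/* Sketch only -- reconstructed from usage in this patch, not the actual header. */
struct arch_timer_context {
	struct kvm_vcpu		*vcpu;		/* back-pointer, set in kvm_timer_vcpu_init() */
	struct hrtimer		hrtimer;	/* soft timer emulating this timer */
	/* ... irq config, cntvoff, saved cnt_ctl/cnt_cval ... */
};

struct arch_timer_cpu {
	struct arch_timer_context	vtimer;		/* EL1 virtual timer */
	struct arch_timer_context	ptimer;		/* EL1 physical timer */
	struct hrtimer			bg_timer;	/* wakes a blocked vcpu */
	bool				loaded;		/* hw state loaded; now per-vcpu, not per-timer */
	bool				enabled;
};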
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index f7d377448438..471f9fd004c9 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -184,13 +184,11 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
 static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_context *ptimer;
-	struct arch_timer_cpu *timer;
 	struct kvm_vcpu *vcpu;
 	u64 ns;
 
-	timer = container_of(hrt, struct arch_timer_cpu, phys_timer);
-	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
-	ptimer = vcpu_ptimer(vcpu);
+	ptimer = container_of(hrt, struct arch_timer_context, hrtimer);
+	vcpu = ptimer->vcpu;
 
 	/*
 	 * Check that the timer has really expired from the guest's
@@ -209,9 +207,10 @@ static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
 
 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 {
+	struct arch_timer_cpu *timer = vcpu_timer(timer_ctx->vcpu);
 	u64 cval, now;
 
-	if (timer_ctx->loaded) {
+	if (timer->loaded) {
 		u32 cnt_ctl;
 
 		/* Only the virtual timer can be loaded so far */
@@ -280,7 +279,6 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 /* Schedule the background timer for the emulated timer. */
 static void phys_timer_emulate(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	/*
@@ -289,11 +287,11 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
 	 * then we also don't need a soft timer.
 	 */
 	if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
-		soft_timer_cancel(&timer->phys_timer);
+		soft_timer_cancel(&ptimer->hrtimer);
 		return;
 	}
 
-	soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));
+	soft_timer_start(&ptimer->hrtimer, kvm_timer_compute_delta(ptimer));
 }
 
 /*
@@ -303,7 +301,7 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
  */
 static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 	bool level;
@@ -329,13 +327,13 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (!vtimer->loaded)
+	if (!timer->loaded)
 		goto out;
 
 	if (timer->enabled) {
@@ -347,7 +345,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	write_sysreg_el0(0, cntv_ctl);
 	isb();
 
-	vtimer->loaded = false;
+	timer->loaded = false;
 out:
 	local_irq_restore(flags);
 }
@@ -359,7 +357,7 @@ out:
  */
 static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
@@ -379,20 +377,20 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 
 static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 
 	soft_timer_cancel(&timer->bg_timer);
 }
 
 static void vtimer_restore_state(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (vtimer->loaded)
+	if (timer->loaded)
 		goto out;
 
 	if (timer->enabled) {
@@ -401,7 +399,7 @@ static void vtimer_restore_state(struct kvm_vcpu *vcpu)
 		write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
 	}
 
-	vtimer->loaded = true;
+	timer->loaded = true;
 out:
 	local_irq_restore(flags);
 }
@@ -462,7 +460,7 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
@@ -507,7 +505,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	if (unlikely(!timer->enabled))
 		return;
@@ -523,7 +522,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	 * In any case, we re-schedule the hrtimer for the physical timer when
 	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
 	 */
-	soft_timer_cancel(&timer->phys_timer);
+	soft_timer_cancel(&ptimer->hrtimer);
 
 	if (swait_active(kvm_arch_vcpu_wq(vcpu)))
 		kvm_timer_blocking(vcpu);
@@ -559,7 +558,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 
 	if (unlikely(!timer->enabled))
 		return;
@@ -570,7 +569,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
@@ -611,22 +610,25 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
 
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	/* Synchronize cntvoff across all vtimers of a VM. */
 	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
-	vcpu_ptimer(vcpu)->cntvoff = 0;
+	ptimer->cntvoff = 0;
 
 	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	timer->bg_timer.function = kvm_bg_timer_expire;
 
-	hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	timer->phys_timer.function = kvm_phys_timer_expire;
+	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	ptimer->hrtimer.function = kvm_phys_timer_expire;
 
 	vtimer->irq.irq = default_vtimer_irq.irq;
 	ptimer->irq.irq = default_ptimer_irq.irq;
+
+	vtimer->vcpu = vcpu;
+	ptimer->vcpu = vcpu;
 }
 
 static void kvm_timer_init_interrupt(void *info)
@@ -860,7 +862,7 @@ out_free_irq:
 
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 
 	soft_timer_cancel(&timer->bg_timer);
 }
@@ -904,7 +906,7 @@ bool kvm_arch_timer_get_input_level(int vintid)
 
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	int ret;
 
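A note on why the expiry handler had to change: container_of() resolves a member name at compile time, so the old double hop (hrtimer to arch_timer_cpu via the hard-coded phys_timer member, then arch_timer_cpu to kvm_vcpu) cannot serve several timer contexts with one handler. Embedding the hrtimer in each context and storing a vcpu back-pointer lets any number of contexts share the same callback. Below is a standalone userspace demo of that pattern, with hypothetical names rather than KVM code:

/* Standalone demo (userspace, hypothetical names): the callback gets a
 * pointer to the embedded timer, recovers its enclosing context with one
 * container_of(), then follows the back-pointer to the owner. This works
 * for any number of contexts, unlike hard-coding the member name. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_hrtimer { long expires; };
struct owner;

struct timer_ctx {
	struct fake_hrtimer hrt;	/* embedded, like ctx->hrtimer */
	struct owner *owner;		/* back-pointer, like ctx->vcpu */
	int id;
};

struct owner {
	struct timer_ctx timers[4];	/* room for the 4 timers the commit anticipates */
};

/* One handler serves every context, regardless of which member fired. */
static void expire(struct fake_hrtimer *hrt)
{
	struct timer_ctx *t = container_of(hrt, struct timer_ctx, hrt);

	printf("timer %d of owner %p fired\n", t->id, (void *)t->owner);
}

int main(void)
{
	struct owner o;
	int i;

	for (i = 0; i < 4; i++) {
		o.timers[i].id = i;
		o.timers[i].owner = &o;	/* set once at init, like kvm_timer_vcpu_init() */
	}
	expire(&o.timers[2].hrt);	/* handler never needs to know which member fired */
	return 0;
}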