aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrey Smetanin <asmetanin@virtuozzo.com>2015-11-30 11:22:21 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2015-12-16 12:49:45 -0500
commit1f4b34f825e8cef6f493d06b46605384785b3d16 (patch)
treec7b5b3cb1b5af261d6b567bbcce4c1ccfdc928fa
parent765eaa0f70eaa274ec8b815d8c210c20cf7b6dbc (diff)
kvm/x86: Hyper-V SynIC timers
Per Hyper-V specification (and as required by Hyper-V-aware guests), SynIC provides 4 per-vCPU timers. Each timer is programmed via a pair of MSRs, and signals expiration by delivering a special format message to the configured SynIC message slot and triggering the corresponding synthetic interrupt. Note: as implemented by this patch, all periodic timers are "lazy" (i.e. if the vCPU wasn't scheduled for more than the timer period the timer events are lost), regardless of the corresponding configuration MSR. If deemed necessary, the "catch up" mode (the timer period is shortened until the timer catches up) will be implemented later. Changes v2: * Use remainder to calculate periodic timer expiration time Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com> Reviewed-by: Roman Kagan <rkagan@virtuozzo.com> CC: Gleb Natapov <gleb@kernel.org> CC: Paolo Bonzini <pbonzini@redhat.com> CC: "K. Y. Srinivasan" <kys@microsoft.com> CC: Haiyang Zhang <haiyangz@microsoft.com> CC: Vitaly Kuznetsov <vkuznets@redhat.com> CC: Roman Kagan <rkagan@virtuozzo.com> CC: Denis V. Lunev <den@openvz.org> CC: qemu-devel@nongnu.org Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/include/asm/kvm_host.h13
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h6
-rw-r--r--arch/x86/kvm/hyperv.c318
-rw-r--r--arch/x86/kvm/hyperv.h24
-rw-r--r--arch/x86/kvm/x86.c9
-rw-r--r--include/linux/kvm_host.h1
6 files changed, 368 insertions, 3 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 814007701f8b..a7c89876698b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -379,6 +379,17 @@ struct kvm_mtrr {
379 struct list_head head; 379 struct list_head head;
380}; 380};
381 381
/* Hyper-V SynIC timer (one of HV_SYNIC_STIMER_COUNT per vCPU) */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;		/* backing host hrtimer */
	int index;			/* timer number within the vCPU */
	u64 config;			/* HV_X64_MSR_STIMERx_CONFIG contents */
	u64 count;			/* period (periodic) or absolute deadline (one-shot),
					 * in 100ns reference-counter units */
	u64 exp_time;			/* next expiration, 100ns reference-counter units */
	struct hv_message msg;		/* preformatted expiration message */
	bool msg_pending;		/* delivery failed; retried on SINT ack */
};
392
382/* Hyper-V synthetic interrupt controller (SynIC)*/ 393/* Hyper-V synthetic interrupt controller (SynIC)*/
383struct kvm_vcpu_hv_synic { 394struct kvm_vcpu_hv_synic {
384 u64 version; 395 u64 version;
@@ -398,6 +409,8 @@ struct kvm_vcpu_hv {
398 s64 runtime_offset; 409 s64 runtime_offset;
399 struct kvm_vcpu_hv_synic synic; 410 struct kvm_vcpu_hv_synic synic;
400 struct kvm_hyperv_exit exit; 411 struct kvm_hyperv_exit exit;
412 struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
413 DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
401}; 414};
402 415
403struct kvm_vcpu_arch { 416struct kvm_vcpu_arch {
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 2a5629eac0b1..7956412d09bd 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -355,4 +355,10 @@ struct hv_timer_message_payload {
355 __u64 delivery_time; /* When the message was delivered */ 355 __u64 delivery_time; /* When the message was delivered */
356}; 356};
357 357
/* Synthetic timer configuration MSR bits (Hyper-V TLFS). */
#define HV_STIMER_ENABLE		(1ULL << 0)
#define HV_STIMER_PERIODIC		(1ULL << 1)
#define HV_STIMER_LAZY			(1ULL << 2)
#define HV_STIMER_AUTOENABLE		(1ULL << 3)
/* SINTx field of the config MSR: which synthetic interrupt to raise. */
#define HV_STIMER_SINT(config)		(__u8)(((config) >> 16) & 0x0F)
363
358#endif 364#endif
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 6412b6b504b5..8ff88293b906 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -147,15 +147,32 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
147{ 147{
148 struct kvm *kvm = vcpu->kvm; 148 struct kvm *kvm = vcpu->kvm;
149 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); 149 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
150 int gsi, idx; 150 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
151 struct kvm_vcpu_hv_stimer *stimer;
152 int gsi, idx, stimers_pending;
151 153
152 vcpu_debug(vcpu, "Hyper-V SynIC acked sint %d\n", sint); 154 vcpu_debug(vcpu, "Hyper-V SynIC acked sint %d\n", sint);
153 155
154 if (synic->msg_page & HV_SYNIC_SIMP_ENABLE) 156 if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
155 synic_clear_sint_msg_pending(synic, sint); 157 synic_clear_sint_msg_pending(synic, sint);
156 158
159 /* Try to deliver pending Hyper-V SynIC timers messages */
160 stimers_pending = 0;
161 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
162 stimer = &hv_vcpu->stimer[idx];
163 if (stimer->msg_pending &&
164 (stimer->config & HV_STIMER_ENABLE) &&
165 HV_STIMER_SINT(stimer->config) == sint) {
166 set_bit(stimer->index,
167 hv_vcpu->stimer_pending_bitmap);
168 stimers_pending++;
169 }
170 }
171 if (stimers_pending)
172 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
173
157 idx = srcu_read_lock(&kvm->irq_srcu); 174 idx = srcu_read_lock(&kvm->irq_srcu);
158 gsi = atomic_read(&vcpu_to_synic(vcpu)->sint_to_gsi[sint]); 175 gsi = atomic_read(&synic->sint_to_gsi[sint]);
159 if (gsi != -1) 176 if (gsi != -1)
160 kvm_notify_acked_gsi(kvm, gsi); 177 kvm_notify_acked_gsi(kvm, gsi);
161 srcu_read_unlock(&kvm->irq_srcu, idx); 178 srcu_read_unlock(&kvm->irq_srcu, idx);
@@ -371,9 +388,268 @@ static u64 get_time_ref_counter(struct kvm *kvm)
371 return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); 388 return div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
372} 389}
373 390
391static void stimer_mark_expired(struct kvm_vcpu_hv_stimer *stimer,
392 bool vcpu_kick)
393{
394 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
395
396 set_bit(stimer->index,
397 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
398 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
399 if (vcpu_kick)
400 kvm_vcpu_kick(vcpu);
401}
402
/* Cancel the backing hrtimer; safe even if the timer is not queued. */
static void stimer_stop(struct kvm_vcpu_hv_stimer *stimer)
{
	hrtimer_cancel(&stimer->timer);
}
407
408static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
409{
410 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
411
412 stimer_stop(stimer);
413 clear_bit(stimer->index,
414 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
415 stimer->msg_pending = false;
416}
417
418static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
419{
420 struct kvm_vcpu_hv_stimer *stimer;
421
422 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
423 stimer_mark_expired(stimer, true);
424
425 return HRTIMER_NORESTART;
426}
427
/*
 * Re-arm a periodic timer after it has fired.
 *
 * exp_time is advanced to the next multiple of the period (stimer->count)
 * beyond the current reference-counter reading; computing the remainder
 * skips any periods missed while the vCPU was not running ("lazy"
 * periodic timers, per the commit description).
 */
static void stimer_restart(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;
	u64 remainder;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	div64_u64_rem(time_now - stimer->exp_time, stimer->count, &remainder);
	stimer->exp_time = time_now + (stimer->count - remainder);

	/* reference-counter ticks are 100ns units, hrtimer wants ns */
	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now,
				   100 * (stimer->exp_time - time_now)),
		      HRTIMER_MODE_ABS);
}
445
/*
 * Arm @stimer according to its current config/count.
 *
 * count and exp_time are in 100ns units of the partition reference counter
 * (get_time_ref_counter() divides nanoseconds by 100); hrtimer deadlines
 * are in nanoseconds, hence the "100 *" conversions below.
 *
 * Returns 0 on success, -EINVAL for a periodic timer with a zero period.
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->count == 0)
			return -EINVAL;

		/* first expiration is one full period from now */
		stimer->exp_time = time_now + stimer->count;
		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now, 100 * stimer->count),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	/* one-shot: count holds an absolute expiration time */
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4(15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_expired(stimer, false);
		return 0;
	}

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}
481
482static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
483 bool host)
484{
485 if (stimer->count == 0 || HV_STIMER_SINT(config) == 0)
486 config &= ~HV_STIMER_ENABLE;
487 stimer->config = config;
488 stimer_cleanup(stimer);
489 if (stimer->config & HV_STIMER_ENABLE)
490 if (stimer_start(stimer))
491 return 1;
492 return 0;
493}
494
495static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
496 bool host)
497{
498 stimer->count = count;
499
500 stimer_cleanup(stimer);
501 if (stimer->count == 0)
502 stimer->config &= ~HV_STIMER_ENABLE;
503 else if (stimer->config & HV_STIMER_AUTOENABLE) {
504 stimer->config |= HV_STIMER_ENABLE;
505 if (stimer_start(stimer))
506 return 1;
507 }
508
509 return 0;
510}
511
/* Read back HV_X64_MSR_STIMERx_CONFIG; always succeeds. */
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}
517
/* Read back HV_X64_MSR_STIMERx_COUNT; always succeeds. */
static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}
523
/*
 * Copy @src_msg into the SynIC message-page slot for @sint and raise the
 * corresponding synthetic interrupt.
 *
 * Returns 0 on success, -ENOENT if the guest has not enabled the message
 * page, -EFAULT if the page cannot be mapped or the interrupt cannot be
 * delivered, and -EAGAIN if the slot is still occupied -- in that case the
 * msg_pending flag is set in the slot header and delivery is retried when
 * the guest acks the SINT (see kvm_hv_notify_acked_sint()).
 */
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	/*
	 * Claim the slot atomically against concurrent writers; a slot is
	 * free only while its message_type is HVMSG_NONE.
	 */
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		/* payload first, then header fields that mark it valid */
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		/*
		 * NOTE(review): synic_set_irq() appears to return >=1 on
		 * successful delivery and 0 on failure -- confirm against
		 * its definition.
		 */
		r = synic_set_irq(synic, sint);
		if (r >= 1)
			r = 0;
		else if (r == 0)
			r = -EFAULT;
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}
564
565static void stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
566{
567 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
568 struct hv_message *msg = &stimer->msg;
569 struct hv_timer_message_payload *payload =
570 (struct hv_timer_message_payload *)&msg->u.payload;
571 int r;
572
573 stimer->msg_pending = true;
574 payload->expiration_time = stimer->exp_time;
575 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
576 r = synic_deliver_msg(vcpu_to_synic(vcpu),
577 HV_STIMER_SINT(stimer->config), msg);
578 if (!r)
579 stimer->msg_pending = false;
580}
581
582static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
583{
584 stimer_send_msg(stimer);
585 if (!(stimer->config & HV_STIMER_PERIODIC))
586 stimer->config |= ~HV_STIMER_ENABLE;
587 else
588 stimer_restart(stimer);
589}
590
591void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
592{
593 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
594 struct kvm_vcpu_hv_stimer *stimer;
595 u64 time_now;
596 int i;
597
598 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
599 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
600 stimer = &hv_vcpu->stimer[i];
601 stimer_stop(stimer);
602 if (stimer->config & HV_STIMER_ENABLE) {
603 time_now = get_time_ref_counter(vcpu->kvm);
604 if (time_now >= stimer->exp_time)
605 stimer_expiration(stimer);
606 }
607 }
608}
609
610void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
611{
612 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
613 int i;
614
615 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
616 stimer_cleanup(&hv_vcpu->stimer[i]);
617}
618
619static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
620{
621 struct hv_message *msg = &stimer->msg;
622 struct hv_timer_message_payload *payload =
623 (struct hv_timer_message_payload *)&msg->u.payload;
624
625 memset(&msg->header, 0, sizeof(msg->header));
626 msg->header.message_type = HVMSG_TIMER_EXPIRED;
627 msg->header.payload_size = sizeof(*payload);
628
629 payload->timer_index = stimer->index;
630 payload->expiration_time = 0;
631 payload->delivery_time = 0;
632}
633
634static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
635{
636 memset(stimer, 0, sizeof(*stimer));
637 stimer->index = timer_index;
638 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
639 stimer->timer.function = stimer_timer_callback;
640 stimer_prepare_msg(stimer);
641}
642
374void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) 643void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
375{ 644{
376 synic_init(vcpu_to_synic(vcpu)); 645 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
646 int i;
647
648 synic_init(&hv_vcpu->synic);
649
650 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
651 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
652 stimer_init(&hv_vcpu->stimer[i], i);
377} 653}
378 654
379int kvm_hv_activate_synic(struct kvm_vcpu *vcpu) 655int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
@@ -590,6 +866,24 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
590 case HV_X64_MSR_EOM: 866 case HV_X64_MSR_EOM:
591 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: 867 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
592 return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host); 868 return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
869 case HV_X64_MSR_STIMER0_CONFIG:
870 case HV_X64_MSR_STIMER1_CONFIG:
871 case HV_X64_MSR_STIMER2_CONFIG:
872 case HV_X64_MSR_STIMER3_CONFIG: {
873 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
874
875 return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
876 data, host);
877 }
878 case HV_X64_MSR_STIMER0_COUNT:
879 case HV_X64_MSR_STIMER1_COUNT:
880 case HV_X64_MSR_STIMER2_COUNT:
881 case HV_X64_MSR_STIMER3_COUNT: {
882 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
883
884 return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
885 data, host);
886 }
593 default: 887 default:
594 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n", 888 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
595 msr, data); 889 msr, data);
@@ -673,6 +967,24 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
673 case HV_X64_MSR_EOM: 967 case HV_X64_MSR_EOM:
674 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: 968 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
675 return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata); 969 return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
970 case HV_X64_MSR_STIMER0_CONFIG:
971 case HV_X64_MSR_STIMER1_CONFIG:
972 case HV_X64_MSR_STIMER2_CONFIG:
973 case HV_X64_MSR_STIMER3_CONFIG: {
974 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
975
976 return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
977 pdata);
978 }
979 case HV_X64_MSR_STIMER0_COUNT:
980 case HV_X64_MSR_STIMER1_COUNT:
981 case HV_X64_MSR_STIMER2_COUNT:
982 case HV_X64_MSR_STIMER3_COUNT: {
983 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
984
985 return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
986 pdata);
987 }
676 default: 988 default:
677 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); 989 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
678 return 1; 990 return 1;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index d5d8217c59bb..60eccd4bd1d3 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -59,5 +59,29 @@ void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
59int kvm_hv_activate_synic(struct kvm_vcpu *vcpu); 59int kvm_hv_activate_synic(struct kvm_vcpu *vcpu);
60 60
61void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); 61void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
62void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
63
/* Map (vcpu, timer_index) to the corresponding per-vCPU SynIC timer. */
static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
							int timer_index)
{
	return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
}
69
/*
 * Recover the owning vCPU from a timer pointer.
 *
 * stimer - stimer->index steps back to &stimer[0] of the embedding array
 * (timers are initialized with stimer[i].index == i), so container_of()
 * on stimer[0] yields the enclosing kvm_vcpu_hv.
 */
static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu_to_vcpu(hv_vcpu);
}
78
/*
 * True if any SynIC timer on this vCPU is awaiting processing -- used to
 * count the vCPU as having a pending event (kvm_vcpu_has_events()).
 */
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}
84
85void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
62 86
63#endif 87#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f1d6501180ec..b6102c1eb3b1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -967,6 +967,7 @@ static u32 emulated_msrs[] = {
967 HV_X64_MSR_VP_INDEX, 967 HV_X64_MSR_VP_INDEX,
968 HV_X64_MSR_VP_RUNTIME, 968 HV_X64_MSR_VP_RUNTIME,
969 HV_X64_MSR_SCONTROL, 969 HV_X64_MSR_SCONTROL,
970 HV_X64_MSR_STIMER0_CONFIG,
970 HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 971 HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
971 MSR_KVM_PV_EOI_EN, 972 MSR_KVM_PV_EOI_EN,
972 973
@@ -2199,6 +2200,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2199 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 2200 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2200 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 2201 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2201 case HV_X64_MSR_CRASH_CTL: 2202 case HV_X64_MSR_CRASH_CTL:
2203 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2202 return kvm_hv_set_msr_common(vcpu, msr, data, 2204 return kvm_hv_set_msr_common(vcpu, msr, data,
2203 msr_info->host_initiated); 2205 msr_info->host_initiated);
2204 case MSR_IA32_BBL_CR_CTL3: 2206 case MSR_IA32_BBL_CR_CTL3:
@@ -2403,6 +2405,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2403 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 2405 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2404 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 2406 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2405 case HV_X64_MSR_CRASH_CTL: 2407 case HV_X64_MSR_CRASH_CTL:
2408 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2406 return kvm_hv_get_msr_common(vcpu, 2409 return kvm_hv_get_msr_common(vcpu,
2407 msr_info->index, &msr_info->data); 2410 msr_info->index, &msr_info->data);
2408 break; 2411 break;
@@ -6489,6 +6492,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6489 r = 0; 6492 r = 0;
6490 goto out; 6493 goto out;
6491 } 6494 }
6495 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
6496 kvm_hv_process_stimers(vcpu);
6492 } 6497 }
6493 6498
6494 /* 6499 /*
@@ -7649,6 +7654,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
7649{ 7654{
7650 int idx; 7655 int idx;
7651 7656
7657 kvm_hv_vcpu_uninit(vcpu);
7652 kvm_pmu_destroy(vcpu); 7658 kvm_pmu_destroy(vcpu);
7653 kfree(vcpu->arch.mce_banks); 7659 kfree(vcpu->arch.mce_banks);
7654 kvm_free_lapic(vcpu); 7660 kvm_free_lapic(vcpu);
@@ -8043,6 +8049,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
8043 kvm_cpu_has_interrupt(vcpu)) 8049 kvm_cpu_has_interrupt(vcpu))
8044 return true; 8050 return true;
8045 8051
8052 if (kvm_hv_has_stimer_pending(vcpu))
8053 return true;
8054
8046 return false; 8055 return false;
8047} 8056}
8048 8057
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f44c24b81b17..2969c474a399 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -144,6 +144,7 @@ static inline bool is_error_page(struct page *page)
144#define KVM_REQ_IOAPIC_EOI_EXIT 28 144#define KVM_REQ_IOAPIC_EOI_EXIT 28
145#define KVM_REQ_HV_RESET 29 145#define KVM_REQ_HV_RESET 29
146#define KVM_REQ_HV_EXIT 30 146#define KVM_REQ_HV_EXIT 30
147#define KVM_REQ_HV_STIMER 31
147 148
148#define KVM_USERSPACE_IRQ_SOURCE_ID 0 149#define KVM_USERSPACE_IRQ_SOURCE_ID 0
149#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 150#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1