about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1
-rw-r--r--  arch/x86/kvm/x86.c             | 27
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e714f8c08ccf..9aa09d330a4b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -598,6 +598,7 @@ struct kvm_arch {
598 bool use_master_clock; 598 bool use_master_clock;
599 u64 master_kernel_ns; 599 u64 master_kernel_ns;
600 cycle_t master_cycle_now; 600 cycle_t master_cycle_now;
601 struct delayed_work kvmclock_update_work;
601 602
602 struct kvm_xen_hvm_config xen_hvm_config; 603 struct kvm_xen_hvm_config xen_hvm_config;
603 604
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 773eba799c45..5ed9293a696d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1628,14 +1628,21 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1628 * the others. 1628 * the others.
1629 * 1629 *
1630 * So in those cases, request a kvmclock update for all vcpus. 1630 * So in those cases, request a kvmclock update for all vcpus.
1631 * The worst case for a remote vcpu to update its kvmclock 1631 * We need to rate-limit these requests though, as they can
1632 * is then bounded by maximum nohz sleep latency. 1632 * considerably slow guests that have a large number of vcpus.
1633 * The time for a remote vcpu to update its kvmclock is bound
1634 * by the delay we use to rate-limit the updates.
1633 */ 1635 */
1634 1636
1635static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 1637#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1638
1639static void kvmclock_update_fn(struct work_struct *work)
1636{ 1640{
1637 int i; 1641 int i;
1638 struct kvm *kvm = v->kvm; 1642 struct delayed_work *dwork = to_delayed_work(work);
1643 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1644 kvmclock_update_work);
1645 struct kvm *kvm = container_of(ka, struct kvm, arch);
1639 struct kvm_vcpu *vcpu; 1646 struct kvm_vcpu *vcpu;
1640 1647
1641 kvm_for_each_vcpu(i, vcpu, kvm) { 1648 kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -1644,6 +1651,15 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1644 } 1651 }
1645} 1652}
1646 1653
1654static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1655{
1656 struct kvm *kvm = v->kvm;
1657
1658 set_bit(KVM_REQ_CLOCK_UPDATE, &v->requests);
1659 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1660 KVMCLOCK_UPDATE_DELAY);
1661}
1662
1647static bool msr_mtrr_valid(unsigned msr) 1663static bool msr_mtrr_valid(unsigned msr)
1648{ 1664{
1649 switch (msr) { 1665 switch (msr) {
@@ -7022,6 +7038,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
7022 7038
7023 pvclock_update_vm_gtod_copy(kvm); 7039 pvclock_update_vm_gtod_copy(kvm);
7024 7040
7041 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
7042
7025 return 0; 7043 return 0;
7026} 7044}
7027 7045
@@ -7059,6 +7077,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
7059 7077
7060void kvm_arch_sync_events(struct kvm *kvm) 7078void kvm_arch_sync_events(struct kvm *kvm)
7061{ 7079{
7080 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
7062 kvm_free_all_assigned_devices(kvm); 7081 kvm_free_all_assigned_devices(kvm);
7063 kvm_free_pit(kvm); 7082 kvm_free_pit(kvm);
7064} 7083}