author		Andrew Jones <drjones@redhat.com>	2014-02-28 06:52:55 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-03-04 05:50:54 -0500
commit		332967a3eac06f6379283cf155c84fe7cd0537c2 (patch)
tree		d217992b4a019e4aeecc246bca3a1afbbbaee2f3 /arch/x86/kvm
parent		7e44e4495a398eb553ce561f29f9148f40a3448f (diff)
x86: kvm: introduce periodic global clock updates
commit 0061d53daf26f introduced a mechanism to execute a global clock update for a vm. We can apply this periodically in order to propagate host NTP corrections. Also, if all vcpus of a vm are pinned, then without an additional trigger, no guest NTP corrections can propagate either, as the current trigger is only vcpu cpu migration.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/x86.c	| 20
1 file changed, 20 insertions, 0 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ed9293a696d..1e91a246e996 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1660,6 +1660,20 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 					KVMCLOCK_UPDATE_DELAY);
 }
 
+#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
+
+static void kvmclock_sync_fn(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
+					   kvmclock_sync_work);
+	struct kvm *kvm = container_of(ka, struct kvm, arch);
+
+	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
+	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+					KVMCLOCK_SYNC_PERIOD);
+}
+
 static bool msr_mtrr_valid(unsigned msr)
 {
 	switch (msr) {
@@ -6736,6 +6750,7 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	int r;
 	struct msr_data msr;
+	struct kvm *kvm = vcpu->kvm;
 
 	r = vcpu_load(vcpu);
 	if (r)
@@ -6746,6 +6761,9 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	kvm_write_tsc(vcpu, &msr);
 	vcpu_put(vcpu);
 
+	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+					KVMCLOCK_SYNC_PERIOD);
+
 	return r;
 }
 
@@ -7039,6 +7057,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	pvclock_update_vm_gtod_copy(kvm);
 
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
+	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
 	return 0;
 }
@@ -7077,6 +7096,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 
 void kvm_arch_sync_events(struct kvm *kvm)
 {
+	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
 	kvm_free_all_assigned_devices(kvm);
 	kvm_free_pit(kvm);
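
For readers less familiar with the workqueue pattern this patch relies on, below is a minimal, self-contained sketch of a self-rescheduling delayed work. It is an illustration only, not part of the patch: the module name, the pr_info() message, and the local SYNC_PERIOD constant are made up for the example, while the real code hangs the work off struct kvm_arch, kicks kvmclock_update_work from the handler, and cancels it in kvm_arch_sync_events() as shown above.

/*
 * Sketch (not from the patch): a delayed work that re-arms itself,
 * mirroring kvmclock_sync_fn()'s periodic behaviour.  The names below
 * (periodic_sync, SYNC_PERIOD, the log message) are illustrative.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define SYNC_PERIOD (300 * HZ)	/* same 300 s cadence as KVMCLOCK_SYNC_PERIOD */

static struct delayed_work sync_work;

static void sync_fn(struct work_struct *work)
{
	/* The patch queues the global kvmclock update here; we just log. */
	pr_info("periodic_sync: tick\n");

	/* Re-arm so the work keeps firing every SYNC_PERIOD. */
	schedule_delayed_work(&sync_work, SYNC_PERIOD);
}

static int __init periodic_sync_init(void)
{
	INIT_DELAYED_WORK(&sync_work, sync_fn);
	/* First run one full period out, as kvm_arch_vcpu_postcreate() does. */
	schedule_delayed_work(&sync_work, SYNC_PERIOD);
	return 0;
}

static void __exit periodic_sync_exit(void)
{
	/* As in kvm_arch_sync_events(): safe even for self-requeueing works. */
	cancel_delayed_work_sync(&sync_work);
}

module_init(periodic_sync_init);
module_exit(periodic_sync_exit);
MODULE_LICENSE("GPL");

The key property is that each run re-arms itself with schedule_delayed_work(), so a single INIT_DELAYED_WORK() plus one initial schedule (done in kvm_arch_vcpu_postcreate() in the patch) yields the 300-second cadence, and cancel_delayed_work_sync() at teardown is safe even though the work re-queues itself.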