author     Linus Torvalds <torvalds@linux-foundation.org>   2008-10-16 18:36:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-10-16 18:36:00 -0400
commit     08d19f51f05a68ce89a289320ce4ed96e757df72 (patch)
tree       31c5d718d0aeaff5083fe533cd6e1f9fbbe846bb /arch/x86/kernel
parent     1c95e1b69073cff5ff179e592fa1a1e182c78a17 (diff)
parent     2381ad241d0bea1253a37f314b270848067640bb (diff)
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (134 commits)
KVM: ia64: Add intel iommu support for guests.
KVM: ia64: add directed mmio range support for kvm guests
KVM: ia64: Make pmt table be able to hold physical mmio entries.
KVM: Move irqchip_in_kernel() from ioapic.h to irq.h
KVM: Separate irq ack notification out of arch/x86/kvm/irq.c
KVM: Change is_mmio_pfn to kvm_is_mmio_pfn, and make it common for all archs
KVM: Move device assignment logic to common code
KVM: Device Assignment: Move vtd.c from arch/x86/kvm/ to virt/kvm/
KVM: VMX: enable invlpg exiting if EPT is disabled
KVM: x86: Silence various LAPIC-related host kernel messages
KVM: Device Assignment: Map mmio pages into VT-d page table
KVM: PIC: enhance IPI avoidance
KVM: MMU: add "oos_shadow" parameter to disable oos
KVM: MMU: speed up mmu_unsync_walk
KVM: MMU: out of sync shadow core
KVM: MMU: mmu_convert_notrap helper
KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
KVM: MMU: mmu_parent_walk
KVM: x86: trap invlpg
KVM: MMU: sync roots on mmu reload
...
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/kvmclock.c | 30
-rw-r--r--  arch/x86/kernel/pvclock.c  | 12
2 files changed, 42 insertions, 0 deletions
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d02def06ca91..774ac4991568 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -78,6 +78,34 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+/*
+ * If we don't do that, there is the possibility that the guest
+ * will calibrate under heavy load - thus, getting a lower lpj -
+ * and execute the delays themselves without load. This is wrong,
+ * because no delay loop can finish beforehand.
+ * Any heuristics is subject to fail, because ultimately, a large
+ * poll of guests can be running and trouble each other. So we preset
+ * lpj here
+ */
+static unsigned long kvm_get_tsc_khz(void)
+{
+	return preset_lpj;
+}
+
+static void kvm_get_preset_lpj(void)
+{
+	struct pvclock_vcpu_time_info *src;
+	unsigned long khz;
+	u64 lpj;
+
+	src = &per_cpu(hv_clock, 0);
+	khz = pvclock_tsc_khz(src);
+
+	lpj = ((u64)khz * 1000);
+	do_div(lpj, HZ);
+	preset_lpj = lpj;
+}
+
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
 	.read = kvm_clock_read,
@@ -153,6 +181,7 @@ void __init kvmclock_init(void)
 		pv_time_ops.get_wallclock = kvm_get_wallclock;
 		pv_time_ops.set_wallclock = kvm_set_wallclock;
 		pv_time_ops.sched_clock = kvm_clock_read;
+		pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
 #ifdef CONFIG_X86_LOCAL_APIC
 		pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
 #endif
@@ -163,6 +192,7 @@ void __init kvmclock_init(void)
 #ifdef CONFIG_KEXEC
 		machine_ops.crash_shutdown = kvm_crash_shutdown;
 #endif
+		kvm_get_preset_lpj();
 		clocksource_register(&kvm_clock);
 	}
 }
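The preset_lpj value wired up in the hunk above is plain arithmetic: the guest's TSC rate in kHz is scaled to Hz and divided by HZ (timer ticks per second), which yields loops-per-jiffy without ever running the calibration loop. Below is a minimal user-space sketch of that calculation, assuming an illustrative HZ of 1000 and a hypothetical 2 GHz guest TSC; neither constant comes from the patch.

#include <stdio.h>
#include <stdint.h>

#define HZ 1000	/* illustrative tick rate, not taken from the patch */

/* Same arithmetic as kvm_get_preset_lpj(): cycles per second / ticks per second. */
static unsigned long preset_lpj_from_khz(unsigned long khz)
{
	uint64_t lpj = (uint64_t)khz * 1000;	/* kHz -> Hz */
	return (unsigned long)(lpj / HZ);	/* the kernel uses do_div() here */
}

int main(void)
{
	unsigned long khz = 2000000;	/* hypothetical 2 GHz guest TSC */
	printf("preset lpj = %lu\n", preset_lpj_from_khz(khz));	/* prints 2000000 */
	return 0;
}

The kernel routes the division through do_div() because a plain 64-bit divide operator is not available on 32-bit x86; ordinary C division is fine in this sketch.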
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 05fbe9a0325a..4f9c55f3a7c0 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -97,6 +97,18 @@ static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
 	return dst->version;
 }
 
+unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
+{
+	u64 pv_tsc_khz = 1000000ULL << 32;
+
+	do_div(pv_tsc_khz, src->tsc_to_system_mul);
+	if (src->tsc_shift < 0)
+		pv_tsc_khz <<= -src->tsc_shift;
+	else
+		pv_tsc_khz >>= src->tsc_shift;
+	return pv_tsc_khz;
+}
+
 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
 	struct pvclock_shadow_time shadow;
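pvclock_tsc_khz() inverts the fixed-point scale the pvclock ABI uses to turn TSC deltas into nanoseconds (roughly ns = ((tsc << tsc_shift) * tsc_to_system_mul) >> 32 for a non-negative shift), so dividing 1000000 << 32 by the multiplier and undoing the shift recovers the TSC rate in kHz. Below is a user-space sketch of the same arithmetic with a hypothetical multiplier/shift pair that a hypervisor could publish for a 2 GHz TSC; the constants are illustrative, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

/*
 * Same math as pvclock_tsc_khz(): invert the (mul, shift) scale that maps
 * TSC ticks to nanoseconds. Illustrative user-space sketch, not kernel code.
 */
static unsigned long tsc_khz_from_scale(uint32_t tsc_to_system_mul, int8_t tsc_shift)
{
	uint64_t khz = 1000000ULL << 32;

	khz /= tsc_to_system_mul;	/* the kernel uses do_div() here */
	if (tsc_shift < 0)
		khz <<= -tsc_shift;
	else
		khz >>= tsc_shift;
	return (unsigned long)khz;
}

int main(void)
{
	/*
	 * Hypothetical scale for a 2 GHz TSC: ns = (tsc * 0x80000000) >> 32,
	 * i.e. one nanosecond per two cycles.
	 */
	printf("%lu kHz\n", tsc_khz_from_scale(0x80000000u, 0));	/* prints 2000000 */
	return 0;
}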