aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2009-09-07 04:12:18 -0400
committerAvi Kivity <avi@redhat.com>2009-12-03 02:32:21 -0500
commit18863bdd60f895f3b3ba16b15e8331aee781e8ec (patch)
tree2d0e456a8d2dc12def13f2fed386ca9e5a440823 /arch/x86
parent44ea2b1758d88ad822e65b1c4c21ca6164494e27 (diff)
KVM: x86 shared msr infrastructure
The various syscall-related MSRs are fairly expensive to switch. Currently we switch them on every vcpu preemption, which is far too often: - if we're switching to a kernel thread (idle task, threaded interrupt, kernel-mode virtio server (vhost-net), for example) and back, then there's no need to switch those MSRs since kernel threads won't be exiting to userspace. - if we're switching to another guest running an identical OS, most likely those MSRs will have the same value, so there's little point in reloading them. - if we're running the same OS on the guest and host, the MSRs will have identical values and reloading is unnecessary. This patch uses the new user return notifiers to implement last-minute switching, and checks the msr values to avoid unnecessary reloading. Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/x86.c81
3 files changed, 85 insertions, 0 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0558ff8c32a..26a74b7bb6b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -809,4 +809,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
809int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); 809int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
810int kvm_cpu_get_interrupt(struct kvm_vcpu *v); 810int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
811 811
812void kvm_define_shared_msr(unsigned index, u32 msr);
813void kvm_set_shared_msr(unsigned index, u64 val);
814
812#endif /* _ASM_X86_KVM_HOST_H */ 815#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index b84e571f417..4cd49833246 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
28 select HAVE_KVM_IRQCHIP 28 select HAVE_KVM_IRQCHIP
29 select HAVE_KVM_EVENTFD 29 select HAVE_KVM_EVENTFD
30 select KVM_APIC_ARCHITECTURE 30 select KVM_APIC_ARCHITECTURE
31 select USER_RETURN_NOTIFIER
31 ---help--- 32 ---help---
32 Support hosting fully virtualized guest machines using hardware 33 Support hosting fully virtualized guest machines using hardware
33 virtualization extensions. You will need a fairly recent 34 virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e16cdc9ec0c..58c5cddf363 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,6 +37,7 @@
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/cpufreq.h> 39#include <linux/cpufreq.h>
40#include <linux/user-return-notifier.h>
40#include <trace/events/kvm.h> 41#include <trace/events/kvm.h>
41#undef TRACE_INCLUDE_FILE 42#undef TRACE_INCLUDE_FILE
42#define CREATE_TRACE_POINTS 43#define CREATE_TRACE_POINTS
@@ -87,6 +88,25 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
87int ignore_msrs = 0; 88int ignore_msrs = 0;
88module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR); 89module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
89 90
91#define KVM_NR_SHARED_MSRS 16
92
93struct kvm_shared_msrs_global {
94 int nr;
95 struct kvm_shared_msr {
96 u32 msr;
97 u64 value;
98 } msrs[KVM_NR_SHARED_MSRS];
99};
100
101struct kvm_shared_msrs {
102 struct user_return_notifier urn;
103 bool registered;
104 u64 current_value[KVM_NR_SHARED_MSRS];
105};
106
107static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
108static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
109
90struct kvm_stats_debugfs_item debugfs_entries[] = { 110struct kvm_stats_debugfs_item debugfs_entries[] = {
91 { "pf_fixed", VCPU_STAT(pf_fixed) }, 111 { "pf_fixed", VCPU_STAT(pf_fixed) },
92 { "pf_guest", VCPU_STAT(pf_guest) }, 112 { "pf_guest", VCPU_STAT(pf_guest) },
@@ -123,6 +143,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
123 { NULL } 143 { NULL }
124}; 144};
125 145
146static void kvm_on_user_return(struct user_return_notifier *urn)
147{
148 unsigned slot;
149 struct kvm_shared_msr *global;
150 struct kvm_shared_msrs *locals
151 = container_of(urn, struct kvm_shared_msrs, urn);
152
153 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
154 global = &shared_msrs_global.msrs[slot];
155 if (global->value != locals->current_value[slot]) {
156 wrmsrl(global->msr, global->value);
157 locals->current_value[slot] = global->value;
158 }
159 }
160 locals->registered = false;
161 user_return_notifier_unregister(urn);
162}
163
164void kvm_define_shared_msr(unsigned slot, u32 msr)
165{
166 int cpu;
167 u64 value;
168
169 if (slot >= shared_msrs_global.nr)
170 shared_msrs_global.nr = slot + 1;
171 shared_msrs_global.msrs[slot].msr = msr;
172 rdmsrl_safe(msr, &value);
173 shared_msrs_global.msrs[slot].value = value;
174 for_each_online_cpu(cpu)
175 per_cpu(shared_msrs, cpu).current_value[slot] = value;
176}
177EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
178
179static void kvm_shared_msr_cpu_online(void)
180{
181 unsigned i;
182 struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
183
184 for (i = 0; i < shared_msrs_global.nr; ++i)
185 locals->current_value[i] = shared_msrs_global.msrs[i].value;
186}
187
188void kvm_set_shared_msr(unsigned slot, u64 value)
189{
190 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
191
192 if (value == smsr->current_value[slot])
193 return;
194 smsr->current_value[slot] = value;
195 wrmsrl(shared_msrs_global.msrs[slot].msr, value);
196 if (!smsr->registered) {
197 smsr->urn.on_user_return = kvm_on_user_return;
198 user_return_notifier_register(&smsr->urn);
199 smsr->registered = true;
200 }
201}
202EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
203
126unsigned long segment_base(u16 selector) 204unsigned long segment_base(u16 selector)
127{ 205{
128 struct descriptor_table gdt; 206 struct descriptor_table gdt;
@@ -4815,6 +4893,9 @@ int kvm_arch_hardware_enable(void *garbage)
4815 int cpu = raw_smp_processor_id(); 4893 int cpu = raw_smp_processor_id();
4816 per_cpu(cpu_tsc_khz, cpu) = 0; 4894 per_cpu(cpu_tsc_khz, cpu) = 0;
4817 } 4895 }
4896
4897 kvm_shared_msr_cpu_online();
4898
4818 return kvm_x86_ops->hardware_enable(garbage); 4899 return kvm_x86_ops->hardware_enable(garbage);
4819} 4900}
4820 4901