Diffstat (limited to 'arch/x86/kvm')
 -rw-r--r--  arch/x86/kvm/Kconfig |  1
 -rw-r--r--  arch/x86/kvm/x86.c   | 81
 2 files changed, 82 insertions, 0 deletions
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index b84e571f4175..4cd498332466 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_EVENTFD
 	select KVM_APIC_ARCHITECTURE
+	select USER_RETURN_NOTIFIER
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e16cdc9ec0c1..58c5cddf363d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,6 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
+#include <linux/user-return-notifier.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -87,6 +88,25 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 int ignore_msrs = 0;
 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+#define KVM_NR_SHARED_MSRS 16
+
+struct kvm_shared_msrs_global {
+	int nr;
+	struct kvm_shared_msr {
+		u32 msr;
+		u64 value;
+	} msrs[KVM_NR_SHARED_MSRS];
+};
+
+struct kvm_shared_msrs {
+	struct user_return_notifier urn;
+	bool registered;
+	u64 current_value[KVM_NR_SHARED_MSRS];
+};
+
+static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
+static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
 	{ "pf_guest", VCPU_STAT(pf_guest) },
@@ -123,6 +143,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
+static void kvm_on_user_return(struct user_return_notifier *urn)
+{
+	unsigned slot;
+	struct kvm_shared_msr *global;
+	struct kvm_shared_msrs *locals
+		= container_of(urn, struct kvm_shared_msrs, urn);
+
+	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
+		global = &shared_msrs_global.msrs[slot];
+		if (global->value != locals->current_value[slot]) {
+			wrmsrl(global->msr, global->value);
+			locals->current_value[slot] = global->value;
+		}
+	}
+	locals->registered = false;
+	user_return_notifier_unregister(urn);
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
+	int cpu;
+	u64 value;
+
+	if (slot >= shared_msrs_global.nr)
+		shared_msrs_global.nr = slot + 1;
+	shared_msrs_global.msrs[slot].msr = msr;
+	rdmsrl_safe(msr, &value);
+	shared_msrs_global.msrs[slot].value = value;
+	for_each_online_cpu(cpu)
+		per_cpu(shared_msrs, cpu).current_value[slot] = value;
+}
+EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
+
+static void kvm_shared_msr_cpu_online(void)
+{
+	unsigned i;
+	struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
+
+	for (i = 0; i < shared_msrs_global.nr; ++i)
+		locals->current_value[i] = shared_msrs_global.msrs[i].value;
+}
+
+void kvm_set_shared_msr(unsigned slot, u64 value)
+{
+	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+
+	if (value == smsr->current_value[slot])
+		return;
+	smsr->current_value[slot] = value;
+	wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+	if (!smsr->registered) {
+		smsr->urn.on_user_return = kvm_on_user_return;
+		user_return_notifier_register(&smsr->urn);
+		smsr->registered = true;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+
 unsigned long segment_base(u16 selector)
 {
 	struct descriptor_table gdt;
@@ -4815,6 +4893,9 @@ int kvm_arch_hardware_enable(void *garbage)
 		int cpu = raw_smp_processor_id();
 		per_cpu(cpu_tsc_khz, cpu) = 0;
 	}
+
+	kvm_shared_msr_cpu_online();
+
 	return kvm_x86_ops->hardware_enable(garbage);
 }
 
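For context, the two helpers exported above, kvm_define_shared_msr() and kvm_set_shared_msr(), are intended to be called from a vendor module. The sketch below is illustrative only and is not part of this patch: the slot index, the choice of MSR_STAR, and the example_* function names are assumptions used to show the calling pattern.

/*
 * Illustrative sketch only -- not part of this patch.  Shows how a
 * vendor module (e.g. vmx.c) might consume the shared-MSR helpers.
 * The slot index, MSR choice and function names are assumptions.
 */
#include <asm/msr-index.h>

#define EXAMPLE_SHARED_MSR_STAR	0	/* hypothetical slot index */

static void example_hardware_setup(void)
{
	/* Register the slot once; this snapshots the current host value. */
	kvm_define_shared_msr(EXAMPLE_SHARED_MSR_STAR, MSR_STAR);
}

static void example_prepare_guest_entry(u64 guest_star)
{
	/*
	 * Load the guest value.  kvm_set_shared_msr() skips the wrmsr if
	 * the value is already current, and registers the user-return
	 * notifier so the host value is restored lazily in
	 * kvm_on_user_return(), only when the vcpu thread actually
	 * returns to userspace.
	 */
	kvm_set_shared_msr(EXAMPLE_SHARED_MSR_STAR, guest_star);
}

The design choice visible in kvm_on_user_return() is that host MSR values are restored once per return to userspace rather than on every exit from the guest, and only for slots whose current value differs from the recorded host value.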
