diff options
author | Alexander Graf <agraf@suse.de> | 2008-11-25 14:17:11 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-03-24 05:02:48 -0400 |
commit | d80174745ba3938bc6abb8f95ed670ac0631a182 (patch) | |
tree | 3f6c3ca1f06f4737dea4857f33e9182d9988d57c /arch/x86/kvm/x86.c | |
parent | 236de05553a7ef8f6940de8686ae9bf1272cd2cf (diff) |
KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set
Userspace has to tell the kernel module somehow that nested SVM should be used.
The easiest way I could think of that doesn't break anything is to implement
if (cpuid & svm)
allow write to efer
else
deny write to efer
Old userspaces mask the SVM capability bit, so they don't break.
In order to find out whether the SVM capability is set, I had to split
kvm_emulate_cpuid into a finding part and an emulating part.
(introduced in v6)
Acked-by: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 58 |
1 file changed, 42 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 99165a961f08..b5e9932e0f62 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -69,6 +69,8 @@ static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL; | |||
69 | 69 | ||
70 | static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, | 70 | static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, |
71 | struct kvm_cpuid_entry2 __user *entries); | 71 | struct kvm_cpuid_entry2 __user *entries); |
72 | struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, | ||
73 | u32 function, u32 index); | ||
72 | 74 | ||
73 | struct kvm_x86_ops *kvm_x86_ops; | 75 | struct kvm_x86_ops *kvm_x86_ops; |
74 | EXPORT_SYMBOL_GPL(kvm_x86_ops); | 76 | EXPORT_SYMBOL_GPL(kvm_x86_ops); |
@@ -173,6 +175,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, | |||
173 | u32 error_code) | 175 | u32 error_code) |
174 | { | 176 | { |
175 | ++vcpu->stat.pf_guest; | 177 | ++vcpu->stat.pf_guest; |
178 | |||
176 | if (vcpu->arch.exception.pending) { | 179 | if (vcpu->arch.exception.pending) { |
177 | if (vcpu->arch.exception.nr == PF_VECTOR) { | 180 | if (vcpu->arch.exception.nr == PF_VECTOR) { |
178 | printk(KERN_DEBUG "kvm: inject_page_fault:" | 181 | printk(KERN_DEBUG "kvm: inject_page_fault:" |
@@ -442,6 +445,11 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) | |||
442 | } | 445 | } |
443 | EXPORT_SYMBOL_GPL(kvm_get_cr8); | 446 | EXPORT_SYMBOL_GPL(kvm_get_cr8); |
444 | 447 | ||
448 | static inline u32 bit(int bitno) | ||
449 | { | ||
450 | return 1 << (bitno & 31); | ||
451 | } | ||
452 | |||
445 | /* | 453 | /* |
446 | * List of msr numbers which we expose to userspace through KVM_GET_MSRS | 454 | * List of msr numbers which we expose to userspace through KVM_GET_MSRS |
447 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. | 455 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. |
@@ -481,6 +489,17 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
481 | return; | 489 | return; |
482 | } | 490 | } |
483 | 491 | ||
492 | if (efer & EFER_SVME) { | ||
493 | struct kvm_cpuid_entry2 *feat; | ||
494 | |||
495 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | ||
496 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { | ||
497 | printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); | ||
498 | kvm_inject_gp(vcpu, 0); | ||
499 | return; | ||
500 | } | ||
501 | } | ||
502 | |||
484 | kvm_x86_ops->set_efer(vcpu, efer); | 503 | kvm_x86_ops->set_efer(vcpu, efer); |
485 | 504 | ||
486 | efer &= ~EFER_LMA; | 505 | efer &= ~EFER_LMA; |
@@ -1181,11 +1200,6 @@ out: | |||
1181 | return r; | 1200 | return r; |
1182 | } | 1201 | } |
1183 | 1202 | ||
1184 | static inline u32 bit(int bitno) | ||
1185 | { | ||
1186 | return 1 << (bitno & 31); | ||
1187 | } | ||
1188 | |||
1189 | static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, | 1203 | static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
1190 | u32 index) | 1204 | u32 index) |
1191 | { | 1205 | { |
@@ -1228,7 +1242,8 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
1228 | const u32 kvm_supported_word3_x86_features = | 1242 | const u32 kvm_supported_word3_x86_features = |
1229 | bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16); | 1243 | bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16); |
1230 | const u32 kvm_supported_word6_x86_features = | 1244 | const u32 kvm_supported_word6_x86_features = |
1231 | bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY); | 1245 | bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) | |
1246 | bit(X86_FEATURE_SVM); | ||
1232 | 1247 | ||
1233 | /* all func 2 cpuid_count() should be called on the same cpu */ | 1248 | /* all func 2 cpuid_count() should be called on the same cpu */ |
1234 | get_cpu(); | 1249 | get_cpu(); |
@@ -2832,20 +2847,15 @@ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, | |||
2832 | return 1; | 2847 | return 1; |
2833 | } | 2848 | } |
2834 | 2849 | ||
2835 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) | 2850 | struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, |
2851 | u32 function, u32 index) | ||
2836 | { | 2852 | { |
2837 | int i; | 2853 | int i; |
2838 | u32 function, index; | 2854 | struct kvm_cpuid_entry2 *best = NULL; |
2839 | struct kvm_cpuid_entry2 *e, *best; | ||
2840 | 2855 | ||
2841 | function = kvm_register_read(vcpu, VCPU_REGS_RAX); | ||
2842 | index = kvm_register_read(vcpu, VCPU_REGS_RCX); | ||
2843 | kvm_register_write(vcpu, VCPU_REGS_RAX, 0); | ||
2844 | kvm_register_write(vcpu, VCPU_REGS_RBX, 0); | ||
2845 | kvm_register_write(vcpu, VCPU_REGS_RCX, 0); | ||
2846 | kvm_register_write(vcpu, VCPU_REGS_RDX, 0); | ||
2847 | best = NULL; | ||
2848 | for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { | 2856 | for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { |
2857 | struct kvm_cpuid_entry2 *e; | ||
2858 | |||
2849 | e = &vcpu->arch.cpuid_entries[i]; | 2859 | e = &vcpu->arch.cpuid_entries[i]; |
2850 | if (is_matching_cpuid_entry(e, function, index)) { | 2860 | if (is_matching_cpuid_entry(e, function, index)) { |
2851 | if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) | 2861 | if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) |
@@ -2860,6 +2870,22 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) | |||
2860 | if (!best || e->function > best->function) | 2870 | if (!best || e->function > best->function) |
2861 | best = e; | 2871 | best = e; |
2862 | } | 2872 | } |
2873 | |||
2874 | return best; | ||
2875 | } | ||
2876 | |||
2877 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) | ||
2878 | { | ||
2879 | u32 function, index; | ||
2880 | struct kvm_cpuid_entry2 *best; | ||
2881 | |||
2882 | function = kvm_register_read(vcpu, VCPU_REGS_RAX); | ||
2883 | index = kvm_register_read(vcpu, VCPU_REGS_RCX); | ||
2884 | kvm_register_write(vcpu, VCPU_REGS_RAX, 0); | ||
2885 | kvm_register_write(vcpu, VCPU_REGS_RBX, 0); | ||
2886 | kvm_register_write(vcpu, VCPU_REGS_RCX, 0); | ||
2887 | kvm_register_write(vcpu, VCPU_REGS_RDX, 0); | ||
2888 | best = kvm_find_cpuid_entry(vcpu, function, index); | ||
2863 | if (best) { | 2889 | if (best) { |
2864 | kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); | 2890 | kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); |
2865 | kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); | 2891 | kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); |