author		Dexuan Cui <dexuan.cui@intel.com>	2010-06-09 23:27:12 -0400
committer	Avi Kivity <avi@redhat.com>		2010-08-01 03:46:31 -0400
commit		2acf923e38fb6a4ce0c57115decbb38d334902ac (patch)
tree		51a0fba243c857a3bc373ab152c6374c804df224 /arch/x86/kvm/x86.c
parent		f495c6e5e8fdc972162241df5bdff5bcebb4dc33 (diff)
KVM: VMX: Enable XSAVE/XRSTOR for guest
This patch enables the guest to use the XSAVE/XRSTOR instructions.
We assume that host_xcr0 has all the bits set that the host OS supports.
xcr0 is loaded the same way the fpu is handled - as late as possible.
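
[Editor's note] For background, the rules this patch enforces on guest XCR0 writes can be stated on their own. Below is a minimal standalone sketch; the XSTATE_* values match the kernel's bit assignments, and valid_xcr0() is a hypothetical helper, not something this patch adds:

#include <stdbool.h>
#include <stdint.h>

#define XSTATE_FP	(1ULL << 0)	/* x87 state; architecturally always set */
#define XSTATE_SSE	(1ULL << 1)	/* XMM registers and MXCSR */
#define XSTATE_YMM	(1ULL << 2)	/* upper halves of the AVX registers */

/* Hypothetical helper mirroring the checks in __kvm_set_xcr() below. */
static bool valid_xcr0(uint64_t xcr0, uint64_t host_xcr0)
{
	if (!(xcr0 & XSTATE_FP))
		return false;		/* bit 0 may never be cleared */
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return false;		/* YMM state requires SSE state */
	if (xcr0 & ~host_xcr0)
		return false;		/* no bits the host itself cannot handle */
	return true;
}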
Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	130
1 file changed, 123 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b08c0052e332..b5e644701cc1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -65,6 +65,7 @@
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+			  | X86_CR4_OSXSAVE \
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -150,6 +151,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
+u64 __read_mostly host_xcr0;
+
+static inline u32 bit(int bitno)
+{
+	return 1 << (bitno & 31);
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -474,6 +482,61 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+{
+	u64 xcr0;
+
+	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
+	if (index != XCR_XFEATURE_ENABLED_MASK)
+		return 1;
+	xcr0 = xcr;
+	if (kvm_x86_ops->get_cpl(vcpu) != 0)
+		return 1;
+	if (!(xcr0 & XSTATE_FP))
+		return 1;
+	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+		return 1;
+	if (xcr0 & ~host_xcr0)
+		return 1;
+	vcpu->arch.xcr0 = xcr0;
+	vcpu->guest_xcr0_loaded = 0;
+	return 0;
+}
+
+int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+{
+	if (__kvm_set_xcr(vcpu, index, xcr)) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_xcr);
+
+static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+static void update_cpuid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (!best)
+		return;
+
+	/* Update OSXSAVE bit */
+	if (cpu_has_xsave && best->function == 0x1) {
+		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+			best->ecx |= bit(X86_FEATURE_OSXSAVE);
+	}
+}
+
 int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
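
[Editor's note] A guest programs XCR0 by executing XSETBV, which VMX intercepts and routes to kvm_set_xcr(); a rejected write is reflected back as #GP(0), as above. An illustrative sketch of the guest-side wrapper follows (it assumes the guest kernel has already set CR4.OSXSAVE, without which XSETBV raises #UD; the raw .byte encoding matches how kernels of this era spelled the instruction for old assemblers):

#include <stdint.h>

#define XCR_XFEATURE_ENABLED_MASK	0	/* index of XCR0 */

/* XSETBV writes %edx:%eax into the XCR selected by %ecx. */
static inline void xsetbv(uint32_t index, uint64_t value)
{
	uint32_t eax = (uint32_t)value;
	uint32_t edx = (uint32_t)(value >> 32);

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}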
@@ -482,6 +545,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & CR4_RESERVED_BITS)
 		return 1;
 
+	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+		return 1;
+
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE))
 			return 1;
@@ -498,6 +564,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if ((cr4 ^ old_cr4) & pdptr_bits)
 		kvm_mmu_reset_context(vcpu);
 
+	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+		update_cpuid(vcpu);
+
 	return 0;
 }
 
@@ -666,11 +735,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 }
 EXPORT_SYMBOL_GPL(kvm_get_dr);
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1814,6 +1878,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	r = 0;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
+	update_cpuid(vcpu);
 
 out_free:
 	vfree(cpuid_entries);
@@ -1837,6 +1902,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
+	update_cpuid(vcpu);
 	return 0;
 
 out:
@@ -1917,7 +1983,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
 		0 /* Reserved, DCA */ | F(XMM4_1) |
 		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-		0 /* Reserved, XSAVE, OSXSAVE */;
+		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
 	/* cpuid 0x80000001.ecx */
 	const u32 kvm_supported_word6_x86_features =
 		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
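
[Editor's note] OSXSAVE deliberately stays masked out of the static feature word: it mirrors the guest's CR4.OSXSAVE and is toggled per-vcpu by update_cpuid() above, while XSAVE itself is now advertised as a supported feature.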
@@ -1932,7 +1998,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	switch (function) {
 	case 0:
-		entry->eax = min(entry->eax, (u32)0xb);
+		entry->eax = min(entry->eax, (u32)0xd);
 		break;
 	case 1:
 		entry->edx &= kvm_supported_word0_x86_features;
@@ -1990,6 +2056,20 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		}
 		break;
 	}
+	case 0xd: {
+		int i;
+
+		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+		for (i = 1; *nent < maxnent; ++i) {
+			if (entry[i - 1].eax == 0 && i != 2)
+				break;
+			do_cpuid_1_ent(&entry[i], function, i);
+			entry[i].flags |=
+			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+			++*nent;
+		}
+		break;
+	}
 	case KVM_CPUID_SIGNATURE: {
 		char signature[12] = "KVMKVMKVM\0\0";
 		u32 *sigptr = (u32 *)signature;
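
[Editor's note] CPUID leaf 0xd is index-significant: subleaf 0 reports the supported XCR0 mask (EDX:EAX) and XSAVE area sizes (EBX/ECX), subleaf 1 reports XSAVE extensions, and subleaves 2 and up give the size and offset of each state component. That is why the loop above special-cases i != 2: subleaf 1 can legitimately report EAX == 0 while subleaf 2 (YMM state) still exists. A userspace sketch of walking the leaf, for illustration:

#include <stdint.h>
#include <stdio.h>

static void cpuid_count(uint32_t leaf, uint32_t subleaf, uint32_t *a,
			uint32_t *b, uint32_t *c, uint32_t *d)
{
	asm volatile("cpuid"
		     : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
		     : "0" (leaf), "2" (subleaf));
}

int main(void)
{
	uint32_t a, b, c, d, i;

	/* Subleaf 0: EDX:EAX = supported XCR0 mask, ECX = max save area. */
	cpuid_count(0xd, 0, &a, &b, &c, &d);
	printf("XCR0 mask %#llx, max save area %u bytes\n",
	       ((unsigned long long)d << 32) | a, c);

	/* Subleaves >= 2: size/offset of each supported state component. */
	for (i = 2; i < 63; i++) {
		cpuid_count(0xd, i, &a, &b, &c, &d);
		if (!a)
			continue;
		printf("component %u: %u bytes at offset %u\n", i, a, b);
	}
	return 0;
}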
@@ -4125,6 +4205,9 @@ int kvm_arch_init(void *opaque)
 
 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 
+	if (cpu_has_xsave)
+		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+
 	return 0;
 
 out:
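
[Editor's note] host_xcr0 is captured once at init so that later guest XCR0 writes can be validated against it in __kvm_set_xcr(). For reference, an illustrative sketch of the xgetbv() wrapper (XGETBV returns the XCR selected by %ecx in %edx:%eax, again spelled as raw bytes; it requires CR4.OSXSAVE to be set, which a host kernel built with XSAVE support has already done):

#include <stdint.h>

/* XGETBV reads the XCR selected by %ecx into %edx:%eax. */
static inline uint64_t xgetbv(uint32_t index)
{
	uint32_t eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx) : "c" (index));
	return eax | ((uint64_t)edx << 32);
}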
@@ -4523,6 +4606,25 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+			!vcpu->guest_xcr0_loaded) {
+		/* kvm_set_xcr() also depends on this */
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+		vcpu->guest_xcr0_loaded = 1;
+	}
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_xcr0_loaded) {
+		if (vcpu->arch.xcr0 != host_xcr0)
+			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+		vcpu->guest_xcr0_loaded = 0;
+	}
+}
+
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
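
[Editor's note] Design note: the load is gated on the guest's CR4.OSXSAVE, so a guest that never enables XSAVE simply runs with the host's XCR0 in place; the put path writes host_xcr0 back only when the two masks actually differ, sparing a redundant xsetbv on the common path.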
@@ -4568,6 +4670,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
+	kvm_load_guest_xcr0(vcpu);
 
 	atomic_set(&vcpu->guest_mode, 1);
 	smp_wmb();
@@ -5124,6 +5227,11 @@ int fx_init(struct kvm_vcpu *vcpu)
 
 	fpu_finit(&vcpu->arch.guest_fpu);
 
+	/*
+	 * Ensure guest xcr0 is valid for loading
+	 */
+	vcpu->arch.xcr0 = XSTATE_FP;
+
 	vcpu->arch.cr0 |= X86_CR0_ET;
 
 	return 0;
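
[Editor's note] XCR0 is architecturally 1 at processor reset (only x87 state enabled), so XSTATE_FP is both the minimal value that passes the checks in __kvm_set_xcr() and what a freshly reset guest expects to find.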
@@ -5140,6 +5248,12 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_fpu_loaded)
 		return;
 
+	/*
+	 * Restore all possible states in the guest,
+	 * and assume host would use all available bits.
+	 * Guest xcr0 would be loaded later.
+	 */
+	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	unlazy_fpu(current);
 	fpu_restore_checking(&vcpu->arch.guest_fpu);
@@ -5148,6 +5262,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
+	kvm_put_guest_xcr0(vcpu);
+
 	if (!vcpu->guest_fpu_loaded)
 		return;
 