author    Linus Torvalds <torvalds@linux-foundation.org>  2015-05-21 23:15:16 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-05-21 23:15:16 -0400
commit    f0d8690ad443069b26df43a1be09c0f14a928eb9 (patch)
tree      74a19a907b64ad441e63373842d268efa6f99d72
parent    2f8126e3964261db3184d95bff1ae801e61239e9 (diff)
parent    c447e76b4cabb49ddae8e49c5758f031f35d55fb (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "This includes a fix for two oopses, one on PPC and one on x86.  The
  rest is fixes for bugs with newer Intel processors."

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm/fpu: Enable eager restore kvm FPU for MPX
  Revert "KVM: x86: drop fpu_activate hook"
  kvm: fix crash in kvm_vcpu_reload_apic_access_page
  KVM: MMU: fix SMAP virtualization
  KVM: MMU: fix CR4.SMEP=1, CR0.WP=0 with shadow pages
  KVM: MMU: fix smap permission check
  KVM: PPC: Book3S HV: Fix list traversal in error case
-rw-r--r--  Documentation/virtual/kvm/mmu.txt    18
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c          5
-rw-r--r--  arch/x86/include/asm/kvm_host.h       3
-rw-r--r--  arch/x86/kvm/cpuid.c                  4
-rw-r--r--  arch/x86/kvm/cpuid.h                  8
-rw-r--r--  arch/x86/kvm/mmu.c                   16
-rw-r--r--  arch/x86/kvm/mmu.h                    4
-rw-r--r--  arch/x86/kvm/paging_tmpl.h            7
-rw-r--r--  arch/x86/kvm/svm.c                    1
-rw-r--r--  arch/x86/kvm/vmx.c                    1
-rw-r--r--  arch/x86/kvm/x86.c                   26
11 files changed, 74 insertions, 19 deletions
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index 53838d9c6295..c59bd9bc41ef 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -169,6 +169,10 @@ Shadow pages contain the following information:
     Contains the value of cr4.smep && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations.  See role.direct.
@@ -344,10 +348,16 @@ on fault type:
 
 (user write faults generate a #PF)
 
-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it.  We handle this by also setting spte.nx.  If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it.  We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it can not be reused when CR4.SMAP is enabled.  We set
+  CR4.SMAP && !CR0.WP into shadow page's role to avoid this case.  Note,
+  here we do not care the case that CR4.SMAP is enabled since KVM will
+  directly inject #PF to guest due to failed permission check.
 
 To prevent an spte that was converted into a kernel page with cr0.wp=0
 from being written by the kernel after cr0.wp has changed to 1, we make
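
The role bits described above are each a plain conjunction of control-register state. A tiny standalone C sketch (ours, not kernel code) that enumerates the combinations the documentation describes for role.smap_andnot_wp:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* role.smap_andnot_wp is simply "cr4.smap set && cr0.wp clear";
	 * walk the four combinations the text refers to. */
	for (int smap = 0; smap <= 1; smap++)
		for (int wp = 0; wp <= 1; wp++) {
			bool smap_andnot_wp = smap && !wp;
			printf("cr4.smap=%d cr0.wp=%d -> role.smap_andnot_wp=%d\n",
			       smap, wp, smap_andnot_wp);
		}
	return 0;
}

Only the smap=1, wp=0 combination sets the bit, which is exactly the state in which a guest page may have been converted into a kernel page and must not be reused once SMAP is enforced.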
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 48d3c5d2ecc9..df81caab7383 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu, *vnext;
 	int i;
 	int srcu_idx;
 
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+		list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+					 arch.run_list) {
 			vcpu->arch.ret = -EBUSY;
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
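
The PPC oops fixed here is the classic delete-while-iterating bug: kvmppc_remove_runnable() unlinks the current entry, so plain list_for_each_entry() would read the next pointer from an entry that is no longer safely on the list. list_for_each_entry_safe() caches the successor before the loop body runs. A freestanding sketch of the same pattern follows; the list macros below are simplified stand-ins for <linux/list.h>, with an explicit type argument instead of the kernel's typeof-based version:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* "safe" variant: 'n' holds the successor before the body may unlink 'pos' */
#define list_for_each_entry_safe(pos, n, head, type, member)		\
	for (pos = container_of((head)->next, type, member),		\
	     n = container_of(pos->member.next, type, member);		\
	     &pos->member != (head);					\
	     pos = n, n = container_of(n->member.next, type, member))

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct vcpu { int id; struct list_head run_list; };

int main(void)
{
	struct list_head runnable = { &runnable, &runnable };
	struct vcpu *v, *vnext;

	for (int i = 0; i < 3; i++) {
		struct vcpu *nv = malloc(sizeof(*nv));
		nv->id = i;
		/* tail insert */
		nv->run_list.next = &runnable;
		nv->run_list.prev = runnable.prev;
		runnable.prev->next = &nv->run_list;
		runnable.prev = &nv->run_list;
	}

	/* Unlinking and freeing the current entry is only safe because the
	 * iterator already saved its successor in vnext. */
	list_for_each_entry_safe(v, vnext, &runnable, struct vcpu, run_list) {
		printf("waking and removing vcpu %d\n", v->id);
		list_del(&v->run_list);
		free(v);
	}
	return 0;
}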
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dea2e7e962e3..f4a555beef19 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
 		unsigned nxe:1;
 		unsigned cr0_wp:1;
 		unsigned smep_andnot_wp:1;
+		unsigned smap_andnot_wp:1;
 	};
 };
 
@@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	struct fpu guest_fpu;
+	bool eager_fpu;
 	u64 xcr0;
 	u64 guest_supported_xcr0;
 	u32 guest_xstate_size;
@@ -743,6 +745,7 @@ struct kvm_x86_ops {
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+	void (*fpu_activate)(struct kvm_vcpu *vcpu);
 	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 59b69f6a2844..1d08ad3582d0 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,6 +16,8 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
+#include <asm/i387.h> /* For use_eager_fpu.  Ugh! */
+#include <asm/fpu-internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/xsave.h>
 #include "cpuid.h"
@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
+	vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
+
 	/*
 	 * The existing code assumes virtual address is 48-bit in the canonical
 	 * address checks; exit if it is ever changed.
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c3b1ad9fca81..496b3695d3d3 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
+
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
 #endif
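
guest_cpuid_has_mpx() tests CPUID leaf 7 (subleaf 0), EBX bit 14, in the guest's cached CPUID entries. For comparison, a user-space probe of the host CPU for the same bit might look like the sketch below, using GCC's <cpuid.h> helpers (the bit position is from the Intel SDM; cpu_has_mpx is our name, not a kernel helper):

#include <stdio.h>
#include <cpuid.h>

#define X86_FEATURE_MPX_BIT	14	/* CPUID.(EAX=07H,ECX=0):EBX[14] */

static int cpu_has_mpx(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 7 is only meaningful if the max basic leaf reaches it. */
	if (__get_cpuid_max(0, NULL) < 7)
		return 0;
	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	return !!(ebx & (1u << X86_FEATURE_MPX_BIT));
}

int main(void)
{
	printf("MPX: %s\n", cpu_has_mpx() ? "yes" : "no");
	return 0;
}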
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d43867c33bc4..44a7d2515497 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 	}
 }
 
-void update_permission_bitmask(struct kvm_vcpu *vcpu,
-		struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+				      struct kvm_mmu *mmu, bool ept)
 {
 	unsigned bit, byte, pfec;
 	u8 map;
@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	context->base_role.cr0_wp = is_write_protection(vcpu);
 	context->base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
+	context->base_role.smap_andnot_wp
+		= smap && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
-	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
 	bool remote_flush, local_flush, zap_page;
+	union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
+		.cr0_wp = 1,
+		.cr4_pae = 1,
+		.nxe = 1,
+		.smep_andnot_wp = 1,
+		.smap_andnot_wp = 1,
+	};
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
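
Why the mask change matters: later in kvm_mmu_pte_write() each shadow page's role is compared against the vcpu's base role under this mask, in the style of (sp->role.word ^ base_role.word) & mask.word, so any bit missing from the mask silently never distinguishes pages — which is how stale SMAP-era pages were being reused. A standalone sketch of the XOR-under-mask comparison (field layout simplified; not the kernel's exact union):

#include <stdio.h>

union page_role {
	unsigned word;
	struct {
		unsigned cr4_pae:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
	};
};

int main(void)
{
	/* Only the bits set in 'mask' participate in the comparison. */
	union page_role mask = {
		.cr4_pae = 1, .nxe = 1, .cr0_wp = 1,
		.smep_andnot_wp = 1, .smap_andnot_wp = 1,
	};
	union page_role base = { .cr0_wp = 1 };
	union page_role sp_role = { .cr0_wp = 1, .smap_andnot_wp = 1 };

	/* XOR leaves a 1 in every differing bit; masking keeps only the
	 * bits we care about.  Non-zero => the page cannot be reused. */
	unsigned diff = (sp_role.word ^ base.word) & mask.word;
	printf("roles compatible? %s\n", diff ? "no" : "yes");
	return 0;
}

With smap_andnot_wp absent from the mask (as before the patch), diff would be zero here and the mismatched page would wrongly count as compatible.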
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c7d65637c851..0ada65ecddcf 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,8 +71,6 @@ enum {
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-		bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	int index = (pfec >> 1) +
 		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 
+	WARN_ON(pfec & PFERR_RSVD_MASK);
+
 	return (mmu->permissions[index] >> pte_access) & 1;
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index fd49c867b25a..6e6d115fe9b5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 					      mmu_is_nested(vcpu));
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
+
+		/*
+		 * page fault with PFEC.RSVD = 1 is caused by shadow
+		 * page fault, should not be used to walk guest page
+		 * table.
+		 */
+		error_code &= ~PFERR_RSVD_MASK;
 	};
 
 	r = mmu_topup_memory_caches(vcpu);
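
The stripped PFERR_RSVD bit here and the WARN_ON added to permission_fault() in mmu.h above are two halves of the same invariant: the SMAP key is folded into the permission-table index at exactly the bit position PFERR_RSVD would occupy after the pfec >> 1 shift, so a leftover RSVD bit would alias a SMAP-keyed entry. A worked sketch of the index arithmetic (constants as on x86: EFLAGS.AC is bit 18, PFERR_RSVD is bit 3; for simplicity the SMAP key is passed in pre-computed, either 0 or X86_EFLAGS_AC, rather than derived from CPL and RFLAGS as the kernel does):

#include <stdio.h>

#define PFERR_WRITE_BIT		1
#define PFERR_USER_BIT		2
#define PFERR_RSVD_BIT		3
#define PFERR_RSVD_MASK		(1u << PFERR_RSVD_BIT)
#define X86_EFLAGS_AC_BIT	18
#define X86_EFLAGS_AC		(1ul << X86_EFLAGS_AC_BIT)

/* Mirror of the index computation in permission_fault(): pfec is shifted
 * right once (dropping the Present bit), and the SMAP key is shifted so
 * that it lands exactly where PFERR_RSVD would have been. */
static int perm_index(unsigned pfec, unsigned long smap)
{
	return (pfec >> 1) +
	       (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	unsigned user_write = (1u << PFERR_WRITE_BIT) | (1u << PFERR_USER_BIT);

	/* With the SMAP key set, the index gains bit 2 (value 4) ... */
	printf("index, SMAP keyed:  %d\n", perm_index(user_write, X86_EFLAGS_AC));
	/* ... the very slot a stray PFERR_RSVD bit would select, so RSVD
	 * must be stripped (and warned about) before the table lookup. */
	printf("index, stray RSVD:  %d\n",
	       perm_index(user_write | PFERR_RSVD_MASK, 0));
	return 0;
}

Both printf calls yield index 7, demonstrating the collision the patch prevents.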
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce741b8650f6..9afa233b5482 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
+	.fpu_activate = svm_fpu_activate,
 	.fpu_deactivate = svm_fpu_deactivate,
 
 	.tlb_flush = svm_flush_tlb,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b61687bd79..2d73807f0d31 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10185,6 +10185,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
+	.fpu_activate = vmx_fpu_activate,
 	.fpu_deactivate = vmx_fpu_deactivate,
 
 	.tlb_flush = vmx_flush_tlb,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c73efcd03e29..ea306adbbc13 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
-	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
-				   X86_CR4_PAE | X86_CR4_SMEP;
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+				   X86_CR4_SMEP | X86_CR4_SMAP;
+
 	if (cr4 & CR4_RESERVED_BITS)
 		return 1;
 
@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
-	if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
-		update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
-
 	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
 		kvm_update_cpuid(vcpu);
 
@@ -6197,6 +6195,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 		return;
 
 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (is_error_page(page))
+		return;
 	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
 
 	/*
@@ -7060,7 +7060,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	fpu_save_init(&vcpu->arch.guest_fpu);
 	__kernel_fpu_end();
 	++vcpu->stat.fpu_reload;
-	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+	if (!vcpu->arch.eager_fpu)
+		kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+
 	trace_kvm_fpu(0);
 }
 
@@ -7076,11 +7078,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 						unsigned int id)
 {
+	struct kvm_vcpu *vcpu;
+
 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
 		printk_once(KERN_WARNING
 		"kvm: SMP vm created on host with unstable TSC; "
 		"guest TSC will not be reliable\n");
-	return kvm_x86_ops->vcpu_create(kvm, id);
+
+	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+	/*
+	 * Activate fpu unconditionally in case the guest needs eager FPU.  It will be
+	 * deactivated soon if it doesn't.
+	 */
+	kvm_x86_ops->fpu_activate(vcpu);
+	return vcpu;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
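
Taken together, the FPU changes implement one policy: a guest whose CPUID advertises MPX gets an eagerly restored FPU (activated at vcpu creation and never lazily deactivated, so its bound-register state cannot go stale), while other guests keep the lazy scheme. A compact sketch of that life cycle, with the vcpu and the activate/deactivate hooks reduced to plain C stubs of our own (not kernel types):

#include <stdio.h>
#include <stdbool.h>

/* Stub vcpu: only the fields this policy touches. */
struct vcpu {
	bool eager_fpu;		/* set from guest CPUID: does it have MPX? */
	bool fpu_active;	/* stands in for fpu_activate/fpu_deactivate */
};

/* kvm_arch_vcpu_create(): activate unconditionally; a guest that does not
 * need eager FPU is deactivated again on its first put_guest_fpu(). */
static void vcpu_create(struct vcpu *v, bool guest_has_mpx)
{
	v->eager_fpu = guest_has_mpx;
	v->fpu_active = true;
}

/* kvm_put_guest_fpu(): only lazy guests request deactivation. */
static void put_guest_fpu(struct vcpu *v)
{
	if (!v->eager_fpu)
		v->fpu_active = false;	/* KVM_REQ_DEACTIVATE_FPU */
}

int main(void)
{
	struct vcpu mpx_guest, legacy_guest;

	vcpu_create(&mpx_guest, true);
	vcpu_create(&legacy_guest, false);
	put_guest_fpu(&mpx_guest);
	put_guest_fpu(&legacy_guest);

	printf("MPX guest FPU stays active:   %s\n",
	       mpx_guest.fpu_active ? "yes" : "no");
	printf("legacy guest FPU deactivated: %s\n",
	       legacy_guest.fpu_active ? "no" : "yes");
	return 0;
}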