diff options
Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c2096f30d90..54ce77582eda 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -52,6 +52,7 @@
 #include <asm/desc.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
+#include <asm/i387.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS \
@@ -5134,21 +5135,10 @@ void fx_init(struct kvm_vcpu *vcpu)
 {
 	unsigned after_mxcsr_mask;
 
-	/*
-	 * Touch the fpu the first time in non atomic context as if
-	 * this is the first fpu instruction the exception handler
-	 * will fire before the instruction returns and it'll have to
-	 * allocate ram with GFP_KERNEL.
-	 */
-	if (!used_math())
-		kvm_fx_save(&vcpu->arch.host_fx_image);
-
 	/* Initialize guest FPU by resetting ours and saving into guest's */
 	preempt_disable();
-	kvm_fx_save(&vcpu->arch.host_fx_image);
 	kvm_fx_finit();
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
-	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	preempt_enable();
 
 	vcpu->arch.cr0 |= X86_CR0_ET;
@@ -5165,7 +5155,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 		return;
 
 	vcpu->guest_fpu_loaded = 1;
-	kvm_fx_save(&vcpu->arch.host_fx_image);
+	unlazy_fpu(current);
 	kvm_fx_restore(&vcpu->arch.guest_fx_image);
 	trace_kvm_fpu(1);
 }
@@ -5177,7 +5167,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 	vcpu->guest_fpu_loaded = 0;
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
-	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	++vcpu->stat.fpu_reload;
 	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
 	trace_kvm_fpu(0);
@@ -5203,9 +5192,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	/* We do fxsave: this must be aligned. */
-	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
-
 	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);