aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/kvm_main.c
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2007-07-30 07:13:43 -0400
committerAvi Kivity <avi@qumranet.com>2007-10-13 04:18:21 -0400
commitb114b0804df7131cb6764b948c1c530c834fa3c0 (patch)
tree4e5ced9ed1cdb673d27b26b166cd0bd7c845d5b9 /drivers/kvm/kvm_main.c
parentc16f862d0257349607b7a9be7b4a4b7ed419a3ab (diff)
KVM: Use alignment properties of vcpu to simplify FPU ops
Now that we use a kmem cache for allocating vcpus, we can get the 16-byte alignment required by the fxsave & fxrstor instructions, and avoid manually aligning the buffer. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r-- drivers/kvm/kvm_main.c | 45
1 file changed, 17 insertions(+), 28 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4166a08ce500..bfb1b6de0584 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -154,8 +154,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
154 return; 154 return;
155 155
156 vcpu->guest_fpu_loaded = 1; 156 vcpu->guest_fpu_loaded = 1;
157 fx_save(vcpu->host_fx_image); 157 fx_save(&vcpu->host_fx_image);
158 fx_restore(vcpu->guest_fx_image); 158 fx_restore(&vcpu->guest_fx_image);
159} 159}
160EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); 160EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
161 161
@@ -165,8 +165,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
165 return; 165 return;
166 166
167 vcpu->guest_fpu_loaded = 0; 167 vcpu->guest_fpu_loaded = 0;
168 fx_save(vcpu->guest_fx_image); 168 fx_save(&vcpu->guest_fx_image);
169 fx_restore(vcpu->host_fx_image); 169 fx_restore(&vcpu->host_fx_image);
170} 170}
171EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); 171EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
172 172
@@ -262,10 +262,6 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
262 } 262 }
263 vcpu->pio_data = page_address(page); 263 vcpu->pio_data = page_address(page);
264 264
265 vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
266 FX_IMAGE_ALIGN);
267 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
268
269 r = kvm_mmu_create(vcpu); 265 r = kvm_mmu_create(vcpu);
270 if (r < 0) 266 if (r < 0)
271 goto fail_free_pio_data; 267 goto fail_free_pio_data;
@@ -615,30 +611,20 @@ EXPORT_SYMBOL_GPL(set_cr8);
615 611
616void fx_init(struct kvm_vcpu *vcpu) 612void fx_init(struct kvm_vcpu *vcpu)
617{ 613{
618 struct __attribute__ ((__packed__)) fx_image_s { 614 unsigned after_mxcsr_mask;
619 u16 control; //fcw
620 u16 status; //fsw
621 u16 tag; // ftw
622 u16 opcode; //fop
623 u64 ip; // fpu ip
624 u64 operand;// fpu dp
625 u32 mxcsr;
626 u32 mxcsr_mask;
627
628 } *fx_image;
629 615
630 /* Initialize guest FPU by resetting ours and saving into guest's */ 616 /* Initialize guest FPU by resetting ours and saving into guest's */
631 preempt_disable(); 617 preempt_disable();
632 fx_save(vcpu->host_fx_image); 618 fx_save(&vcpu->host_fx_image);
633 fpu_init(); 619 fpu_init();
634 fx_save(vcpu->guest_fx_image); 620 fx_save(&vcpu->guest_fx_image);
635 fx_restore(vcpu->host_fx_image); 621 fx_restore(&vcpu->host_fx_image);
636 preempt_enable(); 622 preempt_enable();
637 623
638 fx_image = (struct fx_image_s *)vcpu->guest_fx_image; 624 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
639 fx_image->mxcsr = 0x1f80; 625 vcpu->guest_fx_image.mxcsr = 0x1f80;
640 memset(vcpu->guest_fx_image + sizeof(struct fx_image_s), 626 memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
641 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s)); 627 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
642} 628}
643EXPORT_SYMBOL_GPL(fx_init); 629EXPORT_SYMBOL_GPL(fx_init);
644 630
@@ -2356,6 +2342,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2356 2342
2357 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2343 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2358 2344
2345 /* We do fxsave: this must be aligned. */
2346 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2347
2359 vcpu_load(vcpu); 2348 vcpu_load(vcpu);
2360 r = kvm_mmu_setup(vcpu); 2349 r = kvm_mmu_setup(vcpu);
2361 vcpu_put(vcpu); 2350 vcpu_put(vcpu);
@@ -2468,7 +2457,7 @@ struct fxsave {
2468 2457
2469static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2458static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2470{ 2459{
2471 struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image; 2460 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2472 2461
2473 vcpu_load(vcpu); 2462 vcpu_load(vcpu);
2474 2463
@@ -2488,7 +2477,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2488 2477
2489static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 2478static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2490{ 2479{
2491 struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image; 2480 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2492 2481
2493 vcpu_load(vcpu); 2482 vcpu_load(vcpu);
2494 2483