Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ce556372a4d..21338bdb28ff 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque)
 
        kvm_x86_ops = ops;
        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+       kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
+       kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+                       PT_DIRTY_MASK, PT64_NX_MASK, 0);
        return 0;
 
 out:
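A note on the two calls added to kvm_arch_init() above: instead of the MMU hard-coding which bits mean present/user/accessed/dirty/NX in a shadow PTE, the architecture code now hands those masks in at init time, which lets a backend with a different PTE format (such as EPT) install its own values later. A minimal sketch of what the setters amount to, assuming they simply park the masks in globals consumed by the shadow MMU (the shadow_* variable names here are assumptions, not taken from this patch):

/*
 * Sketch only: assumed storage for the masks passed in above; the real
 * definitions live in arch/x86/kvm/mmu.c (u64 comes from <linux/types.h>).
 */
static u64 shadow_base_present_pte;
static u64 shadow_user_mask;
static u64 shadow_accessed_mask;
static u64 shadow_dirty_mask;
static u64 shadow_nx_mask;
static u64 shadow_x_mask;

void kvm_mmu_set_base_ptes(u64 base_pte)
{
        shadow_base_present_pte = base_pte;
}

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                           u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
}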
@@ -3019,6 +3022,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        kvm_x86_ops->decache_regs(vcpu);
 
+       vcpu->arch.exception.pending = false;
+
        vcpu_put(vcpu);
 
        return 0;
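The vcpu->arch.exception.pending = false added above drops any exception KVM has queued for reinjection whenever userspace overwrites the whole register file with KVM_SET_REGS (typically on vcpu reset); otherwise the stale exception would be injected into the freshly reset state on the next entry. For orientation, a sketch of the per-vcpu bookkeeping this flag belongs to; the fields other than pending are assumptions about the layout, not a quote from the headers:

/* Sketch of vcpu->arch.exception (see kvm_host.h for the real layout). */
struct kvm_queued_exception {
        bool pending;           /* an exception is waiting to be injected */
        bool has_error_code;    /* push an error code along with it */
        u8 nr;                  /* exception vector number */
        u32 error_code;
};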
@@ -3481,7 +3486,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        }
 
        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
-               cseg_desc.type &= ~(1 << 8); //clear the B flag
+               cseg_desc.type &= ~(1 << 1); //clear the B flag
                save_guest_segment_descriptor(vcpu, tr_seg.selector,
                                              &cseg_desc);
        }
@@ -3507,7 +3512,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        }
 
        if (reason != TASK_SWITCH_IRET) {
-               nseg_desc.type |= (1 << 8);
+               nseg_desc.type |= (1 << 1);
                save_guest_segment_descriptor(vcpu, tss_selector,
                                              &nseg_desc);
        }
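The two (1 << 8) -> (1 << 1) changes above are the same fix: the type field of a segment descriptor is only four bits wide, and for a 32-bit TSS the available/busy distinction is bit 1 of that field (type 9 = available TSS, type 0xb = busy TSS), so masking bit 8 never actually touched the busy flag. A small illustration, using made-up helper and macro names:

/* TSS descriptor types from the Intel SDM; busy differs from
 * available only in bit 1 of the 4-bit type field. */
#define DESC_TSS32_AVAIL        0x9     /* 0b1001 */
#define DESC_TSS32_BUSY         0xb     /* 0b1011 */
#define TSS_BUSY_BIT            1

static inline unsigned set_tss_busy(unsigned type)
{
        return type | (1 << TSS_BUSY_BIT);      /* 0x9 -> 0xb */
}

static inline unsigned clear_tss_busy(unsigned type)
{
        return type & ~(1 << TSS_BUSY_BIT);     /* 0xb -> 0x9 */
}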
@@ -3698,10 +3703,19 @@ void fx_init(struct kvm_vcpu *vcpu)
 {
        unsigned after_mxcsr_mask;
 
+       /*
+        * Touch the fpu the first time in non atomic context as if
+        * this is the first fpu instruction the exception handler
+        * will fire before the instruction returns and it'll have to
+        * allocate ram with GFP_KERNEL.
+        */
+       if (!used_math())
+               fx_save(&vcpu->arch.host_fx_image);
+
        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        fx_save(&vcpu->arch.host_fx_image);
-       fpu_init();
+       fx_finit();
        fx_save(&vcpu->arch.guest_fx_image);
        fx_restore(&vcpu->arch.host_fx_image);
        preempt_enable();
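Two things change in fx_init() above: an extra fx_save() is issued before the preempt-disabled region when the thread has never used the FPU (used_math() is false), so that the first FPU touch, and the fault it triggers, happen in a context where the kernel may allocate the lazily created FPU save area with GFP_KERNEL, as the new comment explains; and the global fpu_init() is replaced by KVM's local fx_finit(), which only reinitializes the FPU state about to be saved into the guest image. A sketch of what the fx_* helpers amount to, assuming fxsave-format images; the exact inline assembly in x86.c may differ:

/* Sketch of the fx_* helpers referenced above (assumed definitions). */
static void fx_save(struct i387_fxsave_struct *image)
{
        asm("fxsave (%0)" : : "r" (image) : "memory");
}

static void fx_restore(struct i387_fxsave_struct *image)
{
        asm("fxrstor (%0)" : : "r" (image));
}

static void fx_finit(void)
{
        asm("finit");   /* reset the FPU, without the host-wide setup fpu_init() does */
}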
@@ -3906,6 +3920,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
+       if (kvm->arch.ept_identity_pagetable)
+               put_page(kvm->arch.ept_identity_pagetable);
        kfree(kvm);
 }
 