Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c | 129
1 file changed, 88 insertions(+), 41 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 506bd2b4b8bb..edbf00ec56b3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	gfn_t gfn;
 	int r;
 
-	if (is_long_mode(vcpu) || !is_pae(vcpu))
+	if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
 		return false;
 
 	if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_PLATFORM_INFO:
 		if (!msr_info->host_initiated ||
-		    data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
 		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
 		     cpuid_fault_enabled(vcpu)))
 			return 1;
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.osvw.status;
 		break;
 	case MSR_PLATFORM_INFO:
+		if (!msr_info->host_initiated &&
+		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
+			return 1;
 		msr_info->data = vcpu->arch.msr_platform_info;
 		break;
 	case MSR_MISC_FEATURES_ENABLES:
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_SPLIT_IRQCHIP:
 	case KVM_CAP_IMMEDIATE_EXIT:
 	case KVM_CAP_GET_MSR_FEATURES:
+	case KVM_CAP_MSR_PLATFORM_INFO:
 		r = 1;
 		break;
 	case KVM_CAP_SYNC_REGS:
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 			break;
 
 		BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+		r = -EFAULT;
 		if (get_user(user_data_size, &user_kvm_nested_state->size))
-			return -EFAULT;
+			break;
 
 		r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
 						  user_data_size);
 		if (r < 0)
-			return r;
+			break;
 
 		if (r > user_data_size) {
 			if (put_user(r, &user_kvm_nested_state->size))
-				return -EFAULT;
-			return -E2BIG;
+				r = -EFAULT;
+			else
+				r = -E2BIG;
+			break;
 		}
+
 		r = 0;
 		break;
 	}
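The -E2BIG path above implies a two-step call pattern from userspace: probe with a small buffer, let the kernel write the required length back into ->size, then retry. A minimal sketch of that pattern (hypothetical helper, not part of this patch), assuming a vCPU fd and the kvm_nested_state layout from this series:

    #include <err.h>
    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fetch nested state, growing the buffer if the kernel asks for more. */
    static struct kvm_nested_state *fetch_nested_state(int vcpu_fd)
    {
            struct kvm_nested_state hdr = { .size = sizeof(hdr) };
            struct kvm_nested_state *state;

            if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &hdr) < 0 && errno != E2BIG)
                    err(1, "KVM_GET_NESTED_STATE (probe)");

            /* On E2BIG the kernel wrote the required size into hdr.size. */
            state = calloc(1, hdr.size);
            if (!state)
                    err(1, "calloc");
            state->size = hdr.size;
            if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)
                    err(1, "KVM_GET_NESTED_STATE");
            return state;
    }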
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (!kvm_x86_ops->set_nested_state)
 			break;
 
+		r = -EFAULT;
 		if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
-			return -EFAULT;
+			break;
 
+		r = -EINVAL;
 		if (kvm_state.size < sizeof(kvm_state))
-			return -EINVAL;
+			break;
 
 		if (kvm_state.flags &
 		    ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
-			return -EINVAL;
+			break;
 
 		/* nested_run_pending implies guest_mode. */
 		if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
-			return -EINVAL;
+			break;
 
 		r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
 		break;
@@ -4350,6 +4359,10 @@ split_irqchip_unlock:
 		kvm->arch.pause_in_guest = true;
 		r = 0;
 		break;
+	case KVM_CAP_MSR_PLATFORM_INFO:
+		kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
+		r = 0;
+		break;
 	default:
 		r = -EINVAL;
 		break;
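Userspace opts out per-VM through KVM_ENABLE_CAP; passing args[0] = 0 makes guest reads of MSR_PLATFORM_INFO fail (the `return 1` added to kvm_get_msr_common above injects #GP into the guest). A sketch, assuming a VM fd that accepts KVM_CHECK_EXTENSION and KVM_ENABLE_CAP:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hide MSR_PLATFORM_INFO from the guest; returns 0 on success. */
    static int hide_msr_platform_info(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_MSR_PLATFORM_INFO,
                    .args = { 0 },  /* 0: guest RDMSR of the MSR now faults */
            };

            if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MSR_PLATFORM_INFO) <= 0)
                    return -1;      /* kernel predates this capability */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

Note the matching default in kvm_arch_init_vm() further down: the MSR stays readable unless userspace explicitly clears it.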
@@ -4987,7 +5000,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
 		emul_type = 0;
 	}
 
-	er = emulate_instruction(vcpu, emul_type);
+	er = kvm_emulate_instruction(vcpu, emul_type);
 	if (er == EMULATE_USER_EXIT)
 		return 0;
 	if (er != EMULATE_DONE)
@@ -5870,7 +5883,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	gpa_t gpa = cr2;
 	kvm_pfn_t pfn;
 
-	if (emulation_type & EMULTYPE_NO_REEXECUTE)
+	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+		return false;
+
+	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
 		return false;
 
 	if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5974,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	 */
 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
 
-	if (!(emulation_type & EMULTYPE_RETRY))
+	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+		return false;
+
+	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
 		return false;
 
 	if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6295,19 @@ restart:
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(x86_emulate_instruction);
+
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+					void *insn, int insn_len)
+{
+	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
 			    unsigned short port)
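With x86_emulate_instruction() no longer exported, code outside x86.c re-enters the emulator only through these two wrappers, which hide the cr2/insn arguments. A hypothetical vendor exit handler (illustration only, not from this patch) would now look like:

    /* Sketch: re-enter the emulator from a vendor exit handler. */
    static int handle_some_emulated_exit(struct kvm_vcpu *vcpu)
    {
            if (kvm_emulate_instruction(vcpu, 0) != EMULATE_DONE)
                    return 0;       /* bail out to userspace */
            return 1;               /* resume guest execution */
    }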
@@ -7343,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
 
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+	smp_send_reschedule(vcpu->cpu);
+}
+EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
+
 /*
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
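__kvm_request_immediate_exit() is the generic fallback (a self-IPI via the scheduler) for the new kvm_x86_ops->request_immediate_exit hook used in vcpu_enter_guest() below; the vendor wiring lives outside this file, and VMX can presumably substitute a cheaper mechanism such as a zero-valued preemption timer. A sketch of the expected assignment (struct name assumed, not shown in this diff):

    /* e.g. in svm.c (sketch): */
    static struct kvm_x86_ops svm_x86_ops = {
            /* ... other callbacks ... */
            .request_immediate_exit = __kvm_request_immediate_exit,
    };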
@@ -7547,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	if (req_immediate_exit) {
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-		smp_send_reschedule(vcpu->cpu);
+		kvm_x86_ops->request_immediate_exit(vcpu);
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
@@ -7734,7 +7771,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
 	int r;
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+	r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	if (r != EMULATE_DONE)
 		return 0;
@@ -7811,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+/* Swap (qemu) user FPU context for the guest FPU context. */
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+	/* PKRU is separately restored in kvm_x86_ops->run. */
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+				~XFEATURE_MASK_PKRU);
+	preempt_enable();
+	trace_kvm_fpu(1);
+}
+
+/* When vcpu_run ends, restore user space FPU context. */
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+	preempt_enable();
+	++vcpu->stat.fpu_reload;
+	trace_kvm_fpu(0);
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
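The two helpers move here and become static; their previous extern copies are deleted further down, since kvm_arch_vcpu_ioctl_run() is now their only caller. A simplified sketch of the resulting bracketing (the real function also handles vcpu_load, sigsets and MMIO completion around these calls):

    int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
    {
            int r;

            kvm_load_guest_fpu(vcpu);   /* stash user FPU, install guest FPU */
            r = vcpu_run(vcpu);         /* enter the guest execution loop */
            kvm_put_guest_fpu(vcpu);    /* stash guest FPU, restore user FPU */
            return r;
    }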
@@ -8159,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	kvm_update_cpuid(vcpu);
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
+	if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		mmu_reset_needed = 1;
 	}
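This mirrors the pdptrs_changed() hunk at the top of the diff: PDPTEs are only architecturally loaded when 32-bit PAE paging is actually enabled, so both sites now also require is_paging(). The shared condition, written out as a hypothetical helper (not in the patch):

    /* PAE PDPTRs are in use only under enabled, non-long-mode PAE paging. */
    static bool pae_paging_active(struct kvm_vcpu *vcpu)
    {
            return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
    }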
@@ -8388,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
-/* Swap (qemu) user FPU context for the guest FPU context. */
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
-	/* PKRU is separately restored in kvm_x86_ops->run. */
-	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
-				~XFEATURE_MASK_PKRU);
-	preempt_enable();
-	trace_kvm_fpu(1);
-}
-
-/* When vcpu_run ends, restore user space FPU context. */
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
-	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
-	preempt_enable();
-	++vcpu->stat.fpu_reload;
-	trace_kvm_fpu(0);
-}
-
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8834,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
 	pvclock_update_vm_gtod_copy(kvm);
 
+	kvm->arch.guest_can_read_msr_platform_info = true;
+
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
@@ -9182,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvm_page_track_flush_slot(kvm, slot);
 }
 
+static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	return (is_guest_mode(vcpu) &&
+		kvm_x86_ops->guest_apic_has_interrupt &&
+		kvm_x86_ops->guest_apic_has_interrupt(vcpu));
+}
+
 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 {
 	if (!list_empty_careful(&vcpu->async_pf.done))
9185 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | 9231 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) |
9186 | { | 9232 | { |
9187 | if (!list_empty_careful(&vcpu->async_pf.done)) | 9233 | if (!list_empty_careful(&vcpu->async_pf.done)) |
@@ -9206,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | |||
9206 | return true; | 9252 | return true; |
9207 | 9253 | ||
9208 | if (kvm_arch_interrupt_allowed(vcpu) && | 9254 | if (kvm_arch_interrupt_allowed(vcpu) && |
9209 | kvm_cpu_has_interrupt(vcpu)) | 9255 | (kvm_cpu_has_interrupt(vcpu) || |
9256 | kvm_guest_apic_has_interrupt(vcpu))) | ||
9210 | return true; | 9257 | return true; |
9211 | 9258 | ||
9212 | if (kvm_hv_has_stimer_pending(vcpu)) | 9259 | if (kvm_hv_has_stimer_pending(vcpu)) |