Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c | 94
1 file changed, 67 insertions(+), 27 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03869eb7fcd6..34c85aa2e2d1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2006,10 +2006,12 @@ static void kvmclock_sync_fn(struct work_struct *work)
 					KVMCLOCK_SYNC_PERIOD);
 }
 
-static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 mcg_cap = vcpu->arch.mcg_cap;
 	unsigned bank_num = mcg_cap & 0xff;
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr) {
 	case MSR_IA32_MCG_STATUS:
@@ -2034,6 +2036,9 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if ((offset & 0x3) == 0 &&
 		    data != 0 && (data | (1 << 10)) != ~(u64)0)
 			return -1;
+		if (!msr_info->host_initiated &&
+		    (offset & 0x3) == 1 && data != 0)
+			return -1;
 		vcpu->arch.mce_banks[offset] = data;
 		break;
 	}
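A note on the (offset & 0x3) tests in this hunk: each machine-check bank occupies four consecutive MSRs starting at MSR_IA32_MC0_CTL, so with offset = msr - MSR_IA32_MC0_CTL the low two bits select the register within a bank. A minimal sketch of that decoding; the enum and helper names are illustrative, not part of the patch:

	/* Register layout within one MCE bank (four consecutive MSRs). */
	enum mce_bank_reg {
		MCE_CTL    = 0,	/* MCi_CTL: only 0 or ~0 (bit 10 may be clear) accepted */
		MCE_STATUS = 1,	/* MCi_STATUS: nonzero guest writes now rejected */
		MCE_ADDR   = 2,
		MCE_MISC   = 3,
	};

	static inline unsigned mce_bank_of(u32 msr)	/* illustrative helper */
	{
		return (msr - MSR_IA32_MC0_CTL) / 4;
	}

	static inline unsigned mce_reg_of(u32 msr)	/* illustrative helper */
	{
		return (msr - MSR_IA32_MC0_CTL) & 0x3;
	}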
@@ -2283,7 +2288,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
-		return set_msr_mce(vcpu, msr, data);
+		return set_msr_mce(vcpu, msr_info);
 
 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
@@ -4034,10 +4039,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	case KVM_SET_IDENTITY_MAP_ADDR: {
 		u64 ident_addr;
 
+		mutex_lock(&kvm->lock);
+		r = -EINVAL;
+		if (kvm->created_vcpus)
+			goto set_identity_unlock;
 		r = -EFAULT;
 		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
-			goto out;
+			goto set_identity_unlock;
 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
+set_identity_unlock:
+		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_SET_NR_MMU_PAGES:
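With this hunk, KVM_SET_IDENTITY_MAP_ADDR returns -EINVAL once any vCPU exists, so userspace has to issue it before KVM_CREATE_VCPU. A minimal usage sketch (error handling elided; the address is an example, not mandated by the API):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* vm_fd comes from KVM_CREATE_VM; call this before any KVM_CREATE_VCPU. */
	static int set_identity_map(int vm_fd)
	{
		__u64 ident_addr = 0xfeffc000;	/* example: page-aligned region below 4G */

		return ioctl(vm_fd, KVM_SET_IDENTITY_MAP_ADDR, &ident_addr);
	}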
@@ -5275,6 +5286,11 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr = emulator_read_gpr,
 	.write_gpr = emulator_write_gpr,
@@ -5316,6 +5332,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.set_nmi_mask = emulator_set_nmi_mask,
 	.get_hflags = emulator_get_hflags,
 	.set_hflags = emulator_set_hflags,
+	.pre_leave_smm = emulator_pre_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
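For context, the .pre_leave_smm member wired up above exists on both sides of the emulator boundary; the declarations (paraphrased from the header changes elsewhere in this series, not shown in this diff) look approximately like:

	/* struct x86_emulate_ops -- called by the emulator while handling RSM */
	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);

	/* struct kvm_x86_ops -- the vendor (VMX/SVM) implementation behind it */
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);

A nonzero return presumably lets the vendor code fail the RSM (for example if restoring nested state fails), which the emulator can then surface as an emulation error.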
@@ -6426,7 +6443,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
-	} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+	} else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		enter_smm(vcpu);
 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
@@ -6473,9 +6490,6 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
-#define put_smstate(type, buf, offset, val) \
-	*(type *)((buf) + (offset) - 0x7e00) = val
-
 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
 {
 	u32 flags = 0;
@@ -6641,13 +6655,20 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	u32 cr0;
 
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
-	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
 		enter_smm_save_state_32(vcpu, buf);
 
+	/*
+	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
+	 * vCPU state (e.g. leave guest mode) after we've saved the state into
+	 * the SMM state-save area.
+	 */
+	kvm_x86_ops->pre_enter_smm(vcpu, buf);
+
+	vcpu->arch.hflags |= HF_SMM_MASK;
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
 	if (kvm_x86_ops->get_nmi_mask(vcpu))
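The ordering in this hunk is the point: the state-save area is filled first, pre_enter_smm() runs second, and HF_SMM_MASK is set last, so the hook still observes the vCPU as "outside SMM" while being free to patch the saved image. A trivially conforming implementation is a no-op; this is a sketch only, and the real vendor hooks in this series do actual work such as leaving nested guest mode:

	/* Minimal conforming pre_enter_smm(): the 512-byte state-save image
	 * in smstate has already been filled in by enter_smm(). */
	static int noop_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
	{
		return 0;
	}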
@@ -6876,17 +6897,23 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (inject_pending_event(vcpu, req_int_win) != 0)
 		req_immediate_exit = true;
 	else {
-		/* Enable NMI/IRQ window open exits if needed.
+		/* Enable SMI/NMI/IRQ window open exits if needed.
 		 *
-		 * SMIs have two cases: 1) they can be nested, and
-		 * then there is nothing to do here because RSM will
-		 * cause a vmexit anyway; 2) or the SMI can be pending
-		 * because inject_pending_event has completed the
-		 * injection of an IRQ or NMI from the previous vmexit,
-		 * and then we request an immediate exit to inject the SMI.
+		 * SMIs have three cases:
+		 * 1) They can be nested, and then there is nothing to
+		 *    do here because RSM will cause a vmexit anyway.
+		 * 2) There is an ISA-specific reason why SMI cannot be
+		 *    injected, and the moment when this changes can be
+		 *    intercepted.
+		 * 3) Or the SMI can be pending because
+		 *    inject_pending_event has completed the injection
+		 *    of an IRQ or NMI from the previous vmexit, and
+		 *    then we request an immediate exit to inject the
+		 *    SMI.
 		 */
 		if (vcpu->arch.smi_pending && !is_smm(vcpu))
-			req_immediate_exit = true;
+			if (!kvm_x86_ops->enable_smi_window(vcpu))
+				req_immediate_exit = true;
 		if (vcpu->arch.nmi_pending)
 			kvm_x86_ops->enable_nmi_window(vcpu);
 		if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
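The call site fixes the return convention for enable_smi_window(): 0 means nothing ISA-specific blocks the SMI (so the caller requests an immediate exit to inject it), while nonzero means injection is currently impossible and an intercept has been armed for the moment that changes. A sketch of that shape, with hypothetical helpers standing in for vendor-specific logic:

	static int example_enable_smi_window(struct kvm_vcpu *vcpu)
	{
		if (smi_currently_blocked(vcpu)) {	/* hypothetical predicate */
			/* hypothetical: arrange a vmexit when SMIs unblock */
			arm_smi_unblock_intercept(vcpu);
			return 1;	/* case 2 of the comment above */
		}
		return 0;	/* caller sets req_immediate_exit (case 3) */
	}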
@@ -7798,18 +7825,40 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	kvm_async_pf_hash_reset(vcpu);
 	vcpu->arch.apf.halted = false;
 
+	if (kvm_mpx_supported()) {
+		void *mpx_state_buffer;
+
+		/*
+		 * Avoid having the INIT path from kvm_apic_has_events() run with
+		 * the guest FPU loaded; that would not let userspace fix the state.
+		 */
+		kvm_put_guest_fpu(vcpu);
+		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
+					XFEATURE_MASK_BNDREGS);
+		if (mpx_state_buffer)
+			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
+		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
+					XFEATURE_MASK_BNDCSR);
+		if (mpx_state_buffer)
+			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
+	}
+
 	if (!init_event) {
 		kvm_pmu_reset(vcpu);
 		vcpu->arch.smbase = 0x30000;
 
 		vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 		vcpu->arch.msr_misc_features_enables = 0;
+
+		vcpu->arch.xcr0 = XFEATURE_MASK_FP;
 	}
 
 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
+	vcpu->arch.ia32_xss = 0;
+
 	kvm_x86_ops->vcpu_reset(vcpu, init_event);
 }
 
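The NULL checks in the MPX block above follow from get_xsave_addr() returning NULL when the requested component is not materialized in the xsave image (its bit is clear in the xsave header), in which case the state is already in its init state and there is nothing to clear. Generalized, the guard pattern is:

	/* Clear an xsave component only if it is present in the buffer. */
	void *addr = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
				    XFEATURE_MASK_BNDREGS);
	if (addr)	/* NULL => component already in init state */
		memset(addr, 0, sizeof(struct mpx_bndreg_state));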
@@ -7974,16 +8023,11 @@ EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct page *page;
-	struct kvm *kvm;
 	int r;
 
-	BUG_ON(vcpu->kvm == NULL);
-	kvm = vcpu->kvm;
-
 	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
-	vcpu->arch.pv.pv_unhalted = false;
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
-	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
+	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	else
 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -8001,7 +8045,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (r < 0)
 		goto fail_free_pio_data;
 
-	if (irqchip_in_kernel(kvm)) {
+	if (irqchip_in_kernel(vcpu->kvm)) {
 		r = kvm_create_lapic(vcpu);
 		if (r < 0)
 			goto fail_mmu_destroy;
@@ -8023,10 +8067,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	fx_init(vcpu);
 
-	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
-	vcpu->arch.pv_time_enabled = false;
-
-	vcpu->arch.guest_supported_xcr0 = 0;
 	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
 
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);