 arch/x86/include/uapi/asm/vmx.h |   5
 arch/x86/kvm/vmx.c              | 101
 arch/x86/kvm/x86.c              |   1
 virt/kvm/kvm_main.c             |   1
 4 files changed, 97 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index b813bf9da1e2..ff2b8e28883e 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -56,6 +56,7 @@
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE           32
 #define EXIT_REASON_INVALID_STATE       33
+#define EXIT_REASON_MSR_LOAD_FAIL       34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
 #define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
@@ -116,10 +117,14 @@
 	{ EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
 	{ EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
 	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+	{ EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
 	{ EXIT_REASON_INVD,                  "INVD" }, \
 	{ EXIT_REASON_INVVPID,               "INVVPID" }, \
 	{ EXIT_REASON_INVPCID,               "INVPCID" }, \
 	{ EXIT_REASON_XSAVES,                "XSAVES" }, \
 	{ EXIT_REASON_XRSTORS,               "XRSTORS" }
 
+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
+
 #endif /* _UAPIVMX_H */
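
For orientation: the MSR load/store areas these new definitions support are arrays of 16-byte records defined by the Intel SDM, which KVM models as struct vmx_msr_entry in arch/x86/include/asm/vmx.h. A minimal sketch of that layout, whose fields the new vmx.c helpers below read and validate:

	/* Sketch of the SDM's MSR-area entry layout; KVM's real
	 * definition lives in arch/x86/include/asm/vmx.h. */
	struct vmx_msr_entry {
		u32 index;	/* MSR number to load or store */
		u32 reserved;	/* must be zero; the new checks enforce this */
		u64 value;	/* value to load, or slot a store fills in */
	} __aligned(16);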
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4c58d884838..9137d2ba26a2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6143,6 +6143,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 	 */
 }
 
+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+	/* TODO: not to reset guest simply here. */
+	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+	pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 {
 	struct vcpu_vmx *vmx =
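
On bare metal, a VMX abort records the indicator in the VMX-abort field of the VMCS and shuts the logical processor down; as the TODO notes, resetting the guest via a triple fault is only a stopgap here, with the indicator surfaced through pr_warn(). A hypothetical decoding helper for the two indicators this patch can report (the value-to-reason mapping follows the names chosen in vmx.h, which mirror the SDM's VMX-abort indicators):

	/* Illustrative only; not part of the patch. */
	static const char *vmx_abort_reason(u32 indicator)
	{
		switch (indicator) {
		case VMX_ABORT_SAVE_GUEST_MSR_FAIL:	/* 1 */
			return "failed to save guest MSRs on VM exit";
		case VMX_ABORT_LOAD_HOST_MSR_FAIL:	/* 4 */
			return "failed to load host MSRs on VM exit";
		default:
			return "unknown VMX abort";
		}
	}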
@@ -8286,6 +8293,67 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
+static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
+{
+	if (e->index >> 8 == 0x8 || e->reserved != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
+{
+	if (e->index == MSR_FS_BASE ||
+	    e->index == MSR_GS_BASE ||
+	    nested_vmx_msr_check_common(e))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Load guest's/host's msr at nested entry/exit.
+ * return 0 for success, entry index for failure.
+ */
+static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+	u32 i;
+	struct vmx_msr_entry e;
+	struct msr_data msr;
+
+	msr.host_initiated = false;
+	for (i = 0; i < count; i++) {
+		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), &e, sizeof(e));
+		if (nested_vmx_load_msr_check(&e))
+			goto fail;
+		msr.index = e.index;
+		msr.data = e.value;
+		if (kvm_set_msr(vcpu, &msr))
+			goto fail;
+	}
+	return 0;
+fail:
+	return i + 1;
+}
+
+static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+	u32 i;
+	struct vmx_msr_entry e;
+
+	for (i = 0; i < count; i++) {
+		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+			       &e, 2 * sizeof(u32));
+		if (nested_vmx_msr_check_common(&e))
+			return -EINVAL;
+		if (kvm_get_msr(vcpu, e.index, &e.value))
+			return -EINVAL;
+		kvm_write_guest(vcpu->kvm,
+				gpa + i * sizeof(e) +
+					offsetof(struct vmx_msr_entry, value),
+				&e.value, sizeof(e.value));
+	}
+	return 0;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
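
A note on the validation above: e->index >> 8 == 0x8 rejects the x2APIC MSR range 0x800-0x8ff, which the SDM disallows in MSR load/store areas, and nested_vmx_load_msr_check() additionally refuses MSR_FS_BASE and MSR_GS_BASE, which are loaded through dedicated VMCS fields instead. A self-contained restatement of the range check (hypothetical helper, same predicate as nested_vmx_msr_check_common()):

	#include <stdbool.h>
	#include <stdint.h>

	static bool msr_entry_is_illegal(uint32_t index, uint32_t reserved)
	{
		/* index >> 8 == 0x8 matches 0x800..0x8ff: the x2APIC MSRs */
		return (index >> 8) == 0x8 || reserved != 0;
	}

	/* msr_entry_is_illegal(0x802, 0) -> true  (x2APIC register)      */
	/* msr_entry_is_illegal(0x174, 0) -> false (IA32_SYSENTER_CS, ok) */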
@@ -8582,6 +8650,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	int cpu;
 	struct loaded_vmcs *vmcs02;
 	bool ia32e;
+	u32 msr_entry_idx;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -8629,15 +8698,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if (vmcs12->vm_entry_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_store_count > 0) {
-		pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
-				    __func__);
-		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
-	}
-
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				nested_vmx_true_procbased_ctls_low,
 				nested_vmx_procbased_ctls_high) ||
@@ -8739,10 +8799,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	vmx_segment_cache_clear(vmx);
 
-	vmcs12->launch_state = 1;
-
 	prepare_vmcs02(vcpu, vmcs12);
 
+	msr_entry_idx = nested_vmx_load_msr(vcpu,
+					    vmcs12->vm_entry_msr_load_addr,
+					    vmcs12->vm_entry_msr_load_count);
+	if (msr_entry_idx) {
+		leave_guest_mode(vcpu);
+		vmx_load_vmcs01(vcpu);
+		nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
+		return 1;
+	}
+
+	vmcs12->launch_state = 1;
+
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
 		return kvm_emulate_halt(vcpu);
 
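
Two details are worth noting here. nested_vmx_load_msr() returns the 1-based index of the failing entry, which nested_vmx_entry_failure() reports as the exit qualification, matching the SDM's format for a VM-entry failure due to MSR loading. And vmcs12->launch_state is now set only after the MSR load succeeds, so a rejected entry does not mark the VMCS as launched. A hypothetical sketch of how an L1 hypervisor would observe such a failure:

	#include <stdint.h>
	#include <stdio.h>

	/* Bit 31 of the exit reason flags a failed VM entry (SDM). */
	#define VMX_EXIT_REASONS_FAILED_VMENTRY	0x80000000u

	static void handle_l1_exit(uint32_t exit_reason, uint64_t exit_qual)
	{
		if ((exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) &&
		    (exit_reason & 0xffffu) == 34 /* MSR_LOAD_FAIL */)
			printf("MSR-load entry %llu rejected\n",
			       (unsigned long long)exit_qual);
	}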
@@ -9172,6 +9243,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
 	kvm_set_dr(vcpu, 7, 0x400);
 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
+				vmcs12->vm_exit_msr_load_count))
+		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
 }
 
 /*
@@ -9193,6 +9268,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
 		       exit_qualification);
 
+	if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
+				 vmcs12->vm_exit_msr_store_count))
+		nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+
 	vmx_load_vmcs01(vcpu);
 
 	if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
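
Unlike the entry path, a failure on the exit path has no architectural way of being reported to L1, so both new call sites fall back to nested_vmx_abort(). The ordering also matters: guest MSRs are stored before switching back to vmcs01, while the L2 values are still live, and L1's "host" MSRs are loaded only once vmcs01 state has been restored. In outline:

	/*
	 * Exit-path ordering as implemented above (summary only):
	 *
	 * nested_vmx_vmexit()
	 *   prepare_vmcs12()        - snapshot L2 state into vmcs12
	 *   nested_vmx_store_msr()  - save guest MSRs while still live;
	 *                             on failure: VMX abort, indicator 1
	 *   vmx_load_vmcs01()       - switch back to L1's VMCS
	 *   load_vmcs12_host_state()
	 *     nested_vmx_load_msr() - load L1's host MSRs;
	 *                             on failure: VMX abort, indicator 4
	 */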
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c259814200bd..af9faed270f1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2324,6 +2324,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
+EXPORT_SYMBOL_GPL(kvm_get_msr);
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1cc6e2e19982..167e8c14b143 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1593,6 +1593,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_write_guest);
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
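
The two one-line exports exist because kvm_get_msr() and kvm_write_guest() are defined in the core (kvm.ko) but are now called from vmx.c, which is built into the separate kvm-intel.ko module; without EXPORT_SYMBOL_GPL the module would fail to resolve the symbols at load time. A hypothetical condensation of the per-entry work in nested_vmx_store_msr(), showing exactly the two calls that need the exports:

	#include <linux/kvm_host.h>

	/* Illustrative only: read one MSR from the vCPU and write its
	 * value into the guest-physical store area at 'gpa'. */
	static int demo_store_one_msr(struct kvm_vcpu *vcpu, gpa_t gpa,
				      u32 index)
	{
		u64 value;

		if (kvm_get_msr(vcpu, index, &value))	/* needs new export */
			return -EINVAL;
		return kvm_write_guest(vcpu->kvm, gpa,	/* needs new export */
				       &value, sizeof(value));
	}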