Diffstat (limited to 'arch/x86/kvm/vmx.c')
 arch/x86/kvm/vmx.c | 30 ++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0acac81f198b..3e556c68351b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2659,12 +2659,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
+			u64 old_msr_data = msr->data;
 			msr->data = data;
 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
 				preempt_disable();
-				kvm_set_shared_msr(msr->index, msr->data,
-						   msr->mask);
+				ret = kvm_set_shared_msr(msr->index, msr->data,
+						 msr->mask);
 				preempt_enable();
+				if (ret)
+					msr->data = old_msr_data;
 			}
 			break;
 		}
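The change above follows a save/attempt/roll-back idiom: the old MSR value is captured before the write, kvm_set_shared_msr() is now allowed to fail (the underlying host MSR write can fault), and on failure the cached value is restored so software state keeps matching hardware. A minimal standalone C sketch of the same idiom, with hypothetical names (try_set_shared, set_msr_cached), not KVM's API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical fallible setter, standing in for kvm_set_shared_msr()
 * after this patch: returns 0 on success, nonzero on failure. */
static int try_set_shared(uint64_t val)
{
	if (val == UINT64_MAX)		/* pretend this value faults */
		return -1;
	return 0;
}

/* Save / attempt / roll back, mirroring the hunk above. */
static int set_msr_cached(uint64_t *cache, uint64_t data)
{
	uint64_t old = *cache;		/* save before touching anything */
	int ret;

	*cache = data;			/* optimistic update */
	ret = try_set_shared(data);	/* the write that may now fail */
	if (ret)
		*cache = old;		/* undo, so the cache matches reality */
	return ret;
}

int main(void)
{
	uint64_t cache = 1;

	printf("ok=%d cache=%llu\n", set_msr_cached(&cache, 42),
	       (unsigned long long)cache);
	printf("err=%d cache=%llu\n", set_msr_cached(&cache, UINT64_MAX),
	       (unsigned long long)cache);
	return 0;
}

The rollback preserves one invariant: the cached copy never claims a value the hardware write rejected.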
@@ -4576,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	kvm_vcpu_reload_apic_access_page(vcpu);
+	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 	if (vmx_vm_has_apicv(vcpu->kvm))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
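kvm_make_request() does not reload the page itself; it marks the vcpu so the reload runs later on the vcpu thread, before the next guest entry, where touching per-vcpu state is safe. A toy sketch of that mark-now/service-later pattern (REQ_APIC_PAGE_RELOAD, make_request and service_requests are illustrative names, not KVM's API):

#include <stdio.h>

enum { REQ_APIC_PAGE_RELOAD = 1u << 0 };	/* illustrative request bit */

struct vcpu { unsigned int requests; };

static void make_request(struct vcpu *v, unsigned int req)
{
	v->requests |= req;		/* cheap, callable from any context */
}

/* Runs on the vcpu thread just before (re)entering the guest. */
static void service_requests(struct vcpu *v)
{
	if (v->requests & REQ_APIC_PAGE_RELOAD) {
		v->requests &= ~REQ_APIC_PAGE_RELOAD;
		printf("reloading APIC access page\n");
	}
}

int main(void)
{
	struct vcpu v = { 0 };

	make_request(&v, REQ_APIC_PAGE_RELOAD);	/* what the reset path now does */
	service_requests(&v);			/* what the entry path does */
	return 0;
}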
@@ -5291,7 +5294,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 	msr.data = data;
 	msr.index = ecx;
 	msr.host_initiated = false;
-	if (vmx_set_msr(vcpu, &msr) != 0) {
+	if (kvm_set_msr(vcpu, &msr) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
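Routing the guest's WRMSR through kvm_set_msr() instead of calling the VMX backend directly means any checks placed in the common wrapper now run before vendor code sees the write; a nonzero return still takes the existing #GP-injection path above. A sketch of that validate-in-the-common-entry-point shape (msr_value_ok is a made-up placeholder for whatever the wrapper checks):

#include <stdint.h>
#include <stdio.h>

struct msr_data { uint32_t index; uint64_t data; };

/* Vendor backend, standing in for vmx_set_msr(). */
static int vendor_set_msr(struct msr_data *msr)
{
	printf("vendor write: msr 0x%x = 0x%llx\n",
	       msr->index, (unsigned long long)msr->data);
	return 0;
}

/* Hypothetical generic check; the common wrapper is where
 * vendor-independent validation belongs. */
static int msr_value_ok(const struct msr_data *msr)
{
	return msr->data != UINT64_MAX;	/* stand-in for a real sanity check */
}

/* Common entry point, standing in for kvm_set_msr(): validate first,
 * only then hand off to the vendor callback. */
static int set_msr(struct msr_data *msr)
{
	if (!msr_value_ok(msr))
		return 1;		/* caller injects #GP, as in handle_wrmsr() */
	return vendor_set_msr(msr);
}

int main(void)
{
	struct msr_data good = { 0x174, 0x1000 };
	struct msr_data bad  = { 0x174, UINT64_MAX };

	printf("good -> %d\n", set_msr(&good));
	printf("bad  -> %d\n", set_msr(&bad));
	return 0;
}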
@@ -6423,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
+	preempt_disable();
+
 	vmcs_load(shadow_vmcs);
 
 	for (i = 0; i < num_fields; i++) {
@@ -6446,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 
 	vmcs_clear(shadow_vmcs);
 	vmcs_load(vmx->loaded_vmcs->vmcs);
+
+	preempt_enable();
 }
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
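These two hunks bracket copy_shadow_to_vmcs12() with preempt_disable()/preempt_enable(). The loaded VMCS is per-CPU state: if the task were preempted and migrated between vmcs_load(shadow_vmcs) and the final vmcs_load(vmx->loaded_vmcs->vmcs), the old CPU would be left with the shadow VMCS loaded while the accesses continued on the wrong CPU. A userspace sketch of the invariant the bracket enforces (preempt_count and maybe_migrate are stand-ins, not kernel calls):

#include <assert.h>
#include <stdio.h>

static int preempt_count;			/* models the kernel's counter */
static const char *loaded = "loaded_vmcs";	/* per-CPU "current VMCS" slot */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* A preemption point: migrating while the shadow VMCS is loaded would
 * strand it on the old CPU, so the bracket must be held here. */
static void maybe_migrate(void)
{
	assert(preempt_count > 0);
}

static void copy_shadow_to_vmcs12(void)
{
	preempt_disable();		/* pin the task to this CPU */

	loaded = "shadow_vmcs";		/* vmcs_load(shadow_vmcs) */
	maybe_migrate();		/* any point inside the copy loop */
	loaded = "loaded_vmcs";		/* vmcs_clear + reload of loaded_vmcs */

	preempt_enable();		/* migration is harmless again */
}

int main(void)
{
	copy_shadow_to_vmcs12();
	printf("current VMCS: %s\n", loaded);
	return 0;
}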
@@ -6743,6 +6750,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_invvpid(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
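handle_invvpid() is deliberately minimal: this kernel does not expose INVVPID to guests, so the correct emulation is the #UD the instruction would raise on hardware without the feature, and returning 1 resumes the guest per the contract the comment above describes. A toy dispatcher showing both sides of that contract (all names here are illustrative):

#include <stdio.h>

struct kvm_run { unsigned int exit_reason; };
struct vcpu { struct kvm_run run; int pending_ud; };

/* Handled fully in the kernel: queue #UD for the guest, resume it. */
static int handle_invvpid_like(struct vcpu *v)
{
	v->pending_ud = 1;	/* kvm_queue_exception(vcpu, UD_VECTOR) */
	return 1;		/* guest execution may resume */
}

/* Needs userspace: describe why in kvm_run, then return 0. */
static int handle_io_like(struct vcpu *v)
{
	v->run.exit_reason = 2;	/* e.g. a KVM_EXIT_IO-style code */
	return 0;
}

int main(void)
{
	struct vcpu v = { { 0 }, 0 };

	printf("invvpid -> %d (resume guest)\n", handle_invvpid_like(&v));
	printf("io      -> %d (to userspace, reason %u)\n",
	       handle_io_like(&v), v.run.exit_reason);
	return 0;
}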
@@ -6788,6 +6801,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_MWAIT_INSTRUCTION]       = handle_mwait,
 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
 	[EXIT_REASON_INVEPT]                  = handle_invept,
+	[EXIT_REASON_INVVPID]                 = handle_invvpid,
 };
 
 static const int kvm_vmx_max_exit_handlers =
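Wiring the new handler up is a single designated-initializer entry: the exit reason is the array index, so unlisted reasons stay NULL and fall through to the unknown-exit path that the last hunk rewrites. A compact sketch of this dispatch-table shape (toy exit numbers):

#include <stdio.h>

enum { EXIT_HLT, EXIT_INVVPID, EXIT_MAX };	/* toy exit reason numbers */

static int handle_hlt(void)     { puts("hlt");     return 1; }
static int handle_invvpid(void) { puts("invvpid"); return 1; }

/* Designated initializers keep the table sparse and self-documenting;
 * unlisted reasons stay NULL. */
static int (*const handlers[EXIT_MAX])(void) = {
	[EXIT_HLT]     = handle_hlt,
	[EXIT_INVVPID] = handle_invvpid,
};

static int dispatch(unsigned int reason)
{
	if (reason < EXIT_MAX && handlers[reason])
		return handlers[reason]();
	puts("unknown exit reason");	/* the fallback path */
	return 1;
}

int main(void)
{
	dispatch(EXIT_INVVPID);
	dispatch(5);			/* out of range -> fallback */
	return 0;
}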
@@ -7023,7 +7037,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
-	case EXIT_REASON_INVEPT:
+	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
 		/*
 		 * VMX instructions trap unconditionally. This allows L1 to
 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
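This hunk teaches nested_vmx_exit_handled() that INVVPID, like every other VMX instruction, is always reflected to L1 while L2 runs, since L1 may want to emulate it for its own nested guest. A toy version of that L0-or-L1 decision (the values 12/50/53 match the SDM encodings for HLT/INVEPT/INVVPID, but the function itself is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { EXIT_HLT = 12, EXIT_INVEPT = 50, EXIT_INVVPID = 53 };

/* Should L0 handle this L2 exit itself, or hand it to L1?
 * VMX instructions always go to L1. */
static bool exit_handled_by_l1(unsigned int reason)
{
	switch (reason) {
	case EXIT_INVEPT:
	case EXIT_INVVPID:	/* the case this hunk adds */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("invvpid -> %s\n", exit_handled_by_l1(EXIT_INVVPID) ? "L1" : "L0");
	printf("hlt     -> %s\n", exit_handled_by_l1(EXIT_HLT) ? "L1" : "L0");
	return 0;
}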
@@ -7164,10 +7178,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
-		vcpu->run->hw.hardware_exit_reason = exit_reason;
+		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
 	}
-	return 0;
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
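The old fallback reported KVM_EXIT_UNKNOWN to userspace, which typically terminates the VM, so a reachable-but-unhandled exit reason was effectively guest-triggerable VM death. The new code warns once on the host and injects #UD, letting the guest continue. A userspace approximation of that warn-once-and-degrade pattern (warn_once is a stand-in for the kernel's WARN_ONCE):

#include <stdio.h>

/* Print the diagnostic only the first time the call site fires,
 * then keep going; each expansion gets its own static flag. */
#define warn_once(fmt, ...) do {			\
	static int warned;				\
	if (!warned) {					\
		warned = 1;				\
		fprintf(stderr, fmt, __VA_ARGS__);	\
	}						\
} while (0)

static int handle_unknown_exit(unsigned int reason)
{
	warn_once("vmx: unexpected exit reason 0x%x\n", reason);
	/* Inject #UD into the guest instead of surfacing an
	 * "unknown exit" to userspace and losing the VM. */
	return 1;	/* resume the guest */
}

int main(void)
{
	handle_unknown_exit(0x99);	/* warns */
	handle_unknown_exit(0x99);	/* silent the second time */
	return 0;
}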