author		Joel Schopp <joel.schopp@amd.com>	2015-03-02 14:43:31 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2015-03-10 19:29:15 -0400
commit		5cb56059c94ddfaf92567a1c6443deec8363ae1c (patch)
tree		b92abbd21ebb0f6b84708383e123259f8782ee4a /arch/x86/kvm
parent		1170adc6dd9e94d3cefb6eefe1f44b308d882515 (diff)
kvm: x86: make kvm_emulate_* consistant
Currently kvm_emulate() skips the instruction but kvm_emulate_* sometimes don't. The end result is that the caller ends up doing the skip themselves. Let's make them consistent.

Signed-off-by: Joel Schopp <joel.schopp@amd.com>
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
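In short: for halt and wbinvd the patch splits each helper into a core that does the work without touching the guest RIP (kvm_vcpu_halt(), kvm_emulate_wbinvd_noskip()) plus a kvm_emulate_* wrapper that first invokes the vendor's skip_emulated_instruction hook; for hypercalls the skip simply moves into kvm_emulate_hypercall() itself. A minimal sketch of the resulting convention, heavily simplified from the x86.c hunks below (the real kvm_vcpu_halt() also handles the userspace-irqchip case and exit reasons):

/*
 * Sketch only: the core helper leaves RIP alone; the kvm_emulate_*
 * wrapper advances RIP through the vendor callback, then delegates.
 */
int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	vcpu->arch.mp_state = KVM_MP_STATE_HALTED;	/* in-kernel irqchip case */
	return 1;
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->skip_emulated_instruction(vcpu);	/* step past the HLT */
	return kvm_vcpu_halt(vcpu);
}

With this split, exit handlers such as handle_halt() in vmx.c and halt_interception() in svm.c no longer need to skip the instruction themselves, while paths that must not skip (the x86 emulator and nested-VMX entry into the HLT activity state) call the core variant directly, as the hunks below show.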
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	2
-rw-r--r--	arch/x86/kvm/vmx.c	9
-rw-r--r--	arch/x86/kvm/x86.c	23
3 files changed, 23 insertions, 11 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 93dda3ccff03..16d6e5ca4c03 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
 static int halt_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
-	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-	skip_emulated_instruction(&svm->vcpu);
 	kvm_emulate_hypercall(&svm->vcpu);
 	return 1;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b20b417a3a..fbd949909628 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5000,7 +5000,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
 		if (vcpu->arch.halt_request) {
 			vcpu->arch.halt_request = 0;
-			return kvm_emulate_halt(vcpu);
+			return kvm_vcpu_halt(vcpu);
 		}
 		return 1;
 	}
@@ -5527,13 +5527,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 
 static int handle_halt(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	return kvm_emulate_halt(vcpu);
 }
 
 static int handle_vmcall(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	kvm_emulate_hypercall(vcpu);
 	return 1;
 }
@@ -5564,7 +5562,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
 
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	kvm_emulate_wbinvd(vcpu);
 	return 1;
 }
@@ -5903,7 +5900,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.halt_request) {
 		vcpu->arch.halt_request = 0;
-		ret = kvm_emulate_halt(vcpu);
+		ret = kvm_vcpu_halt(vcpu);
 		goto out;
 	}
 
@@ -9518,7 +9515,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vmcs12->launch_state = 1;
 
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
-		return kvm_emulate_halt(vcpu);
+		return kvm_vcpu_halt(vcpu);
 
 	vmx->nested.nested_run_pending = 1;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c5f7e035e0f1..d1a1feaa522b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4706,7 +4706,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
 }
 
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 {
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
@@ -4723,11 +4723,19 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 	wbinvd();
 	return X86EMUL_CONTINUE;
 }
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_emulate_wbinvd_noskip(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
+
+
 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
 {
-	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
@@ -5817,7 +5825,7 @@ void kvm_arch_exit(void)
 	free_percpu(shared_msrs);
 }
 
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
 	if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5828,6 +5836,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_vcpu_halt(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5912,6 +5927,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	unsigned long nr, a0, a1, a2, a3, ret;
 	int op_64_bit, r = 1;
 
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+
 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
 		return kvm_hv_hypercall(vcpu);
 