diff options
| author | Kyle Huey <me@kylehuey.com> | 2016-11-29 15:40:40 -0500 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2016-12-08 09:31:05 -0500 |
| commit | 6affcbedcac79b01c8d01948a693461040133e46 (patch) | |
| tree | 0a6ed81ce4fe4afe59af892a80649b8b81650a86 | |
| parent | eb2775621701e6ee3ea2a474437d04e93ccdcb2f (diff) | |
KVM: x86: Add kvm_skip_emulated_instruction and use it.
kvm_skip_emulated_instruction calls both
kvm_x86_ops->skip_emulated_instruction and kvm_vcpu_check_singlestep,
skipping the emulated instruction and generating a trap if necessary.
Replacing skip_emulated_instruction calls with
kvm_skip_emulated_instruction is straightforward, except for:
- ICEBP, which is already inside a trap, so avoid triggering another trap.
- Instructions that can trigger exits to userspace, such as the IO insns,
MOVs to CR8, and HALT. If kvm_skip_emulated_instruction does trigger a
KVM_GUESTDBG_SINGLESTEP exit, and the handling code for
IN/OUT/MOV CR8/HALT also triggers an exit to userspace, the latter will
take precedence. The singlestep will be triggered again on the next
instruction, which is the current behavior.
- Task switch instructions which would require additional handling (e.g.
the task switch bit) and are instead left alone.
- Cases where VMLAUNCH/VMRESUME do not proceed to the next instruction,
which do not trigger singlestep traps as mentioned previously.
Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
| -rw-r--r-- | arch/x86/include/asm/kvm_host.h | 3 | ||||
| -rw-r--r-- | arch/x86/kvm/cpuid.c | 3 | ||||
| -rw-r--r-- | arch/x86/kvm/svm.c | 11 | ||||
| -rw-r--r-- | arch/x86/kvm/vmx.c | 177 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.c | 33 |
5 files changed, 103 insertions(+), 124 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 80bad5c372bf..8d1587092851 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -1368,7 +1368,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, | |||
| 1368 | bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu); | 1368 | bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu); |
| 1369 | extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); | 1369 | extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
| 1370 | 1370 | ||
| 1371 | void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); | 1371 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); |
| 1372 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); | ||
| 1372 | 1373 | ||
| 1373 | int kvm_is_in_guest(void); | 1374 | int kvm_is_in_guest(void); |
| 1374 | 1375 | ||
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 07cc62955520..dc2685e3f8ea 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
| @@ -890,7 +890,6 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) | |||
| 890 | kvm_register_write(vcpu, VCPU_REGS_RBX, ebx); | 890 | kvm_register_write(vcpu, VCPU_REGS_RBX, ebx); |
| 891 | kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); | 891 | kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); |
| 892 | kvm_register_write(vcpu, VCPU_REGS_RDX, edx); | 892 | kvm_register_write(vcpu, VCPU_REGS_RDX, edx); |
| 893 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 893 | return kvm_skip_emulated_instruction(vcpu); |
| 894 | return 1; | ||
| 895 | } | 894 | } |
| 896 | EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); | 895 | EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 5bdffcd781f5..08a4d3ab3455 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -3151,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm) | |||
| 3151 | 3151 | ||
| 3152 | static int wbinvd_interception(struct vcpu_svm *svm) | 3152 | static int wbinvd_interception(struct vcpu_svm *svm) |
| 3153 | { | 3153 | { |
| 3154 | kvm_emulate_wbinvd(&svm->vcpu); | 3154 | return kvm_emulate_wbinvd(&svm->vcpu); |
| 3155 | return 1; | ||
| 3156 | } | 3155 | } |
| 3157 | 3156 | ||
| 3158 | static int xsetbv_interception(struct vcpu_svm *svm) | 3157 | static int xsetbv_interception(struct vcpu_svm *svm) |
| @@ -3275,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm) | |||
| 3275 | return emulate_on_interception(svm); | 3274 | return emulate_on_interception(svm); |
| 3276 | 3275 | ||
| 3277 | err = kvm_rdpmc(&svm->vcpu); | 3276 | err = kvm_rdpmc(&svm->vcpu); |
| 3278 | kvm_complete_insn_gp(&svm->vcpu, err); | 3277 | return kvm_complete_insn_gp(&svm->vcpu, err); |
| 3279 | |||
| 3280 | return 1; | ||
| 3281 | } | 3278 | } |
| 3282 | 3279 | ||
| 3283 | static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, | 3280 | static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, |
| @@ -3374,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm) | |||
| 3374 | } | 3371 | } |
| 3375 | kvm_register_write(&svm->vcpu, reg, val); | 3372 | kvm_register_write(&svm->vcpu, reg, val); |
| 3376 | } | 3373 | } |
| 3377 | kvm_complete_insn_gp(&svm->vcpu, err); | 3374 | return kvm_complete_insn_gp(&svm->vcpu, err); |
| 3378 | |||
| 3379 | return 1; | ||
| 3380 | } | 3375 | } |
| 3381 | 3376 | ||
| 3382 | static int dr_interception(struct vcpu_svm *svm) | 3377 | static int dr_interception(struct vcpu_svm *svm) |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f4f6304f9583..16a144d22033 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -5556,7 +5556,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu) | |||
| 5556 | static int handle_io(struct kvm_vcpu *vcpu) | 5556 | static int handle_io(struct kvm_vcpu *vcpu) |
| 5557 | { | 5557 | { |
| 5558 | unsigned long exit_qualification; | 5558 | unsigned long exit_qualification; |
| 5559 | int size, in, string; | 5559 | int size, in, string, ret; |
| 5560 | unsigned port; | 5560 | unsigned port; |
| 5561 | 5561 | ||
| 5562 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 5562 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| @@ -5570,9 +5570,14 @@ static int handle_io(struct kvm_vcpu *vcpu) | |||
| 5570 | 5570 | ||
| 5571 | port = exit_qualification >> 16; | 5571 | port = exit_qualification >> 16; |
| 5572 | size = (exit_qualification & 7) + 1; | 5572 | size = (exit_qualification & 7) + 1; |
| 5573 | skip_emulated_instruction(vcpu); | ||
| 5574 | 5573 | ||
| 5575 | return kvm_fast_pio_out(vcpu, size, port); | 5574 | ret = kvm_skip_emulated_instruction(vcpu); |
| 5575 | |||
| 5576 | /* | ||
| 5577 | * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered | ||
| 5578 | * KVM_EXIT_DEBUG here. | ||
| 5579 | */ | ||
| 5580 | return kvm_fast_pio_out(vcpu, size, port) && ret; | ||
| 5576 | } | 5581 | } |
| 5577 | 5582 | ||
| 5578 | static void | 5583 | static void |
| @@ -5670,6 +5675,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
| 5670 | int cr; | 5675 | int cr; |
| 5671 | int reg; | 5676 | int reg; |
| 5672 | int err; | 5677 | int err; |
| 5678 | int ret; | ||
| 5673 | 5679 | ||
| 5674 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 5680 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 5675 | cr = exit_qualification & 15; | 5681 | cr = exit_qualification & 15; |
| @@ -5681,25 +5687,27 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
| 5681 | switch (cr) { | 5687 | switch (cr) { |
| 5682 | case 0: | 5688 | case 0: |
| 5683 | err = handle_set_cr0(vcpu, val); | 5689 | err = handle_set_cr0(vcpu, val); |
| 5684 | kvm_complete_insn_gp(vcpu, err); | 5690 | return kvm_complete_insn_gp(vcpu, err); |
| 5685 | return 1; | ||
| 5686 | case 3: | 5691 | case 3: |
| 5687 | err = kvm_set_cr3(vcpu, val); | 5692 | err = kvm_set_cr3(vcpu, val); |
| 5688 | kvm_complete_insn_gp(vcpu, err); | 5693 | return kvm_complete_insn_gp(vcpu, err); |
| 5689 | return 1; | ||
| 5690 | case 4: | 5694 | case 4: |
| 5691 | err = handle_set_cr4(vcpu, val); | 5695 | err = handle_set_cr4(vcpu, val); |
| 5692 | kvm_complete_insn_gp(vcpu, err); | 5696 | return kvm_complete_insn_gp(vcpu, err); |
| 5693 | return 1; | ||
| 5694 | case 8: { | 5697 | case 8: { |
| 5695 | u8 cr8_prev = kvm_get_cr8(vcpu); | 5698 | u8 cr8_prev = kvm_get_cr8(vcpu); |
| 5696 | u8 cr8 = (u8)val; | 5699 | u8 cr8 = (u8)val; |
| 5697 | err = kvm_set_cr8(vcpu, cr8); | 5700 | err = kvm_set_cr8(vcpu, cr8); |
| 5698 | kvm_complete_insn_gp(vcpu, err); | 5701 | ret = kvm_complete_insn_gp(vcpu, err); |
| 5699 | if (lapic_in_kernel(vcpu)) | 5702 | if (lapic_in_kernel(vcpu)) |
| 5700 | return 1; | 5703 | return ret; |
| 5701 | if (cr8_prev <= cr8) | 5704 | if (cr8_prev <= cr8) |
| 5702 | return 1; | 5705 | return ret; |
| 5706 | /* | ||
| 5707 | * TODO: we might be squashing a | ||
| 5708 | * KVM_GUESTDBG_SINGLESTEP-triggered | ||
| 5709 | * KVM_EXIT_DEBUG here. | ||
| 5710 | */ | ||
| 5703 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; | 5711 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
| 5704 | return 0; | 5712 | return 0; |
| 5705 | } | 5713 | } |
| @@ -5709,22 +5717,19 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
| 5709 | handle_clts(vcpu); | 5717 | handle_clts(vcpu); |
| 5710 | trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); | 5718 | trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); |
| 5711 | vmx_fpu_activate(vcpu); | 5719 | vmx_fpu_activate(vcpu); |
| 5712 | skip_emulated_instruction(vcpu); | 5720 | return kvm_skip_emulated_instruction(vcpu); |
| 5713 | return 1; | ||
| 5714 | case 1: /*mov from cr*/ | 5721 | case 1: /*mov from cr*/ |
| 5715 | switch (cr) { | 5722 | switch (cr) { |
| 5716 | case 3: | 5723 | case 3: |
| 5717 | val = kvm_read_cr3(vcpu); | 5724 | val = kvm_read_cr3(vcpu); |
| 5718 | kvm_register_write(vcpu, reg, val); | 5725 | kvm_register_write(vcpu, reg, val); |
| 5719 | trace_kvm_cr_read(cr, val); | 5726 | trace_kvm_cr_read(cr, val); |
| 5720 | skip_emulated_instruction(vcpu); | 5727 | return kvm_skip_emulated_instruction(vcpu); |
| 5721 | return 1; | ||
| 5722 | case 8: | 5728 | case 8: |
| 5723 | val = kvm_get_cr8(vcpu); | 5729 | val = kvm_get_cr8(vcpu); |
| 5724 | kvm_register_write(vcpu, reg, val); | 5730 | kvm_register_write(vcpu, reg, val); |
| 5725 | trace_kvm_cr_read(cr, val); | 5731 | trace_kvm_cr_read(cr, val); |
| 5726 | skip_emulated_instruction(vcpu); | 5732 | return kvm_skip_emulated_instruction(vcpu); |
| 5727 | return 1; | ||
| 5728 | } | 5733 | } |
| 5729 | break; | 5734 | break; |
| 5730 | case 3: /* lmsw */ | 5735 | case 3: /* lmsw */ |
| @@ -5732,8 +5737,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
| 5732 | trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); | 5737 | trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); |
| 5733 | kvm_lmsw(vcpu, val); | 5738 | kvm_lmsw(vcpu, val); |
| 5734 | 5739 | ||
| 5735 | skip_emulated_instruction(vcpu); | 5740 | return kvm_skip_emulated_instruction(vcpu); |
| 5736 | return 1; | ||
| 5737 | default: | 5741 | default: |
| 5738 | break; | 5742 | break; |
| 5739 | } | 5743 | } |
| @@ -5804,8 +5808,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) | |||
| 5804 | if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) | 5808 | if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) |
| 5805 | return 1; | 5809 | return 1; |
| 5806 | 5810 | ||
| 5807 | skip_emulated_instruction(vcpu); | 5811 | return kvm_skip_emulated_instruction(vcpu); |
| 5808 | return 1; | ||
| 5809 | } | 5812 | } |
| 5810 | 5813 | ||
| 5811 | static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) | 5814 | static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) |
| @@ -5858,8 +5861,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu) | |||
| 5858 | /* FIXME: handling of bits 32:63 of rax, rdx */ | 5861 | /* FIXME: handling of bits 32:63 of rax, rdx */ |
| 5859 | vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; | 5862 | vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; |
| 5860 | vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; | 5863 | vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; |
| 5861 | skip_emulated_instruction(vcpu); | 5864 | return kvm_skip_emulated_instruction(vcpu); |
| 5862 | return 1; | ||
| 5863 | } | 5865 | } |
| 5864 | 5866 | ||
| 5865 | static int handle_wrmsr(struct kvm_vcpu *vcpu) | 5867 | static int handle_wrmsr(struct kvm_vcpu *vcpu) |
| @@ -5879,8 +5881,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu) | |||
| 5879 | } | 5881 | } |
| 5880 | 5882 | ||
| 5881 | trace_kvm_msr_write(ecx, data); | 5883 | trace_kvm_msr_write(ecx, data); |
| 5882 | skip_emulated_instruction(vcpu); | 5884 | return kvm_skip_emulated_instruction(vcpu); |
| 5883 | return 1; | ||
| 5884 | } | 5885 | } |
| 5885 | 5886 | ||
| 5886 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) | 5887 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) |
| @@ -5924,8 +5925,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu) | |||
| 5924 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 5925 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 5925 | 5926 | ||
| 5926 | kvm_mmu_invlpg(vcpu, exit_qualification); | 5927 | kvm_mmu_invlpg(vcpu, exit_qualification); |
| 5927 | skip_emulated_instruction(vcpu); | 5928 | return kvm_skip_emulated_instruction(vcpu); |
| 5928 | return 1; | ||
| 5929 | } | 5929 | } |
| 5930 | 5930 | ||
| 5931 | static int handle_rdpmc(struct kvm_vcpu *vcpu) | 5931 | static int handle_rdpmc(struct kvm_vcpu *vcpu) |
| @@ -5933,15 +5933,12 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu) | |||
| 5933 | int err; | 5933 | int err; |
| 5934 | 5934 | ||
| 5935 | err = kvm_rdpmc(vcpu); | 5935 | err = kvm_rdpmc(vcpu); |
| 5936 | kvm_complete_insn_gp(vcpu, err); | 5936 | return kvm_complete_insn_gp(vcpu, err); |
| 5937 | |||
| 5938 | return 1; | ||
| 5939 | } | 5937 | } |
| 5940 | 5938 | ||
| 5941 | static int handle_wbinvd(struct kvm_vcpu *vcpu) | 5939 | static int handle_wbinvd(struct kvm_vcpu *vcpu) |
| 5942 | { | 5940 | { |
| 5943 | kvm_emulate_wbinvd(vcpu); | 5941 | return kvm_emulate_wbinvd(vcpu); |
| 5944 | return 1; | ||
| 5945 | } | 5942 | } |
| 5946 | 5943 | ||
| 5947 | static int handle_xsetbv(struct kvm_vcpu *vcpu) | 5944 | static int handle_xsetbv(struct kvm_vcpu *vcpu) |
| @@ -5950,20 +5947,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu) | |||
| 5950 | u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); | 5947 | u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); |
| 5951 | 5948 | ||
| 5952 | if (kvm_set_xcr(vcpu, index, new_bv) == 0) | 5949 | if (kvm_set_xcr(vcpu, index, new_bv) == 0) |
| 5953 | skip_emulated_instruction(vcpu); | 5950 | return kvm_skip_emulated_instruction(vcpu); |
| 5954 | return 1; | 5951 | return 1; |
| 5955 | } | 5952 | } |
| 5956 | 5953 | ||
| 5957 | static int handle_xsaves(struct kvm_vcpu *vcpu) | 5954 | static int handle_xsaves(struct kvm_vcpu *vcpu) |
| 5958 | { | 5955 | { |
| 5959 | skip_emulated_instruction(vcpu); | 5956 | kvm_skip_emulated_instruction(vcpu); |
| 5960 | WARN(1, "this should never happen\n"); | 5957 | WARN(1, "this should never happen\n"); |
| 5961 | return 1; | 5958 | return 1; |
| 5962 | } | 5959 | } |
| 5963 | 5960 | ||
| 5964 | static int handle_xrstors(struct kvm_vcpu *vcpu) | 5961 | static int handle_xrstors(struct kvm_vcpu *vcpu) |
| 5965 | { | 5962 | { |
| 5966 | skip_emulated_instruction(vcpu); | 5963 | kvm_skip_emulated_instruction(vcpu); |
| 5967 | WARN(1, "this should never happen\n"); | 5964 | WARN(1, "this should never happen\n"); |
| 5968 | return 1; | 5965 | return 1; |
| 5969 | } | 5966 | } |
| @@ -5984,8 +5981,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu) | |||
| 5984 | if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && | 5981 | if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && |
| 5985 | (offset == APIC_EOI)) { | 5982 | (offset == APIC_EOI)) { |
| 5986 | kvm_lapic_set_eoi(vcpu); | 5983 | kvm_lapic_set_eoi(vcpu); |
| 5987 | skip_emulated_instruction(vcpu); | 5984 | return kvm_skip_emulated_instruction(vcpu); |
| 5988 | return 1; | ||
| 5989 | } | 5985 | } |
| 5990 | } | 5986 | } |
| 5991 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; | 5987 | return emulate_instruction(vcpu, 0) == EMULATE_DONE; |
| @@ -6134,8 +6130,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) | |||
| 6134 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); | 6130 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
| 6135 | if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { | 6131 | if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { |
| 6136 | trace_kvm_fast_mmio(gpa); | 6132 | trace_kvm_fast_mmio(gpa); |
| 6137 | skip_emulated_instruction(vcpu); | 6133 | return kvm_skip_emulated_instruction(vcpu); |
| 6138 | return 1; | ||
| 6139 | } | 6134 | } |
| 6140 | 6135 | ||
| 6141 | ret = handle_mmio_page_fault(vcpu, gpa, true); | 6136 | ret = handle_mmio_page_fault(vcpu, gpa, true); |
| @@ -6508,15 +6503,12 @@ static int handle_pause(struct kvm_vcpu *vcpu) | |||
| 6508 | grow_ple_window(vcpu); | 6503 | grow_ple_window(vcpu); |
| 6509 | 6504 | ||
| 6510 | kvm_vcpu_on_spin(vcpu); | 6505 | kvm_vcpu_on_spin(vcpu); |
| 6511 | skip_emulated_instruction(vcpu); | 6506 | return kvm_skip_emulated_instruction(vcpu); |
| 6512 | |||
| 6513 | return 1; | ||
| 6514 | } | 6507 | } |
| 6515 | 6508 | ||
| 6516 | static int handle_nop(struct kvm_vcpu *vcpu) | 6509 | static int handle_nop(struct kvm_vcpu *vcpu) |
| 6517 | { | 6510 | { |
| 6518 | skip_emulated_instruction(vcpu); | 6511 | return kvm_skip_emulated_instruction(vcpu); |
| 6519 | return 1; | ||
| 6520 | } | 6512 | } |
| 6521 | 6513 | ||
| 6522 | static int handle_mwait(struct kvm_vcpu *vcpu) | 6514 | static int handle_mwait(struct kvm_vcpu *vcpu) |
| @@ -6823,8 +6815,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
| 6823 | */ | 6815 | */ |
| 6824 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { | 6816 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
| 6825 | nested_vmx_failInvalid(vcpu); | 6817 | nested_vmx_failInvalid(vcpu); |
| 6826 | skip_emulated_instruction(vcpu); | 6818 | return kvm_skip_emulated_instruction(vcpu); |
| 6827 | return 1; | ||
| 6828 | } | 6819 | } |
| 6829 | 6820 | ||
| 6830 | page = nested_get_page(vcpu, vmptr); | 6821 | page = nested_get_page(vcpu, vmptr); |
| @@ -6832,8 +6823,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
| 6832 | *(u32 *)kmap(page) != VMCS12_REVISION) { | 6823 | *(u32 *)kmap(page) != VMCS12_REVISION) { |
| 6833 | nested_vmx_failInvalid(vcpu); | 6824 | nested_vmx_failInvalid(vcpu); |
| 6834 | kunmap(page); | 6825 | kunmap(page); |
| 6835 | skip_emulated_instruction(vcpu); | 6826 | return kvm_skip_emulated_instruction(vcpu); |
| 6836 | return 1; | ||
| 6837 | } | 6827 | } |
| 6838 | kunmap(page); | 6828 | kunmap(page); |
| 6839 | vmx->nested.vmxon_ptr = vmptr; | 6829 | vmx->nested.vmxon_ptr = vmptr; |
| @@ -6842,30 +6832,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
| 6842 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { | 6832 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
| 6843 | nested_vmx_failValid(vcpu, | 6833 | nested_vmx_failValid(vcpu, |
| 6844 | VMXERR_VMCLEAR_INVALID_ADDRESS); | 6834 | VMXERR_VMCLEAR_INVALID_ADDRESS); |
| 6845 | skip_emulated_instruction(vcpu); | 6835 | return kvm_skip_emulated_instruction(vcpu); |
| 6846 | return 1; | ||
| 6847 | } | 6836 | } |
| 6848 | 6837 | ||
| 6849 | if (vmptr == vmx->nested.vmxon_ptr) { | 6838 | if (vmptr == vmx->nested.vmxon_ptr) { |
| 6850 | nested_vmx_failValid(vcpu, | 6839 | nested_vmx_failValid(vcpu, |
| 6851 | VMXERR_VMCLEAR_VMXON_POINTER); | 6840 | VMXERR_VMCLEAR_VMXON_POINTER); |
| 6852 | skip_emulated_instruction(vcpu); | 6841 | return kvm_skip_emulated_instruction(vcpu); |
| 6853 | return 1; | ||
| 6854 | } | 6842 | } |
| 6855 | break; | 6843 | break; |
| 6856 | case EXIT_REASON_VMPTRLD: | 6844 | case EXIT_REASON_VMPTRLD: |
| 6857 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { | 6845 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
| 6858 | nested_vmx_failValid(vcpu, | 6846 | nested_vmx_failValid(vcpu, |
| 6859 | VMXERR_VMPTRLD_INVALID_ADDRESS); | 6847 | VMXERR_VMPTRLD_INVALID_ADDRESS); |
| 6860 | skip_emulated_instruction(vcpu); | 6848 | return kvm_skip_emulated_instruction(vcpu); |
| 6861 | return 1; | ||
| 6862 | } | 6849 | } |
| 6863 | 6850 | ||
| 6864 | if (vmptr == vmx->nested.vmxon_ptr) { | 6851 | if (vmptr == vmx->nested.vmxon_ptr) { |
| 6865 | nested_vmx_failValid(vcpu, | 6852 | nested_vmx_failValid(vcpu, |
| 6866 | VMXERR_VMCLEAR_VMXON_POINTER); | 6853 | VMXERR_VMCLEAR_VMXON_POINTER); |
| 6867 | skip_emulated_instruction(vcpu); | 6854 | return kvm_skip_emulated_instruction(vcpu); |
| 6868 | return 1; | ||
| 6869 | } | 6855 | } |
| 6870 | break; | 6856 | break; |
| 6871 | default: | 6857 | default: |
| @@ -6921,8 +6907,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | |||
| 6921 | 6907 | ||
| 6922 | if (vmx->nested.vmxon) { | 6908 | if (vmx->nested.vmxon) { |
| 6923 | nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); | 6909 | nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); |
| 6924 | skip_emulated_instruction(vcpu); | 6910 | return kvm_skip_emulated_instruction(vcpu); |
| 6925 | return 1; | ||
| 6926 | } | 6911 | } |
| 6927 | 6912 | ||
| 6928 | if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) | 6913 | if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) |
| @@ -6963,8 +6948,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | |||
| 6963 | vmx->nested.vmxon = true; | 6948 | vmx->nested.vmxon = true; |
| 6964 | 6949 | ||
| 6965 | nested_vmx_succeed(vcpu); | 6950 | nested_vmx_succeed(vcpu); |
| 6966 | skip_emulated_instruction(vcpu); | 6951 | return kvm_skip_emulated_instruction(vcpu); |
| 6967 | return 1; | ||
| 6968 | 6952 | ||
| 6969 | out_shadow_vmcs: | 6953 | out_shadow_vmcs: |
| 6970 | kfree(vmx->nested.cached_vmcs12); | 6954 | kfree(vmx->nested.cached_vmcs12); |
| @@ -7084,8 +7068,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) | |||
| 7084 | return 1; | 7068 | return 1; |
| 7085 | free_nested(to_vmx(vcpu)); | 7069 | free_nested(to_vmx(vcpu)); |
| 7086 | nested_vmx_succeed(vcpu); | 7070 | nested_vmx_succeed(vcpu); |
| 7087 | skip_emulated_instruction(vcpu); | 7071 | return kvm_skip_emulated_instruction(vcpu); |
| 7088 | return 1; | ||
| 7089 | } | 7072 | } |
| 7090 | 7073 | ||
| 7091 | /* Emulate the VMCLEAR instruction */ | 7074 | /* Emulate the VMCLEAR instruction */ |
| @@ -7125,8 +7108,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) | |||
| 7125 | nested_free_vmcs02(vmx, vmptr); | 7108 | nested_free_vmcs02(vmx, vmptr); |
| 7126 | 7109 | ||
| 7127 | nested_vmx_succeed(vcpu); | 7110 | nested_vmx_succeed(vcpu); |
| 7128 | skip_emulated_instruction(vcpu); | 7111 | return kvm_skip_emulated_instruction(vcpu); |
| 7129 | return 1; | ||
| 7130 | } | 7112 | } |
| 7131 | 7113 | ||
| 7132 | static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); | 7114 | static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); |
| @@ -7340,18 +7322,15 @@ static int handle_vmread(struct kvm_vcpu *vcpu) | |||
| 7340 | if (!nested_vmx_check_permission(vcpu)) | 7322 | if (!nested_vmx_check_permission(vcpu)) |
| 7341 | return 1; | 7323 | return 1; |
| 7342 | 7324 | ||
| 7343 | if (!nested_vmx_check_vmcs12(vcpu)) { | 7325 | if (!nested_vmx_check_vmcs12(vcpu)) |
| 7344 | skip_emulated_instruction(vcpu); | 7326 | return kvm_skip_emulated_instruction(vcpu); |
| 7345 | return 1; | ||
| 7346 | } | ||
| 7347 | 7327 | ||
| 7348 | /* Decode instruction info and find the field to read */ | 7328 | /* Decode instruction info and find the field to read */ |
| 7349 | field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); | 7329 | field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); |
| 7350 | /* Read the field, zero-extended to a u64 field_value */ | 7330 | /* Read the field, zero-extended to a u64 field_value */ |
| 7351 | if (vmcs12_read_any(vcpu, field, &field_value) < 0) { | 7331 | if (vmcs12_read_any(vcpu, field, &field_value) < 0) { |
| 7352 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); | 7332 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); |
| 7353 | skip_emulated_instruction(vcpu); | 7333 | return kvm_skip_emulated_instruction(vcpu); |
| 7354 | return 1; | ||
| 7355 | } | 7334 | } |
| 7356 | /* | 7335 | /* |
| 7357 | * Now copy part of this value to register or memory, as requested. | 7336 | * Now copy part of this value to register or memory, as requested. |
| @@ -7371,8 +7350,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) | |||
| 7371 | } | 7350 | } |
| 7372 | 7351 | ||
| 7373 | nested_vmx_succeed(vcpu); | 7352 | nested_vmx_succeed(vcpu); |
| 7374 | skip_emulated_instruction(vcpu); | 7353 | return kvm_skip_emulated_instruction(vcpu); |
| 7375 | return 1; | ||
| 7376 | } | 7354 | } |
| 7377 | 7355 | ||
| 7378 | 7356 | ||
| @@ -7394,10 +7372,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) | |||
| 7394 | if (!nested_vmx_check_permission(vcpu)) | 7372 | if (!nested_vmx_check_permission(vcpu)) |
| 7395 | return 1; | 7373 | return 1; |
| 7396 | 7374 | ||
| 7397 | if (!nested_vmx_check_vmcs12(vcpu)) { | 7375 | if (!nested_vmx_check_vmcs12(vcpu)) |
| 7398 | skip_emulated_instruction(vcpu); | 7376 | return kvm_skip_emulated_instruction(vcpu); |
| 7399 | return 1; | ||
| 7400 | } | ||
| 7401 | 7377 | ||
| 7402 | if (vmx_instruction_info & (1u << 10)) | 7378 | if (vmx_instruction_info & (1u << 10)) |
| 7403 | field_value = kvm_register_readl(vcpu, | 7379 | field_value = kvm_register_readl(vcpu, |
| @@ -7418,19 +7394,16 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) | |||
| 7418 | if (vmcs_field_readonly(field)) { | 7394 | if (vmcs_field_readonly(field)) { |
| 7419 | nested_vmx_failValid(vcpu, | 7395 | nested_vmx_failValid(vcpu, |
| 7420 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); | 7396 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); |
| 7421 | skip_emulated_instruction(vcpu); | 7397 | return kvm_skip_emulated_instruction(vcpu); |
| 7422 | return 1; | ||
| 7423 | } | 7398 | } |
| 7424 | 7399 | ||
| 7425 | if (vmcs12_write_any(vcpu, field, field_value) < 0) { | 7400 | if (vmcs12_write_any(vcpu, field, field_value) < 0) { |
| 7426 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); | 7401 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); |
| 7427 | skip_emulated_instruction(vcpu); | 7402 | return kvm_skip_emulated_instruction(vcpu); |
| 7428 | return 1; | ||
| 7429 | } | 7403 | } |
| 7430 | 7404 | ||
| 7431 | nested_vmx_succeed(vcpu); | 7405 | nested_vmx_succeed(vcpu); |
| 7432 | skip_emulated_instruction(vcpu); | 7406 | return kvm_skip_emulated_instruction(vcpu); |
| 7433 | return 1; | ||
| 7434 | } | 7407 | } |
| 7435 | 7408 | ||
| 7436 | /* Emulate the VMPTRLD instruction */ | 7409 | /* Emulate the VMPTRLD instruction */ |
| @@ -7451,8 +7424,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
| 7451 | page = nested_get_page(vcpu, vmptr); | 7424 | page = nested_get_page(vcpu, vmptr); |
| 7452 | if (page == NULL) { | 7425 | if (page == NULL) { |
| 7453 | nested_vmx_failInvalid(vcpu); | 7426 | nested_vmx_failInvalid(vcpu); |
| 7454 | skip_emulated_instruction(vcpu); | 7427 | return kvm_skip_emulated_instruction(vcpu); |
| 7455 | return 1; | ||
| 7456 | } | 7428 | } |
| 7457 | new_vmcs12 = kmap(page); | 7429 | new_vmcs12 = kmap(page); |
| 7458 | if (new_vmcs12->revision_id != VMCS12_REVISION) { | 7430 | if (new_vmcs12->revision_id != VMCS12_REVISION) { |
| @@ -7460,8 +7432,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
| 7460 | nested_release_page_clean(page); | 7432 | nested_release_page_clean(page); |
| 7461 | nested_vmx_failValid(vcpu, | 7433 | nested_vmx_failValid(vcpu, |
| 7462 | VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); | 7434 | VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); |
| 7463 | skip_emulated_instruction(vcpu); | 7435 | return kvm_skip_emulated_instruction(vcpu); |
| 7464 | return 1; | ||
| 7465 | } | 7436 | } |
| 7466 | 7437 | ||
| 7467 | nested_release_vmcs12(vmx); | 7438 | nested_release_vmcs12(vmx); |
| @@ -7485,8 +7456,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
| 7485 | } | 7456 | } |
| 7486 | 7457 | ||
| 7487 | nested_vmx_succeed(vcpu); | 7458 | nested_vmx_succeed(vcpu); |
| 7488 | skip_emulated_instruction(vcpu); | 7459 | return kvm_skip_emulated_instruction(vcpu); |
| 7489 | return 1; | ||
| 7490 | } | 7460 | } |
| 7491 | 7461 | ||
| 7492 | /* Emulate the VMPTRST instruction */ | 7462 | /* Emulate the VMPTRST instruction */ |
| @@ -7511,8 +7481,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) | |||
| 7511 | return 1; | 7481 | return 1; |
| 7512 | } | 7482 | } |
| 7513 | nested_vmx_succeed(vcpu); | 7483 | nested_vmx_succeed(vcpu); |
| 7514 | skip_emulated_instruction(vcpu); | 7484 | return kvm_skip_emulated_instruction(vcpu); |
| 7515 | return 1; | ||
| 7516 | } | 7485 | } |
| 7517 | 7486 | ||
| 7518 | /* Emulate the INVEPT instruction */ | 7487 | /* Emulate the INVEPT instruction */ |
| @@ -7550,8 +7519,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) | |||
| 7550 | if (type >= 32 || !(types & (1 << type))) { | 7519 | if (type >= 32 || !(types & (1 << type))) { |
| 7551 | nested_vmx_failValid(vcpu, | 7520 | nested_vmx_failValid(vcpu, |
| 7552 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | 7521 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); |
| 7553 | skip_emulated_instruction(vcpu); | 7522 | return kvm_skip_emulated_instruction(vcpu); |
| 7554 | return 1; | ||
| 7555 | } | 7523 | } |
| 7556 | 7524 | ||
| 7557 | /* According to the Intel VMX instruction reference, the memory | 7525 | /* According to the Intel VMX instruction reference, the memory |
| @@ -7582,8 +7550,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) | |||
| 7582 | break; | 7550 | break; |
| 7583 | } | 7551 | } |
| 7584 | 7552 | ||
| 7585 | skip_emulated_instruction(vcpu); | 7553 | return kvm_skip_emulated_instruction(vcpu); |
| 7586 | return 1; | ||
| 7587 | } | 7554 | } |
| 7588 | 7555 | ||
| 7589 | static int handle_invvpid(struct kvm_vcpu *vcpu) | 7556 | static int handle_invvpid(struct kvm_vcpu *vcpu) |
| @@ -7614,8 +7581,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) | |||
| 7614 | if (type >= 32 || !(types & (1 << type))) { | 7581 | if (type >= 32 || !(types & (1 << type))) { |
| 7615 | nested_vmx_failValid(vcpu, | 7582 | nested_vmx_failValid(vcpu, |
| 7616 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | 7583 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); |
| 7617 | skip_emulated_instruction(vcpu); | 7584 | return kvm_skip_emulated_instruction(vcpu); |
| 7618 | return 1; | ||
| 7619 | } | 7585 | } |
| 7620 | 7586 | ||
| 7621 | /* according to the intel vmx instruction reference, the memory | 7587 | /* according to the intel vmx instruction reference, the memory |
| @@ -7637,23 +7603,20 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) | |||
| 7637 | if (!vpid) { | 7603 | if (!vpid) { |
| 7638 | nested_vmx_failValid(vcpu, | 7604 | nested_vmx_failValid(vcpu, |
| 7639 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | 7605 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); |
| 7640 | skip_emulated_instruction(vcpu); | 7606 | return kvm_skip_emulated_instruction(vcpu); |
| 7641 | return 1; | ||
| 7642 | } | 7607 | } |
| 7643 | break; | 7608 | break; |
| 7644 | case VMX_VPID_EXTENT_ALL_CONTEXT: | 7609 | case VMX_VPID_EXTENT_ALL_CONTEXT: |
| 7645 | break; | 7610 | break; |
| 7646 | default: | 7611 | default: |
| 7647 | WARN_ON_ONCE(1); | 7612 | WARN_ON_ONCE(1); |
| 7648 | skip_emulated_instruction(vcpu); | 7613 | return kvm_skip_emulated_instruction(vcpu); |
| 7649 | return 1; | ||
| 7650 | } | 7614 | } |
| 7651 | 7615 | ||
| 7652 | __vmx_flush_tlb(vcpu, vmx->nested.vpid02); | 7616 | __vmx_flush_tlb(vcpu, vmx->nested.vpid02); |
| 7653 | nested_vmx_succeed(vcpu); | 7617 | nested_vmx_succeed(vcpu); |
| 7654 | 7618 | ||
| 7655 | skip_emulated_instruction(vcpu); | 7619 | return kvm_skip_emulated_instruction(vcpu); |
| 7656 | return 1; | ||
| 7657 | } | 7620 | } |
| 7658 | 7621 | ||
| 7659 | static int handle_pml_full(struct kvm_vcpu *vcpu) | 7622 | static int handle_pml_full(struct kvm_vcpu *vcpu) |
| @@ -10194,6 +10157,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
| 10194 | if (!vmcs02) | 10157 | if (!vmcs02) |
| 10195 | return -ENOMEM; | 10158 | return -ENOMEM; |
| 10196 | 10159 | ||
| 10160 | /* | ||
| 10161 | * After this point, the trap flag no longer triggers a singlestep trap | ||
| 10162 | * on the vm entry instructions. Don't call | ||
| 10163 | * kvm_skip_emulated_instruction. | ||
| 10164 | */ | ||
| 10197 | skip_emulated_instruction(vcpu); | 10165 | skip_emulated_instruction(vcpu); |
| 10198 | enter_guest_mode(vcpu); | 10166 | enter_guest_mode(vcpu); |
| 10199 | 10167 | ||
| @@ -10238,8 +10206,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
| 10238 | return 1; | 10206 | return 1; |
| 10239 | 10207 | ||
| 10240 | out: | 10208 | out: |
| 10241 | skip_emulated_instruction(vcpu); | 10209 | return kvm_skip_emulated_instruction(vcpu); |
| 10242 | return 1; | ||
| 10243 | } | 10210 | } |
| 10244 | 10211 | ||
| 10245 | /* | 10212 | /* |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ec59301f5192..7b38c5e6f412 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -425,12 +425,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) | |||
| 425 | } | 425 | } |
| 426 | EXPORT_SYMBOL_GPL(kvm_requeue_exception); | 426 | EXPORT_SYMBOL_GPL(kvm_requeue_exception); |
| 427 | 427 | ||
| 428 | void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) | 428 | int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) |
| 429 | { | 429 | { |
| 430 | if (err) | 430 | if (err) |
| 431 | kvm_inject_gp(vcpu, 0); | 431 | kvm_inject_gp(vcpu, 0); |
| 432 | else | 432 | else |
| 433 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 433 | return kvm_skip_emulated_instruction(vcpu); |
| 434 | |||
| 435 | return 1; | ||
| 434 | } | 436 | } |
| 435 | EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); | 437 | EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); |
| 436 | 438 | ||
| @@ -4813,8 +4815,8 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) | |||
| 4813 | 4815 | ||
| 4814 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) | 4816 | int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) |
| 4815 | { | 4817 | { |
| 4816 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 4818 | kvm_emulate_wbinvd_noskip(vcpu); |
| 4817 | return kvm_emulate_wbinvd_noskip(vcpu); | 4819 | return kvm_skip_emulated_instruction(vcpu); |
| 4818 | } | 4820 | } |
| 4819 | EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); | 4821 | EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); |
| 4820 | 4822 | ||
| @@ -5430,6 +5432,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag | |||
| 5430 | } | 5432 | } |
| 5431 | } | 5433 | } |
| 5432 | 5434 | ||
| 5435 | int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) | ||
| 5436 | { | ||
| 5437 | unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); | ||
| 5438 | int r = EMULATE_DONE; | ||
| 5439 | |||
| 5440 | kvm_x86_ops->skip_emulated_instruction(vcpu); | ||
| 5441 | kvm_vcpu_check_singlestep(vcpu, rflags, &r); | ||
| 5442 | return r == EMULATE_DONE; | ||
| 5443 | } | ||
| 5444 | EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); | ||
| 5445 | |||
| 5433 | static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) | 5446 | static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) |
| 5434 | { | 5447 | { |
| 5435 | if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && | 5448 | if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && |
| @@ -6007,8 +6020,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt); | |||
| 6007 | 6020 | ||
| 6008 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | 6021 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) |
| 6009 | { | 6022 | { |
| 6010 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 6023 | int ret = kvm_skip_emulated_instruction(vcpu); |
| 6011 | return kvm_vcpu_halt(vcpu); | 6024 | /* |
| 6025 | * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered | ||
| 6026 | * KVM_EXIT_DEBUG here. | ||
| 6027 | */ | ||
| 6028 | return kvm_vcpu_halt(vcpu) && ret; | ||
| 6012 | } | 6029 | } |
| 6013 | EXPORT_SYMBOL_GPL(kvm_emulate_halt); | 6030 | EXPORT_SYMBOL_GPL(kvm_emulate_halt); |
| 6014 | 6031 | ||
| @@ -6039,9 +6056,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu) | |||
| 6039 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | 6056 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) |
| 6040 | { | 6057 | { |
| 6041 | unsigned long nr, a0, a1, a2, a3, ret; | 6058 | unsigned long nr, a0, a1, a2, a3, ret; |
| 6042 | int op_64_bit, r = 1; | 6059 | int op_64_bit, r; |
| 6043 | 6060 | ||
| 6044 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 6061 | r = kvm_skip_emulated_instruction(vcpu); |
| 6045 | 6062 | ||
| 6046 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) | 6063 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
| 6047 | return kvm_hv_hypercall(vcpu); | 6064 | return kvm_hv_hypercall(vcpu); |
