author     Ladi Prosek <lprosek@redhat.com>        2017-06-22 03:05:26 -0400
committer  Radim Krčmář <rkrcmar@redhat.com>       2017-07-12 16:38:27 -0400
commit     b742c1e6e79ddf4192d76336da2407c65ca7242f
tree       1137bf9432e837fa13f9fe964e05a95d05340b04
parent     fb5307298e49ec1668c3a9ec888c1b9da4347395
KVM: SVM: handle singlestep exception when skipping emulated instructions
kvm_skip_emulated_instruction handles the singlestep debug exception,
which is something we almost always want. This commit (specifically
the change in rdmsr_interception) makes the debug.flat KVM unit test
pass on AMD.
Two call sites still call skip_emulated_instruction directly:
* In svm_queue_exception where it's used only for moving the rip forward
* In task_switch_interception which is analogous to handle_task_switch
in VMX
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
-rw-r--r--  arch/x86/kvm/svm.c | 59
1 file changed, 33 insertions(+), 26 deletions(-)
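The conversion below relies on the return-value contract of the common x86 helper: kvm_skip_emulated_instruction() advances the instruction pointer and returns 1 if the guest can keep running, or 0 if a single-step debug exit has to be delivered to userspace first, so handlers must propagate its return value instead of returning 1 unconditionally. The following is only a rough sketch of that contract, simplified and assumed from the earlier VMX-side conversion rather than copied from arch/x86/kvm/x86.c; the helper name kvm_vcpu_do_singlestep() in particular is assumed.

/*
 * Illustrative sketch only -- not the verbatim kernel implementation.
 * Shows the semantics the SVM handlers below start depending on.
 */
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
        int r = EMULATE_DONE;

        /* Move rip past the intercepted instruction. */
        kvm_x86_ops->skip_emulated_instruction(vcpu);

        /*
         * If single-stepping is in effect (TF set, e.g. via
         * KVM_GUESTDBG_SINGLESTEP), raise the debug exception or
         * request a KVM_EXIT_DEBUG exit to userspace.
         */
        if (unlikely(rflags & X86_EFLAGS_TF))
                kvm_vcpu_do_singlestep(vcpu, &r);  /* assumed helper */

        /* 1: keep running the guest; 0: exit to userspace first. */
        return r == EMULATE_DONE;
}

This is also why io_interception() below combines the helper's result with the fast PIO return value using &&: both have to agree that the vCPU can keep running before the handler returns 1.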
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 905ea6052517..3da42d7c629e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2267,7 +2267,7 @@ static int io_interception(struct vcpu_svm *svm)
 {
         struct kvm_vcpu *vcpu = &svm->vcpu;
         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
-        int size, in, string;
+        int size, in, string, ret;
         unsigned port;
 
         ++svm->vcpu.stat.io_exits;
@@ -2279,10 +2279,16 @@ static int io_interception(struct vcpu_svm *svm)
         port = io_info >> 16;
         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
         svm->next_rip = svm->vmcb->control.exit_info_2;
-        skip_emulated_instruction(&svm->vcpu);
+        ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
-        return in ? kvm_fast_pio_in(vcpu, size, port)
-                  : kvm_fast_pio_out(vcpu, size, port);
+        /*
+         * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+         * KVM_EXIT_DEBUG here.
+         */
+        if (in)
+                return kvm_fast_pio_in(vcpu, size, port) && ret;
+        else
+                return kvm_fast_pio_out(vcpu, size, port) && ret;
 }
 
 static int nmi_interception(struct vcpu_svm *svm)
@@ -3055,6 +3061,7 @@ static int vmload_interception(struct vcpu_svm *svm)
 {
         struct vmcb *nested_vmcb;
         struct page *page;
+        int ret;
 
         if (nested_svm_check_permissions(svm))
                 return 1;
@@ -3064,18 +3071,19 @@ static int vmload_interception(struct vcpu_svm *svm)
                 return 1;
 
         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-        skip_emulated_instruction(&svm->vcpu);
+        ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
         nested_svm_unmap(page);
 
-        return 1;
+        return ret;
 }
 
 static int vmsave_interception(struct vcpu_svm *svm)
 {
         struct vmcb *nested_vmcb;
         struct page *page;
+        int ret;
 
         if (nested_svm_check_permissions(svm))
                 return 1;
@@ -3085,12 +3093,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
                 return 1;
 
         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-        skip_emulated_instruction(&svm->vcpu);
+        ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
         nested_svm_unmap(page);
 
-        return 1;
+        return ret;
 }
 
 static int vmrun_interception(struct vcpu_svm *svm)
@@ -3123,25 +3131,29 @@ failed:
 
 static int stgi_interception(struct vcpu_svm *svm)
 {
+        int ret;
+
         if (nested_svm_check_permissions(svm))
                 return 1;
 
         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-        skip_emulated_instruction(&svm->vcpu);
+        ret = kvm_skip_emulated_instruction(&svm->vcpu);
         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
         enable_gif(svm);
 
-        return 1;
+        return ret;
 }
 
 static int clgi_interception(struct vcpu_svm *svm)
 {
+        int ret;
+
         if (nested_svm_check_permissions(svm))
                 return 1;
 
         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-        skip_emulated_instruction(&svm->vcpu);
+        ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
         disable_gif(svm);
 
@@ -3152,7 +3164,7 @@ static int clgi_interception(struct vcpu_svm *svm)
                 mark_dirty(svm->vmcb, VMCB_INTR);
         }
 
-        return 1;
+        return ret;
 }
 
 static int invlpga_interception(struct vcpu_svm *svm)
@@ -3166,8 +3178,7 @@ static int invlpga_interception(struct vcpu_svm *svm)
         kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
 
         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-        skip_emulated_instruction(&svm->vcpu);
-        return 1;
+        return kvm_skip_emulated_instruction(&svm->vcpu);
 }
 
 static int skinit_interception(struct vcpu_svm *svm)
@@ -3190,7 +3201,7 @@ static int xsetbv_interception(struct vcpu_svm *svm)
 
         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-                skip_emulated_instruction(&svm->vcpu);
+                return kvm_skip_emulated_instruction(&svm->vcpu);
         }
 
         return 1;
@@ -3286,8 +3297,7 @@ static int invlpg_interception(struct vcpu_svm *svm)
                 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 
         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
-        skip_emulated_instruction(&svm->vcpu);
-        return 1;
+        return kvm_skip_emulated_instruction(&svm->vcpu);
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
@@ -3437,9 +3447,7 @@ static int dr_interception(struct vcpu_svm *svm)
                 kvm_register_write(&svm->vcpu, reg, val);
         }
 
-        skip_emulated_instruction(&svm->vcpu);
-
-        return 1;
+        return kvm_skip_emulated_instruction(&svm->vcpu);
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
@@ -3562,6 +3570,7 @@ static int rdmsr_interception(struct vcpu_svm *svm)
         if (svm_get_msr(&svm->vcpu, &msr_info)) {
                 trace_kvm_msr_read_ex(ecx);
                 kvm_inject_gp(&svm->vcpu, 0);
+                return 1;
         } else {
                 trace_kvm_msr_read(ecx, msr_info.data);
 
@@ -3570,9 +3579,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
                 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
                                    msr_info.data >> 32);
                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-                skip_emulated_instruction(&svm->vcpu);
+                return kvm_skip_emulated_instruction(&svm->vcpu);
         }
-        return 1;
 }
 
 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
@@ -3698,11 +3706,11 @@ static int wrmsr_interception(struct vcpu_svm *svm)
         if (kvm_set_msr(&svm->vcpu, &msr)) {
                 trace_kvm_msr_write_ex(ecx, data);
                 kvm_inject_gp(&svm->vcpu, 0);
+                return 1;
         } else {
                 trace_kvm_msr_write(ecx, data);
-                skip_emulated_instruction(&svm->vcpu);
+                return kvm_skip_emulated_instruction(&svm->vcpu);
         }
-        return 1;
 }
 
 static int msr_interception(struct vcpu_svm *svm)
@@ -3731,8 +3739,7 @@ static int pause_interception(struct vcpu_svm *svm)
 
 static int nop_interception(struct vcpu_svm *svm)
 {
-        skip_emulated_instruction(&(svm->vcpu));
-        return 1;
+        return kvm_skip_emulated_instruction(&(svm->vcpu));
 }
 
 static int monitor_interception(struct vcpu_svm *svm)