diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2009-10-05 07:07:21 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-12-03 02:32:14 -0500 |
commit | 91586a3b7d79432772a3cdcb81473cd08a237c79 (patch) | |
tree | 3d5955b02c81485f0fe446649e969251598972c6 /arch/x86/kvm/x86.c | |
parent | a68a6a7282373bedba8a2ed751b6384edb983a64 (diff) |
KVM: x86: Rework guest single-step flag injection and filtering
Push TF and RF injection and filtering on guest single-stepping into the
vendor get/set_rflags callbacks. This makes the whole mechanism more
robust wrt user space IOCTL order and instruction emulations.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 77 |
1 file changed, 45 insertions, 32 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4693f915f3bd..385cd0a1e23d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -235,6 +235,25 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) | |||
235 | } | 235 | } |
236 | EXPORT_SYMBOL_GPL(kvm_require_cpl); | 236 | EXPORT_SYMBOL_GPL(kvm_require_cpl); |
237 | 237 | ||
238 | unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) | ||
239 | { | ||
240 | unsigned long rflags; | ||
241 | |||
242 | rflags = kvm_x86_ops->get_rflags(vcpu); | ||
243 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
244 | rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
245 | return rflags; | ||
246 | } | ||
247 | EXPORT_SYMBOL_GPL(kvm_get_rflags); | ||
248 | |||
249 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
250 | { | ||
251 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
252 | rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | ||
253 | kvm_x86_ops->set_rflags(vcpu, rflags); | ||
254 | } | ||
255 | EXPORT_SYMBOL_GPL(kvm_set_rflags); | ||
256 | |||
238 | /* | 257 | /* |
239 | * Load the pae pdptrs. Return true is they are all valid. | 258 | * Load the pae pdptrs. Return true is they are all valid. |
240 | */ | 259 | */ |
@@ -2777,7 +2796,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
2777 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 2796 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); |
2778 | 2797 | ||
2779 | vcpu->arch.emulate_ctxt.vcpu = vcpu; | 2798 | vcpu->arch.emulate_ctxt.vcpu = vcpu; |
2780 | vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); | 2799 | vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu); |
2781 | vcpu->arch.emulate_ctxt.mode = | 2800 | vcpu->arch.emulate_ctxt.mode = |
2782 | (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) | 2801 | (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) |
2783 | ? X86EMUL_MODE_REAL : cs_l | 2802 | ? X86EMUL_MODE_REAL : cs_l |
@@ -2855,7 +2874,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu, | |||
2855 | return EMULATE_DO_MMIO; | 2874 | return EMULATE_DO_MMIO; |
2856 | } | 2875 | } |
2857 | 2876 | ||
2858 | kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); | 2877 | kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); |
2859 | 2878 | ||
2860 | if (vcpu->mmio_is_write) { | 2879 | if (vcpu->mmio_is_write) { |
2861 | vcpu->mmio_needed = 0; | 2880 | vcpu->mmio_needed = 0; |
@@ -3291,7 +3310,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, | |||
3291 | unsigned long *rflags) | 3310 | unsigned long *rflags) |
3292 | { | 3311 | { |
3293 | kvm_lmsw(vcpu, msw); | 3312 | kvm_lmsw(vcpu, msw); |
3294 | *rflags = kvm_x86_ops->get_rflags(vcpu); | 3313 | *rflags = kvm_get_rflags(vcpu); |
3295 | } | 3314 | } |
3296 | 3315 | ||
3297 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) | 3316 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) |
@@ -3329,7 +3348,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, | |||
3329 | switch (cr) { | 3348 | switch (cr) { |
3330 | case 0: | 3349 | case 0: |
3331 | kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); | 3350 | kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); |
3332 | *rflags = kvm_x86_ops->get_rflags(vcpu); | 3351 | *rflags = kvm_get_rflags(vcpu); |
3333 | break; | 3352 | break; |
3334 | case 2: | 3353 | case 2: |
3335 | vcpu->arch.cr2 = val; | 3354 | vcpu->arch.cr2 = val; |
@@ -3460,7 +3479,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) | |||
3460 | { | 3479 | { |
3461 | struct kvm_run *kvm_run = vcpu->run; | 3480 | struct kvm_run *kvm_run = vcpu->run; |
3462 | 3481 | ||
3463 | kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0; | 3482 | kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; |
3464 | kvm_run->cr8 = kvm_get_cr8(vcpu); | 3483 | kvm_run->cr8 = kvm_get_cr8(vcpu); |
3465 | kvm_run->apic_base = kvm_get_apic_base(vcpu); | 3484 | kvm_run->apic_base = kvm_get_apic_base(vcpu); |
3466 | if (irqchip_in_kernel(vcpu->kvm)) | 3485 | if (irqchip_in_kernel(vcpu->kvm)) |
@@ -3840,13 +3859,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
3840 | #endif | 3859 | #endif |
3841 | 3860 | ||
3842 | regs->rip = kvm_rip_read(vcpu); | 3861 | regs->rip = kvm_rip_read(vcpu); |
3843 | regs->rflags = kvm_x86_ops->get_rflags(vcpu); | 3862 | regs->rflags = kvm_get_rflags(vcpu); |
3844 | |||
3845 | /* | ||
3846 | * Don't leak debug flags in case they were set for guest debugging | ||
3847 | */ | ||
3848 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
3849 | regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
3850 | 3863 | ||
3851 | vcpu_put(vcpu); | 3864 | vcpu_put(vcpu); |
3852 | 3865 | ||
@@ -3874,12 +3887,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
3874 | kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); | 3887 | kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); |
3875 | kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); | 3888 | kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); |
3876 | kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); | 3889 | kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); |
3877 | |||
3878 | #endif | 3890 | #endif |
3879 | 3891 | ||
3880 | kvm_rip_write(vcpu, regs->rip); | 3892 | kvm_rip_write(vcpu, regs->rip); |
3881 | kvm_x86_ops->set_rflags(vcpu, regs->rflags); | 3893 | kvm_set_rflags(vcpu, regs->rflags); |
3882 | |||
3883 | 3894 | ||
3884 | vcpu->arch.exception.pending = false; | 3895 | vcpu->arch.exception.pending = false; |
3885 | 3896 | ||
@@ -4098,7 +4109,7 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg) | |||
4098 | { | 4109 | { |
4099 | return (seg != VCPU_SREG_LDTR) && | 4110 | return (seg != VCPU_SREG_LDTR) && |
4100 | (seg != VCPU_SREG_TR) && | 4111 | (seg != VCPU_SREG_TR) && |
4101 | (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM); | 4112 | (kvm_get_rflags(vcpu) & X86_EFLAGS_VM); |
4102 | } | 4113 | } |
4103 | 4114 | ||
4104 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | 4115 | int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, |
@@ -4126,7 +4137,7 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu, | |||
4126 | { | 4137 | { |
4127 | tss->cr3 = vcpu->arch.cr3; | 4138 | tss->cr3 = vcpu->arch.cr3; |
4128 | tss->eip = kvm_rip_read(vcpu); | 4139 | tss->eip = kvm_rip_read(vcpu); |
4129 | tss->eflags = kvm_x86_ops->get_rflags(vcpu); | 4140 | tss->eflags = kvm_get_rflags(vcpu); |
4130 | tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX); | 4141 | tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX); |
4131 | tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); | 4142 | tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); |
4132 | tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX); | 4143 | tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX); |
@@ -4150,7 +4161,7 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu, | |||
4150 | kvm_set_cr3(vcpu, tss->cr3); | 4161 | kvm_set_cr3(vcpu, tss->cr3); |
4151 | 4162 | ||
4152 | kvm_rip_write(vcpu, tss->eip); | 4163 | kvm_rip_write(vcpu, tss->eip); |
4153 | kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2); | 4164 | kvm_set_rflags(vcpu, tss->eflags | 2); |
4154 | 4165 | ||
4155 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax); | 4166 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax); |
4156 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx); | 4167 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx); |
@@ -4188,7 +4199,7 @@ static void save_state_to_tss16(struct kvm_vcpu *vcpu, | |||
4188 | struct tss_segment_16 *tss) | 4199 | struct tss_segment_16 *tss) |
4189 | { | 4200 | { |
4190 | tss->ip = kvm_rip_read(vcpu); | 4201 | tss->ip = kvm_rip_read(vcpu); |
4191 | tss->flag = kvm_x86_ops->get_rflags(vcpu); | 4202 | tss->flag = kvm_get_rflags(vcpu); |
4192 | tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX); | 4203 | tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX); |
4193 | tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX); | 4204 | tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX); |
4194 | tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX); | 4205 | tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX); |
@@ -4209,7 +4220,7 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu, | |||
4209 | struct tss_segment_16 *tss) | 4220 | struct tss_segment_16 *tss) |
4210 | { | 4221 | { |
4211 | kvm_rip_write(vcpu, tss->ip); | 4222 | kvm_rip_write(vcpu, tss->ip); |
4212 | kvm_x86_ops->set_rflags(vcpu, tss->flag | 2); | 4223 | kvm_set_rflags(vcpu, tss->flag | 2); |
4213 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax); | 4224 | kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax); |
4214 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx); | 4225 | kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx); |
4215 | kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx); | 4226 | kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx); |
@@ -4355,8 +4366,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4355 | } | 4366 | } |
4356 | 4367 | ||
4357 | if (reason == TASK_SWITCH_IRET) { | 4368 | if (reason == TASK_SWITCH_IRET) { |
4358 | u32 eflags = kvm_x86_ops->get_rflags(vcpu); | 4369 | u32 eflags = kvm_get_rflags(vcpu); |
4359 | kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT); | 4370 | kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT); |
4360 | } | 4371 | } |
4361 | 4372 | ||
4362 | /* set back link to prev task only if NT bit is set in eflags | 4373 | /* set back link to prev task only if NT bit is set in eflags |
@@ -4377,8 +4388,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
4377 | old_tss_base, &nseg_desc); | 4388 | old_tss_base, &nseg_desc); |
4378 | 4389 | ||
4379 | if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) { | 4390 | if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) { |
4380 | u32 eflags = kvm_x86_ops->get_rflags(vcpu); | 4391 | u32 eflags = kvm_get_rflags(vcpu); |
4381 | kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT); | 4392 | kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT); |
4382 | } | 4393 | } |
4383 | 4394 | ||
4384 | if (reason != TASK_SWITCH_IRET) { | 4395 | if (reason != TASK_SWITCH_IRET) { |
@@ -4473,12 +4484,15 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
4473 | struct kvm_guest_debug *dbg) | 4484 | struct kvm_guest_debug *dbg) |
4474 | { | 4485 | { |
4475 | unsigned long rflags; | 4486 | unsigned long rflags; |
4476 | int old_debug; | ||
4477 | int i; | 4487 | int i; |
4478 | 4488 | ||
4479 | vcpu_load(vcpu); | 4489 | vcpu_load(vcpu); |
4480 | 4490 | ||
4481 | old_debug = vcpu->guest_debug; | 4491 | /* |
4492 | * Read rflags as long as potentially injected trace flags are still | ||
4493 | * filtered out. | ||
4494 | */ | ||
4495 | rflags = kvm_get_rflags(vcpu); | ||
4482 | 4496 | ||
4483 | vcpu->guest_debug = dbg->control; | 4497 | vcpu->guest_debug = dbg->control; |
4484 | if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) | 4498 | if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) |
@@ -4495,12 +4509,11 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
4495 | vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); | 4509 | vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); |
4496 | } | 4510 | } |
4497 | 4511 | ||
4498 | rflags = kvm_x86_ops->get_rflags(vcpu); | 4512 | /* |
4499 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | 4513 | * Trigger an rflags update that will inject or remove the trace |
4500 | rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | 4514 | * flags. |
4501 | else if (old_debug & KVM_GUESTDBG_SINGLESTEP) | 4515 | */ |
4502 | rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | 4516 | kvm_set_rflags(vcpu, rflags); |
4503 | kvm_x86_ops->set_rflags(vcpu, rflags); | ||
4504 | 4517 | ||
4505 | kvm_x86_ops->set_guest_debug(vcpu, dbg); | 4518 | kvm_x86_ops->set_guest_debug(vcpu, dbg); |
4506 | 4519 | ||