diff options
author | Gleb Natapov <gleb@redhat.com> | 2009-05-11 06:35:52 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-06-10 04:48:59 -0400 |
commit | 44c11430b52cbad0a467bc023a802d122dfd285c (patch) | |
tree | 1be73025a2019b6de60836cbaa56bf5c8cb6f9db /arch/x86/kvm/svm.c | |
parent | 6a8b1d13121f8226783987dc7ddd861ee2245410 (diff) |
KVM: inject NMI after IRET from a previous NMI, not before.
If an NMI is received while another NMI is being handled, it should be
injected immediately after IRET from the previous NMI handler. However,
SVM intercepts IRET before the instruction executes, so we can't inject
the pending NMI at that point, and there is no way to request an exit
when the NMI window opens. This patch fixes the SVM code to open the NMI
window after IRET by single-stepping over the IRET instruction.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r-- | arch/x86/kvm/svm.c | 62 |
1 files changed, 50 insertions, 12 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 377c4f17d170..71510e07e69e 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -965,15 +965,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, | |||
965 | 965 | ||
966 | } | 966 | } |
967 | 967 | ||
968 | static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) | 968 | static void update_db_intercept(struct kvm_vcpu *vcpu) |
969 | { | 969 | { |
970 | int old_debug = vcpu->guest_debug; | ||
971 | struct vcpu_svm *svm = to_svm(vcpu); | 970 | struct vcpu_svm *svm = to_svm(vcpu); |
972 | 971 | ||
973 | vcpu->guest_debug = dbg->control; | ||
974 | |||
975 | svm->vmcb->control.intercept_exceptions &= | 972 | svm->vmcb->control.intercept_exceptions &= |
976 | ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); | 973 | ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); |
974 | |||
975 | if (vcpu->arch.singlestep) | ||
976 | svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR); | ||
977 | |||
977 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { | 978 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
978 | if (vcpu->guest_debug & | 979 | if (vcpu->guest_debug & |
979 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) | 980 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) |
@@ -984,6 +985,16 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) | |||
984 | 1 << BP_VECTOR; | 985 | 1 << BP_VECTOR; |
985 | } else | 986 | } else |
986 | vcpu->guest_debug = 0; | 987 | vcpu->guest_debug = 0; |
988 | } | ||
989 | |||
990 | static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) | ||
991 | { | ||
992 | int old_debug = vcpu->guest_debug; | ||
993 | struct vcpu_svm *svm = to_svm(vcpu); | ||
994 | |||
995 | vcpu->guest_debug = dbg->control; | ||
996 | |||
997 | update_db_intercept(vcpu); | ||
987 | 998 | ||
988 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | 999 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) |
989 | svm->vmcb->save.dr7 = dbg->arch.debugreg[7]; | 1000 | svm->vmcb->save.dr7 = dbg->arch.debugreg[7]; |
@@ -1133,14 +1144,30 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1133 | static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1144 | static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) |
1134 | { | 1145 | { |
1135 | if (!(svm->vcpu.guest_debug & | 1146 | if (!(svm->vcpu.guest_debug & |
1136 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { | 1147 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
1148 | !svm->vcpu.arch.singlestep) { | ||
1137 | kvm_queue_exception(&svm->vcpu, DB_VECTOR); | 1149 | kvm_queue_exception(&svm->vcpu, DB_VECTOR); |
1138 | return 1; | 1150 | return 1; |
1139 | } | 1151 | } |
1140 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 1152 | |
1141 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; | 1153 | if (svm->vcpu.arch.singlestep) { |
1142 | kvm_run->debug.arch.exception = DB_VECTOR; | 1154 | svm->vcpu.arch.singlestep = false; |
1143 | return 0; | 1155 | if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) |
1156 | svm->vmcb->save.rflags &= | ||
1157 | ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
1158 | update_db_intercept(&svm->vcpu); | ||
1159 | } | ||
1160 | |||
1161 | if (svm->vcpu.guest_debug & | ||
1162 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){ | ||
1163 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | ||
1164 | kvm_run->debug.arch.pc = | ||
1165 | svm->vmcb->save.cs.base + svm->vmcb->save.rip; | ||
1166 | kvm_run->debug.arch.exception = DB_VECTOR; | ||
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | return 1; | ||
1144 | } | 1171 | } |
1145 | 1172 | ||
1146 | static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1173 | static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) |
@@ -1887,7 +1914,7 @@ static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1887 | { | 1914 | { |
1888 | ++svm->vcpu.stat.nmi_window_exits; | 1915 | ++svm->vcpu.stat.nmi_window_exits; |
1889 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); | 1916 | svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET); |
1890 | svm->vcpu.arch.hflags &= ~HF_NMI_MASK; | 1917 | svm->vcpu.arch.hflags |= HF_IRET_MASK; |
1891 | return 1; | 1918 | return 1; |
1892 | } | 1919 | } |
1893 | 1920 | ||
@@ -2357,8 +2384,16 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu) | |||
2357 | { | 2384 | { |
2358 | struct vcpu_svm *svm = to_svm(vcpu); | 2385 | struct vcpu_svm *svm = to_svm(vcpu); |
2359 | 2386 | ||
2360 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) | 2387 | if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) |
2361 | enable_irq_window(vcpu); | 2388 | == HF_NMI_MASK) |
2389 | return; /* IRET will cause a vm exit */ | ||
2390 | |||
2391 | /* Something prevents NMI from been injected. Single step over | ||
2392 | possible problem (IRET or exception injection or interrupt | ||
2393 | shadow) */ | ||
2394 | vcpu->arch.singlestep = true; | ||
2395 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
2396 | update_db_intercept(vcpu); | ||
2362 | } | 2397 | } |
2363 | 2398 | ||
2364 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) | 2399 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) |
@@ -2401,6 +2436,9 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) | |||
2401 | int type; | 2436 | int type; |
2402 | u32 exitintinfo = svm->vmcb->control.exit_int_info; | 2437 | u32 exitintinfo = svm->vmcb->control.exit_int_info; |
2403 | 2438 | ||
2439 | if (svm->vcpu.arch.hflags & HF_IRET_MASK) | ||
2440 | svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); | ||
2441 | |||
2404 | svm->vcpu.arch.nmi_injected = false; | 2442 | svm->vcpu.arch.nmi_injected = false; |
2405 | kvm_clear_exception_queue(&svm->vcpu); | 2443 | kvm_clear_exception_queue(&svm->vcpu); |
2406 | kvm_clear_interrupt_queue(&svm->vcpu); | 2444 | kvm_clear_interrupt_queue(&svm->vcpu); |