Diffstat (limited to 'arch/x86/kvm/svm.c')
 -rw-r--r--  arch/x86/kvm/svm.c | 187
 1 file changed, 85 insertions(+), 102 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f2933abc9691..a80ffaa16a94 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -70,7 +70,6 @@ module_param(npt, int, S_IRUGO);
 static int nested = 0;
 module_param(nested, int, S_IRUGO);
 
-static void kvm_reput_irq(struct vcpu_svm *svm);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
@@ -199,9 +198,7 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 
 static bool svm_exception_injected(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
+	return false;
 }
 
 static int is_external_interrupt(u32 info)
@@ -978,12 +975,9 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 
 static int svm_get_irq(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 exit_int_info = svm->vmcb->control.exit_int_info;
-
-	if (is_external_interrupt(exit_int_info))
-		return exit_int_info & SVM_EVTINJ_VEC_MASK;
-	return -1;
+	if (!vcpu->arch.interrupt.pending)
+		return -1;
+	return vcpu->arch.interrupt.nr;
 }
 
 static void load_host_msrs(struct kvm_vcpu *vcpu)
@@ -1090,17 +1084,8 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 
 static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 exit_int_info = svm->vmcb->control.exit_int_info;
-	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
-	bool event_injection = false;
-
-	if (!irqchip_in_kernel(kvm) &&
-	    is_external_interrupt(exit_int_info)) {
-		event_injection = true;
-		kvm_push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
-	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
@@ -1120,9 +1105,11 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	 */
 	if (npt_enabled)
 		svm_flush_tlb(&svm->vcpu);
-
-	if (!npt_enabled && event_injection)
-		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+	else {
+		if (svm->vcpu.arch.interrupt.pending ||
+		    svm->vcpu.arch.exception.pending)
+			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+	}
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
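
Without NPT the guest runs on shadow page tables, so the fault handled here may have hit a guest page KVM write-protected. If an interrupt or exception is still queued when that happens, delivery could fault on the same page again and again, so the page is unprotected first, presumably to break that re-entry loop. A minimal predicate capturing the hunk's decision (the helper name is made up for this sketch):

    #include <stdbool.h>

    /* mirrors the !npt_enabled branch of pf_interception() above */
    static bool should_unprotect(bool npt_enabled,
                                 bool irq_pending, bool exception_pending)
    {
            return !npt_enabled && (irq_pending || exception_pending);
    }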
@@ -2196,7 +2183,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		}
 	}
 
-	kvm_reput_irq(svm);
 
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2259,13 +2245,19 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
+static void svm_queue_irq(struct vcpu_svm *svm, unsigned nr)
+{
+	svm->vmcb->control.event_inj = nr |
+		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
+}
+
 static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	nested_svm_intr(svm);
 
-	svm_inject_irq(svm, irq);
+	svm_queue_irq(svm, irq);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
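
The new svm_queue_irq() helper writes the interrupt straight into the VMCB's event-injection field. A minimal sketch of that encoding, using the architectural EVTINJ layout (bits 7:0 vector, bits 10:8 type, bit 31 valid); the constant names follow arch/x86/include/asm/svm.h, while the demo itself is only illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define SVM_EVTINJ_VEC_MASK   0xff
    #define SVM_EVTINJ_TYPE_SHIFT 8
    #define SVM_EVTINJ_TYPE_INTR  (0u << SVM_EVTINJ_TYPE_SHIFT)
    #define SVM_EVTINJ_VALID      (1u << 31)

    /* what svm_queue_irq() stores into vmcb->control.event_inj */
    static uint32_t evtinj_for_intr(uint8_t vector)
    {
            return vector | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
    }

    int main(void)
    {
            /* vector 0x30 -> 0x80000030: valid, type INTR, vector 0x30 */
            printf("event_inj = %#x\n", evtinj_for_intr(0x30));
            return 0;
    }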
@@ -2298,98 +2290,47 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 		(svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
-static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb *vmcb = svm->vmcb;
-	int intr_vector = -1;
-
-	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
-	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
-		intr_vector = vmcb->control.exit_int_info &
-			      SVM_EVTINJ_VEC_MASK;
-		vmcb->control.exit_int_info = 0;
-		svm_inject_irq(svm, intr_vector);
-		goto out;
-	}
-
-	if (vmcb->control.int_ctl & V_IRQ_MASK)
-		goto out;
-
-	if (!kvm_cpu_has_interrupt(vcpu))
-		goto out;
-
-	if (nested_svm_intr(svm))
-		goto out;
-
-	if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
-		goto out;
-
-	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
-	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
-	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
-		/* unable to deliver irq, set pending irq */
-		svm_set_vintr(svm);
-		svm_inject_irq(svm, 0x0);
-		goto out;
-	}
-	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
-	intr_vector = kvm_cpu_get_interrupt(vcpu);
-	svm_inject_irq(svm, intr_vector);
-out:
-	update_cr8_intercept(vcpu);
+	svm_set_vintr(to_svm(vcpu));
+	svm_inject_irq(to_svm(vcpu), 0x0);
 }
 
-static void kvm_reput_irq(struct vcpu_svm *svm)
+static void svm_intr_inject(struct kvm_vcpu *vcpu)
 {
-	struct vmcb_control_area *control = &svm->vmcb->control;
-
-	if ((control->int_ctl & V_IRQ_MASK)
-	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
-		control->int_ctl &= ~V_IRQ_MASK;
-		kvm_push_irq(&svm->vcpu, control->int_vector);
+	/* try to reinject previous events if any */
+	if (vcpu->arch.interrupt.pending) {
+		svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
+		return;
 	}
 
-	svm->vcpu.arch.interrupt_window_open =
-		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-		(svm->vcpu.arch.hflags & HF_GIF_MASK);
-}
-
-static void svm_do_inject_vector(struct vcpu_svm *svm)
-{
-	svm_inject_irq(svm, kvm_pop_irq(&svm->vcpu));
+	/* try to inject new event if pending */
+	if (kvm_cpu_has_interrupt(vcpu)) {
+		if (vcpu->arch.interrupt_window_open) {
+			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
+			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
+		}
+	}
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
-				  struct kvm_run *kvm_run)
+static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb_control_area *control = &svm->vmcb->control;
+	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+		kvm_run->request_interrupt_window;
 
 	if (nested_svm_intr(svm))
-		return;
+		goto out;
 
-	svm->vcpu.arch.interrupt_window_open =
-		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-		 (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
-		 (svm->vcpu.arch.hflags & HF_GIF_MASK));
+	svm->vcpu.arch.interrupt_window_open = svm_interrupt_allowed(vcpu);
 
-	if (svm->vcpu.arch.interrupt_window_open &&
-	    kvm_cpu_has_interrupt(&svm->vcpu))
-		/*
-		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
-		 */
-		svm_do_inject_vector(svm);
+	svm_intr_inject(vcpu);
 
-	/*
-	 * Interrupts blocked. Wait for unblock.
-	 */
-	if (!svm->vcpu.arch.interrupt_window_open &&
-	    (kvm_cpu_has_interrupt(&svm->vcpu) ||
-	     kvm_run->request_interrupt_window))
-		svm_set_vintr(svm);
-	else
-		svm_clear_vintr(svm);
+	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		enable_irq_window(vcpu);
+
+out:
+	update_cr8_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
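
Taken together, enable_irq_window(), svm_intr_inject(), and the slimmed-down svm_intr_assist() replace both old entry points: reinject whatever was already queued, otherwise pull a fresh vector while the window is open, and arm a virtual-interrupt window when a source is still pending but undeliverable. A compilable userspace model of that control flow (all names are illustrative stand-ins for the kernel helpers):

    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu_model {
            bool irq_pending;       /* a queued interrupt to (re)inject */
            int irq_nr;
            bool window_open;       /* IF and GIF set, no interrupt shadow */
            bool apic_has_irq;      /* kvm_cpu_has_interrupt() analogue */
            int apic_next_vector;
    };

    static void queue_to_vmcb(int nr) { printf("inject vector %d\n", nr); }
    static void enable_irq_window(void) { printf("arm V_INTR window\n"); }

    /* one pass of the unified injection logic before VM entry */
    static void intr_assist(struct vcpu_model *v)
    {
            if (v->irq_pending) {
                    queue_to_vmcb(v->irq_nr);       /* reinject first */
            } else if (v->apic_has_irq && v->window_open) {
                    v->irq_pending = true;          /* kvm_queue_interrupt() */
                    v->irq_nr = v->apic_next_vector;
                    v->apic_has_irq = false;        /* vector consumed */
                    queue_to_vmcb(v->irq_nr);
            }

            /* a source is still pending but undeliverable: ask for a window */
            if (v->apic_has_irq)
                    enable_irq_window();
    }

    int main(void)
    {
            struct vcpu_model v = { .apic_has_irq = true, .window_open = false,
                                    .apic_next_vector = 0x30 };
            intr_assist(&v);        /* window closed: arms the window */
            v.window_open = true;
            intr_assist(&v);        /* window open: injects vector 0x30 */
            return 0;
    }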
@@ -2429,6 +2370,46 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
 	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
 }
 
+static void svm_complete_interrupts(struct vcpu_svm *svm)
+{
+	u8 vector;
+	int type;
+	u32 exitintinfo = svm->vmcb->control.exit_int_info;
+
+	svm->vcpu.arch.nmi_injected = false;
+	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(&svm->vcpu);
+
+	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
+		return;
+
+	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
+	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
+
+	switch (type) {
+	case SVM_EXITINTINFO_TYPE_NMI:
+		svm->vcpu.arch.nmi_injected = true;
+		break;
+	case SVM_EXITINTINFO_TYPE_EXEPT:
+		/* In case of a software exception do not reinject the
+		   vector, but re-execute the instruction instead */
+		if (vector == BP_VECTOR || vector == OF_VECTOR)
+			break;
+		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
+			u32 err = svm->vmcb->control.exit_int_info_err;
+			kvm_queue_exception_e(&svm->vcpu, vector, err);
+
+		} else
+			kvm_queue_exception(&svm->vcpu, vector);
+		break;
+	case SVM_EXITINTINFO_TYPE_INTR:
+		kvm_queue_interrupt(&svm->vcpu, vector);
+		break;
+	default:
+		break;
+	}
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
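
svm_complete_interrupts() runs after every exit and converts whatever was in flight (EXITINTINFO) back into the generic queues, so the next entry can reinject it. A standalone decoder following the same classification; the bit layout is architectural (vector in bits 7:0, type in bits 10:8, valid in bit 31) and the constant values below match the masks in arch/x86/include/asm/svm.h, though this demo is only a sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define EXITINTINFO_VEC_MASK    0xffu
    #define EXITINTINFO_TYPE_MASK   (7u << 8)
    #define EXITINTINFO_TYPE_INTR   (0u << 8)
    #define EXITINTINFO_TYPE_NMI    (2u << 8)
    #define EXITINTINFO_TYPE_EXEPT  (3u << 8)
    #define EXITINTINFO_VALID       (1u << 31)
    #define BP_VECTOR 3             /* #BP, raised by int3 */
    #define OF_VECTOR 4             /* #OF, raised by into */

    /* same classification as svm_complete_interrupts() */
    static void classify(uint32_t info)
    {
            uint8_t vector = info & EXITINTINFO_VEC_MASK;

            if (!(info & EXITINTINFO_VALID)) {
                    puts("no event was in flight");
                    return;
            }
            switch (info & EXITINTINFO_TYPE_MASK) {
            case EXITINTINFO_TYPE_NMI:
                    puts("NMI: set nmi_injected for reinjection");
                    break;
            case EXITINTINFO_TYPE_EXEPT:
                    if (vector == BP_VECTOR || vector == OF_VECTOR)
                            puts("software exception: re-execute instead");
                    else
                            printf("requeue exception %u\n", vector);
                    break;
            case EXITINTINFO_TYPE_INTR:
                    printf("requeue external interrupt %u\n", vector);
                    break;
            }
    }

    int main(void)
    {
            classify(EXITINTINFO_VALID | EXITINTINFO_TYPE_INTR | 0x30);
            classify(EXITINTINFO_VALID | EXITINTINFO_TYPE_EXEPT | BP_VECTOR);
            return 0;
    }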
@@ -2557,6 +2538,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sync_cr8_to_lapic(vcpu);
 
 	svm->next_rip = 0;
+
+	svm_complete_interrupts(svm);
 }
 
 #undef R
@@ -2678,7 +2661,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.queue_exception = svm_queue_exception,
 	.exception_injected = svm_exception_injected,
 	.inject_pending_irq = svm_intr_assist,
-	.inject_pending_vectors = do_interrupt_requests,
+	.inject_pending_vectors = svm_intr_assist,
 	.interrupt_allowed = svm_interrupt_allowed,
 
 	.set_tss_addr = svm_set_tss_addr,