author     Gleb Natapov <gleb@redhat.com>                2009-04-21 10:45:08 -0400
committer  Avi Kivity <avi@redhat.com>                   2009-06-10 04:48:48 -0400
commit     95ba82731374eb1c2af4dd442526c4b314f0e8b6 (patch)
tree       a8b8e23285686761694ee214c6de85e83f52652b /arch/x86/kvm/svm.c
parent     c4282df98ae0993983924c00ed76428a6609d68b (diff)
KVM: SVM: Add NMI injection support
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c   96
1 file changed, 50 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0f53439296b9..18072888efc5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1843,6 +1843,14 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         return 1;
 }
 
+static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+        ++svm->vcpu.stat.nmi_window_exits;
+        svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+        svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+        return 1;
+}
+
 static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
         if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
@@ -1863,8 +1871,10 @@ static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
         /* instruction emulation calls kvm_set_cr8() */
         emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
-        if (irqchip_in_kernel(svm->vcpu.kvm))
+        if (irqchip_in_kernel(svm->vcpu.kvm)) {
+                svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
                 return 1;
+        }
         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
                 return 1;
         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -2120,6 +2130,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
         /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
         [SVM_EXIT_CPUID]                        = cpuid_interception,
+        [SVM_EXIT_IRET]                         = iret_interception,
         [SVM_EXIT_INVD]                         = emulate_on_interception,
         [SVM_EXIT_HLT]                          = halt_interception,
         [SVM_EXIT_INVLPG]                       = invlpg_interception,
@@ -2227,6 +2238,21 @@ static void pre_svm_run(struct vcpu_svm *svm)
                 new_asid(svm, svm_data);
 }
 
+static void svm_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_svm *svm = to_svm(vcpu);
+        svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+}
+
+static void svm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+        vcpu->arch.hflags |= HF_NMI_MASK;
+        svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+        ++vcpu->stat.nmi_injections;
+}
 
 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
@@ -2242,8 +2268,10 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void svm_queue_irq(struct vcpu_svm *svm, unsigned nr)
+static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
 {
+        struct vcpu_svm *svm = to_svm(vcpu);
+
         svm->vmcb->control.event_inj = nr |
                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
@@ -2254,28 +2282,26 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 
         nested_svm_intr(svm);
 
-        svm_queue_irq(svm, irq);
+        svm_queue_irq(vcpu, irq);
 }
 
-static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
-        struct vmcb *vmcb = svm->vmcb;
-        int max_irr, tpr;
 
-        if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
+        if (irr == -1)
                 return;
 
-        vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
-
-        max_irr = kvm_lapic_find_highest_irr(vcpu);
-        if (max_irr == -1)
-                return;
-
-        tpr = kvm_lapic_get_cr8(vcpu) << 4;
+        if (tpr >= irr)
+                svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+}
 
-        if (tpr >= (max_irr & 0xf0))
-                vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_svm *svm = to_svm(vcpu);
+        struct vmcb *vmcb = svm->vmcb;
+        return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+                !(svm->vcpu.arch.hflags & HF_NMI_MASK);
 }
 
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
@@ -2293,39 +2319,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
         svm_inject_irq(to_svm(vcpu), 0x0);
 }
 
-static void svm_intr_inject(struct kvm_vcpu *vcpu)
-{
-        /* try to reinject previous events if any */
-        if (vcpu->arch.interrupt.pending) {
-                svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
-                return;
-        }
-
-        /* try to inject new event if pending */
-        if (kvm_cpu_has_interrupt(vcpu)) {
-                if (svm_interrupt_allowed(vcpu)) {
-                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-                        svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
-                }
-        }
-}
-
-static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
-        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-                kvm_run->request_interrupt_window;
 
-        if (nested_svm_intr(svm))
-                goto out;
-
-        svm_intr_inject(vcpu);
-
-        if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                 enable_irq_window(vcpu);
-
-out:
-        update_cr8_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -2650,9 +2649,14 @@ static struct kvm_x86_ops svm_x86_ops = {
         .patch_hypercall = svm_patch_hypercall,
         .get_irq = svm_get_irq,
         .set_irq = svm_set_irq,
+        .set_nmi = svm_inject_nmi,
         .queue_exception = svm_queue_exception,
-        .inject_pending_irq = svm_intr_assist,
         .interrupt_allowed = svm_interrupt_allowed,
+        .nmi_allowed = svm_nmi_allowed,
+        .enable_nmi_window = enable_nmi_window,
+        .enable_irq_window = enable_irq_window,
+        .update_cr8_intercept = update_cr8_intercept,
+        .drop_interrupt_shadow = svm_drop_interrupt_shadow,
 
         .set_tss_addr = svm_set_tss_addr,
         .get_tdp_level = get_npt_level,
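
Note (not part of the patch): the core of this change is a small NMI state machine. svm_inject_nmi() queues the NMI via EVTINJ, sets HF_NMI_MASK and intercepts IRET; iret_interception() clears both once the guest's NMI handler returns; svm_nmi_allowed() refuses a new NMI while the mask or an interrupt shadow is in effect. The standalone C model below is only an illustration of that flow under assumed placeholder bit values (vcpu_model, vmcb_model and the constants are made up here and are not the kernel definitions); it is not kernel code.

/* Standalone model of the NMI masking logic added to svm.c. */
#include <stdbool.h>
#include <stdio.h>

#define INTERCEPT_IRET_BIT          (1UL << 1)   /* placeholder bit value */
#define SVM_INTERRUPT_SHADOW_MASK   (1UL << 0)   /* placeholder bit value */
#define HF_NMI_MASK                 (1UL << 3)   /* placeholder bit value */

struct vmcb_model {
        unsigned long intercept;        /* models vmcb->control.intercept */
        unsigned long int_state;        /* models vmcb->control.int_state */
        bool nmi_event_pending;         /* models EVTINJ carrying an NMI  */
};

struct vcpu_model {
        unsigned long hflags;           /* models vcpu->arch.hflags */
        struct vmcb_model vmcb;
};

/* Mirrors svm_nmi_allowed(): no interrupt shadow and no NMI handler in flight. */
static bool nmi_allowed(const struct vcpu_model *v)
{
        return !(v->vmcb.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
               !(v->hflags & HF_NMI_MASK);
}

/* Mirrors svm_inject_nmi(): queue the NMI, mask further NMIs, watch for IRET. */
static void inject_nmi(struct vcpu_model *v)
{
        v->vmcb.nmi_event_pending = true;
        v->hflags |= HF_NMI_MASK;
        v->vmcb.intercept |= INTERCEPT_IRET_BIT;
}

/* Mirrors iret_interception(): the guest's IRET ends the NMI handler, unmask. */
static void iret_exit(struct vcpu_model *v)
{
        v->vmcb.intercept &= ~INTERCEPT_IRET_BIT;
        v->hflags &= ~HF_NMI_MASK;
}

int main(void)
{
        struct vcpu_model v = {0};

        printf("NMI allowed before injection:   %d\n", nmi_allowed(&v)); /* 1 */
        inject_nmi(&v);
        printf("NMI allowed while handler runs: %d\n", nmi_allowed(&v)); /* 0 */
        iret_exit(&v); /* guest executed IRET -> #VMEXIT(IRET) */
        printf("NMI allowed after IRET:         %d\n", nmi_allowed(&v)); /* 1 */
        return 0;
}

Running the model prints 1, 0, 1: a second NMI is held off until the IRET intercept fires, which is exactly the window the new iret_interception handler and HF_NMI_MASK flag track in the patch.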