author    Gleb Natapov <gleb@redhat.com>    2009-04-21 10:45:08 -0400
committer Avi Kivity <avi@redhat.com>       2009-06-10 04:48:48 -0400
commit    95ba82731374eb1c2af4dd442526c4b314f0e8b6 (patch)
tree      a8b8e23285686761694ee214c6de85e83f52652b /arch/x86
parent    c4282df98ae0993983924c00ed76428a6609d68b (diff)
KVM: SVM: Add NMI injection support

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/kvm_host.h |  8
-rw-r--r--   arch/x86/kvm/svm.c              | 96
-rw-r--r--   arch/x86/kvm/vmx.c              | 79
-rw-r--r--   arch/x86/kvm/x86.c              | 71
4 files changed, 145 insertions(+), 109 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 53533ea17555..dd9ecd3de90d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -512,10 +512,15 @@ struct kvm_x86_ops {
 			       unsigned char *hypercall_addr);
 	int (*get_irq)(struct kvm_vcpu *vcpu);
 	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code);
-	void (*inject_pending_irq)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+	void (*drop_interrupt_shadow)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	int (*get_mt_mask_shift)(void);
@@ -763,6 +768,7 @@ enum {
 #define HF_GIF_MASK		(1 << 0)
 #define HF_HIF_MASK		(1 << 1)
 #define HF_VINTR_MASK		(1 << 2)
+#define HF_NMI_MASK		(1 << 3)
 
 /*
  * Hardware virtualization extension instructions may fault if a
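
The struct kvm_x86_ops change above is the heart of the patch: the monolithic
inject_pending_irq() policy hook is removed, and the vendor backends instead
export small primitives -- capability checks (interrupt_allowed, nmi_allowed),
injectors (set_irq, set_nmi), window requests (enable_irq_window,
enable_nmi_window), plus update_cr8_intercept and drop_interrupt_shadow -- so
the injection policy can live once in common x86 code. Below is a minimal
user-space model of that contract; all scaffolding (struct vcpu_model, the
model_* names) is invented for illustration and is not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-in for the real vcpu state. */
    struct vcpu_model {
            bool nmi_pending;   /* an NMI is waiting to be injected      */
            bool nmi_blocked;   /* models HF_NMI_MASK / interrupt shadow */
    };

    static int model_nmi_allowed(struct vcpu_model *v)
    {
            return !v->nmi_blocked;
    }

    static void model_set_nmi(struct vcpu_model *v)
    {
            v->nmi_blocked = true;  /* blocked until the guest IRETs */
            printf("NMI injected\n");
    }

    static void model_enable_nmi_window(struct vcpu_model *v)
    {
            (void)v;
            printf("exit requested for when injection becomes possible\n");
    }

    int main(void)
    {
            struct vcpu_model v = { .nmi_pending = true, .nmi_blocked = false };

            /* The decision common code makes through the new hooks: */
            if (v.nmi_pending) {
                    if (model_nmi_allowed(&v)) {
                            v.nmi_pending = false;
                            model_set_nmi(&v);
                    } else {
                            model_enable_nmi_window(&v);
                    }
            }
            return 0;
    }
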
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0f53439296b9..18072888efc5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1843,6 +1843,14 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	++svm->vcpu.stat.nmi_window_exits;
+	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+	return 1;
+}
+
 static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
@@ -1863,8 +1871,10 @@ static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
 	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
-	if (irqchip_in_kernel(svm->vcpu.kvm))
+	if (irqchip_in_kernel(svm->vcpu.kvm)) {
+		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 		return 1;
+	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
 		return 1;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -2120,6 +2130,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
 	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
+	[SVM_EXIT_IRET]				= iret_interception,
 	[SVM_EXIT_INVD]				= emulate_on_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
@@ -2227,6 +2238,21 @@ static void pre_svm_run(struct vcpu_svm *svm)
 	new_asid(svm, svm_data);
 }
 
+static void svm_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+}
+
+static void svm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+	vcpu->arch.hflags |= HF_NMI_MASK;
+	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+	++vcpu->stat.nmi_injections;
+}
 
 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
@@ -2242,8 +2268,10 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
-static void svm_queue_irq(struct vcpu_svm *svm, unsigned nr)
+static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	svm->vmcb->control.event_inj = nr |
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
@@ -2254,28 +2282,26 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 
 	nested_svm_intr(svm);
 
-	svm_queue_irq(svm, irq);
+	svm_queue_irq(vcpu, irq);
 }
 
-static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct vmcb *vmcb = svm->vmcb;
-	int max_irr, tpr;
-
-	if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
-		return;
-
-	vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 
-	max_irr = kvm_lapic_find_highest_irr(vcpu);
-	if (max_irr == -1)
+	if (irr == -1)
 		return;
 
-	tpr = kvm_lapic_get_cr8(vcpu) << 4;
-
-	if (tpr >= (max_irr & 0xf0))
-		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+	if (tpr >= irr)
+		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+}
+
+static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb;
+	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
 }
 
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
@@ -2293,39 +2319,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 	svm_inject_irq(to_svm(vcpu), 0x0);
 }
 
-static void svm_intr_inject(struct kvm_vcpu *vcpu)
-{
-	/* try to reinject previous events if any */
-	if (vcpu->arch.interrupt.pending) {
-		svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
-		return;
-	}
-
-	/* try to inject new event if pending */
-	if (kvm_cpu_has_interrupt(vcpu)) {
-		if (svm_interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
-		}
-	}
-}
-
-static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
 
-	if (nested_svm_intr(svm))
-		goto out;
-
-	svm_intr_inject(vcpu);
-
-	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
 		enable_irq_window(vcpu);
-
-out:
-	update_cr8_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -2650,9 +2649,14 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.patch_hypercall = svm_patch_hypercall,
 	.get_irq = svm_get_irq,
 	.set_irq = svm_set_irq,
+	.set_nmi = svm_inject_nmi,
 	.queue_exception = svm_queue_exception,
-	.inject_pending_irq = svm_intr_assist,
 	.interrupt_allowed = svm_interrupt_allowed,
+	.nmi_allowed = svm_nmi_allowed,
+	.enable_nmi_window = enable_nmi_window,
+	.enable_irq_window = enable_irq_window,
+	.update_cr8_intercept = update_cr8_intercept,
+	.drop_interrupt_shadow = svm_drop_interrupt_shadow,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
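
Note what the SVM side does for an NMI window: SVM has no dedicated
NMI-window exit here, so svm_inject_nmi() sets HF_NMI_MASK and arms an IRET
intercept, and iret_interception() clears both when the guest executes the
IRET that ends its NMI handler -- the point where the next NMI may be taken.
A small user-space model of that state machine (scaffolding invented, not
kernel code):

    #include <assert.h>
    #include <stdbool.h>

    struct svm_model {
            bool nmi_mask;         /* models HF_NMI_MASK            */
            bool iret_intercepted; /* models the INTERCEPT_IRET bit */
    };

    static void inject_nmi(struct svm_model *s)
    {
            /* models svm_inject_nmi(): block further NMIs and trap the
             * IRET that will end the guest's NMI handler */
            s->nmi_mask = true;
            s->iret_intercepted = true;
    }

    static void guest_iret(struct svm_model *s)
    {
            /* models iret_interception(): the handler is done */
            s->iret_intercepted = false;
            s->nmi_mask = false;
    }

    static bool nmi_allowed(const struct svm_model *s)
    {
            /* the real svm_nmi_allowed() also checks the interrupt shadow */
            return !s->nmi_mask;
    }

    int main(void)
    {
            struct svm_model s = { false, false };

            assert(nmi_allowed(&s));
            inject_nmi(&s);
            assert(!nmi_allowed(&s)); /* a second NMI must wait */
            guest_iret(&s);
            assert(nmi_allowed(&s));  /* the NMI window is open again */
            return 0;
    }
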
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 116eac01a9f0..bad2413fbd51 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1314,6 +1314,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
+	if (!cpu_has_vmx_tpr_shadow())
+		kvm_x86_ops->update_cr8_intercept = NULL;
+
 	return alloc_kvm_area();
 }
 
@@ -2404,6 +2407,12 @@ out:
 	return ret;
 }
 
+void vmx_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+			GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
+}
+
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
@@ -3214,21 +3223,14 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void update_tpr_threshold(struct kvm_vcpu *vcpu)
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
-	int max_irr, tpr;
-
-	if (!vm_need_tpr_shadow(vcpu->kvm))
-		return;
-
-	if (!kvm_lapic_enabled(vcpu) ||
-	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+	if (irr == -1 || tpr < irr) {
 		vmcs_write32(TPR_THRESHOLD, 0);
 		return;
 	}
 
-	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
-	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
+	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
@@ -3300,55 +3302,6 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	}
 }
 
-static void vmx_intr_inject(struct kvm_vcpu *vcpu)
-{
-	/* try to reinject previous events if any */
-	if (vcpu->arch.nmi_injected) {
-		vmx_inject_nmi(vcpu);
-		return;
-	}
-
-	if (vcpu->arch.interrupt.pending) {
-		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		return;
-	}
-
-	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (vmx_nmi_allowed(vcpu)) {
-			vcpu->arch.nmi_pending = false;
-			vcpu->arch.nmi_injected = true;
-			vmx_inject_nmi(vcpu);
-		}
-	} else if (kvm_cpu_has_interrupt(vcpu)) {
-		if (vmx_interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		}
-	}
-}
-
-static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
-
-	update_tpr_threshold(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-				GUEST_INTR_STATE_STI |
-				GUEST_INTR_STATE_MOV_SS);
-
-	vmx_intr_inject(vcpu);
-
-	/* enable NMI/IRQ window open exits if needed */
-	if (vcpu->arch.nmi_pending)
-		enable_nmi_window(vcpu);
-	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-		enable_irq_window(vcpu);
-}
-
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
@@ -3683,9 +3636,15 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.patch_hypercall = vmx_patch_hypercall,
 	.get_irq = vmx_get_irq,
 	.set_irq = vmx_inject_irq,
+	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
-	.inject_pending_irq = vmx_intr_assist,
 	.interrupt_allowed = vmx_interrupt_allowed,
+	.nmi_allowed = vmx_nmi_allowed,
+	.enable_nmi_window = enable_nmi_window,
+	.enable_irq_window = enable_irq_window,
+	.update_cr8_intercept = update_cr8_intercept,
+	.drop_interrupt_shadow = vmx_drop_interrupt_shadow,
+
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask_shift = vmx_get_mt_mask_shift,
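
With this refactor, common code computes both inputs to
update_cr8_intercept() once: tpr is the guest's CR8 value (a 4-bit priority
class) and irr is the priority class of the highest pending interrupt
(max_irr >> 4), or -1 if none is pending. Each backend then only compares
two small integers: SVM re-arms the CR8-write intercept while tpr >= irr,
and VMX writes irr into TPR_THRESHOLD so the CPU exits once the guest lowers
its TPR below the pending priority. A worked example with invented values:

    #include <stdio.h>

    int main(void)
    {
            int max_irr = 0x51;     /* highest pending vector (invented)   */
            int irr = max_irr >> 4; /* its priority class: 5               */
            int tpr = 6;            /* guest CR8, i.e. TPR bits 7:4        */

            /* tpr >= irr: the pending interrupt is masked by the guest's
             * TPR, so KVM must watch CR8 writes (SVM) or set TPR_THRESHOLD
             * (VMX) to notice when the window opens. */
            printf("window closed, intercept needed: %s\n",
                   tpr >= irr ? "yes" : "no");
            return 0;
    }
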
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0890df9e88ff..96e995c1dd76 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3114,6 +3114,68 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
 	up_read(&vcpu->kvm->slots_lock);
 }
 
+static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+{
+	int max_irr, tpr;
+
+	if (!kvm_x86_ops->update_cr8_intercept)
+		return;
+
+	max_irr = kvm_lapic_find_highest_irr(vcpu);
+
+	if (max_irr != -1)
+		max_irr >>= 4;
+
+	tpr = kvm_lapic_get_cr8(vcpu);
+
+	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
+}
+
+static void inject_irq(struct kvm_vcpu *vcpu)
+{
+	/* try to reinject previous events if any */
+	if (vcpu->arch.nmi_injected) {
+		kvm_x86_ops->set_nmi(vcpu);
+		return;
+	}
+
+	if (vcpu->arch.interrupt.pending) {
+		kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
+		return;
+	}
+
+	/* try to inject new event if pending */
+	if (vcpu->arch.nmi_pending) {
+		if (kvm_x86_ops->nmi_allowed(vcpu)) {
+			vcpu->arch.nmi_pending = false;
+			vcpu->arch.nmi_injected = true;
+			kvm_x86_ops->set_nmi(vcpu);
+		}
+	} else if (kvm_cpu_has_interrupt(vcpu)) {
+		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
+			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
+			kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
+		}
+	}
+}
+
+static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+		kvm_run->request_interrupt_window;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		kvm_x86_ops->drop_interrupt_shadow(vcpu);
+
+	inject_irq(vcpu);
+
+	/* enable NMI/IRQ window open exits if needed */
+	if (vcpu->arch.nmi_pending)
+		kvm_x86_ops->enable_nmi_window(vcpu);
+	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		kvm_x86_ops->enable_irq_window(vcpu);
+}
+
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
@@ -3172,9 +3234,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->arch.exception.pending)
 		__queue_exception(vcpu);
 	else
-		kvm_x86_ops->inject_pending_irq(vcpu, kvm_run);
+		inject_pending_irq(vcpu, kvm_run);
 
-	kvm_lapic_sync_to_vapic(vcpu);
+	if (kvm_lapic_enabled(vcpu)) {
+		if (!vcpu->arch.apic->vapic_addr)
+			update_cr8_intercept(vcpu);
+		else
+			kvm_lapic_sync_to_vapic(vcpu);
+	}
 
 	up_read(&vcpu->kvm->slots_lock);
 
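
The common inject_pending_irq() added above fixes the event priority in one
place: vcpu_enter_guest() queues pending exceptions first, a previously
interrupted NMI or IRQ injection is re-injected before any new event is
considered, a new NMI beats a new maskable interrupt, and when injection is
blocked the matching window exit is requested instead. A compact model of
that ordering (names invented, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    enum event { NONE, REINJECT_NMI, REINJECT_IRQ, NEW_NMI, NEW_IRQ };

    /* Mirrors the order of the checks in inject_irq() above; the
     * *_allowed() gates are modeled as the callers' booleans. */
    static enum event pick_event(bool nmi_injected, bool irq_was_pending,
                                 bool nmi_pending, bool irq_available)
    {
            if (nmi_injected)
                    return REINJECT_NMI; /* finish an interrupted NMI injection */
            if (irq_was_pending)
                    return REINJECT_IRQ; /* finish an interrupted IRQ injection */
            if (nmi_pending)
                    return NEW_NMI;      /* a new NMI beats a new IRQ */
            if (irq_available)
                    return NEW_IRQ;
            return NONE;
    }

    int main(void)
    {
            /* NMI and IRQ both newly pending: the NMI goes first. */
            enum event e = pick_event(false, false, true, true);

            printf("%s\n", e == NEW_NMI ? "NEW_NMI" : "other");
            return 0;
    }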