Diffstat (limited to 'drivers/kvm/svm.c')
 -rw-r--r--  drivers/kvm/svm.c  94
 1 file changed, 87 insertions(+), 7 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa0428735717..855207a9b396 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -235,6 +235,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 
 	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
 	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+
+	vcpu->interrupt_window_open = 1;
 }
 
 static int has_svm(void)
@@ -1031,10 +1033,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
 	skip_emulated_instruction(vcpu);
-	if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF))
+	if (vcpu->irq_summary)
 		return 1;
 
 	kvm_run->exit_reason = KVM_EXIT_HLT;
+	++kvm_stat.halt_exits;
 	return 0;
 }
 
@@ -1186,6 +1189,24 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return rdmsr_interception(vcpu, kvm_run);
 }
 
+static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+					 struct kvm_run *kvm_run)
+{
+	/*
+	 * If user space is waiting to inject interrupts, exit to it as
+	 * soon as possible.
+	 */
+	if (kvm_run->request_interrupt_window &&
+	    !vcpu->irq_summary &&
+	    (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) {
+		++kvm_stat.irq_window_exits;
+		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+		return 0;
+	}
+
+	return 1;
+}
+
 static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 				  struct kvm_run *kvm_run) = {
 	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
@@ -1210,6 +1231,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[SVM_EXIT_NMI]				= nop_on_interception,
 	[SVM_EXIT_SMI]				= nop_on_interception,
 	[SVM_EXIT_INIT]				= nop_on_interception,
+	[SVM_EXIT_VINTR]			= interrupt_window_interception,
 	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
@@ -1278,15 +1300,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
 }
 
 
-static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
 	struct vmcb_control_area *control;
 
-	if (!vcpu->irq_summary)
-		return;
-
 	control = &vcpu->svm->vmcb->control;
-
 	control->int_vector = pop_irq(vcpu);
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
@@ -1301,6 +1319,59 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 		control->int_ctl &= ~V_IRQ_MASK;
 		push_irq(vcpu, control->int_vector);
 	}
+
+	vcpu->interrupt_window_open =
+		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
+}
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+				  struct kvm_run *kvm_run)
+{
+	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+
+	vcpu->interrupt_window_open =
+		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+
+	if (vcpu->interrupt_window_open && vcpu->irq_summary)
+		/*
+		 * Interrupts enabled and not blocked by sti or mov ss; inject now.
+		 */
+		kvm_do_inject_irq(vcpu);
+
+	/*
+	 * Interrupts blocked; intercept VINTR to get an exit once they unblock.
+	 */
+	if (!vcpu->interrupt_window_open &&
+	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+		control->intercept |= 1ULL << INTERCEPT_VINTR;
+	} else
+		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+			      struct kvm_run *kvm_run)
+{
+	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+						  vcpu->irq_summary == 0);
+	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+	kvm_run->cr8 = vcpu->cr8;
+	kvm_run->apic_base = vcpu->apic_base;
+}
+
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+					struct kvm_run *kvm_run)
+{
+	return (!vcpu->irq_summary &&
+		kvm_run->request_interrupt_window &&
+		vcpu->interrupt_window_open &&
+		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1326,7 +1397,7 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u16 ldt_selector;
 
 again:
-	kvm_try_inject_irq(vcpu);
+	do_interrupt_requests(vcpu, kvm_run);
 
 	clgi();
 
@@ -1487,17 +1558,26 @@ again:
 	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
 		kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
+		post_kvm_run_save(vcpu, kvm_run);
 		return 0;
 	}
 
 	if (handle_exit(vcpu, kvm_run)) {
 		if (signal_pending(current)) {
 			++kvm_stat.signal_exits;
+			post_kvm_run_save(vcpu, kvm_run);
+			return -EINTR;
+		}
+
+		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+			++kvm_stat.request_irq_exits;
+			post_kvm_run_save(vcpu, kvm_run);
 			return -EINTR;
 		}
 		kvm_resched(vcpu);
 		goto again;
 	}
+	post_kvm_run_save(vcpu, kvm_run);
 	return 0;
 }
 
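For reference, the userspace half of the protocol this patch completes looks roughly like the loop below: the VMM sets request_interrupt_window in struct kvm_run whenever it has an interrupt it cannot inject yet, and injects it once KVM reports the window open, either via ready_for_interrupt_injection after a normal exit or via a KVM_EXIT_IRQ_WINDOW_OPEN exit. This is a minimal sketch, not code from the patch; the helpers pending_irq(), next_irq_vector() and handle_guest_exit(), and the exact way struct kvm_run is passed to KVM_RUN, are assumptions for illustration and differ between KVM versions.

/*
 * Userspace sketch of the interrupt-window protocol (illustrative only).
 * The kvm_run fields used below are the ones this patch fills in; the
 * declared helpers are hypothetical stand-ins for the VMM's own irq queue
 * and exit handling.
 */
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int pending_irq(void);                        /* hypothetical: irq queued?   */
unsigned int next_irq_vector(void);           /* hypothetical: dequeue it    */
void handle_guest_exit(struct kvm_run *run);  /* hypothetical: hlt/mmio/pio  */

void vcpu_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		/* Only ask for an irq-window exit while an irq is waiting. */
		run->request_interrupt_window = pending_irq() ? 1 : 0;

		if (ioctl(vcpu_fd, KVM_RUN, run) < 0 && errno != EINTR)
			break;	/* EINTR covers the signal and irq-window returns */

		/* Inject as soon as KVM reports the window open. */
		if (pending_irq() &&
		    (run->ready_for_interrupt_injection ||
		     run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)) {
			struct kvm_interrupt irq = { .irq = next_irq_vector() };

			ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
			continue;
		}

		handle_guest_exit(run);
	}
}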