about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  drivers/kvm/kvm.h      |  4
-rw-r--r--  drivers/kvm/kvm_main.c | 11
-rw-r--r--  drivers/kvm/svm.c      | 94
-rw-r--r--  drivers/kvm/vmx.c      | 88
-rw-r--r--  include/linux/kvm.h    | 11
5 files changed, 180 insertions, 28 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 100df6f38d92..32023d1ac24b 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -173,6 +173,7 @@ struct kvm_vcpu {
173 struct mutex mutex; 173 struct mutex mutex;
174 int cpu; 174 int cpu;
175 int launched; 175 int launched;
176 int interrupt_window_open;
176 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 177 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
177#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) 178#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
178 unsigned long irq_pending[NR_IRQ_WORDS]; 179 unsigned long irq_pending[NR_IRQ_WORDS];
@@ -247,6 +248,9 @@ struct kvm_stat {
247 u32 io_exits; 248 u32 io_exits;
248 u32 mmio_exits; 249 u32 mmio_exits;
249 u32 signal_exits; 250 u32 signal_exits;
251 u32 irq_window_exits;
252 u32 halt_exits;
253 u32 request_irq_exits;
250 u32 irq_exits; 254 u32 irq_exits;
251}; 255};
252 256
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b54caf0ceeb1..aca14139a680 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -58,6 +58,9 @@ static struct kvm_stats_debugfs_item {
58 { "io_exits", &kvm_stat.io_exits }, 58 { "io_exits", &kvm_stat.io_exits },
59 { "mmio_exits", &kvm_stat.mmio_exits }, 59 { "mmio_exits", &kvm_stat.mmio_exits },
60 { "signal_exits", &kvm_stat.signal_exits }, 60 { "signal_exits", &kvm_stat.signal_exits },
61 { "irq_window", &kvm_stat.irq_window_exits },
62 { "halt_exits", &kvm_stat.halt_exits },
63 { "request_irq", &kvm_stat.request_irq_exits },
61 { "irq_exits", &kvm_stat.irq_exits }, 64 { "irq_exits", &kvm_stat.irq_exits },
62 { 0, 0 } 65 { 0, 0 }
63}; 66};
@@ -1693,12 +1696,12 @@ static long kvm_dev_ioctl(struct file *filp,
1693 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run)) 1696 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
1694 goto out; 1697 goto out;
1695 r = kvm_dev_ioctl_run(kvm, &kvm_run); 1698 r = kvm_dev_ioctl_run(kvm, &kvm_run);
1696 if (r < 0) 1699 if (r < 0 && r != -EINTR)
1697 goto out; 1700 goto out;
1698 r = -EFAULT; 1701 if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
1699 if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) 1702 r = -EFAULT;
1700 goto out; 1703 goto out;
1701 r = 0; 1704 }
1702 break; 1705 break;
1703 } 1706 }
1704 case KVM_GET_REGS: { 1707 case KVM_GET_REGS: {
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa0428735717..855207a9b396 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -235,6 +235,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
235 235
236 vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip; 236 vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
237 vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; 237 vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
238
239 vcpu->interrupt_window_open = 1;
238} 240}
239 241
240static int has_svm(void) 242static int has_svm(void)
@@ -1031,10 +1033,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1031{ 1033{
1032 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; 1034 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
1033 skip_emulated_instruction(vcpu); 1035 skip_emulated_instruction(vcpu);
1034 if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) 1036 if (vcpu->irq_summary)
1035 return 1; 1037 return 1;
1036 1038
1037 kvm_run->exit_reason = KVM_EXIT_HLT; 1039 kvm_run->exit_reason = KVM_EXIT_HLT;
1040 ++kvm_stat.halt_exits;
1038 return 0; 1041 return 0;
1039} 1042}
1040 1043
@@ -1186,6 +1189,24 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1186 return rdmsr_interception(vcpu, kvm_run); 1189 return rdmsr_interception(vcpu, kvm_run);
1187} 1190}
1188 1191
1192static int interrupt_window_interception(struct kvm_vcpu *vcpu,
1193 struct kvm_run *kvm_run)
1194{
1195 /*
1196 * If the user space waits to inject interrupts, exit as soon as
1197 * possible
1198 */
1199 if (kvm_run->request_interrupt_window &&
1200 !vcpu->irq_summary &&
1201 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) {
1202 ++kvm_stat.irq_window_exits;
1203 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1204 return 0;
1205 }
1206
1207 return 1;
1208}
1209
1189static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, 1210static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
1190 struct kvm_run *kvm_run) = { 1211 struct kvm_run *kvm_run) = {
1191 [SVM_EXIT_READ_CR0] = emulate_on_interception, 1212 [SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -1210,6 +1231,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
1210 [SVM_EXIT_NMI] = nop_on_interception, 1231 [SVM_EXIT_NMI] = nop_on_interception,
1211 [SVM_EXIT_SMI] = nop_on_interception, 1232 [SVM_EXIT_SMI] = nop_on_interception,
1212 [SVM_EXIT_INIT] = nop_on_interception, 1233 [SVM_EXIT_INIT] = nop_on_interception,
1234 [SVM_EXIT_VINTR] = interrupt_window_interception,
1213 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */ 1235 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
1214 [SVM_EXIT_CPUID] = cpuid_interception, 1236 [SVM_EXIT_CPUID] = cpuid_interception,
1215 [SVM_EXIT_HLT] = halt_interception, 1237 [SVM_EXIT_HLT] = halt_interception,
@@ -1278,15 +1300,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
1278} 1300}
1279 1301
1280 1302
1281static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu) 1303static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1282{ 1304{
1283 struct vmcb_control_area *control; 1305 struct vmcb_control_area *control;
1284 1306
1285 if (!vcpu->irq_summary)
1286 return;
1287
1288 control = &vcpu->svm->vmcb->control; 1307 control = &vcpu->svm->vmcb->control;
1289
1290 control->int_vector = pop_irq(vcpu); 1308 control->int_vector = pop_irq(vcpu);
1291 control->int_ctl &= ~V_INTR_PRIO_MASK; 1309 control->int_ctl &= ~V_INTR_PRIO_MASK;
1292 control->int_ctl |= V_IRQ_MASK | 1310 control->int_ctl |= V_IRQ_MASK |
@@ -1301,6 +1319,59 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
1301 control->int_ctl &= ~V_IRQ_MASK; 1319 control->int_ctl &= ~V_IRQ_MASK;
1302 push_irq(vcpu, control->int_vector); 1320 push_irq(vcpu, control->int_vector);
1303 } 1321 }
1322
1323 vcpu->interrupt_window_open =
1324 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
1325}
1326
1327static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1328 struct kvm_run *kvm_run)
1329{
1330 struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
1331
1332 vcpu->interrupt_window_open =
1333 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
1334 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
1335
1336 if (vcpu->interrupt_window_open && vcpu->irq_summary)
1337 /*
1338 * If interrupts enabled, and not blocked by sti or mov ss. Good.
1339 */
1340 kvm_do_inject_irq(vcpu);
1341
1342 /*
1343 * Interrupts blocked. Wait for unblock.
1344 */
1345 if (!vcpu->interrupt_window_open &&
1346 (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
1347 control->intercept |= 1ULL << INTERCEPT_VINTR;
1348 } else
1349 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1350}
1351
1352static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1353 struct kvm_run *kvm_run)
1354{
1355 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1356 vcpu->irq_summary == 0);
1357 kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
1358 kvm_run->cr8 = vcpu->cr8;
1359 kvm_run->apic_base = vcpu->apic_base;
1360}
1361
1362/*
1363 * Check if userspace requested an interrupt window, and that the
1364 * interrupt window is open.
1365 *
1366 * No need to exit to userspace if we already have an interrupt queued.
1367 */
1368static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1369 struct kvm_run *kvm_run)
1370{
1371 return (!vcpu->irq_summary &&
1372 kvm_run->request_interrupt_window &&
1373 vcpu->interrupt_window_open &&
1374 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
1304} 1375}
1305 1376
1306static void save_db_regs(unsigned long *db_regs) 1377static void save_db_regs(unsigned long *db_regs)
@@ -1326,7 +1397,7 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1326 u16 ldt_selector; 1397 u16 ldt_selector;
1327 1398
1328again: 1399again:
1329 kvm_try_inject_irq(vcpu); 1400 do_interrupt_requests(vcpu, kvm_run);
1330 1401
1331 clgi(); 1402 clgi();
1332 1403
@@ -1487,17 +1558,26 @@ again:
1487 if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { 1558 if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1488 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; 1559 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
1489 kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code; 1560 kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
1561 post_kvm_run_save(vcpu, kvm_run);
1490 return 0; 1562 return 0;
1491 } 1563 }
1492 1564
1493 if (handle_exit(vcpu, kvm_run)) { 1565 if (handle_exit(vcpu, kvm_run)) {
1494 if (signal_pending(current)) { 1566 if (signal_pending(current)) {
1495 ++kvm_stat.signal_exits; 1567 ++kvm_stat.signal_exits;
1568 post_kvm_run_save(vcpu, kvm_run);
1569 return -EINTR;
1570 }
1571
1572 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1573 ++kvm_stat.request_irq_exits;
1574 post_kvm_run_save(vcpu, kvm_run);
1496 return -EINTR; 1575 return -EINTR;
1497 } 1576 }
1498 kvm_resched(vcpu); 1577 kvm_resched(vcpu);
1499 goto again; 1578 goto again;
1500 } 1579 }
1580 post_kvm_run_save(vcpu, kvm_run);
1501 return 0; 1581 return 0;
1502} 1582}
1503 1583
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 2d204fd45972..c55635ddf426 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -263,6 +263,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
263 if (interruptibility & 3) 263 if (interruptibility & 3)
264 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 264 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
265 interruptibility & ~3); 265 interruptibility & ~3);
266 vcpu->interrupt_window_open = 1;
266} 267}
267 268
268static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) 269static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -1214,21 +1215,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1214 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 1215 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1215} 1216}
1216 1217
1217static void kvm_try_inject_irq(struct kvm_vcpu *vcpu) 1218
1219static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1220 struct kvm_run *kvm_run)
1218{ 1221{
1219 if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) 1222 u32 cpu_based_vm_exec_control;
1220 && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0) 1223
1224 vcpu->interrupt_window_open =
1225 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1226 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1227
1228 if (vcpu->interrupt_window_open &&
1229 vcpu->irq_summary &&
1230 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1221 /* 1231 /*
1222 * Interrupts enabled, and not blocked by sti or mov ss. Good. 1232 * If interrupts enabled, and not blocked by sti or mov ss. Good.
1223 */ 1233 */
1224 kvm_do_inject_irq(vcpu); 1234 kvm_do_inject_irq(vcpu);
1225 else 1235
1236 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1237 if (!vcpu->interrupt_window_open &&
1238 (vcpu->irq_summary || kvm_run->request_interrupt_window))
1226 /* 1239 /*
1227 * Interrupts blocked. Wait for unblock. 1240 * Interrupts blocked. Wait for unblock.
1228 */ 1241 */
1229 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 1242 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
1230 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) 1243 else
1231 | CPU_BASED_VIRTUAL_INTR_PENDING); 1244 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
1245 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
1232} 1246}
1233 1247
1234static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) 1248static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
@@ -1565,23 +1579,41 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1565 return 1; 1579 return 1;
1566} 1580}
1567 1581
1582static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1583 struct kvm_run *kvm_run)
1584{
1585 kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
1586 kvm_run->cr8 = vcpu->cr8;
1587 kvm_run->apic_base = vcpu->apic_base;
1588 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1589 vcpu->irq_summary == 0);
1590}
1591
1568static int handle_interrupt_window(struct kvm_vcpu *vcpu, 1592static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1569 struct kvm_run *kvm_run) 1593 struct kvm_run *kvm_run)
1570{ 1594{
1571 /* Turn off interrupt window reporting. */ 1595 /*
1572 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 1596 * If the user space waits to inject interrupts, exit as soon as
1573 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) 1597 * possible
1574 & ~CPU_BASED_VIRTUAL_INTR_PENDING); 1598 */
1599 if (kvm_run->request_interrupt_window &&
1600 !vcpu->irq_summary &&
1601 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)) {
1602 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1603 ++kvm_stat.irq_window_exits;
1604 return 0;
1605 }
1575 return 1; 1606 return 1;
1576} 1607}
1577 1608
1578static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1609static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1579{ 1610{
1580 skip_emulated_instruction(vcpu); 1611 skip_emulated_instruction(vcpu);
1581 if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)) 1612 if (vcpu->irq_summary)
1582 return 1; 1613 return 1;
1583 1614
1584 kvm_run->exit_reason = KVM_EXIT_HLT; 1615 kvm_run->exit_reason = KVM_EXIT_HLT;
1616 ++kvm_stat.halt_exits;
1585 return 0; 1617 return 0;
1586} 1618}
1587 1619
@@ -1632,6 +1664,21 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1632 return 0; 1664 return 0;
1633} 1665}
1634 1666
1667/*
1668 * Check if userspace requested an interrupt window, and that the
1669 * interrupt window is open.
1670 *
1671 * No need to exit to userspace if we already have an interrupt queued.
1672 */
1673static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1674 struct kvm_run *kvm_run)
1675{
1676 return (!vcpu->irq_summary &&
1677 kvm_run->request_interrupt_window &&
1678 vcpu->interrupt_window_open &&
1679 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1680}
1681
1635static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1682static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1636{ 1683{
1637 u8 fail; 1684 u8 fail;
@@ -1663,9 +1710,7 @@ again:
1663 vmcs_writel(HOST_GS_BASE, segment_base(gs_sel)); 1710 vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
1664#endif 1711#endif
1665 1712
1666 if (vcpu->irq_summary && 1713 do_interrupt_requests(vcpu, kvm_run);
1667 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1668 kvm_try_inject_irq(vcpu);
1669 1714
1670 if (vcpu->guest_debug.enabled) 1715 if (vcpu->guest_debug.enabled)
1671 kvm_guest_debug_pre(vcpu); 1716 kvm_guest_debug_pre(vcpu);
@@ -1802,6 +1847,7 @@ again:
1802 1847
1803 fx_save(vcpu->guest_fx_image); 1848 fx_save(vcpu->guest_fx_image);
1804 fx_restore(vcpu->host_fx_image); 1849 fx_restore(vcpu->host_fx_image);
1850 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
1805 1851
1806#ifndef CONFIG_X86_64 1852#ifndef CONFIG_X86_64
1807 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 1853 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
@@ -1834,12 +1880,22 @@ again:
1834 /* Give scheduler a change to reschedule. */ 1880 /* Give scheduler a change to reschedule. */
1835 if (signal_pending(current)) { 1881 if (signal_pending(current)) {
1836 ++kvm_stat.signal_exits; 1882 ++kvm_stat.signal_exits;
1883 post_kvm_run_save(vcpu, kvm_run);
1884 return -EINTR;
1885 }
1886
1887 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1888 ++kvm_stat.request_irq_exits;
1889 post_kvm_run_save(vcpu, kvm_run);
1837 return -EINTR; 1890 return -EINTR;
1838 } 1891 }
1892
1839 kvm_resched(vcpu); 1893 kvm_resched(vcpu);
1840 goto again; 1894 goto again;
1841 } 1895 }
1842 } 1896 }
1897
1898 post_kvm_run_save(vcpu, kvm_run);
1843 return 0; 1899 return 0;
1844} 1900}
1845 1901
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 28fdce1ac1db..bc8b4616bad7 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -11,7 +11,7 @@
11#include <asm/types.h> 11#include <asm/types.h>
12#include <linux/ioctl.h> 12#include <linux/ioctl.h>
13 13
14#define KVM_API_VERSION 1 14#define KVM_API_VERSION 2
15 15
16/* 16/*
17 * Architectural interrupt line count, and the size of the bitmap needed 17 * Architectural interrupt line count, and the size of the bitmap needed
@@ -45,6 +45,7 @@ enum kvm_exit_reason {
45 KVM_EXIT_DEBUG = 4, 45 KVM_EXIT_DEBUG = 4,
46 KVM_EXIT_HLT = 5, 46 KVM_EXIT_HLT = 5,
47 KVM_EXIT_MMIO = 6, 47 KVM_EXIT_MMIO = 6,
48 KVM_EXIT_IRQ_WINDOW_OPEN = 7,
48}; 49};
49 50
50/* for KVM_RUN */ 51/* for KVM_RUN */
@@ -53,11 +54,19 @@ struct kvm_run {
53 __u32 vcpu; 54 __u32 vcpu;
54 __u32 emulated; /* skip current instruction */ 55 __u32 emulated; /* skip current instruction */
55 __u32 mmio_completed; /* mmio request completed */ 56 __u32 mmio_completed; /* mmio request completed */
57 __u8 request_interrupt_window;
58 __u8 padding1[3];
56 59
57 /* out */ 60 /* out */
58 __u32 exit_type; 61 __u32 exit_type;
59 __u32 exit_reason; 62 __u32 exit_reason;
60 __u32 instruction_length; 63 __u32 instruction_length;
64 __u8 ready_for_interrupt_injection;
65 __u8 if_flag;
66 __u16 padding2;
67 __u64 cr8;
68 __u64 apic_base;
69
61 union { 70 union {
62 /* KVM_EXIT_UNKNOWN */ 71 /* KVM_EXIT_UNKNOWN */
63 struct { 72 struct {