Diffstat (limited to 'drivers/kvm/vmx.c')

 drivers/kvm/vmx.c | 182 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 132 insertions(+), 50 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index d0a2c2d5342a..ce219e3f557f 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/profile.h>
 #include <asm/io.h>
 #include <asm/desc.h>
 
@@ -116,7 +117,7 @@ static void vmcs_clear(struct vmcs *vmcs)
 static void __vcpu_clear(void *arg)
 {
        struct kvm_vcpu *vcpu = arg;
-       int cpu = smp_processor_id();
+       int cpu = raw_smp_processor_id();
 
        if (vcpu->cpu == cpu)
                vmcs_clear(vcpu->vmcs);
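Note on the smp_processor_id() changes here and in alloc_vmcs() below: raw_smp_processor_id() returns the same value but skips the CONFIG_DEBUG_PREEMPT check that warns when the calling context could migrate between CPUs. __vcpu_clear() is invoked cross-CPU as an IPI callback, where migration cannot happen, and alloc_vmcs() only wants a CPU number as an allocation hint, so a stale answer is harmless. A minimal sketch of that second case (hypothetical helper, not from this patch):

/* Sketch: preemptible context where the exact CPU is only a hint. */
static int pick_home_cpu(void)
{
        return raw_smp_processor_id();  /* caller may migrate; any CPU is fine */
}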
@@ -152,15 +153,21 @@ static u64 vmcs_read64(unsigned long field)
 #endif
 }
 
+static noinline void vmwrite_error(unsigned long field, unsigned long value)
+{
+       printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
+              field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+       dump_stack();
+}
+
 static void vmcs_writel(unsigned long field, unsigned long value)
 {
        u8 error;
 
        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
                      : "=q"(error) : "a"(value), "d"(field) : "cc" );
-       if (error)
-               printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
-                      field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+       if (unlikely(error))
+               vmwrite_error(field, value);
 }
 
 static void vmcs_write16(unsigned long field, u16 value)
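The vmwrite_error()/unlikely() split above is the usual cold-path idiom: the failure report (printk plus dump_stack()) moves into a noinline helper so the fast path of vmcs_writel() stays small, and unlikely() tells the compiler to lay the branch out as not taken. The same shape, generically (hypothetical names, not from this patch):

static noinline void report_failure(int err)
{
        printk(KERN_ERR "write failed: err %d\n", err);
        dump_stack();           /* expensive; acceptable on the cold path */
}

static void do_write(int err)
{
        if (unlikely(err))      /* hinted cold: body stays off the hot path */
                report_failure(err);
}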
@@ -263,6 +270,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        if (interruptibility & 3)
                vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                             interruptibility & ~3);
+       vcpu->interrupt_window_open = 1;
 }
 
 static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -541,7 +549,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
 
 static struct vmcs *alloc_vmcs(void)
 {
-       return alloc_vmcs_cpu(smp_processor_id());
+       return alloc_vmcs_cpu(raw_smp_processor_id());
 }
 
 static void free_vmcs(struct vmcs *vmcs)
@@ -736,6 +744,15 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
+static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+       vcpu->cr0 &= KVM_GUEST_CR0_MASK;
+       vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;
+
+       vcpu->cr4 &= KVM_GUEST_CR4_MASK;
+       vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+}
+
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
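vmx_decache_cr0_cr4_guest_bits() re-synchronizes the cached vcpu->cr0/cr4 with hardware: bits inside KVM_GUEST_CR0_MASK are host-owned (guest writes to them cause exits, so the cached copy is already current), while bits outside the mask are guest-owned and must be re-read from the VMCS. A small worked example of the merge, with made-up values:

/* Hypothetical helper and values, for illustration only. */
static unsigned long decache(unsigned long cached, unsigned long hw,
                             unsigned long host_owned)
{
        cached &= host_owned;           /* keep host-owned bits from the cache */
        cached |= hw & ~host_owned;     /* refresh guest-owned bits from hardware */
        return cached;                  /* decache(0xa5, 0x5a, 0xf0) == 0xaa */
}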
@@ -1011,8 +1028,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        vmcs_writel(GUEST_RIP, 0xfff0);
        vmcs_writel(GUEST_RSP, 0);
 
-       vmcs_writel(GUEST_CR3, 0);
-
        //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
        vmcs_writel(GUEST_DR7, 0x400);
 
@@ -1049,7 +1064,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
                | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
                | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
                | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
-               | CPU_BASED_INVDPG_EXITING
                | CPU_BASED_MOV_DR_EXITING
                | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
        );
@@ -1094,14 +1108,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
 
-       ret = -ENOMEM;
-       vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vcpu->guest_msrs)
-               goto out;
-       vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vcpu->host_msrs)
-               goto out_free_guest_msrs;
-
        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
@@ -1155,8 +1161,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 
        return 0;
 
-out_free_guest_msrs:
-       kfree(vcpu->guest_msrs);
 out:
        return ret;
 }
@@ -1224,21 +1228,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
                        irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+                                      struct kvm_run *kvm_run)
 {
-       if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
-           && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
+       u32 cpu_based_vm_exec_control;
+
+       vcpu->interrupt_window_open =
+               ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+                (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+
+       if (vcpu->interrupt_window_open &&
+           vcpu->irq_summary &&
+           !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                /*
-                * Interrupts enabled, and not blocked by sti or mov ss. Good.
+                * If interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
                kvm_do_inject_irq(vcpu);
-       else
+
+       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       if (!vcpu->interrupt_window_open &&
+           (vcpu->irq_summary || kvm_run->request_interrupt_window))
                /*
                 * Interrupts blocked. Wait for unblock.
                 */
-               vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-                            vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-                            | CPU_BASED_VIRTUAL_INTR_PENDING);
+               cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+       else
+               cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
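do_interrupt_requests() replaces kvm_try_inject_irq() and makes CPU_BASED_VIRTUAL_INTR_PENDING a pure function of current state rather than a flag that only ever gets set: the window is recomputed from RFLAGS.IF and the sti/mov-ss interruptibility bits, a queued IRQ is injected when the window is open and the entry slot is free, and interrupt-window exiting is armed whenever something (a queued guest IRQ, or userspace via kvm_run->request_interrupt_window) is waiting on a closed window, and disarmed otherwise. A condensed restatement of the control update (hypothetical helper, same constants):

/* Illustrative condensation of the read-modify-write above. */
static u32 want_intr_window(u32 exec_control, int window_open,
                            int irq_queued, int userspace_wants_window)
{
        if (!window_open && (irq_queued || userspace_wants_window))
                exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;  /* exit when it opens */
        else
                exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; /* no longer needed */
        return exec_control;
}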
@@ -1277,6 +1294,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        unsigned long cr2, rip;
        u32 vect_info;
        enum emulation_result er;
+       int r;
 
        vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -1305,7 +1323,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
                spin_lock(&vcpu->kvm->lock);
-               if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
+               r = kvm_mmu_page_fault(vcpu, cr2, error_code);
+               if (r < 0) {
+                       spin_unlock(&vcpu->kvm->lock);
+                       return r;
+               }
+               if (!r) {
                        spin_unlock(&vcpu->kvm->lock);
                        return 1;
                }
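The page-fault path now goes through kvm_mmu_page_fault() and distinguishes three outcomes instead of two: a negative value is a host-side error that must unwind all the way to userspace (hence the new int r here and in vmx_vcpu_run()), zero means the MMU resolved the fault and the guest can resume, and a positive value falls through to instruction emulation. As the caller sees it (annotated restatement, not new code):

r = kvm_mmu_page_fault(vcpu, cr2, error_code);
if (r < 0)              /* host error (e.g. allocation failure): propagate */
        return r;
if (r == 0)             /* MMU fixed the fault: re-enter the guest */
        return 1;
/* r > 0: the access still needs handling; continue into the emulator */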
@@ -1425,17 +1448,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 0;
 }
 
-static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-       u64 address = vmcs_read64(EXIT_QUALIFICATION);
-       int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-       spin_lock(&vcpu->kvm->lock);
-       vcpu->mmu.inval_page(vcpu, address);
-       spin_unlock(&vcpu->kvm->lock);
-       vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
-       return 1;
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u64 exit_qualification;
@@ -1575,23 +1587,40 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                             struct kvm_run *kvm_run)
+{
+       kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
+       kvm_run->cr8 = vcpu->cr8;
+       kvm_run->apic_base = vcpu->apic_base;
+       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+                                                 vcpu->irq_summary == 0);
+}
+
 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
                                   struct kvm_run *kvm_run)
 {
-       /* Turn off interrupt window reporting. */
-       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-                    vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-                    & ~CPU_BASED_VIRTUAL_INTR_PENDING);
+       /*
+        * If the user space waits to inject interrupts, exit as soon as
+        * possible
+        */
+       if (kvm_run->request_interrupt_window &&
+           !vcpu->irq_summary) {
+               kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               ++kvm_stat.irq_window_exits;
+               return 0;
+       }
        return 1;
 }
 
 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        skip_emulated_instruction(vcpu);
-       if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF))
+       if (vcpu->irq_summary)
                return 1;
 
        kvm_run->exit_reason = KVM_EXIT_HLT;
+       ++kvm_stat.halt_exits;
        return 0;
 }
 
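Together with post_kvm_run_save(), these handlers define the userspace contract: handle_interrupt_window() now exits with KVM_EXIT_IRQ_WINDOW_OPEN when the device model asked to inject (instead of silently turning reporting off), and handle_halt() reports KVM_EXIT_HLT only when no interrupt is queued, with both paths bumping the new statistics counters. A hedged sketch of how a device model of this era might consume these exits; the helper names and the exact KVM_RUN calling convention are assumptions, not shown in this patch:

/* Hypothetical userspace loop; field names follow this patch's kvm_run. */
run->request_interrupt_window = device_irq_pending();  /* hypothetical */
ioctl(vcpu_fd, KVM_RUN, run);                          /* exact usage assumed */

switch (run->exit_reason) {
case KVM_EXIT_IRQ_WINDOW_OPEN:          /* guest can accept an interrupt now */
        if (run->ready_for_interrupt_injection)
                inject_pending_irq();   /* hypothetical device-model helper */
        break;
case KVM_EXIT_HLT:                      /* guest idle with nothing queued */
        wait_for_device_event();        /* hypothetical */
        break;
}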
@@ -1605,7 +1634,6 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
        [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
        [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
-       [EXIT_REASON_INVLPG]                  = handle_invlpg,
        [EXIT_REASON_CR_ACCESS]               = handle_cr,
        [EXIT_REASON_DR_ACCESS]               = handle_dr,
        [EXIT_REASON_CPUID]                   = handle_cpuid,
@@ -1642,11 +1670,27 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        return 0;
 }
 
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                       struct kvm_run *kvm_run)
+{
+       return (!vcpu->irq_summary &&
+               kvm_run->request_interrupt_window &&
+               vcpu->interrupt_window_open &&
+               (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
+}
+
 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u8 fail;
        u16 fs_sel, gs_sel, ldt_sel;
        int fs_gs_ldt_reload_needed;
+       int r;
 
 again:
        /*
@@ -1673,9 +1717,7 @@ again:
        vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
 #endif
 
-       if (vcpu->irq_summary &&
-           !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
-               kvm_try_inject_irq(vcpu);
+       do_interrupt_requests(vcpu, kvm_run);
 
        if (vcpu->guest_debug.enabled)
                kvm_guest_debug_pre(vcpu);
@@ -1812,15 +1854,23 @@ again:
 
        fx_save(vcpu->guest_fx_image);
        fx_restore(vcpu->host_fx_image);
+       vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 #ifndef CONFIG_X86_64
        asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 #endif
 
+       /*
+        * Profile KVM exit RIPs:
+        */
+       if (unlikely(prof_on == KVM_PROFILING))
+               profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
        kvm_run->exit_type = 0;
        if (fail) {
                kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
                kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
+               r = 0;
        } else {
                if (fs_gs_ldt_reload_needed) {
                        load_ldt(ldt_sel);
@@ -1840,17 +1890,28 @@ again:
                }
                vcpu->launched = 1;
                kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
-               if (kvm_handle_exit(kvm_run, vcpu)) {
+               r = kvm_handle_exit(kvm_run, vcpu);
+               if (r > 0) {
                        /* Give scheduler a change to reschedule. */
                        if (signal_pending(current)) {
                                ++kvm_stat.signal_exits;
+                               post_kvm_run_save(vcpu, kvm_run);
                                return -EINTR;
                        }
+
+                       if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                               ++kvm_stat.request_irq_exits;
+                               post_kvm_run_save(vcpu, kvm_run);
+                               return -EINTR;
+                       }
+
                        kvm_resched(vcpu);
                        goto again;
                }
        }
-       return 0;
+
+       post_kvm_run_save(vcpu, kvm_run);
+       return r;
 }
 
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
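The tail of vmx_vcpu_run() now funnels every exit path through post_kvm_run_save(), so userspace always sees fresh if_flag/cr8/apic_base/ready_for_interrupt_injection values, and the handler result r is propagated instead of a hardcoded 0. The resulting convention, restated as comments (not new code):

/*
 * Return convention after this change:
 *   r < 0   error, or -EINTR for a pending signal or a userspace-requested
 *           interrupt window that is now open; kvm_run state already saved
 *   r == 0  orderly exit to userspace; kvm_run->exit_reason says why
 *   r > 0   never escapes this function: it means "keep running" and
 *           loops via goto again
 */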
@@ -1906,13 +1967,33 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vmcs *vmcs;
 
+       vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!vcpu->guest_msrs)
+               return -ENOMEM;
+
+       vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!vcpu->host_msrs)
+               goto out_free_guest_msrs;
+
        vmcs = alloc_vmcs();
        if (!vmcs)
-               return -ENOMEM;
+               goto out_free_msrs;
+
        vmcs_clear(vmcs);
        vcpu->vmcs = vmcs;
        vcpu->launched = 0;
+
        return 0;
+
+out_free_msrs:
+       kfree(vcpu->host_msrs);
+       vcpu->host_msrs = NULL;
+
+out_free_guest_msrs:
+       kfree(vcpu->guest_msrs);
+       vcpu->guest_msrs = NULL;
+
+       return -ENOMEM;
 }
 
 static struct kvm_arch_ops vmx_arch_ops = {
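Moving the guest_msrs/host_msrs allocations out of vmx_vcpu_setup() and into vmx_create_vcpu() groups all per-vcpu allocations in one place and lets the function use the kernel's standard goto-unwind idiom: each failure jumps to a label that releases everything acquired so far, in reverse order, NULLing the pointers so a later destroy path cannot double-free. The generic shape of the idiom (hypothetical struct and fields, not from this patch):

/* Generic shape of the unwind used above (hypothetical resources). */
static int create_thing(struct thing *t)
{
        t->a = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!t->a)
                return -ENOMEM;         /* nothing to unwind yet */

        t->b = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!t->b)
                goto free_a;            /* unwind in reverse allocation order */

        return 0;

free_a:
        kfree(t->a);
        t->a = NULL;                    /* guard later teardown against double-free */
        return -ENOMEM;
}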
1918 | static struct kvm_arch_ops vmx_arch_ops = { | 1999 | static struct kvm_arch_ops vmx_arch_ops = { |
@@ -1936,6 +2017,7 @@ static struct kvm_arch_ops vmx_arch_ops = { | |||
1936 | .get_segment = vmx_get_segment, | 2017 | .get_segment = vmx_get_segment, |
1937 | .set_segment = vmx_set_segment, | 2018 | .set_segment = vmx_set_segment, |
1938 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, | 2019 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, |
2020 | .decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits, | ||
1939 | .set_cr0 = vmx_set_cr0, | 2021 | .set_cr0 = vmx_set_cr0, |
1940 | .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch, | 2022 | .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch, |
1941 | .set_cr3 = vmx_set_cr3, | 2023 | .set_cr3 = vmx_set_cr3, |