Diffstat (limited to 'drivers/kvm/vmx.c')
 drivers/kvm/vmx.c | 175 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 125 insertions(+), 50 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index d0a2c2d5342a..d4701cb4c654 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -116,7 +116,7 @@ static void vmcs_clear(struct vmcs *vmcs)
 static void __vcpu_clear(void *arg)
 {
 	struct kvm_vcpu *vcpu = arg;
-	int cpu = smp_processor_id();
+	int cpu = raw_smp_processor_id();
 
 	if (vcpu->cpu == cpu)
 		vmcs_clear(vcpu->vmcs);
@@ -152,15 +152,21 @@ static u64 vmcs_read64(unsigned long field)
 #endif
 }
 
+static noinline void vmwrite_error(unsigned long field, unsigned long value)
+{
+	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
+	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+	dump_stack();
+}
+
 static void vmcs_writel(unsigned long field, unsigned long value)
 {
 	u8 error;
 
 	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
 		       : "=q"(error) : "a"(value), "d"(field) : "cc" );
-	if (error)
-		printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
-		       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+	if (unlikely(error))
+		vmwrite_error(field, value);
 }
 
 static void vmcs_write16(unsigned long field, u16 value)
@@ -263,6 +269,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	if (interruptibility & 3)
 		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
 			     interruptibility & ~3);
+	vcpu->interrupt_window_open = 1;
 }
 
 static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -541,7 +548,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
 
 static struct vmcs *alloc_vmcs(void)
 {
-	return alloc_vmcs_cpu(smp_processor_id());
+	return alloc_vmcs_cpu(raw_smp_processor_id());
 }
 
 static void free_vmcs(struct vmcs *vmcs)
@@ -736,6 +743,15 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
+static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+	vcpu->cr0 &= KVM_GUEST_CR0_MASK;
+	vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;
+
+	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
+	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+}
+
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
@@ -1011,8 +1027,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_writel(GUEST_RIP, 0xfff0);
 	vmcs_writel(GUEST_RSP, 0);
 
-	vmcs_writel(GUEST_CR3, 0);
-
 	//todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
 	vmcs_writel(GUEST_DR7, 0x400);
 
@@ -1049,7 +1063,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		     | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
 		     | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
 		     | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
-		     | CPU_BASED_INVDPG_EXITING
 		     | CPU_BASED_MOV_DR_EXITING
 		     | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
 		);
@@ -1094,14 +1107,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
 	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
 
-	ret = -ENOMEM;
-	vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->guest_msrs)
-		goto out;
-	vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->host_msrs)
-		goto out_free_guest_msrs;
-
 	for (i = 0; i < NR_VMX_MSR; ++i) {
 		u32 index = vmx_msr_index[i];
 		u32 data_low, data_high;
@@ -1155,8 +1160,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	return 0;
 
-out_free_guest_msrs:
-	kfree(vcpu->guest_msrs);
 out:
 	return ret;
 }
@@ -1224,21 +1227,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 			irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+				       struct kvm_run *kvm_run)
 {
-	if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
-	    && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
+	u32 cpu_based_vm_exec_control;
+
+	vcpu->interrupt_window_open =
+		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+
+	if (vcpu->interrupt_window_open &&
+	    vcpu->irq_summary &&
+	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
 		/*
-		 * Interrupts enabled, and not blocked by sti or mov ss. Good.
+		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
 		 */
 		kvm_do_inject_irq(vcpu);
-	else
+
+	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+	if (!vcpu->interrupt_window_open &&
+	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
 		/*
 		 * Interrupts blocked. Wait for unblock.
 		 */
-		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-			     | CPU_BASED_VIRTUAL_INTR_PENDING);
+		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+	else
+		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
@@ -1277,6 +1293,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	unsigned long cr2, rip;
 	u32 vect_info;
 	enum emulation_result er;
+	int r;
 
 	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -1305,7 +1322,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
 		spin_lock(&vcpu->kvm->lock);
-		if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
+		r = kvm_mmu_page_fault(vcpu, cr2, error_code);
+		if (r < 0) {
+			spin_unlock(&vcpu->kvm->lock);
+			return r;
+		}
+		if (!r) {
 			spin_unlock(&vcpu->kvm->lock);
 			return 1;
 		}
@@ -1425,17 +1447,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	u64 address = vmcs_read64(EXIT_QUALIFICATION);
-	int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-	spin_lock(&vcpu->kvm->lock);
-	vcpu->mmu.inval_page(vcpu, address);
-	spin_unlock(&vcpu->kvm->lock);
-	vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
-	return 1;
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	u64 exit_qualification;
@@ -1575,23 +1586,40 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+			      struct kvm_run *kvm_run)
+{
+	kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
+	kvm_run->cr8 = vcpu->cr8;
+	kvm_run->apic_base = vcpu->apic_base;
+	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+						  vcpu->irq_summary == 0);
+}
+
 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
 				   struct kvm_run *kvm_run)
 {
-	/* Turn off interrupt window reporting. */
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-		     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-		     & ~CPU_BASED_VIRTUAL_INTR_PENDING);
+	/*
+	 * If the user space waits to inject interrupts, exit as soon as
+	 * possible
+	 */
+	if (kvm_run->request_interrupt_window &&
+	    !vcpu->irq_summary) {
+		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+		++kvm_stat.irq_window_exits;
+		return 0;
+	}
 	return 1;
 }
 
 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	skip_emulated_instruction(vcpu);
-	if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF))
+	if (vcpu->irq_summary)
 		return 1;
 
 	kvm_run->exit_reason = KVM_EXIT_HLT;
+	++kvm_stat.halt_exits;
 	return 0;
 }
 
@@ -1605,7 +1633,6 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
 	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
 	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
-	[EXIT_REASON_INVLPG]                  = handle_invlpg,
 	[EXIT_REASON_CR_ACCESS]               = handle_cr,
 	[EXIT_REASON_DR_ACCESS]               = handle_dr,
 	[EXIT_REASON_CPUID]                   = handle_cpuid,
@@ -1642,11 +1669,27 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+					struct kvm_run *kvm_run)
+{
+	return (!vcpu->irq_summary &&
+		kvm_run->request_interrupt_window &&
+		vcpu->interrupt_window_open &&
+		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
+}
+
 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	u8 fail;
 	u16 fs_sel, gs_sel, ldt_sel;
 	int fs_gs_ldt_reload_needed;
+	int r;
 
 again:
 	/*
@@ -1673,9 +1716,7 @@ again:
 	vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
 #endif
 
-	if (vcpu->irq_summary &&
-	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
-		kvm_try_inject_irq(vcpu);
+	do_interrupt_requests(vcpu, kvm_run);
 
 	if (vcpu->guest_debug.enabled)
 		kvm_guest_debug_pre(vcpu);
@@ -1812,6 +1853,7 @@ again:
 
 	fx_save(vcpu->guest_fx_image);
 	fx_restore(vcpu->host_fx_image);
+	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 #ifndef CONFIG_X86_64
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
@@ -1821,6 +1863,7 @@ again:
 	if (fail) {
 		kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
 		kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
+		r = 0;
 	} else {
 		if (fs_gs_ldt_reload_needed) {
 			load_ldt(ldt_sel);
@@ -1840,17 +1883,28 @@ again:
 		}
 		vcpu->launched = 1;
 		kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
-		if (kvm_handle_exit(kvm_run, vcpu)) {
+		r = kvm_handle_exit(kvm_run, vcpu);
+		if (r > 0) {
 			/* Give scheduler a change to reschedule. */
 			if (signal_pending(current)) {
 				++kvm_stat.signal_exits;
+				post_kvm_run_save(vcpu, kvm_run);
+				return -EINTR;
+			}
+
+			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+				++kvm_stat.request_irq_exits;
+				post_kvm_run_save(vcpu, kvm_run);
 				return -EINTR;
 			}
+
 			kvm_resched(vcpu);
 			goto again;
 		}
 	}
-	return 0;
+
+	post_kvm_run_save(vcpu, kvm_run);
+	return r;
 }
 
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
@@ -1906,13 +1960,33 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vmcs *vmcs;
 
+	vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vcpu->guest_msrs)
+		return -ENOMEM;
+
+	vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vcpu->host_msrs)
+		goto out_free_guest_msrs;
+
 	vmcs = alloc_vmcs();
 	if (!vmcs)
-		return -ENOMEM;
+		goto out_free_msrs;
+
 	vmcs_clear(vmcs);
 	vcpu->vmcs = vmcs;
 	vcpu->launched = 0;
+
 	return 0;
+
+out_free_msrs:
+	kfree(vcpu->host_msrs);
+	vcpu->host_msrs = NULL;
+
+out_free_guest_msrs:
+	kfree(vcpu->guest_msrs);
+	vcpu->guest_msrs = NULL;
+
+	return -ENOMEM;
 }
 
 static struct kvm_arch_ops vmx_arch_ops = {
@@ -1936,6 +2010,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 	.get_segment = vmx_get_segment,
 	.set_segment = vmx_set_segment,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+	.decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
 	.set_cr3 = vmx_set_cr3,
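
The interrupt-window protocol this diff adds (request_interrupt_window, ready_for_interrupt_injection, KVM_EXIT_IRQ_WINDOW_OPEN) is driven from userspace. Below is a minimal sketch of the user-level side, for illustration only and not part of this diff: it assumes a vcpu file descriptor and the KVM_RUN/KVM_INTERRUPT ioctls of this vintage, and next_pending_irq() is a hypothetical stand-in for the VMM's interrupt controller.

/*
 * Illustrative sketch, not part of this diff. Userspace loop for the
 * interrupt-window handshake: request an exit while an IRQ is pending,
 * and inject it once post_kvm_run_save() reports the window open.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

int next_pending_irq(void);	/* hypothetical: highest pending IRQ, or -1 */

void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		/* Ask KVM to exit as soon as the guest can take an IRQ. */
		run->request_interrupt_window = next_pending_irq() >= 0;

		if (ioctl(vcpu_fd, KVM_RUN, run) < 0)
			return;		/* signal (-EINTR) or real error */

		/* These fields were refreshed by post_kvm_run_save(). */
		if (run->ready_for_interrupt_injection &&
		    next_pending_irq() >= 0) {
			struct kvm_interrupt intr = {
				.irq = next_pending_irq(),
			};

			ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
		}

		switch (run->exit_reason) {
		case KVM_EXIT_IRQ_WINDOW_OPEN:	/* injection handled above */
		case KVM_EXIT_HLT:		/* guest idle; wait for IRQ */
			break;
		default:
			break;			/* I/O, MMIO, ... elsewhere */
		}
	}
}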