Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r--  drivers/kvm/vmx.c | 62
1 file changed, 37 insertions(+), 25 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index bda7a7ae2167..f0f0b1a781f8 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <asm/io.h>
+#include <asm/desc.h>
 
 #include "segment_descriptor.h"
 
@@ -33,7 +34,7 @@ MODULE_LICENSE("GPL");
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
 #define HOST_IS_64 0
@@ -70,15 +71,13 @@ static struct kvm_vmx_segment_field {
 };
 
 static const u32 vmx_msr_index[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
        MSR_EFER, MSR_K6_STAR,
 };
 #define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
 
-struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr);
-
 static inline int is_page_fault(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -92,6 +91,16 @@ static inline int is_external_interrupt(u32 intr_info)
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+       int i;
+
+       for (i = 0; i < vcpu->nmsrs; ++i)
+               if (vcpu->guest_msrs[i].index == msr)
+                       return &vcpu->guest_msrs[i];
+       return 0;
+}
+
 static void vmcs_clear(struct vmcs *vmcs)
 {
        u64 phys_addr = __pa(vmcs);
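
This hunk turns find_msr_entry() from an extern declaration (removed above) into a static helper: a linear scan of the vcpu's guest-MSR save area that returns 0 on a miss. A minimal caller sketch, assuming the entry's .data field holds the saved value (the helper is patch code; the caller below is hypothetical):

    /* Hypothetical caller: fetch the saved guest EFER, or 0 if the
     * MSR is not present in the save area. */
    static u64 read_saved_efer(struct kvm_vcpu *vcpu)
    {
            struct vmx_msr_entry *e = find_msr_entry(vcpu, MSR_EFER);

            return e ? e->data : 0;
    }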
@@ -137,7 +146,7 @@ static u32 vmcs_read32(unsigned long field)
 
 static u64 vmcs_read64(unsigned long field)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        return vmcs_readl(field);
 #else
        return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
@@ -167,7 +176,7 @@ static void vmcs_write32(unsigned long field, u32 value)
 
 static void vmcs_write64(unsigned long field, u64 value)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        vmcs_writel(field, value);
 #else
        vmcs_writel(field, value);
@@ -296,7 +305,7 @@ static void guest_write_tsc(u64 guest_tsc)
 
 static void reload_tss(void)
 {
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 
        /*
         * VT restores TR but not its size.  Useless.
@@ -327,7 +336,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
        }
 
        switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        case MSR_FS_BASE:
                data = vmcs_readl(GUEST_FS_BASE);
                break;
@@ -390,7 +399,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
        struct vmx_msr_entry *msr;
        switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
                break;
@@ -525,7 +534,7 @@ static __init void hardware_enable(void *garbage)
        u64 old;
 
        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-       if ((old & 5) == 0)
+       if ((old & 5) != 5)
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
        write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
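
The corrected test reflects the IA32_FEATURE_CONTROL layout: bit 0 locks the MSR and bit 2 permits VMXON, so only the value 5 means "VMX enabled and locked". The old `(old & 5) == 0` wrote the MSR only when both bits were clear; `(old & 5) != 5` now attempts the enable-and-lock write in every state short of fully enabled. A sketch of the bit decode, with illustrative macro names that are not from the patch:

    #define FC_LOCK      (1ULL << 0)  /* MSR write-locked by firmware */
    #define FC_VMXON_OK  (1ULL << 2)  /* VMXON instruction permitted  */

    /* Mirrors "(old & 5) == 5": VMX is usable only when both are set. */
    static int vmx_usable(u64 feature_control)
    {
            return (feature_control & (FC_LOCK | FC_VMXON_OK))
                    == (FC_LOCK | FC_VMXON_OK);
    }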
@@ -725,7 +734,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
 {
@@ -767,7 +776,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
                enter_rmode(vcpu);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
                        enter_lmode(vcpu);
@@ -808,7 +817,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        vcpu->cr4 = cr4;
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
@@ -883,6 +892,8 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
+       if (ar == 0) /* a 0 value means unusable */
+               ar = AR_UNUSABLE_MASK;
        vmcs_write32(sf->ar_bytes, ar);
 }
 
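The added lines handle the VMX "unusable" encoding: the access-rights field carries a dedicated unusable bit (AR_UNUSABLE_MASK), and an all-zero ar built from a zeroed segment must be written as unusable rather than as a literal 0. A sketch of the inverse mapping a get-segment path would apply (illustrative, not part of this patch):

    /* Hypothetical inverse: report an unusable segment back as ar == 0. */
    static u32 ar_for_userspace(u32 ar)
    {
            return (ar & AR_UNUSABLE_MASK) ? 0 : ar;
    }
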
@@ -1095,7 +1106,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
        vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
        vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
        rdmsrl(MSR_GS_BASE, a);
@@ -1164,8 +1175,10 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
                               VM_ENTRY_CONTROLS, 0);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
 
+#ifdef CONFIG_X86_64
        vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
        vmcs_writel(TPR_THRESHOLD, 0);
+#endif
 
        vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
@@ -1173,7 +1186,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        vcpu->cr0 = 0x60000010;
        vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
        vmx_set_cr4(vcpu, 0);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        vmx_set_efer(vcpu, 0);
 #endif
 
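0x60000010 is the architectural reset value of CR0, so the vmx_set_cr0() call above lands the guest in real mode, exactly as a freshly reset CPU would be. A compile-time check of the decomposition (macro names are illustrative; the patch uses the bare literal):

    /* 0x60000010 == CD | NW | ET: caches disabled, not write-through,
     * FPU-type bit set -- the value CR0 holds after a hardware reset. */
    #define MY_CR0_ET (1u << 4)
    #define MY_CR0_NW (1u << 29)
    #define MY_CR0_CD (1u << 30)
    _Static_assert((MY_CR0_CD | MY_CR0_NW | MY_CR0_ET) == 0x60000010u,
                   "CR0 reset value");
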
@@ -1689,7 +1702,7 @@ again:
                vmcs_write16(HOST_GS_SELECTOR, 0);
        }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
        vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1713,7 +1726,7 @@ again:
        asm (
                /* Store host registers */
                "pushf \n\t"
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                "push %%rax; push %%rbx; push %%rdx;"
                "push %%rsi; push %%rdi; push %%rbp;"
                "push %%r8;  push %%r9;  push %%r10; push %%r11;"
@@ -1727,7 +1740,7 @@ again:
                /* Check if vmlaunch of vmresume is needed */
                "cmp $0, %1 \n\t"
                /* Load guest registers.  Don't clobber flags. */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                "mov %c[cr2](%3), %%rax \n\t"
                "mov %%rax, %%cr2 \n\t"
                "mov %c[rax](%3), %%rax \n\t"
@@ -1764,7 +1777,7 @@ again:
                ".globl kvm_vmx_return \n\t"
                "kvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                "xchg %3, 0(%%rsp) \n\t"
                "mov %%rax, %c[rax](%3) \n\t"
                "mov %%rbx, %c[rbx](%3) \n\t"
@@ -1816,7 +1829,7 @@ again:
                [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
                [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
                [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
                [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
                [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
                [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1837,7 +1850,7 @@ again:
        fx_save(vcpu->guest_fx_image);
        fx_restore(vcpu->host_fx_image);
 
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
        asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 #endif
 
@@ -1855,7 +1868,7 @@ again:
         */
        local_irq_disable();
        load_gs(gs_sel);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
        local_irq_enable();
@@ -1965,7 +1978,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
        .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
        .set_cr3 = vmx_set_cr3,
        .set_cr4 = vmx_set_cr4,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
        .set_efer = vmx_set_efer,
 #endif
        .get_idt = vmx_get_idt,
@@ -1989,8 +2002,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 
 static int __init vmx_init(void)
 {
-       kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
-       return 0;
+       return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
 }
 
 static void __exit vmx_exit(void)
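
vmx_init() previously discarded kvm_init_arch()'s status, reporting success even when registration failed; returning it lets the module load fail cleanly. The same propagate-don't-swallow shape in isolation (sketch; register_backend() is a made-up stand-in for kvm_init_arch()):

    static int __init example_init(void)
    {
            /* A nonzero status aborts the module load instead of
             * leaving a half-initialized module behind. */
            return register_backend();
    }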