Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/Kbuild        |   1
-rw-r--r--  arch/x86/include/asm/hyperv.h      | 186
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h |  17
-rw-r--r--  arch/x86/include/asm/kvm_host.h    |  60
-rw-r--r--  arch/x86/include/asm/kvm_para.h    |   1
-rw-r--r--  arch/x86/include/asm/svm.h         |   2
-rw-r--r--  arch/x86/include/asm/vmx.h         |   5
7 files changed, 243 insertions, 29 deletions
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 9f828f87ca35..493092efaa3b 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,6 +11,7 @@ header-y += sigcontext32.h
 header-y += ucontext.h
 header-y += processor-flags.h
 header-y += hw_breakpoint.h
+header-y += hyperv.h
 
 unifdef-y += e820.h
 unifdef-y += ist.h
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
new file mode 100644
index 000000000000..e153a2b3889a
--- /dev/null
+++ b/arch/x86/include/asm/hyperv.h
@@ -0,0 +1,186 @@
+#ifndef _ASM_X86_KVM_HYPERV_H
+#define _ASM_X86_KVM_HYPERV_H
+
+#include <linux/types.h>
+
+/*
+ * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+ * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
+ */
+#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS	0x40000000
+#define HYPERV_CPUID_INTERFACE			0x40000001
+#define HYPERV_CPUID_VERSION			0x40000002
+#define HYPERV_CPUID_FEATURES			0x40000003
+#define HYPERV_CPUID_ENLIGHTMENT_INFO		0x40000004
+#define HYPERV_CPUID_IMPLEMENT_LIMITS		0x40000005
+
+/*
+ * Feature identification. EAX indicates which features are available
+ * to the partition based upon the current partition privileges.
+ */
+
+/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
+#define HV_X64_MSR_VP_RUNTIME_AVAILABLE		(1 << 0)
+/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available */
+#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE	(1 << 1)
+/*
+ * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
+ * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
+ */
+#define HV_X64_MSR_SYNIC_AVAILABLE		(1 << 2)
+/*
+ * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
+ * HV_X64_MSR_STIMER3_COUNT) available
+ */
+#define HV_X64_MSR_SYNTIMER_AVAILABLE		(1 << 3)
+/*
+ * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
+ * are available
+ */
+#define HV_X64_MSR_APIC_ACCESS_AVAILABLE	(1 << 4)
+/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available */
+#define HV_X64_MSR_HYPERCALL_AVAILABLE		(1 << 5)
+/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available */
+#define HV_X64_MSR_VP_INDEX_AVAILABLE		(1 << 6)
+/* Virtual system reset MSR (HV_X64_MSR_RESET) is available */
+#define HV_X64_MSR_RESET_AVAILABLE		(1 << 7)
+/*
+ * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
+ * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
+ * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
+ */
+#define HV_X64_MSR_STAT_PAGES_AVAILABLE		(1 << 8)
+
+/*
+ * Feature identification: EBX indicates which flags were specified at
+ * partition creation. The format is the same as the partition creation
+ * flag structure defined in section Partition Creation Flags.
+ */
+#define HV_X64_CREATE_PARTITIONS		(1 << 0)
+#define HV_X64_ACCESS_PARTITION_ID		(1 << 1)
+#define HV_X64_ACCESS_MEMORY_POOL		(1 << 2)
+#define HV_X64_ADJUST_MESSAGE_BUFFERS		(1 << 3)
+#define HV_X64_POST_MESSAGES			(1 << 4)
+#define HV_X64_SIGNAL_EVENTS			(1 << 5)
+#define HV_X64_CREATE_PORT			(1 << 6)
+#define HV_X64_CONNECT_PORT			(1 << 7)
+#define HV_X64_ACCESS_STATS			(1 << 8)
+#define HV_X64_DEBUGGING			(1 << 11)
+#define HV_X64_CPU_POWER_MANAGEMENT		(1 << 12)
+#define HV_X64_CONFIGURE_PROFILER		(1 << 13)
+
+/*
+ * Feature identification. EDX indicates which miscellaneous features
+ * are available to the partition.
+ */
+/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
+#define HV_X64_MWAIT_AVAILABLE			(1 << 0)
+/* Guest debugging support is available */
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE	(1 << 1)
+/* Performance Monitor support is available */
+#define HV_X64_PERF_MONITOR_AVAILABLE		(1 << 2)
+/* Support for physical CPU dynamic partitioning events is available */
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE	(1 << 3)
+/*
+ * Support for passing hypercall input parameter block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE	(1 << 4)
+/* Support for a virtual guest idle state is available */
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE	(1 << 5)
+
+/*
+ * Implementation recommendations. Indicates which behaviors the hypervisor
+ * recommends the OS implement for optimal performance.
+ */
+/*
+ * Recommend using hypercall for address space switches rather
+ * than MOV to CR3 instruction
+ */
+#define HV_X64_MWAIT_RECOMMENDED		(1 << 0)
+/* Recommend using hypercall for local TLB flushes rather
+ * than INVLPG or MOV to CR3 instructions */
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED	(1 << 1)
+/*
+ * Recommend using hypercall for remote TLB flushes rather
+ * than inter-processor interrupts
+ */
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED	(1 << 2)
+/*
+ * Recommend using MSRs for accessing APIC registers
+ * EOI, ICR and TPR rather than their memory-mapped counterparts
+ */
+#define HV_X64_APIC_ACCESS_RECOMMENDED		(1 << 3)
+/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
+#define HV_X64_SYSTEM_RESET_RECOMMENDED		(1 << 4)
+/*
+ * Recommend using relaxed timing for this partition. If used,
+ * the VM should disable any watchdog timeouts that rely on the
+ * timely delivery of external interrupts
+ */
+#define HV_X64_RELAXED_TIMING_RECOMMENDED	(1 << 5)
+
+/* MSR used to identify the guest OS. */
+#define HV_X64_MSR_GUEST_OS_ID			0x40000000
+
+/* MSR used to setup pages used to communicate with the hypervisor. */
+#define HV_X64_MSR_HYPERCALL			0x40000001
+
+/* MSR used to provide vcpu index */
+#define HV_X64_MSR_VP_INDEX			0x40000002
+
+/* Define the virtual APIC registers */
+#define HV_X64_MSR_EOI				0x40000070
+#define HV_X64_MSR_ICR				0x40000071
+#define HV_X64_MSR_TPR				0x40000072
+#define HV_X64_MSR_APIC_ASSIST_PAGE		0x40000073
+
+/* Define synthetic interrupt controller model specific registers. */
+#define HV_X64_MSR_SCONTROL			0x40000080
+#define HV_X64_MSR_SVERSION			0x40000081
+#define HV_X64_MSR_SIEFP			0x40000082
+#define HV_X64_MSR_SIMP				0x40000083
+#define HV_X64_MSR_EOM				0x40000084
+#define HV_X64_MSR_SINT0			0x40000090
+#define HV_X64_MSR_SINT1			0x40000091
+#define HV_X64_MSR_SINT2			0x40000092
+#define HV_X64_MSR_SINT3			0x40000093
+#define HV_X64_MSR_SINT4			0x40000094
+#define HV_X64_MSR_SINT5			0x40000095
+#define HV_X64_MSR_SINT6			0x40000096
+#define HV_X64_MSR_SINT7			0x40000097
+#define HV_X64_MSR_SINT8			0x40000098
+#define HV_X64_MSR_SINT9			0x40000099
+#define HV_X64_MSR_SINT10			0x4000009A
+#define HV_X64_MSR_SINT11			0x4000009B
+#define HV_X64_MSR_SINT12			0x4000009C
+#define HV_X64_MSR_SINT13			0x4000009D
+#define HV_X64_MSR_SINT14			0x4000009E
+#define HV_X64_MSR_SINT15			0x4000009F
+
+
+#define HV_X64_MSR_HYPERCALL_ENABLE		0x00000001
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT	12
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK	\
+		(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
+
+/* Declare the various hypercall operations. */
+#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT		0x0008
+
+#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE	0x00000001
+#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT	12
+#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK	\
+		(~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+#define HV_PROCESSOR_POWER_STATE_C0		0
+#define HV_PROCESSOR_POWER_STATE_C1		1
+#define HV_PROCESSOR_POWER_STATE_C2		2
+#define HV_PROCESSOR_POWER_STATE_C3		3
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS			0
+#define HV_STATUS_INVALID_HYPERCALL_CODE	2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT	3
+#define HV_STATUS_INVALID_ALIGNMENT		4
+
+#endif
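The constants above are the guest-visible contract for the Hyper-V hypercall interface: a guest first identifies itself through HV_X64_MSR_GUEST_OS_ID and then maps and enables the hypercall page through HV_X64_MSR_HYPERCALL. Below is a minimal sketch of that sequence; the wrmsrl()/rdmsrl() helpers from <asm/msr.h> and the guest OS id value are illustrative assumptions, not part of this patch.

#include <asm/hyperv.h>
#include <asm/msr.h>

static void hv_enable_hypercall_page(u64 hypercall_page_gpa)
{
	u64 hv_msr;

	/* The hypercall page cannot be enabled until a guest OS id is set
	 * (illustrative value; the real encoding is vendor/OS specific). */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0x1ULL);

	/* Point the hypercall page at a page-aligned GPA and set the enable
	 * bit, preserving the low control bits already present. */
	rdmsrl(HV_X64_MSR_HYPERCALL, hv_msr);
	hv_msr &= ~HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
	hv_msr |= (hypercall_page_gpa & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK) |
		  HV_X64_MSR_HYPERCALL_ENABLE;
	wrmsrl(HV_X64_MSR_HYPERCALL, hv_msr);
}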
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7c18e1230f54..7a6f54fa13ba 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
 struct x86_emulate_ops {
 	/*
 	 * read_std: Read bytes of standard (non-emulated/special) memory.
-	 *           Used for instruction fetch, stack operations, and others.
+	 *           Used for descriptor reading.
 	 *           @addr:  [IN ] Linear address from which to read.
 	 *           @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 *           @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu);
+			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+
+	/*
+	 * fetch: Read bytes of standard (non-emulated/special) memory.
+	 *        Used for instruction fetch.
+	 *        @addr:  [IN ] Linear address from which to read.
+	 *        @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+	 *        @bytes: [IN ] Number of bytes to read from memory.
+	 */
+	int (*fetch)(unsigned long addr, void *val,
+		     unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
@@ -74,7 +84,7 @@ struct x86_emulate_ops {
 			struct kvm_vcpu *vcpu);
 
 	/*
-	 * write_emulated: Read bytes from emulated/special memory area.
+	 * write_emulated: Write bytes to emulated/special memory area.
 	 *                 @addr:  [IN ] Linear address to which to write.
 	 *                 @val:   [IN ] Value to write to memory (low-order bytes used as
 	 *                               required).
@@ -168,6 +178,7 @@ struct x86_emulate_ctxt {
 
 /* Execution mode, passed to the emulator. */
 #define X86EMUL_MODE_REAL     0	/* Real mode. */
+#define X86EMUL_MODE_VM86     1	/* Virtual 8086 mode. */
 #define X86EMUL_MODE_PROT16   2	/* 16-bit protected mode. */
 #define X86EMUL_MODE_PROT32   4	/* 32-bit protected mode. */
 #define X86EMUL_MODE_PROT64   8	/* 64-bit (long) mode. */
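The read_std()/fetch() split above lets instruction fetches and descriptor/stack reads be permission-checked separately and report their own page-fault error codes through the new u32 *error argument. A hedged sketch of how an emulator core could consume the fetch() callback; the helper name and the assumption that the emulation context still carries a vcpu pointer are illustrative, not taken from this patch.

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long linear_rip, u8 *dest)
{
	u32 err;

	/* Use fetch() rather than read_std() so the access is validated as
	 * an instruction fetch and err receives a page-fault error code. */
	return ops->fetch(linear_rip, dest, 1, ctxt->vcpu, &err);
}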
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f865e8b8540..06d9e79ca37d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,7 +25,7 @@
 #include <asm/mtrr.h>
 #include <asm/msr-index.h>
 
-#define KVM_MAX_VCPUS 16
+#define KVM_MAX_VCPUS 64
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -38,19 +38,6 @@
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
 				  0xFFFFFF0000000000ULL)
 
-#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST	\
-	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
-#define KVM_GUEST_CR0_MASK	\
-	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST	\
-	(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
-#define KVM_VM_CR0_ALWAYS_ON	\
-	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_GUEST_CR4_MASK	\
-	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
-#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
-#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
-
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
@@ -256,7 +243,8 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
 	void (*free)(struct kvm_vcpu *vcpu);
-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+			    u32 *error);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -282,13 +270,15 @@ struct kvm_vcpu_arch {
 	u32 regs_dirty;
 
 	unsigned long cr0;
+	unsigned long cr0_guest_owned_bits;
 	unsigned long cr2;
 	unsigned long cr3;
 	unsigned long cr4;
+	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
 	u32 hflags;
 	u64 pdptrs[4]; /* pae */
-	u64 shadow_efer;
+	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic; /* kernel irqchip context */
 	int32_t apic_arb_prio;
@@ -374,17 +364,27 @@ struct kvm_vcpu_arch {
 	/* used for guest single stepping over the given code position */
 	u16 singlestep_cs;
 	unsigned long singlestep_rip;
+	/* fields used by HYPER-V emulation */
+	u64 hv_vapic;
 };
 
 struct kvm_mem_alias {
 	gfn_t base_gfn;
 	unsigned long npages;
 	gfn_t target_gfn;
+#define KVM_ALIAS_INVALID 1UL
+	unsigned long flags;
 };
 
-struct kvm_arch{
-	int naliases;
+#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+
+struct kvm_mem_aliases {
 	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+	int naliases;
+};
+
+struct kvm_arch {
+	struct kvm_mem_aliases *aliases;
 
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
@@ -416,6 +416,10 @@ struct kvm_arch{
 	s64 kvmclock_offset;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
+
+	/* fields used by HYPER-V emulation */
+	u64 hv_guest_os_id;
+	u64 hv_hypercall;
 };
 
 struct kvm_vm_stat {
@@ -471,6 +475,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
+	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
@@ -492,6 +497,7 @@ struct kvm_x86_ops {
 	void (*set_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -501,12 +507,13 @@ struct kvm_x86_ops {
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
-	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
-		       int *exception);
+	int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
+	int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+	void (*fpu_activate)(struct kvm_vcpu *vcpu);
+	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
 
@@ -531,7 +538,8 @@ struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
-	bool (*gb_page_enable)(void);
+	int (*get_lpage_level)(void);
+	bool (*rdtscp_supported)(void);
 
 	const struct trace_print_flags *exit_reasons_str;
 };
@@ -606,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-				int type_bits, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
@@ -653,6 +660,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
@@ -666,6 +677,7 @@ void kvm_disable_tdp(void);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
+bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
 
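The kvm_mmu_gva_to_gpa_{read,fetch,write,system}() declarations above pair with the new access/error parameters on the gva_to_gpa() callback: each wrapper translates a guest virtual address while checking one specific kind of access. A hedged sketch of what one wrapper might look like on the implementation side; the CPL check via kvm_x86_ops->get_cpl() and the PFERR_USER_MASK bit are assumptions for illustration, not part of this header change.

gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
	/* Accesses performed while the guest runs at CPL 3 must be checked
	 * as user-mode accesses. */
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
}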
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c584076a47f4..ffae1420e7d7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_KVM_PARA_H
 
 #include <linux/types.h>
+#include <asm/hyperv.h>
 
 /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.  It
  * should be used to determine that a VM is running under KVM.
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 1fecb7e61130..38638cd2fa4c 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -313,7 +313,7 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define SVM_EXIT_ERR -1
 
-#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
+#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
 
 #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
 #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2b4945419a84..fb9a080740ec 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -53,6 +53,7 @@
  */
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
 #define SECONDARY_EXEC_ENABLE_EPT               0x00000002
+#define SECONDARY_EXEC_RDTSCP                   0x00000008
 #define SECONDARY_EXEC_ENABLE_VPID              0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
 #define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
@@ -251,6 +252,7 @@ enum vmcs_field {
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE           32
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
 #define EXIT_REASON_MCE_DURING_VMENTRY  41
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
@@ -362,6 +364,7 @@ enum vmcs_field {
 #define VMX_EPTP_UC_BIT                 (1ull << 8)
 #define VMX_EPTP_WB_BIT                 (1ull << 14)
 #define VMX_EPT_2MB_PAGE_BIT            (1ull << 16)
+#define VMX_EPT_1GB_PAGE_BIT            (1ull << 17)
 #define VMX_EPT_EXTENT_INDIVIDUAL_BIT   (1ull << 24)
 #define VMX_EPT_EXTENT_CONTEXT_BIT      (1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT       (1ull << 26)
@@ -374,7 +377,7 @@ enum vmcs_field {
 #define VMX_EPT_READABLE_MASK           0x1ull
 #define VMX_EPT_WRITABLE_MASK           0x2ull
 #define VMX_EPT_EXECUTABLE_MASK         0x4ull
-#define VMX_EPT_IGMT_BIT                (1ull << 6)
+#define VMX_EPT_IPAT_BIT                (1ull << 6)
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
 
380 | 383 | ||