Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h        52
-rw-r--r--  arch/x86/include/asm/kvm_host.h           46
-rw-r--r--  arch/x86/include/asm/kvm_para.h           20
-rw-r--r--  arch/x86/include/asm/msr-index.h          12
-rw-r--r--  arch/x86/include/asm/paravirt.h            9
-rw-r--r--  arch/x86/include/asm/paravirt_types.h      1
-rw-r--r--  arch/x86/include/asm/processor-flags.h     1
-rw-r--r--  arch/x86/include/asm/vmx.h                43
8 files changed, 143 insertions, 41 deletions
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0049211959c0..6040d115ef51 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -229,7 +229,26 @@ struct read_cache {
         unsigned long end;
 };
 
-struct decode_cache {
+struct x86_emulate_ctxt {
+        struct x86_emulate_ops *ops;
+
+        /* Register state before/after emulation. */
+        unsigned long eflags;
+        unsigned long eip; /* eip before instruction emulation */
+        /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+        int mode;
+
+        /* interruptibility state, as a result of execution of STI or MOV SS */
+        int interruptibility;
+
+        bool guest_mode; /* guest running a nested guest */
+        bool perm_ok; /* do not check permissions if true */
+        bool only_vendor_specific_insn;
+
+        bool have_exception;
+        struct x86_exception exception;
+
+        /* decode cache */
         u8 twobyte;
         u8 b;
         u8 intercept;
@@ -246,8 +265,6 @@ struct decode_cache {
         unsigned int d;
         int (*execute)(struct x86_emulate_ctxt *ctxt);
         int (*check_perm)(struct x86_emulate_ctxt *ctxt);
-        unsigned long regs[NR_VCPU_REGS];
-        unsigned long eip;
         /* modrm */
         u8 modrm;
         u8 modrm_mod;
@@ -255,34 +272,14 @@ struct decode_cache {
         u8 modrm_rm;
         u8 modrm_seg;
         bool rip_relative;
+        unsigned long _eip;
+        /* Fields above regs are cleared together. */
+        unsigned long regs[NR_VCPU_REGS];
         struct fetch_cache fetch;
         struct read_cache io_read;
         struct read_cache mem_read;
 };
 
-struct x86_emulate_ctxt {
-        struct x86_emulate_ops *ops;
-
-        /* Register state before/after emulation. */
-        unsigned long eflags;
-        unsigned long eip; /* eip before instruction emulation */
-        /* Emulated execution mode, represented by an X86EMUL_MODE value. */
-        int mode;
-
-        /* interruptibility state, as a result of execution of STI or MOV SS */
-        int interruptibility;
-
-        bool guest_mode; /* guest running a nested guest */
-        bool perm_ok; /* do not check permissions if true */
-        bool only_vendor_specific_insn;
-
-        bool have_exception;
-        struct x86_exception exception;
-
-        /* decode cache */
-        struct decode_cache decode;
-};
-
 /* Repeat String Operation Prefix */
 #define REPE_PREFIX     0xf3
 #define REPNE_PREFIX    0xf2
@@ -373,6 +370,5 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
                          u16 tss_selector, int reason,
                          bool has_error_code, u32 error_code);
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
-                     struct x86_emulate_ops *ops, int irq);
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
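Note: struct decode_cache is gone as a separate type; its fields now sit
directly at the tail of struct x86_emulate_ctxt, ordered so that all
per-instruction scratch state lies between 'twobyte' and 'regs' (hence the
"Fields above regs are cleared together" comment). A minimal sketch,
assuming that layout, of how the emulator can then reset the decode state
with a single memset (the helper name is illustrative, not the kernel's
exact code):

    static void reset_decode_cache(struct x86_emulate_ctxt *ctxt)
    {
            /* Zero everything from 'twobyte' up to, but not including,
             * 'regs'; the registers and the fetch/read caches survive. */
            memset(&ctxt->twobyte, 0,
                   (char *)&ctxt->regs - (char *)&ctxt->twobyte);
    }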
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d2ac8e2ee897..dd51c83aa5de 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -48,7 +48,7 @@
         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-                          | X86_CR4_OSXSAVE \
+                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
                           | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -205,6 +205,7 @@ union kvm_mmu_page_role {
                 unsigned invalid:1;
                 unsigned nxe:1;
                 unsigned cr0_wp:1;
+                unsigned smep_andnot_wp:1;
         };
 };
 
@@ -227,15 +228,17 @@ struct kvm_mmu_page {
          * in this shadow page.
          */
         DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
-        bool multimapped;         /* More than one parent_pte? */
         bool unsync;
         int root_count;          /* Currently serving as active root */
         unsigned int unsync_children;
-        union {
-                u64 *parent_pte;               /* !multimapped */
-                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
-        };
+        unsigned long parent_ptes;      /* Reverse mapping for parent_pte */
         DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+#ifdef CONFIG_X86_32
+        int clear_spte_count;
+#endif
+
+        struct rcu_head rcu;
 };
 
 struct kvm_pv_mmu_op_buffer {
@@ -269,8 +272,6 @@ struct kvm_mmu {
         gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                             struct x86_exception *exception);
         gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
-        void (*prefetch_page)(struct kvm_vcpu *vcpu,
-                              struct kvm_mmu_page *page);
         int (*sync_page)(struct kvm_vcpu *vcpu,
                          struct kvm_mmu_page *sp);
         void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
@@ -346,8 +347,7 @@ struct kvm_vcpu_arch {
          * put it here to avoid allocation */
         struct kvm_pv_mmu_op_buffer mmu_op_buffer;
 
-        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
-        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
         struct kvm_mmu_memory_cache mmu_page_cache;
         struct kvm_mmu_memory_cache mmu_page_header_cache;
 
@@ -393,6 +393,15 @@ struct kvm_vcpu_arch {
         unsigned int hw_tsc_khz;
         unsigned int time_offset;
         struct page *time_page;
+
+        struct {
+                u64 msr_val;
+                u64 last_steal;
+                u64 accum_steal;
+                struct gfn_to_hva_cache stime;
+                struct kvm_steal_time steal;
+        } st;
+
         u64 last_guest_tsc;
         u64 last_kernel_ns;
         u64 last_tsc_nsec;
@@ -419,6 +428,11 @@ struct kvm_vcpu_arch {
         u64 mcg_ctl;
         u64 *mce_banks;
 
+        /* Cache MMIO info */
+        u64 mmio_gva;
+        unsigned access;
+        gfn_t mmio_gfn;
+
         /* used for guest single stepping over the given code position */
         unsigned long singlestep_rip;
 
@@ -441,6 +455,7 @@ struct kvm_arch {
         unsigned int n_used_mmu_pages;
         unsigned int n_requested_mmu_pages;
         unsigned int n_max_mmu_pages;
+        unsigned int indirect_shadow_pages;
         atomic_t invlpg_counter;
         struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
         /*
@@ -477,6 +492,8 @@ struct kvm_arch {
         u64 hv_guest_os_id;
         u64 hv_hypercall;
 
+        atomic_t reader_counter;
+
         #ifdef CONFIG_KVM_MMU_AUDIT
         int audit_point;
         #endif
@@ -559,7 +576,7 @@ struct kvm_x86_ops {
         void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
         void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
         void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
-        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
         void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
         void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
         void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
@@ -636,7 +653,6 @@ void kvm_mmu_module_exit(void);
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
-void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                 u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -830,11 +846,12 @@ enum {
 asmlinkage void kvm_spurious_fault(void);
 extern bool kvm_rebooting;
 
-#define __kvm_handle_fault_on_reboot(insn) \
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)      \
         "666: " insn "\n\t" \
         "668: \n\t" \
         ".pushsection .fixup, \"ax\" \n" \
         "667: \n\t" \
+        cleanup_insn "\n\t" \
         "cmpb $0, kvm_rebooting \n\t" \
         "jne 668b \n\t" \
         __ASM_SIZE(push) " $666b \n\t" \
@@ -844,6 +861,9 @@ extern bool kvm_rebooting;
         _ASM_PTR " 666b, 667b \n\t" \
         ".popsection"
 
+#define __kvm_handle_fault_on_reboot(insn)              \
+        ____kvm_handle_fault_on_reboot(insn, "")
+
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
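Note: ____kvm_handle_fault_on_reboot() is the old fault-on-reboot fixup
with an extra cleanup instruction that runs on the fixup path before the
kvm_rebooting check; the original one-argument macro survives as a wrapper
passing an empty string. A sketch of a caller that wants a register
cleared when the faulting instruction is skipped (macro name illustrative):

    /* Execute INSN; if it faults during reboot, zero REG on the fixup
     * path so the caller never consumes stale register contents. */
    #define __ex_clear(insn, reg) \
            ____kvm_handle_fault_on_reboot(insn, "xor " reg ", " reg)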
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index a427bf77a93d..734c3767cfac 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -21,6 +21,7 @@
  */
 #define KVM_FEATURE_CLOCKSOURCE2        3
 #define KVM_FEATURE_ASYNC_PF            4
+#define KVM_FEATURE_STEAL_TIME          5
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -30,10 +31,23 @@
 #define MSR_KVM_WALL_CLOCK  0x11
 #define MSR_KVM_SYSTEM_TIME 0x12
 
+#define KVM_MSR_ENABLED 1
 /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
 #define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
 #define MSR_KVM_ASYNC_PF_EN 0x4b564d02
+#define MSR_KVM_STEAL_TIME  0x4b564d03
+
+struct kvm_steal_time {
+        __u64 steal;
+        __u32 version;
+        __u32 flags;
+        __u32 pad[12];
+};
+
+#define KVM_STEAL_ALIGNMENT_BITS 5
+#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
+#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
 
 #define KVM_MAX_MMU_OP_BATCH           32
 
@@ -178,6 +192,7 @@ void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
+extern void kvm_disable_steal_time(void);
 #else
 #define kvm_guest_init() do { } while (0)
 #define kvm_async_pf_task_wait(T) do {} while(0)
@@ -186,6 +201,11 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
 {
         return 0;
 }
+
+static inline void kvm_disable_steal_time(void)
+{
+        return;
+}
 #endif
 
 #endif /* __KERNEL__ */
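Note: a guest turns steal time reporting on by writing the physical
address of a 64-byte-aligned struct kvm_steal_time, with KVM_MSR_ENABLED
set in bit 0, to MSR_KVM_STEAL_TIME; KVM_STEAL_VALID_BITS and
KVM_STEAL_RESERVED_MASK carve that address/enable layout out of the MSR
value. The 'version' field behaves like a seqcount: the host makes it odd
before updating 'steal' and even again afterwards. A guest-side read
could look like this sketch (function name illustrative, not necessarily
the exact kernel code):

    static u64 read_steal_ns(struct kvm_steal_time *st)
    {
            u32 version;
            u64 steal;

            do {
                    version = st->version;
                    rmb();          /* read 'steal' after 'version' */
                    steal = st->steal;
                    rmb();          /* then re-check 'version' */
            } while ((version & 1) || (version != st->version));

            return steal;
    }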
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d96bdb25ca3d..d52609aeeab8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -441,6 +441,18 @@
 #define MSR_IA32_VMX_VMCS_ENUM          0x0000048a
 #define MSR_IA32_VMX_PROCBASED_CTLS2    0x0000048b
 #define MSR_IA32_VMX_EPT_VPID_CAP       0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS  0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x00000490
+
+/* VMX_BASIC bits and bitmasks */
+#define VMX_BASIC_VMCS_SIZE_SHIFT       32
+#define VMX_BASIC_64            0x0001000000000000LLU
+#define VMX_BASIC_MEM_TYPE_SHIFT        50
+#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
+#define VMX_BASIC_MEM_TYPE_WB   6LLU
+#define VMX_BASIC_INOUT         0x0040000000000000LLU
 
 /* AMD-V MSRs */
 
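Note: the VMX_BASIC_* masks decode MSR_IA32_VMX_BASIC (0x480): the VMCS
region size starts at bit 32, the VMCS memory type occupies the field at
bit 50, and VMX_BASIC_INOUT reports whether VM exits supply INS/OUTS
instruction information. A sketch of checking the memory type with these
masks (helper name illustrative):

    static int vmx_basic_mem_type_is_wb(u64 vmx_basic)
    {
            u64 mem_type = (vmx_basic & VMX_BASIC_MEM_TYPE_MASK)
                                    >> VMX_BASIC_MEM_TYPE_SHIFT;

            return mem_type == VMX_BASIC_MEM_TYPE_WB; /* write-back */
    }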
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ebbc4d8ab170..a7d2db9a74fb 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -230,6 +230,15 @@ static inline unsigned long long paravirt_sched_clock(void)
         return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
 
+struct jump_label_key;
+extern struct jump_label_key paravirt_steal_enabled;
+extern struct jump_label_key paravirt_steal_rq_enabled;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
+}
+
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
         return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
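Note: paravirt_steal_clock() reports the time a vCPU spent involuntarily
waiting for the host, and the two jump-label keys let accounting code skip
the PVOP call entirely on bare metal. The intended call site looks roughly
like this sketch (schematic, based only on the declarations above):

    /* In the CPU time accounting path: only take the hypervisor-backed
     * branch when steal time reporting has actually been enabled. */
    if (static_branch(&paravirt_steal_enabled)) {
            u64 steal = paravirt_steal_clock(smp_processor_id());
            /* ... fold 'steal' into the stolen-time statistics ... */
    }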
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 82885099c869..2c7652163111 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -89,6 +89,7 @@ struct pv_lazy_ops {
 
 struct pv_time_ops {
         unsigned long long (*sched_clock)(void);
+        unsigned long long (*steal_clock)(int cpu);
         unsigned long (*get_tsc_khz)(void);
 };
 
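Note: a guest port enables the new hook by filling in this member, e.g.
(a sketch, assuming a kvm_steal_clock() that reads the per-cpu
kvm_steal_time area; not the exact registration code):

    pv_time_ops.steal_clock = kvm_steal_clock;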
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 59ab4dffa377..2dddb317bb39 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -59,6 +59,7 @@
 #define X86_CR4_OSFXSR  0x00000200 /* enable fast FPU save and restore */
 #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
 #define X86_CR4_VMXE    0x00002000 /* enable VMX virtualization */
+#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
 #define X86_CR4_SMEP    0x00100000 /* enable SMEP support */
 
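Note: X86_CR4_RDWRGSFS (bit 16) gates the RDFSBASE/WRFSBASE/RDGSBASE/
WRGSBASE instructions; the kvm_host.h hunk above also drops it from the
guest's CR4 reserved-bit mask, so guests may now set it. A sketch of
enabling the bit with the raw CR4 accessors (illustrative):

    unsigned long cr4 = read_cr4();

    write_cr4(cr4 | X86_CR4_RDWRGSFS);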
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 84471b810460..2caf290e9895 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -132,6 +132,8 @@ enum vmcs_field {
         GUEST_IA32_PAT_HIGH             = 0x00002805,
         GUEST_IA32_EFER                 = 0x00002806,
         GUEST_IA32_EFER_HIGH            = 0x00002807,
+        GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
+        GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
         GUEST_PDPTR0                    = 0x0000280a,
         GUEST_PDPTR0_HIGH               = 0x0000280b,
         GUEST_PDPTR1                    = 0x0000280c,
@@ -144,6 +146,8 @@ enum vmcs_field {
         HOST_IA32_PAT_HIGH              = 0x00002c01,
         HOST_IA32_EFER                  = 0x00002c02,
         HOST_IA32_EFER_HIGH             = 0x00002c03,
+        HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
+        HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
         PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
         CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
         EXCEPTION_BITMAP                = 0x00004004,
@@ -426,4 +430,43 @@ struct vmx_msr_entry {
         u64 value;
 } __aligned(16);
 
+/*
+ * Exit Qualifications for entry failure during or after loading guest state
+ */
+#define ENTRY_FAIL_DEFAULT              0
+#define ENTRY_FAIL_PDPTE                2
+#define ENTRY_FAIL_NMI                  3
+#define ENTRY_FAIL_VMCS_LINK_PTR        4
+
+/*
+ * VM-instruction error numbers
+ */
+enum vm_instruction_error_number {
+        VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
+        VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
+        VMXERR_VMCLEAR_VMXON_POINTER = 3,
+        VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
+        VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
+        VMXERR_VMRESUME_AFTER_VMXOFF = 6,
+        VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
+        VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
+        VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
+        VMXERR_VMPTRLD_VMXON_POINTER = 10,
+        VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
+        VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
+        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
+        VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
+        VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
+        VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
+        VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
+        VMXERR_VMCALL_NONCLEAR_VMCS = 19,
+        VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
+        VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
+        VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
+        VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
+        VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
+        VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
+        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+};
+
 #endif
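Note: these values mirror the VM-instruction error numbers from the Intel
SDM; after a VMX instruction fails with VMfailValid, the error number is
available in the VM_INSTRUCTION_ERROR VMCS field. A sketch of surfacing
one (vmcs_read32() is KVM's VMCS accessor; the message is illustrative):

    u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);

    if (error == VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID)
            pr_err("vmptrld: mismatched VMCS revision id\n");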