diff options
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 45 | ||||
-rw-r--r-- | arch/x86/include/asm/kvm_x86_emulate.h | 11 | ||||
-rw-r--r-- | arch/x86/include/asm/mtrr.h | 25 | ||||
-rw-r--r-- | arch/x86/include/asm/svm.h | 328 | ||||
-rw-r--r-- | arch/x86/include/asm/virtext.h | 132 | ||||
-rw-r--r-- | arch/x86/include/asm/vmx.h | 382 |
6 files changed, 897 insertions, 26 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8346be87cfa1..97215a458e5f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <asm/pvclock-abi.h> | 22 | #include <asm/pvclock-abi.h> |
23 | #include <asm/desc.h> | 23 | #include <asm/desc.h> |
24 | #include <asm/mtrr.h> | ||
24 | 25 | ||
25 | #define KVM_MAX_VCPUS 16 | 26 | #define KVM_MAX_VCPUS 16 |
26 | #define KVM_MEMORY_SLOTS 32 | 27 | #define KVM_MEMORY_SLOTS 32 |
@@ -86,6 +87,7 @@ | |||
86 | #define KVM_MIN_FREE_MMU_PAGES 5 | 87 | #define KVM_MIN_FREE_MMU_PAGES 5 |
87 | #define KVM_REFILL_PAGES 25 | 88 | #define KVM_REFILL_PAGES 25 |
88 | #define KVM_MAX_CPUID_ENTRIES 40 | 89 | #define KVM_MAX_CPUID_ENTRIES 40 |
90 | #define KVM_NR_FIXED_MTRR_REGION 88 | ||
89 | #define KVM_NR_VAR_MTRR 8 | 91 | #define KVM_NR_VAR_MTRR 8 |
90 | 92 | ||
91 | extern spinlock_t kvm_lock; | 93 | extern spinlock_t kvm_lock; |
@@ -180,6 +182,8 @@ struct kvm_mmu_page { | |||
180 | struct list_head link; | 182 | struct list_head link; |
181 | struct hlist_node hash_link; | 183 | struct hlist_node hash_link; |
182 | 184 | ||
185 | struct list_head oos_link; | ||
186 | |||
183 | /* | 187 | /* |
184 | * The following two entries are used to key the shadow page in the | 188 | * The following two entries are used to key the shadow page in the |
185 | * hash table. | 189 | * hash table. |
@@ -190,13 +194,16 @@ struct kvm_mmu_page { | |||
190 | u64 *spt; | 194 | u64 *spt; |
191 | /* hold the gfn of each spte inside spt */ | 195 | /* hold the gfn of each spte inside spt */ |
192 | gfn_t *gfns; | 196 | gfn_t *gfns; |
193 | unsigned long slot_bitmap; /* One bit set per slot which has memory | 197 | /* |
194 | * in this shadow page. | 198 | * One bit set per slot which has memory |
195 | */ | 199 | * in this shadow page. |
200 | */ | ||
201 | DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); | ||
196 | int multimapped; /* More than one parent_pte? */ | 202 | int multimapped; /* More than one parent_pte? */ |
197 | int root_count; /* Currently serving as active root */ | 203 | int root_count; /* Currently serving as active root */ |
198 | bool unsync; | 204 | bool unsync; |
199 | bool unsync_children; | 205 | bool global; |
206 | unsigned int unsync_children; | ||
200 | union { | 207 | union { |
201 | u64 *parent_pte; /* !multimapped */ | 208 | u64 *parent_pte; /* !multimapped */ |
202 | struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ | 209 | struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ |
@@ -327,8 +334,10 @@ struct kvm_vcpu_arch { | |||
327 | 334 | ||
328 | bool nmi_pending; | 335 | bool nmi_pending; |
329 | bool nmi_injected; | 336 | bool nmi_injected; |
337 | bool nmi_window_open; | ||
330 | 338 | ||
331 | u64 mtrr[0x100]; | 339 | struct mtrr_state_type mtrr_state; |
340 | u32 pat; | ||
332 | }; | 341 | }; |
333 | 342 | ||
334 | struct kvm_mem_alias { | 343 | struct kvm_mem_alias { |
@@ -350,11 +359,13 @@ struct kvm_arch{ | |||
350 | */ | 359 | */ |
351 | struct list_head active_mmu_pages; | 360 | struct list_head active_mmu_pages; |
352 | struct list_head assigned_dev_head; | 361 | struct list_head assigned_dev_head; |
362 | struct list_head oos_global_pages; | ||
353 | struct dmar_domain *intel_iommu_domain; | 363 | struct dmar_domain *intel_iommu_domain; |
354 | struct kvm_pic *vpic; | 364 | struct kvm_pic *vpic; |
355 | struct kvm_ioapic *vioapic; | 365 | struct kvm_ioapic *vioapic; |
356 | struct kvm_pit *vpit; | 366 | struct kvm_pit *vpit; |
357 | struct hlist_head irq_ack_notifier_list; | 367 | struct hlist_head irq_ack_notifier_list; |
368 | int vapics_in_nmi_mode; | ||
358 | 369 | ||
359 | int round_robin_prev_vcpu; | 370 | int round_robin_prev_vcpu; |
360 | unsigned int tss_addr; | 371 | unsigned int tss_addr; |
@@ -378,6 +389,7 @@ struct kvm_vm_stat { | |||
378 | u32 mmu_recycled; | 389 | u32 mmu_recycled; |
379 | u32 mmu_cache_miss; | 390 | u32 mmu_cache_miss; |
380 | u32 mmu_unsync; | 391 | u32 mmu_unsync; |
392 | u32 mmu_unsync_global; | ||
381 | u32 remote_tlb_flush; | 393 | u32 remote_tlb_flush; |
382 | u32 lpages; | 394 | u32 lpages; |
383 | }; | 395 | }; |
@@ -397,6 +409,7 @@ struct kvm_vcpu_stat { | |||
397 | u32 halt_exits; | 409 | u32 halt_exits; |
398 | u32 halt_wakeup; | 410 | u32 halt_wakeup; |
399 | u32 request_irq_exits; | 411 | u32 request_irq_exits; |
412 | u32 request_nmi_exits; | ||
400 | u32 irq_exits; | 413 | u32 irq_exits; |
401 | u32 host_state_reload; | 414 | u32 host_state_reload; |
402 | u32 efer_reload; | 415 | u32 efer_reload; |
@@ -405,6 +418,7 @@ struct kvm_vcpu_stat { | |||
405 | u32 insn_emulation_fail; | 418 | u32 insn_emulation_fail; |
406 | u32 hypercalls; | 419 | u32 hypercalls; |
407 | u32 irq_injections; | 420 | u32 irq_injections; |
421 | u32 nmi_injections; | ||
408 | }; | 422 | }; |
409 | 423 | ||
410 | struct descriptor_table { | 424 | struct descriptor_table { |
@@ -477,6 +491,7 @@ struct kvm_x86_ops { | |||
477 | 491 | ||
478 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); | 492 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
479 | int (*get_tdp_level)(void); | 493 | int (*get_tdp_level)(void); |
494 | int (*get_mt_mask_shift)(void); | ||
480 | }; | 495 | }; |
481 | 496 | ||
482 | extern struct kvm_x86_ops *kvm_x86_ops; | 497 | extern struct kvm_x86_ops *kvm_x86_ops; |
@@ -490,7 +505,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu); | |||
490 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); | 505 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); |
491 | void kvm_mmu_set_base_ptes(u64 base_pte); | 506 | void kvm_mmu_set_base_ptes(u64 base_pte); |
492 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, | 507 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, |
493 | u64 dirty_mask, u64 nx_mask, u64 x_mask); | 508 | u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask); |
494 | 509 | ||
495 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | 510 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
496 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); | 511 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); |
@@ -587,12 +602,14 @@ unsigned long segment_base(u16 selector); | |||
587 | 602 | ||
588 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); | 603 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); |
589 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | 604 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
590 | const u8 *new, int bytes); | 605 | const u8 *new, int bytes, |
606 | bool guest_initiated); | ||
591 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); | 607 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); |
592 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | 608 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); |
593 | int kvm_mmu_load(struct kvm_vcpu *vcpu); | 609 | int kvm_mmu_load(struct kvm_vcpu *vcpu); |
594 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); | 610 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); |
595 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); | 611 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); |
612 | void kvm_mmu_sync_global(struct kvm_vcpu *vcpu); | ||
596 | 613 | ||
597 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | 614 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); |
598 | 615 | ||
@@ -607,6 +624,8 @@ void kvm_disable_tdp(void); | |||
607 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); | 624 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); |
608 | int complete_pio(struct kvm_vcpu *vcpu); | 625 | int complete_pio(struct kvm_vcpu *vcpu); |
609 | 626 | ||
627 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); | ||
628 | |||
610 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | 629 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) |
611 | { | 630 | { |
612 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); | 631 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); |
@@ -702,18 +721,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) | |||
702 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); | 721 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
703 | } | 722 | } |
704 | 723 | ||
705 | #define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" | ||
706 | #define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" | ||
707 | #define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" | ||
708 | #define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" | ||
709 | #define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" | ||
710 | #define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" | ||
711 | #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" | ||
712 | #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" | ||
713 | #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" | ||
714 | #define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" | ||
715 | #define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" | ||
716 | |||
717 | #define MSR_IA32_TIME_STAMP_COUNTER 0x010 | 724 | #define MSR_IA32_TIME_STAMP_COUNTER 0x010 |
718 | 725 | ||
719 | #define TSS_IOPB_BASE_OFFSET 0x66 | 726 | #define TSS_IOPB_BASE_OFFSET 0x66 |
diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h index 25179a29f208..6a159732881a 100644 --- a/arch/x86/include/asm/kvm_x86_emulate.h +++ b/arch/x86/include/asm/kvm_x86_emulate.h | |||
@@ -123,6 +123,7 @@ struct decode_cache { | |||
123 | u8 ad_bytes; | 123 | u8 ad_bytes; |
124 | u8 rex_prefix; | 124 | u8 rex_prefix; |
125 | struct operand src; | 125 | struct operand src; |
126 | struct operand src2; | ||
126 | struct operand dst; | 127 | struct operand dst; |
127 | bool has_seg_override; | 128 | bool has_seg_override; |
128 | u8 seg_override; | 129 | u8 seg_override; |
@@ -146,22 +147,18 @@ struct x86_emulate_ctxt { | |||
146 | /* Register state before/after emulation. */ | 147 | /* Register state before/after emulation. */ |
147 | struct kvm_vcpu *vcpu; | 148 | struct kvm_vcpu *vcpu; |
148 | 149 | ||
149 | /* Linear faulting address (if emulating a page-faulting instruction) */ | ||
150 | unsigned long eflags; | 150 | unsigned long eflags; |
151 | |||
152 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 151 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
153 | int mode; | 152 | int mode; |
154 | |||
155 | u32 cs_base; | 153 | u32 cs_base; |
156 | 154 | ||
157 | /* decode cache */ | 155 | /* decode cache */ |
158 | |||
159 | struct decode_cache decode; | 156 | struct decode_cache decode; |
160 | }; | 157 | }; |
161 | 158 | ||
162 | /* Repeat String Operation Prefix */ | 159 | /* Repeat String Operation Prefix */ |
163 | #define REPE_PREFIX 1 | 160 | #define REPE_PREFIX 1 |
164 | #define REPNE_PREFIX 2 | 161 | #define REPNE_PREFIX 2 |
165 | 162 | ||
166 | /* Execution mode, passed to the emulator. */ | 163 | /* Execution mode, passed to the emulator. */ |
167 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | 164 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ |
@@ -170,7 +167,7 @@ struct x86_emulate_ctxt { | |||
170 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | 167 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ |
171 | 168 | ||
172 | /* Host execution mode. */ | 169 | /* Host execution mode. */ |
173 | #if defined(__i386__) | 170 | #if defined(CONFIG_X86_32) |
174 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 | 171 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 |
175 | #elif defined(CONFIG_X86_64) | 172 | #elif defined(CONFIG_X86_64) |
176 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 | 173 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 |
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index 7c1e4258b31e..cb988aab716d 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h | |||
@@ -57,6 +57,31 @@ struct mtrr_gentry { | |||
57 | }; | 57 | }; |
58 | #endif /* !__i386__ */ | 58 | #endif /* !__i386__ */ |
59 | 59 | ||
60 | struct mtrr_var_range { | ||
61 | u32 base_lo; | ||
62 | u32 base_hi; | ||
63 | u32 mask_lo; | ||
64 | u32 mask_hi; | ||
65 | }; | ||
66 | |||
67 | /* In the Intel processor's MTRR interface, the MTRR type is always held in | ||
68 | an 8 bit field: */ | ||
69 | typedef u8 mtrr_type; | ||
70 | |||
71 | #define MTRR_NUM_FIXED_RANGES 88 | ||
72 | #define MTRR_MAX_VAR_RANGES 256 | ||
73 | |||
74 | struct mtrr_state_type { | ||
75 | struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES]; | ||
76 | mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES]; | ||
77 | unsigned char enabled; | ||
78 | unsigned char have_fixed; | ||
79 | mtrr_type def_type; | ||
80 | }; | ||
81 | |||
82 | #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) | ||
83 | #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) | ||
84 | |||
60 | /* These are the various ioctls */ | 85 | /* These are the various ioctls */ |
61 | #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) | 86 | #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) |
62 | #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) | 87 | #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) |
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h new file mode 100644 index 000000000000..1b8afa78e869 --- /dev/null +++ b/arch/x86/include/asm/svm.h | |||
@@ -0,0 +1,328 @@ | |||
1 | #ifndef __SVM_H | ||
2 | #define __SVM_H | ||
3 | |||
4 | enum { | ||
5 | INTERCEPT_INTR, | ||
6 | INTERCEPT_NMI, | ||
7 | INTERCEPT_SMI, | ||
8 | INTERCEPT_INIT, | ||
9 | INTERCEPT_VINTR, | ||
10 | INTERCEPT_SELECTIVE_CR0, | ||
11 | INTERCEPT_STORE_IDTR, | ||
12 | INTERCEPT_STORE_GDTR, | ||
13 | INTERCEPT_STORE_LDTR, | ||
14 | INTERCEPT_STORE_TR, | ||
15 | INTERCEPT_LOAD_IDTR, | ||
16 | INTERCEPT_LOAD_GDTR, | ||
17 | INTERCEPT_LOAD_LDTR, | ||
18 | INTERCEPT_LOAD_TR, | ||
19 | INTERCEPT_RDTSC, | ||
20 | INTERCEPT_RDPMC, | ||
21 | INTERCEPT_PUSHF, | ||
22 | INTERCEPT_POPF, | ||
23 | INTERCEPT_CPUID, | ||
24 | INTERCEPT_RSM, | ||
25 | INTERCEPT_IRET, | ||
26 | INTERCEPT_INTn, | ||
27 | INTERCEPT_INVD, | ||
28 | INTERCEPT_PAUSE, | ||
29 | INTERCEPT_HLT, | ||
30 | INTERCEPT_INVLPG, | ||
31 | INTERCEPT_INVLPGA, | ||
32 | INTERCEPT_IOIO_PROT, | ||
33 | INTERCEPT_MSR_PROT, | ||
34 | INTERCEPT_TASK_SWITCH, | ||
35 | INTERCEPT_FERR_FREEZE, | ||
36 | INTERCEPT_SHUTDOWN, | ||
37 | INTERCEPT_VMRUN, | ||
38 | INTERCEPT_VMMCALL, | ||
39 | INTERCEPT_VMLOAD, | ||
40 | INTERCEPT_VMSAVE, | ||
41 | INTERCEPT_STGI, | ||
42 | INTERCEPT_CLGI, | ||
43 | INTERCEPT_SKINIT, | ||
44 | INTERCEPT_RDTSCP, | ||
45 | INTERCEPT_ICEBP, | ||
46 | INTERCEPT_WBINVD, | ||
47 | INTERCEPT_MONITOR, | ||
48 | INTERCEPT_MWAIT, | ||
49 | INTERCEPT_MWAIT_COND, | ||
50 | }; | ||
51 | |||
52 | |||
53 | struct __attribute__ ((__packed__)) vmcb_control_area { | ||
54 | u16 intercept_cr_read; | ||
55 | u16 intercept_cr_write; | ||
56 | u16 intercept_dr_read; | ||
57 | u16 intercept_dr_write; | ||
58 | u32 intercept_exceptions; | ||
59 | u64 intercept; | ||
60 | u8 reserved_1[44]; | ||
61 | u64 iopm_base_pa; | ||
62 | u64 msrpm_base_pa; | ||
63 | u64 tsc_offset; | ||
64 | u32 asid; | ||
65 | u8 tlb_ctl; | ||
66 | u8 reserved_2[3]; | ||
67 | u32 int_ctl; | ||
68 | u32 int_vector; | ||
69 | u32 int_state; | ||
70 | u8 reserved_3[4]; | ||
71 | u32 exit_code; | ||
72 | u32 exit_code_hi; | ||
73 | u64 exit_info_1; | ||
74 | u64 exit_info_2; | ||
75 | u32 exit_int_info; | ||
76 | u32 exit_int_info_err; | ||
77 | u64 nested_ctl; | ||
78 | u8 reserved_4[16]; | ||
79 | u32 event_inj; | ||
80 | u32 event_inj_err; | ||
81 | u64 nested_cr3; | ||
82 | u64 lbr_ctl; | ||
83 | u8 reserved_5[832]; | ||
84 | }; | ||
85 | |||
86 | |||
87 | #define TLB_CONTROL_DO_NOTHING 0 | ||
88 | #define TLB_CONTROL_FLUSH_ALL_ASID 1 | ||
89 | |||
90 | #define V_TPR_MASK 0x0f | ||
91 | |||
92 | #define V_IRQ_SHIFT 8 | ||
93 | #define V_IRQ_MASK (1 << V_IRQ_SHIFT) | ||
94 | |||
95 | #define V_INTR_PRIO_SHIFT 16 | ||
96 | #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) | ||
97 | |||
98 | #define V_IGN_TPR_SHIFT 20 | ||
99 | #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) | ||
100 | |||
101 | #define V_INTR_MASKING_SHIFT 24 | ||
102 | #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) | ||
103 | |||
104 | #define SVM_INTERRUPT_SHADOW_MASK 1 | ||
105 | |||
106 | #define SVM_IOIO_STR_SHIFT 2 | ||
107 | #define SVM_IOIO_REP_SHIFT 3 | ||
108 | #define SVM_IOIO_SIZE_SHIFT 4 | ||
109 | #define SVM_IOIO_ASIZE_SHIFT 7 | ||
110 | |||
111 | #define SVM_IOIO_TYPE_MASK 1 | ||
112 | #define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT) | ||
113 | #define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT) | ||
114 | #define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) | ||
115 | #define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) | ||
116 | |||
117 | struct __attribute__ ((__packed__)) vmcb_seg { | ||
118 | u16 selector; | ||
119 | u16 attrib; | ||
120 | u32 limit; | ||
121 | u64 base; | ||
122 | }; | ||
123 | |||
124 | struct __attribute__ ((__packed__)) vmcb_save_area { | ||
125 | struct vmcb_seg es; | ||
126 | struct vmcb_seg cs; | ||
127 | struct vmcb_seg ss; | ||
128 | struct vmcb_seg ds; | ||
129 | struct vmcb_seg fs; | ||
130 | struct vmcb_seg gs; | ||
131 | struct vmcb_seg gdtr; | ||
132 | struct vmcb_seg ldtr; | ||
133 | struct vmcb_seg idtr; | ||
134 | struct vmcb_seg tr; | ||
135 | u8 reserved_1[43]; | ||
136 | u8 cpl; | ||
137 | u8 reserved_2[4]; | ||
138 | u64 efer; | ||
139 | u8 reserved_3[112]; | ||
140 | u64 cr4; | ||
141 | u64 cr3; | ||
142 | u64 cr0; | ||
143 | u64 dr7; | ||
144 | u64 dr6; | ||
145 | u64 rflags; | ||
146 | u64 rip; | ||
147 | u8 reserved_4[88]; | ||
148 | u64 rsp; | ||
149 | u8 reserved_5[24]; | ||
150 | u64 rax; | ||
151 | u64 star; | ||
152 | u64 lstar; | ||
153 | u64 cstar; | ||
154 | u64 sfmask; | ||
155 | u64 kernel_gs_base; | ||
156 | u64 sysenter_cs; | ||
157 | u64 sysenter_esp; | ||
158 | u64 sysenter_eip; | ||
159 | u64 cr2; | ||
160 | u8 reserved_6[32]; | ||
161 | u64 g_pat; | ||
162 | u64 dbgctl; | ||
163 | u64 br_from; | ||
164 | u64 br_to; | ||
165 | u64 last_excp_from; | ||
166 | u64 last_excp_to; | ||
167 | }; | ||
168 | |||
169 | struct __attribute__ ((__packed__)) vmcb { | ||
170 | struct vmcb_control_area control; | ||
171 | struct vmcb_save_area save; | ||
172 | }; | ||
173 | |||
174 | #define SVM_CPUID_FEATURE_SHIFT 2 | ||
175 | #define SVM_CPUID_FUNC 0x8000000a | ||
176 | |||
177 | #define MSR_EFER_SVME_MASK (1ULL << 12) | ||
178 | #define MSR_VM_CR 0xc0010114 | ||
179 | #define MSR_VM_HSAVE_PA 0xc0010117ULL | ||
180 | |||
181 | #define SVM_VM_CR_SVM_DISABLE 4 | ||
182 | |||
183 | #define SVM_SELECTOR_S_SHIFT 4 | ||
184 | #define SVM_SELECTOR_DPL_SHIFT 5 | ||
185 | #define SVM_SELECTOR_P_SHIFT 7 | ||
186 | #define SVM_SELECTOR_AVL_SHIFT 8 | ||
187 | #define SVM_SELECTOR_L_SHIFT 9 | ||
188 | #define SVM_SELECTOR_DB_SHIFT 10 | ||
189 | #define SVM_SELECTOR_G_SHIFT 11 | ||
190 | |||
191 | #define SVM_SELECTOR_TYPE_MASK (0xf) | ||
192 | #define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT) | ||
193 | #define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT) | ||
194 | #define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT) | ||
195 | #define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT) | ||
196 | #define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT) | ||
197 | #define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT) | ||
198 | #define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT) | ||
199 | |||
200 | #define SVM_SELECTOR_WRITE_MASK (1 << 1) | ||
201 | #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK | ||
202 | #define SVM_SELECTOR_CODE_MASK (1 << 3) | ||
203 | |||
204 | #define INTERCEPT_CR0_MASK 1 | ||
205 | #define INTERCEPT_CR3_MASK (1 << 3) | ||
206 | #define INTERCEPT_CR4_MASK (1 << 4) | ||
207 | #define INTERCEPT_CR8_MASK (1 << 8) | ||
208 | |||
209 | #define INTERCEPT_DR0_MASK 1 | ||
210 | #define INTERCEPT_DR1_MASK (1 << 1) | ||
211 | #define INTERCEPT_DR2_MASK (1 << 2) | ||
212 | #define INTERCEPT_DR3_MASK (1 << 3) | ||
213 | #define INTERCEPT_DR4_MASK (1 << 4) | ||
214 | #define INTERCEPT_DR5_MASK (1 << 5) | ||
215 | #define INTERCEPT_DR6_MASK (1 << 6) | ||
216 | #define INTERCEPT_DR7_MASK (1 << 7) | ||
217 | |||
218 | #define SVM_EVTINJ_VEC_MASK 0xff | ||
219 | |||
220 | #define SVM_EVTINJ_TYPE_SHIFT 8 | ||
221 | #define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT) | ||
222 | |||
223 | #define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT) | ||
224 | #define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT) | ||
225 | #define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT) | ||
226 | #define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT) | ||
227 | |||
228 | #define SVM_EVTINJ_VALID (1 << 31) | ||
229 | #define SVM_EVTINJ_VALID_ERR (1 << 11) | ||
230 | |||
231 | #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK | ||
232 | |||
233 | #define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR | ||
234 | #define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI | ||
235 | #define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT | ||
236 | #define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT | ||
237 | |||
238 | #define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID | ||
239 | #define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR | ||
240 | |||
241 | #define SVM_EXITINFOSHIFT_TS_REASON_IRET 36 | ||
242 | #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 | ||
243 | |||
244 | #define SVM_EXIT_READ_CR0 0x000 | ||
245 | #define SVM_EXIT_READ_CR3 0x003 | ||
246 | #define SVM_EXIT_READ_CR4 0x004 | ||
247 | #define SVM_EXIT_READ_CR8 0x008 | ||
248 | #define SVM_EXIT_WRITE_CR0 0x010 | ||
249 | #define SVM_EXIT_WRITE_CR3 0x013 | ||
250 | #define SVM_EXIT_WRITE_CR4 0x014 | ||
251 | #define SVM_EXIT_WRITE_CR8 0x018 | ||
252 | #define SVM_EXIT_READ_DR0 0x020 | ||
253 | #define SVM_EXIT_READ_DR1 0x021 | ||
254 | #define SVM_EXIT_READ_DR2 0x022 | ||
255 | #define SVM_EXIT_READ_DR3 0x023 | ||
256 | #define SVM_EXIT_READ_DR4 0x024 | ||
257 | #define SVM_EXIT_READ_DR5 0x025 | ||
258 | #define SVM_EXIT_READ_DR6 0x026 | ||
259 | #define SVM_EXIT_READ_DR7 0x027 | ||
260 | #define SVM_EXIT_WRITE_DR0 0x030 | ||
261 | #define SVM_EXIT_WRITE_DR1 0x031 | ||
262 | #define SVM_EXIT_WRITE_DR2 0x032 | ||
263 | #define SVM_EXIT_WRITE_DR3 0x033 | ||
264 | #define SVM_EXIT_WRITE_DR4 0x034 | ||
265 | #define SVM_EXIT_WRITE_DR5 0x035 | ||
266 | #define SVM_EXIT_WRITE_DR6 0x036 | ||
267 | #define SVM_EXIT_WRITE_DR7 0x037 | ||
268 | #define SVM_EXIT_EXCP_BASE 0x040 | ||
269 | #define SVM_EXIT_INTR 0x060 | ||
270 | #define SVM_EXIT_NMI 0x061 | ||
271 | #define SVM_EXIT_SMI 0x062 | ||
272 | #define SVM_EXIT_INIT 0x063 | ||
273 | #define SVM_EXIT_VINTR 0x064 | ||
274 | #define SVM_EXIT_CR0_SEL_WRITE 0x065 | ||
275 | #define SVM_EXIT_IDTR_READ 0x066 | ||
276 | #define SVM_EXIT_GDTR_READ 0x067 | ||
277 | #define SVM_EXIT_LDTR_READ 0x068 | ||
278 | #define SVM_EXIT_TR_READ 0x069 | ||
279 | #define SVM_EXIT_IDTR_WRITE 0x06a | ||
280 | #define SVM_EXIT_GDTR_WRITE 0x06b | ||
281 | #define SVM_EXIT_LDTR_WRITE 0x06c | ||
282 | #define SVM_EXIT_TR_WRITE 0x06d | ||
283 | #define SVM_EXIT_RDTSC 0x06e | ||
284 | #define SVM_EXIT_RDPMC 0x06f | ||
285 | #define SVM_EXIT_PUSHF 0x070 | ||
286 | #define SVM_EXIT_POPF 0x071 | ||
287 | #define SVM_EXIT_CPUID 0x072 | ||
288 | #define SVM_EXIT_RSM 0x073 | ||
289 | #define SVM_EXIT_IRET 0x074 | ||
290 | #define SVM_EXIT_SWINT 0x075 | ||
291 | #define SVM_EXIT_INVD 0x076 | ||
292 | #define SVM_EXIT_PAUSE 0x077 | ||
293 | #define SVM_EXIT_HLT 0x078 | ||
294 | #define SVM_EXIT_INVLPG 0x079 | ||
295 | #define SVM_EXIT_INVLPGA 0x07a | ||
296 | #define SVM_EXIT_IOIO 0x07b | ||
297 | #define SVM_EXIT_MSR 0x07c | ||
298 | #define SVM_EXIT_TASK_SWITCH 0x07d | ||
299 | #define SVM_EXIT_FERR_FREEZE 0x07e | ||
300 | #define SVM_EXIT_SHUTDOWN 0x07f | ||
301 | #define SVM_EXIT_VMRUN 0x080 | ||
302 | #define SVM_EXIT_VMMCALL 0x081 | ||
303 | #define SVM_EXIT_VMLOAD 0x082 | ||
304 | #define SVM_EXIT_VMSAVE 0x083 | ||
305 | #define SVM_EXIT_STGI 0x084 | ||
306 | #define SVM_EXIT_CLGI 0x085 | ||
307 | #define SVM_EXIT_SKINIT 0x086 | ||
308 | #define SVM_EXIT_RDTSCP 0x087 | ||
309 | #define SVM_EXIT_ICEBP 0x088 | ||
310 | #define SVM_EXIT_WBINVD 0x089 | ||
311 | #define SVM_EXIT_MONITOR 0x08a | ||
312 | #define SVM_EXIT_MWAIT 0x08b | ||
313 | #define SVM_EXIT_MWAIT_COND 0x08c | ||
314 | #define SVM_EXIT_NPF 0x400 | ||
315 | |||
316 | #define SVM_EXIT_ERR -1 | ||
317 | |||
318 | #define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ | ||
319 | |||
320 | #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" | ||
321 | #define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" | ||
322 | #define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb" | ||
323 | #define SVM_CLGI ".byte 0x0f, 0x01, 0xdd" | ||
324 | #define SVM_STGI ".byte 0x0f, 0x01, 0xdc" | ||
325 | #define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" | ||
326 | |||
327 | #endif | ||
328 | |||
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h new file mode 100644 index 000000000000..593636275238 --- /dev/null +++ b/arch/x86/include/asm/virtext.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* CPU virtualization extensions handling | ||
2 | * | ||
3 | * This should carry the code for handling CPU virtualization extensions | ||
4 | * that needs to live in the kernel core. | ||
5 | * | ||
6 | * Author: Eduardo Habkost <ehabkost@redhat.com> | ||
7 | * | ||
8 | * Copyright (C) 2008, Red Hat Inc. | ||
9 | * | ||
10 | * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc. | ||
11 | * | ||
12 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
13 | * the COPYING file in the top-level directory. | ||
14 | */ | ||
15 | #ifndef _ASM_X86_VIRTEX_H | ||
16 | #define _ASM_X86_VIRTEX_H | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <asm/system.h> | ||
20 | |||
21 | #include <asm/vmx.h> | ||
22 | #include <asm/svm.h> | ||
23 | |||
24 | /* | ||
25 | * VMX functions: | ||
26 | */ | ||
27 | |||
28 | static inline int cpu_has_vmx(void) | ||
29 | { | ||
30 | unsigned long ecx = cpuid_ecx(1); | ||
31 | return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ | ||
32 | } | ||
33 | |||
34 | |||
35 | /** Disable VMX on the current CPU | ||
36 | * | ||
37 | * vmxoff causes a undefined-opcode exception if vmxon was not run | ||
38 | * on the CPU previously. Only call this function if you know VMX | ||
39 | * is enabled. | ||
40 | */ | ||
41 | static inline void cpu_vmxoff(void) | ||
42 | { | ||
43 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | ||
44 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | ||
45 | } | ||
46 | |||
47 | static inline int cpu_vmx_enabled(void) | ||
48 | { | ||
49 | return read_cr4() & X86_CR4_VMXE; | ||
50 | } | ||
51 | |||
52 | /** Disable VMX if it is enabled on the current CPU | ||
53 | * | ||
54 | * You shouldn't call this if cpu_has_vmx() returns 0. | ||
55 | */ | ||
56 | static inline void __cpu_emergency_vmxoff(void) | ||
57 | { | ||
58 | if (cpu_vmx_enabled()) | ||
59 | cpu_vmxoff(); | ||
60 | } | ||
61 | |||
62 | /** Disable VMX if it is supported and enabled on the current CPU | ||
63 | */ | ||
64 | static inline void cpu_emergency_vmxoff(void) | ||
65 | { | ||
66 | if (cpu_has_vmx()) | ||
67 | __cpu_emergency_vmxoff(); | ||
68 | } | ||
69 | |||
70 | |||
71 | |||
72 | |||
73 | /* | ||
74 | * SVM functions: | ||
75 | */ | ||
76 | |||
77 | /** Check if the CPU has SVM support | ||
78 | * | ||
79 | * You can use the 'msg' arg to get a message describing the problem, | ||
80 | * if the function returns zero. Simply pass NULL if you are not interested | ||
81 | * on the messages; gcc should take care of not generating code for | ||
82 | * the messages on this case. | ||
83 | */ | ||
84 | static inline int cpu_has_svm(const char **msg) | ||
85 | { | ||
86 | uint32_t eax, ebx, ecx, edx; | ||
87 | |||
88 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
89 | if (msg) | ||
90 | *msg = "not amd"; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | cpuid(0x80000000, &eax, &ebx, &ecx, &edx); | ||
95 | if (eax < SVM_CPUID_FUNC) { | ||
96 | if (msg) | ||
97 | *msg = "can't execute cpuid_8000000a"; | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | cpuid(0x80000001, &eax, &ebx, &ecx, &edx); | ||
102 | if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { | ||
103 | if (msg) | ||
104 | *msg = "svm not available"; | ||
105 | return 0; | ||
106 | } | ||
107 | return 1; | ||
108 | } | ||
109 | |||
110 | |||
111 | /** Disable SVM on the current CPU | ||
112 | * | ||
113 | * You should call this only if cpu_has_svm() returned true. | ||
114 | */ | ||
115 | static inline void cpu_svm_disable(void) | ||
116 | { | ||
117 | uint64_t efer; | ||
118 | |||
119 | wrmsrl(MSR_VM_HSAVE_PA, 0); | ||
120 | rdmsrl(MSR_EFER, efer); | ||
121 | wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); | ||
122 | } | ||
123 | |||
124 | /** Makes sure SVM is disabled, if it is supported on the CPU | ||
125 | */ | ||
126 | static inline void cpu_emergency_svm_disable(void) | ||
127 | { | ||
128 | if (cpu_has_svm(NULL)) | ||
129 | cpu_svm_disable(); | ||
130 | } | ||
131 | |||
132 | #endif /* _ASM_X86_VIRTEX_H */ | ||
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h new file mode 100644 index 000000000000..d0238e6151d8 --- /dev/null +++ b/arch/x86/include/asm/vmx.h | |||
@@ -0,0 +1,382 @@ | |||
1 | #ifndef VMX_H | ||
2 | #define VMX_H | ||
3 | |||
4 | /* | ||
5 | * vmx.h: VMX Architecture related definitions | ||
6 | * Copyright (c) 2004, Intel Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * A few random additions are: | ||
22 | * Copyright (C) 2006 Qumranet | ||
23 | * Avi Kivity <avi@qumranet.com> | ||
24 | * Yaniv Kamay <yaniv@qumranet.com> | ||
25 | * | ||
26 | */ | ||
27 | |||
/*
 * Definitions of Primary Processor-Based VM-Execution Controls
 * (bits written to the CPU_BASED_VM_EXEC_CONTROL VMCS field).
 */
#define CPU_BASED_VIRTUAL_INTR_PENDING          0x00000004
/* NOTE(review): "OFFSETING" (sic) is the established name; kept as-is. */
#define CPU_BASED_USE_TSC_OFFSETING             0x00000008
#define CPU_BASED_HLT_EXITING                   0x00000080
#define CPU_BASED_INVLPG_EXITING                0x00000200
#define CPU_BASED_MWAIT_EXITING                 0x00000400
#define CPU_BASED_RDPMC_EXITING                 0x00000800
#define CPU_BASED_RDTSC_EXITING                 0x00001000
#define CPU_BASED_CR3_LOAD_EXITING              0x00008000
#define CPU_BASED_CR3_STORE_EXITING             0x00010000
#define CPU_BASED_CR8_LOAD_EXITING              0x00080000
#define CPU_BASED_CR8_STORE_EXITING             0x00100000
#define CPU_BASED_TPR_SHADOW                    0x00200000
#define CPU_BASED_VIRTUAL_NMI_PENDING           0x00400000
#define CPU_BASED_MOV_DR_EXITING                0x00800000
#define CPU_BASED_UNCOND_IO_EXITING             0x01000000
#define CPU_BASED_USE_IO_BITMAPS                0x02000000
#define CPU_BASED_USE_MSR_BITMAPS               0x10000000
#define CPU_BASED_MONITOR_EXITING               0x20000000
#define CPU_BASED_PAUSE_EXITING                 0x40000000
/* Bit 31: makes the secondary controls below take effect. */
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   0x80000000
/*
 * Definitions of Secondary Processor-Based VM-Execution Controls
 * (bits written to the SECONDARY_VM_EXEC_CONTROL VMCS field).
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT               0x00000002
#define SECONDARY_EXEC_ENABLE_VPID              0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING           0x00000040


/* Pin-Based VM-Execution Controls (PIN_BASED_VM_EXEC_CONTROL field). */
#define PIN_BASED_EXT_INTR_MASK                 0x00000001
#define PIN_BASED_NMI_EXITING                   0x00000008
#define PIN_BASED_VIRTUAL_NMIS                  0x00000020

/* VM-Exit Controls (VM_EXIT_CONTROLS field). */
#define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
#define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
#define VM_EXIT_SAVE_IA32_PAT                   0x00040000
#define VM_EXIT_LOAD_IA32_PAT                   0x00080000

/* VM-Entry Controls (VM_ENTRY_CONTROLS field). */
#define VM_ENTRY_IA32E_MODE                     0x00000200
#define VM_ENTRY_SMM                            0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
#define VM_ENTRY_LOAD_IA32_PAT                  0x00004000
74 | /* VMCS Encodings */ | ||
75 | enum vmcs_field { | ||
76 | VIRTUAL_PROCESSOR_ID = 0x00000000, | ||
77 | GUEST_ES_SELECTOR = 0x00000800, | ||
78 | GUEST_CS_SELECTOR = 0x00000802, | ||
79 | GUEST_SS_SELECTOR = 0x00000804, | ||
80 | GUEST_DS_SELECTOR = 0x00000806, | ||
81 | GUEST_FS_SELECTOR = 0x00000808, | ||
82 | GUEST_GS_SELECTOR = 0x0000080a, | ||
83 | GUEST_LDTR_SELECTOR = 0x0000080c, | ||
84 | GUEST_TR_SELECTOR = 0x0000080e, | ||
85 | HOST_ES_SELECTOR = 0x00000c00, | ||
86 | HOST_CS_SELECTOR = 0x00000c02, | ||
87 | HOST_SS_SELECTOR = 0x00000c04, | ||
88 | HOST_DS_SELECTOR = 0x00000c06, | ||
89 | HOST_FS_SELECTOR = 0x00000c08, | ||
90 | HOST_GS_SELECTOR = 0x00000c0a, | ||
91 | HOST_TR_SELECTOR = 0x00000c0c, | ||
92 | IO_BITMAP_A = 0x00002000, | ||
93 | IO_BITMAP_A_HIGH = 0x00002001, | ||
94 | IO_BITMAP_B = 0x00002002, | ||
95 | IO_BITMAP_B_HIGH = 0x00002003, | ||
96 | MSR_BITMAP = 0x00002004, | ||
97 | MSR_BITMAP_HIGH = 0x00002005, | ||
98 | VM_EXIT_MSR_STORE_ADDR = 0x00002006, | ||
99 | VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, | ||
100 | VM_EXIT_MSR_LOAD_ADDR = 0x00002008, | ||
101 | VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, | ||
102 | VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, | ||
103 | VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, | ||
104 | TSC_OFFSET = 0x00002010, | ||
105 | TSC_OFFSET_HIGH = 0x00002011, | ||
106 | VIRTUAL_APIC_PAGE_ADDR = 0x00002012, | ||
107 | VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, | ||
108 | APIC_ACCESS_ADDR = 0x00002014, | ||
109 | APIC_ACCESS_ADDR_HIGH = 0x00002015, | ||
110 | EPT_POINTER = 0x0000201a, | ||
111 | EPT_POINTER_HIGH = 0x0000201b, | ||
112 | GUEST_PHYSICAL_ADDRESS = 0x00002400, | ||
113 | GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, | ||
114 | VMCS_LINK_POINTER = 0x00002800, | ||
115 | VMCS_LINK_POINTER_HIGH = 0x00002801, | ||
116 | GUEST_IA32_DEBUGCTL = 0x00002802, | ||
117 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, | ||
118 | GUEST_IA32_PAT = 0x00002804, | ||
119 | GUEST_IA32_PAT_HIGH = 0x00002805, | ||
120 | GUEST_PDPTR0 = 0x0000280a, | ||
121 | GUEST_PDPTR0_HIGH = 0x0000280b, | ||
122 | GUEST_PDPTR1 = 0x0000280c, | ||
123 | GUEST_PDPTR1_HIGH = 0x0000280d, | ||
124 | GUEST_PDPTR2 = 0x0000280e, | ||
125 | GUEST_PDPTR2_HIGH = 0x0000280f, | ||
126 | GUEST_PDPTR3 = 0x00002810, | ||
127 | GUEST_PDPTR3_HIGH = 0x00002811, | ||
128 | HOST_IA32_PAT = 0x00002c00, | ||
129 | HOST_IA32_PAT_HIGH = 0x00002c01, | ||
130 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, | ||
131 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, | ||
132 | EXCEPTION_BITMAP = 0x00004004, | ||
133 | PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, | ||
134 | PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, | ||
135 | CR3_TARGET_COUNT = 0x0000400a, | ||
136 | VM_EXIT_CONTROLS = 0x0000400c, | ||
137 | VM_EXIT_MSR_STORE_COUNT = 0x0000400e, | ||
138 | VM_EXIT_MSR_LOAD_COUNT = 0x00004010, | ||
139 | VM_ENTRY_CONTROLS = 0x00004012, | ||
140 | VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, | ||
141 | VM_ENTRY_INTR_INFO_FIELD = 0x00004016, | ||
142 | VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, | ||
143 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | ||
144 | TPR_THRESHOLD = 0x0000401c, | ||
145 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | ||
146 | VM_INSTRUCTION_ERROR = 0x00004400, | ||
147 | VM_EXIT_REASON = 0x00004402, | ||
148 | VM_EXIT_INTR_INFO = 0x00004404, | ||
149 | VM_EXIT_INTR_ERROR_CODE = 0x00004406, | ||
150 | IDT_VECTORING_INFO_FIELD = 0x00004408, | ||
151 | IDT_VECTORING_ERROR_CODE = 0x0000440a, | ||
152 | VM_EXIT_INSTRUCTION_LEN = 0x0000440c, | ||
153 | VMX_INSTRUCTION_INFO = 0x0000440e, | ||
154 | GUEST_ES_LIMIT = 0x00004800, | ||
155 | GUEST_CS_LIMIT = 0x00004802, | ||
156 | GUEST_SS_LIMIT = 0x00004804, | ||
157 | GUEST_DS_LIMIT = 0x00004806, | ||
158 | GUEST_FS_LIMIT = 0x00004808, | ||
159 | GUEST_GS_LIMIT = 0x0000480a, | ||
160 | GUEST_LDTR_LIMIT = 0x0000480c, | ||
161 | GUEST_TR_LIMIT = 0x0000480e, | ||
162 | GUEST_GDTR_LIMIT = 0x00004810, | ||
163 | GUEST_IDTR_LIMIT = 0x00004812, | ||
164 | GUEST_ES_AR_BYTES = 0x00004814, | ||
165 | GUEST_CS_AR_BYTES = 0x00004816, | ||
166 | GUEST_SS_AR_BYTES = 0x00004818, | ||
167 | GUEST_DS_AR_BYTES = 0x0000481a, | ||
168 | GUEST_FS_AR_BYTES = 0x0000481c, | ||
169 | GUEST_GS_AR_BYTES = 0x0000481e, | ||
170 | GUEST_LDTR_AR_BYTES = 0x00004820, | ||
171 | GUEST_TR_AR_BYTES = 0x00004822, | ||
172 | GUEST_INTERRUPTIBILITY_INFO = 0x00004824, | ||
173 | GUEST_ACTIVITY_STATE = 0X00004826, | ||
174 | GUEST_SYSENTER_CS = 0x0000482A, | ||
175 | HOST_IA32_SYSENTER_CS = 0x00004c00, | ||
176 | CR0_GUEST_HOST_MASK = 0x00006000, | ||
177 | CR4_GUEST_HOST_MASK = 0x00006002, | ||
178 | CR0_READ_SHADOW = 0x00006004, | ||
179 | CR4_READ_SHADOW = 0x00006006, | ||
180 | CR3_TARGET_VALUE0 = 0x00006008, | ||
181 | CR3_TARGET_VALUE1 = 0x0000600a, | ||
182 | CR3_TARGET_VALUE2 = 0x0000600c, | ||
183 | CR3_TARGET_VALUE3 = 0x0000600e, | ||
184 | EXIT_QUALIFICATION = 0x00006400, | ||
185 | GUEST_LINEAR_ADDRESS = 0x0000640a, | ||
186 | GUEST_CR0 = 0x00006800, | ||
187 | GUEST_CR3 = 0x00006802, | ||
188 | GUEST_CR4 = 0x00006804, | ||
189 | GUEST_ES_BASE = 0x00006806, | ||
190 | GUEST_CS_BASE = 0x00006808, | ||
191 | GUEST_SS_BASE = 0x0000680a, | ||
192 | GUEST_DS_BASE = 0x0000680c, | ||
193 | GUEST_FS_BASE = 0x0000680e, | ||
194 | GUEST_GS_BASE = 0x00006810, | ||
195 | GUEST_LDTR_BASE = 0x00006812, | ||
196 | GUEST_TR_BASE = 0x00006814, | ||
197 | GUEST_GDTR_BASE = 0x00006816, | ||
198 | GUEST_IDTR_BASE = 0x00006818, | ||
199 | GUEST_DR7 = 0x0000681a, | ||
200 | GUEST_RSP = 0x0000681c, | ||
201 | GUEST_RIP = 0x0000681e, | ||
202 | GUEST_RFLAGS = 0x00006820, | ||
203 | GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, | ||
204 | GUEST_SYSENTER_ESP = 0x00006824, | ||
205 | GUEST_SYSENTER_EIP = 0x00006826, | ||
206 | HOST_CR0 = 0x00006c00, | ||
207 | HOST_CR3 = 0x00006c02, | ||
208 | HOST_CR4 = 0x00006c04, | ||
209 | HOST_FS_BASE = 0x00006c06, | ||
210 | HOST_GS_BASE = 0x00006c08, | ||
211 | HOST_TR_BASE = 0x00006c0a, | ||
212 | HOST_GDTR_BASE = 0x00006c0c, | ||
213 | HOST_IDTR_BASE = 0x00006c0e, | ||
214 | HOST_IA32_SYSENTER_ESP = 0x00006c10, | ||
215 | HOST_IA32_SYSENTER_EIP = 0x00006c12, | ||
216 | HOST_RSP = 0x00006c14, | ||
217 | HOST_RIP = 0x00006c16, | ||
218 | }; | ||
219 | |||
/* Bit 31 of VM_EXIT_REASON is set when the VM entry itself failed. */
#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000

/*
 * Basic VM-exit reasons (low bits of the VM_EXIT_REASON VMCS field).
 * Gaps in the numbering are exit reasons with no definition here.
 */
#define EXIT_REASON_EXCEPTION_NMI           0
#define EXIT_REASON_EXTERNAL_INTERRUPT      1
#define EXIT_REASON_TRIPLE_FAULT            2

#define EXIT_REASON_PENDING_INTERRUPT       7
#define EXIT_REASON_NMI_WINDOW		    8
#define EXIT_REASON_TASK_SWITCH             9
#define EXIT_REASON_CPUID                   10
#define EXIT_REASON_HLT                     12
#define EXIT_REASON_INVLPG                  14
#define EXIT_REASON_RDPMC                   15
#define EXIT_REASON_RDTSC                   16
#define EXIT_REASON_VMCALL                  18
#define EXIT_REASON_VMCLEAR                 19
#define EXIT_REASON_VMLAUNCH                20
#define EXIT_REASON_VMPTRLD                 21
#define EXIT_REASON_VMPTRST                 22
#define EXIT_REASON_VMREAD                  23
#define EXIT_REASON_VMRESUME                24
#define EXIT_REASON_VMWRITE                 25
#define EXIT_REASON_VMOFF                   26
#define EXIT_REASON_VMON                    27
#define EXIT_REASON_CR_ACCESS               28
#define EXIT_REASON_DR_ACCESS               29
#define EXIT_REASON_IO_INSTRUCTION          30
#define EXIT_REASON_MSR_READ                31
#define EXIT_REASON_MSR_WRITE               32
#define EXIT_REASON_MWAIT_INSTRUCTION       36
#define EXIT_REASON_TPR_BELOW_THRESHOLD     43
#define EXIT_REASON_APIC_ACCESS             44
#define EXIT_REASON_EPT_VIOLATION           48
#define EXIT_REASON_EPT_MISCONFIG           49
#define EXIT_REASON_WBINVD                  54
255 | |||
/*
 * Interruption-information format (used by the VM_ENTRY_INTR_INFO_FIELD,
 * VM_EXIT_INTR_INFO and IDT_VECTORING_INFO_FIELD encodings above —
 * presumably all share this layout; confirm against the SDM).
 */
#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
#define INTR_INFO_UNBLOCK_NMI           0x1000          /* 12 */
#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000

/* Aliases used when decoding IDT-vectoring information — same layout. */
#define VECTORING_INFO_VECTOR_MASK           	INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK        	INTR_INFO_INTR_TYPE_MASK
#define VECTORING_INFO_DELIVER_CODE_MASK    	INTR_INFO_DELIVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK       	INTR_INFO_VALID_MASK

/* Values for the INTR_INFO type field (bits 10:8). */
#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
#define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI		0x00000001
#define GUEST_INTR_STATE_MOV_SS		0x00000002
#define GUEST_INTR_STATE_SMI		0x00000004
#define GUEST_INTR_STATE_NMI		0x00000008

/*
 * Exit Qualifications for MOV for Control Register Access
 */
#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control reg.*/
#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
/* Comment fix: 0xf00 spans bits 11:8 (was mislabelled "10:8"). */
#define CONTROL_REG_ACCESS_REG          0xf00   /* 11:8, general purpose reg. */
#define LMSW_SOURCE_DATA_SHIFT 16
#define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
/* General-purpose register encodings for bits 11:8 of the qualification. */
#define REG_EAX                         (0 << 8)
#define REG_ECX                         (1 << 8)
#define REG_EDX                         (2 << 8)
#define REG_EBX                         (3 << 8)
#define REG_ESP                         (4 << 8)
#define REG_EBP                         (5 << 8)
#define REG_ESI                         (6 << 8)
#define REG_EDI                         (7 << 8)
#define REG_R8                         (8 << 8)
#define REG_R9                         (9 << 8)
#define REG_R10                        (10 << 8)
#define REG_R11                        (11 << 8)
#define REG_R12                        (12 << 8)
#define REG_R13                        (13 << 8)
#define REG_R14                        (14 << 8)
#define REG_R15                        (15 << 8)
306 | |||
/*
 * Exit Qualifications for MOV for Debug Register Access
 */
#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug reg. */
#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
#define TYPE_MOV_TO_DR                  (0 << 4)
#define TYPE_MOV_FROM_DR                (1 << 4)
#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose reg. */


/* segment AR */
#define SEGMENT_AR_L_MASK (1 << 13)

/* Segment access-rights: type field (low 4 bits) and its sub-bits. */
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2

/* Other segment access-rights bits. */
#define AR_UNUSABLE_MASK (1 << 16)
#define AR_S_MASK (1 << 4)
#define AR_P_MASK (1 << 7)
#define AR_L_MASK (1 << 13)
#define AR_DB_MASK (1 << 14)
#define AR_G_MASK (1 << 15)
#define AR_DPL_SHIFT 5
/* Extract the 2-bit DPL from an access-rights word. */
#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)

/* NOTE(review): "RESERVD" (sic) — established name, kept as-is. */
#define AR_RESERVD_MASK 0xfffe0f00

/* KVM-internal memslots, placed after the KVM_MEMORY_SLOTS user slots. */
#define TSS_PRIVATE_MEMSLOT			(KVM_MEMORY_SLOTS + 0)
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT	(KVM_MEMORY_SLOTS + 1)
#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT	(KVM_MEMORY_SLOTS + 2)
/* VPIDs are 16-bit identifiers. */
#define VMX_NR_VPIDS				(1 << 16)
/* INVVPID invalidation types. */
#define VMX_VPID_EXTENT_SINGLE_CONTEXT		1
#define VMX_VPID_EXTENT_ALL_CONTEXT		2

/* INVEPT invalidation types, and their capability bits (1ull << 24..26). */
#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR		0
#define VMX_EPT_EXTENT_CONTEXT			1
#define VMX_EPT_EXTENT_GLOBAL			2
#define VMX_EPT_EXTENT_INDIVIDUAL_BIT		(1ull << 24)
#define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
/* EPT pointer / entry layout constants. */
#define VMX_EPT_DEFAULT_GAW			3
#define VMX_EPT_MAX_GAW				0x4
#define VMX_EPT_MT_EPTE_SHIFT			3
#define VMX_EPT_GAW_EPTP_SHIFT			3
#define VMX_EPT_DEFAULT_MT			0x6ull
#define VMX_EPT_READABLE_MASK			0x1ull
#define VMX_EPT_WRITABLE_MASK			0x2ull
#define VMX_EPT_EXECUTABLE_MASK			0x4ull
/* NOTE(review): presumably the EPT "ignore PAT" bit (bit 6) — confirm vs SDM. */
#define VMX_EPT_IGMT_BIT    			(1ull << 6)

#define VMX_EPT_IDENTITY_PAGETABLE_ADDR		0xfffbc000ul


/*
 * Hand-assembled opcode byte sequences for the VMX instructions, usable
 * from inline asm with assemblers that do not know the VMX mnemonics.
 * The _RAX/_RDX suffixes name the implied operand registers.
 */
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
#define ASM_VMX_INVEPT		  ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
#define ASM_VMX_INVVPID		  ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"



#endif /* VMX_H */