path: root/arch/x86/include/asm
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 13:14:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 13:14:24 -0500
commit	55065bc52795faae549abfb912aacc622dd63876 (patch)
tree	63683547e41ed459a2a8747eeafb5e969633d54f /arch/x86/include/asm
parent	008d23e4852d78bb2618f2035f8b2110b6a6b968 (diff)
parent	e5c301428294cb8925667c9ee39f817c4ab1c2c9 (diff)
Merge branch 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (142 commits)
  KVM: Initialize fpu state in preemptible context
  KVM: VMX: when entering real mode align segment base to 16 bytes
  KVM: MMU: handle 'map_writable' in set_spte() function
  KVM: MMU: audit: allow audit more guests at the same time
  KVM: Fetch guest cr3 from hardware on demand
  KVM: Replace reads of vcpu->arch.cr3 by an accessor
  KVM: MMU: only write protect mappings at pagetable level
  KVM: VMX: Correct asm constraint in vmcs_load()/vmcs_clear()
  KVM: MMU: Initialize base_role for tdp mmus
  KVM: VMX: Optimize atomic EFER load
  KVM: VMX: Add definitions for more vm entry/exit control bits
  KVM: SVM: copy instruction bytes from VMCB
  KVM: SVM: implement enhanced INVLPG intercept
  KVM: SVM: enhance mov DR intercept handler
  KVM: SVM: enhance MOV CR intercept handler
  KVM: SVM: add new SVM feature bit names
  KVM: cleanup emulate_instruction
  KVM: move complete_insn_gp() into x86.c
  KVM: x86: fix CR8 handling
  KVM guest: Fix kvm clock initialization when it's configured out
  ...
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/kvm_emulate.h	35
-rw-r--r--	arch/x86/include/asm/kvm_host.h	99
-rw-r--r--	arch/x86/include/asm/kvm_para.h	24
-rw-r--r--	arch/x86/include/asm/svm.h	57
-rw-r--r--	arch/x86/include/asm/traps.h	1
-rw-r--r--	arch/x86/include/asm/vmx.h	15
6 files changed, 174 insertions(+), 57 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b36c6b3fe144..8e37deb1eb38 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -15,6 +15,14 @@
 
 struct x86_emulate_ctxt;
 
+struct x86_exception {
+	u8 vector;
+	bool error_code_valid;
+	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
+};
+
 /*
  * x86_emulate_ops:
  *
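
The struct x86_exception added here carries full exception state (vector, validity flag, error code, nested flag, faulting address) where the emulator callbacks below previously passed a bare u32 *error. A minimal sketch of filling one in, assuming a hypothetical helper that is not part of this diff:

	/* Hypothetical helper: package a guest page fault for injection. */
	static void fill_page_fault(struct x86_exception *fault, u64 cr2,
				    u16 error_code, bool nested)
	{
		fault->vector = 14;		/* #PF */
		fault->error_code_valid = true;
		fault->error_code = error_code;
		fault->nested_page_fault = nested;
		fault->address = cr2;		/* cr2 or nested fault gpa */
	}
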
@@ -64,7 +72,8 @@ struct x86_emulate_ops {
 	 * @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			unsigned int bytes, struct kvm_vcpu *vcpu,
+			struct x86_exception *fault);
 
 	/*
 	 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -74,7 +83,8 @@ struct x86_emulate_ops {
 	 * @bytes: [IN ] Number of bytes to write to memory.
 	 */
 	int (*write_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			unsigned int bytes, struct kvm_vcpu *vcpu,
+			struct x86_exception *fault);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *	  Used for instruction fetch.
@@ -83,7 +93,8 @@ struct x86_emulate_ops {
 	 * @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*fetch)(unsigned long addr, void *val,
-		     unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+		     unsigned int bytes, struct kvm_vcpu *vcpu,
+		     struct x86_exception *fault);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
@@ -94,7 +105,7 @@ struct x86_emulate_ops {
 	int (*read_emulated)(unsigned long addr,
 			     void *val,
 			     unsigned int bytes,
-			     unsigned int *error,
+			     struct x86_exception *fault,
 			     struct kvm_vcpu *vcpu);
 
 	/*
@@ -107,7 +118,7 @@ struct x86_emulate_ops {
 	int (*write_emulated)(unsigned long addr,
 			      const void *val,
 			      unsigned int bytes,
-			      unsigned int *error,
+			      struct x86_exception *fault,
 			      struct kvm_vcpu *vcpu);
 
 	/*
@@ -122,7 +133,7 @@ struct x86_emulate_ops {
 			      const void *old,
 			      const void *new,
 			      unsigned int bytes,
-			      unsigned int *error,
+			      struct x86_exception *fault,
 			      struct kvm_vcpu *vcpu);
 
 	int (*pio_in_emulated)(int size, unsigned short port, void *val,
@@ -159,7 +170,10 @@ struct operand {
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
@@ -226,9 +240,8 @@ struct x86_emulate_ctxt {
 
 	bool perm_ok; /* do not check permissions if true */
 
-	int exception; /* exception that happens during emulation or -1 */
-	u32 error_code; /* error code for exception */
-	bool error_code_valid;
+	bool have_exception;
+	struct x86_exception exception;
 
 	/* decode cache */
 	struct decode_cache decode;
@@ -252,7 +265,7 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
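
x86_decode_insn() now also takes the instruction bytes, if the caller already has them (newer SVM hardware can copy them into the VMCB; see "KVM: SVM: copy instruction bytes from VMCB" in the shortlog). A sketch of an assumed caller; passing NULL/0 keeps the old behaviour of fetching from guest memory:

	/* Illustrative only: insn/insn_len come from the caller, e.g. bytes
	 * captured by hardware; NULL/0 makes the decoder fetch them itself. */
	r = x86_decode_insn(ctxt, insn, insn_len);
	if (r == EMULATION_FAILED)
		return EMULATE_FAIL;
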
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f702f82aa1eb..aa75f21a9fba 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -83,11 +83,14 @@
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
+#define ASYNC_PF_PER_VCPU 64
+
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
 struct kvm;
+struct kvm_async_pf;
 
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
@@ -114,6 +117,7 @@ enum kvm_reg {
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
 };
 
 enum {
@@ -238,16 +242,18 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
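
The new prefault flag lets the MMU try to map a page without injecting a fault into the guest, which the async page fault machinery below relies on when retrying a completed request. Roughly how a retry might invoke it (an assumed call shape; the exact call site is not in this diff):

	/* Assumed retry path: map the page quietly, no guest injection. */
	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true /* prefault */);
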
@@ -315,16 +321,6 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64 address;
-		unsigned error_code;
-		bool nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -412,6 +408,15 @@ struct kvm_vcpu_arch {
 	u64 hv_vapic;
 
 	cpumask_var_t wbinvd_dirty_mask;
+
+	struct {
+		bool halted;
+		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		struct gfn_to_hva_cache data;
+		u64 msr_val;
+		u32 id;
+		bool send_user_only;
+	} apf;
 };
 
 struct kvm_arch {
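
The apf block tracks in-flight async page faults per vcpu; gfns[] is sized with roundup_pow_of_two() so a guest frame number can be hashed into a slot with cheap bit masking. An assumed slot computation, shown only to illustrate why the power-of-two size matters:

	#include <linux/hash.h>

	/* Assumption: some such hash picks the tracking slot. */
	static u32 apf_slot(gfn_t gfn)
	{
		return hash_32((u32)gfn,
			       ilog2(roundup_pow_of_two(ASYNC_PF_PER_VCPU)));
	}
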
@@ -456,6 +461,10 @@ struct kvm_arch {
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+	#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+	#endif
 };
 
 struct kvm_vm_stat {
@@ -529,6 +538,7 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -582,9 +592,17 @@ struct kvm_x86_ops {
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 	const struct trace_print_flags *exit_reasons_str;
 };
 
+struct kvm_arch_async_pf {
+	u32 token;
+	gfn_t gfn;
+	unsigned long cr3;
+	bool direct_map;
+};
+
 extern struct kvm_x86_ops *kvm_x86_ops;
 
 int kvm_mmu_module_init(void);
@@ -594,7 +612,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
-void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -623,8 +640,15 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE (1 << 0)
 #define EMULTYPE_TRAP_UD (1 << 1)
 #define EMULTYPE_SKIP (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, u16 error_code, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+			int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 
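
The renamed x86_emulate_instruction() takes the fault address plus optional pre-captured instruction bytes, while the static inline preserves the short form for the many call sites that have neither. Illustrative call shapes (not verbatim from any one caller):

	er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);	/* no cr2, no bytes */
	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len); /* fault path */
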
@@ -650,7 +674,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
@@ -668,11 +692,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
@@ -690,16 +714,21 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
@@ -766,20 +795,25 @@ enum {
 #define HF_VINTR_MASK (1 << 2)
 #define HF_NMI_MASK (1 << 3)
 #define HF_IRET_MASK (1 << 4)
+#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
 
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
  * Trap the fault and ignore the instruction if that happens.
  */
-asmlinkage void kvm_handle_fault_on_reboot(void);
+asmlinkage void kvm_spurious_fault(void);
+extern bool kvm_rebooting;
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	"666: " insn "\n\t" \
+	"668: \n\t" \
 	".pushsection .fixup, \"ax\" \n" \
 	"667: \n\t" \
+	"cmpb $0, kvm_rebooting \n\t" \
+	"jne 668b \n\t" \
 	__ASM_SIZE(push) " $666b \n\t" \
-	"jmp kvm_handle_fault_on_reboot \n\t" \
+	"call kvm_spurious_fault \n\t" \
 	".popsection \n\t" \
 	".pushsection __ex_table, \"a\" \n\t" \
 	_ASM_PTR " 666b, 667b \n\t" \
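
The fixup now distinguishes two cases: if kvm_rebooting is set, the fault came from virtualization being switched off during reboot and the instruction is simply skipped (jump to label 668, just past it); otherwise kvm_spurious_fault() flags a real bug. Usage keeps its old shape; roughly as the VMX/SVM code wraps it:

	/* Sketch of typical use (as in vmx.c/svm.c). */
	#define __ex(x) __kvm_handle_fault_on_reboot(x)

	asm volatile(__ex("vmxoff") : : : "cc");
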
@@ -799,4 +833,15 @@ void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work);
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 7b562b6184bc..a427bf77a93d 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -20,6 +20,7 @@
  * are available. The use of 0x11 and 0x12 is deprecated
  */
 #define KVM_FEATURE_CLOCKSOURCE2 3
+#define KVM_FEATURE_ASYNC_PF 4
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -32,9 +33,13 @@
 /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
 #define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
 
 #define KVM_MAX_MMU_OP_BATCH 32
 
+#define KVM_ASYNC_PF_ENABLED (1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
+
 /* Operations for KVM_HC_MMU_OP */
 #define KVM_MMU_OP_WRITE_PTE 1
 #define KVM_MMU_OP_FLUSH_TLB 2
@@ -61,10 +66,20 @@ struct kvm_mmu_op_release_pt {
 	__u64 pt_phys;
 };
 
+#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
+#define KVM_PV_REASON_PAGE_READY 2
+
+struct kvm_vcpu_pv_apf_data {
+	__u32 reason;
+	__u8 pad[60];
+	__u32 enabled;
+};
+
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
 extern void kvmclock_init(void);
+extern int kvm_register_clock(char *txt);
 
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
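
A guest opts in by writing the physical address of its per-cpu kvm_vcpu_pv_apf_data, OR'd with the enable bit, to MSR_KVM_ASYNC_PF_EN; the pad keeps the structure to 64 bytes. A sketch of the guest-side enable, assuming a per-cpu variable name not fixed by this header:

	static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);

	static void enable_async_pf(void)	/* hypothetical */
	{
		u64 pa = __pa(&__get_cpu_var(apf_reason));

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
	}
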
@@ -160,8 +175,17 @@ static inline unsigned int kvm_arch_para_features(void)
 
 #ifdef CONFIG_KVM_GUEST
 void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
 #else
 #define kvm_guest_init() do { } while (0)
+#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wake(T) do {} while(0)
+static inline u32 kvm_read_and_reset_pf_reason(void)
+{
+	return 0;
+}
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0e831059ac5a..f2b83bc7d784 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -47,14 +47,13 @@ enum {
 	INTERCEPT_MONITOR,
 	INTERCEPT_MWAIT,
 	INTERCEPT_MWAIT_COND,
+	INTERCEPT_XSETBV,
 };
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 	u8 reserved_1[42];
@@ -81,14 +80,19 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 event_inj_err;
 	u64 nested_cr3;
 	u64 lbr_ctl;
-	u64 reserved_5;
+	u32 clean;
+	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
 #define TLB_CONTROL_DO_NOTHING 0
 #define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
 
 #define V_TPR_MASK 0x0f
 
@@ -204,19 +208,31 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
 
-#define INTERCEPT_CR0_MASK 1
-#define INTERCEPT_CR3_MASK (1 << 3)
-#define INTERCEPT_CR4_MASK (1 << 4)
-#define INTERCEPT_CR8_MASK (1 << 8)
-
-#define INTERCEPT_DR0_MASK 1
-#define INTERCEPT_DR1_MASK (1 << 1)
-#define INTERCEPT_DR2_MASK (1 << 2)
-#define INTERCEPT_DR3_MASK (1 << 3)
-#define INTERCEPT_DR4_MASK (1 << 4)
-#define INTERCEPT_DR5_MASK (1 << 5)
-#define INTERCEPT_DR6_MASK (1 << 6)
-#define INTERCEPT_DR7_MASK (1 << 7)
+#define INTERCEPT_CR0_READ 0
+#define INTERCEPT_CR3_READ 3
+#define INTERCEPT_CR4_READ 4
+#define INTERCEPT_CR8_READ 8
+#define INTERCEPT_CR0_WRITE (16 + 0)
+#define INTERCEPT_CR3_WRITE (16 + 3)
+#define INTERCEPT_CR4_WRITE (16 + 4)
+#define INTERCEPT_CR8_WRITE (16 + 8)
+
+#define INTERCEPT_DR0_READ 0
+#define INTERCEPT_DR1_READ 1
+#define INTERCEPT_DR2_READ 2
+#define INTERCEPT_DR3_READ 3
+#define INTERCEPT_DR4_READ 4
+#define INTERCEPT_DR5_READ 5
+#define INTERCEPT_DR6_READ 6
+#define INTERCEPT_DR7_READ 7
+#define INTERCEPT_DR0_WRITE (16 + 0)
+#define INTERCEPT_DR1_WRITE (16 + 1)
+#define INTERCEPT_DR2_WRITE (16 + 2)
+#define INTERCEPT_DR3_WRITE (16 + 3)
+#define INTERCEPT_DR4_WRITE (16 + 4)
+#define INTERCEPT_DR5_WRITE (16 + 5)
+#define INTERCEPT_DR6_WRITE (16 + 6)
+#define INTERCEPT_DR7_WRITE (16 + 7)
 
 #define SVM_EVTINJ_VEC_MASK 0xff
 
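
With the two u16 read/write masks folded into one u32 per register class, read intercepts occupy bits 0-15 and write intercepts bits 16-31, which is exactly what the (16 + n) encodings spell out. Setting and clearing a bit then looks like:

	/* Illustrative: flat bit layout of the new intercept words. */
	vmcb->control.intercept_cr |= (1U << INTERCEPT_CR3_WRITE);  /* trap CR3 writes */
	vmcb->control.intercept_dr &= ~(1U << INTERCEPT_DR7_READ);  /* pass DR7 reads */
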
@@ -246,6 +262,8 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define SVM_EXIT_READ_CR0 0x000
 #define SVM_EXIT_READ_CR3 0x003
 #define SVM_EXIT_READ_CR4 0x004
@@ -316,6 +334,7 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXIT_MONITOR 0x08a
 #define SVM_EXIT_MWAIT 0x08b
 #define SVM_EXIT_MWAIT_COND 0x08c
+#define SVM_EXIT_XSETBV 0x08d
 #define SVM_EXIT_NPF 0x400
 
 #define SVM_EXIT_ERR -1
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index f66cda56781d..0310da67307f 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -30,6 +30,7 @@ asmlinkage void segment_not_present(void);
 asmlinkage void stack_segment(void);
 asmlinkage void general_protection(void);
 asmlinkage void page_fault(void);
+asmlinkage void async_page_fault(void);
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void coprocessor_error(void);
 asmlinkage void alignment_check(void);
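
async_page_fault is installed as the guest's #PF entry once KVM_FEATURE_ASYNC_PF is negotiated; its C handler dispatches on the reason word from kvm_vcpu_pv_apf_data. Paraphrased from the guest-side handler this series adds (not verbatim):

	dotraplinkage void do_async_page_fault(struct pt_regs *regs,
					       unsigned long error_code)
	{
		switch (kvm_read_and_reset_pf_reason()) {
		default:
			do_page_fault(regs, error_code);	/* ordinary #PF */
			break;
		case KVM_PV_REASON_PAGE_NOT_PRESENT:
			kvm_async_pf_task_wait((u32)read_cr2());	/* host paging in */
			break;
		case KVM_PV_REASON_PAGE_READY:
			kvm_async_pf_task_wake((u32)read_cr2());	/* page arrived */
			break;
		}
	}
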
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9f0cbd987d50..84471b810460 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -66,15 +66,23 @@
 #define PIN_BASED_NMI_EXITING 0x00000008
 #define PIN_BASED_VIRTUAL_NMIS 0x00000020
 
+#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000002
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
+#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
 #define VM_EXIT_SAVE_IA32_PAT 0x00040000
 #define VM_EXIT_LOAD_IA32_PAT 0x00080000
+#define VM_EXIT_SAVE_IA32_EFER 0x00100000
+#define VM_EXIT_LOAD_IA32_EFER 0x00200000
+#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
 
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000002
 #define VM_ENTRY_IA32E_MODE 0x00000200
 #define VM_ENTRY_SMM 0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
+#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
 #define VM_ENTRY_LOAD_IA32_PAT 0x00004000
+#define VM_ENTRY_LOAD_IA32_EFER 0x00008000
 
 /* VMCS Encodings */
 enum vmcs_field {
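
Whether optional controls such as VM_EXIT_LOAD_IA32_EFER may be set is advertised in the allowed-1 half (bits 63:32) of the matching VMX capability MSR. A sketch of the probe, under that assumption:

	/* MSR_IA32_VMX_EXIT_CTLS reports exit-control capabilities. */
	static bool cpu_has_load_efer_on_exit(void)	/* illustrative */
	{
		u32 low, high;

		rdmsr(MSR_IA32_VMX_EXIT_CTLS, low, high);
		return high & VM_EXIT_LOAD_IA32_EFER;	/* allowed-1 bits */
	}
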
@@ -239,6 +247,7 @@ enum vmcs_field {
 #define EXIT_REASON_TASK_SWITCH 9
 #define EXIT_REASON_CPUID 10
 #define EXIT_REASON_HLT 12
+#define EXIT_REASON_INVD 13
 #define EXIT_REASON_INVLPG 14
 #define EXIT_REASON_RDPMC 15
 #define EXIT_REASON_RDTSC 16
@@ -296,6 +305,12 @@ enum vmcs_field {
 #define GUEST_INTR_STATE_SMI 0x00000004
 #define GUEST_INTR_STATE_NMI 0x00000008
 
+/* GUEST_ACTIVITY_STATE flags */
+#define GUEST_ACTIVITY_ACTIVE 0
+#define GUEST_ACTIVITY_HLT 1
+#define GUEST_ACTIVITY_SHUTDOWN 2
+#define GUEST_ACTIVITY_WAIT_SIPI 3
+
 /*
  * Exit Qualifications for MOV for Control Register Access
  */