Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 210
1 file changed, 172 insertions(+), 38 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c52e2eb40a1e..d2ac8e2ee897 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -30,14 +30,30 @@
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MMIO_SIZE 16
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
+#define CR0_RESERVED_BITS                                               \
+	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
+
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
 				  0xFFFFFF0000000000ULL)
+#define CR4_RESERVED_BITS                                               \
+	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
+			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+			  | X86_CR4_OSXSAVE \
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+
+#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+
+
 
 #define INVALID_PAGE (~(hpa_t)0)
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
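The reserved-bit masks moved here are consumed by the control-register write paths in arch/x86/kvm/x86.c, which report failure to the caller rather than injecting #GP themselves. A minimal sketch of the pattern, simplified from the kvm_set_cr8() of this series (the caller raises #GP on a nonzero return):

	int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
	{
		/* reject attempts to set bits above the TPR field */
		if (cr8 & CR8_RESERVED_BITS)
			return 1;	/* caller injects #GP */
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_lapic_set_tpr(vcpu, cr8);
		else
			vcpu->arch.cr8 = cr8;
		return 0;
	}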
@@ -79,15 +95,18 @@
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
-#define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
-extern spinlock_t kvm_lock;
+#define ASYNC_PF_PER_VCPU 64
+
+extern raw_spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
 struct kvm;
+struct kvm_async_pf;
 
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
@@ -114,6 +133,10 @@ enum kvm_reg {
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
+	VCPU_EXREG_RFLAGS,
+	VCPU_EXREG_CPL,
+	VCPU_EXREG_SEGMENTS,
 };
 
 enum {
@@ -236,26 +259,39 @@ struct kvm_pio_request {
  */
 struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
+	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
+	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
 	int root_level;
 	int shadow_root_level;
 	union kvm_mmu_page_role base_role;
+	bool direct_map;
 
 	u64 *pae_root;
+	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
+
+	bool nx;
+
+	u64 pdptrs[4]; /* pae */
 };
 
 struct kvm_vcpu_arch {
-	u64 host_tsc;
 	/*
 	 * rip and regs accesses must go through
 	 * kvm_{register,rip}_{read,write} functions.
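The page_fault callback grows a 'prefault' flag so the async-pf path can walk and map a gfn without injecting anything into the guest. A sketch of the synchronous dispatch in kvm_mmu_page_fault(), simplified from the x86 mmu code of this series:

	/* synchronous fault: prefault == false, so the mmu may inject */
	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);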
@@ -272,7 +308,6 @@ struct kvm_vcpu_arch {
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
 	u32 hflags;
-	u64 pdptrs[4]; /* pae */
 	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
@@ -282,7 +317,31 @@ struct kvm_vcpu_arch {
 	u64 ia32_misc_enable_msr;
 	bool tpr_access_reporting;
 
+	/*
+	 * Paging state of the vcpu
+	 *
+	 * If the vcpu runs in guest mode with two level paging this still saves
+	 * the paging mode of the l1 guest. This context is always used to
+	 * handle faults.
+	 */
 	struct kvm_mmu mmu;
+
+	/*
+	 * Paging state of an L2 guest (used for nested npt)
+	 *
+	 * This context will save all necessary information to walk page tables
+	 * of the an L2 guest. This context is only initialized for page table
+	 * walking and not for faulting since we never handle l2 page faults on
+	 * the host.
+	 */
+	struct kvm_mmu nested_mmu;
+
+	/*
+	 * Pointer to the mmu context currently used for
+	 * gva_to_gpa translations.
+	 */
+	struct kvm_mmu *walk_mmu;
+
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
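walk_mmu is what keeps nested and non-nested setups on a single translation path: it points at 'mmu' normally and at 'nested_mmu' while an L2 guest runs. The gva_to_gpa helpers route through it; simplified from the x86.c side of this series:

	gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
				       struct x86_exception *exception)
	{
		u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

		access |= PFERR_WRITE_MASK;
		/* translate via whichever context is current for this vcpu */
		return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
	}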
@@ -297,16 +356,9 @@ struct kvm_vcpu_arch {
 	u64  *last_pte_updated;
 	gfn_t last_pte_gfn;
 
-	struct {
-		gfn_t gfn;	/* presumed gfn during guest pte update */
-		pfn_t pfn;	/* pfn corresponding to that gfn */
-		unsigned long mmu_seq;
-	} update_pte;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
-	gva_t mmio_fault_cr2;
 	struct kvm_pio_request pio;
 	void *pio_data;
 
@@ -333,12 +385,22 @@ struct kvm_vcpu_arch {
 	/* emulate context */
 
 	struct x86_emulate_ctxt emulate_ctxt;
+	bool emulate_regs_need_sync_to_vcpu;
+	bool emulate_regs_need_sync_from_vcpu;
 
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
-	unsigned int hv_clock_tsc_khz;
+	unsigned int hw_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
+	u64 last_guest_tsc;
+	u64 last_kernel_ns;
+	u64 last_tsc_nsec;
+	u64 last_tsc_write;
+	u32 virtual_tsc_khz;
+	bool tsc_catchup;
+	u32 tsc_catchup_mult;
+	s8 tsc_catchup_shift;
 
 	bool nmi_pending;
 	bool nmi_injected;
@@ -364,12 +426,21 @@ struct kvm_vcpu_arch {
 	u64 hv_vapic;
 
 	cpumask_var_t wbinvd_dirty_mask;
+
+	struct {
+		bool halted;
+		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		struct gfn_to_hva_cache data;
+		u64 msr_val;
+		u32 id;
+		bool send_user_only;
+	} apf;
 };
 
 struct kvm_arch {
-	unsigned int n_free_mmu_pages;
+	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
+	unsigned int n_max_mmu_pages;
 	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
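apf.gfns is a small open-addressed hash of guest frames with async page faults in flight; sizing it with roundup_pow_of_two() makes the probe wrap a cheap mask. A sketch of the hashing this array gets in x86.c (names follow that file, lightly simplified):

	static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
	{
		return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
	}

	static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		u32 key = kvm_async_pf_hash_fn(gfn);

		/* linear probe; ~0 marks an empty slot */
		while (vcpu->arch.apf.gfns[key] != ~0)
			key = (key + 1) &
			      (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);

		vcpu->arch.apf.gfns[key] = gfn;
	}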
@@ -394,14 +465,21 @@ struct kvm_arch {
 	gpa_t ept_identity_map_addr;
 
 	unsigned long irq_sources_bitmap;
-	u64 vm_init_tsc;
 	s64 kvmclock_offset;
+	raw_spinlock_t tsc_write_lock;
+	u64 last_tsc_nsec;
+	u64 last_tsc_offset;
+	u64 last_tsc_write;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+	#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+	#endif
 };
 
 struct kvm_vm_stat {
@@ -443,6 +521,8 @@ struct kvm_vcpu_stat {
 	u32 nmi_injections;
 };
 
+struct x86_instruction_info;
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -475,6 +555,7 @@ struct kvm_x86_ops {
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -505,6 +586,7 @@ struct kvm_x86_ops {
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject);
+	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -517,14 +599,35 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+
+	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
 
 	bool (*has_wbinvd_exit)(void);
 
+	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
+	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+
+	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*check_intercept)(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
+struct kvm_arch_async_pf {
+	u32 token;
+	gfn_t gfn;
+	unsigned long cr3;
+	bool direct_map;
+};
+
 extern struct kvm_x86_ops *kvm_x86_ops;
 
 int kvm_mmu_module_init(void);
@@ -534,7 +637,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
-void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -544,7 +646,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
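load_pdptrs() now names the mmu whose root is being loaded, so PDPTEs can be read through the correct translation context under nesting. The read it builds on, from the matching x86.c change in this series (simplified):

	/* fetch the PDPTE block via this mmu's gfn translation */
	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK | PFERR_WRITE_MASK);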
@@ -554,6 +656,13 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
+/* control of guest tsc rate supported? */
+extern bool kvm_has_tsc_control;
+/* minimum supported tsc_khz for guests */
+extern u32 kvm_min_guest_tsc_khz;
+/* maximum supported tsc_khz for guests */
+extern u32 kvm_max_guest_tsc_khz;
+
 enum emulation_result {
 	EMULATE_DONE,         /* no further processing */
 	EMULATE_DO_MMIO,      /* kvm_run filled with mmio request */
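These externs advertise hardware TSC scaling (SVM's TSC ratio) and its supported range. A hedged sketch of the validation a KVM_SET_TSC_KHZ-style request would get (the helper name is illustrative, not from this header):

	/* illustrative helper: accept a requested guest TSC rate only if
	 * hardware scaling exists and the rate is within bounds */
	static int example_check_tsc_khz(u32 user_tsc_khz)
	{
		if (!kvm_has_tsc_control)
			return -EINVAL;
		if (user_tsc_khz < kvm_min_guest_tsc_khz ||
		    user_tsc_khz > kvm_max_guest_tsc_khz)
			return -EINVAL;
		return 0;
	}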
@@ -563,10 +672,14 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, u16 error_code, int emulation_type);
-void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
-void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+			int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
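The inline wrapper keeps the common call shape: no faulting address, no pre-decoded instruction bytes. Typical use, in the style of the #UD intercept handlers of this period:

	/* try to emulate a trapped #UD; requeue it if emulation fails */
	er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(vcpu, UD_VECTOR);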
@@ -577,8 +690,6 @@ struct x86_emulate_ctxt;
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
-int emulate_clts(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -590,7 +701,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
@@ -608,8 +719,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
-			   u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			    gfn_t gfn, void *data, int offset, int len,
+			    u32 access);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
@@ -627,16 +741,19 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
-
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
@@ -703,20 +820,25 @@ enum {
 #define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
+#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
 
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
  * Trap the fault and ignore the instruction if that happens.
  */
-asmlinkage void kvm_handle_fault_on_reboot(void);
+asmlinkage void kvm_spurious_fault(void);
+extern bool kvm_rebooting;
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	"666: " insn "\n\t" \
+	"668: \n\t" \
 	".pushsection .fixup, \"ax\" \n" \
 	"667: \n\t" \
+	"cmpb $0, kvm_rebooting \n\t" \
+	"jne 668b \n\t" \
 	__ASM_SIZE(push) " $666b \n\t" \
-	"jmp kvm_handle_fault_on_reboot \n\t" \
+	"call kvm_spurious_fault \n\t" \
 	".popsection \n\t" \
 	".pushsection __ex_table, \"a\" \n\t" \
 	_ASM_PTR " 666b, 667b \n\t" \
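Backends wrap each VT-x/SVM instruction in this macro: a fault taken because virtualization was already shut down is skipped (the fixup jumps back to label 668) when kvm_rebooting is set; otherwise it calls kvm_spurious_fault(). A sketch in the shape of the VMX backend, which spells the wrapper __ex():

	#define __ex(x) __kvm_handle_fault_on_reboot(x)

	static void kvm_cpu_vmxoff(void)
	{
		/* if VMXOFF faults during a reboot race, execution simply
		 * resumes past the instruction instead of oopsing */
		asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
	}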
@@ -725,6 +847,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
@@ -736,4 +859,15 @@ void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work);
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */