path: root/include/asm-x86/kvm_host.h
Diffstat (limited to 'include/asm-x86/kvm_host.h')
-rw-r--r--  include/asm-x86/kvm_host.h | 72
1 file changed, 55 insertions(+), 17 deletions(-)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 844f2a89afbc..bc34dc21f178 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -27,6 +27,7 @@
 #define KVM_PRIVATE_MEM_SLOTS 4
 
 #define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
@@ -79,6 +80,7 @@
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_NR_VAR_MTRR 8
 
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
@@ -109,12 +111,12 @@ enum {
 };
 
 enum {
+        VCPU_SREG_ES,
         VCPU_SREG_CS,
+        VCPU_SREG_SS,
         VCPU_SREG_DS,
-        VCPU_SREG_ES,
         VCPU_SREG_FS,
         VCPU_SREG_GS,
-        VCPU_SREG_SS,
         VCPU_SREG_TR,
         VCPU_SREG_LDTR,
 };
@@ -243,6 +245,7 @@ struct kvm_vcpu_arch {
         gfn_t last_pt_write_gfn;
         int last_pt_write_count;
         u64 *last_pte_updated;
+        gfn_t last_pte_gfn;
 
         struct {
                 gfn_t gfn;      /* presumed gfn during guest pte update */
@@ -287,6 +290,10 @@ struct kvm_vcpu_arch {
         unsigned int hv_clock_tsc_khz;
         unsigned int time_offset;
         struct page *time_page;
+
+        bool nmi_pending;
+
+        u64 mtrr[0x100];
 };
 
 struct kvm_mem_alias {
@@ -344,6 +351,7 @@ struct kvm_vcpu_stat {
         u32 mmio_exits;
         u32 signal_exits;
         u32 irq_window_exits;
+        u32 nmi_window_exits;
         u32 halt_exits;
         u32 halt_wakeup;
         u32 request_irq_exits;
@@ -379,7 +387,6 @@ struct kvm_x86_ops {
         void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
         void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
         void (*vcpu_put)(struct kvm_vcpu *vcpu);
-        void (*vcpu_decache)(struct kvm_vcpu *vcpu);
 
         int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                                struct kvm_debug_guest *dbg);
@@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                     unsigned long value);
 
+void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                                int type_bits, int seg);
+
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
                            u32 error_code);
 
+void kvm_inject_nmi(struct kvm_vcpu *vcpu);
+
 void fx_init(struct kvm_vcpu *vcpu);
 
 int emulator_read_std(unsigned long addr,
@@ -543,6 +556,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
 void kvm_enable_tdp(void);
+void kvm_disable_tdp(void);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
@@ -554,55 +568,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
         return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 read_fs(void)
+static inline u16 kvm_read_fs(void)
 {
         u16 seg;
         asm("mov %%fs, %0" : "=g"(seg));
         return seg;
 }
 
-static inline u16 read_gs(void)
+static inline u16 kvm_read_gs(void)
 {
         u16 seg;
         asm("mov %%gs, %0" : "=g"(seg));
         return seg;
 }
 
-static inline u16 read_ldt(void)
+static inline u16 kvm_read_ldt(void)
 {
         u16 ldt;
         asm("sldt %0" : "=g"(ldt));
         return ldt;
 }
 
-static inline void load_fs(u16 sel)
+static inline void kvm_load_fs(u16 sel)
 {
         asm("mov %0, %%fs" : : "rm"(sel));
 }
 
-static inline void load_gs(u16 sel)
+static inline void kvm_load_gs(u16 sel)
 {
         asm("mov %0, %%gs" : : "rm"(sel));
 }
 
-#ifndef load_ldt
-static inline void load_ldt(u16 sel)
+static inline void kvm_load_ldt(u16 sel)
 {
         asm("lldt %0" : : "rm"(sel));
 }
-#endif
 
-static inline void get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct descriptor_table *table)
 {
         asm("sidt %0" : "=m"(*table));
 }
 
-static inline void get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct descriptor_table *table)
 {
         asm("sgdt %0" : "=m"(*table));
 }
 
-static inline unsigned long read_tr_base(void)
+static inline unsigned long kvm_read_tr_base(void)
 {
         u16 tr;
         asm("str %0" : "=g"(tr));
@@ -619,17 +631,17 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void fx_save(struct i387_fxsave_struct *image)
+static inline void kvm_fx_save(struct i387_fxsave_struct *image)
 {
         asm("fxsave (%0)":: "r" (image));
 }
 
-static inline void fx_restore(struct i387_fxsave_struct *image)
+static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
 {
         asm("fxrstor (%0)":: "r" (image));
 }
 
-static inline void fx_finit(void)
+static inline void kvm_fx_finit(void)
 {
         asm("finit");
 }
@@ -691,4 +703,30 @@ enum {
         trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
                    vcpu, 0, 0, 0, 0, 0, 0)
 
+#ifdef CONFIG_64BIT
+# define KVM_EX_ENTRY ".quad"
+# define KVM_EX_PUSH "pushq"
+#else
+# define KVM_EX_ENTRY ".long"
+# define KVM_EX_PUSH "pushl"
+#endif
+
+/*
+ * Hardware virtualization extension instructions may fault if a
+ * reboot turns off virtualization while processes are running.
+ * Trap the fault and ignore the instruction if that happens.
+ */
+asmlinkage void kvm_handle_fault_on_reboot(void);
+
+#define __kvm_handle_fault_on_reboot(insn) \
+        "666: " insn "\n\t" \
+        ".pushsection .text.fixup, \"ax\" \n" \
+        "667: \n\t" \
+        KVM_EX_PUSH " $666b \n\t" \
+        "jmp kvm_handle_fault_on_reboot \n\t" \
+        ".popsection \n\t" \
+        ".pushsection __ex_table, \"a\" \n\t" \
+        KVM_EX_ENTRY " 666b, 667b \n\t" \
+        ".popsection"
+
 #endif
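
Note on the fault-on-reboot fixup added above: __kvm_handle_fault_on_reboot() expands to a string meant to be pasted into an asm() statement. It places the wrapped instruction at a local label (666:), emits a fixup stub that pushes that address and jumps to kvm_handle_fault_on_reboot(), and adds an __ex_table entry tying the two together, so a fault raised after a reboot has already disabled virtualization is trapped instead of oopsing. A minimal sketch of how a caller might wrap a VMX instruction follows; the __ex() shorthand, the example_vmxoff() helper, and the raw .byte encoding are illustrative assumptions, not part of this header.

/* Illustrative sketch only; names below are assumptions, not from this diff. */
#define __ex(insn) __kvm_handle_fault_on_reboot(insn)

static inline void example_vmxoff(void)
{
        /*
         * VMXOFF encoded as raw bytes (0f 01 c4).  If virtualization was
         * already turned off by an emergency reboot, the instruction
         * faults; the __ex_table entry emitted by the macro redirects the
         * CPU to kvm_handle_fault_on_reboot() rather than letting the
         * fault take down the kernel.
         */
        asm volatile(__ex(".byte 0x0f, 0x01, 0xc4") : : : "cc");
}

Because the scheme reuses the kernel's existing exception-table mechanism, the fast path pays no extra cost; the fixup code only runs when the wrapped instruction actually faults.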