Diffstat (limited to 'include/asm-x86/kvm_host.h')
 -rw-r--r--	include/asm-x86/kvm_host.h	71
1 file changed, 54 insertions, 17 deletions
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 844f2a89afbc..fdde0bedaa90 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -27,6 +27,7 @@
 #define KVM_PRIVATE_MEM_SLOTS 4
 
 #define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
@@ -79,6 +80,7 @@
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_NR_VAR_MTRR 8
 
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
@@ -109,12 +111,12 @@ enum {
 };
 
 enum {
+	VCPU_SREG_ES,
 	VCPU_SREG_CS,
+	VCPU_SREG_SS,
 	VCPU_SREG_DS,
-	VCPU_SREG_ES,
 	VCPU_SREG_FS,
 	VCPU_SREG_GS,
-	VCPU_SREG_SS,
 	VCPU_SREG_TR,
 	VCPU_SREG_LDTR,
 };
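
Note on the hunk above: the VCPU_SREG_* reordering appears intended to make the enum values match the hardware segment register encoding (ES=0, CS=1, SS=2, DS=3, FS=4, GS=5), so code such as the task-switch emulation can index segments directly by their hardware numbers. A hypothetical compile-time check, not part of the patch, illustrates the correspondence (assumes the kernel's BUILD_BUG_ON from <linux/kernel.h>):

	/* Hypothetical sanity check: after the reorder each enum value
	 * equals the hardware segment register encoding. */
	static inline void vcpu_sreg_encoding_check(void)
	{
		BUILD_BUG_ON(VCPU_SREG_ES != 0 || VCPU_SREG_CS != 1 ||
			     VCPU_SREG_SS != 2 || VCPU_SREG_DS != 3 ||
			     VCPU_SREG_FS != 4 || VCPU_SREG_GS != 5);
	}
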
@@ -243,6 +245,7 @@ struct kvm_vcpu_arch {
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
 	u64 *last_pte_updated;
+	gfn_t last_pte_gfn;
 
 	struct {
 		gfn_t gfn;	/* presumed gfn during guest pte update */
@@ -287,6 +290,10 @@ struct kvm_vcpu_arch {
 	unsigned int hv_clock_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
+
+	bool nmi_pending;
+
+	u64 mtrr[0x100];
 };
 
 struct kvm_mem_alias {
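
Note on the hunk above: the new kvm_vcpu_arch fields back two separate features. nmi_pending presumably marks an NMI queued for injection at the next guest entry, paired with kvm_inject_nmi() and the nmi_window_exits counter added below; mtrr[0x100] holds the guest-visible MTRR MSR state, sized to cover the KVM_NR_VAR_MTRR variable-range pairs plus the fixed-range and default-type registers. A plausible wrmsr-side sketch, assuming the array is indexed by MSR number relative to IA32_MTRR_PHYSBASE0 (0x200); the helper name and indexing are illustrative, not taken from this patch:

	#define EXAMPLE_MTRR_MSR_BASE 0x200	/* IA32_MTRR_PHYSBASE0 */

	/* Illustrative sketch: store a guest MTRR MSR write into the
	 * vcpu->arch.mtrr[] backing array. Offsets 0x00..0xff cover the
	 * variable-range base/mask pairs (0x200..0x20f with
	 * KVM_NR_VAR_MTRR == 8), the fixed-range MTRRs and MTRRdefType. */
	static int example_set_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	{
		if (msr < EXAMPLE_MTRR_MSR_BASE ||
		    msr >= EXAMPLE_MTRR_MSR_BASE + 0x100)
			return 1;	/* not an MTRR MSR in this range */
		vcpu->arch.mtrr[msr - EXAMPLE_MTRR_MSR_BASE] = data;
		return 0;
	}
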
@@ -344,6 +351,7 @@ struct kvm_vcpu_stat {
 	u32 mmio_exits;
 	u32 signal_exits;
 	u32 irq_window_exits;
+	u32 nmi_window_exits;
 	u32 halt_exits;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
@@ -379,7 +387,6 @@ struct kvm_x86_ops {
 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
-	void (*vcpu_decache)(struct kvm_vcpu *vcpu);
 
 	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
 			       struct kvm_debug_guest *dbg);
@@ -497,6 +504,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
+void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+				int type_bits, int seg);
+
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -515,6 +526,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
 
+void kvm_inject_nmi(struct kvm_vcpu *vcpu);
+
 void fx_init(struct kvm_vcpu *vcpu);
 
 int emulator_read_std(unsigned long addr,
@@ -554,55 +567,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 read_fs(void)
+static inline u16 kvm_read_fs(void)
 {
 	u16 seg;
 	asm("mov %%fs, %0" : "=g"(seg));
 	return seg;
 }
 
-static inline u16 read_gs(void)
+static inline u16 kvm_read_gs(void)
 {
 	u16 seg;
 	asm("mov %%gs, %0" : "=g"(seg));
 	return seg;
 }
 
-static inline u16 read_ldt(void)
+static inline u16 kvm_read_ldt(void)
 {
 	u16 ldt;
 	asm("sldt %0" : "=g"(ldt));
 	return ldt;
 }
 
-static inline void load_fs(u16 sel)
+static inline void kvm_load_fs(u16 sel)
 {
 	asm("mov %0, %%fs" : : "rm"(sel));
 }
 
-static inline void load_gs(u16 sel)
+static inline void kvm_load_gs(u16 sel)
 {
 	asm("mov %0, %%gs" : : "rm"(sel));
 }
 
-#ifndef load_ldt
-static inline void load_ldt(u16 sel)
+static inline void kvm_load_ldt(u16 sel)
 {
 	asm("lldt %0" : : "rm"(sel));
 }
-#endif
 
-static inline void get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct descriptor_table *table)
 {
 	asm("sidt %0" : "=m"(*table));
 }
 
-static inline void get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct descriptor_table *table)
 {
 	asm("sgdt %0" : "=m"(*table));
 }
 
-static inline unsigned long read_tr_base(void)
+static inline unsigned long kvm_read_tr_base(void)
 {
 	u16 tr;
 	asm("str %0" : "=g"(tr));
@@ -619,17 +630,17 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void fx_save(struct i387_fxsave_struct *image)
+static inline void kvm_fx_save(struct i387_fxsave_struct *image)
 {
 	asm("fxsave (%0)":: "r" (image));
 }
 
-static inline void fx_restore(struct i387_fxsave_struct *image)
+static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
 {
 	asm("fxrstor (%0)":: "r" (image));
 }
 
-static inline void fx_finit(void)
+static inline void kvm_fx_finit(void)
 {
 	asm("finit");
 }
@@ -691,4 +702,30 @@ enum {
 	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
 			vcpu, 0, 0, 0, 0, 0, 0)
 
+#ifdef CONFIG_64BIT
+# define KVM_EX_ENTRY ".quad"
+# define KVM_EX_PUSH "pushq"
+#else
+# define KVM_EX_ENTRY ".long"
+# define KVM_EX_PUSH "pushl"
+#endif
+
+/*
+ * Hardware virtualization extension instructions may fault if a
+ * reboot turns off virtualization while processes are running.
+ * Trap the fault and ignore the instruction if that happens.
+ */
+asmlinkage void kvm_handle_fault_on_reboot(void);
+
+#define __kvm_handle_fault_on_reboot(insn) \
+	"666: " insn "\n\t" \
+	".pushsection .text.fixup, \"ax\" \n" \
+	"667: \n\t" \
+	KVM_EX_PUSH " $666b \n\t" \
+	"jmp kvm_handle_fault_on_reboot \n\t" \
+	".popsection \n\t" \
+	".pushsection __ex_table, \"a\" \n\t" \
+	KVM_EX_ENTRY " 666b, 667b \n\t" \
+	".popsection"
+
 #endif
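
Note on the final hunk: __kvm_handle_fault_on_reboot() builds an inline-assembly string in which the wrapped instruction gets a local label (666), a fixup stub (667) in .text.fixup that pushes the faulting address and jumps to kvm_handle_fault_on_reboot(), and an __ex_table entry tying the two together, so a VMX/SVM instruction that faults because an emergency reboot already disabled virtualization is simply ignored. A minimal usage sketch; the __ex() wrapper name mirrors how the VMX backend wraps its instruction strings, and the vmxoff example itself is illustrative rather than taken from this patch:

	/* Wrap a hypervisor instruction string with the fault-on-reboot fixup. */
	#define __ex(insn) __kvm_handle_fault_on_reboot(insn)

	static inline void example_vmxoff(void)
	{
		/* If VMX was already turned off by an emergency reboot, the
		 * VMXOFF below faults; the __ex_table entry redirects the
		 * fault to kvm_handle_fault_on_reboot(), which swallows it. */
		asm volatile (__ex("vmxoff") : : : "cc");
	}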