Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')

 arch/x86/include/asm/kvm_host.h | 70 +++++++++++++++++-----------------------------------------------------
 1 file changed, 17 insertions(+), 53 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 76f5483cffec..502e53f999cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
+#include <linux/cpumask.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -39,11 +40,14 @@
 				  0xFFFFFF0000000000ULL)
 
 #define INVALID_PAGE (~(hpa_t)0)
+#define VALID_PAGE(x) ((x) != INVALID_PAGE)
+
 #define UNMAPPED_GVA (~(gpa_t)0)
 
 /* KVM Hugepage definitions for x86 */
 #define KVM_NR_PAGE_SIZES 3
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
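
Note: the hugepage refactor splits the old shift into a gfn-level component, KVM_HPAGE_GFN_SHIFT(), so callers can align guest frame numbers directly instead of converting through byte addresses. A minimal standalone sketch of how the macros now relate (PAGE_SHIFT is hard-coded to 12 here purely for illustration; the kernel headers provide the real value):

#include <stdio.h>

#define PAGE_SHIFT 12	/* illustration only; supplied by the kernel headers */
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / (1UL << PAGE_SHIFT))

int main(void)
{
	/* Level 2 (2 MiB pages): gfn shift 9, 512 base pages per hugepage. */
	printf("level 2: gfn shift %d, %lu pages\n",
	       KVM_HPAGE_GFN_SHIFT(2), KVM_PAGES_PER_HPAGE(2));
	/* Level 3 (1 GiB pages): gfn shift 18, 262144 base pages per hugepage. */
	printf("level 3: gfn shift %d, %lu pages\n",
	       KVM_HPAGE_GFN_SHIFT(3), KVM_PAGES_PER_HPAGE(3));
	return 0;
}
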
@@ -69,8 +73,6 @@
 
 #define IOPL_SHIFT 12
 
-#define KVM_ALIAS_SLOTS 4
-
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_MMU_HASH_SHIFT 10
@@ -241,7 +243,7 @@ struct kvm_mmu {
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp);
+			 struct kvm_mmu_page *sp, bool clear_unsync);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
@@ -301,8 +303,8 @@ struct kvm_vcpu_arch {
 		unsigned long mmu_seq;
 	} update_pte;
 
-	struct i387_fxsave_struct host_fx_image;
-	struct i387_fxsave_struct guest_fx_image;
+	struct fpu guest_fpu;
+	u64 xcr0;
 
 	gva_t mmio_fault_cr2;
 	struct kvm_pio_request pio;
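
Note: the fixed i387_fxsave_struct images are replaced by a generic struct fpu for the guest, and the guest's XCR0 value is now cached in the vcpu so XSAVE-managed state can be validated when the guest executes XSETBV. A hedged sketch of the kind of check the cached field supports (the bit tests are illustrative, kvm_set_xcr() in x86.c is the authoritative logic, and XSTATE_FP/XSTATE_SSE/XSTATE_YMM are the xsave.h feature bits of this era):

/* Sketch only: validate a guest-proposed XCR0 before caching it. */
static int sketch_validate_xcr0(struct kvm_vcpu *vcpu, u64 new_xcr0)
{
	if (!(new_xcr0 & XSTATE_FP))		/* x87 state must stay enabled */
		return 1;
	if ((new_xcr0 & XSTATE_YMM) && !(new_xcr0 & XSTATE_SSE))
		return 1;			/* YMM state requires SSE state */
	vcpu->arch.xcr0 = new_xcr0;
	return 0;
}
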
@@ -360,26 +362,11 @@ struct kvm_vcpu_arch {
 
 	/* fields used by HYPER-V emulation */
 	u64 hv_vapic;
-};
 
-struct kvm_mem_alias {
-	gfn_t base_gfn;
-	unsigned long npages;
-	gfn_t target_gfn;
-#define KVM_ALIAS_INVALID 1UL
-	unsigned long flags;
-};
-
-#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-
-struct kvm_mem_aliases {
-	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
-	int naliases;
-};
+	cpumask_var_t wbinvd_dirty_mask;
+};
 
 struct kvm_arch {
-	struct kvm_mem_aliases *aliases;
-
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_alloc_mmu_pages;
@@ -533,6 +520,8 @@ struct kvm_x86_ops {
 
 	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
 
+	bool (*has_wbinvd_exit)(void);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
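Note: has_wbinvd_exit() reports whether the hardware can trap guest WBINVD, and the per-vcpu wbinvd_dirty_mask (above) records which physical CPUs the vcpu may have dirtied. A rough sketch of how an emulation path could combine them; the helper below is simplified and not the exact x86.c implementation:

/* Sketch: broadcast WBINVD to the CPUs in the vcpu's dirty mask when the
 * hardware exits on WBINVD, then flush the local caches as well. */
static void wbinvd_ipi(void *unused)
{
	wbinvd();
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->has_wbinvd_exit()) {
		preempt_disable();
		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
				       wbinvd_ipi, NULL, 1);
		preempt_enable();
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
	}
	wbinvd();
	return X86EMUL_CONTINUE;
}
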
@@ -576,7 +565,6 @@ enum emulation_result {
 #define EMULTYPE_SKIP (1 << 2)
 int emulate_instruction(struct kvm_vcpu *vcpu,
 			unsigned long cr2, u16 error_code, int emulation_type);
-void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 
@@ -591,10 +579,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
 int emulate_clts(struct kvm_vcpu *vcpu);
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
-		    unsigned long *dest);
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
-		    unsigned long value);
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
@@ -602,15 +587,16 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 		    bool has_error_code, u32 error_code);
 
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
-void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
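
Note: kvm_set_cr0/cr3/cr4 now return int instead of void, and the new kvm_set_xcr() follows the same convention, so the call site decides how to report a rejected write. A hedged sketch of the caller pattern this enables (these wrapper functions are illustrative, not lifted from x86.c):

/* Sketch: a non-zero return now means the write was rejected, so the
 * call site injects #GP into the guest instead of the helper doing it. */
static int sketch_handle_cr4_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (kvm_set_cr4(vcpu, val)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}

static int sketch_handle_xsetbv(struct kvm_vcpu *vcpu, u32 index, u64 val)
{
	if (kvm_set_xcr(vcpu, index, val)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
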
@@ -630,12 +616,7 @@ int kvm_pic_set_irq(void *opaque, int irq, int level);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
-void fx_init(struct kvm_vcpu *vcpu);
-
-int emulator_write_emulated(unsigned long addr,
-			    const void *val,
-			    unsigned int bytes,
-			    struct kvm_vcpu *vcpu);
+int fx_init(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
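
Note: fx_init() now returns int because the guest FPU area is allocated through the generic struct fpu API and that allocation can fail. A rough sketch of the shape this takes; fpu_alloc()/fpu_finit() and the XSTATE_FP default are assumptions about this era's FPU helpers, not a verbatim copy of x86.c:

/* Sketch: allocate and reset the vcpu's guest FPU state, propagating
 * -ENOMEM instead of relying on a fixed in-struct fxsave image. */
int fx_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = fpu_alloc(&vcpu->arch.guest_fpu);		/* assumed helper */
	if (err)
		return err;

	fpu_finit(&vcpu->arch.guest_fpu);		/* assumed helper */
	vcpu->arch.xcr0 = XSTATE_FP;			/* x87 always enabled */
	return 0;
}
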
@@ -664,8 +645,6 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
-
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
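
Note: with the memory-alias machinery removed there is no separate "unaliased" lookup path; a gfn resolves through the ordinary memslot lookup. A minimal illustration (gfn_to_memslot() is the existing generic helper; the wrapper name is made up for the example):

/* Illustration: callers that previously needed gfn_to_memslot_unaliased()
 * can use the generic lookup directly, since gfns are no longer remapped. */
static struct kvm_memory_slot *sketch_lookup_slot(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn);
}
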
@@ -719,21 +698,6 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void kvm_fx_save(struct i387_fxsave_struct *image)
-{
-	asm("fxsave (%0)":: "r" (image));
-}
-
-static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
-{
-	asm("fxrstor (%0)":: "r" (image));
-}
-
-static inline void kvm_fx_finit(void)
-{
-	asm("finit");
-}
-
 static inline u32 get_rdx_init_val(void)
 {
 	return 0x600; /* P6 family */
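
Note: removing the kvm_fx_save/kvm_fx_restore/kvm_fx_finit wrappers means KVM no longer issues fxsave/fxrstor/finit by hand; guest FPU state is expected to be switched through the kernel's struct fpu helpers instead. A hedged sketch of what a load/save pair could look like; fpu_restore_checking()/fpu_save_init() are assumed helper names and the preemption/bookkeeping details are omitted:

/* Sketch only: swap guest FPU state in and out via struct fpu helpers
 * rather than open-coded fxsave/fxrstor on a fixed fxsave image. */
static void sketch_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	unlazy_fpu(current);				/* flush host FPU state */
	fpu_restore_checking(&vcpu->arch.guest_fpu);	/* assumed helper */
}

static void sketch_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	fpu_save_init(&vcpu->arch.guest_fpu);		/* assumed helper */
}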