Diffstat (limited to 'include/asm-x86/kvm_host.h')
-rw-r--r--  include/asm-x86/kvm_host.h  90
1 file changed, 52 insertions(+), 38 deletions(-)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c2e34c275900..411fb8cfb24e 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -1,4 +1,4 @@
-#/*
+/*
  * Kernel-based Virtual Machine driver for Linux
  *
  * This header defines architecture specific interfaces, x86 version
@@ -8,8 +8,8 @@
  *
  */
 
-#ifndef ASM_KVM_HOST_H
-#define ASM_KVM_HOST_H
+#ifndef ASM_X86__KVM_HOST_H
+#define ASM_X86__KVM_HOST_H
 
 #include <linux/types.h>
 #include <linux/mm.h>
@@ -57,6 +57,10 @@
 #define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
 
 #define DE_VECTOR 0
+#define DB_VECTOR 1
+#define BP_VECTOR 3
+#define OF_VECTOR 4
+#define BR_VECTOR 5
 #define UD_VECTOR 6
 #define NM_VECTOR 7
 #define DF_VECTOR 8
@@ -65,6 +69,7 @@
 #define SS_VECTOR 12
 #define GP_VECTOR 13
 #define PF_VECTOR 14
+#define MF_VECTOR 16
 #define MC_VECTOR 18
 
 #define SELECTOR_TI_MASK (1 << 2)
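
The added defines name the remaining exception vectors KVM injects (#DB 1, #BP 3, #OF 4, #BR 5, #MF 16) instead of leaving them as magic numbers at the call sites. As a rough illustration, not taken from this patch, an emulation path that reflects one of these exceptions into the guest could use them with the exception-queueing helpers declared later in this header:

    /* Illustrative call sites only; vcpu and error_code come from context. */
    kvm_queue_exception(vcpu, DB_VECTOR);               /* #DB, no error code */
    kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); /* #GP carries one    */
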
@@ -89,7 +94,7 @@ extern struct list_head vm_list;
 struct kvm_vcpu;
 struct kvm;
 
-enum {
+enum kvm_reg {
         VCPU_REGS_RAX = 0,
         VCPU_REGS_RCX = 1,
         VCPU_REGS_RDX = 2,
@@ -108,6 +113,7 @@ enum {
         VCPU_REGS_R14 = 14,
         VCPU_REGS_R15 = 15,
 #endif
+        VCPU_REGS_RIP,
         NR_VCPU_REGS
 };
 
@@ -189,10 +195,20 @@ struct kvm_mmu_page {
          */
         int multimapped;         /* More than one parent_pte? */
         int root_count;          /* Currently serving as active root */
+        bool unsync;
+        bool unsync_children;
         union {
                 u64 *parent_pte;               /* !multimapped */
                 struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
         };
+        DECLARE_BITMAP(unsync_child_bitmap, 512);
+};
+
+struct kvm_pv_mmu_op_buffer {
+        void *ptr;
+        unsigned len;
+        unsigned processed;
+        char buf[512] __aligned(sizeof(long));
 };
 
 /*
@@ -207,6 +223,9 @@ struct kvm_mmu {
         gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
         void (*prefetch_page)(struct kvm_vcpu *vcpu,
                               struct kvm_mmu_page *page);
+        int (*sync_page)(struct kvm_vcpu *vcpu,
+                         struct kvm_mmu_page *sp);
+        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
         hpa_t root_hpa;
         int root_level;
         int shadow_root_level;
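
The two new hooks extend struct kvm_mmu for the unsync machinery introduced above: sync_page() revalidates a shadow page that was left writable but unsynced, and invlpg() lets a guest INVLPG invalidate a single shadow mapping instead of forcing a full flush. A minimal sketch of how the invlpg hook could be driven from the kvm_mmu_invlpg() entry point declared later in this patch; the body is an assumption, not the in-tree implementation:

    void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
    {
            /* Ask the active paging mode to drop the shadow mapping. */
            vcpu->arch.mmu.invlpg(vcpu, gva);
            /* The real code would also flush the hardware TLB here. */
    }
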
@@ -219,8 +238,13 @@ struct kvm_vcpu_arch {
         int interrupt_window_open;
         unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
         DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
-        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
-        unsigned long rip;      /* needs vcpu_load_rsp_rip() */
+        /*
+         * rip and regs accesses must go through
+         * kvm_{register,rip}_{read,write} functions.
+         */
+        unsigned long regs[NR_VCPU_REGS];
+        u32 regs_avail;
+        u32 regs_dirty;
 
         unsigned long cr0;
         unsigned long cr2;
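
This hunk, together with the enum kvm_reg rename, the new VCPU_REGS_RIP entry, and the cache_reg hook that replaces cache_regs/decache_regs further down, switches guest register access to lazy per-register caching: regs_avail tracks which slots hold a valid copy, regs_dirty which slots must be written back to hardware. A sketch of what the kvm_{register,rip}_{read,write} accessors named in the comment could look like; the in-tree helpers live outside this header, so treat these bodies as an illustration of the scheme rather than the exact code:

    static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                                  enum kvm_reg reg)
    {
            /* Pull the value from hardware only if it is not cached yet. */
            if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                    kvm_x86_ops->cache_reg(vcpu, reg);
            return vcpu->arch.regs[reg];
    }

    static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                          enum kvm_reg reg, unsigned long val)
    {
            vcpu->arch.regs[reg] = val;
            /* Mark the slot valid and dirty so it is written back on entry. */
            __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
            __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
    }
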
@@ -237,6 +261,9 @@ struct kvm_vcpu_arch {
         bool tpr_access_reporting;
 
         struct kvm_mmu mmu;
+        /* only needed in kvm_pv_mmu_op() path, but it's hot so
+         * put it here to avoid allocation */
+        struct kvm_pv_mmu_op_buffer mmu_op_buffer;
 
         struct kvm_mmu_memory_cache mmu_pte_chain_cache;
         struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
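
The comment explains the placement: kvm_pv_mmu_op() is a hot hypercall path, so its scratch space is embedded in kvm_vcpu_arch instead of being allocated per call. A rough sketch of how a handler might stage work in the buffer; the field names match the kvm_pv_mmu_op_buffer added earlier, but the control flow here is an assumption and the function name is hypothetical:

    /* Illustrative only: stage guest-supplied MMU ops in the per-vcpu
     * buffer rather than allocating on every hypercall. */
    static int example_pv_mmu_op(struct kvm_vcpu *vcpu, gpa_t addr,
                                 unsigned long bytes)
    {
            struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

            buffer->ptr = buffer->buf;
            buffer->len = min_t(unsigned long, bytes, sizeof(buffer->buf));
            buffer->processed = 0;

            /* ...copy ops in from guest memory at addr, then walk the
             * buffer, advancing buffer->ptr and buffer->processed... */
            return 0;
    }
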
@@ -269,6 +296,11 @@ struct kvm_vcpu_arch {
                 u32 error_code;
         } exception;
 
+        struct kvm_queued_interrupt {
+                bool pending;
+                u8 nr;
+        } interrupt;
+
         struct {
                 int active;
                 u8 save_iopl;
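
Mirroring the queued-exception state just above it, the new interrupt field remembers an interrupt that was accepted for injection but not yet delivered (for instance because the vcpu exited mid-injection), so it can be re-injected on the next entry; nmi_injected below plays the same role for NMIs. Hypothetical helpers for this bookkeeping might look as follows; the names are illustrative, not declarations from this header:

    static inline void example_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector)
    {
            vcpu->arch.interrupt.pending = true;
            vcpu->arch.interrupt.nr = vector;
    }

    static inline void example_clear_interrupt_queue(struct kvm_vcpu *vcpu)
    {
            vcpu->arch.interrupt.pending = false;
    }
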
@@ -294,6 +326,7 @@ struct kvm_vcpu_arch {
         struct page *time_page;
 
         bool nmi_pending;
+        bool nmi_injected;
 
         u64 mtrr[0x100];
 };
@@ -316,9 +349,12 @@ struct kvm_arch{
          * Hash table of struct kvm_mmu_page.
          */
         struct list_head active_mmu_pages;
+        struct list_head assigned_dev_head;
+        struct dmar_domain *intel_iommu_domain;
         struct kvm_pic *vpic;
         struct kvm_ioapic *vioapic;
         struct kvm_pit *vpit;
+        struct hlist_head irq_ack_notifier_list;
 
         int round_robin_prev_vcpu;
         unsigned int tss_addr;
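
assigned_dev_head, intel_iommu_domain and irq_ack_notifier_list are the per-VM anchors for PCI device assignment with VT-d and for interrupt-acknowledge callbacks. The notifier list presumably holds entries along these lines, registered by code that wants to run when the guest acknowledges a given interrupt; the struct below is only a sketch of the expected shape, since the real definition is not part of this header:

    /* Assumed shape of an entry on kvm->arch.irq_ack_notifier_list. */
    struct example_irq_ack_notifier {
            struct hlist_node link;   /* linkage into irq_ack_notifier_list  */
            unsigned gsi;             /* interrupt this notifier cares about */
            void (*irq_acked)(struct example_irq_ack_notifier *ian);
    };
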
@@ -338,6 +374,7 @@ struct kvm_vm_stat {
         u32 mmu_flooded;
         u32 mmu_recycled;
         u32 mmu_cache_miss;
+        u32 mmu_unsync;
         u32 remote_tlb_flush;
         u32 lpages;
 };
@@ -364,6 +401,7 @@ struct kvm_vcpu_stat {
         u32 insn_emulation;
         u32 insn_emulation_fail;
         u32 hypercalls;
+        u32 irq_injections;
 };
 
 struct descriptor_table {
@@ -414,8 +452,7 @@ struct kvm_x86_ops {
         unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
         void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                        int *exception);
-        void (*cache_regs)(struct kvm_vcpu *vcpu);
-        void (*decache_regs)(struct kvm_vcpu *vcpu);
+        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
         unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
         void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 
@@ -528,6 +565,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
                            u32 error_code);
 
+void kvm_pic_set_irq(void *opaque, int irq, int level);
+
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
 void fx_init(struct kvm_vcpu *vcpu);
@@ -550,12 +589,14 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
@@ -686,33 +727,6 @@ enum {
         TASK_SWITCH_GATE = 3,
 };
 
-#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
-        trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-                   vcpu, 5, d1, d2, d3, d4, d5)
-#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
-        trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-                   vcpu, 4, d1, d2, d3, d4, 0)
-#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
-        trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-                   vcpu, 3, d1, d2, d3, 0, 0)
-#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
-        trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-                   vcpu, 2, d1, d2, 0, 0, 0)
-#define KVMTRACE_1D(evt, vcpu, d1, name) \
-        trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-                   vcpu, 1, d1, 0, 0, 0, 0)
-#define KVMTRACE_0D(evt, vcpu, name) \
-        trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-                   vcpu, 0, 0, 0, 0, 0, 0)
-
-#ifdef CONFIG_64BIT
-# define KVM_EX_ENTRY ".quad"
-# define KVM_EX_PUSH "pushq"
-#else
-# define KVM_EX_ENTRY ".long"
-# define KVM_EX_PUSH "pushl"
-#endif
-
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
@@ -724,15 +738,15 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
         "666: " insn "\n\t" \
         ".pushsection .fixup, \"ax\" \n" \
         "667: \n\t" \
-        KVM_EX_PUSH " $666b \n\t" \
+        __ASM_SIZE(push) " $666b \n\t" \
         "jmp kvm_handle_fault_on_reboot \n\t" \
         ".popsection \n\t" \
         ".pushsection __ex_table, \"a\" \n\t" \
-        KVM_EX_ENTRY " 666b, 667b \n\t" \
+        _ASM_PTR " 666b, 667b \n\t" \
         ".popsection"
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 
-#endif
+#endif /* ASM_X86__KVM_HOST_H */
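
The fixup body above belongs to the header's fault-on-reboot wrapper macro (__kvm_handle_fault_on_reboot() in mainline): label 666 is the wrapped instruction, and if it faults because virtualization was switched off by an emergency reboot, the exception table sends control to label 667, which pushes the faulting address and jumps to kvm_handle_fault_on_reboot(). Replacing the homegrown KVM_EX_ENTRY/KVM_EX_PUSH with __ASM_SIZE() and _ASM_PTR from <asm/asm.h> drops the local 32/64-bit #ifdef. A hedged usage sketch, assuming that wrapper name; the instruction string is only an example (the in-tree VMX code wraps .byte-encoded opcode macros instead):

    /* Illustrative wrapper use: let a faulting VMXOFF during reboot be
     * caught and redirected instead of oopsing. */
    #define __ex(insn) __kvm_handle_fault_on_reboot(insn)

    static inline void example_hardware_disable(void)
    {
            asm volatile (__ex("vmxoff") : : : "cc");
    }
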