diff options
Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/cpufeature.h     |  2 ++
 arch/x86/include/asm/kvm.h            |  1 +
 arch/x86/include/asm/kvm_host.h       | 45 +++++++++++++++++-------------------
 arch/x86/include/asm/kvm_x86_emulate.h |  6 ++++++
 arch/x86/include/asm/svm.h            |  1 +
 arch/x86/include/asm/vmx.h            |  1 +
 6 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 19af42138f78..4a28d22d4793 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -116,6 +116,8 @@ | |||
116 | #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ | 116 | #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
117 | #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ | 117 | #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ |
118 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ | 118 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ |
119 | #define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ | ||
120 | #define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ | ||
119 | #define X86_FEATURE_AES (4*32+25) /* AES instructions */ | 121 | #define X86_FEATURE_AES (4*32+25) /* AES instructions */ |
120 | #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ | 122 | #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
121 | #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ | 123 | #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ |
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index dc3f6cf11704..125be8b19568 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define __KVM_HAVE_MSI | 16 | #define __KVM_HAVE_MSI |
17 | #define __KVM_HAVE_USER_NMI | 17 | #define __KVM_HAVE_USER_NMI |
18 | #define __KVM_HAVE_GUEST_DEBUG | 18 | #define __KVM_HAVE_GUEST_DEBUG |
19 | #define __KVM_HAVE_MSIX | ||
19 | 20 | ||
20 | /* Architectural interrupt line count. */ | 21 | /* Architectural interrupt line count. */ |
21 | #define KVM_NR_INTERRUPTS 256 | 22 | #define KVM_NR_INTERRUPTS 256 |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f0faf58044ff..eabdc1cfab5c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -185,6 +185,7 @@ union kvm_mmu_page_role { | |||
185 | unsigned access:3; | 185 | unsigned access:3; |
186 | unsigned invalid:1; | 186 | unsigned invalid:1; |
187 | unsigned cr4_pge:1; | 187 | unsigned cr4_pge:1; |
188 | unsigned nxe:1; | ||
188 | }; | 189 | }; |
189 | }; | 190 | }; |
190 | 191 | ||
@@ -212,7 +213,6 @@ struct kvm_mmu_page { | |||
212 | int multimapped; /* More than one parent_pte? */ | 213 | int multimapped; /* More than one parent_pte? */ |
213 | int root_count; /* Currently serving as active root */ | 214 | int root_count; /* Currently serving as active root */ |
214 | bool unsync; | 215 | bool unsync; |
215 | bool global; | ||
216 | unsigned int unsync_children; | 216 | unsigned int unsync_children; |
217 | union { | 217 | union { |
218 | u64 *parent_pte; /* !multimapped */ | 218 | u64 *parent_pte; /* !multimapped */ |
@@ -261,13 +261,11 @@ struct kvm_mmu { | |||
261 | union kvm_mmu_page_role base_role; | 261 | union kvm_mmu_page_role base_role; |
262 | 262 | ||
263 | u64 *pae_root; | 263 | u64 *pae_root; |
264 | u64 rsvd_bits_mask[2][4]; | ||
264 | }; | 265 | }; |
265 | 266 | ||
266 | struct kvm_vcpu_arch { | 267 | struct kvm_vcpu_arch { |
267 | u64 host_tsc; | 268 | u64 host_tsc; |
268 | int interrupt_window_open; | ||
269 | unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ | ||
270 | DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS); | ||
271 | /* | 269 | /* |
272 | * rip and regs accesses must go through | 270 | * rip and regs accesses must go through |
273 | * kvm_{register,rip}_{read,write} functions. | 271 | * kvm_{register,rip}_{read,write} functions. |
@@ -286,6 +284,7 @@ struct kvm_vcpu_arch { | |||
286 | u64 shadow_efer; | 284 | u64 shadow_efer; |
287 | u64 apic_base; | 285 | u64 apic_base; |
288 | struct kvm_lapic *apic; /* kernel irqchip context */ | 286 | struct kvm_lapic *apic; /* kernel irqchip context */ |
287 | int32_t apic_arb_prio; | ||
289 | int mp_state; | 288 | int mp_state; |
290 | int sipi_vector; | 289 | int sipi_vector; |
291 | u64 ia32_misc_enable_msr; | 290 | u64 ia32_misc_enable_msr; |
@@ -320,6 +319,8 @@ struct kvm_vcpu_arch { | |||
320 | struct kvm_pio_request pio; | 319 | struct kvm_pio_request pio; |
321 | void *pio_data; | 320 | void *pio_data; |
322 | 321 | ||
322 | u8 event_exit_inst_len; | ||
323 | |||
323 | struct kvm_queued_exception { | 324 | struct kvm_queued_exception { |
324 | bool pending; | 325 | bool pending; |
325 | bool has_error_code; | 326 | bool has_error_code; |
@@ -329,11 +330,12 @@ struct kvm_vcpu_arch { | |||
329 | 330 | ||
330 | struct kvm_queued_interrupt { | 331 | struct kvm_queued_interrupt { |
331 | bool pending; | 332 | bool pending; |
333 | bool soft; | ||
332 | u8 nr; | 334 | u8 nr; |
333 | } interrupt; | 335 | } interrupt; |
334 | 336 | ||
335 | struct { | 337 | struct { |
336 | int active; | 338 | int vm86_active; |
337 | u8 save_iopl; | 339 | u8 save_iopl; |
338 | struct kvm_save_segment { | 340 | struct kvm_save_segment { |
339 | u16 selector; | 341 | u16 selector; |
@@ -356,9 +358,9 @@ struct kvm_vcpu_arch { | |||
356 | unsigned int time_offset; | 358 | unsigned int time_offset; |
357 | struct page *time_page; | 359 | struct page *time_page; |
358 | 360 | ||
361 | bool singlestep; /* guest is single stepped by KVM */ | ||
359 | bool nmi_pending; | 362 | bool nmi_pending; |
360 | bool nmi_injected; | 363 | bool nmi_injected; |
361 | bool nmi_window_open; | ||
362 | 364 | ||
363 | struct mtrr_state_type mtrr_state; | 365 | struct mtrr_state_type mtrr_state; |
364 | u32 pat; | 366 | u32 pat; |
@@ -392,15 +394,14 @@ struct kvm_arch{ | |||
392 | */ | 394 | */ |
393 | struct list_head active_mmu_pages; | 395 | struct list_head active_mmu_pages; |
394 | struct list_head assigned_dev_head; | 396 | struct list_head assigned_dev_head; |
395 | struct list_head oos_global_pages; | ||
396 | struct iommu_domain *iommu_domain; | 397 | struct iommu_domain *iommu_domain; |
398 | int iommu_flags; | ||
397 | struct kvm_pic *vpic; | 399 | struct kvm_pic *vpic; |
398 | struct kvm_ioapic *vioapic; | 400 | struct kvm_ioapic *vioapic; |
399 | struct kvm_pit *vpit; | 401 | struct kvm_pit *vpit; |
400 | struct hlist_head irq_ack_notifier_list; | 402 | struct hlist_head irq_ack_notifier_list; |
401 | int vapics_in_nmi_mode; | 403 | int vapics_in_nmi_mode; |
402 | 404 | ||
403 | int round_robin_prev_vcpu; | ||
404 | unsigned int tss_addr; | 405 | unsigned int tss_addr; |
405 | struct page *apic_access_page; | 406 | struct page *apic_access_page; |
406 | 407 | ||
@@ -423,7 +424,6 @@ struct kvm_vm_stat { | |||
423 | u32 mmu_recycled; | 424 | u32 mmu_recycled; |
424 | u32 mmu_cache_miss; | 425 | u32 mmu_cache_miss; |
425 | u32 mmu_unsync; | 426 | u32 mmu_unsync; |
426 | u32 mmu_unsync_global; | ||
427 | u32 remote_tlb_flush; | 427 | u32 remote_tlb_flush; |
428 | u32 lpages; | 428 | u32 lpages; |
429 | }; | 429 | }; |
@@ -443,7 +443,6 @@ struct kvm_vcpu_stat { | |||
443 | u32 halt_exits; | 443 | u32 halt_exits; |
444 | u32 halt_wakeup; | 444 | u32 halt_wakeup; |
445 | u32 request_irq_exits; | 445 | u32 request_irq_exits; |
446 | u32 request_nmi_exits; | ||
447 | u32 irq_exits; | 446 | u32 irq_exits; |
448 | u32 host_state_reload; | 447 | u32 host_state_reload; |
449 | u32 efer_reload; | 448 | u32 efer_reload; |
@@ -511,20 +510,22 @@ struct kvm_x86_ops { | |||
511 | void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); | 510 | void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); |
512 | int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); | 511 | int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); |
513 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 512 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
513 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | ||
514 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | ||
514 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, | 515 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, |
515 | unsigned char *hypercall_addr); | 516 | unsigned char *hypercall_addr); |
516 | int (*get_irq)(struct kvm_vcpu *vcpu); | 517 | void (*set_irq)(struct kvm_vcpu *vcpu); |
517 | void (*set_irq)(struct kvm_vcpu *vcpu, int vec); | 518 | void (*set_nmi)(struct kvm_vcpu *vcpu); |
518 | void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, | 519 | void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, |
519 | bool has_error_code, u32 error_code); | 520 | bool has_error_code, u32 error_code); |
520 | bool (*exception_injected)(struct kvm_vcpu *vcpu); | 521 | int (*interrupt_allowed)(struct kvm_vcpu *vcpu); |
521 | void (*inject_pending_irq)(struct kvm_vcpu *vcpu); | 522 | int (*nmi_allowed)(struct kvm_vcpu *vcpu); |
522 | void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, | 523 | void (*enable_nmi_window)(struct kvm_vcpu *vcpu); |
523 | struct kvm_run *run); | 524 | void (*enable_irq_window)(struct kvm_vcpu *vcpu); |
524 | 525 | void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); | |
525 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); | 526 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
526 | int (*get_tdp_level)(void); | 527 | int (*get_tdp_level)(void); |
527 | int (*get_mt_mask_shift)(void); | 528 | u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); |
528 | }; | 529 | }; |
529 | 530 | ||
530 | extern struct kvm_x86_ops *kvm_x86_ops; | 531 | extern struct kvm_x86_ops *kvm_x86_ops; |
@@ -538,7 +539,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu); | |||
538 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); | 539 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); |
539 | void kvm_mmu_set_base_ptes(u64 base_pte); | 540 | void kvm_mmu_set_base_ptes(u64 base_pte); |
540 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, | 541 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, |
541 | u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask); | 542 | u64 dirty_mask, u64 nx_mask, u64 x_mask); |
542 | 543 | ||
543 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | 544 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
544 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); | 545 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); |
@@ -552,6 +553,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
552 | const void *val, int bytes); | 553 | const void *val, int bytes); |
553 | int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, | 554 | int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, |
554 | gpa_t addr, unsigned long *ret); | 555 | gpa_t addr, unsigned long *ret); |
556 | u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
555 | 557 | ||
556 | extern bool tdp_enabled; | 558 | extern bool tdp_enabled; |
557 | 559 | ||
@@ -563,6 +565,7 @@ enum emulation_result { | |||
563 | 565 | ||
564 | #define EMULTYPE_NO_DECODE (1 << 0) | 566 | #define EMULTYPE_NO_DECODE (1 << 0) |
565 | #define EMULTYPE_TRAP_UD (1 << 1) | 567 | #define EMULTYPE_TRAP_UD (1 << 1) |
568 | #define EMULTYPE_SKIP (1 << 2) | ||
566 | int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, | 569 | int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, |
567 | unsigned long cr2, u16 error_code, int emulation_type); | 570 | unsigned long cr2, u16 error_code, int emulation_type); |
568 | void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); | 571 | void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context); |
@@ -638,7 +641,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | |||
638 | int kvm_mmu_load(struct kvm_vcpu *vcpu); | 641 | int kvm_mmu_load(struct kvm_vcpu *vcpu); |
639 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); | 642 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); |
640 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); | 643 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); |
641 | void kvm_mmu_sync_global(struct kvm_vcpu *vcpu); | ||
642 | 644 | ||
643 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | 645 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); |
644 | 646 | ||
@@ -769,6 +771,8 @@ enum { | |||
769 | #define HF_GIF_MASK (1 << 0) | 771 | #define HF_GIF_MASK (1 << 0) |
770 | #define HF_HIF_MASK (1 << 1) | 772 | #define HF_HIF_MASK (1 << 1) |
771 | #define HF_VINTR_MASK (1 << 2) | 773 | #define HF_VINTR_MASK (1 << 2) |
774 | #define HF_NMI_MASK (1 << 3) | ||
775 | #define HF_IRET_MASK (1 << 4) | ||
772 | 776 | ||
773 | /* | 777 | /* |
774 | * Hardware virtualization extension instructions may fault if a | 778 | * Hardware virtualization extension instructions may fault if a |
@@ -791,5 +795,6 @@ asmlinkage void kvm_handle_fault_on_reboot(void); | |||
791 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 795 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
792 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 796 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
793 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); | 797 | int kvm_age_hva(struct kvm *kvm, unsigned long hva); |
798 | int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); | ||
794 | 799 | ||
795 | #endif /* _ASM_X86_KVM_HOST_H */ | 800 | #endif /* _ASM_X86_KVM_HOST_H */ |
diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h index 6a159732881a..b7ed2c423116 100644 --- a/arch/x86/include/asm/kvm_x86_emulate.h +++ b/arch/x86/include/asm/kvm_x86_emulate.h | |||
@@ -143,6 +143,9 @@ struct decode_cache { | |||
143 | struct fetch_cache fetch; | 143 | struct fetch_cache fetch; |
144 | }; | 144 | }; |
145 | 145 | ||
146 | #define X86_SHADOW_INT_MOV_SS 1 | ||
147 | #define X86_SHADOW_INT_STI 2 | ||
148 | |||
146 | struct x86_emulate_ctxt { | 149 | struct x86_emulate_ctxt { |
147 | /* Register state before/after emulation. */ | 150 | /* Register state before/after emulation. */ |
148 | struct kvm_vcpu *vcpu; | 151 | struct kvm_vcpu *vcpu; |
@@ -152,6 +155,9 @@ struct x86_emulate_ctxt { | |||
152 | int mode; | 155 | int mode; |
153 | u32 cs_base; | 156 | u32 cs_base; |
154 | 157 | ||
158 | /* interruptibility state, as a result of execution of STI or MOV SS */ | ||
159 | int interruptibility; | ||
160 | |||
155 | /* decode cache */ | 161 | /* decode cache */ |
156 | struct decode_cache decode; | 162 | struct decode_cache decode; |
157 | }; | 163 | }; |
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 82ada75f3ebf..85574b7c1bc1 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
@@ -225,6 +225,7 @@ struct __attribute__ ((__packed__)) vmcb { | |||
225 | #define SVM_EVTINJ_VALID_ERR (1 << 11) | 225 | #define SVM_EVTINJ_VALID_ERR (1 << 11) |
226 | 226 | ||
227 | #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK | 227 | #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK |
228 | #define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK | ||
228 | 229 | ||
229 | #define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR | 230 | #define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR |
230 | #define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI | 231 | #define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 498f944010b9..11be5ad2e0e9 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -247,6 +247,7 @@ enum vmcs_field { | |||
247 | #define EXIT_REASON_MSR_READ 31 | 247 | #define EXIT_REASON_MSR_READ 31 |
248 | #define EXIT_REASON_MSR_WRITE 32 | 248 | #define EXIT_REASON_MSR_WRITE 32 |
249 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | 249 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 |
250 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | ||
250 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | 251 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 |
251 | #define EXIT_REASON_APIC_ACCESS 44 | 252 | #define EXIT_REASON_APIC_ACCESS 44 |
252 | #define EXIT_REASON_EPT_VIOLATION 48 | 253 | #define EXIT_REASON_EPT_VIOLATION 48 |