about summary refs log tree commit diff stats
path: root/arch/x86/include
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-13 12:55:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-13 12:55:09 -0500
commit	b9085bcbf5f43adf60533f9b635b2e7faeed0fe9 (patch)
tree	e397abf5682a45c096e75b3d0fa99c8e228425fc /arch/x86/include
parent	c7d7b98671552abade78834c522b7308bda73c0d (diff)
parent	6557bada461afeaa920a189fae2cff7c8fdce39f (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini: "Fairly small update, but there are some interesting new features. Common: Optional support for adding a small amount of polling on each HLT instruction executed in the guest (or equivalent for other architectures). This can improve latency up to 50% on some scenarios (e.g. O_DSYNC writes or TCP_RR netperf tests). This also has to be enabled manually for now, but the plan is to auto-tune this in the future. ARM/ARM64: The highlights are support for GICv3 emulation and dirty page tracking s390: Several optimizations and bugfixes. Also a first: a feature exposed by KVM (UUID and long guest name in /proc/sysinfo) before it is available in IBM's hypervisor! :) MIPS: Bugfixes. x86: Support for PML (page modification logging, a new feature in Broadwell Xeons that speeds up dirty page tracking), nested virtualization improvements (nested APICv---a nice optimization), usual round of emulation fixes. There is also a new option to reduce latency of the TSC deadline timer in the guest; this needs to be tuned manually. Some commits are common between this pull and Catalin's; I see you have already included his tree. Powerpc: Nothing yet. 
The KVM/PPC changes will come in through the PPC maintainers, because I haven't received them yet and I might end up being offline for some part of next week" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits) KVM: ia64: drop kvm.h from installed user headers KVM: x86: fix build with !CONFIG_SMP KVM: x86: emulate: correct page fault error code for NoWrite instructions KVM: Disable compat ioctl for s390 KVM: s390: add cpu model support KVM: s390: use facilities and cpu_id per KVM KVM: s390/CPACF: Choose crypto control block format s390/kernel: Update /proc/sysinfo file with Extended Name and UUID KVM: s390: reenable LPP facility KVM: s390: floating irqs: fix user triggerable endless loop kvm: add halt_poll_ns module parameter kvm: remove KVM_MMIO_SIZE KVM: MIPS: Don't leak FPU/DSP to guest KVM: MIPS: Disable HTW while in guest KVM: nVMX: Enable nested posted interrupt processing KVM: nVMX: Enable nested virtual interrupt delivery KVM: nVMX: Enable nested apic register virtualization KVM: nVMX: Make nested control MSRs per-cpu KVM: nVMX: Enable nested virtualize x2apic mode KVM: nVMX: Prepare for using hardware MSR bitmap ...
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/kvm_emulate.h     |  1
-rw-r--r--	arch/x86/include/asm/kvm_host.h        | 59
-rw-r--r--	arch/x86/include/asm/vmx.h             |  4
-rw-r--r--	arch/x86/include/uapi/asm/msr-index.h  |  3
-rw-r--r--	arch/x86/include/uapi/asm/vmx.h        |  6
5 files changed, 66 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index eb181178fe0b..57a9d94fe160 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -208,6 +208,7 @@ struct x86_emulate_ops {
 
 	void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
 			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d89c6b828c96..a236e39cc385 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -38,8 +38,6 @@
 #define KVM_PRIVATE_MEM_SLOTS 3
 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 
-#define KVM_MMIO_SIZE 16
-
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
@@ -51,7 +49,7 @@
 	  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 
 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
-#define CR3_PCID_INVD		 (1UL << 63)
+#define CR3_PCID_INVD		 BIT_64(63)
 #define CR4_RESERVED_BITS                                   \
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
@@ -160,6 +158,18 @@ enum {
 #define DR7_FIXED_1	0x00000400
 #define DR7_VOLATILE	0xffff2bff
 
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
+
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC	0
 /*
@@ -615,6 +625,8 @@ struct kvm_arch {
 	#ifdef CONFIG_KVM_MMU_AUDIT
 	int audit_point;
 	#endif
+
+	bool boot_vcpu_runs_old_kvmclock;
 };
 
 struct kvm_vm_stat {
@@ -643,6 +655,7 @@ struct kvm_vcpu_stat {
 	u32 irq_window_exits;
 	u32 nmi_window_exits;
 	u32 halt_exits;
+	u32 halt_successful_poll;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
 	u32 irq_exits;
@@ -787,6 +800,31 @@ struct kvm_x86_ops {
 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 
 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
+
+	/*
+	 * Arch-specific dirty logging hooks. These hooks are only supposed to
+	 * be valid if the specific arch has hardware-accelerated dirty logging
+	 * mechanism. Currently only for PML on VMX.
+	 *
+	 *  - slot_enable_log_dirty:
+	 *	called when enabling log dirty mode for the slot.
+	 *  - slot_disable_log_dirty:
+	 *	called when disabling log dirty mode for the slot.
+	 *	also called when slot is created with log dirty disabled.
+	 *  - flush_log_dirty:
+	 *	called before reporting dirty_bitmap to userspace.
+	 *  - enable_log_dirty_pt_masked:
+	 *	called when reenabling log dirty for the GFNs in the mask after
+	 *	corresponding bits are cleared in slot->dirty_bitmap.
+	 */
+	void (*slot_enable_log_dirty)(struct kvm *kvm,
+				      struct kvm_memory_slot *slot);
+	void (*slot_disable_log_dirty)(struct kvm *kvm,
+				       struct kvm_memory_slot *slot);
+	void (*flush_log_dirty)(struct kvm *kvm);
+	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
+					   struct kvm_memory_slot *slot,
+					   gfn_t offset, unsigned long mask);
 };
 
 struct kvm_arch_async_pf {
@@ -819,10 +857,17 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
-				     struct kvm_memory_slot *slot,
-				     gfn_t gfn_offset, unsigned long mask);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+				      struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+					struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+			    struct kvm_memory_slot *memslot);
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+				   struct kvm_memory_slot *slot,
+				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 45afaee9555c..da772edd19ab 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -69,6 +69,7 @@
 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING	0x00000400
 #define SECONDARY_EXEC_ENABLE_INVPCID		0x00001000
 #define SECONDARY_EXEC_SHADOW_VMCS              0x00004000
+#define SECONDARY_EXEC_ENABLE_PML               0x00020000
 #define SECONDARY_EXEC_XSAVES			0x00100000
 
 
@@ -121,6 +122,7 @@ enum vmcs_field {
 	GUEST_LDTR_SELECTOR             = 0x0000080c,
 	GUEST_TR_SELECTOR               = 0x0000080e,
 	GUEST_INTR_STATUS               = 0x00000810,
+	GUEST_PML_INDEX                 = 0x00000812,
 	HOST_ES_SELECTOR                = 0x00000c00,
 	HOST_CS_SELECTOR                = 0x00000c02,
 	HOST_SS_SELECTOR                = 0x00000c04,
@@ -140,6 +142,8 @@ enum vmcs_field {
 	VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
 	VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
 	VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
+	PML_ADDRESS                     = 0x0000200e,
+	PML_ADDRESS_HIGH                = 0x0000200f,
 	TSC_OFFSET                      = 0x00002010,
 	TSC_OFFSET_HIGH                 = 0x00002011,
 	VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 536240fa9a95..3ce079136c11 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -364,6 +364,9 @@
 #define MSR_IA32_UCODE_WRITE		0x00000079
 #define MSR_IA32_UCODE_REV		0x0000008b
 
+#define MSR_IA32_SMM_MONITOR_CTL	0x0000009b
+#define MSR_IA32_SMBASE			0x0000009e
+
 #define MSR_IA32_PERF_STATUS		0x00000198
 #define MSR_IA32_PERF_CTL		0x00000199
 #define INTEL_PERF_CTL_MASK		0xffff
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index b813bf9da1e2..c5f1a1deb91a 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -56,6 +56,7 @@
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE           32
 #define EXIT_REASON_INVALID_STATE       33
+#define EXIT_REASON_MSR_LOAD_FAIL       34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
 #define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
@@ -72,6 +73,7 @@
 #define EXIT_REASON_XSETBV              55
 #define EXIT_REASON_APIC_WRITE          56
 #define EXIT_REASON_INVPCID             58
+#define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
 
@@ -116,10 +118,14 @@
 	{ EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
 	{ EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
 	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+	{ EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
 	{ EXIT_REASON_INVD,                  "INVD" }, \
 	{ EXIT_REASON_INVVPID,               "INVVPID" }, \
 	{ EXIT_REASON_INVPCID,               "INVPCID" }, \
 	{ EXIT_REASON_XSAVES,                "XSAVES" }, \
 	{ EXIT_REASON_XRSTORS,               "XRSTORS" }
 
+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
+
 #endif /* _UAPIVMX_H */