author     Radim Krčmář <rkrcmar@redhat.com>    2017-08-04 18:12:49 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2017-08-07 10:11:50 -0400
commit     d6321d493319bfd406c484e8359c6101cbda39d3
tree       97b80d5e78b3b5490e377942ca0fbe43bf65a21f
parent     c6bd18011ff8ea23473a1f4c6d934f761879081d
KVM: x86: generalize guest_cpuid_has_ helpers
This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
X86_FEATURE_XYZ), which gets rid of many very similar helpers.
Given an X86_FEATURE_*, we can tell which CPUID leaf and register it
belongs to, but this information isn't available in common code, so we
recreate it for KVM.
Add some BUILD_BUG_ONs to make sure the lookup stays compile-time and
that every feature used has an entry in the table.
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/cpuid.h | 170
 arch/x86/kvm/mmu.c   |   7
 arch/x86/kvm/mtrr.c  |   2
 arch/x86/kvm/svm.c   |   2
 arch/x86/kvm/vmx.c   |  26
 arch/x86/kvm/x86.c   |  38
 6 files changed, 95 insertions(+), 150 deletions(-)
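In practice the conversion is mechanical at every call site; the svm.c hunk below is representative (both lines are taken directly from this patch):

    /* before: one dedicated helper per feature */
    svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);

    /* after: one generic helper keyed by the common X86_FEATURE_* constant */
    svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);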
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c723d64657d0..4e9ac93b4f3a 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -3,6 +3,7 @@
 
 #include "x86.h"
 #include <asm/cpu.h>
+#include <asm/processor.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.maxphyaddr;
 }
 
-static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	if (!static_cpu_has(X86_FEATURE_XSAVE))
-		return false;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
-
-static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->edx & bit(X86_FEATURE_MTRR));
-}
-
-static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
-}
-
-static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_SMAP));
-}
-
-static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ecx & bit(X86_FEATURE_PKU));
-}
-
-static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_LM));
-}
-
-static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->ecx & bit(X86_FEATURE_OSVW));
-}
-
-static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_PCID));
-}
-
-static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
-}
+struct cpuid_reg {
+	u32 function;
+	u32 index;
+	int reg;
+};
+
+static const struct cpuid_reg reverse_cpuid[] = {
+	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
+	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
+	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+	[CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
+	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
+	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
+	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
+	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
+	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
+	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+};
+
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+{
+	unsigned x86_leaf = x86_feature / 32;
+
+	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
+	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+
+	return reverse_cpuid[x86_leaf];
+}
+
+static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+	struct kvm_cpuid_entry2 *entry;
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+
+	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+	if (!entry)
+		return NULL;
+
+	switch (cpuid.reg) {
+	case CPUID_EAX:
+		return &entry->eax;
+	case CPUID_EBX:
+		return &entry->ebx;
+	case CPUID_ECX:
+		return &entry->ecx;
+	case CPUID_EDX:
+		return &entry->edx;
+	default:
+		BUILD_BUG();
+		return NULL;
+	}
+}
+
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+	int *reg;
+
+	if (x86_feature == X86_FEATURE_XSAVE &&
+	    !static_cpu_has(X86_FEATURE_XSAVE))
+		return false;
+
+	reg = guest_cpuid_get_register(vcpu, x86_feature);
+	if (!reg)
+		return false;
+
+	return *reg & bit(x86_feature);
+}
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,46 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
 	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
 }
 
-static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
-}
-
-static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_RTM));
-}
-
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
-static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
-}
-
-static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
-	return best && (best->edx & bit(X86_FEATURE_NRIPS));
-}
-
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
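A note on the lookup above: the X86_FEATURE_* constants in <asm/cpufeatures.h> encode a feature as word * 32 + bit, so x86_feature / 32 recovers the CPUID word (the index into reverse_cpuid[]) and bit() keeps only the bit within the selected register. A minimal standalone sketch of that decomposition, assuming the usual (1*32 + 27) definition of X86_FEATURE_RDTSCP:

    #include <stdio.h>

    /* Assumption: mirrors <asm/cpufeatures.h>, where X86_FEATURE_RDTSCP is
     * (1*32 + 27), i.e. word 1 (0x80000001:EDX in reverse_cpuid[]), bit 27. */
    #define X86_FEATURE_RDTSCP (1 * 32 + 27)

    int main(void)
    {
        unsigned int feature = X86_FEATURE_RDTSCP;
        unsigned int word = feature / 32;         /* index into reverse_cpuid[] */
        unsigned int mask = 1u << (feature & 31); /* what KVM's bit() computes */

        printf("CPUID word %u, register mask 0x%08x\n", word, mask);
        return 0;
    }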
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b1dd114956a..2fac6f78c420 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4052,7 +4052,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 {
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
-				context->nx, guest_cpuid_has_gbpages(vcpu),
+				context->nx,
+				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
 				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
@@ -4114,8 +4115,8 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, uses_nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
-				true);
+				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
+				is_pse(vcpu), true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 0149ac59c273..e9ea2d45ae66 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -130,7 +130,7 @@ static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 	 * enable MTRRs and it is obviously undesirable to run the
 	 * guest entirely with UC memory and we use WB.
 	 */
-	if (guest_cpuid_has_mtrr(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
 		return MTRR_TYPE_UNCACHABLE;
 	else
 		return MTRR_TYPE_WRBACK;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1107626938cc..b8196aecbdcc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5078,7 +5078,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 	struct kvm_cpuid_entry2 *entry;
 
 	/* Update nrips enabled cache */
-	svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
+	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
 
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e4a2dc85375..96d1f8708cef 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2611,7 +2611,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		if (index >= 0)
 			move_msr_up(vmx, index, save_nmsrs++);
 		index = __find_msr_index(vmx, MSR_TSC_AUX);
-		if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
+		if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
 			move_msr_up(vmx, index, save_nmsrs++);
 		/*
 		 * MSR_STAR is only needed on long mode guests, and only
@@ -2671,12 +2671,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
-	return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
-}
-
 /*
  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
@@ -2685,7 +2679,7 @@ static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
  */
 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
 {
-	return nested && guest_cpuid_has_vmx(vcpu);
+	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
 }
 
 /*
@@ -3281,7 +3275,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!kvm_mpx_supported() ||
-		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -3305,7 +3300,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
 			return 1;
 		/* Otherwise falls through */
 	default:
@@ -3364,7 +3360,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!kvm_mpx_supported() ||
-		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+		    (!msr_info->host_initiated &&
+		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
 			return 1;
 		if (is_noncanonical_address(data & PAGE_MASK) ||
 		    (data & MSR_IA32_BNDCFGS_RSVD))
@@ -3427,7 +3424,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
 	case MSR_TSC_AUX:
-		if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
 			return 1;
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
@@ -9622,7 +9620,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
 
 	if (vmx_rdtscp_supported()) {
-		bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
+		bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
 		if (!rdtscp_enabled)
 			secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
 
@@ -9641,7 +9639,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 		bool invpcid_enabled =
 			best && best->ebx & bit(X86_FEATURE_INVPCID) &&
-			guest_cpuid_has_pcid(vcpu);
+			guest_cpuid_has(vcpu, X86_FEATURE_PCID);
 
 		if (!invpcid_enabled) {
 			secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 72d82ab1ee22..ee4e251c82fc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -310,8 +310,8 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 	u64 new_state = msr_info->data &
 		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
-	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
-		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
 
 	if (!msr_info->host_initiated &&
 	    ((msr_info->data & reserved_bits) != 0 ||
@@ -754,19 +754,19 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & CR4_RESERVED_BITS)
 		return 1;
 
-	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
 		return 1;
 
-	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
 		return 1;
 
-	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
 		return 1;
 
-	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
 
-	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -779,7 +779,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		return 1;
 
 	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-		if (!guest_cpuid_has_pcid(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
 			return 1;
 
 		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
@@ -883,7 +883,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
 	u64 fixed = DR6_FIXED_1;
 
-	if (!guest_cpuid_has_rtm(vcpu))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
 		fixed |= DR6_RTM;
 	return fixed;
 }
@@ -1534,8 +1534,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+	if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
 		update_ia32_tsc_adjust_msr(vcpu, offset);
+
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
@@ -2185,7 +2186,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
 		break;
 	case MSR_IA32_TSC_ADJUST:
-		if (guest_cpuid_has_tsc_adjust(vcpu)) {
+		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
 			if (!msr_info->host_initiated) {
 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
 				adjust_tsc_offset_guest(vcpu, adj);
@@ -2307,12 +2308,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		vcpu->arch.osvw.length = data;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		vcpu->arch.osvw.status = data;
 		break;
@@ -2537,12 +2538,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0xbe702111;
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		msr_info->data = vcpu->arch.osvw.length;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
-		if (!guest_cpuid_has_osvw(vcpu))
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
 			return 1;
 		msr_info->data = vcpu->arch.osvw.status;
 		break;
@@ -6606,7 +6607,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	vcpu->arch.hflags |= HF_SMM_MASK;
 	memset(buf, 0, 512);
-	if (guest_cpuid_has_longmode(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
 		enter_smm_save_state_32(vcpu, buf);
@@ -6658,7 +6659,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
-	if (guest_cpuid_has_longmode(vcpu))
+	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
 
 	kvm_update_cpuid(vcpu);
@@ -7424,7 +7425,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
-	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+	    (sregs->cr4 & X86_CR4_OSXSAVE))
 		return -EINVAL;
 
 	dt.size = sregs->idt.limit;