aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/cpuid.c625
-rw-r--r--arch/x86/kvm/cpuid.h46
-rw-r--r--arch/x86/kvm/lapic.c1
-rw-r--r--arch/x86/kvm/vmx.c1
-rw-r--r--arch/x86/kvm/x86.c634
-rw-r--r--arch/x86/kvm/x86.h5
7 files changed, 679 insertions, 635 deletions
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index f15501f431c8..161b76ae87c4 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
12kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o) 12kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o)
13 13
14kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ 14kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
15 i8254.o timer.o 15 i8254.o timer.o cpuid.o
16kvm-intel-y += vmx.o 16kvm-intel-y += vmx.o
17kvm-amd-y += svm.o 17kvm-amd-y += svm.o
18 18
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
new file mode 100644
index 000000000000..0a332ec5203c
--- /dev/null
+++ b/arch/x86/kvm/cpuid.c
@@ -0,0 +1,625 @@
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 * cpuid support routines
4 *
5 * derived from arch/x86/kvm/x86.c
6 *
7 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
8 * Copyright IBM Corporation, 2008
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2. See
11 * the COPYING file in the top-level directory.
12 *
13 */
14
15#include <linux/kvm_host.h>
16#include <linux/module.h>
17#include <asm/user.h>
18#include <asm/xsave.h>
19#include "cpuid.h"
20#include "lapic.h"
21#include "mmu.h"
22#include "trace.h"
23
/*
 * Refresh CPUID-derived per-vCPU state after the guest CPUID table or
 * CR4 changes: recompute the OSXSAVE bit of guest leaf 1 from the
 * guest's CR4.OSXSAVE, and choose which LAPIC timer mode bits the
 * guest may set based on whether leaf 1 advertises the TSC-deadline
 * timer.  No-op if the guest has no leaf-1 entry.
 */
24void kvm_update_cpuid(struct kvm_vcpu *vcpu)
25{
26 struct kvm_cpuid_entry2 *best;
27 struct kvm_lapic *apic = vcpu->arch.apic;
28
29 best = kvm_find_cpuid_entry(vcpu, 1, 0);
30 if (!best)
31 return;
32
33 /* Update OSXSAVE bit */
34 if (cpu_has_xsave && best->function == 0x1) {
35 best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
36 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
37 best->ecx |= bit(X86_FEATURE_OSXSAVE);
38 }
39
 /* Bits 18:17 of LVTT select the timer mode; allow the deadline
  * mode bit only when the guest sees TSC_DEADLINE_TIMER. */
40 if (apic) {
41 if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
42 apic->lapic_timer.timer_mode_mask = 3 << 17;
43 else
44 apic->lapic_timer.timer_mode_mask = 1 << 17;
45 }
46}
47
/*
 * Return nonzero when the host's EFER MSR has the NX (no-execute)
 * bit set.  rdmsrl_safe() leaves efer at 0 if the MSR read faults,
 * so hosts without EFER simply report "no NX".
 */
48static int is_efer_nx(void)
49{
50 unsigned long long efer = 0;
51
52 rdmsrl_safe(MSR_EFER, &efer);
53 return efer & EFER_NX;
54}
55
/*
 * Strip the NX capability (leaf 0x80000001, EDX bit 20) from the
 * guest's CPUID table when the host itself does not run with EFER.NX,
 * so the guest is never promised a feature the host cannot back.
 */
56static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
57{
58 int i;
59 struct kvm_cpuid_entry2 *e, *entry;
60
61 entry = NULL;
62 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
63 e = &vcpu->arch.cpuid_entries[i];
64 if (e->function == 0x80000001) {
65 entry = e;
66 break;
67 }
68 }
69 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
70 entry->edx &= ~(1 << 20);
71 printk(KERN_INFO "kvm: guest NX capability removed\n");
72 }
73}
74
 75/* when an old userspace process fills a new kernel module */
/*
 * KVM_SET_CPUID handler: copy the legacy struct kvm_cpuid_entry array
 * from userspace and widen each element into the vCPU's
 * kvm_cpuid_entry2 table (index/flags/padding zeroed).  On success,
 * fixes up NX, and re-derives APIC version, vendor-specific state and
 * CPUID-dependent state.  Returns 0 or -E2BIG/-ENOMEM/-EFAULT.
 */
 76int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 77 struct kvm_cpuid *cpuid,
 78 struct kvm_cpuid_entry __user *entries)
 79{
 80 int r, i;
 81 struct kvm_cpuid_entry *cpuid_entries;
 82
 83 r = -E2BIG;
 84 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 85 goto out;
 86 r = -ENOMEM;
 87 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
 88 if (!cpuid_entries)
 89 goto out;
 90 r = -EFAULT;
 91 if (copy_from_user(cpuid_entries, entries,
 92 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
 93 goto out_free;
 /* Widen legacy entries: the old ABI has no index/flags fields. */
 94 for (i = 0; i < cpuid->nent; i++) {
 95 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
 96 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
 97 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
 98 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
 99 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
 100 vcpu->arch.cpuid_entries[i].index = 0;
 101 vcpu->arch.cpuid_entries[i].flags = 0;
 102 vcpu->arch.cpuid_entries[i].padding[0] = 0;
 103 vcpu->arch.cpuid_entries[i].padding[1] = 0;
 104 vcpu->arch.cpuid_entries[i].padding[2] = 0;
 105 }
 106 vcpu->arch.cpuid_nent = cpuid->nent;
 107 cpuid_fix_nx_cap(vcpu);
 108 r = 0;
 109 kvm_apic_set_version(vcpu);
 110 kvm_x86_ops->cpuid_update(vcpu);
 111 kvm_update_cpuid(vcpu);
 112
 113out_free:
 114 vfree(cpuid_entries);
 115out:
 116 return r;
 117}
118
/*
 * KVM_SET_CPUID2 handler: copy a kvm_cpuid_entry2 array straight into
 * the vCPU's table (same layout, no widening needed), then re-derive
 * APIC version, vendor-specific state and CPUID-dependent state.
 * Returns 0 or -E2BIG/-EFAULT.
 */
 119int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 120 struct kvm_cpuid2 *cpuid,
 121 struct kvm_cpuid_entry2 __user *entries)
 122{
 123 int r;
 124
 125 r = -E2BIG;
 126 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 127 goto out;
 128 r = -EFAULT;
 129 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
 130 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
 131 goto out;
 132 vcpu->arch.cpuid_nent = cpuid->nent;
 133 kvm_apic_set_version(vcpu);
 134 kvm_x86_ops->cpuid_update(vcpu);
 135 kvm_update_cpuid(vcpu);
 136 return 0;
 137
 138out:
 139 return r;
 140}
141
/*
 * KVM_GET_CPUID2 handler: copy the vCPU's CPUID table out to
 * userspace.  On any failure (including a too-small buffer) the
 * actual entry count is written back into cpuid->nent so the caller
 * can retry with a large enough buffer.  Returns 0 or -E2BIG/-EFAULT.
 */
 142int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 143 struct kvm_cpuid2 *cpuid,
 144 struct kvm_cpuid_entry2 __user *entries)
 145{
 146 int r;
 147
 148 r = -E2BIG;
 149 if (cpuid->nent < vcpu->arch.cpuid_nent)
 150 goto out;
 151 r = -EFAULT;
 152 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
 153 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
 154 goto out;
 155 return 0;
 156
 157out:
 158 cpuid->nent = vcpu->arch.cpuid_nent;
 159 return r;
 160}
161
/*
 * Clear from *word any feature bits the boot CPU does not have,
 * using the host's cached capability word @wordnum.
 */
 162static void cpuid_mask(u32 *word, int wordnum)
 163{
 164 *word &= boot_cpu_data.x86_capability[wordnum];
 165}
166
/*
 * Fill one CPUID entry by executing CPUID with the given
 * function/index on the current CPU; flags start cleared and are
 * adjusted by the caller.
 */
 167static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 168 u32 index)
 169{
 170 entry->function = function;
 171 entry->index = index;
 172 cpuid_count(entry->function, entry->index,
 173 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
 174 entry->flags = 0;
 175}
176
/*
 * True when xstate component @bit is one KVM exposes to guests
 * (x87 FP, SSE or AVX/YMM) AND the host's XCR0 actually enables it.
 */
 177static bool supported_xcr0_bit(unsigned bit)
 178{
 179 u64 mask = ((u64)1 << bit);
 180
 181 return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
 182}
183
/* F(x): bit mask for feature x, local shorthand for the tables below. */
 184#define F(x) bit(X86_FEATURE_##x)
 185
/*
 * Build the KVM-supported CPUID entry (or entries) for @function,
 * starting at @entry and bumping *nent for each entry produced (up to
 * @maxnent).  The raw host CPUID output is read on the current CPU and
 * then masked against the kvm_supported_word* tables plus the host's
 * capability words, so guests are only offered features KVM can
 * virtualize.  Multi-index leaves (2, 4, 0xb, 0xd) emit several
 * consecutive entries.
 */
 186static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 187 u32 index, int *nent, int maxnent)
 188{
 /* Features whose presence depends on the host / vendor module. */
 189 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
 190#ifdef CONFIG_X86_64
 191 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
 192 ? F(GBPAGES) : 0;
 193 unsigned f_lm = F(LM);
 194#else
 195 unsigned f_gbpages = 0;
 196 unsigned f_lm = 0;
 197#endif
 198 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 199
 200 /* cpuid 1.edx */
 201 const u32 kvm_supported_word0_x86_features =
 202 F(FPU) | F(VME) | F(DE) | F(PSE) |
 203 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
 204 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
 205 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
 206 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
 207 0 /* Reserved, DS, ACPI */ | F(MMX) |
 208 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
 209 0 /* HTT, TM, Reserved, PBE */;
 210 /* cpuid 0x80000001.edx */
 211 const u32 kvm_supported_word1_x86_features =
 212 F(FPU) | F(VME) | F(DE) | F(PSE) |
 213 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
 214 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
 215 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
 216 F(PAT) | F(PSE36) | 0 /* Reserved */ |
 217 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
 218 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
 219 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
 220 /* cpuid 1.ecx */
 221 const u32 kvm_supported_word4_x86_features =
 222 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
 223 0 /* DS-CPL, VMX, SMX, EST */ |
 224 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
 225 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
 226 0 /* Reserved, DCA */ | F(XMM4_1) |
 227 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
 228 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
 229 F(F16C) | F(RDRAND);
 230 /* cpuid 0x80000001.ecx */
 231 const u32 kvm_supported_word6_x86_features =
 232 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
 233 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
 234 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
 235 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 236
 237 /* cpuid 0xC0000001.edx */
 238 const u32 kvm_supported_word5_x86_features =
 239 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
 240 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
 241 F(PMM) | F(PMM_EN);
 242
 243 /* cpuid 7.0.ebx */
 244 const u32 kvm_supported_word9_x86_features =
 245 F(SMEP) | F(FSGSBASE) | F(ERMS);
 246
 247 /* all calls to cpuid_count() should be made on the same cpu */
 248 get_cpu();
 249 do_cpuid_1_ent(entry, function, index);
 250 ++*nent;
 251
 252 switch (function) {
 253 case 0:
 /* Cap the max standard leaf at 0xd (XSAVE enumeration). */
 254 entry->eax = min(entry->eax, (u32)0xd);
 255 break;
 256 case 1:
 257 entry->edx &= kvm_supported_word0_x86_features;
 258 cpuid_mask(&entry->edx, 0);
 259 entry->ecx &= kvm_supported_word4_x86_features;
 260 cpuid_mask(&entry->ecx, 4);
 261 /* we support x2apic emulation even if host does not support
 262 * it since we emulate x2apic in software */
 263 entry->ecx |= F(X2APIC);
 264 break;
 265 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
 266 * may return different values. This forces us to get_cpu() before
 267 * issuing the first command, and also to emulate this annoying behavior
 268 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
 269 case 2: {
 /* AL of leaf 2 says how many times CPUID.2 must be executed. */
 270 int t, times = entry->eax & 0xff;
 271
 272 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
 273 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
 274 for (t = 1; t < times && *nent < maxnent; ++t) {
 275 do_cpuid_1_ent(&entry[t], function, 0);
 276 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
 277 ++*nent;
 278 }
 279 break;
 280 }
 281 /* function 4 has additional index. */
 282 case 4: {
 283 int i, cache_type;
 284
 285 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 286 /* read more entries until cache_type is zero */
 287 for (i = 1; *nent < maxnent; ++i) {
 288 cache_type = entry[i - 1].eax & 0x1f;
 289 if (!cache_type)
 290 break;
 291 do_cpuid_1_ent(&entry[i], function, i);
 292 entry[i].flags |=
 293 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 294 ++*nent;
 295 }
 296 break;
 297 }
 298 case 7: {
 299 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 300 /* Mask ebx against host capability word 9 */
 301 if (index == 0) {
 302 entry->ebx &= kvm_supported_word9_x86_features;
 303 cpuid_mask(&entry->ebx, 9);
 304 } else
 305 entry->ebx = 0;
 306 entry->eax = 0;
 307 entry->ecx = 0;
 308 entry->edx = 0;
 309 break;
 310 }
 311 case 9:
 312 break;
 313 /* function 0xb has additional index. */
 314 case 0xb: {
 315 int i, level_type;
 316
 317 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 318 /* read more entries until level_type is zero */
 319 for (i = 1; *nent < maxnent; ++i) {
 320 level_type = entry[i - 1].ecx & 0xff00;
 321 if (!level_type)
 322 break;
 323 do_cpuid_1_ent(&entry[i], function, i);
 324 entry[i].flags |=
 325 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 326 ++*nent;
 327 }
 328 break;
 329 }
 330 case 0xd: {
 331 int idx, i;
 332
 333 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 /* Slot i is only kept (and counted) for xstate components that
  * exist and that KVM actually supports; otherwise it is reused. */
 334 for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
 335 do_cpuid_1_ent(&entry[i], function, idx);
 336 if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
 337 continue;
 338 entry[i].flags |=
 339 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 340 ++*nent;
 341 ++i;
 342 }
 343 break;
 344 }
 345 case KVM_CPUID_SIGNATURE: {
 /* 12-byte hypervisor signature, returned in ebx/ecx/edx. */
 346 char signature[12] = "KVMKVMKVM\0\0";
 347 u32 *sigptr = (u32 *)signature;
 348 entry->eax = 0;
 349 entry->ebx = sigptr[0];
 350 entry->ecx = sigptr[1];
 351 entry->edx = sigptr[2];
 352 break;
 353 }
 354 case KVM_CPUID_FEATURES:
 355 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
 356 (1 << KVM_FEATURE_NOP_IO_DELAY) |
 357 (1 << KVM_FEATURE_CLOCKSOURCE2) |
 358 (1 << KVM_FEATURE_ASYNC_PF) |
 359 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
 360
 361 if (sched_info_on())
 362 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
 363
 364 entry->ebx = 0;
 365 entry->ecx = 0;
 366 entry->edx = 0;
 367 break;
 368 case 0x80000000:
 /* Cap the max extended leaf at 0x8000001a. */
 369 entry->eax = min(entry->eax, 0x8000001a);
 370 break;
 371 case 0x80000001:
 372 entry->edx &= kvm_supported_word1_x86_features;
 373 cpuid_mask(&entry->edx, 1);
 374 entry->ecx &= kvm_supported_word6_x86_features;
 375 cpuid_mask(&entry->ecx, 6);
 376 break;
 377 case 0x80000008: {
 /* Physical/virtual address width reporting; fall back to the
  * host physical width when the guest-physical field is zero. */
 378 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
 379 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
 380 unsigned phys_as = entry->eax & 0xff;
 381
 382 if (!g_phys_as)
 383 g_phys_as = phys_as;
 384 entry->eax = g_phys_as | (virt_as << 8);
 385 entry->ebx = entry->edx = 0;
 386 break;
 387 }
 388 case 0x80000019:
 389 entry->ecx = entry->edx = 0;
 390 break;
 391 case 0x8000001a:
 392 break;
 393 case 0x8000001d:
 394 break;
 395 /*Add support for Centaur's CPUID instruction*/
 396 case 0xC0000000:
 397 /*Just support up to 0xC0000004 now*/
 398 entry->eax = min(entry->eax, 0xC0000004);
 399 break;
 400 case 0xC0000001:
 401 entry->edx &= kvm_supported_word5_x86_features;
 402 cpuid_mask(&entry->edx, 5);
 403 break;
 /* Leaves KVM does not virtualize report all-zero registers. */
 404 case 3: /* Processor serial number */
 405 case 5: /* MONITOR/MWAIT */
 406 case 6: /* Thermal management */
 407 case 0xA: /* Architectural Performance Monitoring */
 408 case 0x80000007: /* Advanced power management */
 409 case 0xC0000002:
 410 case 0xC0000003:
 411 case 0xC0000004:
 412 default:
 413 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
 414 break;
 415 }
 416
 /* Give the vendor module (vmx/svm) a final pass over the entry. */
 417 kvm_x86_ops->set_supported_cpuid(function, entry);
 418
 419 put_cpu();
 420}
 421
 422#undef F
423
/*
 * KVM_GET_SUPPORTED_CPUID handler: enumerate every CPUID leaf KVM can
 * virtualize -- standard leaves, extended (0x8xxxxxxx) leaves, the
 * Centaur (0xCxxxxxxx) range on Centaur hosts, and the two KVM
 * paravirt leaves -- into a temporary buffer and copy it to userspace.
 * Returns -E2BIG whenever the caller's buffer fills up before
 * enumeration is complete (cpuid->nent is the capacity on input and
 * the produced count on success); also -ENOMEM/-EFAULT.
 */
 424int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
 425 struct kvm_cpuid_entry2 __user *entries)
 426{
 427 struct kvm_cpuid_entry2 *cpuid_entries;
 428 int limit, nent = 0, r = -E2BIG;
 429 u32 func;
 430
 431 if (cpuid->nent < 1)
 432 goto out;
 433 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 434 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
 435 r = -ENOMEM;
 436 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
 437 if (!cpuid_entries)
 438 goto out;
 439
 /* Standard leaves: leaf 0's eax gives the highest supported leaf. */
 440 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
 441 limit = cpuid_entries[0].eax;
 442 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
 443 do_cpuid_ent(&cpuid_entries[nent], func, 0,
 444 &nent, cpuid->nent);
 445 r = -E2BIG;
 446 if (nent >= cpuid->nent)
 447 goto out_free;
 448
 /* Extended leaves, bounded by 0x80000000's reported maximum. */
 449 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
 450 limit = cpuid_entries[nent - 1].eax;
 451 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
 452 do_cpuid_ent(&cpuid_entries[nent], func, 0,
 453 &nent, cpuid->nent);
 454
 455
 456
 457 r = -E2BIG;
 458 if (nent >= cpuid->nent)
 459 goto out_free;
 460
 461 /* Add support for Centaur's CPUID instruction. */
 462 if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
 463 do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
 464 &nent, cpuid->nent);
 465
 466 r = -E2BIG;
 467 if (nent >= cpuid->nent)
 468 goto out_free;
 469
 470 limit = cpuid_entries[nent - 1].eax;
 471 for (func = 0xC0000001;
 472 func <= limit && nent < cpuid->nent; ++func)
 473 do_cpuid_ent(&cpuid_entries[nent], func, 0,
 474 &nent, cpuid->nent);
 475
 476 r = -E2BIG;
 477 if (nent >= cpuid->nent)
 478 goto out_free;
 479 }
 480
 /* KVM paravirt leaves: hypervisor signature + feature bits. */
 481 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
 482 cpuid->nent);
 483
 484 r = -E2BIG;
 485 if (nent >= cpuid->nent)
 486 goto out_free;
 487
 488 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
 489 cpuid->nent);
 490
 491 r = -E2BIG;
 492 if (nent >= cpuid->nent)
 493 goto out_free;
 494
 495 r = -EFAULT;
 496 if (copy_to_user(entries, cpuid_entries,
 497 nent * sizeof(struct kvm_cpuid_entry2)))
 498 goto out_free;
 499 cpuid->nent = nent;
 500 r = 0;
 501
 502out_free:
 503 vfree(cpuid_entries);
 504out:
 505 return r;
 506}
507
/*
 * For a STATEFUL CPUID function (leaf 2), advance the "read next"
 * marker: clear it on entry i and set it on the next table entry with
 * the same function number, wrapping around; when none follows,
 * wrap-around reselects entry i itself, which bounds the loop.
 * NOTE(review): the first probe uses j = i + 1 *before* the modulo is
 * applied, so if i is the last entry (i + 1 == nent) the initial read
 * touches cpuid_entries[nent] -- looks out of range; confirm callers
 * can never pass the final index here, or pre-reduce j modulo nent.
 */
 508static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 509{
 510 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
 511 int j, nent = vcpu->arch.cpuid_nent;
 512
 513 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 514 /* when no next entry is found, the current entry[i] is reselected */
 515 for (j = i + 1; ; j = (j + 1) % nent) {
 516 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
 517 if (ej->function == e->function) {
 518 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
 519 return j;
 520 }
 521 }
 522 return 0; /* silence gcc, even though control never reaches here */
 523}
524
 525/* find an entry with matching function, matching index (if needed), and that
 526 * should be read next (if it's stateful) */
/*
 * Returns 1 when entry e satisfies a lookup for (function, index):
 * the function must match; the index only matters for entries flagged
 * SIGNIFCANT_INDEX; stateful entries additionally require the
 * STATE_READ_NEXT marker.
 */
 527static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
 528 u32 function, u32 index)
 529{
 530 if (e->function != function)
 531 return 0;
 532 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
 533 return 0;
 534 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
 535 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
 536 return 0;
 537 return 1;
 538}
539
/*
 * Linear search of the vCPU's CPUID table for (function, index);
 * returns the matching entry or NULL.  For stateful functions the
 * "read next" marker is advanced as a side effect, emulating the
 * hardware behavior of leaf 2.
 */
 540struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 541 u32 function, u32 index)
 542{
 543 int i;
 544 struct kvm_cpuid_entry2 *best = NULL;
 545
 546 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
 547 struct kvm_cpuid_entry2 *e;
 548
 549 e = &vcpu->arch.cpuid_entries[i];
 550 if (is_matching_cpuid_entry(e, function, index)) {
 551 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
 552 move_to_next_stateful_cpuid_entry(vcpu, i);
 553 best = e;
 554 break;
 555 }
 556 }
 557 return best;
 558}
 559EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
560
/*
 * Guest physical address width in bits, taken from guest CPUID leaf
 * 0x80000008 (eax[7:0]).  Falls back to 36 when the guest's CPUID
 * table does not reach that leaf.
 */
 561int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 562{
 563 struct kvm_cpuid_entry2 *best;
 564
 565 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
 566 if (!best || best->eax < 0x80000008)
 567 goto not_found;
 568 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
 569 if (best)
 570 return best->eax & 0xff;
 571not_found:
 572 return 36;
 573}
574
 575/*
 576 * If no match is found, check whether we exceed the vCPU's limit
 577 * and return the content of the highest valid _standard_ leaf instead.
 578 * This is to satisfy the CPUID specification.
 579 */
/* Used as the fallback path of kvm_emulate_cpuid() when the requested
 * (function, index) has no entry: out-of-range extended requests are
 * redirected to the standard range's maximum leaf. */
 580static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
 581 u32 function, u32 index)
 582{
 583 struct kvm_cpuid_entry2 *maxlevel;
 584
 /* function & 0x80000000 selects leaf 0 or leaf 0x80000000, whose
  * eax holds the highest valid leaf of that range. */
 585 maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
 586 if (!maxlevel || maxlevel->eax >= function)
 587 return NULL;
 588 if (function & 0x80000000) {
 589 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
 590 if (!maxlevel)
 591 return NULL;
 592 }
 593 return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
 594}
595
/*
 * Emulate a guest CPUID instruction: read function/index from guest
 * RAX/RCX, look the leaf up in the vCPU's CPUID table (with the
 * over-limit fallback from check_cpuid_limit()), write the results to
 * RAX/RBX/RCX/RDX -- all zeroes when nothing matches -- skip the
 * instruction, and emit the kvm_cpuid tracepoint.
 */
 596void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 597{
 598 u32 function, index;
 599 struct kvm_cpuid_entry2 *best;
 600
 601 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
 602 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
 /* Default output is all zeroes if no entry is found. */
 603 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
 604 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
 605 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
 606 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
 607 best = kvm_find_cpuid_entry(vcpu, function, index);
 608
 609 if (!best)
 610 best = check_cpuid_limit(vcpu, function, index);
 611
 612 if (best) {
 613 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
 614 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
 615 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
 616 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
 617 }
 618 kvm_x86_ops->skip_emulated_instruction(vcpu);
 619 trace_kvm_cpuid(function,
 620 kvm_register_read(vcpu, VCPU_REGS_RAX),
 621 kvm_register_read(vcpu, VCPU_REGS_RBX),
 622 kvm_register_read(vcpu, VCPU_REGS_RCX),
 623 kvm_register_read(vcpu, VCPU_REGS_RDX));
 624}
 625EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
new file mode 100644
index 000000000000..5b97e1797a6d
--- /dev/null
+++ b/arch/x86/kvm/cpuid.h
@@ -0,0 +1,46 @@
/* Public interface of arch/x86/kvm/cpuid.c: guest CPUID table
 * management and feature-bit queries used by the rest of KVM. */
1#ifndef ARCH_X86_KVM_CPUID_H
2#define ARCH_X86_KVM_CPUID_H
3
4#include "x86.h"
5
6void kvm_update_cpuid(struct kvm_vcpu *vcpu);
7struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
8 u32 function, u32 index);
9int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
10 struct kvm_cpuid_entry2 __user *entries);
11int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
12 struct kvm_cpuid *cpuid,
13 struct kvm_cpuid_entry __user *entries);
14int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
15 struct kvm_cpuid2 *cpuid,
16 struct kvm_cpuid_entry2 __user *entries);
17int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18 struct kvm_cpuid2 *cpuid,
19 struct kvm_cpuid_entry2 __user *entries);
20
21
/* True when guest CPUID leaf 1 advertises XSAVE. */
22static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
23{
24 struct kvm_cpuid_entry2 *best;
25
26 best = kvm_find_cpuid_entry(vcpu, 1, 0);
27 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
28}
29
/* True when guest CPUID leaf 7 (index 0) advertises SMEP. */
30static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
31{
32 struct kvm_cpuid_entry2 *best;
33
34 best = kvm_find_cpuid_entry(vcpu, 7, 0);
35 return best && (best->ebx & bit(X86_FEATURE_SMEP));
36}
37
/* True when guest CPUID leaf 7 (index 0) advertises FSGSBASE. */
38static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
39{
40 struct kvm_cpuid_entry2 *best;
41
42 best = kvm_find_cpuid_entry(vcpu, 7, 0);
43 return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
44}
45
46#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 54abb40199d6..a7f3e655cd3e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -38,6 +38,7 @@
38#include "irq.h" 38#include "irq.h"
39#include "trace.h" 39#include "trace.h"
40#include "x86.h" 40#include "x86.h"
41#include "cpuid.h"
41 42
42#ifndef CONFIG_X86_64 43#ifndef CONFIG_X86_64
43#define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) 44#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8f19d91ec3e7..4ceced2669ef 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -18,6 +18,7 @@
18 18
19#include "irq.h" 19#include "irq.h"
20#include "mmu.h" 20#include "mmu.h"
21#include "cpuid.h"
21 22
22#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
23#include <linux/module.h> 24#include <linux/module.h>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b6776c613e6d..4e533d24c513 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -26,6 +26,7 @@
26#include "tss.h" 26#include "tss.h"
27#include "kvm_cache_regs.h" 27#include "kvm_cache_regs.h"
28#include "x86.h" 28#include "x86.h"
29#include "cpuid.h"
29 30
30#include <linux/clocksource.h> 31#include <linux/clocksource.h>
31#include <linux/interrupt.h> 32#include <linux/interrupt.h>
@@ -82,8 +83,6 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
82#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 83#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
83 84
84static void update_cr8_intercept(struct kvm_vcpu *vcpu); 85static void update_cr8_intercept(struct kvm_vcpu *vcpu);
85static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
86 struct kvm_cpuid_entry2 __user *entries);
87static void process_nmi(struct kvm_vcpu *vcpu); 86static void process_nmi(struct kvm_vcpu *vcpu);
88 87
89struct kvm_x86_ops *kvm_x86_ops; 88struct kvm_x86_ops *kvm_x86_ops;
@@ -574,54 +573,6 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
574} 573}
575EXPORT_SYMBOL_GPL(kvm_set_xcr); 574EXPORT_SYMBOL_GPL(kvm_set_xcr);
576 575
577static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
578{
579 struct kvm_cpuid_entry2 *best;
580
581 best = kvm_find_cpuid_entry(vcpu, 1, 0);
582 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
583}
584
585static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
586{
587 struct kvm_cpuid_entry2 *best;
588
589 best = kvm_find_cpuid_entry(vcpu, 7, 0);
590 return best && (best->ebx & bit(X86_FEATURE_SMEP));
591}
592
593static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
594{
595 struct kvm_cpuid_entry2 *best;
596
597 best = kvm_find_cpuid_entry(vcpu, 7, 0);
598 return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
599}
600
601static void update_cpuid(struct kvm_vcpu *vcpu)
602{
603 struct kvm_cpuid_entry2 *best;
604 struct kvm_lapic *apic = vcpu->arch.apic;
605
606 best = kvm_find_cpuid_entry(vcpu, 1, 0);
607 if (!best)
608 return;
609
610 /* Update OSXSAVE bit */
611 if (cpu_has_xsave && best->function == 0x1) {
612 best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
613 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
614 best->ecx |= bit(X86_FEATURE_OSXSAVE);
615 }
616
617 if (apic) {
618 if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
619 apic->lapic_timer.timer_mode_mask = 3 << 17;
620 else
621 apic->lapic_timer.timer_mode_mask = 1 << 17;
622 }
623}
624
625int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 576int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
626{ 577{
627 unsigned long old_cr4 = kvm_read_cr4(vcpu); 578 unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -655,7 +606,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
655 kvm_mmu_reset_context(vcpu); 606 kvm_mmu_reset_context(vcpu);
656 607
657 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) 608 if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
658 update_cpuid(vcpu); 609 kvm_update_cpuid(vcpu);
659 610
660 return 0; 611 return 0;
661} 612}
@@ -2265,466 +2216,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2265 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu); 2216 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
2266} 2217}
2267 2218
2268static int is_efer_nx(void)
2269{
2270 unsigned long long efer = 0;
2271
2272 rdmsrl_safe(MSR_EFER, &efer);
2273 return efer & EFER_NX;
2274}
2275
2276static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2277{
2278 int i;
2279 struct kvm_cpuid_entry2 *e, *entry;
2280
2281 entry = NULL;
2282 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2283 e = &vcpu->arch.cpuid_entries[i];
2284 if (e->function == 0x80000001) {
2285 entry = e;
2286 break;
2287 }
2288 }
2289 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
2290 entry->edx &= ~(1 << 20);
2291 printk(KERN_INFO "kvm: guest NX capability removed\n");
2292 }
2293}
2294
2295/* when an old userspace process fills a new kernel module */
2296static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2297 struct kvm_cpuid *cpuid,
2298 struct kvm_cpuid_entry __user *entries)
2299{
2300 int r, i;
2301 struct kvm_cpuid_entry *cpuid_entries;
2302
2303 r = -E2BIG;
2304 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2305 goto out;
2306 r = -ENOMEM;
2307 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
2308 if (!cpuid_entries)
2309 goto out;
2310 r = -EFAULT;
2311 if (copy_from_user(cpuid_entries, entries,
2312 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2313 goto out_free;
2314 for (i = 0; i < cpuid->nent; i++) {
2315 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
2316 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
2317 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
2318 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
2319 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
2320 vcpu->arch.cpuid_entries[i].index = 0;
2321 vcpu->arch.cpuid_entries[i].flags = 0;
2322 vcpu->arch.cpuid_entries[i].padding[0] = 0;
2323 vcpu->arch.cpuid_entries[i].padding[1] = 0;
2324 vcpu->arch.cpuid_entries[i].padding[2] = 0;
2325 }
2326 vcpu->arch.cpuid_nent = cpuid->nent;
2327 cpuid_fix_nx_cap(vcpu);
2328 r = 0;
2329 kvm_apic_set_version(vcpu);
2330 kvm_x86_ops->cpuid_update(vcpu);
2331 update_cpuid(vcpu);
2332
2333out_free:
2334 vfree(cpuid_entries);
2335out:
2336 return r;
2337}
2338
2339static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
2340 struct kvm_cpuid2 *cpuid,
2341 struct kvm_cpuid_entry2 __user *entries)
2342{
2343 int r;
2344
2345 r = -E2BIG;
2346 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2347 goto out;
2348 r = -EFAULT;
2349 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
2350 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
2351 goto out;
2352 vcpu->arch.cpuid_nent = cpuid->nent;
2353 kvm_apic_set_version(vcpu);
2354 kvm_x86_ops->cpuid_update(vcpu);
2355 update_cpuid(vcpu);
2356 return 0;
2357
2358out:
2359 return r;
2360}
2361
2362static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
2363 struct kvm_cpuid2 *cpuid,
2364 struct kvm_cpuid_entry2 __user *entries)
2365{
2366 int r;
2367
2368 r = -E2BIG;
2369 if (cpuid->nent < vcpu->arch.cpuid_nent)
2370 goto out;
2371 r = -EFAULT;
2372 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
2373 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
2374 goto out;
2375 return 0;
2376
2377out:
2378 cpuid->nent = vcpu->arch.cpuid_nent;
2379 return r;
2380}
2381
2382static void cpuid_mask(u32 *word, int wordnum)
2383{
2384 *word &= boot_cpu_data.x86_capability[wordnum];
2385}
2386
2387static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2388 u32 index)
2389{
2390 entry->function = function;
2391 entry->index = index;
2392 cpuid_count(entry->function, entry->index,
2393 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
2394 entry->flags = 0;
2395}
2396
2397static bool supported_xcr0_bit(unsigned bit)
2398{
2399 u64 mask = ((u64)1 << bit);
2400
2401 return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
2402}
2403
2404#define F(x) bit(X86_FEATURE_##x)
2405
2406static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2407 u32 index, int *nent, int maxnent)
2408{
2409 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
2410#ifdef CONFIG_X86_64
2411 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
2412 ? F(GBPAGES) : 0;
2413 unsigned f_lm = F(LM);
2414#else
2415 unsigned f_gbpages = 0;
2416 unsigned f_lm = 0;
2417#endif
2418 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
2419
2420 /* cpuid 1.edx */
2421 const u32 kvm_supported_word0_x86_features =
2422 F(FPU) | F(VME) | F(DE) | F(PSE) |
2423 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2424 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
2425 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2426 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
2427 0 /* Reserved, DS, ACPI */ | F(MMX) |
2428 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
2429 0 /* HTT, TM, Reserved, PBE */;
2430 /* cpuid 0x80000001.edx */
2431 const u32 kvm_supported_word1_x86_features =
2432 F(FPU) | F(VME) | F(DE) | F(PSE) |
2433 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2434 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
2435 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2436 F(PAT) | F(PSE36) | 0 /* Reserved */ |
2437 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
2438 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
2439 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
2440 /* cpuid 1.ecx */
2441 const u32 kvm_supported_word4_x86_features =
2442 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
2443 0 /* DS-CPL, VMX, SMX, EST */ |
2444 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
2445 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
2446 0 /* Reserved, DCA */ | F(XMM4_1) |
2447 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
2448 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
2449 F(F16C) | F(RDRAND);
2450 /* cpuid 0x80000001.ecx */
2451 const u32 kvm_supported_word6_x86_features =
2452 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
2453 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2454 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
2455 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
2456
2457 /* cpuid 0xC0000001.edx */
2458 const u32 kvm_supported_word5_x86_features =
2459 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
2460 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
2461 F(PMM) | F(PMM_EN);
2462
2463 /* cpuid 7.0.ebx */
2464 const u32 kvm_supported_word9_x86_features =
2465 F(SMEP) | F(FSGSBASE) | F(ERMS);
2466
2467 /* all calls to cpuid_count() should be made on the same cpu */
2468 get_cpu();
2469 do_cpuid_1_ent(entry, function, index);
2470 ++*nent;
2471
2472 switch (function) {
2473 case 0:
2474 entry->eax = min(entry->eax, (u32)0xd);
2475 break;
2476 case 1:
2477 entry->edx &= kvm_supported_word0_x86_features;
2478 cpuid_mask(&entry->edx, 0);
2479 entry->ecx &= kvm_supported_word4_x86_features;
2480 cpuid_mask(&entry->ecx, 4);
2481 /* we support x2apic emulation even if host does not support
2482 * it since we emulate x2apic in software */
2483 entry->ecx |= F(X2APIC);
2484 break;
2485 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
2486 * may return different values. This forces us to get_cpu() before
2487 * issuing the first command, and also to emulate this annoying behavior
2488 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2489 case 2: {
2490 int t, times = entry->eax & 0xff;
2491
2492 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2493 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2494 for (t = 1; t < times && *nent < maxnent; ++t) {
2495 do_cpuid_1_ent(&entry[t], function, 0);
2496 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2497 ++*nent;
2498 }
2499 break;
2500 }
2501 /* function 4 has additional index. */
2502 case 4: {
2503 int i, cache_type;
2504
2505 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2506 /* read more entries until cache_type is zero */
2507 for (i = 1; *nent < maxnent; ++i) {
2508 cache_type = entry[i - 1].eax & 0x1f;
2509 if (!cache_type)
2510 break;
2511 do_cpuid_1_ent(&entry[i], function, i);
2512 entry[i].flags |=
2513 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2514 ++*nent;
2515 }
2516 break;
2517 }
2518 case 7: {
2519 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2520 /* Mask ebx against host capability word 9 */
2521 if (index == 0) {
2522 entry->ebx &= kvm_supported_word9_x86_features;
2523 cpuid_mask(&entry->ebx, 9);
2524 } else
2525 entry->ebx = 0;
2526 entry->eax = 0;
2527 entry->ecx = 0;
2528 entry->edx = 0;
2529 break;
2530 }
2531 case 9:
2532 break;
2533 /* function 0xb has additional index. */
2534 case 0xb: {
2535 int i, level_type;
2536
2537 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2538 /* read more entries until level_type is zero */
2539 for (i = 1; *nent < maxnent; ++i) {
2540 level_type = entry[i - 1].ecx & 0xff00;
2541 if (!level_type)
2542 break;
2543 do_cpuid_1_ent(&entry[i], function, i);
2544 entry[i].flags |=
2545 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2546 ++*nent;
2547 }
2548 break;
2549 }
2550 case 0xd: {
2551 int idx, i;
2552
2553 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2554 for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
2555 do_cpuid_1_ent(&entry[i], function, idx);
2556 if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
2557 continue;
2558 entry[i].flags |=
2559 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2560 ++*nent;
2561 ++i;
2562 }
2563 break;
2564 }
2565 case KVM_CPUID_SIGNATURE: {
2566 char signature[12] = "KVMKVMKVM\0\0";
2567 u32 *sigptr = (u32 *)signature;
2568 entry->eax = 0;
2569 entry->ebx = sigptr[0];
2570 entry->ecx = sigptr[1];
2571 entry->edx = sigptr[2];
2572 break;
2573 }
2574 case KVM_CPUID_FEATURES:
2575 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2576 (1 << KVM_FEATURE_NOP_IO_DELAY) |
2577 (1 << KVM_FEATURE_CLOCKSOURCE2) |
2578 (1 << KVM_FEATURE_ASYNC_PF) |
2579 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2580
2581 if (sched_info_on())
2582 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
2583
2584 entry->ebx = 0;
2585 entry->ecx = 0;
2586 entry->edx = 0;
2587 break;
2588 case 0x80000000:
2589 entry->eax = min(entry->eax, 0x8000001a);
2590 break;
2591 case 0x80000001:
2592 entry->edx &= kvm_supported_word1_x86_features;
2593 cpuid_mask(&entry->edx, 1);
2594 entry->ecx &= kvm_supported_word6_x86_features;
2595 cpuid_mask(&entry->ecx, 6);
2596 break;
2597 case 0x80000008: {
2598 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
2599 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
2600 unsigned phys_as = entry->eax & 0xff;
2601
2602 if (!g_phys_as)
2603 g_phys_as = phys_as;
2604 entry->eax = g_phys_as | (virt_as << 8);
2605 entry->ebx = entry->edx = 0;
2606 break;
2607 }
2608 case 0x80000019:
2609 entry->ecx = entry->edx = 0;
2610 break;
2611 case 0x8000001a:
2612 break;
2613 case 0x8000001d:
2614 break;
2615 /*Add support for Centaur's CPUID instruction*/
2616 case 0xC0000000:
2617 /*Just support up to 0xC0000004 now*/
2618 entry->eax = min(entry->eax, 0xC0000004);
2619 break;
2620 case 0xC0000001:
2621 entry->edx &= kvm_supported_word5_x86_features;
2622 cpuid_mask(&entry->edx, 5);
2623 break;
2624 case 3: /* Processor serial number */
2625 case 5: /* MONITOR/MWAIT */
2626 case 6: /* Thermal management */
2627 case 0xA: /* Architectural Performance Monitoring */
2628 case 0x80000007: /* Advanced power management */
2629 case 0xC0000002:
2630 case 0xC0000003:
2631 case 0xC0000004:
2632 default:
2633 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
2634 break;
2635 }
2636
2637 kvm_x86_ops->set_supported_cpuid(function, entry);
2638
2639 put_cpu();
2640}
2641
2642#undef F
2643
2644static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2645 struct kvm_cpuid_entry2 __user *entries)
2646{
2647 struct kvm_cpuid_entry2 *cpuid_entries;
2648 int limit, nent = 0, r = -E2BIG;
2649 u32 func;
2650
2651 if (cpuid->nent < 1)
2652 goto out;
2653 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2654 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2655 r = -ENOMEM;
2656 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2657 if (!cpuid_entries)
2658 goto out;
2659
2660 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2661 limit = cpuid_entries[0].eax;
2662 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2663 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2664 &nent, cpuid->nent);
2665 r = -E2BIG;
2666 if (nent >= cpuid->nent)
2667 goto out_free;
2668
2669 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2670 limit = cpuid_entries[nent - 1].eax;
2671 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2672 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2673 &nent, cpuid->nent);
2674
2675
2676
2677 r = -E2BIG;
2678 if (nent >= cpuid->nent)
2679 goto out_free;
2680
2681 /* Add support for Centaur's CPUID instruction. */
2682 if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
2683 do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
2684 &nent, cpuid->nent);
2685
2686 r = -E2BIG;
2687 if (nent >= cpuid->nent)
2688 goto out_free;
2689
2690 limit = cpuid_entries[nent - 1].eax;
2691 for (func = 0xC0000001;
2692 func <= limit && nent < cpuid->nent; ++func)
2693 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2694 &nent, cpuid->nent);
2695
2696 r = -E2BIG;
2697 if (nent >= cpuid->nent)
2698 goto out_free;
2699 }
2700
2701 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2702 cpuid->nent);
2703
2704 r = -E2BIG;
2705 if (nent >= cpuid->nent)
2706 goto out_free;
2707
2708 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2709 cpuid->nent);
2710
2711 r = -E2BIG;
2712 if (nent >= cpuid->nent)
2713 goto out_free;
2714
2715 r = -EFAULT;
2716 if (copy_to_user(entries, cpuid_entries,
2717 nent * sizeof(struct kvm_cpuid_entry2)))
2718 goto out_free;
2719 cpuid->nent = nent;
2720 r = 0;
2721
2722out_free:
2723 vfree(cpuid_entries);
2724out:
2725 return r;
2726}
2727
2728static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 2219static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2729 struct kvm_lapic_state *s) 2220 struct kvm_lapic_state *s)
2730{ 2221{
@@ -5438,125 +4929,6 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5438 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 4929 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5439} 4930}
5440 4931
5441static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
5442{
5443 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
5444 int j, nent = vcpu->arch.cpuid_nent;
5445
5446 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
5447 /* when no next entry is found, the current entry[i] is reselected */
5448 for (j = i + 1; ; j = (j + 1) % nent) {
5449 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
5450 if (ej->function == e->function) {
5451 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
5452 return j;
5453 }
5454 }
5455 return 0; /* silence gcc, even though control never reaches here */
5456}
5457
5458/* find an entry with matching function, matching index (if needed), and that
5459 * should be read next (if it's stateful) */
5460static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
5461 u32 function, u32 index)
5462{
5463 if (e->function != function)
5464 return 0;
5465 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
5466 return 0;
5467 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
5468 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
5469 return 0;
5470 return 1;
5471}
5472
5473struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
5474 u32 function, u32 index)
5475{
5476 int i;
5477 struct kvm_cpuid_entry2 *best = NULL;
5478
5479 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
5480 struct kvm_cpuid_entry2 *e;
5481
5482 e = &vcpu->arch.cpuid_entries[i];
5483 if (is_matching_cpuid_entry(e, function, index)) {
5484 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
5485 move_to_next_stateful_cpuid_entry(vcpu, i);
5486 best = e;
5487 break;
5488 }
5489 }
5490 return best;
5491}
5492EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
5493
5494int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
5495{
5496 struct kvm_cpuid_entry2 *best;
5497
5498 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
5499 if (!best || best->eax < 0x80000008)
5500 goto not_found;
5501 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
5502 if (best)
5503 return best->eax & 0xff;
5504not_found:
5505 return 36;
5506}
5507
5508/*
5509 * If no match is found, check whether we exceed the vCPU's limit
5510 * and return the content of the highest valid _standard_ leaf instead.
5511 * This is to satisfy the CPUID specification.
5512 */
5513static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
5514 u32 function, u32 index)
5515{
5516 struct kvm_cpuid_entry2 *maxlevel;
5517
5518 maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
5519 if (!maxlevel || maxlevel->eax >= function)
5520 return NULL;
5521 if (function & 0x80000000) {
5522 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
5523 if (!maxlevel)
5524 return NULL;
5525 }
5526 return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
5527}
5528
5529void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
5530{
5531 u32 function, index;
5532 struct kvm_cpuid_entry2 *best;
5533
5534 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
5535 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
5536 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
5537 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
5538 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
5539 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
5540 best = kvm_find_cpuid_entry(vcpu, function, index);
5541
5542 if (!best)
5543 best = check_cpuid_limit(vcpu, function, index);
5544
5545 if (best) {
5546 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
5547 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
5548 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
5549 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
5550 }
5551 kvm_x86_ops->skip_emulated_instruction(vcpu);
5552 trace_kvm_cpuid(function,
5553 kvm_register_read(vcpu, VCPU_REGS_RAX),
5554 kvm_register_read(vcpu, VCPU_REGS_RBX),
5555 kvm_register_read(vcpu, VCPU_REGS_RCX),
5556 kvm_register_read(vcpu, VCPU_REGS_RDX));
5557}
5558EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
5559
5560/* 4932/*
5561 * Check if userspace requested an interrupt window, and that the 4933 * Check if userspace requested an interrupt window, and that the
5562 * interrupt window is open. 4934 * interrupt window is open.
@@ -6222,7 +5594,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6222 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 5594 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
6223 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 5595 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
6224 if (sregs->cr4 & X86_CR4_OSXSAVE) 5596 if (sregs->cr4 & X86_CR4_OSXSAVE)
6225 update_cpuid(vcpu); 5597 kvm_update_cpuid(vcpu);
6226 5598
6227 idx = srcu_read_lock(&vcpu->kvm->srcu); 5599 idx = srcu_read_lock(&vcpu->kvm->srcu);
6228 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 5600 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d36fe237c665..cb80c293cdd8 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
33 return (nr == BP_VECTOR) || (nr == OF_VECTOR); 33 return (nr == BP_VECTOR) || (nr == OF_VECTOR);
34} 34}
35 35
36struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
37 u32 function, u32 index);
38
39static inline bool is_protmode(struct kvm_vcpu *vcpu) 36static inline bool is_protmode(struct kvm_vcpu *vcpu)
40{ 37{
41 return kvm_read_cr0_bits(vcpu, X86_CR0_PE); 38 return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
125 gva_t addr, void *val, unsigned int bytes, 122 gva_t addr, void *val, unsigned int bytes,
126 struct x86_exception *exception); 123 struct x86_exception *exception);
127 124
125extern u64 host_xcr0;
126
128#endif 127#endif