about summary refs log tree commit diff stats
path: root/arch/x86/kernel/kvm.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r-- arch/x86/kernel/kvm.c | 35
1 file changed, 33 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6dd802c6d780..0331cb389d68 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -417,7 +417,6 @@ void kvm_disable_steal_time(void)
417#ifdef CONFIG_SMP 417#ifdef CONFIG_SMP
418static void __init kvm_smp_prepare_boot_cpu(void) 418static void __init kvm_smp_prepare_boot_cpu(void)
419{ 419{
420 WARN_ON(kvm_register_clock("primary cpu clock"));
421 kvm_guest_cpu_init(); 420 kvm_guest_cpu_init();
422 native_smp_prepare_boot_cpu(); 421 native_smp_prepare_boot_cpu();
423 kvm_spinlock_init(); 422 kvm_spinlock_init();
@@ -500,6 +499,38 @@ void __init kvm_guest_init(void)
500#endif 499#endif
501} 500}
502 501
502static noinline uint32_t __kvm_cpuid_base(void)
503{
504 if (boot_cpu_data.cpuid_level < 0)
505 return 0; /* So we don't blow up on old processors */
506
507 if (cpu_has_hypervisor)
508 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
509
510 return 0;
511}
512
/*
 * Return the cached CPUID base of the KVM hypervisor leaves, probing once
 * on first call.  Returns 0 when no KVM signature was detected.
 *
 * NOTE(review): the lazy init is not synchronized; presumably all callers
 * run during early boot on a single CPU — confirm before adding new call
 * sites.
 */
static inline uint32_t kvm_cpuid_base(void)
{
	/* -1 = not yet probed.  Renamed from "kvm_cpuid_base": the original
	 * local shadowed the enclosing function's own name (-Wshadow). */
	static int cpuid_base = -1;

	if (cpuid_base == -1)
		cpuid_base = __kvm_cpuid_base();

	return cpuid_base;
}
522
/*
 * Report whether the KVM paravirtual interface is available, i.e. whether
 * the KVM CPUID signature was found on this processor.
 */
bool kvm_para_available(void)
{
	if (kvm_cpuid_base() != 0)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(kvm_para_available);
528
529unsigned int kvm_arch_para_features(void)
530{
531 return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
532}
533
503static uint32_t __init kvm_detect(void) 534static uint32_t __init kvm_detect(void)
504{ 535{
505 return kvm_cpuid_base(); 536 return kvm_cpuid_base();
@@ -673,7 +704,7 @@ static cpumask_t waiting_cpus;
673/* Track spinlock on which a cpu is waiting */ 704/* Track spinlock on which a cpu is waiting */
674static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting); 705static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
675 706
676static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) 707__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
677{ 708{
678 struct kvm_lock_waiting *w; 709 struct kvm_lock_waiting *w;
679 int cpu; 710 int cpu;