-rw-r--r--   arch/x86/include/asm/kvm_para.h | 22
-rw-r--r--   arch/x86/kernel/kvm.c           | 27
2 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 1df115909758..1679cc799b26 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,28 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	return ret;
 }
 
-static inline uint32_t kvm_cpuid_base(void)
-{
-	if (boot_cpu_data.cpuid_level < 0)
-		return 0;	/* So we don't blow up on old processors */
-
-	if (cpu_has_hypervisor)
-		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
-
-	return 0;
-}
-
-static inline bool kvm_para_available(void)
-{
-	return kvm_cpuid_base() != 0;
-}
-
 static inline unsigned int kvm_arch_para_features(void)
 {
 	return cpuid_eax(KVM_CPUID_FEATURES);
 }
 
 #ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
 void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
@@ -126,6 +111,11 @@ static inline void kvm_spinlock_init(void)
 #define kvm_async_pf_task_wait(T) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
+static inline bool kvm_para_available(void)
+{
+	return 0;
+}
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6dd802c6d780..0823003770fc 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -500,6 +500,33 @@ void __init kvm_guest_init(void)
 #endif
 }
 
+static noinline uint32_t __kvm_cpuid_base(void)
+{
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
+	if (cpu_has_hypervisor)
+		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+
+	return 0;
+}
+
+static inline uint32_t kvm_cpuid_base(void)
+{
+	static int kvm_cpuid_base = -1;
+
+	if (kvm_cpuid_base == -1)
+		kvm_cpuid_base = __kvm_cpuid_base();
+
+	return kvm_cpuid_base;
+}
+
+bool kvm_para_available(void)
+{
+	return kvm_cpuid_base() != 0;
+}
+EXPORT_SYMBOL_GPL(kvm_para_available);
+
 static uint32_t __init kvm_detect(void)
 {
 	return kvm_cpuid_base();
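
With kvm_para_available() moved out of line and exported with EXPORT_SYMBOL_GPL, modular code can presumably call it to detect whether it is running as a KVM guest, and the CPUID probe behind it now runs once and is cached in kvm_cpuid_base()'s static local. Below is a minimal sketch of such a caller, assuming a hypothetical module named "kvm_probe"; the module name and log messages are illustrative only and are not part of this patch.

/*
 * Minimal sketch of a modular caller of the newly exported
 * kvm_para_available(); "kvm_probe" and its messages are made up.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/kvm_para.h>

static int __init kvm_probe_init(void)
{
	/*
	 * Callable from module context only because kvm_para_available()
	 * is now a real symbol exported with EXPORT_SYMBOL_GPL above;
	 * repeated calls reuse the cached CPUID base.
	 */
	if (kvm_para_available())
		pr_info("kvm_probe: running as a KVM guest\n");
	else
		pr_info("kvm_probe: not running under KVM\n");

	return 0;
}

static void __exit kvm_probe_exit(void)
{
}

module_init(kvm_probe_init);
module_exit(kvm_probe_exit);
MODULE_LICENSE("GPL");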
