Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/kvm_para.h | 33
 arch/x86/kernel/kvm.c           | 32
 arch/x86/kvm/cpuid.h            |  8
 arch/x86/kvm/lapic.h            |  2
 arch/x86/kvm/vmx.c              |  9
 arch/x86/kvm/x86.c              | 41
 6 files changed, 89 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 1df115909758..c7678e43465b 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,28 +85,9 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	return ret;
 }
 
-static inline uint32_t kvm_cpuid_base(void)
-{
-	if (boot_cpu_data.cpuid_level < 0)
-		return 0;	/* So we don't blow up on old processors */
-
-	if (cpu_has_hypervisor)
-		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
-
-	return 0;
-}
-
-static inline bool kvm_para_available(void)
-{
-	return kvm_cpuid_base() != 0;
-}
-
-static inline unsigned int kvm_arch_para_features(void)
-{
-	return cpuid_eax(KVM_CPUID_FEATURES);
-}
-
 #ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
 void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
@@ -126,6 +107,16 @@ static inline void kvm_spinlock_init(void)
 #define kvm_async_pf_task_wait(T) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
+static inline bool kvm_para_available(void)
+{
+	return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	return 0;
+}
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd1b362e4a23..713f1b3bad52 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -500,6 +500,38 @@ void __init kvm_guest_init(void)
 #endif
 }
 
+static noinline uint32_t __kvm_cpuid_base(void)
+{
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
+	if (cpu_has_hypervisor)
+		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+
+	return 0;
+}
+
+static inline uint32_t kvm_cpuid_base(void)
+{
+	static int kvm_cpuid_base = -1;
+
+	if (kvm_cpuid_base == -1)
+		kvm_cpuid_base = __kvm_cpuid_base();
+
+	return kvm_cpuid_base;
+}
+
+bool kvm_para_available(void)
+{
+	return kvm_cpuid_base() != 0;
+}
+EXPORT_SYMBOL_GPL(kvm_para_available);
+
+unsigned int kvm_arch_para_features(void)
+{
+	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
+}
+
 static uint32_t __init kvm_detect(void)
 {
 	return kvm_cpuid_base();
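
Taken on its own, the guest-side API after this change is just the two declarations now in kvm_para.h. A minimal, hypothetical caller (have_kvmclock() is illustrative and not part of this patch; KVM_FEATURE_CLOCKSOURCE is the standard feature bit from the uapi kvm_para.h):

/* Hypothetical guest-side caller, for illustration only. */
#include <linux/kvm_para.h>	/* wraps asm/kvm_para.h */

static bool __init have_kvmclock(void)
{
	/*
	 * kvm_para_available() is now out of line (and exported for
	 * modules); the CPUID base leaf is probed once and cached
	 * inside kvm_cpuid_base().
	 */
	if (!kvm_para_available())
		return false;

	/* EAX of KVM_CPUID_FEATURES, read relative to the cached base */
	return kvm_arch_para_features() & (1 << KVM_FEATURE_CLOCKSOURCE);
}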
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f1e4895174b2..a2a1bb7ed8c1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -72,4 +72,12 @@ static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_PCID));
 }
 
+static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+}
+
 #endif
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c8b0d0d2da5c..6a11845fd8b9 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,7 +65,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 		struct kvm_lapic_state *s);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5c8879127cfa..a06f101ef64b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4392,7 +4392,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 msr;
+	struct msr_data apic_base_msr;
 
 	vmx->rmode.vm86_active = 0;
 
@@ -4400,10 +4400,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	kvm_set_cr8(&vmx->vcpu, 0);
-	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&vmx->vcpu))
-		msr |= MSR_IA32_APICBASE_BSP;
-	kvm_set_apic_base(&vmx->vcpu, msr);
+		apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
 
 	vmx_segment_cache_clear(vmx);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c76f7cfdb32..39c28f09dfd5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	/* TODO: reserve bits check */
-	kvm_lapic_set_base(vcpu, data);
+	u64 old_state = vcpu->arch.apic_base &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 new_state = msr_info->data &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+	if (!msr_info->host_initiated &&
+	    ((msr_info->data & reserved_bits) != 0 ||
+	     new_state == X2APIC_ENABLE ||
+	     (new_state == MSR_IA32_APICBASE_ENABLE &&
+	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+	      old_state == 0)))
+		return 1;
+
+	kvm_lapic_set_base(vcpu, msr_info->data);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
@@ -1840,6 +1856,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (__copy_to_user((void __user *)addr, instructions, 4))
 			return 1;
 		kvm->arch.hv_hypercall = data;
+		mark_page_dirty(kvm, gfn);
 		break;
 	}
 	case HV_X64_MSR_REFERENCE_TSC: {
@@ -1868,19 +1885,21 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
 	case HV_X64_MSR_APIC_ASSIST_PAGE: {
+		u64 gfn;
 		unsigned long addr;
 
 		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
 			vcpu->arch.hv_vapic = data;
 			break;
 		}
-		addr = gfn_to_hva(vcpu->kvm, data >>
-				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+		addr = gfn_to_hva(vcpu->kvm, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
 		if (__clear_user((void __user *)addr, PAGE_SIZE))
 			return 1;
 		vcpu->arch.hv_vapic = data;
+		mark_page_dirty(vcpu->kvm, gfn);
 		break;
 	}
 	case HV_X64_MSR_EOI:
@@ -2006,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case 0x200 ... 0x2ff:
 		return set_msr_mtrr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
-		kvm_set_apic_base(vcpu, data);
-		break;
+		return kvm_set_apic_base(vcpu, msr_info);
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
 	case MSR_IA32_TSCDEADLINE:
@@ -2598,10 +2616,10 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_GET_TSC_KHZ:
 	case KVM_CAP_KVMCLOCK_CTRL:
 	case KVM_CAP_READONLY_MEM:
+	case KVM_CAP_HYPERV_TIME:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
-	case KVM_CAP_HYPERV_TIME:
 #endif
 		r = 1;
 		break;
@@ -6409,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
+	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
@@ -6432,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
-	kvm_set_apic_base(vcpu, sregs->apic_base);
+	apic_base_msr.data = sregs->apic_base;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(vcpu, &apic_base_msr);
 
 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
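
For reference, the APICBASE rules added in kvm_set_apic_base() above can be modelled in isolation. A standalone sketch (hypothetical user-space program, not part of the patch; the reserved-bits check against cpuid_maxphyaddr() is omitted): the mode bits are xAPIC global enable (bit 11) and x2APIC enable (bit 10), and a non-host-initiated write may not set x2APIC without the global enable, leave x2APIC directly for xAPIC, or enter x2APIC directly from the disabled state.

/* Hypothetical user-space model of the transition check, illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define APICBASE_ENABLE	(1ULL << 11)	/* MSR_IA32_APICBASE_ENABLE */
#define X2APIC_ENABLE	(1ULL << 10)	/* X2APIC_ENABLE */

static bool apic_base_transition_ok(unsigned long long old,
				    unsigned long long new)
{
	old &= APICBASE_ENABLE | X2APIC_ENABLE;
	new &= APICBASE_ENABLE | X2APIC_ENABLE;

	if (new == X2APIC_ENABLE)	/* x2APIC bit without global enable */
		return false;
	if (new == APICBASE_ENABLE &&
	    old == (APICBASE_ENABLE | X2APIC_ENABLE))
		return false;		/* x2APIC -> xAPIC must pass through disabled */
	if (new == (APICBASE_ENABLE | X2APIC_ENABLE) && old == 0)
		return false;		/* disabled -> x2APIC must pass through xAPIC */
	return true;
}

int main(void)
{
	const unsigned long long s[] = { 0, APICBASE_ENABLE,
					 APICBASE_ENABLE | X2APIC_ENABLE };

	/* Print the full 3x3 matrix of mode transitions. */
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			printf("%03llx -> %03llx: %s\n", s[i], s[j],
			       apic_base_transition_ok(s[i], s[j]) ?
			       "ok" : "reject");
	return 0;
}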