Diffstat (limited to 'arch/x86/kvm')

-rw-r--r--	arch/x86/kvm/mmu.c	 9
-rw-r--r--	arch/x86/kvm/svm.c	 6
-rw-r--r--	arch/x86/kvm/vmx.c	24
-rw-r--r--	arch/x86/kvm/x86.c	27
-rw-r--r--	arch/x86/kvm/x86.h	 5

5 files changed, 41 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a51..fb8b376bf28c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
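
Annotation (not part of the patch): set_spte_track_bits() now reports whether the spte it overwrote was actually present, and drop_spte() calls rmap_remove() only in that case, so overwriting a non-present spte no longer touches the rmap chain. Below is a minimal userspace sketch of the pattern; the stub types and the simplified present-bit test are illustrative assumptions, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Stand-in for the kernel's presence check; the real test is richer. */
static bool is_rmap_spte(u64 spte)
{
	return spte & 1;
}

/* After the patch: report whether the overwritten spte was present. */
static int set_spte_track_bits(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	*sptep = new_spte;
	if (!is_rmap_spte(old_spte))
		return 0;
	/* accessed/dirty bit propagation elided */
	return 1;
}

static void rmap_remove(u64 *sptep)
{
	printf("rmap_remove(%p)\n", (void *)sptep);
}

static void drop_spte(u64 *sptep, u64 new_spte)
{
	/* Only unlink the rmap entry if a present spte was really dropped. */
	if (set_spte_track_bits(sptep, new_spte))
		rmap_remove(sptep);
}

int main(void)
{
	u64 spte = 0;		/* not present */

	drop_spte(&spte, 0);	/* no rmap_remove() */
	spte = 0x1001;		/* present */
	drop_spte(&spte, 0);	/* rmap_remove() runs */
	return 0;
}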
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 82e144a4e514..b81a9b7c2ca4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	load_host_msrs(vcpu);
+	kvm_load_ldt(ldt_selector);
 	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
 	load_gs_index(gs_selector);
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #else
 	loadsegment(gs, gs_selector);
 #endif
-	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
@@ -3494,6 +3494,10 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
+	case 0x00000001:
+		/* Mask out xsave bit as long as it is not supported by SVM */
+		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
+		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
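
Annotation (not part of the patch): the svm_vcpu_run() hunks reload the host LDT before the fs/gs selectors rather than after. A plausible rationale, stated as an assumption: an x86 selector whose TI bit is set indexes the LDT, so reloading fs/gs while the wrong LDT is active would fetch the wrong descriptor. A standalone sketch of how a selector decodes:

#include <stdint.h>
#include <stdio.h>

/* An x86 segment selector packs index | TI | RPL.  TI (bit 2) chooses
 * the descriptor table: 0 = GDT, 1 = LDT. */
struct selector {
	unsigned int index;
	unsigned int ti;
	unsigned int rpl;
};

static struct selector decode(uint16_t sel)
{
	struct selector s = { sel >> 3, (sel >> 2) & 1, sel & 3 };
	return s;
}

int main(void)
{
	struct selector s = decode(0x3f);	/* index 7, TI = 1, RPL = 3 */

	printf("index=%u table=%s rpl=%u\n",
	       s.index, s.ti ? "LDT" : "GDT", s.rpl);
	return 0;
}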
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8da0e45ff7c9..81fcbe9515c5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	if (is_long_mode(&vmx->vcpu))
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-	}
 #endif
 	for (i = 0; i < vmx->save_nmsrs; ++i)
 		kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
-	if (vmx->host_state.fs_reload_needed)
-		loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+	if (is_long_mode(&vmx->vcpu))
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
 	if (vmx->host_state.gs_ldt_reload_needed) {
 		kvm_load_ldt(vmx->host_state.ldt_sel);
 #ifdef CONFIG_X86_64
 		load_gs_index(vmx->host_state.gs_sel);
-		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
 #else
 		loadsegment(gs, vmx->host_state.gs_sel);
 #endif
 	}
+	if (vmx->host_state.fs_reload_needed)
+		loadsegment(fs, vmx->host_state.fs_sel);
 	reload_tss();
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
-	}
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
 	if (current_thread_info()->status & TS_USEDFPU)
 		clts();
@@ -4228,11 +4227,6 @@ static int vmx_get_lpage_level(void)
 	return PT_PDPE_LEVEL;
 }
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
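
Annotation (not part of the patch): the vmx.c hunks make the MSR_KERNEL_GS_BASE handling symmetric: the host value is saved and restored unconditionally, while the guest value is loaded and read back only for long-mode guests. A toy model of the new ordering; plain variables stand in for the MSR and the rdmsrl()/wrmsrl() calls, and the failure scenario in main() is a simplification.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t msr;			/* stands in for MSR_KERNEL_GS_BASE */
static uint64_t host_base, guest_base;	/* vmx->msr_{host,guest}_kernel_gs_base */

/* vmx_save_host_state() after the patch: save unconditionally,
 * load the guest value only for long-mode guests. */
static void save_host_state(bool long_mode)
{
	host_base = msr;		/* rdmsrl() */
	if (long_mode)
		msr = guest_base;	/* wrmsrl() */
}

/* __vmx_load_host_state() after the patch: read the guest value back
 * only for long-mode guests, restore the host value unconditionally. */
static void load_host_state(bool long_mode)
{
	if (long_mode)
		guest_base = msr;	/* rdmsrl() */
	msr = host_base;		/* wrmsrl() */
}

int main(void)
{
	msr = 0x1111;			/* host's kernel gs base */
	guest_base = 0x2222;

	/* With the old conditional save/restore, a !long_mode guest run
	 * that dirtied the MSR could leak into the host; now the host
	 * value survives regardless of guest mode. */
	save_host_state(false);
	msr = 0xdead;			/* MSR dirtied while the guest ran */
	load_host_state(false);
	assert(msr == 0x1111);
	return 0;
}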
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b32..b989e1f1e5d3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -155,11 +155,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -2560,6 +2555,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2569,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2621,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3105,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3169,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3190,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
@@ -3486,6 +3486,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3973,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
@@ -4561,9 +4564,11 @@ static void kvm_timer_init(void)
 #ifdef CONFIG_CPU_FREQ
 		struct cpufreq_policy policy;
 		memset(&policy, 0, sizeof(policy));
-		cpufreq_get_policy(&policy, get_cpu());
+		cpu = get_cpu();
+		cpufreq_get_policy(&policy, cpu);
 		if (policy.cpuinfo.max_freq)
 			max_tsc_khz = policy.cpuinfo.max_freq;
+		put_cpu();
 #endif
 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
 					  CPUFREQ_TRANSITION_NOTIFIER);
@@ -5514,6 +5519,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+	if (sregs->cr4 & X86_CR4_OSXSAVE)
+		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
 		mmu_reset_needed = 1;
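
Annotation (not part of the patch): most of the x86.c hunks clear padding and reserved fields before structures are copied out with copy_to_user(), which copies padding bytes verbatim and would otherwise leak uninitialized kernel stack data to userspace. A minimal sketch of the pattern; the struct layout loosely mirrors kvm_clock_data and is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Layout loosely mirrors struct kvm_clock_data (an assumption). */
struct clock_data {
	uint64_t clock;
	uint32_t flags;
	uint32_t pad[9];
};

static void fill(struct clock_data *d)
{
	d->clock = 42;
	d->flags = 0;
	/* The fix: clear reserved/padding fields too, because
	 * copy_to_user() copies them byte-for-byte and would otherwise
	 * expose whatever the kernel stack happened to contain. */
	memset(&d->pad, 0, sizeof(d->pad));
}

int main(void)
{
	struct clock_data d;

	fill(&d);	/* every byte is defined before it would cross to userspace */
	printf("clock=%llu\n", (unsigned long long)d.clock);
	return 0;
}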
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2cea414489f3..c600da830ce0 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -70,6 +70,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
+static inline u32 bit(int bitno)
+{
+	return 1 << (bitno & 31);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);
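
Annotation (not part of the patch): bit() is deduplicated into x86.h so svm.c can use it too; the identical static copies in vmx.c and x86.c are removed above. A standalone sketch of why it masks with 31; the feature encoding shown (word*32 + bit, with XSAVE at 4*32 + 26, i.e. CPUID.1:ECX bit 26) matches this era's cpufeature numbering as far as I can tell.

#include <stdint.h>
#include <stdio.h>

/* Same body as the helper the patch moves into x86.h. */
static inline uint32_t bit(int bitno)
{
	return 1u << (bitno & 31);
}

int main(void)
{
	/* cpufeature numbers encode word*32 + bit; "& 31" keeps only the
	 * bit-within-word part, which is how svm_set_supported_cpuid()
	 * clears XSAVE from leaf 1's ECX. */
	uint32_t ecx = 0xffffffffu;	/* pretend CPUID.1 reported everything */

	ecx &= ~(bit(4 * 32 + 26));	/* X86_FEATURE_XSAVE */
	printf("ecx = %#x\n", ecx);	/* bit 26 cleared: 0xfbffffff */
	return 0;
}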