Diffstat (limited to 'arch/x86')

 arch/x86/kvm/mmu.c |  9 +++++----
 arch/x86/kvm/x86.c | 16 ++++++++++++----
 2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a51..fb8b376bf28c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
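
Note on the mmu.c change above: set_spte_track_bits() now reports whether the old spte was actually present in the rmap, so drop_spte() only calls rmap_remove() when there is a reverse mapping to remove. A minimal userspace sketch of the same return-a-flag pattern (all names are hypothetical stand-ins, not the KVM helpers):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for is_rmap_spte(): non-zero means the
	 * old entry was present and has a reverse mapping to clean up. */
	static bool entry_was_tracked(unsigned long old_val)
	{
		return old_val != 0;
	}

	/* Mirrors set_spte_track_bits(): install the new value and
	 * report whether the old one needs its mapping removed. */
	static int set_and_report(unsigned long *slot, unsigned long new_val)
	{
		unsigned long old_val = *slot;

		*slot = new_val;
		if (!entry_was_tracked(old_val))
			return 0;
		/* ...accessed/dirty bookkeeping for old_val goes here... */
		return 1;
	}

	/* Mirrors drop_spte(): cleanup only runs for tracked entries. */
	static void drop(unsigned long *slot, unsigned long new_val)
	{
		if (set_and_report(slot, new_val))
			printf("rmap_remove for old entry\n");
	}

	int main(void)
	{
		unsigned long entry = 0;

		drop(&entry, 0);	/* not present: cleanup skipped */
		entry = 42;
		drop(&entry, 0);	/* present: cleanup runs */
		return 0;
	}
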
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b32..cdac9e592aa5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
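
The two pad stores and the memset() above, like the memsets in the debugregs, PIT and clock hunks below, all close the same hole: these structures are built on the kernel stack and copied to userspace in full, so padding or reserved bytes left uninitialized would leak stale kernel stack contents. A minimal sketch of the pattern, with a hypothetical ABI struct:

	#include <string.h>

	/* Hypothetical ioctl payload with explicit padding and reserved
	 * space; both are part of the bytes copied out to userspace. */
	struct demo_events {
		unsigned char	injected;
		unsigned char	pad[3];		/* alignment padding */
		unsigned int	error_code;
		unsigned int	reserved[9];	/* room for future fields */
	};

	static void fill_demo_events(struct demo_events *ev)
	{
		ev->injected = 1;
		ev->error_code = 0;
		/* Zero every byte the caller copies out but we never set,
		 * so no stale stack contents reach userspace. */
		memset(ev->pad, 0, sizeof(ev->pad));
		memset(ev->reserved, 0, sizeof(ev->reserved));
	}

An alternative is to memset() the whole structure first and then fill in the live fields, which stays correct as fields are added later.
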
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 	       sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
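
The two dirty-log hunks above move the write protection from before the memslot swap to after it. With the old ordering, a page could be write-faulted back to writable between kvm_mmu_slot_remove_write_access() and the slot swap; after the swap it stayed writable while the new, clean bitmap had no record of it, so later writes went unlogged. A comment-only sketch of the reordered flow (KVM details elided):

	/*
	 * Reordered GET_DIRTY_LOG flow, roughly:
	 *
	 *	old_bitmap = slot->dirty_bitmap;
	 *	rcu_assign_pointer(kvm->memslots, new_slots);
	 *		(clean bitmap is now live)
	 *	kvm_mmu_slot_remove_write_access(kvm, log->slot);
	 *		(write-protect last: any write fault from here on
	 *		 is logged in the new bitmap, none can be lost)
	 *	copy_to_user(log->dirty_bitmap, old_bitmap, n);
	 */
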
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
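
The preempt_disable()/preempt_enable() pair added above is there because smp_call_function_many() must be called with preemption disabled. A minimal kernel-side sketch of the call pattern (the wrapper name is hypothetical):

	#include <linux/preempt.h>
	#include <linux/smp.h>

	/* Run fn on every CPU in the mask and wait for completion.
	 * smp_call_function_many() uses the current CPU's per-cpu call
	 * data, so the caller must keep preemption off around it. */
	static void run_on_cpus(const struct cpumask *cpus, smp_call_func_t fn)
	{
		preempt_disable();
		smp_call_function_many(cpus, fn, NULL, 1);	/* wait */
		preempt_enable();
	}
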