Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--    arch/x86/kvm/x86.c    16
1 files changed, 12 insertions, 4 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b32..cdac9e592aa5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
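
Most of the hunks above follow one pattern: kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and kvm_clock_data are assembled on the kernel stack and then handed to copy_to_user(), so any pad or reserved field left uninitialized would copy stale kernel stack bytes out to userspace. A minimal sketch of that pattern, assuming a hypothetical ioctl payload (example_state and example_get_state are illustrative names, not part of this patch):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl payload; not an actual KVM structure. */
struct example_state {
	__u32 value;
	__u32 flags;
	__u8  pad;          /* explicit padding byte, as in kvm_vcpu_events */
	__u8  reserved[11]; /* reserved for future extensions */
};

static long example_get_state(void __user *argp)
{
	struct example_state s;  /* stack variable: contents start out stale */

	s.value = 42;
	s.flags = 0;
	s.pad = 0;                                  /* like events->exception.pad = 0 */
	memset(&s.reserved, 0, sizeof(s.reserved)); /* like dbgregs->reserved above */

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;
	return 0;
}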
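The last hunk wraps smp_call_function_many() in a preempt_disable()/preempt_enable() pair: the function works on the calling CPU's per-CPU state and may wait for the cross-CPU calls to complete, so the caller must not be preempted or migrated while it runs. A sketch of that calling convention, with a hypothetical IPI handler (flush_ipi and flush_on_cpus are illustrative names):

#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

/* Hypothetical IPI handler: runs on each CPU in the mask. */
static void flush_ipi(void *unused)
{
	/* per-CPU work, e.g. a cache flush */
}

static void flush_on_cpus(const struct cpumask *mask)
{
	/*
	 * Keep preemption disabled across smp_call_function_many(),
	 * exactly as the hunk above does in kvm_emulate_wbinvd().
	 */
	preempt_disable();
	smp_call_function_many(mask, flush_ipi, NULL, 1);
	preempt_enable();
}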