-rw-r--r--  arch/powerpc/kernel/kvm.c            |  2
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S  |  2
-rw-r--r--  arch/powerpc/kvm/e500.c              |  2
-rw-r--r--  arch/powerpc/kvm/powerpc.c           |  1
-rw-r--r--  arch/powerpc/kvm/timing.c            |  2
-rw-r--r--  arch/x86/kvm/mmu.c                   |  9
-rw-r--r--  arch/x86/kvm/x86.c                   | 16
7 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e538aec..b06bdae04064 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 
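Note: the skip around kvm_patch_ins_b() now applies only to relocatable Book3S kernels, where the interrupt handlers and the patched code can live in different regions; BookE relocatable kernels presumably have no such split and go back to getting the branch patched.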
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 049846911ce4..1cc471faac2d 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@ lightweight_exit:
 	lwz r3, VCPU_PC(r4)
 	mtsrr0 r3
 	lwz r3, VCPU_SHARED(r4)
-	lwz r3, VCPU_SHARED_MSR(r3)
+	lwz r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris r3, r3, KVMPPC_MSR_MASK@h
 	ori r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1 r3
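Note: shared->msr is a 64-bit field, and this 32-bit BookE exit path only needs its low word; on the big-endian layout used here the low 32 bits sit at offset VCPU_SHARED_MSR + 4, which is what the lwz now loads before the mask is applied and the result is moved into SRR1.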
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2dd5d3..e3768ee9b595 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
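Note: only the teardown order changes here, so that kvm_vcpu_uninit() runs while the e500 TLB structures are still allocated; presumably the generic uninit path (or the arch hooks it calls) still touches state that kvmppc_e500_tlb_uninit() releases, so freeing the TLB side first was unsafe.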
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a1627f6c..38f756f25053 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
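Note: pvinfo lives on the kernel stack and only some of its members are filled in before the whole structure is copied to userspace, so without the memset() the untouched bytes (including padding) would leak whatever happened to be on the stack. A minimal userspace sketch of the pattern, using a made-up stand-in for the real struct, just to illustrate why every byte has to be defined before copying the object out wholesale:

#include <stdio.h>
#include <string.h>

struct pvinfo_like {              /* hypothetical stand-in for kvm_ppc_pvinfo */
        unsigned int flags;
        unsigned int hcall[4];
        unsigned char pad[108];   /* reserved space the caller also receives */
};

static void fill(struct pvinfo_like *p)
{
        /* Only a few members are set; pad[] is never written. */
        p->flags = 0;
        p->hcall[0] = 0x44000022;
}

int main(void)
{
        struct pvinfo_like info;

        memset(&info, 0, sizeof(info)); /* the fix: make every byte defined */
        fill(&info);
        /* Without the memset, pad[] and any compiler padding would still hold
         * stale stack contents here, and copying sizeof(info) bytes out would
         * hand those bytes to the caller. */
        printf("pad[0] = %u\n", info.pad[0]);
        return 0;
}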
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f12a9b..a021f5827a33 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
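Note: mutex_lock() can sleep, and sleeping with local interrupts disabled is not allowed, so the local_irq_disable()/local_irq_enable() bracket around taking the vcpu mutex is dropped; the mutex by itself serializes the stats reset against the vcpu, which is all the comment above asks for.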
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a51..fb8b376bf28c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
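Note: set_spte_track_bits() now tells its caller whether the SPTE it replaced was actually a present rmap entry, and drop_spte() only calls rmap_remove() in that case; previously rmap_remove() could be asked to unlink an SPTE that was never on (or had already left) the rmap chain.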
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b32..cdac9e592aa5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2583 2586
2584static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 2587static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
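Note on the x86 side: the added memset()/pad-zeroing lines follow the same pattern as the pvinfo change above, making every byte of the structures later handed to copy_to_user() defined so that no stale kernel stack contents reach userspace. In kvm_vm_ioctl_get_dirty_log() the write-protection pass moves to after the new memslots (carrying the fresh, clean dirty bitmap) are installed, presumably so that a guest write landing in the window between the two steps is recorded by the bitmap that is kept rather than the one about to be discarded. And smp_call_function_many() has to be called with preemption disabled, hence the preempt_disable()/preempt_enable() pair around the wbinvd IPI in kvm_emulate_wbinvd().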