about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/kvm/x86.c11
1 file changed, 5 insertions, 6 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e86f9b22eaca..3df0b7a140b0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3065,6 +3065,8 @@ static void write_protect_slot(struct kvm *kvm,
 				unsigned long *dirty_bitmap,
 				unsigned long nr_dirty_pages)
 {
+	spin_lock(&kvm->mmu_lock);
+
 	/* Not many dirty pages compared to # of shadow pages. */
 	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
 		unsigned long gfn_offset;
@@ -3072,16 +3074,13 @@ static void write_protect_slot(struct kvm *kvm,
 		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
 			unsigned long gfn = memslot->base_gfn + gfn_offset;
 
-			spin_lock(&kvm->mmu_lock);
 			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
-			spin_unlock(&kvm->mmu_lock);
 		}
 		kvm_flush_remote_tlbs(kvm);
-	} else {
-		spin_lock(&kvm->mmu_lock);
+	} else
 		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
-		spin_unlock(&kvm->mmu_lock);
-	}
+
+	spin_unlock(&kvm->mmu_lock);
 }
 
 /*