aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm/kvm_main.c
diff options
context:
space:
mode:
authorAndrea Arcangeli <andrea@qumranet.com>2008-07-25 10:32:03 -0400
committerAvi Kivity <avi@qumranet.com>2008-07-29 05:33:50 -0400
commit604b38ac0369bd50fcbb33344aa5553c071009f7 (patch)
tree149325ef8ddbeb607a19052bb807383a6aa7a549 /virt/kvm/kvm_main.c
parenta1708ce8a362c4999f1201237ae7b77c4d13af82 (diff)
KVM: Allow browsing memslots with mmu_lock
This allows reading memslots with only the mmu_lock held, for mmu notifiers that run in atomic context and with mmu_lock held. Signed-off-by: Andrea Arcangeli <andrea@qumranet.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--virt/kvm/kvm_main.c20
1 file changed, 16 insertions, 4 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a845890b6800..3735212cd3f8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
375 memset(new.rmap, 0, npages * sizeof(*new.rmap)); 375 memset(new.rmap, 0, npages * sizeof(*new.rmap));
376 376
377 new.user_alloc = user_alloc; 377 new.user_alloc = user_alloc;
378 new.userspace_addr = mem->userspace_addr; 378 /*
379 * hva_to_rmmap() serializes with the mmu_lock and to be
380 * safe it has to ignore memslots with !user_alloc &&
381 * !userspace_addr.
382 */
383 if (user_alloc)
384 new.userspace_addr = mem->userspace_addr;
385 else
386 new.userspace_addr = 0;
379 } 387 }
380 if (npages && !new.lpage_info) { 388 if (npages && !new.lpage_info) {
381 int largepages = npages / KVM_PAGES_PER_HPAGE; 389 int largepages = npages / KVM_PAGES_PER_HPAGE;
@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
408 } 416 }
409#endif /* not defined CONFIG_S390 */ 417#endif /* not defined CONFIG_S390 */
410 418
411 if (mem->slot >= kvm->nmemslots)
412 kvm->nmemslots = mem->slot + 1;
413
414 if (!npages) 419 if (!npages)
415 kvm_arch_flush_shadow(kvm); 420 kvm_arch_flush_shadow(kvm);
416 421
422 spin_lock(&kvm->mmu_lock);
423 if (mem->slot >= kvm->nmemslots)
424 kvm->nmemslots = mem->slot + 1;
425
417 *memslot = new; 426 *memslot = new;
427 spin_unlock(&kvm->mmu_lock);
418 428
419 r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); 429 r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
420 if (r) { 430 if (r) {
431 spin_lock(&kvm->mmu_lock);
421 *memslot = old; 432 *memslot = old;
433 spin_unlock(&kvm->mmu_lock);
422 goto out_free; 434 goto out_free;
423 } 435 }
424 436