 arch/x86/kvm/x86.c  | 21 ++++++++++++++-------
 virt/kvm/kvm_main.c | 20 ++++++++++++++++----
 2 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9870ce422920..c7b01efe0646 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {
+			unsigned long userspace_addr;
+
 			down_write(&current->mm->mmap_sem);
-			memslot->userspace_addr = do_mmap(NULL, 0,
-						npages * PAGE_SIZE,
-						PROT_READ | PROT_WRITE,
-						MAP_SHARED | MAP_ANONYMOUS,
-						0);
+			userspace_addr = do_mmap(NULL, 0,
+						npages * PAGE_SIZE,
+						PROT_READ | PROT_WRITE,
+						MAP_SHARED | MAP_ANONYMOUS,
+						0);
 			up_write(&current->mm->mmap_sem);
 
-			if (IS_ERR((void *)memslot->userspace_addr))
-				return PTR_ERR((void *)memslot->userspace_addr);
+			if (IS_ERR((void *)userspace_addr))
+				return PTR_ERR((void *)userspace_addr);
+
+			/* set userspace_addr atomically for kvm_hva_to_rmapp */
+			spin_lock(&kvm->mmu_lock);
+			memslot->userspace_addr = userspace_addr;
+			spin_unlock(&kvm->mmu_lock);
 		} else {
 			if (!old.user_alloc && old.rmap) {
 				int ret;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a845890b6800..3735212cd3f8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
 		new.user_alloc = user_alloc;
-		new.userspace_addr = mem->userspace_addr;
+		/*
+		 * hva_to_rmmap() serializes with the mmu_lock and to be
+		 * safe it has to ignore memslots with !user_alloc &&
+		 * !userspace_addr.
+		 */
+		if (user_alloc)
+			new.userspace_addr = mem->userspace_addr;
+		else
+			new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
 		int largepages = npages / KVM_PAGES_PER_HPAGE;
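The comment in this hunk implies a reader-side rule: a scan of the memslots under mmu_lock has to treat a kernel-allocated slot whose address has not been published yet as absent. An illustrative check, where the struct and function names are invented for the sketch and are not kernel symbols:

	struct sketch_slot {
		int user_alloc;
		unsigned long userspace_addr;
	};

	/*
	 * A slot with !user_alloc && !userspace_addr is mid-setup: it is
	 * already visible in the array, but do_mmap() has not published
	 * its address yet, so a scan under mmu_lock must skip it.
	 */
	static int slot_addr_valid(const struct sketch_slot *slot)
	{
		return slot->user_alloc || slot->userspace_addr != 0;
	}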
@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 #endif /* not defined CONFIG_S390 */
 
-	if (mem->slot >= kvm->nmemslots)
-		kvm->nmemslots = mem->slot + 1;
-
 	if (!npages)
 		kvm_arch_flush_shadow(kvm);
 
+	spin_lock(&kvm->mmu_lock);
+	if (mem->slot >= kvm->nmemslots)
+		kvm->nmemslots = mem->slot + 1;
+
 	*memslot = new;
+	spin_unlock(&kvm->mmu_lock);
 
 	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
 	if (r) {
+		spin_lock(&kvm->mmu_lock);
 		*memslot = old;
+		spin_unlock(&kvm->mmu_lock);
 		goto out_free;
 	}
 
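Taken together, the kvm_main.c changes move every write to the slot array (the nmemslots bump, the install of new, and the rollback to old on failure) under mmu_lock, so a reader walking the array under the same lock sees each slot either entirely old or entirely new. A compact user-space sketch of the writer side, with slots/nslots as illustrative stand-ins for kvm->memslots/kvm->nmemslots:

	#include <pthread.h>

	#define MAX_SLOTS 32

	struct sketch_slot {
		int user_alloc;
		unsigned long userspace_addr;
	};

	static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct sketch_slot slots[MAX_SLOTS];	/* stand-in for kvm->memslots */
	static int nslots;				/* stand-in for kvm->nmemslots */

	static void install_slot(int idx, const struct sketch_slot *newslot)
	{
		pthread_mutex_lock(&mmu_lock);
		if (idx >= nslots)
			nslots = idx + 1;	/* grow the visible range under the lock */
		slots[idx] = *newslot;		/* whole-struct copy: a reader holding
						 * mmu_lock never sees a torn slot */
		pthread_mutex_unlock(&mmu_lock);
	}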