author	Paolo Bonzini <pbonzini@redhat.com>	2015-10-12 07:56:27 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-10-13 12:28:58 -0400
commit	f0d648bdf0a5bbc91da6099d5282f77996558ea4 (patch)
tree	8ba9f9080e3c51144df2fe3efdf5eeea1c231b3e /arch
parent	1d8007bdee074fdffcf3539492d8a151a1fb3436 (diff)
KVM: x86: map/unmap private slots in __x86_set_memory_region
Otherwise, two copies (one of them never populated and thus bogus) are
allocated for the regular and SMM address spaces.  This breaks SMM with
EPT but without unrestricted guest support, because the SMM copy of the
identity page map is all zeros.

By moving the allocation to the caller we also remove the last vestiges
of kernel-allocated memory regions (not accessible anymore in userspace
since commit b74a07beed0e, "KVM: Remove kernel-allocated memory regions",
2010-06-21); that is a nice bonus.

Reported-by: Alexandre DERUMIER <aderumier@odiso.com>
Cc: stable@vger.kernel.org
Fixes: 9da0e4d5ac969909f6b435ce28ea28135a9cbd69
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
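The helper registers each private slot once per address space, encoding
the address-space index in the upper bits of the slot number
(m.slot = id | (i << 16) in the first hunk below).  A minimal stand-alone
sketch of that encoding, assuming KVM_ADDRESS_SPACE_NUM == 2 (regular and
SMM); the slot id is hypothetical:

    #include <stdio.h>

    #define KVM_ADDRESS_SPACE_NUM 2        /* x86: regular + SMM */

    int main(void)
    {
            int id = 509;                  /* hypothetical private slot id */
            int i;

            /* One kvm_userspace_memory_region per address space; after
             * this patch all of them share a single host mapping (hva). */
            for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
                    printf("address space %d -> slot %#x\n",
                           i, id | (i << 16));
            return 0;
    }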
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/x86.c | 62 ++++++++++++++++++++++++++++++--------------------------------
1 file changed, 30 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7bf8096f013d..3ac33f86c873 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7477,23 +7477,53 @@ void kvm_arch_sync_events(struct kvm *kvm)
 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
         int i, r;
+        u64 hva;
+        struct kvm_memslots *slots = kvm_memslots(kvm);
+        struct kvm_memory_slot *slot, old;
 
         /* Called with kvm->slots_lock held.  */
         if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
                 return -EINVAL;
 
+        slot = id_to_memslot(slots, id);
+        if (size) {
+                if (WARN_ON(slot->npages))
+                        return -EEXIST;
+
+                /*
+                 * MAP_SHARED to prevent internal slot pages from being moved
+                 * by fork()/COW.
+                 */
+                hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+                              MAP_SHARED | MAP_ANONYMOUS, 0);
+                if (IS_ERR((void *)hva))
+                        return PTR_ERR((void *)hva);
+        } else {
+                if (!slot->npages)
+                        return 0;
+
+                hva = 0;
+        }
+
+        old = *slot;
         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                 struct kvm_userspace_memory_region m;
 
                 m.slot = id | (i << 16);
                 m.flags = 0;
                 m.guest_phys_addr = gpa;
+                m.userspace_addr = hva;
                 m.memory_size = size;
                 r = __kvm_set_memory_region(kvm, &m);
                 if (r < 0)
                         return r;
         }
 
+        if (!size) {
+                r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+                WARN_ON(r < 0);
+        }
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
@@ -7623,27 +7653,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                 const struct kvm_userspace_memory_region *mem,
                                 enum kvm_mr_change change)
 {
-        /*
-         * Only private memory slots need to be mapped here since
-         * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-         */
-        if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-                unsigned long userspace_addr;
-
-                /*
-                 * MAP_SHARED to prevent internal slot pages from being moved
-                 * by fork()/COW.
-                 */
-                userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-                                         PROT_READ | PROT_WRITE,
-                                         MAP_SHARED | MAP_ANONYMOUS, 0);
-
-                if (IS_ERR((void *)userspace_addr))
-                        return PTR_ERR((void *)userspace_addr);
-
-                memslot->userspace_addr = userspace_addr;
-        }
-
         return 0;
 }
 
@@ -7705,17 +7714,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
         int nr_mmu_pages = 0;
 
-        if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-                int ret;
-
-                ret = vm_munmap(old->userspace_addr,
-                                old->npages * PAGE_SIZE);
-                if (ret < 0)
-                        printk(KERN_WARNING
-                               "kvm_vm_ioctl_set_memory_region: "
-                               "failed to munmap memory\n");
-        }
-
         if (!kvm->arch.n_requested_mmu_pages)
                 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
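With the mapping moved into __x86_set_memory_region, a caller creates a
private slot by passing a non-zero size (the helper vm_mmap()s the backing
memory) and destroys it by passing size == 0 (the helper deletes the slot
in every address space, then vm_munmap()s the old mapping).  A sketch of
the calling convention, using the locked x86_set_memory_region() wrapper;
the slot constant and addresses are illustrative:

    /* Sketch only, not from the patch: create and later destroy a
     * private memory slot.  Assumes the x86_set_memory_region()
     * wrapper, which takes kvm->slots_lock around
     * __x86_set_memory_region(). */
    static int example_apic_slot(struct kvm *kvm)
    {
            int r;

            /* size > 0: allocate one anonymous page and register it at
             * the APIC base in both the regular and SMM address spaces. */
            r = x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
            if (r < 0)
                    return r;

            /* size == 0: delete the slot everywhere, then unmap the old
             * backing memory. */
            return x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                         0, 0);
    }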