author     Zhang Xiantao <xiantao.zhang@intel.com>    2007-11-20 03:25:04 -0500
committer  Avi Kivity <avi@qumranet.com>              2008-01-30 10:53:09 -0500
commit     0de10343b3ca7aa34dd606145748f73ed19f627e (patch)
tree       c15dd64e027ce58784ec8296bd7687591d08997e /drivers/kvm/kvm_main.c
parent     3ad82a7e874c5d6c4045090cc01d7794dd9eb21c (diff)
KVM: Portability: Split kvm_set_memory_region() to have an arch callout
Move the !user_alloc case into kvm_arch to avoid unnecessary code logic on non-x86 platforms.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c | 38
1 file changed, 5 insertions(+), 33 deletions(-)
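
The new arch hook itself is declared and implemented outside kvm_main.c, so it does not appear in this diffstat. Judging only from the call site added in the second hunk below, its prototype is presumably along these lines (a sketch inferred from this patch, not text taken from it):

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc);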
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 93ecafbfb1b6..9dd6ad3c6c7b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -291,33 +291,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
                 new.user_alloc = user_alloc;
-                if (user_alloc)
-                        new.userspace_addr = mem->userspace_addr;
-                else {
-                        down_write(&current->mm->mmap_sem);
-                        new.userspace_addr = do_mmap(NULL, 0,
-                                                npages * PAGE_SIZE,
-                                                PROT_READ | PROT_WRITE,
-                                                MAP_SHARED | MAP_ANONYMOUS,
-                                                0);
-                        up_write(&current->mm->mmap_sem);
-
-                        if (IS_ERR((void *)new.userspace_addr))
-                                goto out_free;
-                }
-        } else {
-                if (!old.user_alloc && old.rmap) {
-                        int ret;
-
-                        down_write(&current->mm->mmap_sem);
-                        ret = do_munmap(current->mm, old.userspace_addr,
-                                        old.npages * PAGE_SIZE);
-                        up_write(&current->mm->mmap_sem);
-                        if (ret < 0)
-                                printk(KERN_WARNING
-                                       "kvm_vm_ioctl_set_memory_region: "
-                                       "failed to munmap memory\n");
-                }
+                new.userspace_addr = mem->userspace_addr;
         }
 
         /* Allocate page dirty bitmap if needed */
@@ -335,14 +309,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
         *memslot = new;
 
-        if (!kvm->n_requested_mmu_pages) {
-                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
+        if (r) {
+                *memslot = old;
+                goto out_free;
         }
 
-        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-        kvm_flush_remote_tlbs(kvm);
-
         kvm_free_physmem_slot(&old, &new);
         return 0;
 
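
The mmap/munmap handling for the legacy !user_alloc path and the MMU bookkeeping deleted above are expected to reappear in the x86 implementation of kvm_arch_set_memory_region() (presumably drivers/kvm/x86.c at this point in the portability series), which is outside this file and diffstat. A rough sketch of that arch-side callout, reassembled from the code removed here; the exact x86.c version in the series may differ in detail:

/* Sketch only: reconstructed from the logic removed from kvm_main.c above. */
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

        /* Only x86 keeps the legacy !user_alloc ABI, where the kernel
         * maps anonymous memory on behalf of userspace. */
        if (!user_alloc) {
                if (npages && !old.rmap) {
                        /* Slot is being created: back it with an
                         * anonymous shared mapping. */
                        down_write(&current->mm->mmap_sem);
                        memslot->userspace_addr = do_mmap(NULL, 0,
                                                npages * PAGE_SIZE,
                                                PROT_READ | PROT_WRITE,
                                                MAP_SHARED | MAP_ANONYMOUS,
                                                0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)memslot->userspace_addr))
                                return PTR_ERR((void *)memslot->userspace_addr);
                } else if (!old.user_alloc && old.rmap) {
                        /* Slot is going away: tear down the mapping
                         * that the kernel created earlier. */
                        int ret;

                        down_write(&current->mm->mmap_sem);
                        ret = do_munmap(current->mm, old.userspace_addr,
                                        old.npages * PAGE_SIZE);
                        up_write(&current->mm->mmap_sem);
                        if (ret < 0)
                                printk(KERN_WARNING
                                       "kvm_vm_ioctl_set_memory_region: "
                                       "failed to munmap memory\n");
                }
        }

        /* MMU housekeeping that previously followed the slot update in
         * __kvm_set_memory_region(). */
        if (!kvm->n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);
        return 0;
}

Because the generic code installs *memslot = new before making the call, the sketch writes the mmap result through the live slot rather than a local copy; on failure the caller rolls the slot back to old and frees the new allocation, as the second hunk shows.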