author      Anthony Liguori <aliguori@us.ibm.com>    2007-10-18 10:59:34 -0400
committer   Avi Kivity <avi@qumranet.com>            2008-01-30 10:52:54 -0500
commit      8d4e1288ebb753d3140d81cb349f22b0a6829a4a (patch)
tree        3fb754321abd1da8fa107f500dedd4a56f94ac0a
parent      e56a7a28e21aa2d1af659d8f38952411ce9ae40e (diff)
KVM: Allocate userspace memory for older userspace
Allocate a userspace buffer for older userspaces. Also eliminate the
phys_mem buffer. The memset() in kvmctl really kills initial memory
usage, but swapping works even with old userspaces. A side effect is
that the maximum guest size is reduced for older userspaces on i386.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--   drivers/kvm/kvm.h        2
-rw-r--r--   drivers/kvm/kvm_main.c  83
2 files changed, 30 insertions(+), 55 deletions(-)
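
The heart of the change: a memory slot is now backed by ordinary process
memory rather than a kernel-held struct page array, so a guest frame
number (gfn) maps to a host virtual address by offset arithmetic alone.
A minimal sketch of that translation, assuming the post-patch
kvm_memory_slot fields shown in the hunks below (gfn_to_hva_sketch is a
hypothetical name, not part of this patch):

        /*
         * Hypothetical helper illustrating the gfn -> host virtual
         * address arithmetic this patch adopts: a slot maps npages
         * guest pages starting at base_gfn onto the region at
         * userspace_addr in the caller's address space.
         */
        static unsigned long gfn_to_hva_sketch(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
        {
                return slot->userspace_addr
                        + (gfn - slot->base_gfn) * PAGE_SIZE;
        }
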
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index e8a21e8cb2ad..eb006ed696c1 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -406,10 +406,8 @@ struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
-	struct page **phys_mem;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
-	int user_alloc; /* user allocated memory */
 	unsigned long userspace_addr;
 };
 
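
For reference, the slot structure as it reads once this hunk is applied;
fields above base_gfn fall outside the hunk's context and are elided:

        struct kvm_memory_slot {
                /* ... fields outside the hunk's context ... */
                gfn_t base_gfn;
                unsigned long npages;
                unsigned long flags;
                unsigned long *rmap;
                unsigned long *dirty_bitmap;
                unsigned long userspace_addr;   /* now set for every slot */
        };
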
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index f86a47c2f255..8f6c21d02656 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -42,6 +42,7 @@
 #include <linux/profile.h>
 #include <linux/kvm_para.h>
 #include <linux/pagemap.h>
+#include <linux/mman.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -300,36 +301,21 @@ static struct kvm *kvm_create_vm(void)
 	return kvm;
 }
 
-static void kvm_free_kernel_physmem(struct kvm_memory_slot *free)
-{
-	int i;
-
-	for (i = 0; i < free->npages; ++i)
-		if (free->phys_mem[i])
-			__free_page(free->phys_mem[i]);
-}
-
 /*
  * Free any memory in @free but not in @dont.
  */
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
-	if (!dont || free->phys_mem != dont->phys_mem)
-		if (free->phys_mem) {
-			if (!free->user_alloc)
-				kvm_free_kernel_physmem(free);
-			vfree(free->phys_mem);
-		}
 	if (!dont || free->rmap != dont->rmap)
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		vfree(free->dirty_bitmap);
 
-	free->phys_mem = NULL;
 	free->npages = 0;
 	free->dirty_bitmap = NULL;
+	free->rmap = NULL;
 }
 
 static void kvm_free_physmem(struct kvm *kvm)
@@ -712,10 +698,6 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 		goto out_unlock;
 	}
 
-	/* Deallocate if slot is being removed */
-	if (!npages)
-		new.phys_mem = NULL;
-
 	/* Free page dirty bitmap if unneeded */
 	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
 		new.dirty_bitmap = NULL;
@@ -723,29 +705,27 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 	r = -ENOMEM;
 
 	/* Allocate if a slot is being created */
-	if (npages && !new.phys_mem) {
-		new.phys_mem = vmalloc(npages * sizeof(struct page *));
-
-		if (!new.phys_mem)
-			goto out_unlock;
-
+	if (npages && !new.rmap) {
 		new.rmap = vmalloc(npages * sizeof(struct page *));
 
 		if (!new.rmap)
 			goto out_unlock;
 
-		memset(new.phys_mem, 0, npages * sizeof(struct page *));
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
-		if (user_alloc) {
-			new.user_alloc = 1;
+
+		if (user_alloc)
 			new.userspace_addr = mem->userspace_addr;
-		} else {
-			for (i = 0; i < npages; ++i) {
-				new.phys_mem[i] = alloc_page(GFP_HIGHUSER
-							     | __GFP_ZERO);
-				if (!new.phys_mem[i])
-					goto out_unlock;
-			}
+		else {
+			down_write(&current->mm->mmap_sem);
+			new.userspace_addr = do_mmap(NULL, 0,
+						     npages * PAGE_SIZE,
+						     PROT_READ | PROT_WRITE,
+						     MAP_SHARED | MAP_ANONYMOUS,
+						     0);
+			up_write(&current->mm->mmap_sem);
+
+			if (IS_ERR((void *)new.userspace_addr))
+				goto out_unlock;
 		}
 	}
 
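
The in-kernel do_mmap() above sets up, on the old userspace's behalf,
the same zero-filled shared anonymous mapping that a newer userspace
would create itself. A userspace sketch of the equivalent allocation
(illustrative only; alloc_slot_backing is not part of the patch):

        #include <sys/mman.h>
        #include <stdio.h>

        /* Userspace equivalent of the kernel-side allocation above:
         * shared, anonymous, demand-paged memory sized to the slot. */
        static void *alloc_slot_backing(unsigned long npages,
                                        unsigned long page_size)
        {
                void *addr = mmap(NULL, npages * page_size,
                                  PROT_READ | PROT_WRITE,
                                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);

                if (addr == MAP_FAILED) {
                        perror("mmap");
                        return NULL;
                }
                return addr;
        }

Because anonymous memory is demand-paged and swappable, this is what
lets guest memory swap and keeps initial usage down relative to the
removed alloc_page() loop, as the commit message notes.
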
@@ -1010,6 +990,8 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
+	struct page *page[1];
+	int npages;
 
 	gfn = unalias_gfn(kvm, gfn);
 	slot = __gfn_to_memslot(kvm, gfn);
@@ -1017,24 +999,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 		get_page(bad_page);
 		return bad_page;
 	}
-	if (slot->user_alloc) {
-		struct page *page[1];
-		int npages;
-
-		down_read(&current->mm->mmap_sem);
-		npages = get_user_pages(current, current->mm,
-					slot->userspace_addr
-					+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
-					1, 1, page, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (npages != 1) {
-			get_page(bad_page);
-			return bad_page;
-		}
-		return page[0];
-	}
-	get_page(slot->phys_mem[gfn - slot->base_gfn]);
-	return slot->phys_mem[gfn - slot->base_gfn];
+
+	down_read(&current->mm->mmap_sem);
+	npages = get_user_pages(current, current->mm,
+				slot->userspace_addr
+				+ (gfn - slot->base_gfn) * PAGE_SIZE, 1,
+				1, 1, page, NULL);
+	up_read(&current->mm->mmap_sem);
+	if (npages != 1) {
+		get_page(bad_page);
+		return bad_page;
+	}
+
+	return page[0];
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
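
After this hunk, every gfn_to_page() lookup pins the page through
get_user_pages(), so callers own a reference they must drop. A hedged
caller sketch under 2.6-era kmap conventions (peek_guest_byte is
hypothetical and assumes linux/highmem.h; the put_page() balancing the
pin is standard kernel practice, not shown in the hunk itself):

        /* Hypothetical caller: read one byte of guest memory at gfn.
         * gfn_to_page() returns bad_page on failure; that check is
         * omitted here for brevity. */
        static u8 peek_guest_byte(struct kvm *kvm, gfn_t gfn,
                                  unsigned int offset)
        {
                struct page *page = gfn_to_page(kvm, gfn);
                u8 *va, val;

                va = kmap_atomic(page, KM_USER0);
                val = va[offset];
                kunmap_atomic(va, KM_USER0);
                put_page(page);    /* drop the get_user_pages() reference */
                return val;
        }
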