diff options
-rw-r--r-- | drivers/kvm/kvm.h | 4 | ||||
-rw-r--r-- | drivers/kvm/kvm_main.c | 38 | ||||
-rw-r--r-- | drivers/kvm/x86.c | 51 |
3 files changed, 60 insertions, 33 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index c2acd74389fa..49094a221f6a 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h | |||
@@ -391,6 +391,10 @@ int kvm_set_memory_region(struct kvm *kvm, | |||
391 | int __kvm_set_memory_region(struct kvm *kvm, | 391 | int __kvm_set_memory_region(struct kvm *kvm, |
392 | struct kvm_userspace_memory_region *mem, | 392 | struct kvm_userspace_memory_region *mem, |
393 | int user_alloc); | 393 | int user_alloc); |
394 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
395 | struct kvm_userspace_memory_region *mem, | ||
396 | struct kvm_memory_slot old, | ||
397 | int user_alloc); | ||
394 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); | 398 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); |
395 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 399 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
396 | void kvm_release_page_clean(struct page *page); | 400 | void kvm_release_page_clean(struct page *page); |
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 93ecafbfb1b6..9dd6ad3c6c7b 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c | |||
@@ -291,33 +291,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
291 | memset(new.rmap, 0, npages * sizeof(*new.rmap)); | 291 | memset(new.rmap, 0, npages * sizeof(*new.rmap)); |
292 | 292 | ||
293 | new.user_alloc = user_alloc; | 293 | new.user_alloc = user_alloc; |
294 | if (user_alloc) | 294 | new.userspace_addr = mem->userspace_addr; |
295 | new.userspace_addr = mem->userspace_addr; | ||
296 | else { | ||
297 | down_write(¤t->mm->mmap_sem); | ||
298 | new.userspace_addr = do_mmap(NULL, 0, | ||
299 | npages * PAGE_SIZE, | ||
300 | PROT_READ | PROT_WRITE, | ||
301 | MAP_SHARED | MAP_ANONYMOUS, | ||
302 | 0); | ||
303 | up_write(¤t->mm->mmap_sem); | ||
304 | |||
305 | if (IS_ERR((void *)new.userspace_addr)) | ||
306 | goto out_free; | ||
307 | } | ||
308 | } else { | ||
309 | if (!old.user_alloc && old.rmap) { | ||
310 | int ret; | ||
311 | |||
312 | down_write(¤t->mm->mmap_sem); | ||
313 | ret = do_munmap(current->mm, old.userspace_addr, | ||
314 | old.npages * PAGE_SIZE); | ||
315 | up_write(¤t->mm->mmap_sem); | ||
316 | if (ret < 0) | ||
317 | printk(KERN_WARNING | ||
318 | "kvm_vm_ioctl_set_memory_region: " | ||
319 | "failed to munmap memory\n"); | ||
320 | } | ||
321 | } | 295 | } |
322 | 296 | ||
323 | /* Allocate page dirty bitmap if needed */ | 297 | /* Allocate page dirty bitmap if needed */ |
@@ -335,14 +309,12 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
335 | 309 | ||
336 | *memslot = new; | 310 | *memslot = new; |
337 | 311 | ||
338 | if (!kvm->n_requested_mmu_pages) { | 312 | r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); |
339 | unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); | 313 | if (r) { |
340 | kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); | 314 | *memslot = old; |
315 | goto out_free; | ||
341 | } | 316 | } |
342 | 317 | ||
343 | kvm_mmu_slot_remove_write_access(kvm, mem->slot); | ||
344 | kvm_flush_remote_tlbs(kvm); | ||
345 | |||
346 | kvm_free_physmem_slot(&old, &new); | 318 | kvm_free_physmem_slot(&old, &new); |
347 | return 0; | 319 | return 0; |
348 | 320 | ||
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c index 5a54e328088d..6abb2ed1a908 100644 --- a/drivers/kvm/x86.c +++ b/drivers/kvm/x86.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/mman.h> | ||
27 | 28 | ||
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/msr.h> | 30 | #include <asm/msr.h> |
@@ -2637,3 +2638,53 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
2637 | kvm_free_physmem(kvm); | 2638 | kvm_free_physmem(kvm); |
2638 | kfree(kvm); | 2639 | kfree(kvm); |
2639 | } | 2640 | } |
2641 | |||
2642 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
2643 | struct kvm_userspace_memory_region *mem, | ||
2644 | struct kvm_memory_slot old, | ||
2645 | int user_alloc) | ||
2646 | { | ||
2647 | int npages = mem->memory_size >> PAGE_SHIFT; | ||
2648 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | ||
2649 | |||
2650 | /* To keep backward compatibility with older userspace, | ||
2651 | * x86 needs to handle !user_alloc case. | ||
2652 | */ | ||
2653 | if (!user_alloc) { | ||
2654 | if (npages && !old.rmap) { | ||
2655 | down_write(¤t->mm->mmap_sem); | ||
2656 | memslot->userspace_addr = do_mmap(NULL, 0, | ||
2657 | npages * PAGE_SIZE, | ||
2658 | PROT_READ | PROT_WRITE, | ||
2659 | MAP_SHARED | MAP_ANONYMOUS, | ||
2660 | 0); | ||
2661 | up_write(¤t->mm->mmap_sem); | ||
2662 | |||
2663 | if (IS_ERR((void *)memslot->userspace_addr)) | ||
2664 | return PTR_ERR((void *)memslot->userspace_addr); | ||
2665 | } else { | ||
2666 | if (!old.user_alloc && old.rmap) { | ||
2667 | int ret; | ||
2668 | |||
2669 | down_write(¤t->mm->mmap_sem); | ||
2670 | ret = do_munmap(current->mm, old.userspace_addr, | ||
2671 | old.npages * PAGE_SIZE); | ||
2672 | up_write(¤t->mm->mmap_sem); | ||
2673 | if (ret < 0) | ||
2674 | printk(KERN_WARNING | ||
2675 | "kvm_vm_ioctl_set_memory_region: " | ||
2676 | "failed to munmap memory\n"); | ||
2677 | } | ||
2678 | } | ||
2679 | } | ||
2680 | |||
2681 | if (!kvm->n_requested_mmu_pages) { | ||
2682 | unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); | ||
2683 | kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); | ||
2684 | } | ||
2685 | |||
2686 | kvm_mmu_slot_remove_write_access(kvm, mem->slot); | ||
2687 | kvm_flush_remote_tlbs(kvm); | ||
2688 | |||
2689 | return 0; | ||
2690 | } | ||