Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c | 83
1 file changed, 46 insertions(+), 37 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 47000be25479..f86a47c2f255 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -300,19 +300,6 @@ static struct kvm *kvm_create_vm(void)
         return kvm;
 }
 
-static void kvm_free_userspace_physmem(struct kvm_memory_slot *free)
-{
-        int i;
-
-        for (i = 0; i < free->npages; ++i) {
-                if (free->phys_mem[i]) {
-                        if (!PageReserved(free->phys_mem[i]))
-                                SetPageDirty(free->phys_mem[i]);
-                        page_cache_release(free->phys_mem[i]);
-                }
-        }
-}
-
 static void kvm_free_kernel_physmem(struct kvm_memory_slot *free)
 {
         int i;
@@ -330,9 +317,7 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 {
         if (!dont || free->phys_mem != dont->phys_mem)
                 if (free->phys_mem) {
-                        if (free->user_alloc)
-                                kvm_free_userspace_physmem(free);
-                        else
+                        if (!free->user_alloc)
                                 kvm_free_kernel_physmem(free);
                         vfree(free->phys_mem);
                 }
@@ -361,7 +346,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 
         for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
                 if (vcpu->pio.guest_pages[i]) {
-                        __free_page(vcpu->pio.guest_pages[i]);
+                        kvm_release_page(vcpu->pio.guest_pages[i]);
                         vcpu->pio.guest_pages[i] = NULL;
                 }
 }
@@ -752,19 +737,8 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                 memset(new.phys_mem, 0, npages * sizeof(struct page *));
                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
                 if (user_alloc) {
-                        unsigned long pages_num;
-
                         new.user_alloc = 1;
-                        down_read(&current->mm->mmap_sem);
-
-                        pages_num = get_user_pages(current, current->mm,
-                                                   mem->userspace_addr,
-                                                   npages, 1, 1, new.phys_mem,
-                                                   NULL);
-
-                        up_read(&current->mm->mmap_sem);
-                        if (pages_num != npages)
-                                goto out_unlock;
+                        new.userspace_addr = mem->userspace_addr;
                 } else {
                         for (i = 0; i < npages; ++i) {
                                 new.phys_mem[i] = alloc_page(GFP_HIGHUSER
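
Note on the hunk above: registering a userspace-backed slot no longer pins every page up front with get_user_pages(); the slot only records userspace_addr, and individual pages are resolved on demand in gfn_to_page() (next hunk). Below is a minimal sketch of the address arithmetic that on-demand lookup relies on; the field names base_gfn and userspace_addr come from the patch, while slot_sketch and gfn_to_hva_sketch are illustrative stand-ins.

    /* Sketch only: stand-in for the slot fields this patch uses; the real
     * struct kvm_memory_slot carries more state. */
    struct slot_sketch {
            unsigned long base_gfn;        /* first guest frame number covered by the slot */
            unsigned long npages;          /* number of guest pages in the slot */
            unsigned long userspace_addr;  /* host virtual address backing base_gfn */
    };

    /* Host virtual address that backs a given gfn: the same arithmetic
     * gfn_to_page() performs in the next hunk (page_size stands in for PAGE_SIZE). */
    static unsigned long gfn_to_hva_sketch(const struct slot_sketch *slot,
                                           unsigned long gfn, unsigned long page_size)
    {
            return slot->userspace_addr + (gfn - slot->base_gfn) * page_size;
    }
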
@@ -1039,12 +1013,39 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
         gfn = unalias_gfn(kvm, gfn);
         slot = __gfn_to_memslot(kvm, gfn);
-        if (!slot)
+        if (!slot) {
+                get_page(bad_page);
                 return bad_page;
+        }
+        if (slot->user_alloc) {
+                struct page *page[1];
+                int npages;
+
+                down_read(&current->mm->mmap_sem);
+                npages = get_user_pages(current, current->mm,
+                                        slot->userspace_addr
+                                        + (gfn - slot->base_gfn) * PAGE_SIZE, 1,
+                                        1, 1, page, NULL);
+                up_read(&current->mm->mmap_sem);
+                if (npages != 1) {
+                        get_page(bad_page);
+                        return bad_page;
+                }
+                return page[0];
+        }
+        get_page(slot->phys_mem[gfn - slot->base_gfn]);
         return slot->phys_mem[gfn - slot->base_gfn];
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_page(struct page *page)
+{
+        if (!PageReserved(page))
+                SetPageDirty(page);
+        put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page);
+
 static int next_segment(unsigned long len, int offset)
 {
         if (len > PAGE_SIZE - offset)
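
Note on the hunk above: gfn_to_page() now always returns a page with a reference taken on it, including bad_page on failure, and kvm_release_page() is the matching drop, dirtying non-reserved pages before put_page(). The guest-page helpers changed below all follow the same shape; here is a minimal sketch of that calling convention (kernel context assumed; read_guest_byte is an illustrative name, not part of the patch).

    /* Sketch of the get/use/release convention introduced by this patch. */
    static int read_guest_byte(struct kvm *kvm, gfn_t gfn, int offset, u8 *out)
    {
            struct page *page = gfn_to_page(kvm, gfn); /* reference taken, may be bad_page */
            void *page_virt;

            if (is_error_page(page)) {
                    kvm_release_page(page);            /* error path still returns the reference */
                    return -EFAULT;
            }
            page_virt = kmap_atomic(page, KM_USER0);
            *out = *((u8 *)page_virt + offset);
            kunmap_atomic(page_virt, KM_USER0);
            kvm_release_page(page);                    /* dirty (if not reserved) and drop */
            return 0;
    }
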
@@ -1060,13 +1061,16 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
         struct page *page;
 
         page = gfn_to_page(kvm, gfn);
-        if (is_error_page(page))
+        if (is_error_page(page)) {
+                kvm_release_page(page);
                 return -EFAULT;
+        }
         page_virt = kmap_atomic(page, KM_USER0);
 
         memcpy(data, page_virt + offset, len);
 
         kunmap_atomic(page_virt, KM_USER0);
+        kvm_release_page(page);
         return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
@@ -1098,14 +1102,17 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
         struct page *page;
 
         page = gfn_to_page(kvm, gfn);
-        if (is_error_page(page))
+        if (is_error_page(page)) {
+                kvm_release_page(page);
                 return -EFAULT;
+        }
         page_virt = kmap_atomic(page, KM_USER0);
 
         memcpy(page_virt + offset, data, len);
 
         kunmap_atomic(page_virt, KM_USER0);
         mark_page_dirty(kvm, gfn);
+        kvm_release_page(page);
         return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
@@ -1136,13 +1143,16 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
         struct page *page;
 
         page = gfn_to_page(kvm, gfn);
-        if (is_error_page(page))
+        if (is_error_page(page)) {
+                kvm_release_page(page);
                 return -EFAULT;
+        }
         page_virt = kmap_atomic(page, KM_USER0);
 
         memset(page_virt + offset, 0, len);
 
         kunmap_atomic(page_virt, KM_USER0);
+        kvm_release_page(page);
         return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
@@ -2070,8 +2080,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
         for (i = 0; i < nr_pages; ++i) {
                 mutex_lock(&vcpu->kvm->lock);
                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
-                if (page)
-                        get_page(page);
                 vcpu->pio.guest_pages[i] = page;
                 mutex_unlock(&vcpu->kvm->lock);
                 if (!page) {
@@ -3074,9 +3082,10 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 
         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
         page = gfn_to_page(kvm, pgoff);
-        if (is_error_page(page))
+        if (is_error_page(page)) {
+                kvm_release_page(page);
                 return NOPAGE_SIGBUS;
-        get_page(page);
+        }
         if (type != NULL)
                 *type = VM_FAULT_MINOR;
 
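
Note on the hunk above: the explicit get_page() disappears because the reference is now taken inside gfn_to_page() itself and handed straight to the fault path; only the error case has to give the reference back. A compact sketch of that ownership rule follows (fault_in_gfn is an illustrative name, not part of the patch).

    /* Sketch only: how the reference returned by gfn_to_page() leaves a
     * nopage-style handler. */
    static struct page *fault_in_gfn(struct kvm *kvm, gfn_t gfn)
    {
            struct page *page = gfn_to_page(kvm, gfn); /* reference taken here */

            if (is_error_page(page)) {
                    kvm_release_page(page);            /* nothing handed out, so drop it */
                    return NULL;
            }
            return page;                               /* the fault path now owns the reference */
    }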