author		Marcelo Tosatti <mtosatti@redhat.com>	2009-12-23 11:35:18 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:44 -0500
commit		f7784b8ec9b6a041fa828cfbe9012fe51933f5ac (patch)
tree		dc67f35e585bb06492852c01776aea4b737db48b /arch/x86
parent		fef9cce0eb28a67e688a411cc30b73625e49002b (diff)
KVM: split kvm_arch_set_memory_region into prepare and commit
Required for the SRCU conversion later.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/x86.c | 51
1 file changed, 29 insertions(+), 22 deletions(-)
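For orientation, the intended caller of the two new hooks is the generic memslot update path in virt/kvm/kvm_main.c, which is outside this arch/x86 diffstat: the prepare hook runs before the new slot array is published, the commit hook afterwards. Below is a minimal sketch of that calling sequence, assuming only the hook signatures introduced by this patch; the slot bookkeeping and the SRCU publication step are simplified and purely illustrative.

/* Sketch only: a simplified view of the generic __kvm_set_memory_region()
 * flow driving the split hooks.  The two hook signatures match this patch;
 * everything else is elided or hypothetical. */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	struct kvm_memory_slot old, new;
	int r;

	/* ... validate mem, snapshot the old slot, build the new slot ... */

	/* Phase 1: arch code may mmap/allocate, but publishes nothing yet. */
	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		return r;

	/* Publish the new slot array (after the SRCU conversion this becomes
	 * an rcu_assign_pointer()/synchronize_srcu() step rather than an
	 * update done under a lock). */

	/* Phase 2: arch code updates MMU state against the installed slot. */
	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	return 0;
}

The point of the split is that the prepare step may still fail before anything is visible to the rest of KVM, while the commit step runs only after the new slot array is in place and therefore cannot fail.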
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ce833191430..43da65feed49 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm);
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
+				struct kvm_userspace_memory_region *mem,
 				int user_alloc)
 {
-	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
+	int npages = memslot->npages;
 
 	/*To keep backward compatibility with older userspace,
 	 *x86 needs to hanlde !user_alloc case.
@@ -5254,26 +5254,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 			if (IS_ERR((void *)userspace_addr))
 				return PTR_ERR((void *)userspace_addr);
 
-			/* set userspace_addr atomically for kvm_hva_to_rmapp */
-			spin_lock(&kvm->mmu_lock);
 			memslot->userspace_addr = userspace_addr;
-			spin_unlock(&kvm->mmu_lock);
-		} else {
-			if (!old.user_alloc && old.rmap) {
-				int ret;
-
-				down_write(&current->mm->mmap_sem);
-				ret = do_munmap(current->mm, old.userspace_addr,
-						old.npages * PAGE_SIZE);
-				up_write(&current->mm->mmap_sem);
-				if (ret < 0)
-					printk(KERN_WARNING
-				       "kvm_vm_ioctl_set_memory_region: "
-				       "failed to munmap memory\n");
-			}
 		}
 	}
 
+
+	return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				struct kvm_memory_slot old,
+				int user_alloc)
+{
+
+	int npages = mem->memory_size >> PAGE_SHIFT;
+
+	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+		int ret;
+
+		down_write(&current->mm->mmap_sem);
+		ret = do_munmap(current->mm, old.userspace_addr,
+				old.npages * PAGE_SIZE);
+		up_write(&current->mm->mmap_sem);
+		if (ret < 0)
+			printk(KERN_WARNING
+			       "kvm_vm_ioctl_set_memory_region: "
+			       "failed to munmap memory\n");
+	}
+
 	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	spin_unlock(&kvm->mmu_lock);
-
-	return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)