aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2009-12-23 11:35:18 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2010-03-01 10:35:44 -0500
commitf7784b8ec9b6a041fa828cfbe9012fe51933f5ac (patch)
treedc67f35e585bb06492852c01776aea4b737db48b
parentfef9cce0eb28a67e688a411cc30b73625e49002b (diff)
KVM: split kvm_arch_set_memory_region into prepare and commit
Required for SRCU conversion later. Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--arch/ia64/kvm/kvm-ia64.c16
-rw-r--r--arch/powerpc/kvm/powerpc.c18
-rw-r--r--arch/s390/kvm/kvm-s390.c25
-rw-r--r--arch/x86/kvm/x86.c51
-rw-r--r--include/linux/kvm_host.h7
-rw-r--r--virt/kvm/kvm_main.c12
6 files changed, 82 insertions, 47 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 1ca1dbf48117..0757c7027986 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1578,15 +1578,15 @@ out:
1578 return r; 1578 return r;
1579} 1579}
1580 1580
1581int kvm_arch_set_memory_region(struct kvm *kvm, 1581int kvm_arch_prepare_memory_region(struct kvm *kvm,
1582 struct kvm_userspace_memory_region *mem, 1582 struct kvm_memory_slot *memslot,
1583 struct kvm_memory_slot old, 1583 struct kvm_memory_slot old,
1584 struct kvm_userspace_memory_region *mem,
1584 int user_alloc) 1585 int user_alloc)
1585{ 1586{
1586 unsigned long i; 1587 unsigned long i;
1587 unsigned long pfn; 1588 unsigned long pfn;
1588 int npages = mem->memory_size >> PAGE_SHIFT; 1589 int npages = memslot->npages;
1589 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1590 unsigned long base_gfn = memslot->base_gfn; 1590 unsigned long base_gfn = memslot->base_gfn;
1591 1591
1592 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) 1592 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
1610 return 0; 1610 return 0;
1611} 1611}
1612 1612
1613void kvm_arch_commit_memory_region(struct kvm *kvm,
1614 struct kvm_userspace_memory_region *mem,
1615 struct kvm_memory_slot old,
1616 int user_alloc)
1617{
1618 return;
1619}
1620
1613void kvm_arch_flush_shadow(struct kvm *kvm) 1621void kvm_arch_flush_shadow(struct kvm *kvm)
1614{ 1622{
1615 kvm_flush_remote_tlbs(kvm); 1623 kvm_flush_remote_tlbs(kvm);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f06cf93b178e..4633e7850dd2 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
165 return -EINVAL; 165 return -EINVAL;
166} 166}
167 167
168int kvm_arch_set_memory_region(struct kvm *kvm, 168int kvm_arch_prepare_memory_region(struct kvm *kvm,
169 struct kvm_userspace_memory_region *mem, 169 struct kvm_memory_slot *memslot,
170 struct kvm_memory_slot old, 170 struct kvm_memory_slot old,
171 int user_alloc) 171 struct kvm_userspace_memory_region *mem,
172 int user_alloc)
172{ 173{
173 return 0; 174 return 0;
174} 175}
175 176
177void kvm_arch_commit_memory_region(struct kvm *kvm,
178 struct kvm_userspace_memory_region *mem,
179 struct kvm_memory_slot old,
180 int user_alloc)
181{
182 return;
183}
184
185
176void kvm_arch_flush_shadow(struct kvm *kvm) 186void kvm_arch_flush_shadow(struct kvm *kvm)
177{ 187{
178} 188}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3fa0a10e4668..c8002193d9d4 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -690,14 +690,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
690} 690}
691 691
692/* Section: memory related */ 692/* Section: memory related */
693int kvm_arch_set_memory_region(struct kvm *kvm, 693int kvm_arch_prepare_memory_region(struct kvm *kvm,
694 struct kvm_userspace_memory_region *mem, 694 struct kvm_memory_slot *memslot,
695 struct kvm_memory_slot old, 695 struct kvm_memory_slot old,
696 int user_alloc) 696 struct kvm_userspace_memory_region *mem,
697 int user_alloc)
697{ 698{
698 int i;
699 struct kvm_vcpu *vcpu;
700
701 /* A few sanity checks. We can have exactly one memory slot which has 699 /* A few sanity checks. We can have exactly one memory slot which has
702 to start at guest virtual zero and which has to be located at a 700 to start at guest virtual zero and which has to be located at a
703 page boundary in userland and which has to end at a page boundary. 701 page boundary in userland and which has to end at a page boundary.
@@ -720,14 +718,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
720 if (!user_alloc) 718 if (!user_alloc)
721 return -EINVAL; 719 return -EINVAL;
722 720
721 return 0;
722}
723
724void kvm_arch_commit_memory_region(struct kvm *kvm,
725 struct kvm_userspace_memory_region *mem,
726 struct kvm_memory_slot old,
727 int user_alloc)
728{
729 int i;
730 struct kvm_vcpu *vcpu;
731
723 /* request update of sie control block for all available vcpus */ 732 /* request update of sie control block for all available vcpus */
724 kvm_for_each_vcpu(i, vcpu, kvm) { 733 kvm_for_each_vcpu(i, vcpu, kvm) {
725 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 734 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
726 continue; 735 continue;
727 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); 736 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
728 } 737 }
729
730 return 0;
731} 738}
732 739
733void kvm_arch_flush_shadow(struct kvm *kvm) 740void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ce833191430..43da65feed49 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
5228 kfree(kvm); 5228 kfree(kvm);
5229} 5229}
5230 5230
5231int kvm_arch_set_memory_region(struct kvm *kvm, 5231int kvm_arch_prepare_memory_region(struct kvm *kvm,
5232 struct kvm_userspace_memory_region *mem, 5232 struct kvm_memory_slot *memslot,
5233 struct kvm_memory_slot old, 5233 struct kvm_memory_slot old,
5234 struct kvm_userspace_memory_region *mem,
5234 int user_alloc) 5235 int user_alloc)
5235{ 5236{
5236 int npages = mem->memory_size >> PAGE_SHIFT; 5237 int npages = memslot->npages;
5237 struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
5238 5238
5239 /*To keep backward compatibility with older userspace, 5239 /*To keep backward compatibility with older userspace,
5240 *x86 needs to hanlde !user_alloc case. 5240 *x86 needs to hanlde !user_alloc case.
@@ -5254,26 +5254,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
5254 if (IS_ERR((void *)userspace_addr)) 5254 if (IS_ERR((void *)userspace_addr))
5255 return PTR_ERR((void *)userspace_addr); 5255 return PTR_ERR((void *)userspace_addr);
5256 5256
5257 /* set userspace_addr atomically for kvm_hva_to_rmapp */
5258 spin_lock(&kvm->mmu_lock);
5259 memslot->userspace_addr = userspace_addr; 5257 memslot->userspace_addr = userspace_addr;
5260 spin_unlock(&kvm->mmu_lock);
5261 } else {
5262 if (!old.user_alloc && old.rmap) {
5263 int ret;
5264
5265 down_write(&current->mm->mmap_sem);
5266 ret = do_munmap(current->mm, old.userspace_addr,
5267 old.npages * PAGE_SIZE);
5268 up_write(&current->mm->mmap_sem);
5269 if (ret < 0)
5270 printk(KERN_WARNING
5271 "kvm_vm_ioctl_set_memory_region: "
5272 "failed to munmap memory\n");
5273 }
5274 } 5258 }
5275 } 5259 }
5276 5260
5261
5262 return 0;
5263}
5264
5265void kvm_arch_commit_memory_region(struct kvm *kvm,
5266 struct kvm_userspace_memory_region *mem,
5267 struct kvm_memory_slot old,
5268 int user_alloc)
5269{
5270
5271 int npages = mem->memory_size >> PAGE_SHIFT;
5272
5273 if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5274 int ret;
5275
5276 down_write(&current->mm->mmap_sem);
5277 ret = do_munmap(current->mm, old.userspace_addr,
5278 old.npages * PAGE_SIZE);
5279 up_write(&current->mm->mmap_sem);
5280 if (ret < 0)
5281 printk(KERN_WARNING
5282 "kvm_vm_ioctl_set_memory_region: "
5283 "failed to munmap memory\n");
5284 }
5285
5277 spin_lock(&kvm->mmu_lock); 5286 spin_lock(&kvm->mmu_lock);
5278 if (!kvm->arch.n_requested_mmu_pages) { 5287 if (!kvm->arch.n_requested_mmu_pages) {
5279 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 5288 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
5282 5291
5283 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 5292 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
5284 spin_unlock(&kvm->mmu_lock); 5293 spin_unlock(&kvm->mmu_lock);
5285
5286 return 0;
5287} 5294}
5288 5295
5289void kvm_arch_flush_shadow(struct kvm *kvm) 5296void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 782bfb185f8a..3c44687b3425 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm,
253int __kvm_set_memory_region(struct kvm *kvm, 253int __kvm_set_memory_region(struct kvm *kvm,
254 struct kvm_userspace_memory_region *mem, 254 struct kvm_userspace_memory_region *mem,
255 int user_alloc); 255 int user_alloc);
256int kvm_arch_set_memory_region(struct kvm *kvm, 256int kvm_arch_prepare_memory_region(struct kvm *kvm,
257 struct kvm_memory_slot *memslot,
258 struct kvm_memory_slot old,
259 struct kvm_userspace_memory_region *mem,
260 int user_alloc);
261void kvm_arch_commit_memory_region(struct kvm *kvm,
257 struct kvm_userspace_memory_region *mem, 262 struct kvm_userspace_memory_region *mem,
258 struct kvm_memory_slot old, 263 struct kvm_memory_slot old,
259 int user_alloc); 264 int user_alloc);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 86dd8f3d29c9..c9f6cfe83120 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -663,6 +663,10 @@ skip_lpage:
663 if (!npages) 663 if (!npages)
664 kvm_arch_flush_shadow(kvm); 664 kvm_arch_flush_shadow(kvm);
665 665
666 r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
667 if (r)
668 goto out_free;
669
666 spin_lock(&kvm->mmu_lock); 670 spin_lock(&kvm->mmu_lock);
667 if (mem->slot >= kvm->memslots->nmemslots) 671 if (mem->slot >= kvm->memslots->nmemslots)
668 kvm->memslots->nmemslots = mem->slot + 1; 672 kvm->memslots->nmemslots = mem->slot + 1;
@@ -670,13 +674,7 @@ skip_lpage:
670 *memslot = new; 674 *memslot = new;
671 spin_unlock(&kvm->mmu_lock); 675 spin_unlock(&kvm->mmu_lock);
672 676
673 r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); 677 kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
674 if (r) {
675 spin_lock(&kvm->mmu_lock);
676 *memslot = old;
677 spin_unlock(&kvm->mmu_lock);
678 goto out_free;
679 }
680 678
681 kvm_free_physmem_slot(&old, npages ? &new : NULL); 679 kvm_free_physmem_slot(&old, npages ? &new : NULL);
682 /* Slot deletion case: we have to update the current slot */ 680 /* Slot deletion case: we have to update the current slot */