author     Laurent Vivier <Laurent.Vivier@bull.net>    2007-08-30 08:56:21 -0400
committer  Avi Kivity <avi@qumranet.com>               2007-10-13 04:18:27 -0400
commit     0d8d2bd4f20c8a2a254b4fe3bc114f12214a6d73 (patch)
tree       ca8234bdcb71b7cc0955ef89ae5b028d19ca709e
parent     1747fb71fd7c9389696e91f354d2f841b5c85790 (diff)
KVM: Simplify memory allocation
The spinlock->mutex conversion allows us to make some code simplifications. Since we can now keep the lock for longer, we no longer have to release it around the allocations and then check whether the environment was modified before re-taking it. We can remove kvm->busy and kvm->memory_config_version.

Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  drivers/kvm/kvm.h        2
-rw-r--r--  drivers/kvm/kvm_main.c  38
2 files changed, 3 insertions(+), 37 deletions(-)
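To illustrate the simplification described in the commit message, here is a minimal user-space sketch. It is not KVM code: struct slot_table, config_version, reconfigure_old() and reconfigure_new() are invented names. It contrasts the drop-the-lock / allocate / revalidate-and-retry pattern this patch removes with simply holding a sleepable mutex across the allocation, which is what the spinlock->mutex conversion makes possible.

/*
 * Standalone sketch (build with: cc -pthread sketch.c) of the two locking
 * patterns.  All names are illustrative, not KVM symbols.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slot_table {
	pthread_mutex_t lock;	/* sleepable lock, like the kvm->lock mutex */
	int config_version;	/* only needed by the old pattern */
	size_t nslots;
	void **slots;
};

/* Old pattern: drop the lock around the allocation, then revalidate. */
static int reconfigure_old(struct slot_table *t, size_t nslots)
{
	void **mem;
	int version;

retry:
	pthread_mutex_lock(&t->lock);
	version = t->config_version;
	pthread_mutex_unlock(&t->lock);

	/* The allocation happens outside the lock... */
	mem = calloc(nslots, sizeof(*mem));
	if (!mem)
		return -1;

	pthread_mutex_lock(&t->lock);
	if (version != t->config_version) {
		/* ...so a concurrent change forces us to discard it and retry. */
		pthread_mutex_unlock(&t->lock);
		free(mem);
		goto retry;
	}
	free(t->slots);
	t->slots = mem;
	t->nslots = nslots;
	t->config_version++;
	pthread_mutex_unlock(&t->lock);
	return 0;
}

/*
 * New pattern: a mutex, unlike a spinlock, may be held across a call that
 * can sleep, so the whole update is one critical section and no version
 * check or busy flag is needed.
 */
static int reconfigure_new(struct slot_table *t, size_t nslots)
{
	void **mem;

	pthread_mutex_lock(&t->lock);
	mem = calloc(nslots, sizeof(*mem));
	if (!mem) {
		pthread_mutex_unlock(&t->lock);
		return -1;
	}
	free(t->slots);
	t->slots = mem;
	t->nslots = nslots;
	pthread_mutex_unlock(&t->lock);
	return 0;
}

int main(void)
{
	struct slot_table t;

	memset(&t, 0, sizeof(t));
	pthread_mutex_init(&t.lock, NULL);

	if (reconfigure_old(&t, 4) == 0 && reconfigure_new(&t, 8) == 0)
		printf("reconfigured to %zu slots\n", t.nslots);

	free(t.slots);
	pthread_mutex_destroy(&t.lock);
	return 0;
}

The sketch mirrors the patch: once the lock can be held across the allocation, the retry label, the version counter and the busy flag all disappear, leaving a single lock/unlock pair around the whole update.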
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index ee9f8bdee755..351da40807c5 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -411,8 +411,6 @@ struct kvm {
 	int n_free_mmu_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
-	int memory_config_version;
-	int busy;
 	unsigned long rmap_overflow;
 	struct list_head vm_list;
 	struct file *filp;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 9dffbbea46a7..8da13a462e3c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -679,7 +679,6 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
-	int memory_config_version;
 
 	r = -EINVAL;
 	/* General sanity checks */
@@ -699,10 +698,8 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
-raced:
 	mutex_lock(&kvm->lock);
 
-	memory_config_version = kvm->memory_config_version;
 	new = old = *memslot;
 
 	new.base_gfn = base_gfn;
@@ -725,11 +722,6 @@ raced:
 		      (base_gfn >= s->base_gfn + s->npages)))
 			goto out_unlock;
 	}
-	/*
-	 * Do memory allocations outside lock.  memory_config_version will
-	 * detect any races.
-	 */
-	mutex_unlock(&kvm->lock);
 
 	/* Deallocate if slot is being removed */
 	if (!npages)
@@ -746,14 +738,14 @@ raced:
 		new.phys_mem = vmalloc(npages * sizeof(struct page *));
 
 		if (!new.phys_mem)
-			goto out_free;
+			goto out_unlock;
 
 		memset(new.phys_mem, 0, npages * sizeof(struct page *));
 		for (i = 0; i < npages; ++i) {
 			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
 						     | __GFP_ZERO);
 			if (!new.phys_mem[i])
-				goto out_free;
+				goto out_unlock;
 			set_page_private(new.phys_mem[i],0);
 		}
 	}
@@ -764,27 +756,14 @@ raced:
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
-			goto out_free;
+			goto out_unlock;
 		memset(new.dirty_bitmap, 0, dirty_bytes);
 	}
 
-	mutex_lock(&kvm->lock);
-
-	if (memory_config_version != kvm->memory_config_version) {
-		mutex_unlock(&kvm->lock);
-		kvm_free_physmem_slot(&new, &old);
-		goto raced;
-	}
-
-	r = -EAGAIN;
-	if (kvm->busy)
-		goto out_unlock;
-
 	if (mem->slot >= kvm->nmemslots)
 		kvm->nmemslots = mem->slot + 1;
 
 	*memslot = new;
-	++kvm->memory_config_version;
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	kvm_flush_remote_tlbs(kvm);
@@ -796,7 +775,6 @@ raced:
 
 out_unlock:
 	mutex_unlock(&kvm->lock);
-out_free:
 	kvm_free_physmem_slot(&new, &old);
 out:
 	return r;
@@ -815,12 +793,6 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	mutex_lock(&kvm->lock);
 
-	/*
-	 * Prevent changes to guest memory configuration even while the lock
-	 * is not taken.
-	 */
-	++kvm->busy;
-	mutex_unlock(&kvm->lock);
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
@@ -841,18 +813,14 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (any) {
-		mutex_lock(&kvm->lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		kvm_flush_remote_tlbs(kvm);
 		memset(memslot->dirty_bitmap, 0, n);
-		mutex_unlock(&kvm->lock);
 	}
 
 	r = 0;
 
 out:
-	mutex_lock(&kvm->lock);
-	--kvm->busy;
 	mutex_unlock(&kvm->lock);
 	return r;
 }