diff options
author    Marcelo Tosatti <mtosatti@redhat.com>    2009-12-23 11:35:21 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>    2010-03-01 10:35:44 -0500
commit    bc6678a33d9b952981a8e44a4f876c3ad64ca4d8 (patch)
tree      e26027179eb0d76f234509145a395dd6e5910074 /arch
parent    3ad26d8139a82b0510b1e0435ee82ae461d33401 (diff)
KVM: introduce kvm->srcu and convert kvm_set_memory_region to SRCU update
Use two steps for memslot deletion: mark the slot invalid (which stops
instantiation of new shadow pages for that slot, but allows destruction),
then instantiate the new empty slot.
Also simplifies kvm_handle_hva locking.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--    arch/ia64/kvm/kvm-ia64.c    4
-rw-r--r--    arch/x86/kvm/mmu.c         28
-rw-r--r--    arch/x86/kvm/vmx.c          6
3 files changed, 22 insertions, 16 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0757c7027986..b2e4d16dd39e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1382,7 +1382,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
1382 | int i, j; | 1382 | int i, j; |
1383 | unsigned long base_gfn; | 1383 | unsigned long base_gfn; |
1384 | 1384 | ||
1385 | slots = kvm->memslots; | 1385 | slots = rcu_dereference(kvm->memslots); |
1386 | for (i = 0; i < slots->nmemslots; i++) { | 1386 | for (i = 0; i < slots->nmemslots; i++) { |
1387 | memslot = &slots->memslots[i]; | 1387 | memslot = &slots->memslots[i]; |
1388 | base_gfn = memslot->base_gfn; | 1388 | base_gfn = memslot->base_gfn; |
@@ -1837,6 +1837,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1837 | struct kvm_memory_slot *memslot; | 1837 | struct kvm_memory_slot *memslot; |
1838 | int is_dirty = 0; | 1838 | int is_dirty = 0; |
1839 | 1839 | ||
1840 | down_write(&kvm->slots_lock); | ||
1840 | spin_lock(&kvm->arch.dirty_log_lock); | 1841 | spin_lock(&kvm->arch.dirty_log_lock); |
1841 | 1842 | ||
1842 | r = kvm_ia64_sync_dirty_log(kvm, log); | 1843 | r = kvm_ia64_sync_dirty_log(kvm, log); |
@@ -1856,6 +1857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1856 | } | 1857 | } |
1857 | r = 0; | 1858 | r = 0; |
1858 | out: | 1859 | out: |
1860 | up_write(&kvm->slots_lock); | ||
1859 | spin_unlock(&kvm->arch.dirty_log_lock); | 1861 | spin_unlock(&kvm->arch.dirty_log_lock); |
1860 | return r; | 1862 | return r; |
1861 | } | 1863 | } |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81f84d326a84..f8bf42a25995 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -29,6 +29,7 @@
29 | #include <linux/swap.h> | 29 | #include <linux/swap.h> |
30 | #include <linux/hugetlb.h> | 30 | #include <linux/hugetlb.h> |
31 | #include <linux/compiler.h> | 31 | #include <linux/compiler.h> |
32 | #include <linux/srcu.h> | ||
32 | 33 | ||
33 | #include <asm/page.h> | 34 | #include <asm/page.h> |
34 | #include <asm/cmpxchg.h> | 35 | #include <asm/cmpxchg.h> |
@@ -807,21 +808,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
807 | { | 808 | { |
808 | int i, j; | 809 | int i, j; |
809 | int retval = 0; | 810 | int retval = 0; |
810 | struct kvm_memslots *slots = kvm->memslots; | 811 | struct kvm_memslots *slots; |
812 | |||
813 | slots = rcu_dereference(kvm->memslots); | ||
811 | 814 | ||
812 | /* | ||
813 | * If mmap_sem isn't taken, we can look the memslots with only | ||
814 | * the mmu_lock by skipping over the slots with userspace_addr == 0. | ||
815 | */ | ||
816 | for (i = 0; i < slots->nmemslots; i++) { | 815 | for (i = 0; i < slots->nmemslots; i++) { |
817 | struct kvm_memory_slot *memslot = &slots->memslots[i]; | 816 | struct kvm_memory_slot *memslot = &slots->memslots[i]; |
818 | unsigned long start = memslot->userspace_addr; | 817 | unsigned long start = memslot->userspace_addr; |
819 | unsigned long end; | 818 | unsigned long end; |
820 | 819 | ||
821 | /* mmu_lock protects userspace_addr */ | ||
822 | if (!start) | ||
823 | continue; | ||
824 | |||
825 | end = start + (memslot->npages << PAGE_SHIFT); | 820 | end = start + (memslot->npages << PAGE_SHIFT); |
826 | if (hva >= start && hva < end) { | 821 | if (hva >= start && hva < end) { |
827 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; | 822 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; |
@@ -1617,7 +1612,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1617 | 1612 | ||
1618 | static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) | 1613 | static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) |
1619 | { | 1614 | { |
1620 | int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn)); | 1615 | int slot = memslot_id(kvm, gfn); |
1621 | struct kvm_mmu_page *sp = page_header(__pa(pte)); | 1616 | struct kvm_mmu_page *sp = page_header(__pa(pte)); |
1622 | 1617 | ||
1623 | __set_bit(slot, sp->slot_bitmap); | 1618 | __set_bit(slot, sp->slot_bitmap); |
@@ -3021,9 +3016,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3021 | int i; | 3016 | int i; |
3022 | unsigned int nr_mmu_pages; | 3017 | unsigned int nr_mmu_pages; |
3023 | unsigned int nr_pages = 0; | 3018 | unsigned int nr_pages = 0; |
3019 | struct kvm_memslots *slots; | ||
3024 | 3020 | ||
3025 | for (i = 0; i < kvm->memslots->nmemslots; i++) | 3021 | slots = rcu_dereference(kvm->memslots); |
3026 | nr_pages += kvm->memslots->memslots[i].npages; | 3022 | for (i = 0; i < slots->nmemslots; i++) |
3023 | nr_pages += slots->memslots[i].npages; | ||
3027 | 3024 | ||
3028 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; | 3025 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
3029 | nr_mmu_pages = max(nr_mmu_pages, | 3026 | nr_mmu_pages = max(nr_mmu_pages, |
@@ -3293,10 +3290,12 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
3293 | static int count_rmaps(struct kvm_vcpu *vcpu) | 3290 | static int count_rmaps(struct kvm_vcpu *vcpu) |
3294 | { | 3291 | { |
3295 | int nmaps = 0; | 3292 | int nmaps = 0; |
3296 | int i, j, k; | 3293 | int i, j, k, idx; |
3297 | 3294 | ||
3295 | idx = srcu_read_lock(&kvm->srcu); | ||
3296 | slots = rcu_dereference(kvm->memslots); | ||
3298 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | 3297 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { |
3299 | struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i]; | 3298 | struct kvm_memory_slot *m = &slots->memslots[i]; |
3300 | struct kvm_rmap_desc *d; | 3299 | struct kvm_rmap_desc *d; |
3301 | 3300 | ||
3302 | for (j = 0; j < m->npages; ++j) { | 3301 | for (j = 0; j < m->npages; ++j) { |
@@ -3319,6 +3318,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
3319 | } | 3318 | } |
3320 | } | 3319 | } |
3321 | } | 3320 | } |
3321 | srcu_read_unlock(&kvm->srcu, idx); | ||
3322 | return nmaps; | 3322 | return nmaps; |
3323 | } | 3323 | } |
3324 | 3324 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 18698799e365..f1cae7d6113d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1503,7 +1503,11 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1503 | static gva_t rmode_tss_base(struct kvm *kvm) | 1503 | static gva_t rmode_tss_base(struct kvm *kvm) |
1504 | { | 1504 | { |
1505 | if (!kvm->arch.tss_addr) { | 1505 | if (!kvm->arch.tss_addr) { |
1506 | gfn_t base_gfn = kvm->memslots->memslots[0].base_gfn + | 1506 | struct kvm_memslots *slots; |
1507 | gfn_t base_gfn; | ||
1508 | |||
1509 | slots = rcu_dereference(kvm->memslots); | ||
1510 | base_gfn = kvm->memslots->memslots[0].base_gfn + | ||
1507 | kvm->memslots->memslots[0].npages - 3; | 1511 | kvm->memslots->memslots[0].npages - 3; |
1508 | return base_gfn << PAGE_SHIFT; | 1512 | return base_gfn << PAGE_SHIFT; |
1509 | } | 1513 | } |