path: root/arch/x86/kvm/mmu.c
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81f84d326a84..f8bf42a25995 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -29,6 +29,7 @@
 #include <linux/swap.h>
 #include <linux/hugetlb.h>
 #include <linux/compiler.h>
+#include <linux/srcu.h>
 
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
@@ -807,21 +808,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 {
 	int i, j;
 	int retval = 0;
-	struct kvm_memslots *slots = kvm->memslots;
+	struct kvm_memslots *slots;
+
+	slots = rcu_dereference(kvm->memslots);
 
-	/*
-	 * If mmap_sem isn't taken, we can look the memslots with only
-	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
-	 */
 	for (i = 0; i < slots->nmemslots; i++) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
 		unsigned long start = memslot->userspace_addr;
 		unsigned long end;
 
-		/* mmu_lock protects userspace_addr */
-		if (!start)
-			continue;
-
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
@@ -1617,7 +1612,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
-	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+	int slot = memslot_id(kvm, gfn);
 	struct kvm_mmu_page *sp = page_header(__pa(pte));
 
 	__set_bit(slot, sp->slot_bitmap);
@@ -3021,9 +3016,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 	int i;
 	unsigned int nr_mmu_pages;
 	unsigned int nr_pages = 0;
+	struct kvm_memslots *slots;
 
-	for (i = 0; i < kvm->memslots->nmemslots; i++)
-		nr_pages += kvm->memslots->memslots[i].npages;
+	slots = rcu_dereference(kvm->memslots);
+	for (i = 0; i < slots->nmemslots; i++)
+		nr_pages += slots->memslots[i].npages;
 
 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
 	nr_mmu_pages = max(nr_mmu_pages,
@@ -3293,10 +3290,12 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 static int count_rmaps(struct kvm_vcpu *vcpu)
 {
 	int nmaps = 0;
-	int i, j, k;
+	int i, j, k, idx;
 
+	idx = srcu_read_lock(&kvm->srcu);
+	slots = rcu_dereference(kvm->memslots);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-		struct kvm_memory_slot *m = &vcpu->kvm->memslots->memslots[i];
+		struct kvm_memory_slot *m = &slots->memslots[i];
 		struct kvm_rmap_desc *d;
 
 		for (j = 0; j < m->npages; ++j) {
@@ -3319,6 +3318,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 			}
 		}
 	}
+	srcu_read_unlock(&kvm->srcu, idx);
 	return nmaps;
 }
 
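Every hunk above applies the same conversion: readers stop touching kvm->memslots directly and instead fetch it with rcu_dereference(), with the lookup bracketed by an SRCU read-side critical section on kvm->srcu where one is not already held. Below is a minimal sketch of that reader-side pattern, not code from the patch; the helper name kvm_hva_in_memslot() is hypothetical, and the kvm_host.h include is assumed to provide the kvm, kvm_memslots and kvm_memory_slot definitions used here.

/*
 * Sketch of the memslot reader pattern this patch adopts:
 * enter SRCU, rcu_dereference() the memslots pointer, walk the
 * slots, then leave SRCU.  Helper name is hypothetical.
 */
#include <linux/kvm_host.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

static bool kvm_hva_in_memslot(struct kvm *kvm, unsigned long hva)
{
	struct kvm_memslots *slots;
	bool found = false;
	int i, idx;

	idx = srcu_read_lock(&kvm->srcu);		/* enter SRCU read side */
	slots = rcu_dereference(kvm->memslots);		/* fetch current memslots */

	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end = start + (memslot->npages << PAGE_SHIFT);

		if (hva >= start && hva < end) {
			found = true;
			break;
		}
	}

	srcu_read_unlock(&kvm->srcu, idx);		/* leave SRCU read side */
	return found;
}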