author      Paul Mackerras <paulus@samba.org>   2013-09-20 00:52:54 -0400
committer   Alexander Graf <agraf@suse.de>      2013-10-17 08:49:36 -0400
commit      491d6ecc17171518565358c2cfe33b59722d234c (patch)
tree        406fdbe1f7da3b21adf7e2ccad72a1f0534471fd /arch/powerpc/kvm
parent      adc0bafe00f4c7e5f052c9f29e75a072e03a19fc (diff)
KVM: PPC: Book3S PR: Reduce number of shadow PTEs invalidated by MMU notifiers
Currently, whenever any of the MMU notifier callbacks get called, we
invalidate all the shadow PTEs. This is inefficient because it means
that we typically then get a lot of DSIs and ISIs in the guest to fault
the shadow PTEs back in. We do this even if the address range being
notified doesn't correspond to guest memory.
This commit adds code to scan the memslot array to find out what range(s)
of guest physical addresses correspond to the host virtual address range
being affected. For each such range we flush only the shadow PTEs
for that range, on all CPUs.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
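
As an illustration of the conversion the patch introduces, here is a minimal, standalone C sketch of how a host virtual address (hva) range gets clamped to a memslot and turned into a guest frame number (gfn) range. It is not kernel code: struct memslot and hva_to_gfn() below are simplified stand-ins for the kernel's struct kvm_memory_slot and hva_to_gfn_memslot(), and the base_gfn field plus the gfn = base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT) formula are assumptions about those definitions rather than something shown in this patch.

/*
 * Standalone sketch of the hva -> gfn range clamping that
 * do_kvm_unmap_hva() performs for each memslot.  struct memslot and
 * hva_to_gfn() are simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot {
        unsigned long base_gfn;       /* first guest frame number of the slot */
        unsigned long userspace_addr; /* host virtual address backing the slot */
        unsigned long npages;         /* slot size in pages */
};

/* Assumed formula: gfn = base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT) */
static unsigned long hva_to_gfn(unsigned long hva, const struct memslot *s)
{
        return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
        struct memslot slot = {
                .base_gfn       = 0x100,
                .userspace_addr = 0x10000000UL,
                .npages         = 0x1000,       /* a 16 MiB slot */
        };
        /* Host virtual range handed to the MMU notifier (two pages). */
        unsigned long start = slot.userspace_addr + 0x1000;
        unsigned long end   = slot.userspace_addr + 0x3000;

        /* Clamp the notifier range to the part backed by this memslot. */
        unsigned long slot_end  = slot.userspace_addr + (slot.npages << PAGE_SHIFT);
        unsigned long hva_start = start > slot.userspace_addr ? start : slot.userspace_addr;
        unsigned long hva_end   = end < slot_end ? end : slot_end;

        if (hva_start >= hva_end)
                return 0;       /* range does not touch this slot's guest memory */

        /*
         * Same rounding as the patch: gfn_end is exclusive and still covers
         * a partially intersected final page.
         */
        unsigned long gfn     = hva_to_gfn(hva_start, &slot);
        unsigned long gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &slot);

        printf("flush gpa [0x%lx, 0x%lx)\n",
               gfn << PAGE_SHIFT, gfn_end << PAGE_SHIFT);
        return 0;
}

Compiled and run, this prints flush gpa [0x101000, 0x103000): only the two guest pages that intersect the invalidated host range would be flushed, rather than every shadow PTE.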
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/book3s_pr.c   40
1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 2f84ed807184..6075dbd0b364 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -150,24 +150,48 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 }
 
 /************* MMU Notifiers *************/
+static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
+                             unsigned long end)
+{
+        long i;
+        struct kvm_vcpu *vcpu;
+        struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
+
+        slots = kvm_memslots(kvm);
+        kvm_for_each_memslot(memslot, slots) {
+                unsigned long hva_start, hva_end;
+                gfn_t gfn, gfn_end;
+
+                hva_start = max(start, memslot->userspace_addr);
+                hva_end = min(end, memslot->userspace_addr +
+                                   (memslot->npages << PAGE_SHIFT));
+                if (hva_start >= hva_end)
+                        continue;
+                /*
+                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+                 * {gfn, gfn+1, ..., gfn_end-1}.
+                 */
+                gfn = hva_to_gfn_memslot(hva_start, memslot);
+                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+                kvm_for_each_vcpu(i, vcpu, kvm)
+                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
+                                              gfn_end << PAGE_SHIFT);
+        }
+}
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
         trace_kvm_unmap_hva(hva);
 
-        /*
-         * Flush all shadow tlb entries everywhere. This is slow, but
-         * we are 100% sure that we catch the to be unmapped page
-         */
-        kvm_flush_remote_tlbs(kvm);
+        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 
         return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-        /* kvm_unmap_hva flushes everything anyways */
-        kvm_unmap_hva(kvm, start);
+        do_kvm_unmap_hva(kvm, start, end);
 
         return 0;
 }
@@ -187,7 +211,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
         /* The page will get remapped properly on its next fault */
-        kvm_unmap_hva(kvm, hva);
+        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
 }
 
 /*****************************************/
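
Two details of the new helper are worth noting. First, gfn_end is derived from hva_end + PAGE_SIZE - 1, which rounds a partially covered final page up so that every page intersecting [hva_start, hva_end) falls inside the exclusive range [gfn, gfn_end), matching the set described in the comment. Second, the flush is now issued per vCPU through kvmppc_mmu_pte_pflush() on just that guest-physical range, replacing the unconditional kvm_flush_remote_tlbs() that previously invalidated all shadow PTEs on every notifier callback.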