author    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2012-07-02 04:55:48 -0400
committer Marcelo Tosatti <mtosatti@redhat.com>	2012-07-18 15:55:04 -0400
commit    84504ef38673fa021b3d8f3da2b79cf878b33315
tree      55c6238ebfa17790dad0136493b7b0f375828ace
parent    d19a748b1c42b133e9263e9023c1d162efa6f4ad
KVM: MMU: Make kvm_handle_hva() handle range of addresses
When guest's memory is backed by THP pages, MMU notifier needs to call
kvm_unmap_hva(), which in turn leads to kvm_handle_hva(), in a loop to
invalidate a range of pages which constitute one huge page:

	for each page
		for each memslot
			if page is in memslot
				unmap using rmap

This means although every page in that range is expected to be found in
the same memslot, we are forced to check unrelated memslots many times.
If the guest has more memslots, the situation will become worse.

Furthermore, if the range does not include any pages in the guest's
memory, the loop over the pages will just consume extra time.

This patch, together with the following patches, solves this problem by
introducing kvm_handle_hva_range() which makes the loop look like this:

	for each memslot
		for each page in memslot
			unmap using rmap

In this new processing, the actual work is converted to a loop over rmap
which is much more cache friendly than before.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Cc: Alexander Graf <agraf@suse.de>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
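For illustration only (not part of the patch): a minimal userspace sketch of
the per-memslot clipping and hva-to-gfn arithmetic that the new
kvm_handle_hva_range() performs. The slot layout, the PAGE_SHIFT value of 12,
and the hva_to_gfn() helper here are simplified assumptions standing in for
the kernel's struct kvm_memory_slot and hva_to_gfn_memslot().

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* simplified stand-in for struct kvm_memory_slot */
struct slot {
	unsigned long userspace_addr;	/* hva of the slot's first page */
	unsigned long npages;
	unsigned long base_gfn;
};

/* simplified stand-in for hva_to_gfn_memslot() */
static unsigned long hva_to_gfn(unsigned long hva, const struct slot *s)
{
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	/* a 512-page (2MB) slot, as a THP-backed range would produce */
	struct slot s = { 0x7f0000000000UL, 512, 0x100 };
	unsigned long start = s.userspace_addr;	/* invalidate whole slot */
	unsigned long end = start + 512 * PAGE_SIZE;

	/* clip [start, end) against the slot, as the patch does */
	unsigned long slot_end = s.userspace_addr + (s.npages << PAGE_SHIFT);
	unsigned long hva_start = start > s.userspace_addr ? start : s.userspace_addr;
	unsigned long hva_end = end < slot_end ? end : slot_end;

	if (hva_start >= hva_end) {
		puts("range misses this slot");	/* the new 'continue' path */
		return 0;
	}

	/* +PAGE_SIZE-1 rounds up so a partially covered last page counts */
	unsigned long gfn = hva_to_gfn(hva_start, &s);
	unsigned long gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &s);

	printf("gfn range [%#lx, %#lx): %lu pages\n", gfn, gfn_end, gfn_end - gfn);
	return 0;
}

With these numbers the sketch prints "gfn range [0x100, 0x300): 512 pages",
i.e. one contiguous gfn walk per slot instead of 512 separate scans over all
memslots.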
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 42 ++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 170a632d9d34..7235b0c9587d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1259,10 +1259,13 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	return 0;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-			  unsigned long data,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-					 unsigned long data))
+static int kvm_handle_hva_range(struct kvm *kvm,
+				unsigned long start,
+				unsigned long end,
+				unsigned long data,
+				int (*handler)(struct kvm *kvm,
+					       unsigned long *rmapp,
+					       unsigned long data))
 {
 	int j;
 	int ret;
@@ -1273,13 +1276,22 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots) {
-		unsigned long start = memslot->userspace_addr;
-		unsigned long end;
+		unsigned long hva_start, hva_end;
+		gfn_t gfn, gfn_end;
 
-		end = start + (memslot->npages << PAGE_SHIFT);
-		if (hva >= start && hva < end) {
-			gfn_t gfn = hva_to_gfn_memslot(hva, memslot);
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn, gfn+1, ..., gfn_end-1}.
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
+		for (; gfn < gfn_end; ++gfn) {
 			ret = 0;
 
 			for (j = PT_PAGE_TABLE_LEVEL;
@@ -1289,7 +1301,9 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 				rmapp = __gfn_to_rmap(gfn, j, memslot);
 				ret |= handler(kvm, rmapp, data);
 			}
-			trace_kvm_age_page(hva, memslot, ret);
+			trace_kvm_age_page(memslot->userspace_addr +
+					(gfn - memslot->base_gfn) * PAGE_SIZE,
+					memslot, ret);
 			retval |= ret;
 		}
 	}
@@ -1297,6 +1311,14 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	return retval;
 }
 
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+			  unsigned long data,
+			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+					 unsigned long data))
+{
+	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
+}
+
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
 	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
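A note on the degenerate range used by the kvm_handle_hva() wrapper above (an
observation, not part of the patch): for [hva, hva + 1) inside a slot, gfn =
hva_to_gfn_memslot(hva, memslot), and the rounded upper bound hva_end +
PAGE_SIZE - 1 = hva + PAGE_SIZE lands exactly one page later, so gfn_end =
gfn + 1. The new loop therefore visits exactly one rmap chain per mapping
level, matching the old single-page behaviour of kvm_handle_hva().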