path: root/arch/powerpc/kvm/book3s_hv.c
author     Paul Mackerras <paulus@samba.org>   2012-09-11 09:28:18 -0400
committer  Alexander Graf <agraf@suse.de>      2012-10-05 17:38:51 -0400
commit     dfe49dbd1fc7310a4e0e2f83ae737cd7d34fa0cd (patch)
tree       40a86f860b131def8054f0f662053367933c442f /arch/powerpc/kvm/book3s_hv.c
parent     a66b48c3a39fa1c4223d4f847fdc7a04ed1618de (diff)
KVM: PPC: Book3S HV: Handle memory slot deletion and modification correctly
This adds an implementation of kvm_arch_flush_shadow_memslot for Book3S HV, and arranges for kvmppc_core_commit_memory_region to flush the dirty log when modifying an existing slot. With this, we can handle deletion and modification of memory slots.

kvm_arch_flush_shadow_memslot calls kvmppc_core_flush_memslot, which on Book3S HV now traverses the reverse map chains to remove any HPT (hashed page table) entries referring to pages in the memslot. This gets called by generic code whenever deleting a memslot or changing the guest physical address for a memslot.

We flush the dirty log in kvmppc_core_commit_memory_region for consistency with what x86 does. We only need to flush when an existing memslot is being modified, because for a new memslot the rmap array (which stores the dirty bits) is all zero, meaning that every page is considered clean already, and when deleting a memslot we obviously don't care about the dirty bits any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
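For illustration, a minimal sketch of the call chain described above. The kvm_arch_flush_shadow_memslot / kvmppc_core_flush_memslot wiring added by this series lives outside book3s_hv.c (and so does not appear in the diff below); the body here is an assumption about its shape, not a quote of the patch:

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * Delegate to the core-specific backend. On Book3S HV this walks
	 * the memslot's rmap chains and removes any HPT entries that map
	 * pages in the slot, as the commit message describes.
	 */
	kvmppc_core_flush_memslot(kvm, slot);
}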
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  18
1 file changed, 16 insertions, 2 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 817837de7362..38c7f1bc3495 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1288,7 +1288,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	n = kvm_dirty_bitmap_bytes(memslot);
 	memset(memslot->dirty_bitmap, 0, n);
 
-	r = kvmppc_hv_get_dirty_log(kvm, memslot);
+	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
 	if (r)
 		goto out;
 
@@ -1378,8 +1378,22 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem)
+				      struct kvm_userspace_memory_region *mem,
+				      struct kvm_memory_slot old)
 {
+	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memory_slot *memslot;
+
+	if (npages && old.npages) {
+		/*
+		 * If modifying a memslot, reset all the rmap dirty bits.
+		 * If this is a new memslot, we don't need to do anything
+		 * since the rmap array starts out as all zeroes,
+		 * i.e. no pages are dirty.
+		 */
+		memslot = id_to_memslot(kvm->memslots, mem->slot);
+		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
+	}
 }
 
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
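For context, a minimal sketch of the caller side that the new third parameter implies. The prototype of kvm_arch_commit_memory_region shown here is assumed from the KVM arch API of this era and is not part of this diff:

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	/*
	 * Passing the old slot down lets the Book3S HV backend tell a
	 * modified slot (old.npages != 0) apart from a newly created one,
	 * so it only resets the rmap dirty bits when they could be stale.
	 */
	kvmppc_core_commit_memory_region(kvm, mem, old);
}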