author    Paul Mackerras <paulus@samba.org>    2012-09-11 09:28:18 -0400
committer Alexander Graf <agraf@suse.de>       2012-10-05 17:38:51 -0400
commit    dfe49dbd1fc7310a4e0e2f83ae737cd7d34fa0cd
tree      40a86f860b131def8054f0f662053367933c442f
parent    a66b48c3a39fa1c4223d4f847fdc7a04ed1618de
KVM: PPC: Book3S HV: Handle memory slot deletion and modification correctly
This adds an implementation of kvm_arch_flush_shadow_memslot for Book3S HV, and arranges for kvmppc_core_commit_memory_region to flush the dirty log when modifying an existing slot. With this, we can handle deletion and modification of memory slots.

kvm_arch_flush_shadow_memslot calls kvmppc_core_flush_memslot, which on Book3S HV now traverses the reverse map chains to remove any HPT (hashed page table) entries referring to pages in the memslot. This gets called by generic code whenever deleting a memslot or changing the guest physical address for a memslot.

We flush the dirty log in kvmppc_core_commit_memory_region for consistency with what x86 does. We only need to flush when an existing memslot is being modified, because for a new memslot the rmap array (which stores the dirty bits) is all zero, meaning that every page is considered clean already, and when deleting a memslot we obviously don't care about the dirty bits any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
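For context, the new flush path is driven entirely by the generic memslot ioctl: deleting a slot (memory_size set to 0) or moving it (changing guest_phys_addr) is what makes generic code invoke kvm_arch_flush_shadow_memslot and, with this patch, kvmppc_core_flush_memslot. The sketch below is not part of the patch; it is a minimal userspace-side illustration, where the vm_fd argument and the dedicated helper are assumptions, while the ioctl and structure fields are the standard KVM userspace API.

/* Illustrative only: shows the userspace operation that reaches the new flush path. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int delete_memslot(int vm_fd, __u32 slot)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.guest_phys_addr = 0,
		.memory_size = 0,	/* size 0 asks KVM to delete the slot */
		.userspace_addr = 0,
	};

	/*
	 * Generic KVM code marks the old slot invalid and calls
	 * kvm_arch_flush_shadow_memslot(), which now reaches
	 * kvmppc_core_flush_memslot() and removes any HPT entries
	 * still referring to pages in the slot.
	 */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}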
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h |  2
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h    |  5
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c   | 33
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c          | 18
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c   |  2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c          |  7
-rw-r--r--  arch/powerpc/kvm/booke.c              |  7
-rw-r--r--  arch/powerpc/kvm/powerpc.c            |  3
8 files changed, 64 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..ab738005d2ea 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -160,7 +160,7 @@ extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
-			struct kvm_memory_slot *memslot);
+			struct kvm_memory_slot *memslot, unsigned long *map);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 41a00eae68c7..3fb980d293e5 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -151,9 +151,12 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem);
+				struct kvm_userspace_memory_region *mem,
+				struct kvm_memory_slot old);
 extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
 				struct kvm_ppc_smmu_info *info);
+extern void kvmppc_core_flush_memslot(struct kvm *kvm,
+				struct kvm_memory_slot *memslot);
 
 extern int kvmppc_bookehv_init(void);
 extern void kvmppc_bookehv_exit(void);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index a389cc62b16c..f598366e51c6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -851,7 +851,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		psize = hpte_page_size(hptep[0], ptel);
 		if ((hptep[0] & HPTE_V_VALID) &&
 		    hpte_rpn(ptel, psize) == gfn) {
-			hptep[0] |= HPTE_V_ABSENT;
+			if (kvm->arch.using_mmu_notifiers)
+				hptep[0] |= HPTE_V_ABSENT;
 			kvmppc_invalidate_hpte(kvm, hptep, i);
 			/* Harvest R and C */
 			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
@@ -878,6 +879,28 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 	return 0;
 }
 
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	unsigned long *rmapp;
+	unsigned long gfn;
+	unsigned long n;
+
+	rmapp = memslot->arch.rmap;
+	gfn = memslot->base_gfn;
+	for (n = memslot->npages; n; --n) {
+		/*
+		 * Testing the present bit without locking is OK because
+		 * the memslot has been marked invalid already, and hence
+		 * no new HPTEs referencing this page can be created,
+		 * thus the present bit can't go from 0 to 1.
+		 */
+		if (*rmapp & KVMPPC_RMAP_PRESENT)
+			kvm_unmap_rmapp(kvm, rmapp, gfn);
+		++rmapp;
+		++gfn;
+	}
+}
+
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			 unsigned long gfn)
 {
@@ -1031,16 +1054,16 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
 	return ret;
 }
 
-long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     unsigned long *map)
 {
 	unsigned long i;
-	unsigned long *rmapp, *map;
+	unsigned long *rmapp;
 
 	preempt_disable();
 	rmapp = memslot->arch.rmap;
-	map = memslot->dirty_bitmap;
 	for (i = 0; i < memslot->npages; ++i) {
-		if (kvm_test_clear_dirty(kvm, rmapp))
+		if (kvm_test_clear_dirty(kvm, rmapp) && map)
 			__set_bit_le(i, map);
 		++rmapp;
 	}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 817837de7362..38c7f1bc3495 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1288,7 +1288,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	n = kvm_dirty_bitmap_bytes(memslot);
 	memset(memslot->dirty_bitmap, 0, n);
 
-	r = kvmppc_hv_get_dirty_log(kvm, memslot);
+	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
 	if (r)
 		goto out;
 
@@ -1378,8 +1378,22 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem)
+				      struct kvm_userspace_memory_region *mem,
+				      struct kvm_memory_slot old)
 {
+	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memory_slot *memslot;
+
+	if (npages && old.npages) {
+		/*
+		 * If modifying a memslot, reset all the rmap dirty bits.
+		 * If this is a new memslot, we don't need to do anything
+		 * since the rmap array starts out as all zeroes,
+		 * i.e. no pages are dirty.
+		 */
+		memslot = id_to_memslot(kvm->memslots, mem->slot);
+		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
+	}
 }
 
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 63eb94e63cc3..9955216477a4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -81,7 +81,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	ptel = rev->guest_rpte |= rcbits;
 	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
 	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
-	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+	if (!memslot)
 		return;
 
 	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fdadc9e57da2..4d0667a810a4 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1239,7 +1239,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem)
+				      struct kvm_userspace_memory_region *mem,
+				      struct kvm_memory_slot old)
+{
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 514405752988..3a6490fc6fcd 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1457,7 +1457,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem)
+				      struct kvm_userspace_memory_region *mem,
+				      struct kvm_memory_slot old)
+{
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 33122dd89da9..8443e23f3605 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -411,7 +411,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot old,
 				   int user_alloc)
 {
-	kvmppc_core_commit_memory_region(kvm, mem);
+	kvmppc_core_commit_memory_region(kvm, mem, old);
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -421,6 +421,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
+	kvmppc_core_flush_memslot(kvm, slot);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)