author     Paul Mackerras <paulus@samba.org>   2012-11-19 17:52:49 -0500
committer  Alexander Graf <agraf@suse.de>      2012-12-05 19:33:54 -0500
commit     44e5f6be62741bd44968f40f3afa1cff1df983f2 (patch)
tree       82476ebfb27757ed9ad146ab4ef5d072e58629a8 /arch
parent     4879f241720cda3e6c18a1713bf9b2ed2de14ee4 (diff)
KVM: PPC: Book3S HV: Add a mechanism for recording modified HPTEs
This uses a bit in our record of the guest view of the HPTE to record
when the HPTE gets modified.  We use a reserved bit for this, and ensure
that this bit is always cleared in HPTE values returned to the guest.

The recording of modified HPTEs is only done if other code indicates
its interest by setting kvm->arch.hpte_mod_interest to a non-zero value.
The reason for this is that when later commits add facilities for
userspace to read the HPT, the first pass of reading the HPT will be
quicker if there are no (or very few) HPTEs marked as modified, rather
than having most HPTEs marked as modified.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
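To make the intended use concrete, here is a minimal, hypothetical sketch of the consumer side (the real HPT reader only arrives in later commits). The function name and the harvesting loop are illustrative assumptions; only hpte_mod_interest, hpt_npte, revmap, guest_rpte and HPTE_GR_MODIFIED come from this patch and its context, and locking against concurrent real-mode updates is elided:

/*
 * Illustrative sketch only -- not part of this patch.  A later HPT
 * reader might register interest, harvest modified entries, and
 * clear the bit once each entry has been copied out.  The function
 * name is hypothetical and synchronization is omitted for brevity.
 */
static void kvmppc_harvest_modified_hptes(struct kvm *kvm)
{
	unsigned long i;

	/* ask the real-mode handlers to start recording modifications */
	atomic_inc(&kvm->arch.hpte_mod_interest);

	for (i = 0; i < kvm->arch.hpt_npte; ++i) {
		struct revmap_entry *rev = &kvm->arch.revmap[i];

		/* fast path: untouched entries are skipped cheaply */
		if (!(rev->guest_rpte & HPTE_GR_MODIFIED))
			continue;

		/* ... copy the guest view of HPTE i out to userspace ... */

		rev->guest_rpte &= ~HPTE_GR_MODIFIED;
	}

	atomic_dec(&kvm->arch.hpte_mod_interest);
}

Gating the recording on hpte_mod_interest keeps the common case cheap: with no reader registered, each HPTE update pays only an atomic_read(), and a reader's first pass sees few (ideally no) entries marked as modified.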
Diffstat (limited to 'arch')
 arch/powerpc/include/asm/kvm_book3s_64.h |  9 ++++++
 arch/powerpc/include/asm/kvm_host.h      |  1 +
 arch/powerpc/kvm/book3s_hv_rm_mmu.c      | 28 ++++++++++++++++----
 3 files changed, 34 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 1472a5b4e4e3..b322e5bd6964 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -50,6 +50,15 @@ extern int kvm_hpt_order; /* order of preallocated HPTs */
 #define HPTE_V_HVLOCK	0x40UL
 #define HPTE_V_ABSENT	0x20UL
 
+/*
+ * We use this bit in the guest_rpte field of the revmap entry
+ * to indicate a modified HPTE.
+ */
+#define HPTE_GR_MODIFIED	(1ul << 62)
+
+/* These bits are reserved in the guest view of the HPTE */
+#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
+
 static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
 {
 	unsigned long tmp, old;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3093896015f0..58c72646c445 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -248,6 +248,7 @@ struct kvm_arch {
 	atomic_t vcpus_running;
 	unsigned long hpt_npte;
 	unsigned long hpt_mask;
+	atomic_t hpte_mod_interest;
 	spinlock_t slot_phys_lock;
 	unsigned short last_vcpu[NR_CPUS];
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index ff2da5ce475c..ed563a5f25c8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -66,6 +66,17 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 }
 EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
 
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+					  struct revmap_entry *rev)
+{
+	if (atomic_read(&kvm->arch.hpte_mod_interest))
+		rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+
 /* Remove this HPTE from the chain for a real page */
 static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 				struct revmap_entry *rev,
@@ -138,7 +149,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	unsigned long slot_fn, hva;
 	unsigned long *hpte;
 	struct revmap_entry *rev;
-	unsigned long g_ptel = ptel;
+	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
 	unsigned long *physp, pte_size;
 	unsigned long is_io;
@@ -153,6 +164,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		return H_PARAMETER;
 	writing = hpte_is_writable(ptel);
 	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+	ptel &= ~HPTE_GR_RESERVED;
+	g_ptel = ptel;
 
 	/* used later to detect if we might have been invalidated */
 	mmu_seq = kvm->mmu_notifier_seq;
@@ -287,8 +300,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	rev = &kvm->arch.revmap[pte_index];
 	if (realmode)
 		rev = real_vmalloc_addr(rev);
-	if (rev)
+	if (rev) {
 		rev->guest_rpte = g_ptel;
+		note_hpte_modification(kvm, rev);
+	}
 
 	/* Link HPTE into reverse-map chain */
 	if (pteh & HPTE_V_VALID) {
@@ -392,7 +407,8 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 		/* Read PTE low word after tlbie to get final R/C values */
 		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
 	}
-	r = rev->guest_rpte;
+	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
+	note_hpte_modification(kvm, rev);
 	unlock_hpte(hpte, 0);
 
 	vcpu->arch.gpr[4] = v;
@@ -466,6 +482,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 
 		args[j] = ((0x80 | flags) << 56) + pte_index;
 		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+		note_hpte_modification(kvm, rev);
 
 		if (!(hp[0] & HPTE_V_VALID)) {
 			/* insert R and C bits from PTE */
@@ -555,6 +572,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (rev) {
 		r = (rev->guest_rpte & ~mask) | bits;
 		rev->guest_rpte = r;
+		note_hpte_modification(kvm, rev);
 	}
 	r = (hpte[1] & ~mask) | bits;
 
@@ -606,8 +624,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 			v &= ~HPTE_V_ABSENT;
 			v |= HPTE_V_VALID;
 		}
-		if (v & HPTE_V_VALID)
+		if (v & HPTE_V_VALID) {
 			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
+			r &= ~HPTE_GR_RESERVED;
+		}
 		vcpu->arch.gpr[4 + i * 2] = v;
 		vcpu->arch.gpr[5 + i * 2] = r;
 	}