author     Paul Mackerras <paulus@samba.org>	2013-09-20 00:52:53 -0400
committer  Alexander Graf <agraf@suse.de>	2013-10-17 08:49:36 -0400
commit     adc0bafe00f4c7e5f052c9f29e75a072e03a19fc
tree       fa596a8a865cd6a4468bf90abd8654d890d1ee4b
parent     d78bca72961ae816181b386ff6b347419dfcd5cf
KVM: PPC: Book3S PR: Mark pages accessed, and dirty if being written
The mark_page_dirty() function, despite what its name might suggest, doesn't actually mark the page as dirty as far as the MM subsystem is concerned. It merely sets a bit in KVM's map of dirty pages, if userspace has requested dirty tracking for the relevant memslot. To tell the MM subsystem that the page is dirty, we have to call kvm_set_pfn_dirty() (or an equivalent such as SetPageDirty()).

This adds a call to kvm_set_pfn_dirty(), and while we are here, also adds a call to kvm_set_pfn_accessed() to tell the MM subsystem that the page has been accessed. Since we are now using the pfn in several places, this adds a 'pfn' variable to store it and changes the places that used hpaddr >> PAGE_SHIFT to use pfn instead, which is the same thing.

This also changes a use of HPTE_R_PP to PP_RXRX. Both are 3, but PP_RXRX is more informative as being the read-only page permission bit setting.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
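[Editor's note: a minimal sketch, not part of the patch, to illustrate the distinction the commit message describes. The helper name note_guest_page_use and its arguments are invented for clarity; the KVM calls are the ones the patch uses.]

	#include <linux/kvm_host.h>

	/*
	 * Illustrative only: the dirty log (mark_page_dirty) and the MM
	 * subsystem (kvm_set_pfn_*) have to be told about the access
	 * separately, which is the point of the patch below.
	 */
	static void note_guest_page_use(struct kvm *kvm, unsigned long gfn,
					unsigned long pfn, bool writable)
	{
		/* Tell the MM subsystem the backing page was referenced. */
		kvm_set_pfn_accessed(pfn);

		if (writable) {
			/* Set the bit in KVM's dirty log for this memslot... */
			mark_page_dirty(kvm, gfn);
			/* ...and tell the MM subsystem the page really is dirty. */
			kvm_set_pfn_dirty(pfn);
		}
	}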
 arch/powerpc/kvm/book3s_64_mmu_host.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 307e6e838e0d..e2ab8a747fbe 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -96,20 +96,21 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu->kvm;
 	struct hpte_cache *cpte;
+	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+	unsigned long pfn;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
-			iswrite, &writable);
-	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	if (is_error_noslot_pfn(pfn)) {
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
 		r = -EINVAL;
 		goto out;
 	}
-	hpaddr <<= PAGE_SHIFT;
+	hpaddr = pfn << PAGE_SHIFT;
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -129,15 +130,18 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 
 	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
+	kvm_set_pfn_accessed(pfn);
 	if (!orig_pte->may_write || !writable)
-		rflags |= HPTE_R_PP;
-	else
-		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+		rflags |= PP_RXRX;
+	else {
+		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_set_pfn_dirty(pfn);
+	}
 
 	if (!orig_pte->may_execute)
 		rflags |= HPTE_R_N;
 	else
-		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+		kvmppc_mmu_flush_icache(pfn);
 
 	/*
 	 * Use 64K pages if possible; otherwise, on 64K page kernels,
@@ -191,7 +195,7 @@ map_again:
 	cpte->slot = hpteg + (ret & 7);
 	cpte->host_vpn = vpn;
 	cpte->pte = *orig_pte;
-	cpte->pfn = hpaddr >> PAGE_SHIFT;
+	cpte->pfn = pfn;
 	cpte->pagesize = hpsize;
 
 	kvmppc_mmu_hpte_cache_map(vcpu, cpte);
@@ -200,7 +204,7 @@ map_again:
 
 out_unlock:
 	spin_unlock(&kvm->mmu_lock);
-	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+	kvm_release_pfn_clean(pfn);
 	if (cpte)
 		kvmppc_mmu_hpte_cache_free(cpte);
 