path: root/arch/powerpc
author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2015-03-30 01:09:13 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-04-16 21:23:24 -0400
commit	dac5657067919161eb3273ca787d8ae9814801e7 (patch)
tree	41a7828ae4d40d6391fc409f5c48ee9a60720a71 /arch/powerpc
parent	5e1d44aef1673b504dde475aa714b1bdb9b875c4 (diff)
KVM: PPC: Remove page table walk helpers
This patch removes helpers that were used only once in the code. Limiting the page table walk variants helps ensure that we don't end up with code walking the page tables under wrong assumptions.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
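The open-coded replacement, visible in the book3s_hv_rm_mmu.c hunk below, boils down to deriving the host mapping size from the (huge)page shift that find_linux_pte_or_hugepte() reports and rejecting a guest page size that the host mapping cannot back. The following standalone C sketch demonstrates just that arithmetic; the PAGE_SHIFT value and the demo main() are assumptions for illustration, and the real code operates on kernel pte_t pointers rather than plain integers.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K base pages for this demo */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/*
 * Mirrors the logic open-coded in kvmppc_do_h_enter() below: a zero
 * shift means a normal page, a non-zero shift means a hugepage of
 * size 1ul << shift.
 */
static unsigned long host_pte_size(unsigned int hpage_shift)
{
	return hpage_shift ? 1ul << hpage_shift : PAGE_SIZE;
}

int main(void)
{
	unsigned long psize = 1ul << 24;	/* guest requests a 16M page */

	/* 4K host backing cannot hold a 16M guest page: the H_PARAMETER case. */
	printf("4K backing sufficient?  %s\n",
	       host_pte_size(0) < psize ? "no" : "yes");
	/* A 16M host hugepage (shift 24) is sufficient. */
	printf("16M backing sufficient? %s\n",
	       host_pte_size(24) < psize ? "no" : "yes");
	return 0;
}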
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/pgtable.h	21
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_mmu.c	62
-rw-r--r--	arch/powerpc/kvm/e500_mmu_host.c	2
3 files changed, 28 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9835ac4173b7..92fe01c355a9 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -249,27 +249,6 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #endif
 pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 				 unsigned *shift);
-
-static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
-				       unsigned long *pte_sizep)
-{
-	pte_t *ptep;
-	unsigned long ps = *pte_sizep;
-	unsigned int shift;
-
-	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
-	if (!ptep)
-		return NULL;
-	if (shift)
-		*pte_sizep = 1ul << shift;
-	else
-		*pte_sizep = PAGE_SIZE;
-
-	if (ps > *pte_sizep)
-		return NULL;
-
-	return ptep;
-}
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 625407e4d3b0..73e083cb9f7e 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -131,25 +131,6 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	unlock_rmap(rmap);
 }
 
-static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
-					 int writing, unsigned long *pte_sizep)
-{
-	pte_t *ptep;
-	unsigned long ps = *pte_sizep;
-	unsigned int hugepage_shift;
-
-	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
-	if (!ptep)
-		return __pte(0);
-	if (hugepage_shift)
-		*pte_sizep = 1ul << hugepage_shift;
-	else
-		*pte_sizep = PAGE_SIZE;
-	if (ps > *pte_sizep)
-		return __pte(0);
-	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
-}
-
 static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
 {
 	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
@@ -166,10 +147,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	struct revmap_entry *rev;
 	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
-	unsigned long pte_size;
+	unsigned hpage_shift;
 	unsigned long is_io;
 	unsigned long *rmap;
-	pte_t pte;
+	pte_t *ptep;
 	unsigned int writing;
 	unsigned long mmu_seq;
 	unsigned long rcbits;
@@ -208,22 +189,33 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	/* Translate to host virtual address */
 	hva = __gfn_to_hva_memslot(memslot, gfn);
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+	if (ptep) {
+		pte_t pte;
+		unsigned int host_pte_size;
 
-	/* Look up the Linux PTE for the backing page */
-	pte_size = psize;
-	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
-	if (pte_present(pte) && !pte_protnone(pte)) {
-		if (writing && !pte_write(pte))
-			/* make the actual HPTE be read-only */
-			ptel = hpte_make_readonly(ptel);
-		is_io = hpte_cache_bits(pte_val(pte));
-		pa = pte_pfn(pte) << PAGE_SHIFT;
-		pa |= hva & (pte_size - 1);
-		pa |= gpa & ~PAGE_MASK;
-	}
-
-	if (pte_size < psize)
-		return H_PARAMETER;
+		if (hpage_shift)
+			host_pte_size = 1ul << hpage_shift;
+		else
+			host_pte_size = PAGE_SIZE;
+		/*
+		 * We should always find the guest page size
+		 * to <= host page size, if host is using hugepage
+		 */
+		if (host_pte_size < psize)
+			return H_PARAMETER;
+
+		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
+		if (pte_present(pte) && !pte_protnone(pte)) {
+			if (writing && !pte_write(pte))
+				/* make the actual HPTE be read-only */
+				ptel = hpte_make_readonly(ptel);
+			is_io = hpte_cache_bits(pte_val(pte));
+			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (host_pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
+		}
+	}
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 5840d546aa03..a1f5b0d4b1d6 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -468,7 +468,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 
 	pgdir = vcpu_e500->vcpu.arch.pgdir;
-	ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
 	if (ptep) {
 		pte_t pte = READ_ONCE(*ptep);
 
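A note on the e500_mmu_host.c hunk: the new call passes NULL for the shift argument, since kvmppc_e500_shadow_map() only needs the PTE pointer here. That can only work if the walker writes the shift back exclusively through a non-NULL pointer. The following standalone sketch shows that out-parameter idiom with hypothetical names; walk_table() and its behavior are illustrative, not the kernel's implementation.

#include <stdio.h>

/*
 * Hypothetical walker: the page-size shift is reported only when the
 * caller passed somewhere to put it, so passing NULL opts out.
 */
static int *walk_table(int *table, unsigned long idx, unsigned int *shift)
{
	if (shift)		/* tolerate callers that don't need the size */
		*shift = 12;	/* demo value: a 4K mapping */
	return &table[idx];
}

int main(void)
{
	int table[4] = { 0 };
	unsigned int shift;

	walk_table(table, 1, &shift);	/* caller wants the mapping size */
	printf("shift = %u\n", shift);
	walk_table(table, 2, NULL);	/* caller only wants the slot */
	return 0;
}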