 arch/ppc64/mm/hash_native.c | 47
 include/asm-ppc64/mmu.h     | 22
 2 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 52b6b9305341..4fec05817d66 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -304,6 +304,50 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 	local_irq_restore(flags);
 }
 
+/*
+ * Clear all mappings on kexec.  All CPUs are in real mode (or they will
+ * be when they take an ISI), and we are the only one left.  We rely on
+ * our kernel mapping being 0xC0's and on the hardware ignoring those
+ * two bits in real mode.
+ *
+ * TODO: add batching support when enabled.  Remember, no dynamic memory
+ * here, although there is the control page available...
+ */
+static void native_hpte_clear(void)
+{
+	unsigned long slot, slots, flags;
+	HPTE *hptep = htab_address;
+	Hpte_dword0 dw0;
+	unsigned long pteg_count;
+
+	pteg_count = htab_hash_mask + 1;
+
+	local_irq_save(flags);
+
+	/* We take the tlbie lock and hold it.  Some hardware will
+	 * deadlock if we try to tlbie from two processors at once.
+	 */
+	spin_lock(&native_tlbie_lock);
+
+	slots = pteg_count * HPTES_PER_GROUP;
+
+	for (slot = 0; slot < slots; slot++, hptep++) {
+		/*
+		 * We could lock the pte here, but we are the only CPU
+		 * running, right?  And for crash dump, we probably
+		 * don't want to wait for a maybe-bad CPU.
+		 */
+		dw0 = hptep->dw0.dw0;
+
+		if (dw0.v) {
+			hptep->dw0.dword0 = 0;
+			tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l);
+		}
+	}
+
+	spin_unlock(&native_tlbie_lock);
+	local_irq_restore(flags);
+}
+
 static void native_flush_hash_range(unsigned long context,
 				    unsigned long number, int local)
 {
@@ -415,7 +459,8 @@ void hpte_init_native(void)
 	ppc_md.hpte_updatepp = native_hpte_updatepp;
 	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
 	ppc_md.hpte_insert = native_hpte_insert;
-	ppc_md.hpte_remove = native_hpte_remove;
+	ppc_md.hpte_remove = native_hpte_remove;
+	ppc_md.hpte_clear_all = native_hpte_clear;
 	if (tlb_batching_enabled())
 		ppc_md.flush_hash_range = native_flush_hash_range;
 	htab_finish_init();
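
native_hpte_clear() simply walks every slot in the hash table and, for each
valid entry, zeroes the first doubleword and issues a tlbie for the
reconstructed virtual address. A minimal sketch of how the kexec shutdown
path might invoke the new hook through ppc_md (the caller and its name are
hypothetical illustrations, not part of this patch):

	/* Hypothetical caller sketch; not part of this patch. */
	static void kexec_clear_hash_table(void)
	{
		/*
		 * Tear down every hash table mapping before jumping to
		 * the new kernel; only safe once all other CPUs are
		 * stopped and we are running in real mode.
		 */
		if (ppc_md.hpte_clear_all)
			ppc_md.hpte_clear_all();
	}
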
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 9d03a98a4fa3..f373de5e3dd9 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -181,6 +181,28 @@ static inline void tlbiel(unsigned long va)
 	asm volatile("ptesync": : :"memory");
 }
 
+static inline unsigned long slot2va(unsigned long avpn, unsigned long large,
+				    unsigned long secondary, unsigned long slot)
+{
+	unsigned long va;
+
+	va = avpn << 23;
+
+	if (!large) {
+		unsigned long vpi, pteg;
+
+		pteg = slot / HPTES_PER_GROUP;
+		if (secondary)
+			pteg = ~pteg;
+
+		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
+
+		va |= vpi << PAGE_SHIFT;
+	}
+
+	return va;
+}
+
 /*
  * Handle a fault by adding an HPTE. If the address can't be determined
  * to be valid via Linux page tables, return 1. If handled return 0
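
slot2va() inverts the primary hash function used when the entry was
inserted: the HPTE's AVPN field preserves va >> 23, and for 4K pages the
missing low bits of the virtual page index are recovered by xoring the
PTEG number (derived from the slot) back against the VSID bits, since
hash = (vsid ^ vpi) & htab_hash_mask. A standalone round-trip sketch,
built outside the kernel (the htab_hash_mask value and the test address
below are assumed example values):

	#include <stdio.h>

	#define HPTES_PER_GROUP	8
	#define PAGE_SHIFT	12

	/* Assumed example size; the real mask depends on the hash table. */
	static unsigned long htab_hash_mask = 0xfffff;

	static unsigned long slot2va(unsigned long avpn, unsigned long large,
				     unsigned long secondary, unsigned long slot)
	{
		unsigned long va = avpn << 23;

		if (!large) {
			unsigned long pteg = slot / HPTES_PER_GROUP;

			if (secondary)
				pteg = ~pteg;
			/* Recover the low virtual page index bits from the hash. */
			va |= (((va >> 28) ^ pteg) & htab_hash_mask) << PAGE_SHIFT;
		}
		return va;
	}

	int main(void)
	{
		unsigned long va   = 0xc000000012345000UL;        /* example VA */
		unsigned long vpi  = (va >> PAGE_SHIFT) & 0xffff; /* 4K page index */
		unsigned long hash = ((va >> 28) ^ vpi) & htab_hash_mask;
		unsigned long slot = hash * HPTES_PER_GROUP; /* first slot in PTEG */

		/* The HPTE keeps only va >> 23; slot2va() restores the rest. */
		printf("va        = %016lx\n", va);
		printf("recovered = %016lx\n", slot2va(va >> 23, 0, 0, slot));
		return 0;
	}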