aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorR Sharada <sharada@in.ibm.com>2005-06-25 17:58:08 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-06-25 19:24:51 -0400
commitf4c82d5132b0592f5d6befc5b652cbd4b08f12ff (patch)
tree3889ea8eeb4dfa6c995b6dc93574d24c238a6deb /arch
parent70765aa4bdb8862a49fcf5b28f3deaf561cf5ae7 (diff)
[PATCH] ppc64 kexec: native hash clear
Add code to clear the hash table and invalidate the TLB for native (SMP, non-LPAR) mode. Supports 16M and 4k pages.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: R Sharada <sharada@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc64/mm/hash_native.c | 47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 52b6b9305341..4fec05817d66 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -304,6 +304,50 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
304	local_irq_restore(flags);
305}
306
307/*
308 * clear all mappings on kexec. All cpus are in real mode (or they will
309 * be when they isi), and we are the only one left. We rely on our kernel
310 * mapping being 0xC0's and the hardware ignoring those two real bits.
311 *
312 * TODO: add batching support when enabled. remember, no dynamic memory here,
313 * although there is the control page available...
314 */
315static void native_hpte_clear(void)
316{
317 unsigned long slot, slots, flags;
318 HPTE *hptep = htab_address;
319 Hpte_dword0 dw0;
320 unsigned long pteg_count;
321
322 pteg_count = htab_hash_mask + 1;
323
324 local_irq_save(flags);
325
326 /* we take the tlbie lock and hold it. Some hardware will
327 * deadlock if we try to tlbie from two processors at once.
328 */
329 spin_lock(&native_tlbie_lock);
330
331 slots = pteg_count * HPTES_PER_GROUP;
332
333 for (slot = 0; slot < slots; slot++, hptep++) {
334 /*
335 * we could lock the pte here, but we are the only cpu
336 * running, right? and for crash dump, we probably
337 * don't want to wait for a maybe bad cpu.
338 */
339 dw0 = hptep->dw0.dw0;
340
341 if (dw0.v) {
342 hptep->dw0.dword0 = 0;
343 tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l);
344 }
345 }
346
347 spin_unlock(&native_tlbie_lock);
348 local_irq_restore(flags);
349}
350
351static void native_flush_hash_range(unsigned long context,
352	unsigned long number, int local)
353{
@@ -415,7 +459,8 @@ void hpte_init_native(void)
459	ppc_md.hpte_updatepp = native_hpte_updatepp;
460	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
461	ppc_md.hpte_insert = native_hpte_insert;
462	ppc_md.hpte_remove = native_hpte_remove;
463	ppc_md.hpte_clear_all = native_hpte_clear;
464	if (tlb_batching_enabled())
465		ppc_md.flush_hash_range = native_flush_hash_range;
466	htab_finish_init();