author	Luke Browning <lukebr@linux.vnet.ibm.com>	2007-05-02 10:19:11 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-05-07 06:31:12 -0400
commit	71bf08b6c083df4ee97874d895f911529f4150dd (patch)
tree	72f07b84c25613cb918b863f073cf8dd76aa74bc
parent	9f90b997de4efd5404a8c52f89c400f0f4e2d216 (diff)
[POWERPC] 64K page support for kexec
This fixes a couple of kexec problems related to 64K page support in
the kernel.  kexec issues a tlbie for each pte.  The parameters for the
tlbie are the page size and the virtual address.  Support was missing
for the computation of these two parameters for 64K pages.  This adds
that support.

Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	84
1 file changed, 62 insertions(+), 22 deletions(-)
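The core of the change: the low-order virtual-address bits recovered from the
hash slot must be shifted by the page's own shift (16 for a 64K page) rather
than the fixed 4K PAGE_SHIFT, and the decoded page size, not MMU_PAGE_4K, must
be passed to __tlbie().  The stand-alone sketch below (an editor's
illustration, not part of the patch) mirrors that reconstruction;
htab_hash_mask, the AVPN and the slot are made-up values, while
HPTES_PER_GROUP and the page shifts follow the kernel's definitions.

/*
 * Editor's sketch only: user-space illustration of the VA reconstruction
 * done by the new hpte_decode().  Values below are hypothetical.
 */
#include <stdio.h>

#define HPTES_PER_GROUP	8

static unsigned long htab_hash_mask = 0xfffff;	/* assumed hash-table size */

/* Rebuild the VA from the AVPN plus the hash slot, mirroring the patch. */
static unsigned long reconstruct_va(unsigned long avpn, unsigned long slot,
				    int shift, int secondary)
{
	unsigned long va = avpn << 23;		/* AVPN gives the high VA bits */

	if (shift < 23) {			/* low bits come back from the hash */
		unsigned long pteg = slot / HPTES_PER_GROUP;
		unsigned long vpi;

		if (secondary)
			pteg = ~pteg;
		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
		va |= vpi << shift;		/* page-size dependent, not always 12 */
	}
	return va;
}

int main(void)
{
	unsigned long avpn = 0x123456;		/* hypothetical AVPN field */
	unsigned long slot = 0x4560;		/* hypothetical hash-table slot */

	printf("64K page: va = 0x%lx\n", reconstruct_va(avpn, slot, 16, 0));
	printf("old 4K-only math: va = 0x%lx\n", reconstruct_va(avpn, slot, 12, 0));
	return 0;
}

Running it shows the two shifts yield different VAs for the same HPTE, which
is why the old slot2va()/MMU_PAGE_4K pairing issued wrong tlbies for 64K pages.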
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 79aedaf36f2b..7b7fe2d7b9dc 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -26,6 +26,7 @@
 #include <asm/tlb.h>
 #include <asm/cputable.h>
 #include <asm/udbg.h>
+#include <asm/kexec.h>
 
 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -340,31 +341,70 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 	local_irq_restore(flags);
 }
 
-/*
- * XXX This need fixing based on page size. It's only used by
- * native_hpte_clear() for now which needs fixing too so they
- * make a good pair...
- */
-static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
-{
-	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
-	unsigned long va;
+#define LP_SHIFT	12
+#define LP_BITS		8
+#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
-	va = avpn << 23;
+static void hpte_decode(hpte_t *hpte, unsigned long slot,
+			int *psize, unsigned long *va)
+{
+	unsigned long hpte_r = hpte->r;
+	unsigned long hpte_v = hpte->v;
+	unsigned long avpn;
+	int i, size, shift, penc, avpnm_bits;
+
+	if (!(hpte_v & HPTE_V_LARGE))
+		size = MMU_PAGE_4K;
+	else {
+		for (i = 0; i < LP_BITS; i++) {
+			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
+				break;
+		}
+		penc = LP_MASK(i+1) >> LP_SHIFT;
+		for (size = 0; size < MMU_PAGE_COUNT; size++) {
 
-	if (! (hpte_v & HPTE_V_LARGE)) {
-		unsigned long vpi, pteg;
+			/* 4K pages are not represented by LP */
+			if (size == MMU_PAGE_4K)
+				continue;
 
-		pteg = slot / HPTES_PER_GROUP;
-		if (hpte_v & HPTE_V_SECONDARY)
-			pteg = ~pteg;
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[size].shift)
+				continue;
 
-		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
+			if (penc == mmu_psize_defs[size].penc)
+				break;
+		}
+	}
 
-		va |= vpi << PAGE_SHIFT;
+	/*
+	 * FIXME, the code below works for 16M, 64K, and 4K pages as these
+	 * fall under the p<=23 rules for calculating the virtual address.
+	 * In the case of 16M pages, an extra bit is stolen from the AVPN
+	 * field to achieve the requisite 24 bits.
+	 *
+	 * Does not work for 16G pages or 1 TB segments.
+	 */
+	shift = mmu_psize_defs[size].shift;
+	if (mmu_psize_defs[size].avpnm)
+		avpnm_bits = __ilog2_u64(mmu_psize_defs[size].avpnm) + 1;
+	else
+		avpnm_bits = 0;
+	if (shift - avpnm_bits <= 23) {
+		avpn = HPTE_V_AVPN_VAL(hpte_v) << 23;
+
+		if (shift < 23) {
+			unsigned long vpi, pteg;
+
+			pteg = slot / HPTES_PER_GROUP;
+			if (hpte_v & HPTE_V_SECONDARY)
+				pteg = ~pteg;
+			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
+			avpn |= (vpi << mmu_psize_defs[size].shift);
+		}
 	}
 
-	return va;
+	*va = avpn;
+	*psize = size;
 }
 
 /*
@@ -374,15 +414,14 @@ static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
  *
  * TODO: add batching support when enabled. remember, no dynamic memory here,
  * athough there is the control page available...
- *
- * XXX FIXME: 4k only for now !
  */
 static void native_hpte_clear(void)
 {
 	unsigned long slot, slots, flags;
 	hpte_t *hptep = htab_address;
-	unsigned long hpte_v;
+	unsigned long hpte_v, va;
 	unsigned long pteg_count;
+	int psize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -408,8 +447,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
+			hpte_decode(hptep, slot, &psize, &va);
 			hptep->v = 0;
-			__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
+			__tlbie(va, psize);
 		}
 	}
 
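Editor's note on the FIXME in hpte_decode(): with the usual mmu_psize_defs
values, the shift - avpnm_bits <= 23 test works out to 12 for 4K pages, 16 for
64K pages, and 24 - 1 = 23 for 16M pages (one AVPN bit is stolen, as the
comment says), so all three qualify; a 16G page (shift 34) or a 1 TB segment
does not fit in 23 bits, which is why the comment excludes them.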