aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2011-05-03 10:07:01 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2011-05-05 23:32:24 -0400
commit40bd587a88fcd425f489f3d9f0be7daa84014141 (patch)
tree3d6d977fc23cae661ee358b1ad8611ed11611d03
parent77eafe101a65b609b0693ee4eda381f60a4a5bab (diff)
powerpc: Rename slb0_limit() to safe_stack_limit() and add Book3E support
slb0_limit() wasn't a very descriptive name. This changes it along with a comment explaining what it's used for, and provides a 64-bit BookE implementation. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h4
-rw-r--r--arch/powerpc/kernel/setup_64.c23
2 files changed, 22 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index ec61e7b998c0..3ea0f9a259d8 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -245,6 +245,10 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
245extern int mmu_linear_psize; 245extern int mmu_linear_psize;
246extern int mmu_vmemmap_psize; 246extern int mmu_vmemmap_psize;
247 247
248#ifdef CONFIG_PPC64
249extern unsigned long linear_map_top;
250#endif
251
248#endif /* !__ASSEMBLY__ */ 252#endif /* !__ASSEMBLY__ */
249 253
250#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ 254#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 959c63cf62e4..c2ec0a12e14f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -434,17 +434,30 @@ void __init setup_system(void)
434 DBG(" <- setup_system()\n"); 434 DBG(" <- setup_system()\n");
435} 435}
436 436
437static u64 slb0_limit(void) 437/* This returns the limit below which memory accesses to the linear
438 * mapping are guaranteed not to cause a TLB or SLB miss. This is
439 * used to allocate interrupt or emergency stacks for which our
440 * exception entry path doesn't deal with being interrupted.
441 */
442static u64 safe_stack_limit(void)
438{ 443{
439 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { 444#ifdef CONFIG_PPC_BOOK3E
445 /* Freescale BookE bolts the entire linear mapping */
446 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
447 return linear_map_top;
448 /* Other BookE, we assume the first GB is bolted */
449 return 1ul << 30;
450#else
451 /* BookS, the first segment is bolted */
452 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
440 return 1UL << SID_SHIFT_1T; 453 return 1UL << SID_SHIFT_1T;
441 }
442 return 1UL << SID_SHIFT; 454 return 1UL << SID_SHIFT;
455#endif
443} 456}
444 457
445static void __init irqstack_early_init(void) 458static void __init irqstack_early_init(void)
446{ 459{
447 u64 limit = slb0_limit(); 460 u64 limit = safe_stack_limit();
448 unsigned int i; 461 unsigned int i;
449 462
450 /* 463 /*
@@ -497,7 +510,7 @@ static void __init emergency_stack_init(void)
497 * bringup, we need to get at them in real mode. This means they 510 * bringup, we need to get at them in real mode. This means they
498 * must also be within the RMO region. 511 * must also be within the RMO region.
499 */ 512 */
500 limit = min(slb0_limit(), ppc64_rma_size); 513 limit = min(safe_stack_limit(), ppc64_rma_size);
501 514
502 for_each_possible_cpu(i) { 515 for_each_possible_cpu(i) {
503 unsigned long sp; 516 unsigned long sp;