about summary refs log tree commit diff stats
path: root/arch/powerpc
diff options
context:
space:
mode:
author: Anton Blanchard <anton@samba.org> 2010-05-10 14:59:18 -0400
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org> 2010-05-21 03:31:10 -0400
commit: 095c7965f4dc870ed2b65143b1e2610de653416c (patch)
tree: dfd918c5087025ac16bedddaa0e8c8d6255d17d0 /arch/powerpc
parent: 5d7a87217de48b234b3c8ff8a73059947d822e07 (diff)
powerpc: Use more accurate limit for first segment memory allocations
Author: Milton Miller <miltonm@bga.com> On large machines we are running out of room below 256MB. In some cases we only need to ensure the allocation is in the first segment, which may be 256MB or 1TB. Add slb0_limit and use it to specify the upper limit for the irqstack and emergency stacks. On a large ppc64 box, this fixes a panic at boot when the crashkernel= option is specified (previously we would run out of memory below 256MB). Signed-off-by: Milton Miller <miltonm@bga.com> Signed-off-by: Anton Blanchard <anton@samba.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index cea66987a6ea..f3fb5a79de52 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,9 +424,18 @@ void __init setup_system(void)
424 DBG(" <- setup_system()\n"); 424 DBG(" <- setup_system()\n");
425} 425}
426 426
427static u64 slb0_limit(void)
428{
429 if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
430 return 1UL << SID_SHIFT_1T;
431 }
432 return 1UL << SID_SHIFT;
433}
434
427#ifdef CONFIG_IRQSTACKS 435#ifdef CONFIG_IRQSTACKS
428static void __init irqstack_early_init(void) 436static void __init irqstack_early_init(void)
429{ 437{
438 u64 limit = slb0_limit();
430 unsigned int i; 439 unsigned int i;
431 440
432 /* 441 /*
@@ -436,10 +445,10 @@ static void __init irqstack_early_init(void)
436 for_each_possible_cpu(i) { 445 for_each_possible_cpu(i) {
437 softirq_ctx[i] = (struct thread_info *) 446 softirq_ctx[i] = (struct thread_info *)
438 __va(lmb_alloc_base(THREAD_SIZE, 447 __va(lmb_alloc_base(THREAD_SIZE,
439 THREAD_SIZE, 0x10000000)); 448 THREAD_SIZE, limit));
440 hardirq_ctx[i] = (struct thread_info *) 449 hardirq_ctx[i] = (struct thread_info *)
441 __va(lmb_alloc_base(THREAD_SIZE, 450 __va(lmb_alloc_base(THREAD_SIZE,
442 THREAD_SIZE, 0x10000000)); 451 THREAD_SIZE, limit));
443 } 452 }
444} 453}
445#else 454#else
@@ -470,7 +479,7 @@ static void __init exc_lvl_early_init(void)
470 */ 479 */
471static void __init emergency_stack_init(void) 480static void __init emergency_stack_init(void)
472{ 481{
473 unsigned long limit; 482 u64 limit;
474 unsigned int i; 483 unsigned int i;
475 484
476 /* 485 /*
@@ -482,7 +491,7 @@ static void __init emergency_stack_init(void)
482 * bringup, we need to get at them in real mode. This means they 491 * bringup, we need to get at them in real mode. This means they
483 * must also be within the RMO region. 492 * must also be within the RMO region.
484 */ 493 */
485 limit = min(0x10000000ULL, lmb.rmo_size); 494 limit = min(slb0_limit(), lmb.rmo_size);
486 495
487 for_each_possible_cpu(i) { 496 for_each_possible_cpu(i) {
488 unsigned long sp; 497 unsigned long sp;