Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
 arch/powerpc/kernel/setup_64.c | 44 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5a0401fcaebd..a88bf2713d41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -62,6 +62,7 @@
 #include <asm/udbg.h>
 #include <asm/kexec.h>
 #include <asm/mmu_context.h>
+#include <asm/code-patching.h>
 
 #include "setup.h"
 
@@ -72,6 +73,7 @@
 #endif
 
 int boot_cpuid = 0;
+int __initdata boot_cpu_count;
 u64 ppc64_pft_size;
 
 /* Pick defaults since we might want to patch instructions
@@ -233,6 +235,7 @@ void early_setup_secondary(void)
 void smp_release_cpus(void)
 {
 	unsigned long *ptr;
+	int i;
 
 	DBG(" -> smp_release_cpus()\n");
 
@@ -245,7 +248,16 @@ void smp_release_cpus(void)
 	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
 	*ptr = __pa(generic_secondary_smp_init);
-	mb();
+
+	/* And wait a bit for them to catch up */
+	for (i = 0; i < 100000; i++) {
+		mb();
+		HMT_low();
+		if (boot_cpu_count == 0)
+			break;
+		udelay(1);
+	}
+	DBG("boot_cpu_count = %d\n", boot_cpu_count);
 
 	DBG(" <- smp_release_cpus()\n");
 }
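The hunk above replaces the lone mb() after the store to __secondary_hold_spinloop with a bounded handshake: the boot CPU polls boot_cpu_count for up to 100000 iterations of udelay(1), roughly 100 ms, calling HMT_low() to drop SMT thread priority while it spins. The counter is presumably decremented by each secondary as it leaves the spin loop in the early assembly entry path; that side is not part of this file. A minimal user-space sketch of the same release-then-poll pattern, with C11 atomics and threads standing in for mb()/udelay(), and every name illustrative rather than taken from the kernel:

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>
#include <time.h>

#define NR_SECONDARIES 3

static atomic_ulong secondary_hold;              /* 0 = keep spinning */
static atomic_int pending_cpus = NR_SECONDARIES; /* plays boot_cpu_count */

static int secondary(void *arg)
{
	(void)arg;
	/* Spin until the boot CPU publishes a release value. */
	while (atomic_load(&secondary_hold) == 0)
		thrd_yield();
	/* Tell the boot CPU this one has caught up. */
	atomic_fetch_sub(&pending_cpus, 1);
	return 0;
}

int main(void)
{
	thrd_t t[NR_SECONDARIES];
	for (int i = 0; i < NR_SECONDARIES; i++)
		thrd_create(&t[i], secondary, NULL);

	atomic_store(&secondary_hold, 1);	/* release the secondaries */

	/* Bounded wait, as in smp_release_cpus(): don't hang on a stuck CPU. */
	for (int i = 0; i < 100000 && atomic_load(&pending_cpus) != 0; i++) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 };
		thrd_sleep(&ts, NULL);		/* roughly udelay(1) */
	}
	printf("pending_cpus = %d\n", atomic_load(&pending_cpus));

	for (int i = 0; i < NR_SECONDARIES; i++)
		thrd_join(t[i], NULL);
	return 0;
}

Bounding the wait keeps boot from hanging forever on a CPU that never shows up, and the final DBG print of the residual count makes a straggler visible.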
@@ -423,17 +435,30 @@ void __init setup_system(void)
 	DBG(" <- setup_system()\n");
 }
 
-static u64 slb0_limit(void)
+/* This returns the limit below which memory accesses to the linear
+ * mapping are guaranteed not to cause a TLB or SLB miss. This is
+ * used to allocate interrupt or emergency stacks for which our
+ * exception entry path doesn't deal with being interrupted.
+ */
+static u64 safe_stack_limit(void)
 {
-	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+#ifdef CONFIG_PPC_BOOK3E
+	/* Freescale BookE bolts the entire linear mapping */
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+		return linear_map_top;
+	/* Other BookE, we assume the first GB is bolted */
+	return 1ul << 30;
+#else
+	/* BookS, the first segment is bolted */
+	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		return 1UL << SID_SHIFT_1T;
-	}
 	return 1UL << SID_SHIFT;
+#endif
 }
 
 static void __init irqstack_early_init(void)
 {
-	u64 limit = slb0_limit();
+	u64 limit = safe_stack_limit();
 	unsigned int i;
 
 	/*
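For orientation on the limits safe_stack_limit() returns (the shift values below match this era's segment-size definitions in the powerpc headers, stated here as context rather than taken from the patch): SID_SHIFT is 28, so the bolted first segment on Book3S is 256 MB; SID_SHIFT_1T is 40, giving 1 TB when 1T segments are available; and the non-Freescale Book3E fallback of 1ul << 30 is 1 GB. A standalone check:

#include <stdio.h>

#define SID_SHIFT    28	/* 256MB segments */
#define SID_SHIFT_1T 40	/* 1TB segments */

int main(void)
{
	printf("Book3S 256MB segment limit: 0x%llx\n", 1ULL << SID_SHIFT);
	printf("Book3S 1T segment limit:    0x%llx\n", 1ULL << SID_SHIFT_1T);
	printf("Book3E 1GB fallback:        0x%llx\n", 1ULL << 30);
	return 0;
}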
@@ -453,6 +478,9 @@ static void __init irqstack_early_init(void)
 #ifdef CONFIG_PPC_BOOK3E
 static void __init exc_lvl_early_init(void)
 {
+	extern unsigned int interrupt_base_book3e;
+	extern unsigned int exc_debug_debug_book3e;
+
 	unsigned int i;
 
 	for_each_possible_cpu(i) {
@@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void)
 		mcheckirq_ctx[i] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
+
+	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
+		patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
+			     (unsigned long)&exc_debug_debug_book3e, 0);
 }
 #else
 #define exc_lvl_early_init()
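patch_branch(), provided by the asm/code-patching.h include added at the top of this patch, writes an unconditional branch instruction at the given address. Here it redirects the word one instruction past offset 0x040 in the Book3E vector area at interrupt_base_book3e to exc_debug_debug_book3e, so parts that implement a separate debug exception level (CPU_FTR_DEBUG_LVL_EXC) take debug interrupts through the debug-level handler. A simplified sketch of the relative-branch encoding such a helper produces; the real helper also range-checks the +/-32MB displacement and flushes the icache, neither of which is shown:

#include <stdint.h>
#include <stdio.h>

/* Encode a PowerPC I-form branch ("b target") relative to the address
 * the instruction will be stored at: primary opcode 18 in the top six
 * bits, a 24-bit signed word displacement in bits 6-29, AA and LK clear. */
static uint32_t encode_rel_branch(const uint32_t *addr, unsigned long target)
{
	long offset = (long)target - (long)(unsigned long)addr;
	return 0x48000000u | ((unsigned long)offset & 0x03fffffcUL);
}

int main(void)
{
	uint32_t vec[2] = { 0, 0 };	/* stand-in for a vector table slot */
	unsigned long handler = (unsigned long)&vec[1] + 0x100;

	vec[1] = encode_rel_branch(&vec[1], handler);	/* "patch" slot 1 */
	printf("patched insn: 0x%08x\n", (unsigned)vec[1]);
	return 0;
}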
@@ -486,7 +518,7 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), ppc64_rma_size);
+	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
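This last hunk only renames the helper, but the limit it computes governs the per-CPU emergency stack allocations later in emergency_stack_init(): each stack must come from memory below both safe_stack_limit() and ppc64_rma_size so it remains accessible in real mode during CPU bringup. A toy bump-allocator sketch of the "allocate below a limit" idea (user-space and purely illustrative; the kernel uses its bounded memblock allocator for this):

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)

static unsigned long next_free = 0x100000;	/* pretend physical cursor */

/* Return an aligned block that ends at or below 'limit', mimicking a
 * bounded boot-time allocator. Aborts if the region is exhausted. */
static unsigned long alloc_below(unsigned long size, unsigned long align,
				 unsigned long limit)
{
	unsigned long base = (next_free + align - 1) & ~(align - 1);
	assert(base + size <= limit);
	next_free = base + size;
	return base;
}

int main(void)
{
	unsigned long safe = 1UL << 28;		/* e.g. 256MB bolted segment */
	unsigned long rma = 1UL << 27;		/* e.g. 128MB RMO */
	unsigned long limit = safe < rma ? safe : rma;

	for (int cpu = 0; cpu < 4; cpu++) {
		unsigned long sp = alloc_below(THREAD_SIZE, THREAD_SIZE, limit);
		printf("cpu %d emergency stack at [0x%lx..0x%lx]\n",
		       cpu, sp, sp + THREAD_SIZE - 1);
	}
	return 0;
}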