Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
-rw-r--r--  arch/powerpc/kernel/setup_64.c  83
1 file changed, 17 insertions, 66 deletions
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f96c49b03ba0..59aa92cd6fa4 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -73,7 +73,6 @@
 
 int have_of = 1;
 int boot_cpuid = 0;
-int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
@@ -96,11 +95,6 @@ int dcache_bsize;
 int icache_bsize;
 int ucache_bsize;
 
-/* The main machine-dep calls structure
- */
-struct machdep_calls ppc_md;
-EXPORT_SYMBOL(ppc_md);
-
 #ifdef CONFIG_MAGIC_SYSRQ
 unsigned long SYSRQ_KEY;
 #endif /* CONFIG_MAGIC_SYSRQ */
@@ -161,32 +155,6 @@ early_param("smt-enabled", early_smt_enabled);
 #define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
-extern struct machdep_calls pSeries_md;
-extern struct machdep_calls pmac_md;
-extern struct machdep_calls maple_md;
-extern struct machdep_calls cell_md;
-extern struct machdep_calls iseries_md;
-
-/* Ultimately, stuff them in an elf section like initcalls... */
-static struct machdep_calls __initdata *machines[] = {
-#ifdef CONFIG_PPC_PSERIES
-	&pSeries_md,
-#endif /* CONFIG_PPC_PSERIES */
-#ifdef CONFIG_PPC_PMAC
-	&pmac_md,
-#endif /* CONFIG_PPC_PMAC */
-#ifdef CONFIG_PPC_MAPLE
-	&maple_md,
-#endif /* CONFIG_PPC_MAPLE */
-#ifdef CONFIG_PPC_CELL
-	&cell_md,
-#endif
-#ifdef CONFIG_PPC_ISERIES
-	&iseries_md,
-#endif
-	NULL
-};
-
 /*
  * Early initialization entry point. This is called by head.S
  * with MMU translation disabled. We rely on the "feature" of
@@ -208,13 +176,10 @@ static struct machdep_calls __initdata *machines[] = {
 
 void __init early_setup(unsigned long dt_ptr)
 {
-	struct paca_struct *lpaca = get_paca();
-	static struct machdep_calls **mach;
-
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
-	DBG(" -> early_setup()\n");
+	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
 
 	/*
 	 * Do early initializations using the flattened device
@@ -223,22 +188,16 @@ void __init early_setup(unsigned long dt_ptr)
 	 */
 	early_init_devtree(__va(dt_ptr));
 
-	/*
-	 * Iterate all ppc_md structures until we find the proper
-	 * one for the current machine type
-	 */
-	DBG("Probing machine type for platform %x...\n", _machine);
+	/* Now we know the logical id of our boot cpu, setup the paca. */
+	setup_boot_paca();
 
-	for (mach = machines; *mach; mach++) {
-		if ((*mach)->probe(_machine))
-			break;
-	}
-	/* What can we do if we didn't find ? */
-	if (*mach == NULL) {
-		DBG("No suitable machine found !\n");
-		for (;;);
-	}
-	ppc_md = **mach;
+	/* Fix up paca fields required for the boot cpu */
+	get_paca()->cpu_start = 1;
+	get_paca()->stab_real = __pa((u64)&initial_stab);
+	get_paca()->stab_addr = (u64)&initial_stab;
+
+	/* Probe the machine type */
+	probe_machine();
 
 #ifdef CONFIG_CRASH_DUMP
 	kdump_setup();
@@ -260,7 +219,7 @@ void __init early_setup(unsigned long dt_ptr)
 		if (cpu_has_feature(CPU_FTR_SLB))
 			slb_initialize();
 		else
-			stab_initialize(lpaca->stab_real);
+			stab_initialize(get_paca()->stab_real);
 	}
 
 	DBG(" <- early_setup()\n");
@@ -340,7 +299,7 @@ static void __init initialize_cache_info(void)
 			const char *dc, *ic;
 
 			/* Then read cache informations */
-			if (_machine == PLATFORM_POWERMAC) {
+			if (machine_is(powermac)) {
 				dc = "d-cache-block-size";
 				ic = "i-cache-block-size";
 			} else {
@@ -484,7 +443,6 @@ void __init setup_system(void)
 	printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
 	printk("ppc64_interrupt_controller = 0x%ld\n",
 	       ppc64_interrupt_controller);
-	printk("platform = 0x%x\n", _machine);
 	printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
 	printk("ppc64_caches.dcache_line_size = 0x%x\n",
 	       ppc64_caches.dline_size);
@@ -497,8 +455,6 @@ void __init setup_system(void)
 #endif
 	printk("-----------------------------------------------------\n");
 
-	mm_init_ppc64();
-
 	DBG(" <- setup_system()\n");
 }
 
@@ -518,7 +474,7 @@ static void __init irqstack_early_init(void)
 	 * interrupt stacks must be under 256MB, we cannot afford to take
 	 * SLB misses on them.
 	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
 			__va(lmb_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, 0x10000000));
@@ -551,7 +507,7 @@ static void __init emergency_stack_init(void)
 	 */
 	limit = min(0x10000000UL, lmb.rmo_size);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		paca[i].emergency_sp =
 			__va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
@@ -581,7 +537,8 @@ void __init setup_arch(char **cmdline_p)
 	panic_timeout = 180;
 
 	if (ppc_md.panic)
-		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&ppc64_panic_block);
 
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) _etext;
@@ -603,12 +560,6 @@ void __init setup_arch(char **cmdline_p)
 
 	ppc_md.setup_arch();
 
-	/* Use the default idle loop if the platform hasn't provided one. */
-	if (NULL == ppc_md.idle_loop) {
-		ppc_md.idle_loop = default_idle;
-		printk(KERN_INFO "Using default idle loop\n");
-	}
-
 	paging_init();
 	ppc64_boot_msg(0x15, "Setup Done");
 }
@@ -673,7 +624,7 @@ void __init setup_per_cpu_areas(void)
 	size = PERCPU_ENOUGH_ROOM;
 #endif
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
		ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);