Diffstat (limited to 'arch/powerpc/kernel/setup_64.c')
 -rw-r--r--  arch/powerpc/kernel/setup_64.c | 199
 1 file changed, 50 insertions, 149 deletions
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 40c48100bf1b..b0994050024f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -56,7 +56,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/lmb.h>
-#include <asm/iSeries/ItLpNaca.h>
+#include <asm/iseries/it_lp_naca.h>
 #include <asm/firmware.h>
 #include <asm/systemcfg.h>
 #include <asm/xmon.h>
@@ -103,8 +103,6 @@ extern void htab_initialize(void);
 extern void early_init_devtree(void *flat_dt);
 extern void unflatten_device_tree(void);
 
-extern void smp_release_cpus(void);
-
 int have_of = 1;
 int boot_cpuid = 0;
 int boot_cpuid_phys = 0;
@@ -183,120 +181,14 @@ static int __init early_smt_enabled(char *p)
 }
 early_param("smt-enabled", early_smt_enabled);
 
-/**
- * setup_cpu_maps - initialize the following cpu maps:
- *  cpu_possible_map
- *  cpu_present_map
- *  cpu_sibling_map
- *
- * Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
- *
- * We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
- *
- * This function is valid only for Open Firmware systems.  finish_device_tree
- * must be called before using this.
- *
- * While we're here, we may as well set the "physical" cpu ids in the paca.
- */
-static void __init setup_cpu_maps(void)
-{
-	struct device_node *dn = NULL;
-	int cpu = 0;
-	int swap_cpuid = 0;
-
-	check_smt_enabled();
-
-	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
-		u32 *intserv;
-		int j, len = sizeof(u32), nthreads;
-
-		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
-					      &len);
-		if (!intserv)
-			intserv = (u32 *)get_property(dn, "reg", NULL);
-
-		nthreads = len / sizeof(u32);
-
-		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
-			cpu_set(cpu, cpu_present_map);
-			set_hard_smp_processor_id(cpu, intserv[j]);
-
-			if (intserv[j] == boot_cpuid_phys)
-				swap_cpuid = cpu;
-			cpu_set(cpu, cpu_possible_map);
-			cpu++;
-		}
-	}
-
-	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
-	 * boot cpu is logical 0.
-	 */
-	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
-		u32 tmp;
-		tmp = get_hard_smp_processor_id(0);
-		set_hard_smp_processor_id(0, boot_cpuid_phys);
-		set_hard_smp_processor_id(swap_cpuid, tmp);
-	}
-
-	/*
-	 * On pSeries LPAR, we need to know how many cpus
-	 * could possibly be added to this partition.
-	 */
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
-	    (dn = of_find_node_by_path("/rtas"))) {
-		int num_addr_cell, num_size_cell, maxcpus;
-		unsigned int *ireg;
-
-		num_addr_cell = prom_n_addr_cells(dn);
-		num_size_cell = prom_n_size_cells(dn);
-
-		ireg = (unsigned int *)
-			get_property(dn, "ibm,lrdr-capacity", NULL);
-
-		if (!ireg)
-			goto out;
-
-		maxcpus = ireg[num_addr_cell + num_size_cell];
-
-		/* Double maxcpus for processors which have SMT capability */
-		if (cpu_has_feature(CPU_FTR_SMT))
-			maxcpus *= 2;
-
-		if (maxcpus > NR_CPUS) {
-			printk(KERN_WARNING
-			       "Partition configured for %d cpus, "
-			       "operating system maximum is %d.\n",
-			       maxcpus, NR_CPUS);
-			maxcpus = NR_CPUS;
-		} else
-			printk(KERN_INFO "Partition configured for %d cpus.\n",
-			       maxcpus);
-
-		for (cpu = 0; cpu < maxcpus; cpu++)
-			cpu_set(cpu, cpu_possible_map);
-	out:
-		of_node_put(dn);
-	}
-
-	/*
-	 * Do the sibling map; assume only two threads per processor.
-	 */
-	for_each_cpu(cpu) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
-		if (cpu_has_feature(CPU_FTR_SMT))
-			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
-	}
-
-	systemcfg->processorCount = num_present_cpus();
-}
+#else
+#define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
 extern struct machdep_calls pSeries_md;
 extern struct machdep_calls pmac_md;
 extern struct machdep_calls maple_md;
-extern struct machdep_calls bpa_md;
+extern struct machdep_calls cell_md;
 extern struct machdep_calls iseries_md;
 
 /* Ultimately, stuff them in an elf section like initcalls... */
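The "#define check_smt_enabled()" stub introduced above is the usual kernel idiom for compiling a call site out of non-SMP builds entirely, so callers need no #ifdef of their own. A minimal standalone sketch of the pattern, with a hypothetical body (the real check_smt_enabled() applies the recorded smt-enabled= boot policy):

#include <stdio.h>

#ifdef CONFIG_SMP
static void check_smt_enabled(void)
{
	/* stand-in body; the real one applies the SMT boot policy */
	printf("SMP build: SMT policy checked\n");
}
#else
/* expands to nothing: the call below compiles away to a bare ';' */
#define check_smt_enabled()
#endif

int main(void)
{
	check_smt_enabled();	/* legal under either configuration */
	return 0;
}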
@@ -310,8 +202,8 @@ static struct machdep_calls __initdata *machines[] = {
 #ifdef CONFIG_PPC_MAPLE
 	&maple_md,
 #endif /* CONFIG_PPC_MAPLE */
-#ifdef CONFIG_PPC_BPA
-	&bpa_md,
+#ifdef CONFIG_PPC_CELL
+	&cell_md,
 #endif
 #ifdef CONFIG_PPC_ISERIES
 	&iseries_md,
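The machines[] table this hunk edits is walked by a first-match probe loop during early setup (hence the "stuff them in an elf section like initcalls" comment above). A self-contained sketch of that registration-and-probe pattern; the probe signature and platform values are simplified stand-ins, not the kernel's:

#include <stdio.h>

struct machdep_calls {
	const char *name;
	int (*probe)(int platform);	/* nonzero means "this is my machine" */
};

static int pseries_probe(int p) { return p == 1; }
static int pmac_probe(int p)    { return p == 2; }

static struct machdep_calls pSeries_md = { "pSeries",  pseries_probe };
static struct machdep_calls pmac_md    = { "PowerMac", pmac_probe };

/* NULL-terminated so the loop knows where the table ends */
static struct machdep_calls *machines[] = { &pSeries_md, &pmac_md, NULL };

int main(void)
{
	int platform = 2;	/* stand-in for the firmware-reported type */
	struct machdep_calls **mach;

	for (mach = machines; *mach; mach++)
		if ((*mach)->probe(platform))
			break;

	printf("selected: %s\n", *mach ? (*mach)->name : "none");
	return 0;
}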
@@ -385,21 +277,49 @@ void __init early_setup(unsigned long dt_ptr)
 	DBG("Found, Initializing memory management...\n");
 
 	/*
-	 * Initialize stab / SLB management
+	 * Initialize the MMU Hash table and create the linear mapping
+	 * of memory. Has to be done before stab/slb initialization as
+	 * this is currently where the page size encoding is obtained
 	 */
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		stab_initialize(lpaca->stab_real);
+	htab_initialize();
 
 	/*
-	 * Initialize the MMU Hash table and create the linear mapping
-	 * of memory
+	 * Initialize stab / SLB management except on iSeries
 	 */
-	htab_initialize();
+	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
+		if (cpu_has_feature(CPU_FTR_SLB))
+			slb_initialize();
+		else
+			stab_initialize(lpaca->stab_real);
+	}
 
 	DBG(" <- early_setup()\n");
 }
 
 
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+void smp_release_cpus(void)
+{
+	extern unsigned long __secondary_hold_spinloop;
+
+	DBG(" -> smp_release_cpus()\n");
+
+	/* All secondary cpus are spinning on a common spinloop, release them
+	 * all now so they can start to spin on their individual paca
+	 * spinloops. For non SMP kernels, the secondary cpus never get out
+	 * of the common spinloop.
+	 * This is useless but harmless on iSeries, secondaries are already
+	 * waiting on their paca spinloops. */
+
+	__secondary_hold_spinloop = 1;
+	mb();
+
+	DBG(" <- smp_release_cpus()\n");
+}
+#else
+#define smp_release_cpus()
+#endif /* CONFIG_SMP || CONFIG_KEXEC */
+
 /*
  * Initialize some remaining members of the ppc64_caches and systemcfg structures
  * (at least until we get rid of them completely). This is mostly some
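The comment inside the new smp_release_cpus() describes a two-stage hold: all secondaries spin on one common flag, then each moves to its own paca spinloop. The secondary-side loop itself is assembly (the spinloop at 0x60 mentioned in setup_system() further down); a hedged C rendering of the protocol, where everything except __secondary_hold_spinloop is a hypothetical name:

/* Sketch only: models the hold protocol, not the real head_64.S code. */
static volatile unsigned long __secondary_hold_spinloop;

static void secondary_hold(volatile unsigned long *paca_release)
{
	/* Stage 1: park on the shared flag until the boot cpu has built
	 * the physical -> logical cpu-id mapping and set up the pacas. */
	while (!__secondary_hold_spinloop)
		;	/* spin */

	/* Stage 2: move to a per-cpu paca word, so later bring-up can
	 * release exactly one cpu at a time. */
	while (!*paca_release)
		;	/* spin */
}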
@@ -589,17 +509,13 @@ void __init setup_system(void)
 
 	parse_early_param();
 
-#ifdef CONFIG_SMP
-	/*
-	 * iSeries has already initialized the cpu maps at this point.
-	 */
-	setup_cpu_maps();
+	check_smt_enabled();
+	smp_setup_cpu_maps();
 
 	/* Release secondary cpus out of their spinloops at 0x60 now that
 	 * we can map physical -> logical CPU ids
 	 */
 	smp_release_cpus();
-#endif
 
 	printk("Starting Linux PPC64 %s\n", system_utsname.version);
 
@@ -631,23 +547,6 @@ static int ppc64_panic_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
-#ifdef CONFIG_PPC_ISERIES
-/*
- * On iSeries we just parse the mem=X option from the command line.
- * On pSeries it's a bit more complicated, see prom_init_mem()
- */
-static int __init early_parsemem(char *p)
-{
-	if (!p)
-		return 0;
-
-	memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
-
-	return 0;
-}
-early_param("mem", early_parsemem);
-#endif /* CONFIG_PPC_ISERIES */
-
 #ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
@@ -658,10 +557,12 @@ static void __init irqstack_early_init(void)
 	 * SLB misses on them.
 	 */
 	for_each_cpu(i) {
-		softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
-					THREAD_SIZE, 0x10000000));
-		hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
-					THREAD_SIZE, 0x10000000));
+		softirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc_base(THREAD_SIZE,
+					    THREAD_SIZE, 0x10000000));
+		hardirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc_base(THREAD_SIZE,
+					    THREAD_SIZE, 0x10000000));
 	}
 }
 #else
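Only the line-wrapping changed in this hunk, but the call is worth unpacking: lmb_alloc_base(size, align, max_addr) carves size bytes of physical memory, align-aligned, from below max_addr, and __va() converts the result to a kernel virtual address. The 0x10000000 (256MB) ceiling keeps the irq stacks inside the region the "SLB misses" comment above refers to. A toy model of that allocate-below-a-limit semantic (illustrative only; the real allocator works over the LMB region map):

#include <assert.h>
#include <stdio.h>

static unsigned long next_free = 0x01000000;	/* pretend 16MB up is free */

static unsigned long toy_alloc_base(unsigned long size, unsigned long align,
				    unsigned long max_addr)
{
	unsigned long base = (next_free + align - 1) & ~(align - 1);

	if (base + size > max_addr)
		return 0;	/* nothing suitable below the limit */
	next_free = base + size;
	return base;
}

int main(void)
{
	/* Same shape as above: THREAD_SIZE bytes, THREAD_SIZE-aligned,
	 * below 256MB (THREAD_SIZE was 16K on ppc64 of this era). */
	unsigned long thread_size = 16384;
	unsigned long sp = toy_alloc_base(thread_size, thread_size, 0x10000000);

	assert(sp != 0 && sp % thread_size == 0 && sp + thread_size <= 0x10000000);
	printf("irq stack at phys %#lx\n", sp);
	return 0;
}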
@@ -689,8 +590,8 @@ static void __init emergency_stack_init(void)
 	limit = min(0x10000000UL, lmb.rmo_size);
 
 	for_each_cpu(i)
-		paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
-					limit)) + PAGE_SIZE;
+		paca[i].emergency_sp =
+			__va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
 
 /*
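The PAGE_SIZE -> HW_PAGE_SIZE switch in this last hunk fits the rest of the commit (note the new "page size encoding" comment in early_setup() above): once Linux pages can be 64K, code that means the fixed 4K hardware page has to say so explicitly, presumably so the emergency stack stays one hardware page rather than growing with PAGE_SIZE. A sketch of the distinction, with definitions assumed from that era's asm/page.h rather than quoted from it:

/* Assumed shape of the constants after the 64K-pages series: */
#define HW_PAGE_SHIFT	12			/* hardware page: always 4K */
#define HW_PAGE_SIZE	(1UL << HW_PAGE_SHIFT)

#ifdef CONFIG_PPC_64K_PAGES
#define PAGE_SHIFT	16			/* Linux page: 64K */
#else
#define PAGE_SHIFT	HW_PAGE_SHIFT		/* Linux page == hardware page */
#endif
#define PAGE_SIZE	(1UL << PAGE_SHIFT)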