Diffstat (limited to 'arch/powerpc/kernel/smp.c')

 arch/powerpc/kernel/smp.c | 153 ++++++++++++++++++++------------
 1 file changed, 97 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 981360509172..cbdbb14be4b0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
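
The per-cpu cache above lets a CPU that is offlined and later re-onlined reuse
the idle task it was originally given instead of forking a fresh one on every
replug; the accessor macros hide whether the backing store is per-cpu (hotplug)
or a plain __cpuinitdata array. A minimal sketch of the intended lifecycle
(only get_idle_for_cpu() is exercised by this patch; the set_idle_for_cpu()
call shown here is illustrative):

	struct task_struct *idle = get_idle_for_cpu(cpu);	/* cached from a prior boot? */

	if (!idle) {
		idle = fork_idle(cpu);			/* first bring-up: fork once */
		if (!IS_ERR(idle))
			set_idle_for_cpu(cpu, idle);	/* remember for a later replug */
	} else {
		init_idle(idle, cpu);			/* replug: recycle the cached task */
	}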
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +315,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-#endif
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 	return 0;
 }
 
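generic_cpu_disable() now ends with an unconditional migrate_irqs(), moving
interrupts targeted at the dying CPU onto the remaining online CPUs on both
32-bit and 64-bit, where the old code only ran a PPC64-only fixup_irqs().
generic_cpu_enable() is deleted outright: onlining flows through the normal
__cpu_up() path below instead of a private spin-until-online loop. Platforms
pick up these generic helpers through their smp_ops; roughly (the initializer
and its name are illustrative; the field names follow struct smp_ops_t, and
generic_cpu_die(), defined elsewhere in this file, waits for the dying CPU to
reach CPU_DEAD):

	static struct smp_ops_t example_smp_ops = {
		.cpu_disable	= generic_cpu_disable,	/* mark !online, migrate_irqs() */
		.cpu_die	= generic_cpu_die,	/* wait for CPU_DEAD */
	};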
@@ -362,37 +338,89 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
 
-	return -ENOSYS;
+	return 0;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
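
The workqueue detour in create_idle() is the point of this hunk: __cpu_up()
runs in the context of whatever process wrote to sysfs to online the CPU, and
fork_idle() must not be called from there, so the fork is punted to a kworker
and the caller waits on an on-stack completion. Stripped of the CPU-bringup
specifics, the pattern looks like this (names here are illustrative, not from
the patch):

	struct defer_ctx {
		struct work_struct work;
		struct completion done;
		long result;
	};

	static void defer_fn(struct work_struct *work)
	{
		struct defer_ctx *ctx = container_of(work, struct defer_ctx, work);

		ctx->result = 42;		/* stand-in for fork_idle() */
		complete(&ctx->done);		/* wake the waiting caller */
	}

	static long run_from_kernel_thread(void)
	{
		struct defer_ctx ctx = {
			.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		};

		INIT_WORK_ONSTACK(&ctx.work, defer_fn);
		schedule_work(&ctx.work);	/* executes in a kworker, not current */
		wait_for_completion(&ctx.done);
		return ctx.result;
	}

Note also that create_idle() hands the fork_idle() error back to __cpu_up()
instead of panicking as smp_create_idle() did, so a failed hotplug attempt is
now survivable.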
@@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
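
The new bringup_done callout gives a platform a single notification from
smp_cpus_done() once all boot-time secondaries are up. A platform that wants
it supplies the hook through its smp_ops; as a sketch (assuming the
.bringup_done member of struct smp_ops_t that this series adds; the names are
illustrative):

	/* Illustrative: runs once, from smp_cpus_done(), after every
	 * secondary CPU has come online. */
	static void example_bringup_done(void)
	{
		/* release bootstrap-only resources, kick deferred setup, ... */
	}

	static struct smp_ops_t example_smp_ops = {
		.bringup_done	= example_bringup_done,
	};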
@@ -660,5 +697,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif
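
With start_secondary() now declared void and ending in BUG(), and cpu_die()
falling into start_secondary_resume() when the platform handler returns, a
"soft" offline becomes a round trip: the dying CPU parks itself in the
platform's cpu_die hook, and simply returning from that hook re-enters
secondary bring-up as if the CPU had just been released from the spin table.
generic_set_cpu_dead() exists for exactly this case, since cpu_state became
static above and platform code can no longer poke the per-cpu variable
directly. A minimal ppc_md.cpu_die handler of that style might look like this
(sketch; replug_requested() is a hypothetical wake-up condition):

	static void example_cpu_die(void)
	{
		unsigned int cpu = smp_processor_id();

		generic_set_cpu_dead(cpu);	/* let generic_cpu_die() waiters proceed */
		while (!replug_requested(cpu))	/* hypothetical: spin until re-onlined */
			cpu_relax();
		/* returning re-enters start_secondary() via start_secondary_resume() */
	}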