Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--	arch/arm/kernel/smp.c	61
1 file changed, 23 insertions, 38 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c2..b735521a4a54 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -60,32 +60,11 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
-	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
-	struct task_struct *idle = ci->idle;
 	int ret;
 
 	/*
-	 * Spawn a new process manually, if not already done.
-	 * Grab a pointer to its task struct so we can mess with it
-	 */
-	if (!idle) {
-		idle = fork_idle(cpu);
-		if (IS_ERR(idle)) {
-			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-			return PTR_ERR(idle);
-		}
-		ci->idle = idle;
-	} else {
-		/*
-		 * Since this idle thread is being re-used, call
-		 * init_idle() to reinitialize the thread structure.
-		 */
-		init_idle(idle, cpu);
-	}
-
-	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
 	 */
@@ -251,8 +230,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
 
-	printk("CPU%u: Booted secondary processor\n", cpu);
-
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
@@ -264,6 +241,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
 
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
 	cpu_init();
 	preempt_disable();
 	trace_hardirqs_off();
@@ -318,9 +297,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	per_cpu(cpu_data, cpu).idle = current;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -454,6 +430,9 @@ static struct local_timer_ops *lt_ops;
 #ifdef CONFIG_LOCAL_TIMERS
 int local_timer_register(struct local_timer_ops *ops)
 {
+	if (!is_smp() || !setup_max_cpus)
+		return -ENXIO;
+
 	if (lt_ops)
 		return -EBUSY;
 
@@ -510,10 +489,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +551,25 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +578,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*