Diffstat (limited to 'arch/powerpc/kernel/smp.c')
 arch/powerpc/kernel/smp.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 46c56cfd1b2f..6a9bc9ce54e0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -144,13 +144,15 @@ void smp_message_recv(int msg, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+	if (likely(smp_ops))
+		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
 #ifdef CONFIG_DEBUGGER
 void smp_send_debugger_break(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+	if (likely(smp_ops))
+		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
 }
 #endif
 
@@ -158,7 +160,7 @@ void smp_send_debugger_break(int cpu)
 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 {
 	crash_ipi_function_ptr = crash_ipi_callback;
-	if (crash_ipi_callback) {
+	if (crash_ipi_callback && smp_ops) {
 		mb();
 		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
 	}
@@ -220,6 +222,9 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
+	if (unlikely(smp_ops == NULL))
+		return -1;
+
 	data.func = func;
 	data.info = info;
 	atomic_set(&data.started, 0);
@@ -357,7 +362,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	smp_store_cpu_info(boot_cpuid);
 	cpu_callin_map[boot_cpuid] = 1;
 
-	max_cpus = smp_ops->probe();
+	if (smp_ops)
+		max_cpus = smp_ops->probe();
+	else
+		max_cpus = 1;
 
 	smp_space_timers(max_cpus);
 
@@ -453,7 +461,7 @@ void generic_mach_cpu_die(void)
 
 static int __devinit cpu_enable(unsigned int cpu)
 {
-	if (smp_ops->cpu_enable)
+	if (smp_ops && smp_ops->cpu_enable)
 		return smp_ops->cpu_enable(cpu);
 
 	return -ENOSYS;
@@ -467,7 +475,8 @@ int __devinit __cpu_up(unsigned int cpu)
 	if (!cpu_enable(cpu))
 		return 0;
 
-	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
+	if (smp_ops == NULL ||
+	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
@@ -568,7 +577,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	old_mask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
 
-	smp_ops->setup_cpu(boot_cpuid);
+	if (smp_ops)
+		smp_ops->setup_cpu(boot_cpuid);
 
 	set_cpus_allowed(current, old_mask);
 
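
For context, a minimal standalone sketch of the guard pattern the hunks above apply: treat the platform's SMP ops pointer as optional and fall back to safe single-CPU behaviour when it is NULL. This is not kernel code; the names my_smp_ops, message_pass and probe are illustrative stand-ins, not the kernel's own symbols.

/* Illustrative sketch only -- compiles as a plain userspace C program. */
#include <stdio.h>

struct my_smp_ops {
        void (*message_pass)(int cpu, int msg);        /* send an IPI-style message */
        int  (*probe)(void);                           /* report number of CPUs */
};

/* Stays NULL when the platform provides no SMP operations. */
static struct my_smp_ops *my_smp_ops;

static void send_reschedule(int cpu)
{
        if (my_smp_ops)                                /* guard before dereferencing */
                my_smp_ops->message_pass(cpu, 0);
}

static int prepare_cpus(void)
{
        return my_smp_ops ? my_smp_ops->probe() : 1;   /* default to one CPU */
}

int main(void)
{
        send_reschedule(0);                            /* safe even with NULL ops */
        printf("max_cpus = %d\n", prepare_cpus());
        return 0;
}

With my_smp_ops left NULL, send_reschedule() is a no-op and prepare_cpus() reports a single CPU, mirroring how the patch lets the SMP code paths run harmlessly on platforms that never register smp_ops.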