Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--	arch/powerpc/kernel/smp.c	140
1 file changed, 96 insertions(+), 44 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cbdbb14be4b0..4a6f2ec7e761 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -95,7 +95,7 @@ int smt_enabled_at_boot = 1;
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
 #ifdef CONFIG_PPC64
-void __devinit smp_generic_kick_cpu(int nr)
+int __devinit smp_generic_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
 
@@ -106,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr)
 	 */
 	paca[nr].cpu_start = 1;
 	smp_mb();
-}
-#endif
 
-void smp_message_recv(int msg)
-{
-	switch(msg) {
-	case PPC_MSG_CALL_FUNCTION:
-		generic_smp_call_function_interrupt();
-		break;
-	case PPC_MSG_RESCHEDULE:
-		/* we notice need_resched on exit */
-		break;
-	case PPC_MSG_CALL_FUNC_SINGLE:
-		generic_smp_call_function_single_interrupt();
-		break;
-	case PPC_MSG_DEBUGGER_BREAK:
-		if (crash_ipi_function_ptr) {
-			crash_ipi_function_ptr(get_irq_regs());
-			break;
-		}
-#ifdef CONFIG_DEBUGGER
-		debugger_ipi(get_irq_regs());
-		break;
-#endif /* CONFIG_DEBUGGER */
-		/* FALLTHROUGH */
-	default:
-		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
-		       smp_processor_id(), msg);
-		break;
-	}
+	return 0;
 }
+#endif
 
 static irqreturn_t call_function_action(int irq, void *data)
 {
@@ -146,7 +119,7 @@ static irqreturn_t call_function_action(int irq, void *data)
 
 static irqreturn_t reschedule_action(int irq, void *data)
 {
-	/* we just need the return path side effect of checking need_resched */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
@@ -156,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t debug_ipi_action(int irq, void *data)
+irqreturn_t debug_ipi_action(int irq, void *data)
 {
-	smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+	if (crash_ipi_function_ptr) {
+		crash_ipi_function_ptr(get_irq_regs());
+		return IRQ_HANDLED;
+	}
+
+#ifdef CONFIG_DEBUGGER
+	debugger_ipi(get_irq_regs());
+#endif /* CONFIG_DEBUGGER */
+
 	return IRQ_HANDLED;
 }
 
@@ -197,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg)
 	return err;
 }
 
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+struct cpu_messages {
+	int messages;			/* current messages */
+	unsigned long data;		/* data for cause ipi */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
+
+void smp_muxed_ipi_set_data(int cpu, unsigned long data)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+	info->data = data;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+	char *message = (char *)&info->messages;
+
+	message[msg] = 1;
+	mb();
+	smp_ops->cause_ipi(cpu, info->data);
+}
+
+void smp_muxed_ipi_resend(void)
+{
+	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+
+	if (info->messages)
+		smp_ops->cause_ipi(smp_processor_id(), info->data);
+}
+
+irqreturn_t smp_ipi_demux(void)
+{
+	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	unsigned int all;
+
+	mb();	/* order any irq clear */
+
+	do {
+		all = xchg_local(&info->messages, 0);
+
+#ifdef __BIG_ENDIAN
+		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
+			generic_smp_call_function_interrupt();
+		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
+			scheduler_ipi();
+		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
+			generic_smp_call_function_single_interrupt();
+		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
+			debug_ipi_action(0, NULL);
+#else
+#error Unsupported ENDIAN
+#endif
+	} while (info->messages);
+
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+
 void smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
@@ -216,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
-#ifdef CONFIG_DEBUGGER
-void smp_send_debugger_break(int cpu)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+void smp_send_debugger_break(void)
 {
-	if (likely(smp_ops))
-		smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+	int cpu;
+	int me = raw_smp_processor_id();
+
+	if (unlikely(!smp_ops))
+		return;
+
+	for_each_online_cpu(cpu)
+		if (cpu != me)
+			smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
 }
 #endif
 
@@ -228,9 +276,9 @@ void smp_send_debugger_break(int cpu)
 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 {
 	crash_ipi_function_ptr = crash_ipi_callback;
-	if (crash_ipi_callback && smp_ops) {
+	if (crash_ipi_callback) {
 		mb();
-		smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+		smp_send_debugger_break();
 	}
 }
 #endif
@@ -410,8 +458,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 {
 	int rc, c;
 
-	secondary_ti = current_set[cpu];
-
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
@@ -421,6 +467,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	if (rc)
 		return rc;
 
+	secondary_ti = current_set[cpu];
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -434,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	/* wake up cpus */
 	DBG("smp: kicking cpu %d\n", cpu);
-	smp_ops->kick_cpu(cpu);
+	rc = smp_ops->kick_cpu(cpu);
+	if (rc) {
+		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
+		return rc;
+	}
 
 	/*
 	 * wait to see if the cpu made a callin (is actually up).
@@ -507,7 +559,7 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
 
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -608,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 * se we pin us down to CPU 0 for a short while
 	 */
 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-	cpumask_copy(old_mask, &current->cpus_allowed);
+	cpumask_copy(old_mask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 
 	if (smp_ops && smp_ops->setup_cpu)
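
Note on the muxed-IPI encoding added above: smp_muxed_ipi_message_pass() sets one byte per message (message[msg] = 1), while smp_ipi_demux() tests one bit per message (1 << (24 - 8 * msg)). These two views of info->messages only agree on a big-endian CPU, where byte 0 is the most significant byte of the word; hence the #error on any other endianness. Below is a minimal user-space sketch of that correspondence, not kernel code; the msg values 0..3 stand in for the PPC_MSG_* constants, which the shift arithmetic assumes are numbered 0 through 3.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Stand-ins for PPC_MSG_CALL_FUNCTION .. PPC_MSG_DEBUGGER_BREAK,
	 * assumed (as in the demux arithmetic) to be 0..3. */
	for (int msg = 0; msg <= 3; msg++) {
		unsigned char bytes[4] = { 0, 0, 0, 0 };
		unsigned int word;

		bytes[msg] = 1;	/* what message[msg] = 1 stores */

		/* Reassemble the word as a big-endian load would see it:
		 * byte 0 lands in bits 31..24, byte 3 in bits 7..0. */
		word = (unsigned int)bytes[0] << 24 |
		       (unsigned int)bytes[1] << 16 |
		       (unsigned int)bytes[2] << 8 |
		       (unsigned int)bytes[3];

		/* The flag byte for msg is exactly the bit the demux tests. */
		assert(word == 1u << (24 - 8 * msg));
		printf("msg %d -> bit %d set\n", msg, 24 - 8 * msg);
	}
	return 0;
}

Writing whole bytes lets each sender flag its message type without a read-modify-write racing against senders of other message types, and the single xchg_local() in smp_ipi_demux() then collects and clears every pending flag at once; the do/while loop catches messages that arrive between the xchg and the return.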