author    Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:22:13 -0400
committer Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:22:13 -0400
commit    b7d7a2404f80386307ccc0cde63d8d2a5e3bc85c
tree      e8f4ff391f77ea4f91b4ee1367ca02e008581505
parent    3b16cf874861436725c43ba0b68bdd799297be7c
powerpc: convert to generic helpers for IPI function calls
This converts ppc to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().

ppc loses the timeout functionality of smp_call_function_mask() with
this change, as the generic code does not provide that.

Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
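For context, a minimal hypothetical sketch of what a caller looks like once an architecture uses the generic helpers: the arch only delivers the IPIs (via the arch_send_call_function_single_ipi()/arch_send_call_function_ipi() hooks added below), while queueing the request and tracking completion live in the generic kernel/smp.c. The function and variable names here are invented for illustration, and the five-argument smp_call_function_single() signature (with the then-unused nonatomic argument) is the form current at the time of this commit, matching the four-argument smp_call_function() call visible in the patch:

#include <linux/smp.h>

/* Hypothetical callback: runs on the target CPU in interrupt
 * context, so it must be fast and must not sleep. */
static void poke_remote_cpu(void *info)
{
	int *token = info;

	*token = smp_processor_id();
}

static int example_single_call(void)
{
	int token = -1;

	/* Run poke_remote_cpu() on CPU 1; the final wait=1 argument
	 * blocks until the callback has finished on the remote CPU,
	 * so reading 'token' afterwards is safe. */
	return smp_call_function_single(1, poke_remote_cpu, &token, 0, 1);
}

The same shift is why smp_send_stop() in the patch collapses to a plain smp_call_function(stop_this_cpu, NULL, 0, 0): the timeout and ipi_fail_ok machinery it needed is removed along with the arch-private implementation.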
-rw-r--r--	arch/powerpc/Kconfig                     |   1
-rw-r--r--	arch/powerpc/kernel/smp.c                | 234
-rw-r--r--	arch/powerpc/platforms/cell/interrupt.c  |   1
-rw-r--r--	arch/powerpc/platforms/ps3/smp.c         |   7
-rw-r--r--	arch/powerpc/platforms/pseries/xics.c    |   6
-rw-r--r--	arch/powerpc/sysdev/mpic.c               |   2
-rw-r--r--	include/asm-powerpc/smp.h                |   8
7 files changed, 33 insertions(+), 226 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3934e2659407..852d40c29637 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -110,6 +110,7 @@ config PPC
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_LMB
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config EARLY_PRINTK
 	bool
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1457aa0a08f1..37a5ab410dcc 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
 
 static volatile unsigned int cpu_callin_map[NR_CPUS];
 
-void smp_call_function_interrupt(void);
-
 int smt_enabled_at_boot = 1;
 
-static int ipi_fail_ok;
-
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
 #ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
 {
 	switch(msg) {
 	case PPC_MSG_CALL_FUNCTION:
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();
 		break;
 	case PPC_MSG_RESCHEDULE:
 		/* XXX Do we have to do this? */
 		set_need_resched();
 		break;
+	case PPC_MSG_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
 	case PPC_MSG_DEBUGGER_BREAK:
 		if (crash_ipi_function_ptr) {
 			crash_ipi_function_ptr(get_irq_regs());
@@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu)
 	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+}
+
 #ifdef CONFIG_DEBUGGER
 void smp_send_debugger_break(int cpu)
 {
@@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy)
 		;
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- * Stolen from the i386 version.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
-static struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-} *call_data;
-
-/* delay of at least 8 seconds */
-#define SMP_CALL_TIMEOUT	8
-
-/*
- * These functions send a 'generic call function' IPI to other online
- * CPUS in the system.
- *
- * [SUMMARY] Run a function on other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- * <map> is a cpu map of the cpus to send IPI to.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int __smp_call_function_map(void (*func) (void *info), void *info,
-				   int nonatomic, int wait, cpumask_t map)
-{
-	struct call_data_struct data;
-	int ret = -1, num_cpus;
-	int cpu;
-	u64 timeout;
-
-	if (unlikely(smp_ops == NULL))
-		return ret;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	/* remove 'self' from the map */
-	if (cpu_isset(smp_processor_id(), map))
-		cpu_clear(smp_processor_id(), map);
-
-	/* sanity check the map, remove any non-online processors. */
-	cpus_and(map, map, cpu_online_map);
-
-	num_cpus = cpus_weight(map);
-	if (!num_cpus)
-		goto done;
-
-	call_data = &data;
-	smp_wmb();
-	/* Send a message to all CPUs in the map */
-	for_each_cpu_mask(cpu, map)
-		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
-
-	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
-
-	/* Wait for indication that they have received the message */
-	while (atomic_read(&data.started) != num_cpus) {
-		HMT_low();
-		if (get_tb() >= timeout) {
-			printk("smp_call_function on cpu %d: other cpus not "
-				"responding (%d)\n", smp_processor_id(),
-				atomic_read(&data.started));
-			if (!ipi_fail_ok)
-				debugger(NULL);
-			goto out;
-		}
-	}
-
-	/* optionally wait for the CPUs to complete */
-	if (wait) {
-		while (atomic_read(&data.finished) != num_cpus) {
-			HMT_low();
-			if (get_tb() >= timeout) {
-				printk("smp_call_function on cpu %d: other "
-					"cpus not finishing (%d/%d)\n",
-					smp_processor_id(),
-					atomic_read(&data.finished),
-					atomic_read(&data.started));
-				debugger(NULL);
-				goto out;
-			}
-		}
-	}
-
- done:
-	ret = 0;
-
- out:
-	call_data = NULL;
-	HMT_medium();
-	return ret;
-}
-
-static int __smp_call_function(void (*func)(void *info), void *info,
-			       int nonatomic, int wait)
-{
-	int ret;
-	spin_lock(&call_lock);
-	ret =__smp_call_function_map(func, info, nonatomic, wait,
-				     cpu_online_map);
-	spin_unlock(&call_lock);
-	return ret;
-}
-
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-			int wait)
-{
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	return __smp_call_function(func, info, nonatomic, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			int nonatomic, int wait)
-{
-	cpumask_t map = CPU_MASK_NONE;
-	int ret = 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	if (!cpu_online(cpu))
-		return -EINVAL;
-
-	cpu_set(cpu, map);
-	if (cpu != get_cpu()) {
-		spin_lock(&call_lock);
-		ret = __smp_call_function_map(func, info, nonatomic, wait, map);
-		spin_unlock(&call_lock);
-	} else {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 void smp_send_stop(void)
 {
-	int nolock;
-
-	/* It's OK to fail sending the IPI, since the alternative is to
-	 * be stuck forever waiting on the other CPU to take the interrupt.
-	 *
-	 * It's better to at least continue and go through reboot, since this
-	 * function is usually called at panic or reboot time in the first
-	 * place.
-	 */
-	ipi_fail_ok = 1;
-
-	/* Don't deadlock in case we got called through panic */
-	nolock = !spin_trylock(&call_lock);
-	__smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
-	if (!nolock)
-		spin_unlock(&call_lock);
-}
-
-void smp_call_function_interrupt(void)
-{
-	void (*func) (void *info);
-	void *info;
-	int wait;
-
-	/* call_data will be NULL if the sender timed out while
-	 * waiting on us to receive the call.
-	 */
-	if (!call_data)
-		return;
-
-	func = call_data->func;
-	info = call_data->info;
-	wait = call_data->wait;
-
-	if (!wait)
-		smp_mb__before_atomic_inc();
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
-	(*func)(info);
-	if (wait) {
-		smp_mb__before_atomic_inc();
-		atomic_inc(&call_data->finished);
-	}
+	smp_call_function(stop_this_cpu, NULL, 0, 0);
 }
 
 extern struct gettimeofday_struct do_gtod;
@@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(cpu, cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 
 	local_irq_enable();
 
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 5bf7df146022..2d5bb22d6c09 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -218,6 +218,7 @@ void iic_request_IPIs(void)
 {
 	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
 	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
+	iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
 #ifdef CONFIG_DEBUGGER
 	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
 #endif /* CONFIG_DEBUGGER */
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f0b12f212363..a0927a3bacb7 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu)
 	 * to index needs to be setup.
 	 */
 
 	BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION    != 0);
 	BUILD_BUG_ON(PPC_MSG_RESCHEDULE       != 1);
-	BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK   != 3);
+	BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
+	BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK   != 3);
 
 	for (i = 0; i < MSG_COUNT; i++) {
 		result = ps3_event_receive_port_setup(cpu, &virqs[i]);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index ebebc28fe895..0fc830f576f5 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -383,13 +383,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu)
 		mb();
 		smp_message_recv(PPC_MSG_RESCHEDULE);
 	}
-#if 0
-	if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
+	if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
 			       &xics_ipi_message[cpu].value)) {
 		mb();
-		smp_message_recv(PPC_MSG_MIGRATE_TASK);
+		smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
 	}
-#endif
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 	if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
 			       &xics_ipi_message[cpu].value)) {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7680001676a6..6c90c95b454e 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1494,7 +1494,7 @@ void mpic_request_ipis(void)
 	static char *ipi_names[] = {
 		"IPI0 (call function)",
 		"IPI1 (reschedule)",
-		"IPI2 (unused)",
+		"IPI2 (call function single)",
 		"IPI3 (debugger break)",
 	};
 	BUG_ON(mpic == NULL);
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 505f35bacaa9..c663a1fa77c5 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
  * in /proc/interrupts will be wrong!!! --Troy */
 #define PPC_MSG_CALL_FUNCTION   0
 #define PPC_MSG_RESCHEDULE      1
-/* This is unused now */
-#if 0
-#define PPC_MSG_MIGRATE_TASK    2
-#endif
+#define PPC_MSG_CALL_FUNC_SINGLE 2
 #define PPC_MSG_DEBUGGER_BREAK  3
 
 void smp_init_iSeries(void);
@@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void);
 
 extern struct smp_ops_t *smp_ops;
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */