author		Jens Axboe <jens.axboe@oracle.com>	2008-06-10 14:52:59 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:22:58 -0400
commit		490f5de52a87063fcb40e3b22f61b0779603ff6d (patch)
tree		8decf32351fcd840a743d374c0cb7f924bcd9dea /arch/sh/kernel/smp.c
parent		dbcf4787d816a4694ec83b5fde1a947c3ce74d57 (diff)
sh: convert to generic helpers for IPI function calls
This converts sh to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
but it compiles.

Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
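For context: with the generic helpers, kernel/smp.c owns the call queues, locking, and completion waiting, so an architecture only has to (a) send an IPI on request and (b) call back into the generic code when that IPI arrives. The send half is the pair of arch_send_* functions added in the diff below. The receive half is not part of this file; the following is only a sketch of what an sh platform's IPI demultiplexer is expected to do, with the handler name and dispatch style assumed for illustration:

#include <linux/smp.h>	/* generic_smp_call_function_*_interrupt() */

/*
 * Hypothetical receive-side demultiplexer; the real one lives in sh
 * platform code, not in arch/sh/kernel/smp.c. On SMP_MSG_FUNCTION*
 * IPIs (message types from asm/smp.h) it hands control back to the
 * generic helpers, which run the queued functions on this CPU.
 */
static void ipi_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		/* run entries queued by smp_call_function() */
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		/* run the entry queued by smp_call_function_single() */
		generic_smp_call_function_single_interrupt();
		break;
	}
}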
Diffstat (limited to 'arch/sh/kernel/smp.c')
-rw-r--r--	arch/sh/kernel/smp.c	48
1 file changed, 8 insertions, 40 deletions
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5d039d168f57..2ed8dceb297b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map);
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
 
-static atomic_t cpus_booted = ATOMIC_INIT(0);
-
-/*
- * Run specified function on a particular processor.
- */
-void __smp_call_function(unsigned int cpu);
-
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
@@ -178,42 +171,17 @@ void smp_send_stop(void)
 	smp_call_function(stop_this_cpu, 0, 1, 0);
 }
 
-struct smp_fn_call_struct smp_fn_call = {
-	.lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
-	.finished = ATOMIC_INIT(0),
-};
-
-/*
- * The caller of this wants the passed function to run on every cpu. If wait
- * is set, wait until all cpus have finished the function before returning.
- * The lock is here to protect the call structure.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	unsigned int nr_cpus = atomic_read(&cpus_booted);
-	int i;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&smp_fn_call.lock);
-
-	atomic_set(&smp_fn_call.finished, 0);
-	smp_fn_call.fn = func;
-	smp_fn_call.data = info;
-
-	for (i = 0; i < nr_cpus; i++)
-		if (i != smp_processor_id())
-			plat_send_ipi(i, SMP_MSG_FUNCTION);
-
-	if (wait)
-		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
+	int cpu;
 
-	spin_unlock(&smp_fn_call.lock);
+	for_each_cpu_mask(cpu, mask)
+		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+}
 
-	return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
 /* Not really SMP stuff ... */
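With this conversion in place, sh gains smp_call_function_single() from the generic code. A usage sketch follows; the callback and CPU number are made up for illustration, and note that the API of this era still carries the nonatomic/retry argument, as the smp_call_function(stop_this_cpu, 0, 1, 0) call in the hunk above shows:

#include <linux/kernel.h>
#include <linux/smp.h>

/* Illustrative callback: report which CPU it ran on. */
static void show_cpu(void *unused)
{
	printk(KERN_INFO "running on CPU %d\n", smp_processor_id());
}

static void example(void)
{
	/*
	 * Run show_cpu() on CPU 1 and wait for completion. The fourth
	 * argument is the retry/nonatomic flag still present in this
	 * era's signature (0 here); the fifth is wait.
	 */
	smp_call_function_single(1, show_cpu, NULL, 0, 1);
}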