author:    will schmidt <will_schmidt@vnet.ibm.com>  2007-05-02 13:12:34 -0400
committer: Paul Mackerras <paulus@samba.org>  2007-05-07 06:31:13 -0400
commit:    44755d11a3c054adf7eb974a4720936563cf7dcf
tree:      933431e24ce2db69e2857e1daa7c4ed1c7bdbf86 /arch/powerpc/kernel/smp.c
parent:    e9e77ce8718def7838626aa52bed02fe1b9837b9
[POWERPC] Add smp_call_function_map and smp_call_function_single
Add a new function named smp_call_function_single(). This matches a
generic prototype from include/linux/smp.h.

Add a function smp_call_function_map(). This is, for the most part, a
rename of smp_call_function, with some added cpumask support.
smp_call_function and smp_call_function_single call into
smp_call_function_map.

Lightly tested on 970mp (blade), power4 and power5.

Signed-off-by: Will Schmidt <will_schmidt@vnet.ibm.com>
cc: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
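For orientation, here is a minimal caller sketch. It is not part of this patch; the handler name, the demo wrapper and the choice of CPU 1 are hypothetical, and it assumes only the two exported entry points added below:

#include <linux/smp.h>
#include <linux/kernel.h>

/* Cross-call handler: runs on the target CPU(s) and, per the comment
 * block in smp.c, must be fast and non-blocking. */
static void demo_report(void *info)
{
        printk(KERN_INFO "%s: running on cpu %d\n",
               (char *)info, smp_processor_id());
}

static int demo_cross_calls(void)
{
        int rc;

        /* Run demo_report on one specific CPU (CPU 1, chosen arbitrarily),
         * blocking until it finishes.  Returns -EINVAL if that CPU is
         * offline and -EBUSY if it is the calling CPU. */
        rc = smp_call_function_single(1, demo_report, "single", 0, 1);
        if (rc)
                return rc;

        /* Run it on every other online CPU; this path hands
         * cpu_online_map down to smp_call_function_map(). */
        return smp_call_function(demo_report, "all", 0, 1);
}

Both wrappers funnel into smp_call_function_map(), which filters the supplied mask and sends the per-CPU IPIs.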
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--   arch/powerpc/kernel/smp.c   73
1 file changed, 52 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d8e503b2e1af..22f1ef1b3100 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -176,10 +176,10 @@ static struct call_data_struct {
 #define SMP_CALL_TIMEOUT        8
 
 /*
- * This function sends a 'generic call function' IPI to all other CPUs
- * in the system.
+ * These functions send a 'generic call function' IPI to other online
+ * CPUS in the system.
  *
- * [SUMMARY] Run a function on all other CPUs.
+ * [SUMMARY] Run a function on other CPUs.
  * <func> The function to run. This must be fast and non-blocking.
  * <info> An arbitrary pointer to pass to the function.
  * <nonatomic> currently unused.
@@ -190,18 +190,26 @@ static struct call_data_struct {
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-                       int wait)
+int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+                       int wait, cpumask_t map)
 {
         struct call_data_struct data;
-        int ret = -1, cpus;
+        int ret = -1, num_cpus;
+        int cpu;
         u64 timeout;
 
         /* Can deadlock when called with interrupts disabled */
         WARN_ON(irqs_disabled());
 
+        /* remove 'self' from the map */
+        if (cpu_isset(smp_processor_id(), map))
+                cpu_clear(smp_processor_id(), map);
+
+        /* sanity check the map, remove any non-online processors. */
+        cpus_and(map, map, cpu_online_map);
+
         if (unlikely(smp_ops == NULL))
-                return -1;
+                return ret;
 
         data.func = func;
         data.info = info;
@@ -213,40 +221,42 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
         spin_lock(&call_lock);
         /* Must grab online cpu count with preempt disabled, otherwise
          * it can change. */
-        cpus = num_online_cpus() - 1;
-        if (!cpus) {
+        num_cpus = num_online_cpus() - 1;
+        if (!num_cpus || cpus_empty(map)) {
                 ret = 0;
                 goto out;
         }
 
         call_data = &data;
         smp_wmb();
-        /* Send a message to all other CPUs and wait for them to respond */
-        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
+        /* Send a message to all CPUs in the map */
+        for_each_cpu_mask(cpu, map)
+                smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 
         timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
 
-        /* Wait for response */
-        while (atomic_read(&data.started) != cpus) {
+        /* Wait for indication that they have received the message */
+        while (atomic_read(&data.started) != num_cpus) {
                 HMT_low();
                 if (get_tb() >= timeout) {
                         printk("smp_call_function on cpu %d: other cpus not "
                                "responding (%d)\n", smp_processor_id(),
                                atomic_read(&data.started));
                         debugger(NULL);
                         goto out;
                 }
         }
 
+        /* optionally wait for the CPUs to complete */
         if (wait) {
-                while (atomic_read(&data.finished) != cpus) {
+                while (atomic_read(&data.finished) != num_cpus) {
                         HMT_low();
                         if (get_tb() >= timeout) {
                                 printk("smp_call_function on cpu %d: other "
                                        "cpus not finishing (%d/%d)\n",
                                        smp_processor_id(),
                                        atomic_read(&data.finished),
                                        atomic_read(&data.started));
                                 debugger(NULL);
                                 goto out;
                         }
@@ -262,8 +272,29 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
         return ret;
 }
 
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+                       int wait)
+{
+        return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
+}
 EXPORT_SYMBOL(smp_call_function);
 
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
+                       int wait)
+{
+        cpumask_t map=CPU_MASK_NONE;
+
+        if (!cpu_online(cpu))
+                return -EINVAL;
+
+        if (cpu == smp_processor_id())
+                return -EBUSY;
+
+        cpu_set(cpu, map);
+        return smp_call_function_map(func,info,nonatomic,wait,map);
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
 void smp_call_function_interrupt(void)
 {
         void (*func) (void *info);
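
As a closing aside, and purely as an illustrative sketch rather than anything from the patch, the cpumask filtering that smp_call_function_map() performs above can be shown in isolation using the same 2.6.21-era helpers it relies on (the function name count_ipi_targets is made up):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Count how many CPUs would actually be sent an IPI for a given request
 * mask, mirroring the filtering done in smp_call_function_map(). */
static int count_ipi_targets(cpumask_t map)
{
        int cpu, targets = 0;

        /* Never target the calling CPU itself. */
        if (cpu_isset(smp_processor_id(), map))
                cpu_clear(smp_processor_id(), map);

        /* Drop any CPUs that are not currently online. */
        cpus_and(map, map, cpu_online_map);

        /* Whatever remains is exactly what the IPI send loop walks. */
        for_each_cpu_mask(cpu, map)
                targets++;

        return targets;
}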