Diffstat (limited to 'kernel/smp.c')
 kernel/smp.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 7 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..e6084f6efb4d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -260,6 +260,41 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+	data.flags = CSD_FLAG_WAIT;
+
+	for_each_cpu_mask(cpu, mask)
+		generic_exec_single(cpu, &data);
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
@@ -285,6 +320,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -306,15 +342,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
@@ -331,8 +368,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(allbutself);
+	}
 
 	return 0;
 }
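
Distilled, the allocation and wait logic this patch leaves behind in smp_call_function_mask() has the following shape. This is a simplified sketch, not the literal kernel code: it is written as if it lived inside kernel/smp.c (so the file-local csd_flag_wait() and the new smp_call_function_mask_quiesce_stack() helper are visible), the queueing and IPI steps are elided, and call_mask_sketch() is a hypothetical name.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

/* Simplified shape of the patched allocation and wait logic (sketch). */
static int call_mask_sketch(cpumask_t mask, void (*func)(void *), void *info,
			    int wait)
{
	struct call_function_data d;	/* on-stack fallback element */
	struct call_function_data *data;
	int slowpath = 0;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (data) {
		/* Fast path: heap element, freed by the IPI handler. */
		data->csd.flags = CSD_FLAG_ALLOC;
		if (wait)
			data->csd.flags |= CSD_FLAG_WAIT;
	} else {
		/* OOM fallback: the on-stack element forces a synchronous call. */
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
		slowpath = 1;
	}

	/* ... enqueue 'data' on the shared queue and send the IPIs ... */

	if (wait) {
		csd_flag_wait(&data->csd);
		/*
		 * On the slowpath, 'd' lives in this stack frame and every
		 * CPU scans the shared queue, so the cleared wait flag is
		 * not enough: flush each target CPU through a synchronous
		 * dummy IPI before the frame can be reused.
		 */
		if (unlikely(slowpath))
			smp_call_function_mask_quiesce_stack(mask);
	}
	return 0;
}

The trade-off named in the patch comment is visible here: the heap allocation makes even the common synchronous case pay a kmalloc()/kfree(), while the expensive dummy-IPI quiesce is confined to the rare allocation-failure path.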
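
For callers nothing changes at the API level; in particular, the calling CPU is still filtered out of the mask internally (via allbutself above). A minimal usage sketch, assuming the smp_call_function_mask() signature shown in this diff; count_cpu(), count_other_cpus() and the hits counter are hypothetical:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);

/* Runs in IPI context on each selected CPU; must not sleep. */
static void count_cpu(void *info)
{
	atomic_inc((atomic_t *)info);
}

static int count_other_cpus(void)
{
	int ret;

	preempt_disable();
	/* wait == 1: returns only after every target CPU ran count_cpu(). */
	ret = smp_call_function_mask(cpu_online_map, count_cpu, &hits, 1);
	preempt_enable();
	return ret;
}

With wait == 1 the caller blocks until every target CPU has run the function, so it may safely pass pointers to its own stack as info; with wait == 0 it must not, since the function may run after the caller has returned.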