Diffstat (limited to 'kernel/smp.c')
-rw-r--r--	kernel/smp.c	58
1 file changed, 50 insertions(+), 8 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..782e2b93e465 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,7 +135,8 @@ void generic_smp_call_function_interrupt(void)
 			 */
 			smp_wmb();
 			data->csd.flags &= ~CSD_FLAG_WAIT;
-		} else
+		}
+		if (data->csd.flags & CSD_FLAG_ALLOC)
 			call_rcu(&data->rcu_head, rcu_free_call_data);
 	}
 	rcu_read_unlock();
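
The handler change above is subtle: clearing CSD_FLAG_WAIT and freeing via RCU used to be mutually exclusive ("} else"), but a later hunk in this patch starts tagging synchronous callers' data CSD_FLAG_ALLOC|CSD_FLAG_WAIT, so an allocated-and-waited item must now take both actions. A minimal user-space model of the fixed control flow; the flag values mirror kernel/smp.c, but complete_call() and the printf stand-in for call_rcu() are invented for illustration:

	#include <stdio.h>

	#define CSD_FLAG_WAIT	0x01
	#define CSD_FLAG_ALLOC	0x02

	/* Model of the handler's completion step after the fix. */
	static void complete_call(unsigned int *flags)
	{
		if (*flags & CSD_FLAG_WAIT)
			*flags &= ~CSD_FLAG_WAIT;	/* release the waiting caller */
		/* No longer an "else": a WAIT|ALLOC item must also be freed. */
		if (*flags & CSD_FLAG_ALLOC)
			printf("call_rcu(): free after a grace period\n");
	}

	int main(void)
	{
		unsigned int flags = CSD_FLAG_WAIT | CSD_FLAG_ALLOC;

		complete_call(&flags);	/* the old "} else" would have leaked this item */
		return 0;
	}
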
@@ -260,6 +261,42 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+
+	for_each_cpu_mask(cpu, mask) {
+		data.flags = CSD_FLAG_WAIT;
+		generic_exec_single(cpu, &data);
+	}
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
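
The comment block above carries the core idea: once every CPU in the mask has completed a synchronous no-op call, no CPU can still hold a reference to a call-function item queued before it, so the caller's stack frame is safe to unwind. Below is a rough user-space model of that mechanism using pthreads. Worker threads stand in for CPUs, post_sync() for generic_exec_single() with CSD_FLAG_WAIT, and quiesce_stack() for smp_call_function_mask_quiesce_stack(); the mailbox scheme and every name here are invented for the sketch, not kernel APIs.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NWORKERS 4

	struct mailbox {
		void (*func)(void *);
		void *info;
		atomic_bool pending;	/* set by the poster, cleared by the worker */
	};

	static struct mailbox mbox[NWORKERS];
	static atomic_bool stop;

	static void *worker(void *arg)
	{
		struct mailbox *m = arg;

		while (!atomic_load(&stop)) {
			if (atomic_load(&m->pending)) {
				m->func(m->info);
				atomic_store(&m->pending, false); /* like clearing CSD_FLAG_WAIT */
			}
		}
		return NULL;
	}

	/* Synchronous "IPI": returns only once the target worker has run func. */
	static void post_sync(int cpu, void (*func)(void *), void *info)
	{
		struct mailbox *m = &mbox[cpu];

		m->func = func;
		m->info = info;
		atomic_store(&m->pending, true);
		while (atomic_load(&m->pending))
			;	/* spin, as csd_flag_wait() does */
	}

	static void quiesce_dummy(void *unused) { (void)unused; }

	/*
	 * Once every worker has completed a synchronous no-op, none of them
	 * can still be running anything posted earlier, so on-stack data
	 * handed out before this point may safely go out of scope.
	 */
	static void quiesce_stack(void)
	{
		for (int cpu = 0; cpu < NWORKERS; cpu++)
			post_sync(cpu, quiesce_dummy, NULL);
	}

	static void say_hello(void *info)
	{
		(void)info;
		printf("hello from a worker\n");
	}

	int main(void)
	{
		pthread_t tids[NWORKERS];

		for (int i = 0; i < NWORKERS; i++)
			pthread_create(&tids[i], NULL, worker, &mbox[i]);

		post_sync(0, say_hello, NULL);	/* the "call function" itself */
		quiesce_stack();		/* now on-stack call data could be freed */

		atomic_store(&stop, true);
		for (int i = 0; i < NWORKERS; i++)
			pthread_join(tids[i], NULL);
		return 0;
	}

As the kernel comment says, this is very slow, which is why the patch only pays the cost on the rare oom fallback path: the final hunk invokes smp_call_function_mask_quiesce_stack() after csd_flag_wait(), and only when unlikely(slowpath).
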
@@ -285,6 +322,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -306,15 +344,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
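
This hunk inverts the old policy: kmalloc used to be attempted only for asynchronous calls, with synchronous callers always using on-stack data; now allocation is preferred even when waiting, because on-stack data has become expensive (it requires the quiescence pass above). A compilable restatement of the decision, with pick_data() a made-up name and malloc standing in for kmalloc(GFP_ATOMIC):

	#include <stdio.h>
	#include <stdlib.h>

	#define CSD_FLAG_WAIT	0x01
	#define CSD_FLAG_ALLOC	0x02

	struct csd { unsigned int flags; };

	static struct csd *pick_data(struct csd *stack_d, int *wait, int *slowpath)
	{
		struct csd *d = malloc(sizeof(*d));	/* kmalloc(GFP_ATOMIC) in the kernel */

		if (d) {
			d->flags = CSD_FLAG_ALLOC;
			if (*wait)
				d->flags |= CSD_FLAG_WAIT;
		} else {
			d = stack_d;	/* oom fallback: on-stack data */
			d->flags = CSD_FLAG_WAIT;
			*wait = 1;	/* caller must wait before its frame dies */
			*slowpath = 1;	/* ...and then quiesce all CPUs */
		}
		return d;
	}

	int main(void)
	{
		struct csd stack_d;
		int wait = 0, slowpath = 0;
		struct csd *d = pick_data(&stack_d, &wait, &slowpath);

		printf("flags=%#x wait=%d slowpath=%d\n", d->flags, wait, slowpath);
		if (d != &stack_d)
			free(d);
		return 0;
	}
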
@@ -331,8 +370,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(mask);
+	}
 
 	return 0;
 }