Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c  68
1 file changed, 57 insertions(+), 11 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..f362a8553777 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,7 +135,8 @@ void generic_smp_call_function_interrupt(void)
 			 */
 			smp_wmb();
 			data->csd.flags &= ~CSD_FLAG_WAIT;
-		} else
+		}
+		if (data->csd.flags & CSD_FLAG_ALLOC)
 			call_rcu(&data->rcu_head, rcu_free_call_data);
 	}
 	rcu_read_unlock();
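This first hunk decouples the wait handshake from the RCU free: clearing CSD_FLAG_WAIT and freeing via CSD_FLAG_ALLOC become independent checks, which lets a later hunk set both flags on one kmalloc'ed csd. A minimal sketch of the patched interrupt-side logic (my paraphrase of the code above, not additional patch content):

	/* The two flags are now orthogonal, so ALLOC|WAIT works: the
	 * waiter is released first, then the csd is still freed via RCU. */
	if (data->csd.flags & CSD_FLAG_WAIT) {
		smp_wmb();	/* order stores before releasing the waiter */
		data->csd.flags &= ~CSD_FLAG_WAIT;
	}
	if (data->csd.flags & CSD_FLAG_ALLOC)	/* no longer an "else" branch */
		call_rcu(&data->rcu_head, rcu_free_call_data);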
@@ -209,8 +210,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 {
 	struct call_single_data d;
 	unsigned long flags;
-	/* prevent preemption and reschedule on another processor */
+	/* prevent preemption and reschedule on another processor,
+	   as well as CPU removal */
 	int me = get_cpu();
+	int err = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -219,7 +222,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		local_irq_save(flags);
 		func(info);
 		local_irq_restore(flags);
-	} else {
+	} else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
 		struct call_single_data *data = NULL;
 
 		if (!wait) {
@@ -235,10 +238,12 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		data->func = func;
 		data->info = info;
 		generic_exec_single(cpu, data);
+	} else {
+		err = -ENXIO;	/* CPU not online */
 	}
 
 	put_cpu();
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
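With the hunks above, smp_call_function_single() reports an offline or out-of-range target with -ENXIO instead of silently returning 0. A hypothetical caller (the names do_flush and flush_one_cpu are illustrative, not from the patch) would check the new return value:

	#include <linux/smp.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>

	static void do_flush(void *info)
	{
		/* per-CPU work runs here, in interrupt context on the target */
	}

	static int flush_one_cpu(int cpu)
	{
		/* wait == 1: returns once do_flush() has run on @cpu */
		int err = smp_call_function_single(cpu, do_flush, NULL, 1);

		if (err == -ENXIO)	/* new with this patch: target not online */
			printk(KERN_WARNING "cpu %d not online\n", cpu);
		return err;
	}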
@@ -260,6 +265,42 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+
+	for_each_cpu_mask(cpu, mask) {
+		data.flags = CSD_FLAG_WAIT;
+		generic_exec_single(cpu, &data);
+	}
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
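The race the new quiesce pass closes is easiest to see as a timeline. This is my illustration of the scenario described in the comment above, not part of the patch:

	/*
	 * CPU0 (caller, on-stack csd)   CPU1 (target)          CPU2 (bystander)
	 * ---------------------------   --------------------   ----------------
	 * queue csd, send IPI
	 *                               walks queue, runs
	 *                               func(), clears
	 *                               CSD_FLAG_WAIT
	 * csd_flag_wait() returns;
	 * caller returns, stack csd
	 * becomes garbage
	 *                                                      still walking the
	 *                                                      shared queue, reads
	 *                                                      the dead csd -> bug
	 *
	 * The quiesce pass sends a synchronous dummy IPI to each CPU in the
	 * mask (per the loop above), so all of them have left the queue walk
	 * before the caller's stack frame can be reused.
	 */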
@@ -285,6 +326,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -306,15 +348,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
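This hunk inverts the old policy: the kmalloc'ed csd is now preferred even for synchronous calls, and the on-stack csd is only an allocation-failure fallback. For reference, the flag states it can produce (my summary, not patch content):

	/*
	 * kmalloc ok,  !wait:  flags = CSD_FLAG_ALLOC
	 *                      -> freed via call_rcu() (hunk at line 135)
	 * kmalloc ok,   wait:  flags = CSD_FLAG_ALLOC | CSD_FLAG_WAIT
	 *                      -> caller spins in csd_flag_wait(); the csd is
	 *                         still freed via RCU (needs the decoupled
	 *                         flag checks from the first hunk)
	 * kmalloc fail:        flags = CSD_FLAG_WAIT, wait = 1, slowpath = 1
	 *                      -> on-stack csd; quiesce pass runs before the
	 *                         stack frame may be reused
	 */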
@@ -331,8 +374,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(mask);
+	}
 
 	return 0;
 }
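Taken together, a synchronous cross-CPU call behaves as sketched below. The caller-side code is hypothetical (the names ipi_example and do_work are mine); the semantics are those of the patched smp_call_function_mask():

	#include <linux/smp.h>
	#include <linux/cpumask.h>

	static void do_work(void *info)
	{
		/* runs on every CPU in the mask, in interrupt context */
	}

	static void ipi_example(void)
	{
		cpumask_t mask;
		int me = get_cpu();	/* pin this CPU while building the mask */

		mask = cpu_online_map;
		cpu_clear(me, mask);

		/*
		 * wait == 1: on return, do_work() has run on every CPU in mask.
		 * If the GFP_ATOMIC kmalloc inside failed, the call still
		 * completed via the on-stack csd plus the quiesce pass.
		 */
		smp_call_function_mask(mask, do_work, NULL, 1);
		put_cpu();
	}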
