Diffstat (limited to 'kernel/smp.c')

 kernel/smp.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 53 insertions(+), 9 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 462c785ca1ee..782e2b93e465 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,7 +33,7 @@ struct call_single_queue {
 	spinlock_t lock;
 };
 
-void __cpuinit init_call_single_data(void)
+static int __cpuinit init_call_single_data(void)
 {
 	int i;
 
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
 		spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
+	return 0;
 }
+early_initcall(init_call_single_data);
 
 static void csd_flag_wait(struct call_single_data *data)
 {
@@ -133,7 +135,8 @@ void generic_smp_call_function_interrupt(void)
 			 */
 			smp_wmb();
 			data->csd.flags &= ~CSD_FLAG_WAIT;
-		} else
+		}
+		if (data->csd.flags & CSD_FLAG_ALLOC)
 			call_rcu(&data->rcu_head, rcu_free_call_data);
 	}
 	rcu_read_unlock();
@@ -258,6 +261,42 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+
+	for_each_cpu_mask(cpu, mask) {
+		data.flags = CSD_FLAG_WAIT;
+		generic_exec_single(cpu, &data);
+	}
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
@@ -283,6 +322,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -304,15 +344,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
@@ -329,8 +370,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(mask);
+	}
 
 	return 0;
 }
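
For context, a minimal caller-side sketch (not part of the patch; the
function and per-cpu variable names below are made up for illustration).
After this change, a synchronous call normally runs on a kmalloc'ed
call_single_data; the on-stack fallback, and with it the slow quiesce
pass, only triggers when the GFP_ATOMIC allocation fails.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);	/* hypothetical */

/* Runs in IPI context on each CPU in the mask. */
static void reset_demo_counter(void *unused)
{
	__get_cpu_var(demo_counter) = 0;
}

/*
 * Must be called with interrupts enabled -- the patch keeps the
 * WARN_ON(irqs_disabled()) deadlock check. wait == 1 now takes the
 * kmalloc path too, so the caller's stack is normally never visible
 * to remote CPUs.
 */
static void reset_demo_counters(cpumask_t mask)
{
	smp_call_function_mask(mask, reset_demo_counter, NULL, 1);
}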
