diff options
| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-06 12:00:33 -0500 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-06 12:00:33 -0500 |
| commit | 3d14b5beba35250c548d3851a2b84fce742d8311 (patch) | |
| tree | 065e3d93c3fcbc5ee4c44fa78662393cddbdf6de /kernel/smp.c | |
| parent | 0719dc341389882cc834ed18fc9b7fc6006b2b85 (diff) | |
| parent | 1bf8e6219552d5dd27012d567ec8c4bb9c2d86b4 (diff) | |
Merge branch 'sa1100' into devel
Diffstat (limited to 'kernel/smp.c')
| -rw-r--r-- | kernel/smp.c | 56 |
1 files changed, 48 insertions, 8 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index c9d1c7835c2f..a8c76069cf50 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
| @@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data); | |||
| 265 | * @info: An arbitrary pointer to pass to the function. | 265 | * @info: An arbitrary pointer to pass to the function. |
| 266 | * @wait: If true, wait until function has completed on other CPUs. | 266 | * @wait: If true, wait until function has completed on other CPUs. |
| 267 | * | 267 | * |
| 268 | * Returns 0 on success, else a negative status code. Note that @wait | 268 | * Returns 0 on success, else a negative status code. |
| 269 | * will be implicitly turned on in case of allocation failures, since | ||
| 270 | * we fall back to on-stack allocation. | ||
| 271 | */ | 269 | */ |
| 272 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 270 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
| 273 | int wait) | 271 | int wait) |
| @@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 321 | } | 319 | } |
| 322 | EXPORT_SYMBOL(smp_call_function_single); | 320 | EXPORT_SYMBOL(smp_call_function_single); |
| 323 | 321 | ||
| 322 | /* | ||
| 323 | * smp_call_function_any - Run a function on any of the given cpus | ||
| 324 | * @mask: The mask of cpus it can run on. | ||
| 325 | * @func: The function to run. This must be fast and non-blocking. | ||
| 326 | * @info: An arbitrary pointer to pass to the function. | ||
| 327 | * @wait: If true, wait until function has completed. | ||
| 328 | * | ||
| 329 | * Returns 0 on success, else a negative status code (if no cpus were online). | ||
| 330 | * Note that @wait will be implicitly turned on in case of allocation failures, | ||
| 331 | * since we fall back to on-stack allocation. | ||
| 332 | * | ||
| 333 | * Selection preference: | ||
| 334 | * 1) current cpu if in @mask | ||
| 335 | * 2) any cpu of current node if in @mask | ||
| 336 | * 3) any other online cpu in @mask | ||
| 337 | */ | ||
| 338 | int smp_call_function_any(const struct cpumask *mask, | ||
| 339 | void (*func)(void *info), void *info, int wait) | ||
| 340 | { | ||
| 341 | unsigned int cpu; | ||
| 342 | const struct cpumask *nodemask; | ||
| 343 | int ret; | ||
| 344 | |||
| 345 | /* Try for same CPU (cheapest) */ | ||
| 346 | cpu = get_cpu(); | ||
| 347 | if (cpumask_test_cpu(cpu, mask)) | ||
| 348 | goto call; | ||
| 349 | |||
| 350 | /* Try for same node. */ | ||
| 351 | nodemask = cpumask_of_node(cpu); | ||
| 352 | for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; | ||
| 353 | cpu = cpumask_next_and(cpu, nodemask, mask)) { | ||
| 354 | if (cpu_online(cpu)) | ||
| 355 | goto call; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* Any online will do: smp_call_function_single handles nr_cpu_ids. */ | ||
| 359 | cpu = cpumask_any_and(mask, cpu_online_mask); | ||
| 360 | call: | ||
| 361 | ret = smp_call_function_single(cpu, func, info, wait); | ||
| 362 | put_cpu(); | ||
| 363 | return ret; | ||
| 364 | } | ||
| 365 | EXPORT_SYMBOL_GPL(smp_call_function_any); | ||
| 366 | |||
| 324 | /** | 367 | /** |
| 325 | * __smp_call_function_single(): Run a function on another CPU | 368 | * __smp_call_function_single(): Run a function on another CPU |
| 326 | * @cpu: The CPU to run on. | 369 | * @cpu: The CPU to run on. |
| @@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
| 355 | * @wait: If true, wait (atomically) until function has completed | 398 | * @wait: If true, wait (atomically) until function has completed |
| 356 | * on other CPUs. | 399 | * on other CPUs. |
| 357 | * | 400 | * |
| 358 | * If @wait is true, then returns once @func has returned. Note that @wait | 401 | * If @wait is true, then returns once @func has returned. |
| 359 | * will be implicitly turned on in case of allocation failures, since | ||
| 360 | * we fall back to on-stack allocation. | ||
| 361 | * | 402 | * |
| 362 | * You must not call this function with disabled interrupts or from a | 403 | * You must not call this function with disabled interrupts or from a |
| 363 | * hardware interrupt handler or from a bottom half handler. Preemption | 404 | * hardware interrupt handler or from a bottom half handler. Preemption |
| @@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many); | |||
| 443 | * Returns 0. | 484 | * Returns 0. |
| 444 | * | 485 | * |
| 445 | * If @wait is true, then returns once @func has returned; otherwise | 486 | * If @wait is true, then returns once @func has returned; otherwise |
| 446 | * it returns just before the target cpu calls @func. In case of allocation | 487 | * it returns just before the target cpu calls @func. |
| 447 | * failure, @wait will be implicitly turned on. | ||
| 448 | * | 488 | * |
| 449 | * You must not call this function with disabled interrupts or from a | 489 | * You must not call this function with disabled interrupts or from a |
| 450 | * hardware interrupt handler or from a bottom half handler. | 490 | * hardware interrupt handler or from a bottom half handler. |
