author    Rusty Russell <rusty@rustcorp.com.au>    2009-11-17 17:27:27 -0500
committer Thomas Gleixner <tglx@linutronix.de>     2009-11-18 08:52:25 -0500
commit    2ea6dec4a22a6f66f6633876212fd4d195cf8277
tree      f630c63a9e20fab5b31caa88368293a203103408
parent    72f279b256d520e321a850880d094bc0bcbf45d6

generic-ipi: Add smp_call_function_any()
Andrew points out that acpi-cpufreq uses cpumask_any, when it really
would prefer to use the same CPU if possible (to avoid an IPI). In
general, this seems a good idea to offer.
[ tglx: Documented selection preference and Inlined the UP case to
avoid the copy of smp_call_function_single() and the extra
EXPORT ]
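
As a sketch of the call-site change this enables (do_read and data are
placeholder names, loosely modelled on acpi-cpufreq's per-CPU register
read path; the actual driver conversion is a separate patch):

	/* Before: an arbitrary CPU from the mask, usually costing an IPI
	 * even when the current CPU is already in the mask. */
	err = smp_call_function_single(cpumask_any(mask), do_read, &data, 1);

	/* After: prefer the current CPU, then a same-node CPU, then any
	 * other online CPU in the mask. */
	err = smp_call_function_any(mask, do_read, &data, 1);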
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Zhao Yakui <yakui.zhao@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  include/linux/smp.h | 11
-rw-r--r--  kernel/smp.c        | 45
2 files changed, 55 insertions(+), 1 deletion(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 39c64bae776d..7a0570e6a596 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -76,6 +76,9 @@ void smp_call_function_many(const struct cpumask *mask,
 void __smp_call_function_single(int cpuid, struct call_single_data *data,
 				int wait);
 
+int smp_call_function_any(const struct cpumask *mask,
+			  void (*func)(void *info), void *info, int wait);
+
 /*
  * Generic and arch helpers
  */
@@ -137,9 +140,15 @@ static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
-static inline void init_call_single_data(void)
+static inline void init_call_single_data(void) { }
+
+static inline int
+smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
+		      void *info, int wait)
 {
+	return smp_call_function_single(0, func, info, wait);
 }
+
 #endif /* !SMP */
 
 /*
diff --git a/kernel/smp.c b/kernel/smp.c
index 8bd618f0364d..a8c76069cf50 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -319,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/*
+ * smp_call_function_any - Run a function on any of the given cpus
+ * @mask: The mask of cpus it can run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait until function has completed.
+ *
+ * Returns 0 on success, else a negative status code (if no cpus were online).
+ * Note that @wait will be implicitly turned on in case of allocation failures,
+ * since we fall back to on-stack allocation.
+ *
+ * Selection preference:
+ *	1) current cpu if in @mask
+ *	2) any cpu of current node if in @mask
+ *	3) any other online cpu in @mask
+ */
+int smp_call_function_any(const struct cpumask *mask,
+			  void (*func)(void *info), void *info, int wait)
+{
+	unsigned int cpu;
+	const struct cpumask *nodemask;
+	int ret;
+
+	/* Try for same CPU (cheapest) */
+	cpu = get_cpu();
+	if (cpumask_test_cpu(cpu, mask))
+		goto call;
+
+	/* Try for same node. */
+	nodemask = cpumask_of_node(cpu);
+	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
+	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
+		if (cpu_online(cpu))
+			goto call;
+	}
+
+	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
+	cpu = cpumask_any_and(mask, cpu_online_mask);
+call:
+	ret = smp_call_function_single(cpu, func, info, wait);
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_any);
+
 /**
  * __smp_call_function_single(): Run a function on another CPU
  * @cpu: The CPU to run on.
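
For reference, a minimal caller sketch of the new helper (not part of the
patch: read_counter_on_cpu(), read_counter_any() and struct counter_sample
are hypothetical names; the usual smp_call_function_single() rules apply,
i.e. @func may run in IPI context and must not sleep):

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

struct counter_sample {			/* hypothetical: carries the result back */
	u64 value;
};

static void read_counter_on_cpu(void *info)
{
	struct counter_sample *s = info;

	/*
	 * Runs on whichever CPU smp_call_function_any() picked; that may
	 * be the local CPU (no IPI) or a remote one (IPI context), so it
	 * must be fast and non-blocking.
	 */
	s->value = 0;	/* e.g. read a per-CPU counter or MSR here */
}

static int read_counter_any(const struct cpumask *mask, u64 *out)
{
	struct counter_sample s;
	int err;

	/*
	 * Preference order, per the comment above: current CPU if it is
	 * in @mask, then a CPU on the current node, then any other online
	 * CPU in @mask.  wait=1, so s.value is valid on return.
	 */
	err = smp_call_function_any(mask, read_counter_on_cpu, &s, 1);
	if (err)
		return err;	/* negative status if no CPU in @mask is online */

	*out = s.value;
	return 0;
}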