author		Gilad Ben-Yossef <gilad@benyossef.com>	2012-03-28 17:42:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 20:14:35 -0400
commit		b3a7e98e024ffa9f7e4554dd720c508015c4a831
tree		50c09e1a47418ba4bb55b11f756bf4d99cf76123
parent		3fc498f165304dc913f1d13b5ac9ab4c758ee7ab
smp: add func to IPI cpus based on parameter func
Add the on_each_cpu_cond() function that wraps on_each_cpu_mask() and
calculates the cpumask of CPUs to IPI by calling a function supplied as a
parameter in order to determine whether to IPI each specific CPU.

The function works around allocation failure of the cpumask variable with
CONFIG_CPUMASK_OFFSTACK=y by iterating over the CPUs and sending an IPI to
each one at a time via smp_call_function_single().

The function is useful since it allows the specific code that decides, in
each case, whether to IPI a specific CPU for a specific request to be
separated from the common boilerplate code of creating the mask, handling
failures, etc.
[akpm@linux-foundation.org: s/gfpflags/gfp_flags/]
[akpm@linux-foundation.org: avoid double-evaluation of `info' (per Michal), parenthesise evaluation of `cond_func']
[akpm@linux-foundation.org: s/CPU/CPUs, use all 80 cols in comment]
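
For illustration, a caller might pair a cheap per-CPU predicate with the
IPI handler roughly as follows (a hypothetical sketch, not part of this
patch; the per-cpu variable pending_work and the helpers
cpu_has_pending_work()/flush_pending_work() are made-up names):

  #include <linux/smp.h>
  #include <linux/percpu.h>
  #include <linux/gfp.h>

  static DEFINE_PER_CPU(unsigned long, pending_work);

  /* Called for each online CPU with preemption disabled;
   * return true to have that CPU IPIed. */
  static bool cpu_has_pending_work(int cpu, void *info)
  {
          return per_cpu(pending_work, cpu) != 0;
  }

  /* Runs on every selected CPU in IPI context; must be fast
   * and must not block. */
  static void flush_pending_work(void *info)
  {
          __this_cpu_write(pending_work, 0);
  }

  static void flush_all_pending_work(void)
  {
          /* IPI only the CPUs that actually have work pending
           * and wait for them to finish. */
          on_each_cpu_cond(cpu_has_pending_work, flush_pending_work,
                           NULL, true, GFP_KERNEL);
  }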
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Sasha Levin <levinsasha928@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Michal Nazarewicz <mina86@mina86.org>
Cc: Kosaki Motohiro <kosaki.motohiro@gmail.com>
Cc: Milton Miller <miltonm@bga.com>
Reviewed-by: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/smp.h	24
-rw-r--r--	kernel/smp.c		61
2 files changed, 85 insertions(+), 0 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index d0adb7898d54..10530d92c04b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,6 +109,15 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 		void *info, bool wait);
 
 /*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfp_flags);
+
+/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -153,6 +162,21 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 			local_irq_enable();		\
 		}					\
 	} while (0)
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)	\
+	do {							\
+		void *__info = (info);				\
+		preempt_disable();				\
+		if ((cond_func)(0, __info)) {			\
+			local_irq_disable();			\
+			(func)(__info);				\
+			local_irq_enable();			\
+		}						\
+		preempt_enable();				\
+	} while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
diff --git a/kernel/smp.c b/kernel/smp.c
index a081e6ce0e0a..2f8b10ecf759 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -730,3 +730,64 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * on_each_cpu_cond(): Call a function on each processor for which
+ * the supplied function cond_func returns true, optionally waiting
+ * for all the required CPUs to finish. This may include the local
+ * processor.
+ * @cond_func:	A callback function that is passed a cpu id and
+ *		the info parameter. The function is called
+ *		with preemption disabled. The function should
+ *		return a boolean value indicating whether to IPI
+ *		the specified CPU.
+ * @func:	The function to run on all applicable CPUs.
+ *		This must be fast and non-blocking.
+ * @info:	An arbitrary pointer to pass to both functions.
+ * @wait:	If true, wait (atomically) until function has
+ *		completed on other CPUs.
+ * @gfp_flags:	GFP flags to use when allocating the cpumask
+ *		used internally by the function.
+ *
+ * The function might sleep if the GFP flags indicate a non
+ * atomic allocation is allowed.
+ *
+ * Preemption is disabled to protect against CPUs going offline but not online.
+ * CPUs going online during the call will not be seen or sent an IPI.
+ *
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+			smp_call_func_t func, void *info, bool wait,
+			gfp_t gfp_flags)
+{
+	cpumask_var_t cpus;
+	int cpu, ret;
+
+	might_sleep_if(gfp_flags & __GFP_WAIT);
+
+	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
+		preempt_disable();
+		for_each_online_cpu(cpu)
+			if (cond_func(cpu, info))
+				cpumask_set_cpu(cpu, cpus);
+		on_each_cpu_mask(cpus, func, info, wait);
+		preempt_enable();
+		free_cpumask_var(cpus);
+	} else {
+		/*
+		 * No free cpumask, bother. No matter, we'll
+		 * just have to IPI them one by one.
+		 */
+		preempt_disable();
+		for_each_online_cpu(cpu)
+			if (cond_func(cpu, info)) {
+				ret = smp_call_function_single(cpu, func,
+							info, wait);
+				WARN_ON_ONCE(!ret);
+			}
+		preempt_enable();
+	}
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
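
As the comment block above notes, on_each_cpu_cond() only sleeps if the
supplied GFP flags allow it, so a caller that must not sleep (but still has
interrupts enabled) can pass an atomic mask and skip waiting. A minimal
sketch, reusing the hypothetical helpers from the example in the changelog
above:

  static void kick_pending_work_nosleep(void)
  {
          /* GFP_ATOMIC keeps the cpumask allocation from sleeping;
           * if that allocation fails, the fallback path IPIs the
           * selected CPUs one by one via smp_call_function_single(). */
          on_each_cpu_cond(cpu_has_pending_work, flush_pending_work,
                           NULL, false, GFP_ATOMIC);
  }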