author	David Daney <david.daney@cavium.com>	2013-09-11 17:23:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-11 18:58:23 -0400
commit	fa688207c9db48b64ab6538abc3fcdf26110b9ec (patch)
tree	47fff6ebaa5b0b7d3feca64010051899e29db475 /include/linux/smp.h
parent	c14c338cb05c700a260480c197cfd6da8f8b7d2e (diff)
smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in
!SMP version of on_each_cpu()"), we don't want to enable irqs if they
are not already enabled. There are currently no known problematic
callers of these functions, but since this is a known failure pattern,
we fix them preemptively.
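The failure pattern looks roughly like the following hypothetical UP
caller (flush_remote() and flush_in_critical_section() are invented
for illustration, not taken from any real caller):

#include <linux/cpumask.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

static void flush_remote(void *info)
{
	/* per-cpu flush work */
}

static void flush_in_critical_section(const struct cpumask *mask,
				      void *info)
{
	unsigned long flags;

	local_irq_save(flags);		/* irqs now off */
	on_each_cpu_mask(mask, flush_remote, info, true);
	/*
	 * With the old !SMP macro, on_each_cpu_mask() ended in
	 * local_irq_enable(), so irqs are already back on at this
	 * point and the remainder of the critical section runs
	 * unprotected; local_irq_restore() below cannot undo that.
	 */
	local_irq_restore(flags);
}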
Since they are not trivial functions, make them non-inline by moving
them to up.c. This also means we do not have to fix the #include
dependencies for preempt_{disable,enable}.
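For reference, a sketch of what the out-of-line !SMP versions in
kernel/up.c might look like after this change. kernel/up.c is outside
this diffstat, so this is not the verbatim new code; it mirrors the
removed macros below with local_irq_{save,restore}() substituted for
local_irq_{disable,enable}():

#include <linux/export.h>
#include <linux/smp.h>

/*
 * Note we still need to test the mask even for UP, because we can get
 * an empty mask from code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);	/* preserve caller's irq state */
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * Preemption is disabled here to make sure cond_func is called under
 * the same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond);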
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/smp.h')
-rw-r--r--	include/linux/smp.h	62
1 file changed, 16 insertions(+), 46 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c8488763277f..3724a9070907 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -29,6 +29,22 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
 
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+		void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+		smp_call_func_t func, void *info, bool wait,
+		gfp_t gfp_flags);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -101,22 +117,6 @@ static inline void call_function_init(void) { }
 int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 /*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
-
-/*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
  */
@@ -151,36 +151,6 @@ static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
 	return 0;
 }
 
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
-	do { \
-		if (cpumask_test_cpu(0, (mask))) { \
-			local_irq_disable(); \
-			(func)(info); \
-			local_irq_enable(); \
-		} \
-	} while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
-	do { \
-		void *__info = (info); \
-		preempt_disable(); \
-		if ((cond_func)(0, __info)) { \
-			local_irq_disable(); \
-			(func)(__info); \
-			local_irq_enable(); \
-		} \
-		preempt_enable(); \
-	} while (0)
-
 static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
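
With the declarations hoisted above #ifdef CONFIG_SMP, a caller can now
be written once for both configurations. A toy usage sketch follows;
every identifier except the on_each_cpu_* API itself is invented:

#include <linux/gfp.h>
#include <linux/smp.h>

/* Invented example work item and predicate. */
static void drain_local_queue(void *info)
{
	/* runs on each selected CPU with irqs disabled */
}

static bool queue_nonempty(int cpu, void *info)
{
	return true;	/* pretend every CPU has pending work */
}

static void drain_all_queues(void *info)
{
	/* the same call compiles on UP and SMP */
	on_each_cpu_cond(queue_nonempty, drain_local_queue, info,
			 true, GFP_KERNEL);
}

On SMP, gfp_flags is used to allocate the temporary cpumask of CPUs for
which cond_func returned true; on UP it is simply ignored, as the sketch
in the commit message above shows.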