diff options
| author | David Daney <david.daney@cavium.com> | 2013-09-11 17:23:24 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-11 18:58:23 -0400 |
| commit | fa688207c9db48b64ab6538abc3fcdf26110b9ec (patch) | |
| tree | 47fff6ebaa5b0b7d3feca64010051899e29db475 | |
| parent | c14c338cb05c700a260480c197cfd6da8f8b7d2e (diff) | |
smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in
!SMP version of on_each_cpu()"), we don't want to enable irqs if they
are not already enabled. There are currently no known problematical
callers of these functions, but since it is a known failure pattern, we
preemptively fix them.
Since they are not trivial functions, make them non-inline by moving
them to up.c. This also makes it so we don't have to fix #include
dependencies for preempt_{disable,enable}.
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | include/linux/smp.h | 62 | ||||
| -rw-r--r-- | kernel/up.c | 39 |
2 files changed, 55 insertions, 46 deletions
diff --git a/include/linux/smp.h b/include/linux/smp.h index c8488763277f..3724a9070907 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -29,6 +29,22 @@ extern unsigned int total_cpus; | |||
| 29 | int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, | 29 | int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, |
| 30 | int wait); | 30 | int wait); |
| 31 | 31 | ||
| 32 | /* | ||
| 33 | * Call a function on processors specified by mask, which might include | ||
| 34 | * the local one. | ||
| 35 | */ | ||
| 36 | void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, | ||
| 37 | void *info, bool wait); | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Call a function on each processor for which the supplied function | ||
| 41 | * cond_func returns a positive value. This may include the local | ||
| 42 | * processor. | ||
| 43 | */ | ||
| 44 | void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), | ||
| 45 | smp_call_func_t func, void *info, bool wait, | ||
| 46 | gfp_t gfp_flags); | ||
| 47 | |||
| 32 | #ifdef CONFIG_SMP | 48 | #ifdef CONFIG_SMP |
| 33 | 49 | ||
| 34 | #include <linux/preempt.h> | 50 | #include <linux/preempt.h> |
| @@ -101,22 +117,6 @@ static inline void call_function_init(void) { } | |||
| 101 | int on_each_cpu(smp_call_func_t func, void *info, int wait); | 117 | int on_each_cpu(smp_call_func_t func, void *info, int wait); |
| 102 | 118 | ||
| 103 | /* | 119 | /* |
| 104 | * Call a function on processors specified by mask, which might include | ||
| 105 | * the local one. | ||
| 106 | */ | ||
| 107 | void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, | ||
| 108 | void *info, bool wait); | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Call a function on each processor for which the supplied function | ||
| 112 | * cond_func returns a positive value. This may include the local | ||
| 113 | * processor. | ||
| 114 | */ | ||
| 115 | void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), | ||
| 116 | smp_call_func_t func, void *info, bool wait, | ||
| 117 | gfp_t gfp_flags); | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Mark the boot cpu "online" so that it can call console drivers in | 120 | * Mark the boot cpu "online" so that it can call console drivers in |
| 121 | * printk() and can access its per-cpu storage. | 121 | * printk() and can access its per-cpu storage. |
| 122 | */ | 122 | */ |
| @@ -151,36 +151,6 @@ static inline int on_each_cpu(smp_call_func_t func, void *info, int wait) | |||
| 151 | return 0; | 151 | return 0; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | /* | ||
| 155 | * Note we still need to test the mask even for UP | ||
| 156 | * because we actually can get an empty mask from | ||
| 157 | * code that on SMP might call us without the local | ||
| 158 | * CPU in the mask. | ||
| 159 | */ | ||
| 160 | #define on_each_cpu_mask(mask, func, info, wait) \ | ||
| 161 | do { \ | ||
| 162 | if (cpumask_test_cpu(0, (mask))) { \ | ||
| 163 | local_irq_disable(); \ | ||
| 164 | (func)(info); \ | ||
| 165 | local_irq_enable(); \ | ||
| 166 | } \ | ||
| 167 | } while (0) | ||
| 168 | /* | ||
| 169 | * Preemption is disabled here to make sure the cond_func is called under the | ||
| 170 | * same conditions in UP and SMP. | ||
| 171 | */ | ||
| 172 | #define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\ | ||
| 173 | do { \ | ||
| 174 | void *__info = (info); \ | ||
| 175 | preempt_disable(); \ | ||
| 176 | if ((cond_func)(0, __info)) { \ | ||
| 177 | local_irq_disable(); \ | ||
| 178 | (func)(__info); \ | ||
| 179 | local_irq_enable(); \ | ||
| 180 | } \ | ||
| 181 | preempt_enable(); \ | ||
| 182 | } while (0) | ||
| 183 | |||
| 184 | static inline void smp_send_reschedule(int cpu) { } | 154 | static inline void smp_send_reschedule(int cpu) { } |
| 185 | #define smp_prepare_boot_cpu() do {} while (0) | 155 | #define smp_prepare_boot_cpu() do {} while (0) |
| 186 | #define smp_call_function_many(mask, func, info, wait) \ | 156 | #define smp_call_function_many(mask, func, info, wait) \ |
diff --git a/kernel/up.c b/kernel/up.c index c54c75e9faf7..144e57255234 100644 --- a/kernel/up.c +++ b/kernel/up.c | |||
| @@ -19,3 +19,42 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 19 | return 0; | 19 | return 0; |
| 20 | } | 20 | } |
| 21 | EXPORT_SYMBOL(smp_call_function_single); | 21 | EXPORT_SYMBOL(smp_call_function_single); |
| 22 | |||
| 23 | /* | ||
| 24 | * Note we still need to test the mask even for UP | ||
| 25 | * because we actually can get an empty mask from | ||
| 26 | * code that on SMP might call us without the local | ||
| 27 | * CPU in the mask. | ||
| 28 | */ | ||
| 29 | void on_each_cpu_mask(const struct cpumask *mask, | ||
| 30 | smp_call_func_t func, void *info, bool wait) | ||
| 31 | { | ||
| 32 | unsigned long flags; | ||
| 33 | |||
| 34 | if (cpumask_test_cpu(0, mask)) { | ||
| 35 | local_irq_save(flags); | ||
| 36 | func(info); | ||
| 37 | local_irq_restore(flags); | ||
| 38 | } | ||
| 39 | } | ||
| 40 | EXPORT_SYMBOL(on_each_cpu_mask); | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Preemption is disabled here to make sure the cond_func is called under the | ||
| 44 | * same conditions in UP and SMP. | ||
| 45 | */ | ||
| 46 | void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), | ||
| 47 | smp_call_func_t func, void *info, bool wait, | ||
| 48 | gfp_t gfp_flags) | ||
| 49 | { | ||
| 50 | unsigned long flags; | ||
| 51 | |||
| 52 | preempt_disable(); | ||
| 53 | if (cond_func(0, info)) { | ||
| 54 | local_irq_save(flags); | ||
| 55 | func(info); | ||
| 56 | local_irq_restore(flags); | ||
| 57 | } | ||
| 58 | preempt_enable(); | ||
| 59 | } | ||
| 60 | EXPORT_SYMBOL(on_each_cpu_cond); | ||
