about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorDavid Daney <david.daney@cavium.com>2013-09-11 17:23:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 18:58:23 -0400
commitfa688207c9db48b64ab6538abc3fcdf26110b9ec (patch)
tree47fff6ebaa5b0b7d3feca64010051899e29db475 /kernel
parentc14c338cb05c700a260480c197cfd6da8f8b7d2e (diff)
smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in !SMP version of on_each_cpu()"), we don't want to enable irqs if they are not already enabled. There are currently no known problematical callers of these functions, but since it is a known failure pattern, we preemptively fix them. Since they are not trivial functions, make them non-inline by moving them to up.c. This also makes it so we don't have to fix #include dependencies for preempt_{disable,enable}. Signed-off-by: David Daney <david.daney@cavium.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/up.c39
1 file changed, 39 insertions, 0 deletions
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e9faf7..144e57255234 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -19,3 +19,42 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function_single);
22
23/*
24 * Note we still need to test the mask even for UP
25 * because we actually can get an empty mask from
26 * code that on SMP might call us without the local
27 * CPU in the mask.
28 */
29void on_each_cpu_mask(const struct cpumask *mask,
30 smp_call_func_t func, void *info, bool wait)
31{
32 unsigned long flags;
33
34 if (cpumask_test_cpu(0, mask)) {
35 local_irq_save(flags);
36 func(info);
37 local_irq_restore(flags);
38 }
39}
40EXPORT_SYMBOL(on_each_cpu_mask);
41
42/*
43 * Preemption is disabled here to make sure the cond_func is called under the
44 * same condtions in UP and SMP.
45 */
46void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
47 smp_call_func_t func, void *info, bool wait,
48 gfp_t gfp_flags)
49{
50 unsigned long flags;
51
52 preempt_disable();
53 if (cond_func(0, info)) {
54 local_irq_save(flags);
55 func(info);
56 local_irq_restore(flags);
57 }
58 preempt_enable();
59}
60EXPORT_SYMBOL(on_each_cpu_cond);