aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorDavid Daney <david.daney@cavium.com>2013-09-11 17:23:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 18:58:25 -0400
commit202da400570d991bacda4a06e878cb901e96a783 (patch)
tree135d76acc021c2a67b8c24dc1084b3d6cf8e01e5 /kernel
parentf9597f24c089dcbddbd2d9e99fbf00df57fb70c6 (diff)
kernel/smp.c: quit unconditionally enabling irqs in on_each_cpu_mask().
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in !SMP version of on_each_cpu()"), we don't want to enable irqs if they are not already enabled. I don't know of any bugs currently caused by this unconditional local_irq_enable(), but I want to use this function in MIPS/OCTEON early boot (when we have early_boot_irqs_disabled). This also makes this function have similar semantics to on_each_cpu() which is good in itself. Signed-off-by: David Daney <david.daney@cavium.com> Cc: Gilad Ben-Yossef <gilad@benyossef.com> Cc: Christoph Lameter <cl@linux.com> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/smp.c | 11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 3bb6ae533cdf..0564571dcdf7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -575,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu);
  *
  * If @wait is true, then returns once @func has returned.
  *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.  The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 		      void *info, bool wait)
@@ -585,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 
 	smp_call_function_many(mask, func, info, wait);
 	if (cpumask_test_cpu(cpu, mask)) {
-		local_irq_disable();
+		unsigned long flags;
+		local_irq_save(flags);
 		func(info);
-		local_irq_enable();
+		local_irq_restore(flags);
 	}
 	put_cpu();
 }