author		Andrew Morton <akpm@osdl.org>	2006-03-22 03:08:16 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:59 -0500
commit		78eef01b0fae087c5fadbd85dd4fe2918c3a015f
tree		78057039596aa733ff904a36260cca3a51af6981
parent		ac2b898ca6fb06196a26869c23b66afe7944e52e
[PATCH] on_each_cpu(): disable local interrupts
When on_each_cpu() runs the callback on other CPUs, it runs with local
interrupts disabled.  So we should run the function with local interrupts
disabled on this CPU, too.

And do the same for UP, so the callback is run in the same environment on
both UP and SMP.  (Strictly it should do preempt_disable() too, but I think
local_irq_disable is sufficiently equivalent.)

Also uninlines on_each_cpu().  softirq.c was the most appropriate file I
could find, but it doesn't seem to justify creating a new file.

Oh, and fix up that comment over (under?) x86's smp_call_function().  It
drives me nuts.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
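[Editorial note: to make the new guarantee concrete, here is a minimal sketch of a caller. It is not part of this patch; the per-CPU counter and both function names are invented for illustration, and it assumes the 2.6.16-era four-argument on_each_cpu() signature shown below. After this change the callback may rely on local interrupts being disabled on every CPU it runs on, including the calling one.]

	#include <linux/smp.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU counter, bumped from an interrupt handler. */
	static DEFINE_PER_CPU(unsigned long, event_count);

	/*
	 * Runs on every CPU with local interrupts disabled, so it cannot
	 * race with the interrupt handler on the same CPU -- on the
	 * calling CPU as well, now that on_each_cpu() disables IRQs
	 * around the local invocation too.
	 */
	static void reset_event_count(void *unused)
	{
		__get_cpu_var(event_count) = 0;
	}

	void reset_all_event_counts(void)
	{
		/* retry=0, wait=1: return only after every CPU has run it */
		on_each_cpu(reset_event_count, NULL, 0, 1);
	}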
-rw-r--r--	arch/i386/kernel/smp.c	28
-rw-r--r--	include/linux/smp.h	23
-rw-r--r--	kernel/softirq.c	20
3 files changed, 41 insertions(+), 30 deletions(-)
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 218d725a5a1e..d134e9643a58 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -504,27 +504,23 @@ void unlock_ipi_call_lock(void)
 	spin_unlock_irq(&call_lock);
 }
 
-static struct call_data_struct * call_data;
-
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			int wait)
-/*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
+static struct call_data_struct *call_data;
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
  * remote CPUs are nearly ready to execute <<func>> or are or have executed.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+			int wait)
 {
 	struct call_data_struct data;
 	int cpus;
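[Editorial note: the kerneldoc above spells out the calling convention. A minimal sketch of a well-formed caller follows; it is not from this patch, both function names are invented, and it assumes the i386 four-argument signature (func, info, nonatomic, wait).]

	#include <linux/smp.h>

	static void drain_local_queue(void *info)
	{
		/*
		 * Runs on each *other* CPU in IPI context: must be fast,
		 * must not block, and must not re-enable interrupts.
		 */
		(void)info;
	}

	void drain_remote_queues(void)
	{
		/*
		 * Per the comment above: call only from process context
		 * with interrupts enabled, never from a hardware interrupt
		 * or bottom half handler.  nonatomic=0, wait=1 makes this
		 * return only once all other CPUs have run the callback.
		 */
		smp_call_function(drain_local_queue, NULL, 0, 1);
	}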
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 44153fdf73fc..d699a16b0cb2 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-extern int smp_call_function (void (*func) (void *info), void *info,
-				int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
 
 /*
  * Call a function on all processors
  */
-static inline int on_each_cpu(void (*func) (void *info), void *info,
-				int retry, int wait)
-{
-	int ret = 0;
-
-	preempt_disable();
-	ret = smp_call_function(func, info, retry, wait);
-	func(info);
-	preempt_enable();
-	return ret;
-}
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
 
 #define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPU's */
 #define MSG_ALL			0x8001
@@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void);
 #define raw_smp_processor_id()			0
 #define hard_smp_processor_id()			0
 #define smp_call_function(func,info,retry,wait)	({ 0; })
-#define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
+#define on_each_cpu(func,info,retry,wait)	\
+	({					\
+		local_irq_disable();		\
+		func(info);			\
+		local_irq_enable();		\
+		0;				\
+	})
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ad3295cdded5..ec8fed42a86f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/smp.h>
 
 #include <asm/irq.h>
 /*
@@ -495,3 +496,22 @@ __init int spawn_ksoftirqd(void)
 	register_cpu_notifier(&cpu_nfb);
 	return 0;
 }
+
+#ifdef CONFIG_SMP
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, retry, wait);
+	local_irq_disable();
+	func(info);
+	local_irq_enable();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
+#endif