author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-05-06 12:49:21 -0400
committer	Tejun Heo <tj@kernel.org>			2010-05-06 12:49:21 -0400
commit		cc631fb732b8ccd6a0cc45557475ea09b0c21a68 (patch)
tree		37e8c84f227a21d628a9d3a96dbbf5fedd506db1 /kernel
parent		94458d5ecb3da844823cc191e73e5c5ead98a464 (diff)
sched: correctly place paranoia memory barriers in synchronize_sched_expedited()
The memory barriers must live in the SMP case, not in the !SMP case,
where smp_mb() degenerates to a mere compiler barrier and there are no
other CPUs to order against. Also add a barrier after the atomic_inc()
to ensure that other CPUs see post-synchronize_sched_expedited()
actions as following the expedited grace period.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
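
The pairing that the second barrier establishes can be illustrated in
ordinary userspace C. The sketch below is not the kernel's code:
gp_count, shared_data, updater() and observer() are hypothetical
stand-ins, C11 relaxed atomics model the kernel's unordered
atomic_inc(), and seq_cst fences model smp_mb() and
smp_mb__after_atomic_inc().

/*
 * Minimal userspace sketch (C11 atomics) of the ordering pattern the
 * added barriers enforce.  gp_count plays the role of
 * synchronize_sched_expedited_count; the explicit fences play the
 * roles of the smp_mb()-family barriers, since a relaxed fetch_add,
 * like the kernel's atomic_inc(), implies no ordering by itself.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int gp_count;	/* stand-in for synchronize_sched_expedited_count */
static int shared_data;		/* write the "grace period" must publish */

static void *updater(void *unused)
{
	shared_data = 42;	/* work completed before the grace period ends */
	atomic_thread_fence(memory_order_seq_cst);	/* barriers implied by try_stop_cpus() */
	atomic_fetch_add_explicit(&gp_count, 1, memory_order_relaxed);	/* atomic_inc() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic_inc() */
	/* post-grace-period actions (e.g. freeing memory) would follow here */
	return NULL;
}

static void *observer(void *unused)
{
	/* A concurrent caller samples the count, as the retry loop does. */
	if (atomic_load_explicit(&gp_count, memory_order_relaxed) > 0) {
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() before returning */
		/* Seeing the new count guarantees the pre-GP write is visible. */
		printf("grace period observed, shared_data=%d\n", shared_data);
	}
	return NULL;
}

int main(void)
{
	pthread_t u, o;
	pthread_create(&u, NULL, updater, NULL);
	pthread_join(u, NULL);	/* run back-to-back so the observer's test fires */
	pthread_create(&o, NULL, observer, NULL);
	pthread_join(o, NULL);
	return 0;
}

Run concurrently rather than back-to-back, the observer may simply miss
the increment; the point of the fences is only that an observer which
does see the new count is also guaranteed to see everything ordered
before it, and that the updater's post-grace-period actions cannot
appear to precede the increment.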
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e9c6d798831a..155a16d52146 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8932,6 +8932,15 @@ struct cgroup_subsys cpuacct_subsys = {
 
 void synchronize_sched_expedited(void)
 {
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
 	/*
 	 * There must be a full memory barrier on each affected CPU
 	 * between the time that try_stop_cpus() is called and the
@@ -8943,16 +8952,7 @@ void synchronize_sched_expedited(void)
 	 * necessary. Do smp_mb() anyway for documentation and
 	 * robustness against future implementation changes.
 	 */
-	smp_mb();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
+	smp_mb(); /* See above comment block. */
 	return 0;
 }
 
@@ -8990,6 +8990,7 @@ void synchronize_sched_expedited(void)
 		get_online_cpus();
 	}
 	atomic_inc(&synchronize_sched_expedited_count);
+	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);