author     Tejun Heo <tj@kernel.org>  2010-05-06 12:49:21 -0400
committer  Tejun Heo <tj@kernel.org>  2010-05-06 12:49:21 -0400
commit     94458d5ecb3da844823cc191e73e5c5ead98a464 (patch)
tree       866f0ed3b4e522d98664cb4b2603cd0d34cfde0d /kernel/sched.c
parent     969c79215a35b06e5e3efe69b9412f858df7856c (diff)
sched: kill paranoia check in synchronize_sched_expedited()
The paranoid check which verifies that the cpu_stop callback is
actually called on all online cpus is completely superfluous. It's
guaranteed by the cpu_stop facility, and if it didn't work as
advertised other things would go horribly wrong and trying to recover
using synchronize_sched() wouldn't be very meaningful.
Kill the paranoid check. Removal of this feature is done as a
separate step so that it can serve as a bisection point if something
actually goes wrong.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Cc: Josh Triplett <josh@freedesktop.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  40
1 file changed, 3 insertions(+), 37 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f1d577a0a8ab..e9c6d798831a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8953,14 +8953,6 @@ static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
 
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
-	static DEFINE_SPINLOCK(done_mask_lock);
-	struct cpumask *done_mask = data;
-
-	if (done_mask) {
-		spin_lock(&done_mask_lock);
-		cpumask_set_cpu(smp_processor_id(), done_mask);
-		spin_unlock(&done_mask_lock);
-	}
 	return 0;
 }
 
@@ -8976,55 +8968,29 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
-	cpumask_var_t done_mask_var;
-	struct cpumask *done_mask = NULL;
 	int snap, trycount = 0;
 
-	/*
-	 * done_mask is used to check that all cpus actually have
-	 * finished running the stopper, which is guaranteed by
-	 * stop_cpus() if it's called with cpu hotplug blocked.  Keep
-	 * the paranoia for now but it's best effort if cpumask is off
-	 * stack.
-	 */
-	if (zalloc_cpumask_var(&done_mask_var, GFP_ATOMIC))
-		done_mask = done_mask_var;
-
 	smp_mb(); /* ensure prior mod happens before capturing snap. */
 	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
 	get_online_cpus();
 	while (try_stop_cpus(cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
-			     done_mask) == -EAGAIN) {
+			     NULL) == -EAGAIN) {
 		put_online_cpus();
 		if (trycount++ < 10)
 			udelay(trycount * num_online_cpus());
 		else {
 			synchronize_sched();
-			goto free_out;
+			return;
 		}
 		if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
 			smp_mb(); /* ensure test happens before caller kfree */
-			goto free_out;
+			return;
 		}
 		get_online_cpus();
 	}
 	atomic_inc(&synchronize_sched_expedited_count);
-	if (done_mask)
-		cpumask_xor(done_mask, done_mask, cpu_online_mask);
 	put_online_cpus();
-
-	/* paranoia - this can't happen */
-	if (done_mask && cpumask_weight(done_mask)) {
-		char buf[80];
-
-		cpulist_scnprintf(buf, sizeof(buf), done_mask);
-		WARN_ONCE(1, "synchronize_sched_expedited: cpu online and done masks disagree on %d cpus: %s\n",
-			  cpumask_weight(done_mask), buf);
-		synchronize_sched();
-	}
-free_out:
-	free_cpumask_var(done_mask_var);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
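For reference, this is how the two functions read after the patch is applied. It is reconstructed purely from the new side of the diff above, not an independent implementation; the stopper callback becomes a no-op and synchronize_sched_expedited() simply returns where it used to jump to the cleanup label.

	/* Post-patch state, reconstructed from the new side of the diff. */
	static int synchronize_sched_expedited_cpu_stop(void *data)
	{
		return 0;
	}

	void synchronize_sched_expedited(void)
	{
		int snap, trycount = 0;

		smp_mb(); /* ensure prior mod happens before capturing snap. */
		snap = atomic_read(&synchronize_sched_expedited_count) + 1;
		get_online_cpus();
		while (try_stop_cpus(cpu_online_mask,
				     synchronize_sched_expedited_cpu_stop,
				     NULL) == -EAGAIN) {
			put_online_cpus();
			if (trycount++ < 10)
				udelay(trycount * num_online_cpus());
			else {
				synchronize_sched();
				return;
			}
			if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
				smp_mb(); /* ensure test happens before caller kfree */
				return;
			}
			get_online_cpus();
		}
		atomic_inc(&synchronize_sched_expedited_count);
		put_online_cpus();
	}
	EXPORT_SYMBOL_GPL(synchronize_sched_expedited);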