author    Lai Jiangshan <laijs@cn.fujitsu.com>    2010-10-20 23:29:05 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2010-11-30 01:01:58 -0500
commit    7b27d5475f86186914e54e4a6bb994e9a985337b (patch)
tree      36349c88d75497db89956d6721d82c4f889cbc2b /kernel/sched.c
parent    deb7a41815a8a32d4f9ea2af7a48ed1175222cec (diff)
rcu,cleanup: move synchronize_sched_expedited() out of sched.c
The first version of synchronize_sched_expedited() used the migration code in the scheduler, and was therefore implemented in kernel/sched.c. However, the more recent version of this code no longer uses the migration code, so this commit moves it to the main RCU source files.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- kernel/sched.c | 69
1 file changed, 0 insertions, 69 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ae8f75a5ceb4..d1e8889872a1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9131,72 +9131,3 @@ struct cgroup_subsys cpuacct_subsys = {
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
 
-#ifndef CONFIG_SMP
-
-void synchronize_sched_expedited(void)
-{
-        barrier();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-        /*
-         * There must be a full memory barrier on each affected CPU
-         * between the time that try_stop_cpus() is called and the
-         * time that it returns.
-         *
-         * In the current initial implementation of cpu_stop, the
-         * above condition is already met when the control reaches
-         * this point and the following smp_mb() is not strictly
-         * necessary. Do smp_mb() anyway for documentation and
-         * robustness against future implementation changes.
-         */
-        smp_mb(); /* See above comment block. */
-        return 0;
-}
-
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
- */
-void synchronize_sched_expedited(void)
-{
-        int snap, trycount = 0;
-
-        smp_mb(); /* ensure prior mod happens before capturing snap. */
-        snap = atomic_read(&synchronize_sched_expedited_count) + 1;
-        get_online_cpus();
-        while (try_stop_cpus(cpu_online_mask,
-                             synchronize_sched_expedited_cpu_stop,
-                             NULL) == -EAGAIN) {
-                put_online_cpus();
-                if (trycount++ < 10)
-                        udelay(trycount * num_online_cpus());
-                else {
-                        synchronize_sched();
-                        return;
-                }
-                if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
-                        smp_mb(); /* ensure test happens before caller kfree */
-                        return;
-                }
-                get_online_cpus();
-        }
-        atomic_inc(&synchronize_sched_expedited_count);
-        smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
-        put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#endif /* #else #ifndef CONFIG_SMP */
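
For context on how callers use the function removed above: the deleted header comment describes an expedited, "big hammer" wait for an rcu-sched grace period. The sketch below is a hypothetical caller, not part of this commit; struct foo, read_foo(), and retire_foo() are invented names used only for illustration. It follows the usual RCU-sched retire pattern: unpublish the pointer, wait for pre-existing readers, then free.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
};

/* Reader side: an rcu-sched read-side critical section. */
static int read_foo(struct foo **gpp)
{
        struct foo *p;
        int val = -1;

        rcu_read_lock_sched();
        p = rcu_dereference_sched(*gpp);
        if (p)
                val = p->data;
        rcu_read_unlock_sched();
        return val;
}

/*
 * Writer side: unpublish the object, wait for all pre-existing
 * rcu-sched readers using the expedited primitive, then free.
 * Per the removed comment above, this must not be called while
 * holding any lock acquired by a CPU-hotplug notifier.
 */
static void retire_foo(struct foo **gpp)
{
        struct foo *old = *gpp;

        rcu_assign_pointer(*gpp, NULL);
        synchronize_sched_expedited();
        kfree(old);
}

In non-latency-critical paths, plain synchronize_sched() could be used in retire_foo() instead, avoiding the extra load the expedited variant places on all CPUs.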