Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  71
1 file changed, 0 insertions(+), 71 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 114a0deb2b0..04949089e76 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8067,8 +8067,6 @@ void __init sched_init(void)
 	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-	perf_event_init();
-
 	scheduler_running = 1;
 }
 
@@ -9241,72 +9239,3 @@ struct cgroup_subsys cpuacct_subsys = {
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
 
-#ifndef CONFIG_SMP
-
-void synchronize_sched_expedited(void)
-{
-	barrier();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-	/*
-	 * There must be a full memory barrier on each affected CPU
-	 * between the time that try_stop_cpus() is called and the
-	 * time that it returns.
-	 *
-	 * In the current initial implementation of cpu_stop, the
-	 * above condition is already met when the control reaches
-	 * this point and the following smp_mb() is not strictly
-	 * necessary. Do smp_mb() anyway for documentation and
-	 * robustness against future implementation changes.
-	 */
-	smp_mb(); /* See above comment block. */
-	return 0;
-}
-
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
- */
-void synchronize_sched_expedited(void)
-{
-	int snap, trycount = 0;
-
-	smp_mb(); /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
-	get_online_cpus();
-	while (try_stop_cpus(cpu_online_mask,
-			     synchronize_sched_expedited_cpu_stop,
-			     NULL) == -EAGAIN) {
-		put_online_cpus();
-		if (trycount++ < 10)
-			udelay(trycount * num_online_cpus());
-		else {
-			synchronize_sched();
-			return;
-		}
-		if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			return;
-		}
-		get_online_cpus();
-	}
-	atomic_inc(&synchronize_sched_expedited_count);
-	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
-	put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#endif /* #else #ifndef CONFIG_SMP */
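
For reference, the update-side pattern this primitive serves looks roughly
like the sketch below. This is a minimal illustration, not code from this
commit: struct foo, global_foo, read_foo() and update_foo() are hypothetical
names. The final smp_mb() in synchronize_sched_expedited() ("ensure post-GP
actions seen after GP") is what lets the updater kfree() the old structure
immediately on return.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical structure published through an RCU-sched-protected pointer. */
struct foo {
	int data;
};

static struct foo *global_foo;

/*
 * Reader: any preemption-disabled region is an RCU-sched read-side
 * critical section, so try_stop_cpus() cannot complete on a CPU until
 * regions like this one have finished there.
 */
static int read_foo(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock_sched();
	p = rcu_dereference_sched(global_foo);
	if (p)
		val = p->data;
	rcu_read_unlock_sched();
	return val;
}

/*
 * Updater: publish the replacement, force an expedited grace period,
 * then free the old version, which no reader can still reference.
 */
static void update_foo(struct foo *newp)
{
	struct foo *oldp = global_foo;

	rcu_assign_pointer(global_foo, newp);
	synchronize_sched_expedited(); /* all pre-existing readers done. */
	kfree(oldp);
}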
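
The "illegal to call this function while holding any lock that is acquired by
a CPU-hotplug notifier" restriction follows from the get_online_cpus() /
put_online_cpus() pair in the wait loop. A sketch of the forbidden pattern,
using a hypothetical mutex my_lock and notifier my_cpu_callback (neither is
from this commit):

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

/* Hotplug notifier that acquires my_lock. */
static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	mutex_lock(&my_lock);
	/* ... per-CPU setup/teardown work ... */
	mutex_unlock(&my_lock);
	return NOTIFY_OK;
}

/*
 * Deadlock: while a hotplug operation is in flight, get_online_cpus()
 * inside synchronize_sched_expedited() blocks until it completes, but
 * the hotplug path is blocked in my_cpu_callback() waiting for my_lock,
 * which this caller holds.
 */
static void broken_caller(void)
{
	mutex_lock(&my_lock);
	synchronize_sched_expedited(); /* may never return. */
	mutex_unlock(&my_lock);
}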