about summary refs log tree commit diff stats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
authorBen Segall <bsegall@google.com>2013-10-16 14:16:12 -0400
committerIngo Molnar <mingo@kernel.org>2013-10-29 07:02:19 -0400
commit1ee14e6c8cddeeb8a490d7b54cd9016e4bb900b4 (patch)
tree1fe48f0122fdda3bf75bb637a9e103e541598e8a /kernel/sched/core.c
parentac9ff7997b6f2b31949dcd2495ac671fd9ddc990 (diff)
sched: Fix race on toggling cfs_bandwidth_used
When we transition cfs_bandwidth_used to false, any currently throttled groups will incorrectly return false from cfs_rq_throttled. While tg_set_cfs_bandwidth will unthrottle them eventually, currently running code (including at least dequeue_task_fair and distribute_cfs_runtime) will cause errors. Fix this by turning off cfs_bandwidth_used only after unthrottling all cfs_rqs. Tested: toggle bandwidth back and forth on a loaded cgroup. Caused crashes in minutes without the patch, hasn't crashed with it. Signed-off-by: Ben Segall <bsegall@google.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: pjt@google.com Link: http://lkml.kernel.org/r/20131016181611.22647.80365.stgit@sword-of-the-dawn.mtv.corp.google.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- kernel/sched/core.c | 9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7c61f313521d..450a34b2a637 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7436,7 +7436,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7436 7436
7437 runtime_enabled = quota != RUNTIME_INF; 7437 runtime_enabled = quota != RUNTIME_INF;
7438 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7438 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7439 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled); 7439 /*
7440 * If we need to toggle cfs_bandwidth_used, off->on must occur
7441 * before making related changes, and on->off must occur afterwards
7442 */
7443 if (runtime_enabled && !runtime_was_enabled)
7444 cfs_bandwidth_usage_inc();
7440 raw_spin_lock_irq(&cfs_b->lock); 7445 raw_spin_lock_irq(&cfs_b->lock);
7441 cfs_b->period = ns_to_ktime(period); 7446 cfs_b->period = ns_to_ktime(period);
7442 cfs_b->quota = quota; 7447 cfs_b->quota = quota;
@@ -7462,6 +7467,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7462 unthrottle_cfs_rq(cfs_rq); 7467 unthrottle_cfs_rq(cfs_rq);
7463 raw_spin_unlock_irq(&rq->lock); 7468 raw_spin_unlock_irq(&rq->lock);
7464 } 7469 }
7470 if (runtime_was_enabled && !runtime_enabled)
7471 cfs_bandwidth_usage_dec();
7465out_unlock: 7472out_unlock:
7466 mutex_unlock(&cfs_constraints_mutex); 7473 mutex_unlock(&cfs_constraints_mutex);
7467 7474