author     Ben Segall <bsegall@google.com>                  2013-10-16 14:16:12 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2014-01-15 18:28:54 -0500
commit     9d80092f8d9e0fc4aa2b6a7c8d2e4a7437899ca5 (patch)
tree       c1147a830101185a34a04fc263f0d48f36b941ba /kernel
parent     e23fe36a8cf5faa57d0c45868a3f7679c4f07cb0 (diff)
sched: Fix race on toggling cfs_bandwidth_used
commit 1ee14e6c8cddeeb8a490d7b54cd9016e4bb900b4 upstream.

When we transition cfs_bandwidth_used to false, any currently
throttled groups will incorrectly return false from cfs_rq_throttled.
While tg_set_cfs_bandwidth will unthrottle them eventually, currently
running code (including at least dequeue_task_fair and
distribute_cfs_runtime) will cause errors.

Fix this by turning off cfs_bandwidth_used only after unthrottling
all cfs_rqs.

Tested: toggle bandwidth back and forth on a loaded cgroup. Caused
crashes in minutes without the patch, hasn't crashed with it.

Signed-off-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: pjt@google.com
Link: http://lkml.kernel.org/r/20131016181611.22647.80365.stgit@sword-of-the-dawn.mtv.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Chris J Arges <chris.j.arges@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
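The ordering the patch depends on can be sketched in plain user-space C. This is only an illustrative analogue, not kernel code: bandwidth_used, set_bandwidth() and the *_sketch() helpers are hypothetical stand-ins for the __cfs_bandwidth_used jump label and tg_set_cfs_bandwidth(); the point it demonstrates is the same one the patch enforces, namely that the off->on transition happens before the related changes and the on->off transition only after every cfs_rq has been unthrottled, so a reader never observes "bandwidth unused" while a group is still throttled.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for the __cfs_bandwidth_used jump label. */
static atomic_bool bandwidth_used;

/* Readers (the analogue of cfs_rq_throttled() callers) test this flag. */
static bool cfs_bandwidth_used_sketch(void)
{
        return atomic_load(&bandwidth_used);
}

static void cfs_bandwidth_usage_inc_sketch(void)
{
        atomic_store(&bandwidth_used, true);
}

static void cfs_bandwidth_usage_dec_sketch(void)
{
        atomic_store(&bandwidth_used, false);
}

/*
 * Mirrors the ordering used by the patched tg_set_cfs_bandwidth():
 * off->on is flipped before the quota changes take effect, and
 * on->off is flipped only after all throttled groups would have
 * been unthrottled.
 */
static void set_bandwidth(bool runtime_enabled, bool runtime_was_enabled)
{
        if (runtime_enabled && !runtime_was_enabled)
                cfs_bandwidth_usage_inc_sketch();

        /* ... update quota/period and unthrottle every cfs_rq here ... */

        if (runtime_was_enabled && !runtime_enabled)
                cfs_bandwidth_usage_dec_sketch();
}

int main(void)
{
        set_bandwidth(true, false);   /* enable: flag goes on up front */
        set_bandwidth(false, true);   /* disable: flag goes off last */
        return cfs_bandwidth_used_sketch() ? 1 : 0;
}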
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c    9
-rw-r--r--  kernel/sched/fair.c   16
-rw-r--r--  kernel/sched/sched.h   3
3 files changed, 19 insertions, 9 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9e35b1e7713..b4308d7da339 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7812,7 +7812,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
         runtime_enabled = quota != RUNTIME_INF;
         runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
-        account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+        /*
+         * If we need to toggle cfs_bandwidth_used, off->on must occur
+         * before making related changes, and on->off must occur afterwards
+         */
+        if (runtime_enabled && !runtime_was_enabled)
+                cfs_bandwidth_usage_inc();
         raw_spin_lock_irq(&cfs_b->lock);
         cfs_b->period = ns_to_ktime(period);
         cfs_b->quota = quota;
@@ -7838,6 +7843,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                 unthrottle_cfs_rq(cfs_rq);
                 raw_spin_unlock_irq(&rq->lock);
         }
+        if (runtime_was_enabled && !runtime_enabled)
+                cfs_bandwidth_usage_dec();
 out_unlock:
         mutex_unlock(&cfs_constraints_mutex);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ce60006132b1..54e85898c390 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2029,13 +2029,14 @@ static inline bool cfs_bandwidth_used(void)
         return static_key_false(&__cfs_bandwidth_used);
 }
 
-void account_cfs_bandwidth_used(int enabled, int was_enabled)
+void cfs_bandwidth_usage_inc(void)
 {
-        /* only need to count groups transitioning between enabled/!enabled */
-        if (enabled && !was_enabled)
-                static_key_slow_inc(&__cfs_bandwidth_used);
-        else if (!enabled && was_enabled)
-                static_key_slow_dec(&__cfs_bandwidth_used);
+        static_key_slow_inc(&__cfs_bandwidth_used);
+}
+
+void cfs_bandwidth_usage_dec(void)
+{
+        static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
@@ -2043,7 +2044,8 @@ static bool cfs_bandwidth_used(void)
         return true;
 }
 
-void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+void cfs_bandwidth_usage_inc(void) {}
+void cfs_bandwidth_usage_dec(void) {}
 #endif /* HAVE_JUMP_LABEL */
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d6155..dfa31d533e3f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1318,7 +1318,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {