author     Roman Gushchin <klamm@yandex-team.ru>  2014-05-19 07:10:09 -0400
committer  Ingo Molnar <mingo@kernel.org>         2014-06-05 05:51:34 -0400
commit     09dc4ab03936df5c5aa711d27c81283c6d09f495
tree       58266dad582dcf05a46036b2a75a5b9de57872ca /kernel/sched
parent     0f397f2c90ce68821ee864c2c53baafe78de765d
sched/fair: Fix tg_set_cfs_bandwidth() deadlock on rq->lock
tg_set_cfs_bandwidth() sets cfs_b->timer_active to 0 to
force the period timer restart. This is not safe, because
it can lead to the deadlock described in commit 927b54fccbf0:
"__start_cfs_bandwidth calls hrtimer_cancel while holding rq->lock,
waiting for the hrtimer to finish. However, if sched_cfs_period_timer
runs for another loop iteration, the hrtimer can attempt to take
rq->lock, resulting in deadlock."
Three CPUs must be involved:

  CPU0                          CPU1                        CPU2
  take rq->lock                 period timer fired
  ...                           take cfs_b lock
  ...                           ...                         tg_set_cfs_bandwidth()
  throttle_cfs_rq()             release cfs_b lock          take cfs_b lock
  ...                           distribute_cfs_runtime()    timer_active = 0
  take cfs_b->lock              wait for rq->lock           ...
  __start_cfs_bandwidth()
  {wait for timer callback
   break if timer_active == 1}

So, CPU0 and CPU1 are deadlocked.
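
To make the diagram concrete, this is the loop CPU0 is stuck in: a sketch
of the pre-patch __start_cfs_bandwidth(), reconstructed from the fair.c
hunks below; the while-condition and the start_bandwidth_timer() tail are
recalled from the surrounding code of that kernel rather than shown in this
diff. Comments map the steps onto the scenario above:

  void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  {
          /* CPU0 enters here holding rq->lock and cfs_b->lock */
          while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
                 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
                  /* the callback (CPU1) is running: drop cfs_b->lock
                   * so it can finish */
                  raw_spin_unlock(&cfs_b->lock);
                  cpu_relax();
                  raw_spin_lock(&cfs_b->lock);
                  /* if someone else restarted the timer then we're done */
                  if (cfs_b->timer_active)
                          return; /* never taken: CPU2 stored timer_active = 0 */
          }

          /* never reached: CPU1's callback spins on rq->lock held by CPU0 */
          cfs_b->timer_active = 1;
          start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
  }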
Instead of resetting cfs_b->timer_active, tg_set_cfs_bandwidth can
wait for period timer callbacks (ignoring cfs_b->timer_active) and
restart the timer explicitly.
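
On the caller side, the core.c hunk below reduces to this: tg_set_cfs_bandwidth()
no longer clears timer_active, and the new force argument makes the wait loop in
__start_cfs_bandwidth() skip its timer_active early exit (the "!force &&" check
in the fair.c hunk), so concurrent waiters such as CPU0 above can still break
out when they see timer_active == 1:

  /* restart the period timer (if active) to handle new period expiry */
  if (runtime_enabled && cfs_b->timer_active) {
          /* force a reprogram: no timer_active = 0 store any more */
          __start_cfs_bandwidth(cfs_b, true);
  }

Only the forced-restart path pays the cancel-and-reprogram cost; the common
paths pass force = false and keep their old early-exit behaviour.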
Signed-off-by: Roman Gushchin <klamm@yandex-team.ru>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/87wqdi9g8e.wl%klamm@yandex-team.ru
Cc: pjt@google.com
Cc: chris.j.arges@canonical.com
Cc: gregkh@linuxfoundation.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')

 kernel/sched/core.c  | 3 +--
 kernel/sched/fair.c  | 8 ++++----
 kernel/sched/sched.h | 2 +-
 3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 540542b93f9f..f3f48e7a2dda 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7751,8 +7751,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	/* restart the period timer (if active) to handle new period expiry */
 	if (runtime_enabled && cfs_b->timer_active) {
 		/* force a reprogram */
-		cfs_b->timer_active = 0;
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, true);
 	}
 	raw_spin_unlock_irq(&cfs_b->lock);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b4768c069392..8cbe2d2c16de 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3130,7 +3130,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	 */
 	if (!cfs_b->timer_active) {
 		__refill_cfs_bandwidth_runtime(cfs_b);
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	}
 
 	if (cfs_b->runtime > 0) {
@@ -3309,7 +3309,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	raw_spin_lock(&cfs_b->lock);
 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 	if (!cfs_b->timer_active)
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3691,7 +3691,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 }
 
 /* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 {
 	/*
 	 * The timer may be active because we're trying to set a new bandwidth
@@ -3706,7 +3706,7 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 		cpu_relax();
 		raw_spin_lock(&cfs_b->lock);
 		/* if someone else restarted the timer then we're done */
-		if (cfs_b->timer_active)
+		if (!force && cfs_b->timer_active)
 			return;
 	}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 456e492a3dca..369b4d663c42 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -278,7 +278,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);