author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-10-20 09:03:45 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-10-20 09:03:45 -0400
commit    14dbc56aa27d8b3f7cf99fa06e77fdc592b577c1 (patch)
tree      71456ce7ed9031067190a5717d91bbf336a714c4 /kernel
parent    9b00eb8ac2738b27a1469046f2ca76449dcdf04a (diff)
parent    9845c49cc9bbb317a0bc9e9cf78d8e09d54c9af0 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Ingo writes:
"scheduler fixes:
Two fixes: a CFS-throttling bug fix, and an interactivity fix."
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Fix the min_vruntime update logic in dequeue_entity()
sched/fair: Fix throttle_list starvation with low CFS quota
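
The second patch above is the CFS-throttling fix. distribute_cfs_runtime() drops cfs_b->lock while walking the throttled list, and with a very small quota it can exhaust the runtime after serving only the first few entries; because newly throttled runqueues were always added at the head of that list, queues near the tail could be starved indefinitely. The patch adds a distribute_running flag so that head insertion is used only while a distribution pass is in flight, and tail insertion otherwise. The following stand-alone toy model sketches that head-vs-tail decision; every name in it (toy_bw, toy_rq, throttle_old, ...) is invented for illustration and is not kernel code.

/*
 * Toy model of the throttled-list starvation addressed by
 * "sched/fair: Fix throttle_list starvation with low CFS quota".
 * All names are invented for this sketch; the real code is in
 * kernel/sched/fair.c.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_rq {
	int id;
	struct toy_rq *next;
};

struct toy_bw {
	struct toy_rq *throttled;	/* singly linked throttled list */
	bool distribute_running;	/* flag added by the fix */
};

/* Old behaviour: a newly throttled runqueue always goes to the head. */
static void throttle_old(struct toy_bw *bw, struct toy_rq *rq)
{
	rq->next = bw->throttled;
	bw->throttled = rq;
}

/*
 * New behaviour: head insertion only while a distribution pass is in
 * flight, so the running walker does not immediately see the newcomer;
 * otherwise append at the tail so earlier throttled queues are served
 * first.
 */
static void throttle_new(struct toy_bw *bw, struct toy_rq *rq)
{
	struct toy_rq **pp = &bw->throttled;

	if (bw->distribute_running) {
		throttle_old(bw, rq);
		return;
	}
	while (*pp)
		pp = &(*pp)->next;
	rq->next = NULL;
	*pp = rq;
}

/*
 * A distribution pass with a tiny quota: only the first queue on the
 * list gets unthrottled before the runtime is gone.
 */
static void distribute_once(struct toy_bw *bw)
{
	bw->distribute_running = true;
	if (bw->throttled) {
		printf("unthrottled rq %d\n", bw->throttled->id);
		bw->throttled = bw->throttled->next;
	}
	bw->distribute_running = false;
}

int main(void)
{
	struct toy_rq a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct toy_bw bw = { 0 };

	/*
	 * With throttle_old() each newcomer would jump ahead of rq 1;
	 * with throttle_new() the list stays FIFO and rq 1 is served first.
	 */
	throttle_new(&bw, &a);
	throttle_new(&bw, &b);
	throttle_new(&bw, &c);
	distribute_once(&bw);	/* prints "unthrottled rq 1" */
	return 0;
}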
Diffstat (limited to 'kernel')

-rw-r--r--	kernel/sched/fair.c	24
-rw-r--r--	kernel/sched/sched.h	2

2 files changed, 22 insertions, 4 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7fc4a371bdd2..908c9cdae2f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4001,7 +4001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * put back on, and if we advance min_vruntime, we'll be placed back
 	 * further than we started -- ie. we'll be penalized.
 	 */
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 }
 
@@ -4476,9 +4476,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/*
 	 * Add to the _head_ of the list, so that an already-started
-	 * distribute_cfs_runtime will not see us
+	 * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is
+	 * not running add to the tail so that later runqueues don't get starved.
 	 */
-	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	if (cfs_b->distribute_running)
+		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	else
+		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 
 	/*
 	 * If we're the first throttled task, make sure the bandwidth
@@ -4622,14 +4626,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	 * in us over-using our runtime if it is all used during this loop, but
 	 * only by limited amounts in that extreme case.
 	 */
-	while (throttled && cfs_b->runtime > 0) {
+	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
+		cfs_b->distribute_running = 1;
 		raw_spin_unlock(&cfs_b->lock);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime,
 						 runtime_expires);
 		raw_spin_lock(&cfs_b->lock);
 
+		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4740,6 +4746,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock(&cfs_b->lock);
+	if (cfs_b->distribute_running) {
+		raw_spin_unlock(&cfs_b->lock);
+		return;
+	}
+
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 		raw_spin_unlock(&cfs_b->lock);
 		return;
@@ -4749,6 +4760,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 		runtime = cfs_b->runtime;
 
 	expires = cfs_b->runtime_expires;
+	if (runtime)
+		cfs_b->distribute_running = 1;
+
 	raw_spin_unlock(&cfs_b->lock);
 
 	if (!runtime)
@@ -4759,6 +4773,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	raw_spin_lock(&cfs_b->lock);
 	if (expires == cfs_b->runtime_expires)
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
+	cfs_b->distribute_running = 0;
 	raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -4867,6 +4882,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
+	cfs_b->distribute_running = 0;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 455fa330de04..9683f458aec7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -346,6 +346,8 @@ struct cfs_bandwidth {
 	int			nr_periods;
 	int			nr_throttled;
 	u64			throttled_time;
+
+	bool			distribute_running;
 #endif
 };
 
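
The one-character dequeue_entity() change in the first hunk is the interactivity fix. min_vruntime should advance on a normal dequeue, but not when an entity is dequeued only to be saved and immediately re-enqueued (DEQUEUE_SAVE without DEQUEUE_MOVE), since the entity would then be re-placed against the advanced min_vruntime and be penalized; the original test had that condition inverted. A small stand-alone check of the flag combinations follows; the flag values mirror kernel/sched/sched.h, everything else is illustrative only.

/* Stand-alone illustration of the fixed flag test; not kernel code. */
#include <stdio.h>

#define DEQUEUE_SAVE	0x02	/* values as in kernel/sched/sched.h */
#define DEQUEUE_MOVE	0x04

/*
 * After the fix: skip the min_vruntime update only for a bare "save"
 * dequeue, i.e. an entity that is coming straight back on the runqueue.
 */
static int updates_min_vruntime(int flags)
{
	return (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;
}

int main(void)
{
	printf("plain dequeue -> %d\n", updates_min_vruntime(0));				/* 1 */
	printf("SAVE only     -> %d\n", updates_min_vruntime(DEQUEUE_SAVE));			/* 0 */
	printf("SAVE | MOVE   -> %d\n", updates_min_vruntime(DEQUEUE_SAVE | DEQUEUE_MOVE));	/* 1 */
	return 0;
}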