aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--kernel/sched_fair.c18
1 file changed, 17 insertions, 1 deletion
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a2089492a98..1d4acbea9e60 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2348,6 +2348,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
2348 if (unlikely(se == pse)) 2348 if (unlikely(se == pse))
2349 return; 2349 return;
2350 2350
2351 /*
2352 * This is possible from callers such as pull_task(), in which we
2353 * unconditionally check_preempt_curr() after an enqueue (which may have
2354 * led to a throttle). This both saves work and prevents false
2355 * next-buddy nomination below.
2356 */
2357 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
2358 return;
2359
2351 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 2360 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
2352 set_next_buddy(pse); 2361 set_next_buddy(pse);
2353 next_buddy_marked = 1; 2362 next_buddy_marked = 1;
@@ -2356,6 +2365,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
2356 /* 2365 /*
2357 * We can come here with TIF_NEED_RESCHED already set from new task 2366 * We can come here with TIF_NEED_RESCHED already set from new task
2358 * wake up path. 2367 * wake up path.
2368 *
2369 * Note: this also catches the edge-case of curr being in a throttled
2370 * group (e.g. via set_curr_task), since update_curr() (in the
2371 * enqueue of curr) will have resulted in resched being set. This
2372 * prevents us from potentially nominating it as a false LAST_BUDDY
2373 * below.
2359 */ 2374 */
2360 if (test_tsk_need_resched(curr)) 2375 if (test_tsk_need_resched(curr))
2361 return; 2376 return;
@@ -2474,7 +2489,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
2474{ 2489{
2475 struct sched_entity *se = &p->se; 2490 struct sched_entity *se = &p->se;
2476 2491
2477 if (!se->on_rq) 2492 /* throttled hierarchies are not runnable */
2493 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
2478 return false; 2494 return false;
2479 2495
2480 /* Tell the scheduler that we'd really like pse to run next. */ 2496 /* Tell the scheduler that we'd really like pse to run next. */