Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	16
1 file changed, 14 insertions, 2 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a6b1db8a0bd8..51aa3e102acb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1319,6 +1319,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	return 0;
 }
 
+static void set_last_buddy(struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		cfs_rq_of(se)->last = se;
+}
+
+static void set_next_buddy(struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		cfs_rq_of(se)->next = se;
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -1352,8 +1364,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	 * obvious reasons its a bad idea to schedule back to the idle thread.
 	 */
 	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
-		cfs_rq_of(se)->last = se;
-	cfs_rq_of(pse)->next = pse;
+		set_last_buddy(se);
+	set_next_buddy(pse);
 
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task