Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	83
1 file changed, 56 insertions, 27 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce514afd78ff..98345e45b059 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -341,23 +341,20 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->rb_leftmost = next_node;
 	}
 
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
-
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
-static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq->rb_leftmost;
-}
-
 static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 {
-	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
+	struct rb_node *left = cfs_rq->rb_leftmost;
+
+	if (!left)
+		return NULL;
+
+	return rb_entry(left, struct sched_entity, run_node);
 }
 
 static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
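The first hunk folds the old first_fair() helper into __pick_next_entity() and makes it NULL-safe: instead of asking callers to test first_fair() before converting the leftmost rb_node, the helper now returns NULL for an empty timeline, which lets pick_next_entity() below drop its own emptiness guard. A standalone sketch of that conversion, with minimal stand-ins for the kernel's rbtree types (illustrative only, not kernel code):

#include <stdio.h>
#include <stddef.h>

/* minimal stand-ins for the kernel's rbtree types */
struct rb_node { int dummy; };
struct sched_entity { long vruntime; struct rb_node run_node; };
struct cfs_rq { struct rb_node *rb_leftmost; };

/* container_of-style conversion, as the kernel's rb_entry() does */
#define rb_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;	/* empty queue: no separate first_fair() test needed */

	return rb_entry(left, struct sched_entity, run_node);
}

int main(void)
{
	struct sched_entity se = { .vruntime = 42 };
	struct cfs_rq rq = { .rb_leftmost = &se.run_node };

	printf("%ld\n", __pick_next_entity(&rq)->vruntime);	/* 42 */

	rq.rb_leftmost = NULL;
	printf("%p\n", (void *)__pick_next_entity(&rq));	/* NULL */
	return 0;
}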
@@ -719,6 +716,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -741,6 +747,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
+	clear_buddies(cfs_rq, se);
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
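These two hunks replace the old special case in __dequeue_entity(), which cleared only ->next, with a clear_buddies() helper that drops both hints whenever an entity leaves the runqueue, so the pick path can never hand back a sleeping entity. A minimal userspace model of that invariant (stand-in types, not kernel code):

#include <assert.h>
#include <stddef.h>

struct sched_entity { int on_rq; };
struct cfs_rq { struct sched_entity *next, *last; };

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	clear_buddies(cfs_rq, se);	/* drop stale hints before leaving the rq */
	se->on_rq = 0;
}

int main(void)
{
	struct sched_entity se = { .on_rq = 1 };
	struct cfs_rq rq = { .next = &se, .last = &se };

	dequeue_entity(&rq, &se);
	assert(rq.next == NULL && rq.last == NULL);	/* no dangling buddy */
	return 0;
}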
@@ -794,24 +802,15 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static int
 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 
-static struct sched_entity *
-pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
-		return se;
-
-	return cfs_rq->next;
-}
-
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
-	struct sched_entity *se = NULL;
+	struct sched_entity *se = __pick_next_entity(cfs_rq);
 
-	if (first_fair(cfs_rq)) {
-		se = __pick_next_entity(cfs_rq);
-		se = pick_next(cfs_rq, se);
-		set_next_entity(cfs_rq, se);
-	}
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
+		return cfs_rq->next;
+
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
+		return cfs_rq->last;
 
 	return se;
 }
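This hunk is the core of the change: pick_next_entity() now prefers the forward buddy (->next, the freshly woken task), then the new backward buddy (->last, the task that ran before it and is likely still cache hot), and only then the leftmost entity in the rbtree. A buddy is taken only when wakeup_preempt_entity() reports that the leftmost task would not be entitled to preempt it (return value < 1), which keeps the shortcut inside the fairness bound. A minimal userspace model of that pick order, with an illustrative granularity constant standing in for the kernel's wakeup_gran():

#include <stdio.h>
#include <stddef.h>

struct entity { const char *name; long long vruntime; };

/* stand-in for the wakeup granularity; the value is illustrative */
static const long long wakeup_gran = 4;

/* simplified wakeup_preempt_entity(): 1 if se should preempt curr */
static int preempts(const struct entity *curr, const struct entity *se)
{
	long long vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;
	if (vdiff > wakeup_gran)
		return 1;
	return 0;
}

static const struct entity *
pick(const struct entity *leftmost, const struct entity *next,
     const struct entity *last)
{
	if (next && preempts(next, leftmost) < 1)
		return next;	/* wakee buddy: run the freshly woken task */
	if (last && preempts(last, leftmost) < 1)
		return last;	/* waker buddy: run the cache-hot previous task */
	return leftmost;	/* default: leftmost entity in the rbtree */
}

int main(void)
{
	struct entity a = { "leftmost", 100 };
	struct entity b = { "next",     103 };	/* within the granularity */
	struct entity c = { "last",     110 };	/* too far behind leftmost */

	printf("picked: %s\n", pick(&a, &b, &c)->name);		/* "next" */
	printf("picked: %s\n", pick(&a, NULL, &c)->name);	/* "leftmost" */
	return 0;
}

Note also that set_next_entity() has moved out of this function: the caller now marks the chosen entity as current, as the last hunk below shows.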
@@ -983,6 +982,8 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
+	clear_buddies(cfs_rq, se);
+
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		update_rq_clock(rq);
 		/*
@@ -1325,26 +1326,53 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	return 0;
 }
 
+static void set_last_buddy(struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		cfs_rq_of(se)->last = se;
+}
+
+static void set_next_buddy(struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		cfs_rq_of(se)->next = se;
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
 	struct task_struct *curr = rq->curr;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 
 	if (unlikely(rt_prio(p->prio))) {
+		struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+
 		update_rq_clock(rq);
 		update_curr(cfs_rq);
 		resched_task(curr);
 		return;
 	}
 
+	if (unlikely(p->sched_class != &fair_sched_class))
+		return;
+
 	if (unlikely(se == pse))
 		return;
 
-	cfs_rq_of(pse)->next = pse;
+	/*
+	 * Only set the backward buddy when the current task is still on the
+	 * rq. This can happen when a wakeup gets interleaved with schedule on
+	 * the ->pre_schedule() or idle_balance() point, either of which can
+	 * drop the rq lock.
+	 *
+	 * Also, during early boot the idle thread is in the fair class; for
+	 * obvious reasons it's a bad idea to schedule back to the idle thread.
+	 */
+	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
+		set_last_buddy(se);
+	set_next_buddy(pse);
 
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task
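set_last_buddy() and set_next_buddy() walk upward because, with CONFIG_FAIR_GROUP_SCHED, a task's entity hangs off a chain of group entities and for_each_sched_entity() iterates from the task up through its parents; the hint has to be planted on the cfs_rq at every level, or the root-level pick_next_entity() would never descend toward the buddied task. (Without group scheduling the loop body runs exactly once.) A compilable sketch of that upward walk, with simplified stand-ins for the entity linkage and cfs_rq_of():

#include <stdio.h>

struct cfs_rq;
struct sched_entity {
	const char *name;
	struct sched_entity *parent;	/* group entity one level up */
	struct cfs_rq *cfs_rq;		/* runqueue this entity lives on */
};
struct cfs_rq { struct sched_entity *next, *last; };

/* with group scheduling this walks up the hierarchy */
#define for_each_sched_entity(se) \
	for (; se; se = se->parent)

static void set_next_buddy(struct sched_entity *se)
{
	for_each_sched_entity(se)
		se->cfs_rq->next = se;	/* mark the buddy at every level */
}

int main(void)
{
	struct cfs_rq root_rq = { 0 }, group_rq = { 0 };
	struct sched_entity group = { "group", NULL, &root_rq };
	struct sched_entity task  = { "task", &group, &group_rq };

	set_next_buddy(&task);
	printf("root picks: %s, group picks: %s\n",
	       root_rq.next->name, group_rq.next->name);
	return 0;
}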
@@ -1396,6 +1424,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
 