Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	26
1 file changed, 26 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9d003c9d2a48..31c4a2988b64 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -207,6 +207,9 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 }
         }
 
+        if (cfs_rq->next == se)
+                cfs_rq->next = NULL;
+
         rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
@@ -626,12 +629,32 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static struct sched_entity *
+pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+        s64 diff, gran;
+
+        if (!cfs_rq->next)
+                return se;
+
+        diff = cfs_rq->next->vruntime - se->vruntime;
+        if (diff < 0)
+                return se;
+
+        gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
+        if (diff > gran)
+                return se;
+
+        return cfs_rq->next;
+}
+
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
         struct sched_entity *se = NULL;
 
         if (first_fair(cfs_rq)) {
                 se = __pick_next_entity(cfs_rq);
+                se = pick_next(cfs_rq, se);
                 set_next_entity(cfs_rq, se);
         }
 
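The hunk above carries the core of the change: pick_next_entity() still asks __pick_next_entity() for the leftmost (smallest-vruntime) entity, but pick_next() lets the cached cfs_rq->next hint override that choice when the hinted entity's vruntime is no smaller than the leftmost one's and exceeds it by at most sysctl_sched_wakeup_granularity, scaled against the queue's load via calc_delta_fair(). A minimal user-space sketch of that decision rule follows; the struct entity type, the pick_buddy() helper and the sample numbers are made up for illustration and only model the comparison, not the kernel's data structures:

#include <stdint.h>
#include <stdio.h>

struct entity {
        const char *name;
        int64_t vruntime;       /* virtual runtime; smaller = ran less */
};

/*
 * Same shape as pick_next() above: keep the leftmost entity unless a
 * hinted buddy exists, is not below the leftmost vruntime, and is at
 * most 'gran' ahead of it.
 */
static struct entity *pick_buddy(struct entity *leftmost,
                                 struct entity *buddy, int64_t gran)
{
        int64_t diff;

        if (!buddy)
                return leftmost;

        diff = buddy->vruntime - leftmost->vruntime;
        if (diff < 0)
                return leftmost;
        if (diff > gran)
                return leftmost;

        return buddy;
}

int main(void)
{
        struct entity leftmost = { "leftmost", 1000 };
        struct entity wakee    = { "wakee",    1600 };

        /* 600 ahead with a granularity of 1000: the hinted wakee is picked */
        printf("%s\n", pick_buddy(&leftmost, &wakee, 1000)->name);

        /* granularity of 500: too far ahead, fall back to the fair choice */
        printf("%s\n", pick_buddy(&leftmost, &wakee, 500)->name);
        return 0;
}
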
@@ -1070,6 +1093,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                 resched_task(curr);
                 return;
         }
+
+        cfs_rq_of(pse)->next = pse;
+
         /*
          * Batch tasks do not preempt (their preemption is driven by
          * the tick):
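Taken together, the three hunks give the hint a simple lifecycle: check_preempt_wakeup() records the freshly woken entity in cfs_rq_of(pse)->next, the next call to pick_next_entity() consumes it through pick_next(), and __dequeue_entity() (first hunk) clears the pointer when the hinted entity leaves the timeline so it cannot dangle. A rough sketch of that set/clear discipline, again as a hypothetical user-space model (struct runqueue, note_wakeup() and note_dequeue() are invented names, not kernel interfaces):

#include <assert.h>
#include <stddef.h>

struct entity { int id; };

struct runqueue {
        struct entity *next;    /* wakeup hint; NULL when no preference */
};

/* wakeup preemption path: remember the wakee as the preferred next pick */
static void note_wakeup(struct runqueue *rq, struct entity *wakee)
{
        rq->next = wakee;
}

/* dequeue path: drop the hint if the hinted entity itself is leaving */
static void note_dequeue(struct runqueue *rq, struct entity *e)
{
        if (rq->next == e)
                rq->next = NULL;
}

int main(void)
{
        struct runqueue rq = { NULL };
        struct entity a = { 1 };

        note_wakeup(&rq, &a);
        assert(rq.next == &a);

        note_dequeue(&rq, &a);
        assert(rq.next == NULL);
        return 0;
}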