Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	42
1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4cbc9121094c..55040f3938d8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -4078,33 +4078,62 @@ static void task_fork_fair(struct task_struct *p)
  * Priority of the task has changed. Check to see if we preempt
  * the current task.
  */
-static void prio_changed_fair(struct rq *rq, struct task_struct *p,
-			      int oldprio, int running)
+static void
+prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
+	if (!p->se.on_rq)
+		return;
+
 	/*
 	 * Reschedule if we are currently running on this runqueue and
 	 * our priority decreased, or if we are not currently running on
 	 * this runqueue and our priority is higher than the current's
 	 */
-	if (running) {
+	if (rq->curr == p) {
 		if (p->prio > oldprio)
 			resched_task(rq->curr);
 	} else
 		check_preempt_curr(rq, p, 0);
 }
 
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	/*
+	 * Ensure the task's vruntime is normalized, so that when it's
+	 * switched back to the fair class the enqueue_entity(.flags=0) will
+	 * do the right thing.
+	 *
+	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
+	 * have normalized the vruntime; if it was !on_rq, then only when
+	 * the task is sleeping will it still have non-normalized vruntime.
+	 */
+	if (!se->on_rq && p->state != TASK_RUNNING) {
+		/*
+		 * Fix up our vruntime so that the current sleep doesn't
+		 * cause 'unlimited' sleep bonus.
+		 */
+		place_entity(cfs_rq, se, 0);
+		se->vruntime -= cfs_rq->min_vruntime;
+	}
+}
+
 /*
  * We switched to the sched_fair class.
  */
-static void switched_to_fair(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
+	if (!p->se.on_rq)
+		return;
+
 	/*
 	 * We were most likely switched from sched_rt, so
 	 * kick off the schedule if running, otherwise just see
 	 * if we can still preempt the current task.
 	 */
-	if (running)
+	if (rq->curr == p)
 		resched_task(rq->curr);
 	else
 		check_preempt_curr(rq, p, 0);
@@ -4190,6 +4219,7 @@ static const struct sched_class fair_sched_class = {
 	.task_fork		= task_fork_fair,
 
 	.prio_changed		= prio_changed_fair,
+	.switched_from		= switched_from_fair,
 	.switched_to		= switched_to_fair,
 
 	.get_rr_interval	= get_rr_interval_fair,
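
For context on the normalization that the new switched_from_fair() relies on: CFS keeps a task's vruntime relative to its runqueue's min_vruntime while the task is off the fair runqueue, and makes it absolute again on a plain enqueue. The sketch below isolates that invariant; it is illustrative only, and the toy_* names are hypothetical, not kernel API.

	/* Minimal standalone sketch of the relative-vruntime invariant;
	 * toy_* identifiers are hypothetical, not kernel code. */
	struct toy_cfs_rq { unsigned long long min_vruntime; };
	struct toy_entity { unsigned long long vruntime; };

	/* Leaving the runqueue (or leaving the fair class while asleep,
	 * as switched_from_fair() now ensures): store vruntime relative
	 * to the queue's base so it stays comparable after the base has
	 * advanced. */
	static void toy_normalize(struct toy_cfs_rq *cfs_rq, struct toy_entity *se)
	{
		se->vruntime -= cfs_rq->min_vruntime;
	}

	/* A plain enqueue (the enqueue_entity(.flags=0) case named in the
	 * patch comment): make vruntime absolute against the current base. */
	static void toy_renormalize(struct toy_cfs_rq *cfs_rq, struct toy_entity *se)
	{
		se->vruntime += cfs_rq->min_vruntime;
	}

Because a plain enqueue unconditionally adds the base back, a task that leaves the fair class while sleeping (and therefore still carries an absolute vruntime) must be normalized on the way out; the place_entity() call in the hunk above additionally caps how much credit the ongoing sleep can accumulate, per the 'unlimited' sleep bonus comment.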