Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b718b59b09f..78fa75394011 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2057,14 +2057,14 @@ inline int task_curr(const struct task_struct *p)
 
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
-				       int oldprio, int running)
+				       int oldprio)
 {
 	if (prev_class != p->sched_class) {
 		if (prev_class->switched_from)
-			prev_class->switched_from(rq, p, running);
-		p->sched_class->switched_to(rq, p, running);
-	} else
-		p->sched_class->prio_changed(rq, p, oldprio, running);
+			prev_class->switched_from(rq, p);
+		p->sched_class->switched_to(rq, p);
+	} else if (oldprio != p->prio)
+		p->sched_class->prio_changed(rq, p, oldprio);
 }
 
 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2598,6 +2598,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
+	p->se.vruntime = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -4696,11 +4697,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, &flags);
 }
 
@@ -5028,11 +5028,10 @@ recheck:
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		activate_task(rq, p, 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	__task_rq_unlock(rq);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -8237,6 +8236,8 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
+	const struct sched_class *prev_class = p->sched_class;
+	int old_prio = p->prio;
 	int on_rq;
 
 	on_rq = p->se.on_rq;
@@ -8247,6 +8248,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		activate_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
+
+	check_class_changed(rq, p, prev_class, old_prio);
 }
 
 void normalize_rt_tasks(void)