 include/linux/sched.h   |  8
 kernel/sched.c          | 25
 kernel/sched_fair.c     | 42
 kernel/sched_idletask.c |  7
 kernel/sched_rt.c       | 19
 kernel/sched_stoptask.c |  7
 6 files changed, 69 insertions(+), 39 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index af6e15fbfb78..0542774914d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1084,12 +1084,10 @@ struct sched_class {
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork) (struct task_struct *p);
 
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
-			       int running);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
-			     int running);
+	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
+	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			     int oldprio, int running);
+			     int oldprio);
 
 	unsigned int (*get_rr_interval) (struct rq *rq,
 					 struct task_struct *task);
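
The dropped `running` argument is not lost information: a task is "running" exactly when it is its runqueue's current task, so every callback can recompute it as `rq->curr == p` (the fair and rt hunks below do exactly that). A standalone C sketch of the equivalence, using hypothetical stand-in types rather than the real kernel structures:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's struct rq / struct task_struct. */
struct task_struct { int pid; int prio; };
struct rq { struct task_struct *curr; };

/* New-style hook: the 'running' flag is recomputed instead of passed in. */
static void prio_changed(struct rq *rq, struct task_struct *p, int oldprio)
{
	int running = (rq->curr == p);	/* exactly what the old flag encoded */

	printf("task %d: prio %d -> %d (%s)\n", p->pid, oldprio, p->prio,
	       running ? "running" : "not running");
}

int main(void)
{
	struct task_struct a = { .pid = 1, .prio = 120 };
	struct task_struct b = { .pid = 2, .prio = 120 };
	struct rq rq = { .curr = &a };

	a.prio = 110;
	prio_changed(&rq, &a, 120);	/* running path */
	b.prio = 90;
	prio_changed(&rq, &b, 120);	/* queued-but-not-running path */
	return 0;
}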
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b718b59b09f..78fa75394011 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2057,14 +2057,14 @@ inline int task_curr(const struct task_struct *p)
 
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
-				       int oldprio, int running)
+				       int oldprio)
 {
 	if (prev_class != p->sched_class) {
 		if (prev_class->switched_from)
-			prev_class->switched_from(rq, p, running);
-		p->sched_class->switched_to(rq, p, running);
-	} else
-		p->sched_class->prio_changed(rq, p, oldprio, running);
+			prev_class->switched_from(rq, p);
+		p->sched_class->switched_to(rq, p);
+	} else if (oldprio != p->prio)
+		p->sched_class->prio_changed(rq, p, oldprio);
 }
 
 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2598,6 +2598,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
+	p->se.vruntime			= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -4696,11 +4697,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, &flags);
 }
 
@@ -5028,11 +5028,10 @@ recheck:
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		activate_task(rq, p, 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	__task_rq_unlock(rq);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -8237,6 +8236,8 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
+	const struct sched_class *prev_class = p->sched_class;
+	int old_prio = p->prio;
 	int on_rq;
 
 	on_rq = p->se.on_rq;
@@ -8247,6 +8248,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		activate_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
+
+	check_class_changed(rq, p, prev_class, old_prio);
 }
 
 void normalize_rt_tasks(void)
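
All three call sites above (rt_mutex_setprio(), sched_setscheduler(), and now normalize_task()) share one shape: snapshot the old class and priority, dequeue and mutate, requeue, then let check_class_changed() fire at most one notification; the new `else if (oldprio != p->prio)` also suppresses the call when nothing relevant changed. A minimal userspace model of that contract, with illustrative names in place of the real class pointers:

#include <stdio.h>
#include <string.h>

/* Toy task: a class name and a priority stand in for the real pointers. */
struct task { const char *class_name; int prio; };

/* Models the reworked check_class_changed(): called once, after the
 * change is fully applied, and silent when nothing relevant changed. */
static void check_class_changed(struct task *p, const char *prev_class,
				int oldprio)
{
	if (strcmp(prev_class, p->class_name) != 0)
		printf("switched_from %s / switched_to %s\n",
		       prev_class, p->class_name);
	else if (oldprio != p->prio)	/* new: skip the no-op notification */
		printf("prio_changed %d -> %d\n", oldprio, p->prio);
}

int main(void)
{
	struct task p = { "fair", 120 };

	/* rt_mutex_setprio()-style caller: snapshot, mutate, notify. */
	const char *prev_class = p.class_name;
	int oldprio = p.prio;
	p.class_name = "rt";
	p.prio = 50;
	check_class_changed(&p, prev_class, oldprio);

	/* Same class, same prio: the hook now fires nothing at all. */
	check_class_changed(&p, p.class_name, p.prio);
	return 0;
}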
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4cbc9121094c..55040f3938d8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -4078,33 +4078,62 @@ static void task_fork_fair(struct task_struct *p)
  * Priority of the task has changed. Check to see if we preempt
  * the current task.
  */
-static void prio_changed_fair(struct rq *rq, struct task_struct *p,
-			      int oldprio, int running)
+static void
+prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
+	if (!p->se.on_rq)
+		return;
+
 	/*
 	 * Reschedule if we are currently running on this runqueue and
 	 * our priority decreased, or if we are not currently running on
 	 * this runqueue and our priority is higher than the current's
 	 */
-	if (running) {
+	if (rq->curr == p) {
 		if (p->prio > oldprio)
 			resched_task(rq->curr);
 	} else
 		check_preempt_curr(rq, p, 0);
 }
 
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	/*
+	 * Ensure the task's vruntime is normalized, so that when its
+	 * switched back to the fair class the enqueue_entity(.flags=0) will
+	 * do the right thing.
+	 *
+	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
+	 * have normalized the vruntime, if it was !on_rq, then only when
+	 * the task is sleeping will it still have non-normalized vruntime.
+	 */
+	if (!se->on_rq && p->state != TASK_RUNNING) {
+		/*
+		 * Fix up our vruntime so that the current sleep doesn't
+		 * cause 'unlimited' sleep bonus.
+		 */
+		place_entity(cfs_rq, se, 0);
+		se->vruntime -= cfs_rq->min_vruntime;
+	}
+}
+
 /*
  * We switched to the sched_fair class.
  */
-static void switched_to_fair(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
+	if (!p->se.on_rq)
+		return;
+
 	/*
 	 * We were most likely switched from sched_rt, so
 	 * kick off the schedule if running, otherwise just see
 	 * if we can still preempt the current task.
 	 */
-	if (running)
+	if (rq->curr == p)
 		resched_task(rq->curr);
 	else
 		check_preempt_curr(rq, p, 0);
@@ -4190,6 +4219,7 @@ static const struct sched_class fair_sched_class = {
 	.task_fork		= task_fork_fair,
 
 	.prio_changed		= prio_changed_fair,
+	.switched_from		= switched_from_fair,
 	.switched_to		= switched_to_fair,
 
 	.get_rr_interval	= get_rr_interval_fair,
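
The switched_from_fair() comment above is the core of the fix: a task that leaves the fair class while sleeping still holds an absolute vruntime, and without normalization it would return with an "unlimited" sleep bonus relative to a min_vruntime that kept advancing in its absence. A standalone sketch of the relative/absolute bookkeeping, with made-up numbers and with place_entity()'s sleep-credit clamping deliberately omitted:

#include <stdio.h>

/* Toy CFS state: only the two fields the normalization dance touches. */
struct cfs_rq { unsigned long long min_vruntime; };
struct sched_entity { unsigned long long vruntime; int on_rq; };

/* Leaving the fair class while asleep: store vruntime relative to the
 * queue minimum, as switched_from_fair() does after place_entity(). */
static void switched_from(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se->on_rq)
		se->vruntime -= cfs_rq->min_vruntime;
}

/* Re-entering the fair class: enqueue re-bases on the current minimum. */
static void enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->vruntime += cfs_rq->min_vruntime;
	se->on_rq = 1;
}

int main(void)
{
	struct cfs_rq rq = { .min_vruntime = 1000 };
	struct sched_entity se = { .vruntime = 1040, .on_rq = 0 };

	switched_from(&rq, &se);	/* becomes a relative +40 */
	rq.min_vruntime = 5000;		/* fair clock advances while task is rt */
	enqueue(&rq, &se);
	printf("vruntime on re-entry: %llu\n", se.vruntime); /* 5040, not 1040 */
	return 0;
}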
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 41eb62a0808b..c82f26c1b7c3 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -52,14 +52,13 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
-static void
-switched_to_idle(struct rq *rq, struct task_struct *p, int running)
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
 	BUG();
 }
 
-static void prio_changed_idle(struct rq *rq, struct task_struct *p,
-			      int oldprio, int running)
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
 {
 	BUG();
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec747ca6..c381fdc18c64 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1595,8 +1595,7 @@ static void rq_offline_rt(struct rq *rq)
  * When switch from the rt queue, we bring ourselves to a position
  * that we might want to pull RT tasks from other runqueues.
  */
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
 	 * If there are other RT tasks then we will reschedule
@@ -1605,7 +1604,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!rq->rt.rt_nr_running)
+	if (p->se.on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 
@@ -1624,8 +1623,7 @@ static inline void init_sched_rt_class(void)
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
-			   int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
 	int check_resched = 1;
 
@@ -1636,7 +1634,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (!running) {
+	if (p->se.on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1652,10 +1650,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
-			    int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (running) {
+	if (!p->se.on_rq)
+		return;
+
+	if (rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we
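
With the flag gone, each rt hook above guards itself: switched_from_rt() and switched_to_rt() test p->se.on_rq before pulling or pushing, and prio_changed_rt() returns early for un-queued tasks, which are now reachable since normalize_task() also calls check_class_changed(). A compact model of that guard order, with illustrative fields standing in for the real runqueue state:

#include <stdio.h>

/* Toy task: the fields the reworked rt hooks now inspect themselves. */
struct task { int on_rq; int is_curr; int prio; };

/* Models prio_changed_rt(): with no 'running' argument, the hook first
 * bails out for un-queued tasks, then derives "running" from curr. */
static void prio_changed_rt(const struct task *p, int oldprio)
{
	if (!p->on_rq)
		return;		/* sleeping task: no push/pull/preempt work */

	if (p->is_curr)
		printf("running: priority lowered -> try to pull RT tasks\n");
	else if (p->prio < oldprio)	/* lower value = higher rt priority */
		printf("queued: priority rose, try to preempt current\n");
}

int main(void)
{
	struct task sleeping = { .on_rq = 0, .is_curr = 0, .prio = 40 };
	struct task running  = { .on_rq = 1, .is_curr = 1, .prio = 40 };

	prio_changed_rt(&sleeping, 50);	/* silently ignored now */
	prio_changed_rt(&running, 50);
	return 0;
}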
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 2bf6b47058c1..84ec9bcf82d9 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -59,14 +59,13 @@ static void set_curr_task_stop(struct rq *rq)
 {
 }
 
-static void switched_to_stop(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
 	BUG(); /* its impossible to change to this class */
 }
 
-static void prio_changed_stop(struct rq *rq, struct task_struct *p,
-			      int oldprio, int running)
+static void
+prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
 {
 	BUG(); /* how!?, what priority? */
 }