Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c           42
-rw-r--r--  kernel/sched_fair.c      39
-rw-r--r--  kernel/sched_idletask.c  31
-rw-r--r--  kernel/sched_rt.c        89
4 files changed, 179 insertions(+), 22 deletions(-)
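For orientation: this patch drives three new sched_class hooks whose signatures can be read off the call sites below. What follows is a minimal sketch of the declarations, on the assumption that they are added to struct sched_class in include/linux/sched.h, which lies outside this diffstat (limited to kernel/):

/* Sketch only: the sched_class members implied by the call sites in this
 * diff; the real declarations are in include/linux/sched.h (not shown). */
struct sched_class {
	/* ... existing methods: enqueue_task, set_curr_task, ... */

	/* p is leaving this class (optional; callers check for NULL) */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task,
			      int running);
	/* p has just joined this class */
	void (*switched_to)(struct rq *this_rq, struct task_struct *task,
			    int running);
	/* p kept its class but changed priority */
	void (*prio_changed)(struct rq *this_rq, struct task_struct *task,
			     int oldprio, int running);
};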
diff --git a/kernel/sched.c b/kernel/sched.c
index 2368a0d882e3..5834c7fb79a5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1152,6 +1152,18 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
+static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+				       const struct sched_class *prev_class,
+				       int oldprio, int running)
+{
+	if (prev_class != p->sched_class) {
+		if (prev_class->switched_from)
+			prev_class->switched_from(rq, p, running);
+		p->sched_class->switched_to(rq, p, running);
+	} else
+		p->sched_class->prio_changed(rq, p, oldprio, running);
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -4017,6 +4029,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	unsigned long flags;
 	int oldprio, on_rq, running;
 	struct rq *rq;
+	const struct sched_class *prev_class = p->sched_class;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -4042,18 +4055,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (on_rq) {
 		if (running)
 			p->sched_class->set_curr_task(rq);
+
 		enqueue_task(rq, p, 0);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (running) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else {
-			check_preempt_curr(rq, p);
-		}
+
+		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	task_rq_unlock(rq, &flags);
 }
@@ -4253,6 +4258,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
+	const struct sched_class *prev_class = p->sched_class;
 	struct rq *rq;
 
 	/* may grab non-irq protected spin_locks */
@@ -4346,18 +4352,10 @@ recheck:
 	if (on_rq) {
 		if (running)
 			p->sched_class->set_curr_task(rq);
+
 		activate_task(rq, p, 0);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (running) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else {
-			check_preempt_curr(rq, p);
-		}
+
+		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	__task_rq_unlock(rq);
 	spin_unlock_irqrestore(&p->pi_lock, flags);
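Both call sites above now share one shape: capture prev_class and oldprio, dequeue the task (dropping curr status if needed), apply the priority or policy change, re-enqueue, and let check_class_changed() pick the right hook. A condensed sketch of that pattern follows, with locking, error paths, and the on_rq bookkeeping elided; the helpers named (dequeue_task, __setscheduler, enqueue_task) are assumed from this era of kernel/sched.c:

/* Condensed sketch of the pattern now shared by rt_mutex_setprio()
 * and sched_setscheduler(); locking and error handling elided. */
static void change_task_prio_sketch(struct rq *rq, struct task_struct *p,
				    int policy, int prio)
{
	const struct sched_class *prev_class = p->sched_class;
	int oldprio = p->prio;
	int running = (rq->curr == p);

	dequeue_task(rq, p, 0);		/* off the old class's runqueue */
	__setscheduler(rq, p, policy, prio); /* may switch p->sched_class */

	if (running)
		p->sched_class->set_curr_task(rq);
	enqueue_task(rq, p, 0);

	/* one place now decides between the switch hooks and prio_changed */
	check_class_changed(rq, p, prev_class, oldprio, running);
}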
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 10aa6e1ae3dd..dfa18d55561d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1280,6 +1280,42 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		resched_task(rq->curr);
 }
 
+/*
+ * Priority of the task has changed. Check to see if we preempt
+ * the current task.
+ */
+static void prio_changed_fair(struct rq *rq, struct task_struct *p,
+			      int oldprio, int running)
+{
+	/*
+	 * Reschedule if we are currently running on this runqueue and
+	 * our priority decreased, or if we are not currently running on
+	 * this runqueue and our priority is higher than the current's
+	 */
+	if (running) {
+		if (p->prio > oldprio)
+			resched_task(rq->curr);
+	} else
+		check_preempt_curr(rq, p);
+}
+
+/*
+ * We switched to the sched_fair class.
+ */
+static void switched_to_fair(struct rq *rq, struct task_struct *p,
+			     int running)
+{
+	/*
+	 * We were most likely switched from sched_rt, so
+	 * kick off the schedule if running, otherwise just see
+	 * if we can still preempt the current task.
+	 */
+	if (running)
+		resched_task(rq->curr);
+	else
+		check_preempt_curr(rq, p);
+}
+
 /* Account for a task changing its policy or group.
  *
  * This routine is mostly called to set cfs_rq->curr field when a task
@@ -1318,6 +1354,9 @@ static const struct sched_class fair_sched_class = {
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_new		= task_new_fair,
+
+	.prio_changed		= prio_changed_fair,
+	.switched_to		= switched_to_fair,
 };
 
 #ifdef CONFIG_SCHED_DEBUG
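A note on the comparisons in these hooks: kernel priority values are inverted, so a numerically lower p->prio is a stronger priority (0-99 for RT, 100-139 for the nice range). Hence p->prio > oldprio in prio_changed_fair() means the task's priority dropped, and p->prio < rq->curr->prio means p outranks the currently running task. A toy illustration, not kernel code:

/* Toy illustration of the inverted priority scale used above. */
static int priority_dropped(int oldprio, int newprio)
{
	return newprio > oldprio;	/* larger value == weaker priority */
}

/* priority_dropped(120, 130) == 1: the task got weaker, so if it is
 * currently running it must call resched_task() and give others a turn;
 * priority_dropped(120, 110) == 0: it got stronger, and since it was
 * already running, no reschedule is needed in prio_changed_fair(). */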
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index ca5374860aef..ef7a2661fa10 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -69,6 +69,33 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
+static void switched_to_idle(struct rq *rq, struct task_struct *p,
+			     int running)
+{
+	/* Can this actually happen?? */
+	if (running)
+		resched_task(rq->curr);
+	else
+		check_preempt_curr(rq, p);
+}
+
+static void prio_changed_idle(struct rq *rq, struct task_struct *p,
+			      int oldprio, int running)
+{
+	/* This can happen for hot plug CPUs */
+
+	/*
+	 * Reschedule if we are currently running on this runqueue and
+	 * our priority decreased, or if we are not currently running on
+	 * this runqueue and our priority is higher than the current's
+	 */
+	if (running) {
+		if (p->prio > oldprio)
+			resched_task(rq->curr);
+	} else
+		check_preempt_curr(rq, p);
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -94,5 +121,9 @@ const struct sched_class idle_sched_class = {
 
 	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
+
+	.prio_changed		= prio_changed_idle,
+	.switched_to		= switched_to_idle,
+
 	/* no .task_new for idle tasks */
 };
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a5a45104603a..57fa3d96847b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -779,7 +779,92 @@ static void leave_domain_rt(struct rq *rq)
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
 }
+
+/*
+ * When switching from the rt queue, we bring ourselves to a position
+ * where we might want to pull RT tasks from other runqueues.
+ */
+static void switched_from_rt(struct rq *rq, struct task_struct *p,
+			     int running)
+{
+	/*
+	 * If there are other RT tasks then we will reschedule
+	 * and the scheduling of the other RT tasks will handle
+	 * the balancing. But if we are the last RT task
+	 * we may need to handle the pulling of RT tasks
+	 * now.
+	 */
+	if (!rq->rt.rt_nr_running)
+		pull_rt_task(rq);
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * When switching a task to RT, we may overload the runqueue
+ * with RT tasks. In this case we try to push them off to
+ * other runqueues.
+ */
+static void switched_to_rt(struct rq *rq, struct task_struct *p,
+			   int running)
+{
+	int check_resched = 1;
+
+	/*
+	 * If we are already running, then there's nothing
+	 * that needs to be done. But if we are not running
+	 * we may need to preempt the current running task.
+	 * If that current running task is also an RT task
+	 * then see if we can move to another run queue.
+	 */
+	if (!running) {
+#ifdef CONFIG_SMP
+		if (rq->rt.overloaded && push_rt_task(rq) &&
+		    /* Don't resched if we changed runqueues */
+		    rq != task_rq(p))
+			check_resched = 0;
+#endif /* CONFIG_SMP */
+		if (check_resched && p->prio < rq->curr->prio)
+			resched_task(rq->curr);
+	}
+}
+
+/*
+ * Priority of the task has changed. This may cause
+ * us to initiate a push or pull.
+ */
+static void prio_changed_rt(struct rq *rq, struct task_struct *p,
+			    int oldprio, int running)
+{
+	if (running) {
+#ifdef CONFIG_SMP
+		/*
+		 * If our priority decreases while running, we
+		 * may need to pull tasks to this runqueue.
+		 */
+		if (oldprio < p->prio)
+			pull_rt_task(rq);
+		/*
+		 * If there's a higher priority task waiting to run
+		 * then reschedule.
+		 */
+		if (p->prio > rq->rt.highest_prio)
+			resched_task(p);
+#else
+		/* For UP simply resched on drop of prio */
+		if (oldprio < p->prio)
+			resched_task(p);
 #endif /* CONFIG_SMP */
+	} else {
+		/*
+		 * This task is not running, but if its priority is
+		 * higher than the current running task's
+		 * then reschedule.
+		 */
+		if (p->prio < rq->curr->prio)
+			resched_task(rq->curr);
+	}
+}
+
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
@@ -837,8 +922,12 @@ const struct sched_class rt_sched_class = {
 	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
+	.switched_from		= switched_from_rt,
 #endif
 
 	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
+
+	.prio_changed		= prio_changed_rt,
+	.switched_to		= switched_to_rt,
 };
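Putting the pieces together, here is a standalone toy model (assumed names, not kernel code) of how check_class_changed() dispatches in the common cases: a fair task boosted to RT priority by rt_mutex_setprio(), the later de-boost, and a pure priority change within the RT class. The NULL check mirrors the fair class above, which supplies no switched_from hook:

/* Standalone toy model of check_class_changed()'s dispatch; it can be
 * compiled and run on its own to print the three possible outcomes. */
#include <stdio.h>

struct toy_class {
	void (*switched_from)(void);	/* may be NULL, like fair's */
	void (*switched_to)(void);
	void (*prio_changed)(int oldprio);
};

static void to_rt(void)      { puts("switched_to_rt: maybe push/resched"); }
static void from_rt(void)    { puts("switched_from_rt: maybe pull"); }
static void rt_prio(int o)   { printf("prio_changed_rt: oldprio=%d\n", o); }
static void to_fair(void)    { puts("switched_to_fair: resched/preempt"); }
static void fair_prio(int o) { printf("prio_changed_fair: oldprio=%d\n", o); }

static void toy_check_class_changed(const struct toy_class *prev,
				    const struct toy_class *cur, int oldprio)
{
	if (prev != cur) {
		if (prev->switched_from)
			prev->switched_from();
		cur->switched_to();
	} else
		cur->prio_changed(oldprio);
}

int main(void)
{
	const struct toy_class fair = { NULL, to_fair, fair_prio };
	const struct toy_class rt   = { from_rt, to_rt, rt_prio };

	toy_check_class_changed(&fair, &rt, 120); /* PI boost: fair -> rt */
	toy_check_class_changed(&rt, &fair, 40);  /* de-boost: rt -> fair */
	toy_check_class_changed(&rt, &rt, 45);    /* RT prio change only */
	return 0;
}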