about summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263e..e067df1fd01a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -723,9 +723,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +731,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4128,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4141,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4992,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7026,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 