 include/linux/sched.h   |  1 -
 kernel/sched.c          | 36 +++---------
 kernel/sched_fair.c     | 55 ++++++-------
 kernel/sched_idletask.c |  5 -----
 kernel/sched_rt.c       |  5 -----
 5 files changed, 35 insertions(+), 67 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 66169005f008..abcb02738d95 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -871,7 +871,6 @@ struct sched_class {
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *all_pinned, int *this_best_prio);
 
-	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
 };
diff --git a/kernel/sched.c b/kernel/sched.c
index e1f784f4b4db..72c936d3e332 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3915,8 +3915,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, on_rq, running;
 	unsigned long flags;
+	int oldprio, on_rq;
 	struct rq *rq;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3926,12 +3926,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
@@ -3941,15 +3937,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	p->prio = prio;
 
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
 		enqueue_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
 		 * this runqueue and our priority is higher than the current's
 		 */
-		if (running) {
+		if (task_running(rq, p)) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
 		} else {
@@ -4155,7 +4149,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
 {
-	int retval, oldprio, oldpolicy = -1, on_rq, running;
+	int retval, oldprio, oldpolicy = -1, on_rq;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -4237,24 +4231,20 @@ recheck:
 	}
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		deactivate_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
+
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
+
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
 		activate_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
 		 * this runqueue and our priority is higher than the current's
 		 */
-		if (running) {
+		if (task_running(rq, p)) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
 		} else {
@@ -6855,19 +6845,13 @@ static void sched_move_task(struct container_subsys *ss, struct container *cont,
 	running = task_running(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, tsk, 0);
-		if (unlikely(running))
-			tsk->sched_class->put_prev_task(rq, tsk);
-	}
 
 	set_task_cfs_rq(tsk);
 
-	if (on_rq) {
-		if (unlikely(running))
-			tsk->sched_class->set_curr_task(rq);
+	if (on_rq)
 		enqueue_task(rq, tsk, 0);
-	}
 
 done:
 	task_rq_unlock(rq, &flags);
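
Read together, the kernel/sched.c hunks above converge on one shape: with the put_prev_task()/set_curr_task() pair gone from these paths, a priority, policy, or group change needs only an on_rq-guarded dequeue and re-enqueue, and the running task is handled like any other queued task. A minimal sketch of that pattern as a hypothetical helper (the name change_task_attrs() and the bare priority assignment are illustrative only; locking and the non-running resched branch, which the hunks truncate, are elided):

/*
 * Hypothetical helper sketching the pattern shared by rt_mutex_setprio(),
 * sched_setscheduler() and sched_move_task() after this patch.
 */
static void change_task_attrs(struct rq *rq, struct task_struct *p, int newprio)
{
	int oldprio = p->prio;
	int on_rq = p->se.on_rq;

	if (on_rq)
		dequeue_task(rq, p, 0);		/* deactivate_task() in sched_setscheduler() */

	p->prio = newprio;			/* stands in for the real mutation */

	if (on_rq) {
		enqueue_task(rq, p, 0);		/* activate_task() in sched_setscheduler() */
		/*
		 * Per the comment in the hunks: reschedule if we are
		 * currently running on this runqueue and our priority
		 * decreased.
		 */
		if (task_running(rq, p) && p->prio > oldprio)
			resched_task(rq->curr);
	}
}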
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4dd256d46853..568e922255c6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -472,9 +472,20 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 }
 
 static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+	       int wakeup, int set_curr)
 {
 	/*
+	 * In case of the 'current'.
+	 */
+	if (unlikely(set_curr)) {
+		update_stats_curr_start(cfs_rq, se);
+		cfs_rq->curr = se;
+		account_entity_enqueue(cfs_rq, se);
+		return;
+	}
+
+	/*
 	 * Update the fair clock.
 	 */
 	update_curr(cfs_rq);
@@ -485,8 +496,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
-	if (se != cfs_rq->curr)
-		__enqueue_entity(cfs_rq, se);
+	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 }
 
@@ -506,8 +516,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 		}
 	}
 #endif
-	if (se != cfs_rq->curr)
+	if (likely(se != cfs_rq->curr))
 		__dequeue_entity(cfs_rq, se);
+	else {
+		update_stats_curr_end(cfs_rq, se);
+		cfs_rq->curr = NULL;
+	}
 	account_entity_dequeue(cfs_rq, se);
 }
 
@@ -689,12 +703,17 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	int set_curr = 0;
+
+	/* Are we enqueuing the current task? */
+	if (unlikely(task_running(rq, p)))
+		set_curr = 1;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
 			break;
 		cfs_rq = cfs_rq_of(se);
-		enqueue_entity(cfs_rq, se, wakeup);
+		enqueue_entity(cfs_rq, se, wakeup, set_curr);
 	}
 }
 
@@ -742,7 +761,7 @@ static void yield_task_fair(struct rq *rq)
 	 * position within the tree:
 	 */
 	dequeue_entity(cfs_rq, se, 0);
-	enqueue_entity(cfs_rq, se, 0);
+	enqueue_entity(cfs_rq, se, 0, 1);
 
 	return;
 }
@@ -985,29 +1004,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	resched_task(rq->curr);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_curr_task_fair(struct rq *rq)
-{
-	struct sched_entity *se = &rq->curr->se;
-
-	for_each_sched_entity(se)
-		set_next_entity(cfs_rq_of(se), se);
-}
-#else
-static void set_curr_task_fair(struct rq *rq)
-{
-	struct sched_entity *se = &rq->curr->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
-	cfs_rq->curr = se;
-}
-#endif
-
 /*
  * All the scheduling class methods:
  */
@@ -1023,7 +1019,6 @@ struct sched_class fair_sched_class __read_mostly = {
 
 	.load_balance		= load_balance_fair,
 
-	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
 	.task_new		= task_new_fair,
 };
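
For orientation, enqueue_entity() reads roughly as follows once the two hunks above are applied. This is reassembled from the diff context, with the wakeup handling between update_curr() and update_stats_enqueue() abbreviated to a comment because it is only partially visible here:

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
	       int wakeup, int set_curr)
{
	/*
	 * In case of the 'current': it is not kept in the rbtree (see the
	 * dequeue_entity() hunk), so only curr statistics and load
	 * accounting are needed.
	 */
	if (unlikely(set_curr)) {
		update_stats_curr_start(cfs_rq, se);
		cfs_rq->curr = se;
		account_entity_enqueue(cfs_rq, se);
		return;
	}

	/*
	 * Update the fair clock.
	 */
	update_curr(cfs_rq);

	/* ... wakeup handling, not fully shown in this diff ... */

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}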
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5ebf829cdd73..3503fb2d9f96 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -50,10 +50,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
 
-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -70,7 +66,6 @@ static struct sched_class idle_sched_class __read_mostly = {
 
 	.load_balance		= load_balance_idle,
 
-	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 	/* no .task_new for idle tasks */
 };
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b86944c20f9f..3c77c03bdf1e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -218,10 +218,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	}
 }
 
-static void set_curr_task_rt(struct rq *rq)
-{
-}
-
 static struct sched_class rt_sched_class __read_mostly = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -234,6 +230,5 @@ static struct sched_class rt_sched_class __read_mostly = {
 
 	.load_balance		= load_balance_rt,
 
-	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 };