author     Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>    2007-10-15 11:00:08 -0400
committer  Ingo Molnar <mingo@elte.hu>                      2007-10-15 11:00:08 -0400
commit     83b699ed20f5218580a1b7042064082e2e05f8c5 (patch)
tree       544c42283f41065f22e96e02af0badb7ed70a371
parent     edcb60a309769a5f6e7c9e76d7c98b34d1757448 (diff)
sched: revert recent removal of set_curr_task()
Revert the removal of set_curr_task().

Use put_prev_task()/set_curr_task() when changing groups/policies.
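The bracket this restores looks roughly like the sketch below (illustration only; change_task_class() is a made-up helper, not something this patch adds):

/* Sketch of the pattern restored by this patch: stop accounting a running
 * task via put_prev_task() before changing it, and re-install it as the
 * runqueue's current task via set_curr_task() afterwards.
 * change_task_class() is hypothetical and only illustrates the ordering. */
static void change_task_class(struct rq *rq, struct task_struct *p)
{
        int on_rq = p->se.on_rq;
        int running = task_running(rq, p);

        if (on_rq) {
                dequeue_task(rq, p, 0);
                if (running)
                        p->sched_class->put_prev_task(rq, p);
        }

        /* ... change p's scheduling class, policy or group here ... */

        if (on_rq) {
                if (running)
                        p->sched_class->set_curr_task(rq);
                enqueue_task(rq, p, 0);
        }
}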
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
-rw-r--r--  include/linux/sched.h    |  1
-rw-r--r--  kernel/sched.c           | 34
-rw-r--r--  kernel/sched_fair.c      | 68
-rw-r--r--  kernel/sched_idletask.c  |  5
-rw-r--r--  kernel/sched_rt.c        |  8
5 files changed, 72 insertions(+), 44 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abcb02738d95..66169005f008 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -871,6 +871,7 @@ struct sched_class {
                         struct sched_domain *sd, enum cpu_idle_type idle,
                         int *all_pinned, int *this_best_prio);
 
+        void (*set_curr_task) (struct rq *rq);
         void (*task_tick) (struct rq *rq, struct task_struct *p);
         void (*task_new) (struct rq *rq, struct task_struct *p);
 };
diff --git a/kernel/sched.c b/kernel/sched.c
index 72c936d3e332..ee7ac71b12f8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3916,7 +3916,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
         unsigned long flags;
-        int oldprio, on_rq;
+        int oldprio, on_rq, running;
         struct rq *rq;
 
         BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3926,8 +3926,12 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
         oldprio = p->prio;
         on_rq = p->se.on_rq;
-        if (on_rq)
+        running = task_running(rq, p);
+        if (on_rq) {
                 dequeue_task(rq, p, 0);
+                if (running)
+                        p->sched_class->put_prev_task(rq, p);
+        }
 
         if (rt_prio(prio))
                 p->sched_class = &rt_sched_class;
@@ -3937,13 +3941,15 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         p->prio = prio;
 
         if (on_rq) {
+                if (running)
+                        p->sched_class->set_curr_task(rq);
                 enqueue_task(rq, p, 0);
                 /*
                  * Reschedule if we are currently running on this runqueue and
                  * our priority decreased, or if we are not currently running on
                  * this runqueue and our priority is higher than the current's
                  */
-                if (task_running(rq, p)) {
+                if (running) {
                         if (p->prio > oldprio)
                                 resched_task(rq->curr);
                 } else {
@@ -4149,7 +4155,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 int sched_setscheduler(struct task_struct *p, int policy,
                        struct sched_param *param)
 {
-        int retval, oldprio, oldpolicy = -1, on_rq;
+        int retval, oldprio, oldpolicy = -1, on_rq, running;
         unsigned long flags;
         struct rq *rq;
 
@@ -4231,20 +4237,26 @@ recheck:
         }
         update_rq_clock(rq);
         on_rq = p->se.on_rq;
-        if (on_rq)
+        running = task_running(rq, p);
+        if (on_rq) {
                 deactivate_task(rq, p, 0);
+                if (running)
+                        p->sched_class->put_prev_task(rq, p);
+        }
 
         oldprio = p->prio;
         __setscheduler(rq, p, policy, param->sched_priority);
 
         if (on_rq) {
+                if (running)
+                        p->sched_class->set_curr_task(rq);
                 activate_task(rq, p, 0);
                 /*
                  * Reschedule if we are currently running on this runqueue and
                  * our priority decreased, or if we are not currently running on
                  * this runqueue and our priority is higher than the current's
                  */
-                if (task_running(rq, p)) {
+                if (running) {
                         if (p->prio > oldprio)
                                 resched_task(rq->curr);
                 } else {
@@ -6845,13 +6857,19 @@ static void sched_move_task(struct container_subsys *ss, struct container *cont,
         running = task_running(rq, tsk);
         on_rq = tsk->se.on_rq;
 
-        if (on_rq)
+        if (on_rq) {
                 dequeue_task(rq, tsk, 0);
+                if (unlikely(running))
+                        tsk->sched_class->put_prev_task(rq, tsk);
+        }
 
         set_task_cfs_rq(tsk);
 
-        if (on_rq)
+        if (on_rq) {
+                if (unlikely(running))
+                        tsk->sched_class->set_curr_task(rq);
                 enqueue_task(rq, tsk, 0);
+        }
 
 done:
         task_rq_unlock(rq, &flags);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9f93a5c127e8..92563cd5af75 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -472,20 +472,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 }
 
 static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
-               int wakeup, int set_curr)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 {
         /*
-         * In case of the 'current'.
-         */
-        if (unlikely(set_curr)) {
-                update_stats_curr_start(cfs_rq, se);
-                cfs_rq->curr = se;
-                account_entity_enqueue(cfs_rq, se);
-                return;
-        }
-
-        /*
          * Update the fair clock.
          */
         update_curr(cfs_rq);
@@ -496,7 +485,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
         }
 
         update_stats_enqueue(cfs_rq, se);
-        __enqueue_entity(cfs_rq, se);
+        if (se != cfs_rq->curr)
+                __enqueue_entity(cfs_rq, se);
         account_entity_enqueue(cfs_rq, se);
 }
 
@@ -516,12 +506,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
                 }
         }
 #endif
-        if (likely(se != cfs_rq->curr))
+        if (se != cfs_rq->curr)
                 __dequeue_entity(cfs_rq, se);
-        else {
-                update_stats_curr_end(cfs_rq, se);
-                cfs_rq->curr = NULL;
-        }
         account_entity_dequeue(cfs_rq, se);
 }
 
@@ -539,15 +525,20 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                 resched_task(rq_of(cfs_rq)->curr);
 }
 
-static inline void
+static void
 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        /*
-         * Any task has to be enqueued before it get to execute on
-         * a CPU. So account for the time it spent waiting on the
-         * runqueue.
-         */
-        update_stats_wait_end(cfs_rq, se);
+        /* 'current' is not kept within the tree. */
+        if (se->on_rq) {
+                /*
+                 * Any task has to be enqueued before it get to execute on
+                 * a CPU. So account for the time it spent waiting on the
+                 * runqueue.
+                 */
+                update_stats_wait_end(cfs_rq, se);
+                __dequeue_entity(cfs_rq, se);
+        }
+
         update_stats_curr_start(cfs_rq, se);
         cfs_rq->curr = se;
 #ifdef CONFIG_SCHEDSTATS
@@ -568,10 +559,6 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
         struct sched_entity *se = __pick_next_entity(cfs_rq);
 
-        /* 'current' is not kept within the tree. */
-        if (se)
-                __dequeue_entity(cfs_rq, se);
-
         set_next_entity(cfs_rq, se);
 
         return se;
@@ -703,17 +690,12 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
-        int set_curr = 0;
-
-        /* Are we enqueuing the current task? */
-        if (unlikely(task_running(rq, p)))
-                set_curr = 1;
 
         for_each_sched_entity(se) {
                 if (se->on_rq)
                         break;
                 cfs_rq = cfs_rq_of(se);
-                enqueue_entity(cfs_rq, se, wakeup, set_curr);
+                enqueue_entity(cfs_rq, se, wakeup);
         }
 }
 
@@ -761,7 +743,7 @@ static void yield_task_fair(struct rq *rq)
          * position within the tree:
          */
         dequeue_entity(cfs_rq, se, 0);
-        enqueue_entity(cfs_rq, se, 0, 1);
+        enqueue_entity(cfs_rq, se, 0);
 
         return;
 }
@@ -1004,6 +986,19 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
                 resched_task(rq->curr);
 }
 
+/* Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_curr_task_fair(struct rq *rq)
+{
+        struct sched_entity *se = &rq->curr->se;
+
+        for_each_sched_entity(se)
+                set_next_entity(cfs_rq_of(se), se);
+}
+
 /*
  * All the scheduling class methods:
  */
@@ -1019,6 +1014,7 @@ struct sched_class fair_sched_class __read_mostly = {
 
         .load_balance = load_balance_fair,
 
+        .set_curr_task = set_curr_task_fair,
         .task_tick = task_tick_fair,
         .task_new = task_new_fair,
 };
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 3503fb2d9f96..5ebf829cdd73 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -50,6 +50,10 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
 
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -66,6 +70,7 @@ static struct sched_class idle_sched_class __read_mostly = {
 
         .load_balance = load_balance_idle,
 
+        .set_curr_task = set_curr_task_idle,
         .task_tick = task_tick_idle,
         /* no .task_new for idle tasks */
 };
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3c77c03bdf1e..e1d5f1c8b532 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -218,6 +218,13 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
         }
 }
 
+static void set_curr_task_rt(struct rq *rq)
+{
+        struct task_struct *p = rq->curr;
+
+        p->se.exec_start = rq->clock;
+}
+
 static struct sched_class rt_sched_class __read_mostly = {
         .enqueue_task = enqueue_task_rt,
         .dequeue_task = dequeue_task_rt,
@@ -230,5 +237,6 @@ static struct sched_class rt_sched_class __read_mostly = {
 
         .load_balance = load_balance_rt,
 
+        .set_curr_task = set_curr_task_rt,
         .task_tick = task_tick_rt,
 };