diff options
author | Ingo Molnar <mingo@elte.hu> | 2007-08-09 05:16:49 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-08-09 05:16:49 -0400 |
commit | 2e1cb74a501c4b1bca5e55dabff24f267349193c (patch) | |
tree | 76b0daeb19f11526bb4834c31667c6d663d26f49 /kernel | |
parent | 69be72c13db0e9165796422b544f989033146171 (diff) |
sched: remove the 'u64 now' parameter from deactivate_task()
remove the 'u64 now' parameter from deactivate_task().
( identity transformation that causes no change in functionality. )
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 15 |
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 05ce3f54e815..2dc5d2f7b392 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -942,8 +942,7 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
942 | /* | 942 | /* |
943 | * deactivate_task - remove a task from the runqueue. | 943 | * deactivate_task - remove a task from the runqueue. |
944 | */ | 944 | */ |
945 | static void | 945 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) |
946 | deactivate_task(struct rq *rq, struct task_struct *p, int sleep, u64 now) | ||
947 | { | 946 | { |
948 | if (p->state == TASK_UNINTERRUPTIBLE) | 947 | if (p->state == TASK_UNINTERRUPTIBLE) |
949 | rq->nr_uninterruptible++; | 948 | rq->nr_uninterruptible++; |
@@ -2128,7 +2127,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
2128 | struct rq *this_rq, int this_cpu) | 2127 | struct rq *this_rq, int this_cpu) |
2129 | { | 2128 | { |
2130 | update_rq_clock(src_rq); | 2129 | update_rq_clock(src_rq); |
2131 | deactivate_task(src_rq, p, 0, src_rq->clock); | 2130 | deactivate_task(src_rq, p, 0); |
2132 | set_task_cpu(p, this_cpu); | 2131 | set_task_cpu(p, this_cpu); |
2133 | activate_task(this_rq, p, 0); | 2132 | activate_task(this_rq, p, 0); |
2134 | /* | 2133 | /* |
@@ -3458,7 +3457,7 @@ need_resched_nonpreemptible:
3458 | unlikely(signal_pending(prev)))) { | 3457 | unlikely(signal_pending(prev)))) { |
3459 | prev->state = TASK_RUNNING; | 3458 | prev->state = TASK_RUNNING; |
3460 | } else { | 3459 | } else { |
3461 | deactivate_task(rq, prev, 1, now); | 3460 | deactivate_task(rq, prev, 1); |
3462 | } | 3461 | } |
3463 | switch_count = &prev->nvcsw; | 3462 | switch_count = &prev->nvcsw; |
3464 | } | 3463 | } |
@@ -4228,7 +4227,7 @@ recheck:
4228 | on_rq = p->se.on_rq; | 4227 | on_rq = p->se.on_rq; |
4229 | if (on_rq) { | 4228 | if (on_rq) { |
4230 | update_rq_clock(rq); | 4229 | update_rq_clock(rq); |
4231 | deactivate_task(rq, p, 0, rq->clock); | 4230 | deactivate_task(rq, p, 0); |
4232 | } | 4231 | } |
4233 | oldprio = p->prio; | 4232 | oldprio = p->prio; |
4234 | __setscheduler(rq, p, policy, param->sched_priority); | 4233 | __setscheduler(rq, p, policy, param->sched_priority); |
@@ -4983,7 +4982,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4983 | on_rq = p->se.on_rq; | 4982 | on_rq = p->se.on_rq; |
4984 | if (on_rq) { | 4983 | if (on_rq) { |
4985 | update_rq_clock(rq_src); | 4984 | update_rq_clock(rq_src); |
4986 | deactivate_task(rq_src, p, 0, rq_src->clock); | 4985 | deactivate_task(rq_src, p, 0); |
4987 | } | 4986 | } |
4988 | set_task_cpu(p, dest_cpu); | 4987 | set_task_cpu(p, dest_cpu); |
4989 | if (on_rq) { | 4988 | if (on_rq) { |
@@ -5404,7 +5403,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5404 | /* Idle task back to normal (off runqueue, low prio) */ | 5403 | /* Idle task back to normal (off runqueue, low prio) */ |
5405 | rq = task_rq_lock(rq->idle, &flags); | 5404 | rq = task_rq_lock(rq->idle, &flags); |
5406 | update_rq_clock(rq); | 5405 | update_rq_clock(rq); |
5407 | deactivate_task(rq, rq->idle, 0, rq->clock); | 5406 | deactivate_task(rq, rq->idle, 0); |
5408 | rq->idle->static_prio = MAX_PRIO; | 5407 | rq->idle->static_prio = MAX_PRIO; |
5409 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | 5408 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
5410 | rq->idle->sched_class = &idle_sched_class; | 5409 | rq->idle->sched_class = &idle_sched_class; |
@@ -6644,7 +6643,7 @@ void normalize_rt_tasks(void)
6644 | on_rq = p->se.on_rq; | 6643 | on_rq = p->se.on_rq; |
6645 | if (on_rq) { | 6644 | if (on_rq) { |
6646 | update_rq_clock(task_rq(p)); | 6645 | update_rq_clock(task_rq(p)); |
6647 | deactivate_task(task_rq(p), p, 0, task_rq(p)->clock); | 6646 | deactivate_task(task_rq(p), p, 0); |
6648 | } | 6647 | } |
6649 | __setscheduler(rq, p, SCHED_NORMAL, 0); | 6648 | __setscheduler(rq, p, SCHED_NORMAL, 0); |
6650 | if (on_rq) { | 6649 | if (on_rq) { |