 kernel/sched.c | 19
 1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f5d52949990..9ccd91e5b65b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -910,8 +910,6 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-        update_rq_clock(rq);
-
         if (p->state == TASK_UNINTERRUPTIBLE)
                 rq->nr_uninterruptible--;
 
@@ -1510,6 +1508,7 @@ out_set_cpu:
 
 out_activate:
 #endif /* CONFIG_SMP */
+        update_rq_clock(rq);
         activate_task(rq, p, 1);
         /*
          * Sync wakeups (i.e. those types of wakeups where the waker
@@ -2117,6 +2116,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
         update_rq_clock(src_rq);
         deactivate_task(src_rq, p, 0);
         set_task_cpu(p, this_cpu);
+        __update_rq_clock(this_rq);
         activate_task(this_rq, p, 0);
         /*
          * Note that idle threads have a prio of MAX_PRIO, for this test
@@ -4207,11 +4207,10 @@ recheck:
                 spin_unlock_irqrestore(&p->pi_lock, flags);
                 goto recheck;
         }
+        update_rq_clock(rq);
         on_rq = p->se.on_rq;
-        if (on_rq) {
-                update_rq_clock(rq);
+        if (on_rq)
                 deactivate_task(rq, p, 0);
-        }
         oldprio = p->prio;
         __setscheduler(rq, p, policy, param->sched_priority);
         if (on_rq) {
@@ -4969,6 +4968,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         }
         set_task_cpu(p, dest_cpu);
         if (on_rq) {
+                update_rq_clock(rq_dest);
                 activate_task(rq_dest, p, 0);
                 check_preempt_curr(rq_dest, p);
         }
@@ -6623,14 +6623,13 @@ void normalize_rt_tasks(void)
                         goto out_unlock;
 #endif
 
+                update_rq_clock(rq);
                 on_rq = p->se.on_rq;
-                if (on_rq) {
-                        update_rq_clock(task_rq(p));
-                        deactivate_task(task_rq(p), p, 0);
-                }
+                if (on_rq)
+                        deactivate_task(rq, p, 0);
                 __setscheduler(rq, p, SCHED_NORMAL, 0);
                 if (on_rq) {
-                        activate_task(task_rq(p), p, 0);
+                        activate_task(rq, p, 0);
                         resched_task(rq->curr);
                 }
 #ifdef CONFIG_SMP
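What the hunks above add up to: activate_task() no longer refreshes the runqueue clock itself, so each call site now calls update_rq_clock() (or __update_rq_clock() in pull_task()) on the target rq just before activating the task. Below is a minimal, self-contained sketch of that caller-updates-the-clock convention; struct rq, update_rq_clock() and activate_task() here are simplified stand-ins, not the real sched.c definitions.

/*
 * Toy model (not kernel code) of the convention this patch establishes:
 * the clock update is hoisted out of activate_task() into its callers.
 */
#include <stdio.h>
#include <time.h>

struct rq {
        unsigned long long clock;       /* stand-in for rq->clock */
        int nr_running;
};

static void update_rq_clock(struct rq *rq)
{
        /* the kernel derives this from sched_clock(); a stub here */
        rq->clock = (unsigned long long)clock();
}

static void activate_task(struct rq *rq, int wakeup)
{
        /* after this patch: no update_rq_clock() call in here */
        (void)wakeup;
        rq->nr_running++;
}

int main(void)
{
        struct rq rq = { 0, 0 };

        update_rq_clock(&rq);   /* caller refreshes the clock first ... */
        activate_task(&rq, 1);  /* ... then enqueues the task */

        printf("clock=%llu nr_running=%d\n", rq.clock, rq.nr_running);
        return 0;
}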