Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0112f63ad376..49f5b281c561 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -940,10 +940,9 @@ static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void
+deactivate_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
 {
-	u64 now = rq_clock(rq);
-
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible++;
 
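The hunk above is the core of the change: deactivate_task() stops sampling rq_clock() internally and instead takes the timestamp as a 'u64 now' parameter, so a caller that already holds a fresh reading can hand it down. A minimal userspace sketch of that refactoring pattern follows; fake_rq, sample_clock() and park_task() are hypothetical stand-ins for struct rq, rq_clock() and deactivate_task(), not kernel API.

/*
 * Hypothetical userspace sketch of the pattern applied above: the
 * callee takes the timestamp as a parameter instead of sampling the
 * clock itself. All names here are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct fake_rq {
	unsigned int nr_running;
};

/* Stand-in for rq_clock(): one monotonic nanosecond reading. */
static uint64_t sample_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Post-patch shape: 'now' is an argument, never a fresh clock read. */
static void park_task(struct fake_rq *rq, uint64_t now)
{
	rq->nr_running--;
	printf("dequeued at %llu ns, nr_running=%u\n",
	       (unsigned long long)now, rq->nr_running);
}

int main(void)
{
	struct fake_rq rq = { .nr_running = 2 };
	uint64_t now = sample_clock();	/* caller reads the clock once */

	park_task(&rq, now);		/* ...and hands the reading down */
	return 0;
}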
@@ -2122,7 +2121,7 @@ void sched_exec(void)
 static void pull_task(struct rq *src_rq, struct task_struct *p,
 		      struct rq *this_rq, int this_cpu)
 {
-	deactivate_task(src_rq, p, 0);
+	deactivate_task(src_rq, p, 0, rq_clock(src_rq));
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
 	/*
@@ -3446,13 +3445,14 @@ need_resched_nonpreemptible:
 
 	spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
+	now = __rq_clock(rq);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
 				unlikely(signal_pending(prev)))) {
 			prev->state = TASK_RUNNING;
 		} else {
-			deactivate_task(rq, prev, 1);
+			deactivate_task(rq, prev, 1, now);
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -3460,7 +3460,6 @@ need_resched_nonpreemptible:
 	if (unlikely(!rq->nr_running))
 		idle_balance(cpu, rq);
 
-	now = __rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev, now);
 	next = pick_next_task(rq, prev, now);
 
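These two schedule() hunks are where the new parameter pays off: the __rq_clock() read moves up to just after rq->lock is taken, and that single reading now feeds deactivate_task(), put_prev_task() and pick_next_task(). Previously, deactivating prev cost two clock reads per pass (one inside deactivate_task(), one before put_prev_task()). The self-contained toy below makes the saved read visible with a counter; every name in it is invented for illustration.

/*
 * Toy illustration (all names hypothetical): read the clock once up
 * front and share the value, instead of letting each consumer sample
 * it again. The counter shows how many reads actually happen.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int clock_reads;

/* Fake clock: counts every sample it serves. */
static uint64_t fake_clock(void)
{
	clock_reads++;
	return (uint64_t)clock_reads * 1000;
}

/* Post-patch consumers: all take 'now', none re-read the clock. */
static void dequeue_prev(uint64_t now) { (void)now; }
static void put_prev(uint64_t now)     { (void)now; }
static void pick_next(uint64_t now)    { (void)now; }

int main(void)
{
	uint64_t now = fake_clock();	/* the single hoisted read */

	dequeue_prev(now);
	put_prev(now);
	pick_next(now);
	printf("clock sampled %u time(s) for three consumers\n",
	       clock_reads);		/* prints 1, not 3 */
	return 0;
}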
@@ -4220,7 +4219,7 @@ recheck:
 	}
 	on_rq = p->se.on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		deactivate_task(rq, p, 0, rq_clock(rq));
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
 	if (on_rq) {
@@ -4973,7 +4972,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 
 	on_rq = p->se.on_rq;
 	if (on_rq)
-		deactivate_task(rq_src, p, 0);
+		deactivate_task(rq_src, p, 0, rq_clock(rq_src));
 	set_task_cpu(p, dest_cpu);
 	if (on_rq) {
 		activate_task(rq_dest, p, 0);
@@ -5387,7 +5386,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
-		deactivate_task(rq, rq->idle, 0);
+		deactivate_task(rq, rq->idle, 0, rq_clock(rq));
 		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
@@ -6626,7 +6625,7 @@ void normalize_rt_tasks(void)
 
 	on_rq = p->se.on_rq;
 	if (on_rq)
-		deactivate_task(task_rq(p), p, 0);
+		deactivate_task(task_rq(p), p, 0, rq_clock(task_rq(p)));
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
 		activate_task(task_rq(p), p, 0);
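The remaining hunks cover callers with no timestamp already in hand: pull_task(), the recheck: path in sched_setscheduler(), __migrate_task(), migration_call() and normalize_rt_tasks() all evaluate rq_clock() directly at the call site, as in deactivate_task(rq, p, 0, rq_clock(rq)). For these paths the patch is behaviour-neutral; only schedule(), which needs 'now' anyway for put_prev_task() and pick_next_task(), drops a redundant clock read.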