aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c16
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7a5eb2620785..fd32b78c123c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2519,18 +2519,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2519 if (unlikely(task_running(rq, p))) 2519 if (unlikely(task_running(rq, p)))
2520 goto out_activate; 2520 goto out_activate;
2521 2521
2522 /* 2522 p->sched_contributes_to_load = !!task_contributes_to_load(p);
2523 * In order to handle concurrent wakeups and release the rq->lock
2524 * we put the task in TASK_WAKING state.
2525 *
2526 * First fix up the nr_uninterruptible count:
2527 */
2528 if (task_contributes_to_load(p)) {
2529 if (likely(cpu_online(orig_cpu)))
2530 rq->nr_uninterruptible--;
2531 else
2532 this_rq()->nr_uninterruptible--;
2533 }
2534 p->state = TASK_WAKING; 2523 p->state = TASK_WAKING;
2535 2524
2536 if (p->sched_class->task_waking) { 2525 if (p->sched_class->task_waking) {
@@ -2555,6 +2544,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2555 WARN_ON(task_cpu(p) != cpu); 2544 WARN_ON(task_cpu(p) != cpu);
2556 WARN_ON(p->state != TASK_WAKING); 2545 WARN_ON(p->state != TASK_WAKING);
2557 2546
2547 if (p->sched_contributes_to_load)
2548 rq->nr_uninterruptible--;
2549
2558out_activate: 2550out_activate:
2559#endif /* CONFIG_SMP */ 2551#endif /* CONFIG_SMP */
2560 ttwu_activate(rq, p, en_flags); 2552 ttwu_activate(rq, p, en_flags);