diff options
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bc37bb97159f..9e6ca0d88f51 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -7494,7 +7494,7 @@ static void task_fork_fair(struct task_struct *p) | |||
7494 | static void | 7494 | static void |
7495 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) | 7495 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) |
7496 | { | 7496 | { |
7497 | if (!p->on_rq) | 7497 | if (!task_on_rq_queued(p)) |
7498 | return; | 7498 | return; |
7499 | 7499 | ||
7500 | /* | 7500 | /* |
@@ -7519,11 +7519,11 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) | |||
7519 | * switched back to the fair class the enqueue_entity(.flags=0) will | 7519 | * switched back to the fair class the enqueue_entity(.flags=0) will |
7520 | * do the right thing. | 7520 | * do the right thing. |
7521 | * | 7521 | * |
7522 | * If it's on_rq, then the dequeue_entity(.flags=0) will already | 7522 | * If it's queued, then the dequeue_entity(.flags=0) will already |
7523 | * have normalized the vruntime, if it's !on_rq, then only when | 7523 | * have normalized the vruntime, if it's !queued, then only when |
7524 | * the task is sleeping will it still have non-normalized vruntime. | 7524 | * the task is sleeping will it still have non-normalized vruntime. |
7525 | */ | 7525 | */ |
7526 | if (!p->on_rq && p->state != TASK_RUNNING) { | 7526 | if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) { |
7527 | /* | 7527 | /* |
7528 | * Fix up our vruntime so that the current sleep doesn't | 7528 | * Fix up our vruntime so that the current sleep doesn't |
7529 | * cause 'unlimited' sleep bonus. | 7529 | * cause 'unlimited' sleep bonus. |
@@ -7558,7 +7558,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p) | |||
7558 | */ | 7558 | */ |
7559 | se->depth = se->parent ? se->parent->depth + 1 : 0; | 7559 | se->depth = se->parent ? se->parent->depth + 1 : 0; |
7560 | #endif | 7560 | #endif |
7561 | if (!p->on_rq) | 7561 | if (!task_on_rq_queued(p)) |
7562 | return; | 7562 | return; |
7563 | 7563 | ||
7564 | /* | 7564 | /* |
@@ -7604,7 +7604,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) | |||
7604 | } | 7604 | } |
7605 | 7605 | ||
7606 | #ifdef CONFIG_FAIR_GROUP_SCHED | 7606 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7607 | static void task_move_group_fair(struct task_struct *p, int on_rq) | 7607 | static void task_move_group_fair(struct task_struct *p, int queued) |
7608 | { | 7608 | { |
7609 | struct sched_entity *se = &p->se; | 7609 | struct sched_entity *se = &p->se; |
7610 | struct cfs_rq *cfs_rq; | 7610 | struct cfs_rq *cfs_rq; |
@@ -7623,7 +7623,7 @@ static void task_move_group_fair(struct task_struct *p, int on_rq) | |||
7623 | * fair sleeper stuff for the first placement, but who cares. | 7623 | * fair sleeper stuff for the first placement, but who cares. |
7624 | */ | 7624 | */ |
7625 | /* | 7625 | /* |
7626 | * When !on_rq, vruntime of the task has usually NOT been normalized. | 7626 | * When !queued, vruntime of the task has usually NOT been normalized. |
7627 | * But there are some cases where it has already been normalized: | 7627 | * But there are some cases where it has already been normalized: |
7628 | * | 7628 | * |
7629 | * - Moving a forked child which is waiting for being woken up by | 7629 | * - Moving a forked child which is waiting for being woken up by |
@@ -7634,14 +7634,14 @@ static void task_move_group_fair(struct task_struct *p, int on_rq) | |||
7634 | * To prevent boost or penalty in the new cfs_rq caused by delta | 7634 | * To prevent boost or penalty in the new cfs_rq caused by delta |
7635 | * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. | 7635 | * min_vruntime between the two cfs_rqs, we skip vruntime adjustment. |
7636 | */ | 7636 | */ |
7637 | if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING)) | 7637 | if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING)) |
7638 | on_rq = 1; | 7638 | queued = 1; |
7639 | 7639 | ||
7640 | if (!on_rq) | 7640 | if (!queued) |
7641 | se->vruntime -= cfs_rq_of(se)->min_vruntime; | 7641 | se->vruntime -= cfs_rq_of(se)->min_vruntime; |
7642 | set_task_rq(p, task_cpu(p)); | 7642 | set_task_rq(p, task_cpu(p)); |
7643 | se->depth = se->parent ? se->parent->depth + 1 : 0; | 7643 | se->depth = se->parent ? se->parent->depth + 1 : 0; |
7644 | if (!on_rq) { | 7644 | if (!queued) { |
7645 | cfs_rq = cfs_rq_of(se); | 7645 | cfs_rq = cfs_rq_of(se); |
7646 | se->vruntime += cfs_rq->min_vruntime; | 7646 | se->vruntime += cfs_rq->min_vruntime; |
7647 | #ifdef CONFIG_SMP | 7647 | #ifdef CONFIG_SMP |