diff options
author | Kirill Tkhai <ktkhai@parallels.com> | 2014-12-15 06:56:58 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-01-14 07:34:16 -0500 |
commit | bb04159df99fa353d0fb524574aca03ce2c6515b (patch) | |
tree | 1ea21466b45395836fb0fd6abb6398eb012b135b | |
parent | 1f8a7633094b7886c0677b78ba60b82e501f3ce6 (diff) |
sched/fair: Fix sched_entity::avg::decay_count initialization
Child has the same decay_count as parent. If it's not zero,
we add it to parent's cfs_rq->removed_load:
wake_up_new_task()->set_task_cpu()->migrate_task_rq_fair().
Child's load is just garbage after copying of the parent,
it hasn't been on cfs_rq yet, and it must not be added to
cfs_rq::removed_load in migrate_task_rq_fair().
The patch moves sched_entity::avg::decay_count initialization
to sched_fork(). So, migrate_task_rq_fair() does not change
removed_load.
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ben Segall <bsegall@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1418644618.6074.13.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/sched/core.c | 3 | ||||
-rw-r--r-- | kernel/sched/fair.c | 1 |
2 files changed, 3 insertions, 1 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 816c17203c16..95ac795ab3d3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1832,6 +1832,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) | |||
1832 | p->se.prev_sum_exec_runtime = 0; | 1832 | p->se.prev_sum_exec_runtime = 0; |
1833 | p->se.nr_migrations = 0; | 1833 | p->se.nr_migrations = 0; |
1834 | p->se.vruntime = 0; | 1834 | p->se.vruntime = 0; |
1835 | #ifdef CONFIG_SMP | ||
1836 | p->se.avg.decay_count = 0; | ||
1837 | #endif | ||
1835 | INIT_LIST_HEAD(&p->se.group_node); | 1838 | INIT_LIST_HEAD(&p->se.group_node); |
1836 | 1839 | ||
1837 | #ifdef CONFIG_SCHEDSTATS | 1840 | #ifdef CONFIG_SCHEDSTATS |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 97000a99a293..2a0b302e51de 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p) | |||
676 | { | 676 | { |
677 | u32 slice; | 677 | u32 slice; |
678 | 678 | ||
679 | p->se.avg.decay_count = 0; | ||
680 | slice = sched_slice(task_cfs_rq(p), &p->se) >> 10; | 679 | slice = sched_slice(task_cfs_rq(p), &p->se) >> 10; |
681 | p->se.avg.runnable_avg_sum = slice; | 680 | p->se.avg.runnable_avg_sum = slice; |
682 | p->se.avg.runnable_avg_period = slice; | 681 | p->se.avg.runnable_avg_period = slice; |