aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPaul Turner <pjt@google.com>2011-01-21 23:45:03 -0500
committerIngo Molnar <mingo@elte.hu>2011-01-26 06:33:19 -0500
commitf07333bf6ee66d9b49286cec4371cf375e745b7a (patch)
tree03a1e9968eadf2465b6fc04fb8c1ab67327ea0f2 /kernel/sched.c
parent6d5ab2932a21ea54406ab95c43ecff90a3eddfda (diff)
sched: Avoid expensive initial update_cfs_load()
Since cfs->{load_stamp,load_last} are zero-initialized, the initial load update will consider the delta to be 'since the beginning of time'. This results in a lot of pointless divisions to bring this large period to be within the sysctl_sched_shares_window. Fix this by initializing load_stamp to be 1 at cfs_rq initialization; this allows for an initial load_stamp > load_last, which then lets standard idle truncation proceed. We avoid spinning (and slightly improve consistency) by fixing delta to be [period - 1] in this path, resulting in a slightly more predictable shares ramp. (Previously the amount of idle time preserved by the overflow would range between [period/2, period-1].) Signed-off-by: Paul Turner <pjt@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20110122044852.102126037@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e0fa3ff7f194..6820b5b3a969 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7796,6 +7796,8 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 	INIT_LIST_HEAD(&cfs_rq->tasks);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
+	/* allow initial update_cfs_load() to truncate */
+	cfs_rq->load_stamp = 1;
 #endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 }