author		Paul Turner <pjt@google.com>	2010-11-15 18:47:08 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 07:27:49 -0500
commit		3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37 (patch)
tree		6839bc473200dcb69c5de998921684ac569ce18b /kernel/sched.c
parent		c66eaf619c0c7937e9ded160ae83b5a7a6b19b56 (diff)
sched: Implement demand based update_cfs_load()
When the system is busy, dilation of rq->next_balance makes lb->update_shares()
insufficiently frequent for threads which don't sleep (no dequeue/enqueue
updates). Adjust for this by making demand-based updates, triggered once the
accumulated execution time is sufficient to wrap our averaging window.
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.291159744@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	9
1 files changed, 8 insertions, 1 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index dadab4d13875..e914a716e1d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -353,9 +353,16 @@ struct cfs_rq {
 	 */
 	unsigned long h_load;
 
+	/*
+	 * Maintaining per-cpu shares distribution for group scheduling
+	 *
+	 * load_stamp is the last time we updated the load average
+	 * load_last is the last time we updated the load average and saw load
+	 * load_unacc_exec_time is currently unaccounted execution time
+	 */
 	u64 load_avg;
 	u64 load_period;
-	u64 load_stamp, load_last;
+	u64 load_stamp, load_last, load_unacc_exec_time;
 
 	unsigned long load_contribution;
 #endif
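
To make the mechanism in the commit message concrete, below is a minimal, self-contained sketch of the demand-based trigger: execution time accumulates in a field like load_unacc_exec_time, and once it is enough to wrap the averaging window, the load average is refreshed without waiting for a dequeue/enqueue or a load-balance pass. This is not the scheduler code itself (the consumer of the new field lives outside the diffstat shown above); the toy_* names, the 10ms window, and the halving decay are illustrative assumptions only.

/*
 * Toy model of the demand-based update pattern; assumed names and
 * constants, not the actual sched_fair.c change in this series.
 */
#include <stdio.h>
#include <stdint.h>

#define AVG_WINDOW_NS (10ULL * 1000 * 1000)	/* assumed ~10ms averaging window */

struct toy_cfs_rq {
	uint64_t load_avg;		/* decayed load sum */
	uint64_t load_period;		/* time span covered by load_avg */
	uint64_t load_stamp;		/* last time the average was updated */
	uint64_t load_unacc_exec_time;	/* exec time not yet folded in */
	unsigned long weight;		/* current runnable weight */
};

/* Fold the elapsed time into the running average (simplified decay). */
static void toy_update_load(struct toy_cfs_rq *cfs_rq, uint64_t now)
{
	uint64_t delta = now - cfs_rq->load_stamp;

	cfs_rq->load_stamp = now;
	cfs_rq->load_period += delta;
	cfs_rq->load_avg += delta * cfs_rq->weight;
	cfs_rq->load_unacc_exec_time = 0;

	/* Age the sums once they span more than one window. */
	while (cfs_rq->load_period > AVG_WINDOW_NS) {
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}
}

/*
 * Called from execution-time accounting for a thread that never sleeps:
 * trigger an update once enough unaccounted execution time has piled up
 * to wrap the averaging window.
 */
static void toy_account_exec(struct toy_cfs_rq *cfs_rq, uint64_t now,
			     uint64_t delta_exec)
{
	cfs_rq->load_unacc_exec_time += delta_exec;
	if (cfs_rq->load_unacc_exec_time > AVG_WINDOW_NS)
		toy_update_load(cfs_rq, now);
}

int main(void)
{
	struct toy_cfs_rq rq = { .weight = 1024 };
	uint64_t now = 0;
	int i;

	/* Simulate a CPU-bound thread ticking every 1ms for 100ms. */
	for (i = 0; i < 100; i++) {
		now += 1000000;
		toy_account_exec(&rq, now, 1000000);
	}
	printf("load_avg=%llu load_period=%llu\n",
	       (unsigned long long)rq.load_avg,
	       (unsigned long long)rq.load_period);
	return 0;
}

In this toy run the update fires roughly once per window of accumulated run time rather than on every tick, which is the trade-off the commit message describes: busy, never-sleeping threads still get their load averages refreshed at window granularity even when rq->next_balance has been dilated.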