author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-11-15 18:47:00 -0500
committer  Ingo Molnar <mingo@elte.hu>                2010-11-18 07:27:46 -0500
commit     2069dd75c7d0f49355939e5586daf5a9ab216db7 (patch)
tree       c221747420e47b194a2a634024438a55420224d5 /include
parent     48c5ccae88dcd989d9de507e8510313c6cbd352b (diff)
sched: Rewrite tg_shares_up()
By tracking a per-cpu load-avg for each cfs_rq and folding it into a
global task_group load on each tick we can rework tg_shares_up to be
strictly per-cpu.
This should improve cpu-cgroup performance for smp systems
significantly.
[ Paul: changed to use queueing cfs_rq + bug fixes ]
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.580480400@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
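
The core idea of the commit can be illustrated in isolation: each CPU keeps
its own load average and, on each tick, folds only the delta since its last
fold into a single global task-group load, so computing a CPU's share never
requires walking every other CPU. The following is a minimal userspace C
sketch of that scheme, not the kernel implementation; all names here
(cpu_load, cpu_load_contrib, tg_load, fold_cpu_load, cpu_shares) are
invented for the example.

/*
 * Sketch: per-cpu load folded into a global task-group load.
 * Each "tick" publishes only the local delta, keeping the update
 * strictly per-cpu; readers see an approximate global sum.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static long cpu_load[NR_CPUS];          /* per-cpu load average */
static long cpu_load_contrib[NR_CPUS];  /* portion already folded in */
static atomic_long tg_load;             /* global task-group load */
static long tg_shares = 1024;           /* total shares for the group */

/* Called from a CPU's tick: fold only the local delta into tg_load. */
static void fold_cpu_load(int cpu)
{
    long delta = cpu_load[cpu] - cpu_load_contrib[cpu];

    if (delta) {
        atomic_fetch_add(&tg_load, delta);
        cpu_load_contrib[cpu] = cpu_load[cpu];
    }
}

/* A CPU's share: its fraction of the group's (approximate) global load. */
static long cpu_shares(int cpu)
{
    long load = atomic_load(&tg_load);

    if (load <= 0)
        return tg_shares;
    return tg_shares * cpu_load[cpu] / load;
}

int main(void)
{
    long loads[NR_CPUS] = { 300, 100, 0, 600 };

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        cpu_load[cpu] = loads[cpu];
        fold_cpu_load(cpu);             /* simulate a tick on each cpu */
    }
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu%d shares = %ld\n", cpu, cpu_shares(cpu));
    return 0;
}

Because each CPU compares against its own cpu_load_contrib, the global sum
stays consistent without any cross-CPU locking; the trade-off, as in the
commit, is that tg_load is only an approximation between folds.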
Diffstat (limited to 'include')
-rw-r--r--  include/linux/sched.h | 2 --
1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 29d953abb5ad..8abb8aa59664 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1885,8 +1885,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
 extern unsigned int sysctl_sched_child_runs_first;
 
 enum sched_tunable_scaling {