about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-11-15 18:47:00 -0500
committerIngo Molnar <mingo@elte.hu>2010-11-18 07:27:46 -0500
commit2069dd75c7d0f49355939e5586daf5a9ab216db7 (patch)
treec221747420e47b194a2a634024438a55420224d5 /include/linux
parent48c5ccae88dcd989d9de507e8510313c6cbd352b (diff)
sched: Rewrite tg_shares_up()
By tracking a per-cpu load-avg for each cfs_rq and folding it into a global task_group load on each tick we can rework tg_shares_up to be strictly per-cpu. This should improve cpu-cgroup performance for smp systems significantly. [ Paul: changed to use queueing cfs_rq + bug fixes ] Signed-off-by: Paul Turner <pjt@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20101115234937.580480400@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/sched.h2
1 file changed, 0 insertions, 2 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 29d953abb5ad..8abb8aa59664 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1885,8 +1885,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
1885extern unsigned int sysctl_sched_latency; 1885extern unsigned int sysctl_sched_latency;
1886extern unsigned int sysctl_sched_min_granularity; 1886extern unsigned int sysctl_sched_min_granularity;
1887extern unsigned int sysctl_sched_wakeup_granularity; 1887extern unsigned int sysctl_sched_wakeup_granularity;
1888extern unsigned int sysctl_sched_shares_ratelimit;
1889extern unsigned int sysctl_sched_shares_thresh;
1890extern unsigned int sysctl_sched_child_runs_first; 1888extern unsigned int sysctl_sched_child_runs_first;
1891 1889
1892enum sched_tunable_scaling { 1890enum sched_tunable_scaling {