about summary refs log tree commit diff stats
path: root/kernel/sched_debug.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-11-15 18:47:00 -0500
committerIngo Molnar <mingo@elte.hu>2010-11-18 07:27:46 -0500
commit2069dd75c7d0f49355939e5586daf5a9ab216db7 (patch)
treec221747420e47b194a2a634024438a55420224d5 /kernel/sched_debug.c
parent48c5ccae88dcd989d9de507e8510313c6cbd352b (diff)
sched: Rewrite tg_shares_up)
By tracking a per-cpu load-avg for each cfs_rq and folding it into a global task_group load on each tick we can rework tg_shares_up to be strictly per-cpu.

This should improve cpu-cgroup performance for smp systems significantly.

[ Paul: changed to use queueing cfs_rq + bug fixes ]

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.580480400@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--kernel/sched_debug.c15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 2e1b0d17dd9b..e6590e7312e8 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -202,15 +202,22 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	spread0 = min_vruntime - rq0_min_vruntime;
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
 			SPLIT_NS(spread0));
-	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
-	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
+	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
+	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
+			SPLIT_NS(cfs_rq->load_avg));
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
+			SPLIT_NS(cfs_rq->load_period));
+	SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
+			cfs_rq->load_contribution);
+	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
+			atomic_read(&tg->load_weight));
 #endif
+
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }