aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorKen Chen <kenchen@google.com>2007-10-18 15:32:56 -0400
committerIngo Molnar <mingo@elte.hu>2007-10-18 15:32:56 -0400
commit480b9434c542ddf2833aaed3dabba71bc0b787b5 (patch)
tree78c2638ac583cc57165ee1393ebbbbbe367f46fb /kernel
parentcc4ea79588e688ea9b1161650979a194dd709169 (diff)
sched: reduce schedstat variable overhead a bit
schedstat is useful in investigating CPU scheduler behavior. Ideally, I think it is beneficial to have it on all the time. However, the cost of turning it on in a production system is quite high, largely due to the number of events it collects and also due to its large memory footprint. Most of the fields probably don't need to be full 64-bit on 64-bit arch. Rolling over 4 billion events will most likely take a long time, and the user-space tool can be made to accommodate that. I'm proposing the kernel cut back most of the variable widths on 64-bit systems. (Note: the following patch doesn't affect 32-bit systems.) Signed-off-by: Ken Chen <kenchen@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c20
-rw-r--r--kernel/sched_debug.c2
-rw-r--r--kernel/sched_stats.h8
3 files changed, 14 insertions, 16 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index b19cc5b79e26..b60f8a5ae2be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -332,22 +332,22 @@ struct rq {
332 struct sched_info rq_sched_info; 332 struct sched_info rq_sched_info;
333 333
334 /* sys_sched_yield() stats */ 334 /* sys_sched_yield() stats */
335 unsigned long yld_exp_empty; 335 unsigned int yld_exp_empty;
336 unsigned long yld_act_empty; 336 unsigned int yld_act_empty;
337 unsigned long yld_both_empty; 337 unsigned int yld_both_empty;
338 unsigned long yld_count; 338 unsigned int yld_count;
339 339
340 /* schedule() stats */ 340 /* schedule() stats */
341 unsigned long sched_switch; 341 unsigned int sched_switch;
342 unsigned long sched_count; 342 unsigned int sched_count;
343 unsigned long sched_goidle; 343 unsigned int sched_goidle;
344 344
345 /* try_to_wake_up() stats */ 345 /* try_to_wake_up() stats */
346 unsigned long ttwu_count; 346 unsigned int ttwu_count;
347 unsigned long ttwu_local; 347 unsigned int ttwu_local;
348 348
349 /* BKL stats */ 349 /* BKL stats */
350 unsigned long bkl_count; 350 unsigned int bkl_count;
351#endif 351#endif
352 struct lock_class_key rq_lock_key; 352 struct lock_class_key rq_lock_key;
353}; 353};
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index a5e517ec07c3..e6fb392e5164 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -137,7 +137,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
137 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running); 137 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
138 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); 138 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
139#ifdef CONFIG_SCHEDSTATS 139#ifdef CONFIG_SCHEDSTATS
140 SEQ_printf(m, " .%-30s: %ld\n", "bkl_count", 140 SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
141 rq->bkl_count); 141 rq->bkl_count);
142#endif 142#endif
143 SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over", 143 SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 1c084842c3e7..ef1a7df80ea2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -21,7 +21,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
21 21
22 /* runqueue-specific stats */ 22 /* runqueue-specific stats */
23 seq_printf(seq, 23 seq_printf(seq,
24 "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu", 24 "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
25 cpu, rq->yld_both_empty, 25 cpu, rq->yld_both_empty,
26 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, 26 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
27 rq->sched_switch, rq->sched_count, rq->sched_goidle, 27 rq->sched_switch, rq->sched_count, rq->sched_goidle,
@@ -42,8 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
42 seq_printf(seq, "domain%d %s", dcount++, mask_str); 42 seq_printf(seq, "domain%d %s", dcount++, mask_str);
43 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; 43 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
44 itype++) { 44 itype++) {
45 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu " 45 seq_printf(seq, " %u %u %u %u %u %u %u %u",
46 "%lu",
47 sd->lb_count[itype], 46 sd->lb_count[itype],
48 sd->lb_balanced[itype], 47 sd->lb_balanced[itype],
49 sd->lb_failed[itype], 48 sd->lb_failed[itype],
@@ -53,8 +52,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
53 sd->lb_nobusyq[itype], 52 sd->lb_nobusyq[itype],
54 sd->lb_nobusyg[itype]); 53 sd->lb_nobusyg[itype]);
55 } 54 }
56 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu" 55 seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
57 " %lu %lu %lu\n",
58 sd->alb_count, sd->alb_failed, sd->alb_pushed, 56 sd->alb_count, sd->alb_failed, sd->alb_pushed,
59 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, 57 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
60 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, 58 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,