about summary refs log tree commit diff stats
path: root/kernel/sched_debug.c
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
commitada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree644b88f8a71896307d71438e9b3af49126ffb22b /kernel/sched_debug.c
parent43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master [archived-private-master]
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--  kernel/sched_debug.c  27
1 file changed, 18 insertions, 9 deletions
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index efb84409bc43..19be00ba6123 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
114 { 114 {
115 char path[64]; 115 char path[64];
116 116
117 rcu_read_lock();
117 cgroup_path(task_group(p)->css.cgroup, path, sizeof(path)); 118 cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
119 rcu_read_unlock();
118 SEQ_printf(m, " %s", path); 120 SEQ_printf(m, " %s", path);
119 } 121 }
120#endif 122#endif
@@ -184,7 +186,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
184 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", 186 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
185 SPLIT_NS(cfs_rq->exec_clock)); 187 SPLIT_NS(cfs_rq->exec_clock));
186 188
187 spin_lock_irqsave(&rq->lock, flags); 189 raw_spin_lock_irqsave(&rq->lock, flags);
188 if (cfs_rq->rb_leftmost) 190 if (cfs_rq->rb_leftmost)
189 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime; 191 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
190 last = __pick_last_entity(cfs_rq); 192 last = __pick_last_entity(cfs_rq);
@@ -192,7 +194,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
192 max_vruntime = last->vruntime; 194 max_vruntime = last->vruntime;
193 min_vruntime = cfs_rq->min_vruntime; 195 min_vruntime = cfs_rq->min_vruntime;
194 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; 196 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
195 spin_unlock_irqrestore(&rq->lock, flags); 197 raw_spin_unlock_irqrestore(&rq->lock, flags);
196 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", 198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
197 SPLIT_NS(MIN_vruntime)); 199 SPLIT_NS(MIN_vruntime));
198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", 200 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
@@ -285,12 +287,16 @@ static void print_cpu(struct seq_file *m, int cpu)
285 287
286#ifdef CONFIG_SCHEDSTATS 288#ifdef CONFIG_SCHEDSTATS
287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 289#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
290#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
288 291
289 P(yld_count); 292 P(yld_count);
290 293
291 P(sched_switch); 294 P(sched_switch);
292 P(sched_count); 295 P(sched_count);
293 P(sched_goidle); 296 P(sched_goidle);
297#ifdef CONFIG_SMP
298 P64(avg_idle);
299#endif
294 300
295 P(ttwu_count); 301 P(ttwu_count);
296 P(ttwu_local); 302 P(ttwu_local);
@@ -305,6 +311,12 @@ static void print_cpu(struct seq_file *m, int cpu)
305 print_rq(m, rq, cpu); 311 print_rq(m, rq, cpu);
306} 312}
307 313
314static const char *sched_tunable_scaling_names[] = {
315 "none",
316 "logaritmic",
317 "linear"
318};
319
308static int sched_debug_show(struct seq_file *m, void *v) 320static int sched_debug_show(struct seq_file *m, void *v)
309{ 321{
310 u64 now = ktime_to_ns(ktime_get()); 322 u64 now = ktime_to_ns(ktime_get());
@@ -330,6 +342,10 @@ static int sched_debug_show(struct seq_file *m, void *v)
330#undef PN 342#undef PN
331#undef P 343#undef P
332 344
345 SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
346 sysctl_sched_tunable_scaling,
347 sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
348
333 for_each_online_cpu(cpu) 349 for_each_online_cpu(cpu)
334 print_cpu(m, cpu); 350 print_cpu(m, cpu);
335 351
@@ -395,7 +411,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
395 PN(se.sum_exec_runtime); 411 PN(se.sum_exec_runtime);
396 PN(se.avg_overlap); 412 PN(se.avg_overlap);
397 PN(se.avg_wakeup); 413 PN(se.avg_wakeup);
398 PN(se.avg_running);
399 414
400 nr_switches = p->nvcsw + p->nivcsw; 415 nr_switches = p->nvcsw + p->nivcsw;
401 416
@@ -419,7 +434,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
419 P(se.nr_failed_migrations_running); 434 P(se.nr_failed_migrations_running);
420 P(se.nr_failed_migrations_hot); 435 P(se.nr_failed_migrations_hot);
421 P(se.nr_forced_migrations); 436 P(se.nr_forced_migrations);
422 P(se.nr_forced2_migrations);
423 P(se.nr_wakeups); 437 P(se.nr_wakeups);
424 P(se.nr_wakeups_sync); 438 P(se.nr_wakeups_sync);
425 P(se.nr_wakeups_migrate); 439 P(se.nr_wakeups_migrate);
@@ -495,7 +509,6 @@ void proc_sched_set_task(struct task_struct *p)
495 p->se.nr_failed_migrations_running = 0; 509 p->se.nr_failed_migrations_running = 0;
496 p->se.nr_failed_migrations_hot = 0; 510 p->se.nr_failed_migrations_hot = 0;
497 p->se.nr_forced_migrations = 0; 511 p->se.nr_forced_migrations = 0;
498 p->se.nr_forced2_migrations = 0;
499 p->se.nr_wakeups = 0; 512 p->se.nr_wakeups = 0;
500 p->se.nr_wakeups_sync = 0; 513 p->se.nr_wakeups_sync = 0;
501 p->se.nr_wakeups_migrate = 0; 514 p->se.nr_wakeups_migrate = 0;
@@ -507,8 +520,4 @@ void proc_sched_set_task(struct task_struct *p)
507 p->se.nr_wakeups_idle = 0; 520 p->se.nr_wakeups_idle = 0;
508 p->sched_info.bkl_count = 0; 521 p->sched_info.bkl_count = 0;
509#endif 522#endif
510 p->se.sum_exec_runtime = 0;
511 p->se.prev_sum_exec_runtime = 0;
512 p->nvcsw = 0;
513 p->nivcsw = 0;
514} 523}