author     Robert Richter <robert.richter@amd.com>  2010-06-04 05:33:10 -0400
committer  Robert Richter <robert.richter@amd.com>  2010-06-04 05:33:10 -0400
commit     d8a382d2662822248a97ce9d670b90e68aefbd3a (patch)
tree       4f5bbd5d0a5881ed42de611402ea4ac2c6d6ff48 /kernel/sched_debug.c
parent     45c34e05c4e3d36e7c44e790241ea11a1d90d54e (diff)
parent     c6df8d5ab87a246942d138321e1721edbb69f6e1 (diff)
Merge remote branch 'tip/perf/urgent' into oprofile/urgent
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--  kernel/sched_debug.c | 118
1 file changed, 40 insertions(+), 78 deletions(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 19be00ba6123..35565395d00d 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -70,16 +70,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
         PN(se->vruntime);
         PN(se->sum_exec_runtime);
 #ifdef CONFIG_SCHEDSTATS
-        PN(se->wait_start);
-        PN(se->sleep_start);
-        PN(se->block_start);
-        PN(se->sleep_max);
-        PN(se->block_max);
-        PN(se->exec_max);
-        PN(se->slice_max);
-        PN(se->wait_max);
-        PN(se->wait_sum);
-        P(se->wait_count);
+        PN(se->statistics.wait_start);
+        PN(se->statistics.sleep_start);
+        PN(se->statistics.block_start);
+        PN(se->statistics.sleep_max);
+        PN(se->statistics.block_max);
+        PN(se->statistics.exec_max);
+        PN(se->statistics.slice_max);
+        PN(se->statistics.wait_max);
+        PN(se->statistics.wait_sum);
+        P(se->statistics.wait_count);
 #endif
         P(se->load.weight);
 #undef PN
@@ -104,7 +104,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
         SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                 SPLIT_NS(p->se.vruntime),
                 SPLIT_NS(p->se.sum_exec_runtime),
-                SPLIT_NS(p->se.sum_sleep_runtime));
+                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
 #else
         SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
                 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -175,11 +175,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
         task_group_path(tg, path, sizeof(path));
 
         SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
-#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-        {
-                uid_t uid = cfs_rq->tg->uid;
-                SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
-        }
 #else
         SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
 #endif
@@ -386,15 +381,9 @@ __initcall(init_sched_debug_procfs);
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
         unsigned long nr_switches;
-        unsigned long flags;
-        int num_threads = 1;
-
-        if (lock_task_sighand(p, &flags)) {
-                num_threads = atomic_read(&p->signal->count);
-                unlock_task_sighand(p, &flags);
-        }
 
-        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
+        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+                get_nr_threads(p));
         SEQ_printf(m,
                 "---------------------------------------------------------\n");
 #define __P(F) \
@@ -409,40 +398,38 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
         PN(se.exec_start);
         PN(se.vruntime);
         PN(se.sum_exec_runtime);
-        PN(se.avg_overlap);
-        PN(se.avg_wakeup);
 
         nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
-        PN(se.wait_start);
-        PN(se.sleep_start);
-        PN(se.block_start);
-        PN(se.sleep_max);
-        PN(se.block_max);
-        PN(se.exec_max);
-        PN(se.slice_max);
-        PN(se.wait_max);
-        PN(se.wait_sum);
-        P(se.wait_count);
-        PN(se.iowait_sum);
-        P(se.iowait_count);
+        PN(se.statistics.wait_start);
+        PN(se.statistics.sleep_start);
+        PN(se.statistics.block_start);
+        PN(se.statistics.sleep_max);
+        PN(se.statistics.block_max);
+        PN(se.statistics.exec_max);
+        PN(se.statistics.slice_max);
+        PN(se.statistics.wait_max);
+        PN(se.statistics.wait_sum);
+        P(se.statistics.wait_count);
+        PN(se.statistics.iowait_sum);
+        P(se.statistics.iowait_count);
         P(sched_info.bkl_count);
         P(se.nr_migrations);
-        P(se.nr_migrations_cold);
-        P(se.nr_failed_migrations_affine);
-        P(se.nr_failed_migrations_running);
-        P(se.nr_failed_migrations_hot);
-        P(se.nr_forced_migrations);
-        P(se.nr_wakeups);
-        P(se.nr_wakeups_sync);
-        P(se.nr_wakeups_migrate);
-        P(se.nr_wakeups_local);
-        P(se.nr_wakeups_remote);
-        P(se.nr_wakeups_affine);
-        P(se.nr_wakeups_affine_attempts);
-        P(se.nr_wakeups_passive);
-        P(se.nr_wakeups_idle);
+        P(se.statistics.nr_migrations_cold);
+        P(se.statistics.nr_failed_migrations_affine);
+        P(se.statistics.nr_failed_migrations_running);
+        P(se.statistics.nr_failed_migrations_hot);
+        P(se.statistics.nr_forced_migrations);
+        P(se.statistics.nr_wakeups);
+        P(se.statistics.nr_wakeups_sync);
+        P(se.statistics.nr_wakeups_migrate);
+        P(se.statistics.nr_wakeups_local);
+        P(se.statistics.nr_wakeups_remote);
+        P(se.statistics.nr_wakeups_affine);
+        P(se.statistics.nr_wakeups_affine_attempts);
+        P(se.statistics.nr_wakeups_passive);
+        P(se.statistics.nr_wakeups_idle);
 
         {
                 u64 avg_atom, avg_per_cpu;
@@ -493,31 +480,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 void proc_sched_set_task(struct task_struct *p)
 {
 #ifdef CONFIG_SCHEDSTATS
-        p->se.wait_max = 0;
-        p->se.wait_sum = 0;
-        p->se.wait_count = 0;
-        p->se.iowait_sum = 0;
-        p->se.iowait_count = 0;
-        p->se.sleep_max = 0;
-        p->se.sum_sleep_runtime = 0;
-        p->se.block_max = 0;
-        p->se.exec_max = 0;
-        p->se.slice_max = 0;
-        p->se.nr_migrations = 0;
-        p->se.nr_migrations_cold = 0;
-        p->se.nr_failed_migrations_affine = 0;
-        p->se.nr_failed_migrations_running = 0;
-        p->se.nr_failed_migrations_hot = 0;
-        p->se.nr_forced_migrations = 0;
-        p->se.nr_wakeups = 0;
-        p->se.nr_wakeups_sync = 0;
-        p->se.nr_wakeups_migrate = 0;
-        p->se.nr_wakeups_local = 0;
-        p->se.nr_wakeups_remote = 0;
-        p->se.nr_wakeups_affine = 0;
-        p->se.nr_wakeups_affine_attempts = 0;
-        p->se.nr_wakeups_passive = 0;
-        p->se.nr_wakeups_idle = 0;
-        p->sched_info.bkl_count = 0;
+        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 }