path: root/kernel/sched_debug.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2010-05-18 11:27:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-18 11:27:54 -0400
commit     b8ae30ee26d379db436b0b8c8c3ff1b52f69e5d1
tree       506aa0b4bdbf90f61e7e9261c7db90aa1452dcce /kernel/sched_debug.c
parent     4d7b4ac22fbec1a03206c6cde353f2fd6942f828
parent     9c6f7e43b4e02c161b53e97ba913855246876c61
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (49 commits)
  stop_machine: Move local variable closer to the usage site in cpu_stop_cpu_callback()
  sched, wait: Use wrapper functions
  sched: Remove a stale comment
  ondemand: Make the iowait-is-busy time a sysfs tunable
  ondemand: Solve a big performance issue by counting IOWAIT time as busy
  sched: Intoduce get_cpu_iowait_time_us()
  sched: Eliminate the ts->idle_lastupdate field
  sched: Fold updating of the last_update_time_info into update_ts_time_stats()
  sched: Update the idle statistics in get_cpu_idle_time_us()
  sched: Introduce a function to update the idle statistics
  sched: Add a comment to get_cpu_idle_time_us()
  cpu_stop: add dummy implementation for UP
  sched: Remove rq argument to the tracepoints
  rcu: need barrier() in UP synchronize_sched_expedited()
  sched: correctly place paranioa memory barriers in synchronize_sched_expedited()
  sched: kill paranoia check in synchronize_sched_expedited()
  sched: replace migration_thread with cpu_stop
  stop_machine: reimplement using cpu_stop
  cpu_stop: implement stop_cpu[s]()
  sched: Fix select_idle_sibling() logic in select_task_rq_fair()
  ...
Diffstat (limited to 'kernel/sched_debug.c')
-rw-r--r--  kernel/sched_debug.c  108
1 file changed, 38 insertions(+), 70 deletions(-)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 19be00ba6123..87a330a7185f 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -70,16 +70,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
 	PN(se->vruntime);
 	PN(se->sum_exec_runtime);
 #ifdef CONFIG_SCHEDSTATS
-	PN(se->wait_start);
-	PN(se->sleep_start);
-	PN(se->block_start);
-	PN(se->sleep_max);
-	PN(se->block_max);
-	PN(se->exec_max);
-	PN(se->slice_max);
-	PN(se->wait_max);
-	PN(se->wait_sum);
-	P(se->wait_count);
+	PN(se->statistics.wait_start);
+	PN(se->statistics.sleep_start);
+	PN(se->statistics.block_start);
+	PN(se->statistics.sleep_max);
+	PN(se->statistics.block_max);
+	PN(se->statistics.exec_max);
+	PN(se->statistics.slice_max);
+	PN(se->statistics.wait_max);
+	PN(se->statistics.wait_sum);
+	P(se->statistics.wait_count);
 #endif
 	P(se->load.weight);
 #undef PN
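
Throughout this file, P() and PN() are local debug-print helpers: P() prints an integer field by name, while PN() prints a nanosecond value split into whole and fractional parts via SPLIT_NS(). A minimal sketch of the idea, not the exact definitions in sched_debug.c, looks like:

	/* print an integer field, using the stringified expression as the label */
	#define P(F) \
		SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)(F))
	/* print a nanosecond value as seconds.fraction via SPLIT_NS() */
	#define PN(F) \
		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)(F)))

Because the macros stringify their argument (#F), moving a field under se->statistics only requires changing the argument; the printed label updates automatically.
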
@@ -104,7 +104,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
 		SPLIT_NS(p->se.vruntime),
 		SPLIT_NS(p->se.sum_exec_runtime),
-		SPLIT_NS(p->se.sum_sleep_runtime));
+		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
 #else
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -175,11 +175,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
-#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	{
-		uid_t uid = cfs_rq->tg->uid;
-		SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
-	}
 #else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
 #endif
@@ -409,40 +404,38 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.exec_start);
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
-	PN(se.avg_overlap);
-	PN(se.avg_wakeup);
 
 	nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
-	PN(se.wait_start);
-	PN(se.sleep_start);
-	PN(se.block_start);
-	PN(se.sleep_max);
-	PN(se.block_max);
-	PN(se.exec_max);
-	PN(se.slice_max);
-	PN(se.wait_max);
-	PN(se.wait_sum);
-	P(se.wait_count);
-	PN(se.iowait_sum);
-	P(se.iowait_count);
+	PN(se.statistics.wait_start);
+	PN(se.statistics.sleep_start);
+	PN(se.statistics.block_start);
+	PN(se.statistics.sleep_max);
+	PN(se.statistics.block_max);
+	PN(se.statistics.exec_max);
+	PN(se.statistics.slice_max);
+	PN(se.statistics.wait_max);
+	PN(se.statistics.wait_sum);
+	P(se.statistics.wait_count);
+	PN(se.statistics.iowait_sum);
+	P(se.statistics.iowait_count);
 	P(sched_info.bkl_count);
 	P(se.nr_migrations);
-	P(se.nr_migrations_cold);
-	P(se.nr_failed_migrations_affine);
-	P(se.nr_failed_migrations_running);
-	P(se.nr_failed_migrations_hot);
-	P(se.nr_forced_migrations);
-	P(se.nr_wakeups);
-	P(se.nr_wakeups_sync);
-	P(se.nr_wakeups_migrate);
-	P(se.nr_wakeups_local);
-	P(se.nr_wakeups_remote);
-	P(se.nr_wakeups_affine);
-	P(se.nr_wakeups_affine_attempts);
-	P(se.nr_wakeups_passive);
-	P(se.nr_wakeups_idle);
+	P(se.statistics.nr_migrations_cold);
+	P(se.statistics.nr_failed_migrations_affine);
+	P(se.statistics.nr_failed_migrations_running);
+	P(se.statistics.nr_failed_migrations_hot);
+	P(se.statistics.nr_forced_migrations);
+	P(se.statistics.nr_wakeups);
+	P(se.statistics.nr_wakeups_sync);
+	P(se.statistics.nr_wakeups_migrate);
+	P(se.statistics.nr_wakeups_local);
+	P(se.statistics.nr_wakeups_remote);
+	P(se.statistics.nr_wakeups_affine);
+	P(se.statistics.nr_wakeups_affine_attempts);
+	P(se.statistics.nr_wakeups_passive);
+	P(se.statistics.nr_wakeups_idle);
 
 	{
 		u64 avg_atom, avg_per_cpu;
@@ -493,31 +486,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 void proc_sched_set_task(struct task_struct *p)
 {
 #ifdef CONFIG_SCHEDSTATS
-	p->se.wait_max = 0;
-	p->se.wait_sum = 0;
-	p->se.wait_count = 0;
-	p->se.iowait_sum = 0;
-	p->se.iowait_count = 0;
-	p->se.sleep_max = 0;
-	p->se.sum_sleep_runtime = 0;
-	p->se.block_max = 0;
-	p->se.exec_max = 0;
-	p->se.slice_max = 0;
-	p->se.nr_migrations = 0;
-	p->se.nr_migrations_cold = 0;
-	p->se.nr_failed_migrations_affine = 0;
-	p->se.nr_failed_migrations_running = 0;
-	p->se.nr_failed_migrations_hot = 0;
-	p->se.nr_forced_migrations = 0;
-	p->se.nr_wakeups = 0;
-	p->se.nr_wakeups_sync = 0;
-	p->se.nr_wakeups_migrate = 0;
-	p->se.nr_wakeups_local = 0;
-	p->se.nr_wakeups_remote = 0;
-	p->se.nr_wakeups_affine = 0;
-	p->se.nr_wakeups_affine_attempts = 0;
-	p->se.nr_wakeups_passive = 0;
-	p->se.nr_wakeups_idle = 0;
-	p->sched_info.bkl_count = 0;
+	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 }
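
Taken together, the sched_debug.c hunks reflect one underlying change in this merge: the per-entity schedstats fields (wait, sleep, block, migration and wakeup counters) now live in a single nested structure inside struct sched_entity instead of being individual members. A hedged sketch of the shape this implies, with the field list abbreviated rather than the exact include/linux/sched.h layout:

	struct sched_statistics {
		u64	wait_start;
		u64	wait_max;
		u64	wait_sum;
		u64	iowait_sum;
		u64	sleep_start;
		u64	sleep_max;
		u64	sum_sleep_runtime;
		u64	block_start;
		u64	block_max;
		u64	exec_max;
		u64	slice_max;
		/* ... plus the wait/iowait counts and the migration and wakeup
		 * counters printed in the hunks above ... */
	};

	struct sched_entity {
		/* ... load weight, rbtree node, vruntime, sum_exec_runtime, ... */
	#ifdef CONFIG_SCHEDSTATS
		struct sched_statistics statistics;	/* all schedstats counters in one block */
	#endif
		/* ... */
	};

Grouping the counters into one contiguous struct is what lets proc_sched_set_task() replace the long run of per-field assignments with a single memset(&p->se.statistics, 0, sizeof(p->se.statistics)), as the final hunk shows.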