Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--	kernel/sched/debug.c	133
1 file changed, 105 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 6f79596e0ea9..75024a673520 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -61,14 +61,20 @@ static unsigned long nsec_low(unsigned long long nsec)
 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 {
 	struct sched_entity *se = tg->se[cpu];
-	if (!se)
-		return;
 
 #define P(F) \
 	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
 #define PN(F) \
 	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 
+	if (!se) {
+		struct sched_avg *avg = &cpu_rq(cpu)->avg;
+		P(avg->runnable_avg_sum);
+		P(avg->runnable_avg_period);
+		return;
+	}
+
+
 	PN(se->exec_start);
 	PN(se->vruntime);
 	PN(se->sum_exec_runtime);
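With the group-scheduling entity absent (the root task_group has no se), the function now falls back to the cpu runqueue's own sched_avg instead of bailing out silently. The P() helper defined just above stringifies its argument, so, as a rough illustration (expansion sketched here, not part of the patch), P(avg->runnable_avg_sum) becomes:

    SEQ_printf(m, "  .%-30s: %lld\n", "avg->runnable_avg_sum",
               (long long)avg->runnable_avg_sum);

which is also why the !se branch had to move below the macro definitions: in the old layout the early return preceded them, so P() was not yet available.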
@@ -85,6 +91,12 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	P(se->statistics.wait_count);
 #endif
 	P(se->load.weight);
+#ifdef CONFIG_SMP
+	P(se->avg.runnable_avg_sum);
+	P(se->avg.runnable_avg_period);
+	P(se->avg.load_avg_contrib);
+	P(se->avg.decay_count);
+#endif
 #undef PN
 #undef P
 }
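The four se->avg fields exist only when per-entity load tracking is compiled in, hence the CONFIG_SMP guard. For orientation, the structure these hunks read from looks roughly like this in the load-tracking series (a sketch for reference; the exact layout may differ in this tree):

    struct sched_avg {
        u32 runnable_avg_sum, runnable_avg_period;  /* decayed runnable time / total period */
        u64 last_runnable_update;
        s64 decay_count;          /* decay periods owed for time spent blocked */
        unsigned long load_avg_contrib;  /* cached contribution to the cfs_rq */
    };

runnable_avg_sum over runnable_avg_period gives the decayed fraction of time the entity was runnable; the other two fields are what the new P() lines expose for debugging.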
@@ -98,13 +110,6 @@ static char *task_group_path(struct task_group *tg)
 	if (autogroup_path(tg, group_path, PATH_MAX))
 		return group_path;
 
-	/*
-	 * May be NULL if the underlying cgroup isn't fully-created yet
-	 */
-	if (!tg->css.cgroup) {
-		group_path[0] = '\0';
-		return group_path;
-	}
 	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 	return group_path;
 }
@@ -206,14 +211,18 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
-			SPLIT_NS(cfs_rq->load_avg));
-	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
-			SPLIT_NS(cfs_rq->load_period));
-	SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
-			cfs_rq->load_contribution);
-	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
-			atomic_read(&cfs_rq->tg->load_weight));
+	SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
+			cfs_rq->runnable_load_avg);
+	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
+			cfs_rq->blocked_load_avg);
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
+			cfs_rq->tg_load_contrib);
+	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
+			cfs_rq->tg_runnable_contrib);
+	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
+			atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
 
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
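The old load_avg/load_period pair was a time-based average printed via SPLIT_NS; the replacement aggregates are fixed-point sums, not nanosecond values, so the SPLIT_NS formatting goes away with them. Note the (unsigned long long) cast on atomic64_read(): its return type differs by architecture (long on 64-bit, long long on generic 32-bit), and the cast pins the argument to 64 bits so it always matches the %lld conversion. Purely as a shape illustration, with made-up values, a per-cfs_rq block in /proc/sched_debug would gain lines of the form:

    .runnable_load_avg             : 1024
    .blocked_load_avg              : 0
    .tg_load_avg                   : 3072
    .tg_load_contrib               : 1024
    .tg_runnable_contrib           : 512
    .tg->runnable_avg              : 980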
@@ -253,11 +262,11 @@ static void print_cpu(struct seq_file *m, int cpu)
 	{
 		unsigned int freq = cpu_khz ? : 1;
 
-		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
+		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
 			   cpu, freq / 1000, (freq % 1000));
 	}
 #else
-	SEQ_printf(m, "\ncpu#%d\n", cpu);
+	SEQ_printf(m, "cpu#%d\n", cpu);
 #endif
 
 #define P(x) \
@@ -314,6 +323,7 @@ do { \
 	print_rq(m, rq, cpu);
 	rcu_read_unlock();
 	spin_unlock_irqrestore(&sched_debug_lock, flags);
+	SEQ_printf(m, "\n");
 }
 
 static const char *sched_tunable_scaling_names[] = {
@@ -322,11 +332,10 @@ static const char *sched_tunable_scaling_names[] = {
 	"linear"
 };
 
-static int sched_debug_show(struct seq_file *m, void *v)
+static void sched_debug_header(struct seq_file *m)
 {
 	u64 ktime, sched_clk, cpu_clk;
 	unsigned long flags;
-	int cpu;
 
 	local_irq_save(flags);
 	ktime = ktime_to_ns(ktime_get());
@@ -368,33 +377,101 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #undef PN
 #undef P
 
-	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
+	SEQ_printf(m, "  .%-40s: %d (%s)\n",
+		"sysctl_sched_tunable_scaling",
 		sysctl_sched_tunable_scaling,
 		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
+	SEQ_printf(m, "\n");
+}
 
-	for_each_online_cpu(cpu)
-		print_cpu(m, cpu);
+static int sched_debug_show(struct seq_file *m, void *v)
+{
+	int cpu = (unsigned long)(v - 2);
 
-	SEQ_printf(m, "\n");
+	if (cpu != -1)
+		print_cpu(m, cpu);
+	else
+		sched_debug_header(m);
 
 	return 0;
 }
 
 void sysrq_sched_debug_show(void)
 {
-	sched_debug_show(NULL, NULL);
+	int cpu;
+
+	sched_debug_header(NULL);
+	for_each_online_cpu(cpu)
+		print_cpu(NULL, cpu);
+
+}
+
+/*
+ * This iterator needs some explanation.
+ * It returns 1 for the header position.
+ * This means 2 is cpu 0.
+ * In a hotplugged system some cpus, including cpu 0, may be missing so we have
+ * to use cpumask_* to iterate over the cpus.
+ */
+static void *sched_debug_start(struct seq_file *file, loff_t *offset)
+{
+	unsigned long n = *offset;
+
+	if (n == 0)
+		return (void *) 1;
+
+	n--;
+
+	if (n > 0)
+		n = cpumask_next(n - 1, cpu_online_mask);
+	else
+		n = cpumask_first(cpu_online_mask);
+
+	*offset = n + 1;
+
+	if (n < nr_cpu_ids)
+		return (void *)(unsigned long)(n + 2);
+	return NULL;
+}
+
+static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
+{
+	(*offset)++;
+	return sched_debug_start(file, offset);
+}
+
+static void sched_debug_stop(struct seq_file *file, void *data)
+{
+}
+
+static const struct seq_operations sched_debug_sops = {
+	.start = sched_debug_start,
+	.next = sched_debug_next,
+	.stop = sched_debug_stop,
+	.show = sched_debug_show,
+};
+
+static int sched_debug_release(struct inode *inode, struct file *file)
+{
+	seq_release(inode, file);
+
+	return 0;
 }
 
 static int sched_debug_open(struct inode *inode, struct file *filp)
 {
-	return single_open(filp, sched_debug_show, NULL);
+	int ret = 0;
+
+	ret = seq_open(filp, &sched_debug_sops);
+
+	return ret;
 }
 
 static const struct file_operations sched_debug_fops = {
 	.open = sched_debug_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = single_release,
+	.release = sched_debug_release,
 };
 
 static int __init init_sched_debug_procfs(void)
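The iterator encodes the seq_file position as an opaque token: position 0 returns (void *)1 for the header, and an online cpu c is returned as c + 2, which sched_debug_show() decodes with (v - 2). The void * arithmetic relies on the kernel's GCC extension treating sizeof(void) as 1, so the header token decodes to -1. A walk-through under a hypothetical online mask of {0, 2, 3} (cpu 1 hot-unplugged):

    *offset = 0: start() returns token 1                          -> show prints the header
    *offset = 1: cpumask_first() = 0, *offset = 1                 -> token 2, show prints cpu#0
    next() bumps *offset to 2: cpumask_next(0) = 2, *offset = 3   -> token 4, show prints cpu#2
    next() bumps *offset to 4: cpumask_next(2) = 3, *offset = 4   -> token 5, show prints cpu#3
    next() bumps *offset to 5: cpumask_next(3) >= nr_cpu_ids      -> NULL, iteration ends

Because start() rewrites *offset to cpu + 1, holes in the online mask are skipped rather than enumerated one file position at a time. The conversion keeps no per-open private state, so sched_debug_release() is only a wrapper around seq_release(); plain seq_release could arguably have been wired into sched_debug_fops directly, and likewise sched_debug_open() could simply return seq_open() without the ret temporary.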