diff options
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r-- | kernel/sched/debug.c | 101 |
1 files changed, 81 insertions, 20 deletions
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 2cd3c1b4e582..75024a673520 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
@@ -110,13 +110,6 @@ static char *task_group_path(struct task_group *tg) | |||
110 | if (autogroup_path(tg, group_path, PATH_MAX)) | 110 | if (autogroup_path(tg, group_path, PATH_MAX)) |
111 | return group_path; | 111 | return group_path; |
112 | 112 | ||
113 | /* | ||
114 | * May be NULL if the underlying cgroup isn't fully-created yet | ||
115 | */ | ||
116 | if (!tg->css.cgroup) { | ||
117 | group_path[0] = '\0'; | ||
118 | return group_path; | ||
119 | } | ||
120 | cgroup_path(tg->css.cgroup, group_path, PATH_MAX); | 113 | cgroup_path(tg->css.cgroup, group_path, PATH_MAX); |
121 | return group_path; | 114 | return group_path; |
122 | } | 115 | } |
@@ -222,8 +215,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
222 | cfs_rq->runnable_load_avg); | 215 | cfs_rq->runnable_load_avg); |
223 | SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", | 216 | SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", |
224 | cfs_rq->blocked_load_avg); | 217 | cfs_rq->blocked_load_avg); |
225 | SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", | 218 | SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg", |
226 | atomic64_read(&cfs_rq->tg->load_avg)); | 219 | (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg)); |
227 | SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", | 220 | SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", |
228 | cfs_rq->tg_load_contrib); | 221 | cfs_rq->tg_load_contrib); |
229 | SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", | 222 | SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", |
@@ -269,11 +262,11 @@ static void print_cpu(struct seq_file *m, int cpu) | |||
269 | { | 262 | { |
270 | unsigned int freq = cpu_khz ? : 1; | 263 | unsigned int freq = cpu_khz ? : 1; |
271 | 264 | ||
272 | SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n", | 265 | SEQ_printf(m, "cpu#%d, %u.%03u MHz\n", |
273 | cpu, freq / 1000, (freq % 1000)); | 266 | cpu, freq / 1000, (freq % 1000)); |
274 | } | 267 | } |
275 | #else | 268 | #else |
276 | SEQ_printf(m, "\ncpu#%d\n", cpu); | 269 | SEQ_printf(m, "cpu#%d\n", cpu); |
277 | #endif | 270 | #endif |
278 | 271 | ||
279 | #define P(x) \ | 272 | #define P(x) \ |
@@ -330,6 +323,7 @@ do { \ | |||
330 | print_rq(m, rq, cpu); | 323 | print_rq(m, rq, cpu); |
331 | rcu_read_unlock(); | 324 | rcu_read_unlock(); |
332 | spin_unlock_irqrestore(&sched_debug_lock, flags); | 325 | spin_unlock_irqrestore(&sched_debug_lock, flags); |
326 | SEQ_printf(m, "\n"); | ||
333 | } | 327 | } |
334 | 328 | ||
335 | static const char *sched_tunable_scaling_names[] = { | 329 | static const char *sched_tunable_scaling_names[] = { |
@@ -338,11 +332,10 @@ static const char *sched_tunable_scaling_names[] = { | |||
338 | "linear" | 332 | "linear" |
339 | }; | 333 | }; |
340 | 334 | ||
341 | static int sched_debug_show(struct seq_file *m, void *v) | 335 | static void sched_debug_header(struct seq_file *m) |
342 | { | 336 | { |
343 | u64 ktime, sched_clk, cpu_clk; | 337 | u64 ktime, sched_clk, cpu_clk; |
344 | unsigned long flags; | 338 | unsigned long flags; |
345 | int cpu; | ||
346 | 339 | ||
347 | local_irq_save(flags); | 340 | local_irq_save(flags); |
348 | ktime = ktime_to_ns(ktime_get()); | 341 | ktime = ktime_to_ns(ktime_get()); |
@@ -384,33 +377,101 @@ static int sched_debug_show(struct seq_file *m, void *v) | |||
384 | #undef PN | 377 | #undef PN |
385 | #undef P | 378 | #undef P |
386 | 379 | ||
387 | SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling", | 380 | SEQ_printf(m, " .%-40s: %d (%s)\n", |
381 | "sysctl_sched_tunable_scaling", | ||
388 | sysctl_sched_tunable_scaling, | 382 | sysctl_sched_tunable_scaling, |
389 | sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); | 383 | sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); |
384 | SEQ_printf(m, "\n"); | ||
385 | } | ||
390 | 386 | ||
391 | for_each_online_cpu(cpu) | 387 | static int sched_debug_show(struct seq_file *m, void *v) |
392 | print_cpu(m, cpu); | 388 | { |
389 | int cpu = (unsigned long)(v - 2); | ||
393 | 390 | ||
394 | SEQ_printf(m, "\n"); | 391 | if (cpu != -1) |
392 | print_cpu(m, cpu); | ||
393 | else | ||
394 | sched_debug_header(m); | ||
395 | 395 | ||
396 | return 0; | 396 | return 0; |
397 | } | 397 | } |
398 | 398 | ||
399 | void sysrq_sched_debug_show(void) | 399 | void sysrq_sched_debug_show(void) |
400 | { | 400 | { |
401 | sched_debug_show(NULL, NULL); | 401 | int cpu; |
402 | |||
403 | sched_debug_header(NULL); | ||
404 | for_each_online_cpu(cpu) | ||
405 | print_cpu(NULL, cpu); | ||
406 | |||
407 | } | ||
408 | |||
409 | /* | ||
410 | * This iterator needs some explanation. | ||
411 | * It returns 1 for the header position. | ||
412 | * This means 2 is cpu 0. | ||
413 | * In a hotplugged system some cpus, including cpu 0, may be missing so we have | ||
414 | * to use cpumask_* to iterate over the cpus. | ||
415 | */ | ||
416 | static void *sched_debug_start(struct seq_file *file, loff_t *offset) | ||
417 | { | ||
418 | unsigned long n = *offset; | ||
419 | |||
420 | if (n == 0) | ||
421 | return (void *) 1; | ||
422 | |||
423 | n--; | ||
424 | |||
425 | if (n > 0) | ||
426 | n = cpumask_next(n - 1, cpu_online_mask); | ||
427 | else | ||
428 | n = cpumask_first(cpu_online_mask); | ||
429 | |||
430 | *offset = n + 1; | ||
431 | |||
432 | if (n < nr_cpu_ids) | ||
433 | return (void *)(unsigned long)(n + 2); | ||
434 | return NULL; | ||
435 | } | ||
436 | |||
437 | static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset) | ||
438 | { | ||
439 | (*offset)++; | ||
440 | return sched_debug_start(file, offset); | ||
441 | } | ||
442 | |||
443 | static void sched_debug_stop(struct seq_file *file, void *data) | ||
444 | { | ||
445 | } | ||
446 | |||
447 | static const struct seq_operations sched_debug_sops = { | ||
448 | .start = sched_debug_start, | ||
449 | .next = sched_debug_next, | ||
450 | .stop = sched_debug_stop, | ||
451 | .show = sched_debug_show, | ||
452 | }; | ||
453 | |||
454 | static int sched_debug_release(struct inode *inode, struct file *file) | ||
455 | { | ||
456 | seq_release(inode, file); | ||
457 | |||
458 | return 0; | ||
402 | } | 459 | } |
403 | 460 | ||
404 | static int sched_debug_open(struct inode *inode, struct file *filp) | 461 | static int sched_debug_open(struct inode *inode, struct file *filp) |
405 | { | 462 | { |
406 | return single_open(filp, sched_debug_show, NULL); | 463 | int ret = 0; |
464 | |||
465 | ret = seq_open(filp, &sched_debug_sops); | ||
466 | |||
467 | return ret; | ||
407 | } | 468 | } |
408 | 469 | ||
409 | static const struct file_operations sched_debug_fops = { | 470 | static const struct file_operations sched_debug_fops = { |
410 | .open = sched_debug_open, | 471 | .open = sched_debug_open, |
411 | .read = seq_read, | 472 | .read = seq_read, |
412 | .llseek = seq_lseek, | 473 | .llseek = seq_lseek, |
413 | .release = single_release, | 474 | .release = sched_debug_release, |
414 | }; | 475 | }; |
415 | 476 | ||
416 | static int __init init_sched_debug_procfs(void) | 477 | static int __init init_sched_debug_procfs(void) |