author	Steven Rostedt <srostedt@redhat.com>	2009-03-20 12:50:56 -0400
committer	Steven Rostedt <srostedt@redhat.com>	2009-03-24 23:40:00 -0400
commit	bac429f037f1a51a74d62bad6d1518c3be065df3
tree	40c3dbdd0f6c097c3c9a1087d219dd8632bd7b74 /kernel/trace/ftrace.c
parent	425480081e936d8725f0d44b8829d699bf088c6b
tracing: add function profiler
Impact: new profiling feature
This patch adds a function profiler. In debugfs/tracing/, two new
files are created:
function_profile_enabled - to enable or disable profiling
trace_stat/functions - the profiled functions.
For example:
echo 1 > /debugfs/tracing/function_profile_enabled
./hackbench 50
echo 0 > /debugfs/tracing/function_profile_enabled
cat /debugfs/tracing/trace_stat/functions
yields:
  Function                               Hit
  --------                               ---
  _spin_lock                        10106442
  _spin_unlock                      10097492
  kfree                              6013704
  _spin_unlock_irqrestore            4423941
  _spin_lock_irqsave                 4406825
  __phys_addr                        4181686
  __slab_free                        4038222
  dput                               4030130
  path_put                           4023387
  unroll_tree_refs                   4019532
[...]
The most-hit functions are listed first. Functions that are never
hit are not listed.
This feature depends on and uses dynamic function tracing. When
function profiling is disabled, it incurs no overhead. But it still
takes up around 300KB to hold its data, so it is not recommended
to keep it enabled on systems low on memory.
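For a concrete sense of the sizing, the hash allocation in
ftrace_profile_init() below rounds sizeof(struct hlist_head) * nr_funcs / 4
up to a power-of-two number of pages. Here is a minimal userspace sketch of
that arithmetic; the 20,000 function count and 4K page size are assumptions
for illustration, and the hash is only part of the ~300KB (the counters
themselves live in the dyn_ftrace records, whose fields are added outside
this file's diff):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumes 4K pages */

/* same contract as the kernel's get_order(): smallest order
 * such that (1 << order) pages hold `size` bytes */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long nr_funcs = 20000;	/* assumed; not a measured count */
	unsigned long head_size = 8;	/* sizeof(struct hlist_head), 64-bit */
	int order = get_order(head_size * nr_funcs / 4);
	unsigned long size = 1UL << (PAGE_SHIFT + order);

	/* 20000 funcs -> 40000 bytes wanted -> order 4 -> 64 KB, 8192 buckets */
	printf("%lu KB for %lu buckets\n", size >> 10, size / head_size);
	return 0;
}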
When a '1' is echoed into the function_profile_enabled file, the
counters for each function are reset to zero. Thus you can see which
functions are hit most by different programs.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  313
1 file changed, 311 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7b8722baf153..11f364c776d5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -34,6 +34,7 @@
 #include <asm/ftrace.h>
 
 #include "trace.h"
+#include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)			\
 do {						\
@@ -261,7 +262,6 @@ struct ftrace_func_probe {
 	struct rcu_head rcu;
 };
 
-
 enum {
 	FTRACE_ENABLE_CALLS	= (1 << 0),
 	FTRACE_DISABLE_CALLS	= (1 << 1),
@@ -309,6 +309,307 @@ static struct dyn_ftrace *ftrace_free_records;
 	}				\
 }
 
+#ifdef CONFIG_FUNCTION_PROFILER
+static struct hlist_head *ftrace_profile_hash;
+static int ftrace_profile_bits;
+static int ftrace_profile_enabled;
+static DEFINE_MUTEX(ftrace_profile_lock);
+
+static void *
+function_stat_next(void *v, int idx)
+{
+	struct dyn_ftrace *rec = v;
+	struct ftrace_page *pg;
+
+	pg = (struct ftrace_page *)((unsigned long)rec & PAGE_MASK);
+
+ again:
+	rec++;
+	if ((void *)rec >= (void *)&pg->records[pg->index]) {
+		pg = pg->next;
+		if (!pg)
+			return NULL;
+		rec = &pg->records[0];
+	}
+
+	if (rec->flags & FTRACE_FL_FREE ||
+	    rec->flags & FTRACE_FL_FAILED ||
+	    !(rec->flags & FTRACE_FL_CONVERTED) ||
+	    /* ignore non hit functions */
+	    !rec->counter)
+		goto again;
+
+	return rec;
+}
+
+static void *function_stat_start(struct tracer_stat *trace)
+{
+	return function_stat_next(&ftrace_pages_start->records[0], 0);
+}
+
+static int function_stat_cmp(void *p1, void *p2)
+{
+	struct dyn_ftrace *a = p1;
+	struct dyn_ftrace *b = p2;
+
+	if (a->counter < b->counter)
+		return -1;
+	if (a->counter > b->counter)
+		return 1;
+	else
+		return 0;
+}
+
+static int function_stat_headers(struct seq_file *m)
+{
+	seq_printf(m, "  Function                               Hit\n"
+		      "  --------                               ---\n");
+	return 0;
+}
+
+static int function_stat_show(struct seq_file *m, void *v)
+{
+	struct dyn_ftrace *rec = v;
+	char str[KSYM_SYMBOL_LEN];
+
+	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+
+	seq_printf(m, "  %-30.30s  %10lu\n", str, rec->counter);
+	return 0;
+}
+
+static struct tracer_stat function_stats = {
+	.name = "functions",
+	.stat_start = function_stat_start,
+	.stat_next = function_stat_next,
+	.stat_cmp = function_stat_cmp,
+	.stat_headers = function_stat_headers,
+	.stat_show = function_stat_show
+};
+
+static void ftrace_profile_init(int nr_funcs)
+{
+	unsigned long addr;
+	int order;
+	int size;
+
+	/*
+	 * We are profiling all functions, let's make it 1/4th of the
+	 * number of functions that are in core kernel. So we have to
+	 * iterate 4 times.
+	 */
+	order = (sizeof(struct hlist_head) * nr_funcs) / 4;
+	order = get_order(order);
+	size = 1 << (PAGE_SHIFT + order);
+
+	pr_info("Allocating %d KB for profiler hash\n", size >> 10);
+
+	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!addr) {
+		pr_warning("Could not allocate function profiler hash\n");
+		return;
+	}
+
+	ftrace_profile_hash = (void *)addr;
+
+	/*
+	 * struct hlist_head should be a pointer of 4 or 8 bytes.
+	 * And a simple bit manipulation can be done, but if for
+	 * some reason struct hlist_head is not a multiple of 2,
+	 * then we play it safe, and simply count. This function
+	 * is done once at boot up, so it is not that critical in
+	 * performance.
+	 */
+
+	size--;
+	size /= sizeof(struct hlist_head);
+
+	for (; size; size >>= 1)
+		ftrace_profile_bits++;
+
+	pr_info("Function profiler has %d hash buckets\n",
+		1 << ftrace_profile_bits);
+
+	return;
+}
+
+static ssize_t
+ftrace_profile_read(struct file *filp, char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void ftrace_profile_reset(void)
+{
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		rec->counter = 0;
+	} while_for_each_ftrace_rec();
+}
+
+static struct dyn_ftrace *ftrace_find_profiled_func(unsigned long ip)
+{
+	struct dyn_ftrace *rec;
+	struct hlist_head *hhd;
+	struct hlist_node *n;
+	unsigned long flags;
+	unsigned long key;
+
+	if (!ftrace_profile_hash)
+		return NULL;
+
+	key = hash_long(ip, ftrace_profile_bits);
+	hhd = &ftrace_profile_hash[key];
+
+	if (hlist_empty(hhd))
+		return NULL;
+
+	local_irq_save(flags);
+	hlist_for_each_entry_rcu(rec, n, hhd, node) {
+		if (rec->ip == ip)
+			goto out;
+	}
+	rec = NULL;
+ out:
+	local_irq_restore(flags);
+
+	return rec;
+}
+
+static void
+function_profile_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct dyn_ftrace *rec;
+	unsigned long flags;
+
+	if (!ftrace_profile_enabled)
+		return;
+
+	local_irq_save(flags);
+	rec = ftrace_find_profiled_func(ip);
+	if (!rec)
+		goto out;
+
+	rec->counter++;
+ out:
+	local_irq_restore(flags);
+}
+
+static struct ftrace_ops ftrace_profile_ops __read_mostly =
+{
+	.func = function_profile_call,
+};
+
+static ssize_t
+ftrace_profile_write(struct file *filp, const char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	unsigned long val;
+	char buf[64];
+	int ret;
+
+	if (!ftrace_profile_hash) {
+		pr_info("Can not enable hash due to earlier problems\n");
+		return -ENODEV;
+	}
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	val = !!val;
+
+	mutex_lock(&ftrace_profile_lock);
+	if (ftrace_profile_enabled ^ val) {
+		if (val) {
+			ftrace_profile_reset();
+			register_ftrace_function(&ftrace_profile_ops);
+			ftrace_profile_enabled = 1;
+		} else {
+			ftrace_profile_enabled = 0;
+			unregister_ftrace_function(&ftrace_profile_ops);
+		}
+	}
+	mutex_unlock(&ftrace_profile_lock);
+
+	filp->f_pos += cnt;
+
+	return cnt;
+}
+
+static const struct file_operations ftrace_profile_fops = {
+	.open = tracing_open_generic,
+	.read = ftrace_profile_read,
+	.write = ftrace_profile_write,
+};
+
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+	struct dentry *entry;
+	int ret;
+
+	ret = register_stat_tracer(&function_stats);
+	if (ret) {
+		pr_warning("Warning: could not register "
+			   "function stats\n");
+		return;
+	}
+
+	entry = debugfs_create_file("function_profile_enabled", 0644,
+				    d_tracer, NULL, &ftrace_profile_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'function_profile_enabled' entry\n");
+}
+
+static void ftrace_add_profile(struct dyn_ftrace *rec)
+{
+	unsigned long key;
+
+	if (!ftrace_profile_hash)
+		return;
+
+	key = hash_long(rec->ip, ftrace_profile_bits);
+	hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]);
+}
+
+static void ftrace_profile_release(struct dyn_ftrace *rec)
+{
+	mutex_lock(&ftrace_profile_lock);
+	hlist_del(&rec->node);
+	mutex_unlock(&ftrace_profile_lock);
+}
+
+#else /* CONFIG_FUNCTION_PROFILER */
+static void ftrace_profile_init(int nr_funcs)
+{
+}
+static void ftrace_add_profile(struct dyn_ftrace *rec)
+{
+}
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+}
+static void ftrace_profile_release(struct dyn_ftrace *rec)
+{
+}
+#endif /* CONFIG_FUNCTION_PROFILER */
+
 #ifdef CONFIG_KPROBES
 
 static int frozen_record_count;
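Two data paths are worth noting in the hunk above. The stat callbacks walk
every dyn_ftrace record, recovering the containing ftrace_page from a record
pointer with the (unsigned long)rec & PAGE_MASK trick (records are allocated
page-at-a-time, so the page header sits at the start of each page), and they
skip free, failed, unconverted, and never-hit records. How the trace_stat
core registered via register_stat_tracer() drives those callbacks is not
shown in this diff; roughly, it behaves like this toy userspace model, whose
driver loop, names, and descending sort are illustrative assumptions rather
than the actual kernel/trace/trace_stat.c code:

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the trace_stat contract: enumerate entries with
 * stat_start/stat_next, order them with a cmp callback, then print a
 * header and one line per entry. Illustrative only; the real driver
 * lives in kernel/trace/trace_stat.c and prints through seq_file. */

static unsigned long hits[] = { 42, 7, 1990, 0, 13 };
#define NR_HITS (sizeof(hits) / sizeof(hits[0]))

static void *stat_start(void)
{
	return &hits[0];
}

static void *stat_next(void *v)
{
	unsigned long *p = (unsigned long *)v + 1;

	/* skip never-hit entries, as function_stat_next() does */
	while (p < hits + NR_HITS && !*p)
		p++;
	return p < hits + NR_HITS ? p : NULL;
}

/* descending, so the most-hit entry prints first as in the
 * sample output in the changelog above */
static int stat_cmp(const void *a, const void *b)
{
	unsigned long x = **(unsigned long *const *)a;
	unsigned long y = **(unsigned long *const *)b;

	return (y > x) - (y < x);
}

int main(void)
{
	unsigned long *entries[NR_HITS];
	size_t n = 0, i;
	void *v;

	for (v = stat_start(); v; v = stat_next(v))
		entries[n++] = v;

	qsort(entries, n, sizeof(*entries), stat_cmp);

	printf("  Hit\n  ---\n");
	for (i = 0; i < n; i++)
		printf("  %10lu\n", *entries[i]);
	return 0;
}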
@@ -359,8 +660,10 @@ void ftrace_release(void *start, unsigned long size)
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 		if ((rec->ip >= s) && (rec->ip < e) &&
-		    !(rec->flags & FTRACE_FL_FREE))
+		    !(rec->flags & FTRACE_FL_FREE)) {
 			ftrace_free_rec(rec);
+			ftrace_profile_release(rec);
+		}
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
 }
@@ -414,6 +717,8 @@ ftrace_record_ip(unsigned long ip)
 	rec->newlist = ftrace_new_addrs;
 	ftrace_new_addrs = rec;
 
+	ftrace_add_profile(rec);
+
 	return rec;
 }
 
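The hunk above is the producer side: every call site discovered by
ftrace_record_ip() is hashed by its instruction pointer into
ftrace_profile_hash, so that function_profile_call() can later find the
record and bump rec->counter with nothing more than a bucket walk. A
self-contained userspace sketch of that add-then-count pattern follows; the
chained buckets and the hash constant are illustrative stand-ins for the
kernel's hlist and hash_long(), and the enabled check and IRQ protection of
the real hot path are omitted:

#include <stdio.h>

#define PROFILE_BITS	10
#define NR_BUCKETS	(1 << PROFILE_BITS)

/* illustrative stand-in for struct dyn_ftrace plus its hash linkage */
struct rec {
	unsigned long ip;	/* function entry address */
	unsigned long counter;	/* hit count */
	struct rec *next;	/* bucket chain */
};

static struct rec *buckets[NR_BUCKETS];

/* stand-in for hash_long(ip, PROFILE_BITS) */
static unsigned long hash_ip(unsigned long ip)
{
	return ((unsigned long long)ip * 0x9e3779b97f4a7c15ULL)
		>> (64 - PROFILE_BITS);
}

/* the gist of ftrace_add_profile(): push the record onto its bucket */
static void add_profile(struct rec *r)
{
	unsigned long key = hash_ip(r->ip);

	r->next = buckets[key];
	buckets[key] = r;
}

/* the gist of function_profile_call(): look up by ip, bump the counter */
static void profile_call(unsigned long ip)
{
	struct rec *r;

	for (r = buckets[hash_ip(ip)]; r; r = r->next) {
		if (r->ip == ip) {
			r->counter++;
			return;
		}
	}
	/* ip not in the hash: silently ignored, as in the kernel */
}

int main(void)
{
	static struct rec r = { .ip = 0xc0ffee, .counter = 0, .next = NULL };

	add_profile(&r);
	profile_call(0xc0ffee);
	profile_call(0xc0ffee);
	printf("%#lx hit %lu times\n", r.ip, r.counter);
	return 0;
}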
@@ -2157,6 +2462,8 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 			   "'set_graph_function' entry\n");
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+	ftrace_profile_debugfs(d_tracer);
+
 	return 0;
 }
 
@@ -2225,6 +2532,8 @@ void __init ftrace_init(void)
 	if (ret)
 		goto failed;
 
+	ftrace_profile_init(count);
+
 	last_ftrace_enabled = ftrace_enabled = 1;
 
 	ret = ftrace_convert_nops(NULL,