Diffstat (limited to 'kernel/trace')

 -rw-r--r--  kernel/trace/Kconfig                 |  15
 -rw-r--r--  kernel/trace/ftrace.c                | 581
 -rw-r--r--  kernel/trace/trace.c                 |  15
 -rw-r--r--  kernel/trace/trace.h                 |   4
 -rw-r--r--  kernel/trace/trace_branch.c          |   4
 -rw-r--r--  kernel/trace/trace_functions_graph.c |  25
 -rw-r--r--  kernel/trace/trace_output.c          |  10
 -rw-r--r--  kernel/trace/trace_output.h          |   2
 -rw-r--r--  kernel/trace/trace_stat.c            |   2
 -rw-r--r--  kernel/trace/trace_stat.h            |   2
 -rw-r--r--  kernel/trace/trace_workqueue.c       |   2

 11 files changed, 633 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2246141bda4d..23b96ebbf893 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -104,6 +104,7 @@ config FUNCTION_GRAPH_TRACER
 	  the return value. This is done by setting the current return
 	  address on the current task structure into a stack of calls.
 
+
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
@@ -375,6 +376,20 @@ config DYNAMIC_FTRACE
 	  were made. If so, it runs stop_machine (stops all CPUS)
 	  and modifies the code to jump over the call to ftrace.
 
+config FUNCTION_PROFILER
+	bool "Kernel function profiler"
+	depends on FUNCTION_TRACER
+	default n
+	help
+	  This option enables the kernel function profiler. A file is created
+	  in debugfs called function_profile_enabled which defaults to zero.
+	  When a 1 is echoed into this file profiling begins, and when a
+	  zero is entered, profiling stops. A file in the trace_stat
+	  directory called functions shows the list of functions that
+	  have been hit and their counters.
+
+	  If in doubt, say N.
+
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
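The help text above is the entire user interface: one debugfs switch plus per-CPU stat files. A minimal user-space sketch of driving it, assuming debugfs is mounted at /sys/kernel/debug and the kernel is built with CONFIG_FUNCTION_PROFILER=y (the names function_profile_enabled and trace_stat/function<cpu> come from the ftrace.c changes below):

	/* sketch: enable the profiler, run a workload, dump CPU 0's counters */
	#include <stdio.h>

	#define TRACING "/sys/kernel/debug/tracing"

	int main(void)
	{
		FILE *f;
		char line[256];

		/* equivalent of: echo 1 > function_profile_enabled */
		f = fopen(TRACING "/function_profile_enabled", "w");
		if (!f)
			return 1;
		fputs("1\n", f);
		fclose(f);

		/* ... let some workload run here ... */

		/* read the per-cpu stat file registered for CPU 0 */
		f = fopen(TRACING "/trace_stat/function0", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);

		return 0;
	}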
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f1ed080406c3..678e3d6caf85 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -33,7 +33,8 @@
 
 #include <asm/ftrace.h>
 
-#include "trace.h"
+#include "trace_output.h"
+#include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)			\
 	do {					\
@@ -68,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-	.func = ftrace_stub,
+	.func		= ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -240,6 +241,576 @@ static void ftrace_update_pid_func(void)
 #endif
 }
 
+#ifdef CONFIG_FUNCTION_PROFILER
+struct ftrace_profile {
+	struct hlist_node		node;
+	unsigned long			ip;
+	unsigned long			counter;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	unsigned long long		time;
+#endif
+};
+
+struct ftrace_profile_page {
+	struct ftrace_profile_page	*next;
+	unsigned long			index;
+	struct ftrace_profile		records[];
+};
+
+struct ftrace_profile_stat {
+	atomic_t			disabled;
+	struct hlist_head		*hash;
+	struct ftrace_profile_page	*pages;
+	struct ftrace_profile_page	*start;
+	struct tracer_stat		stat;
+};
+
+#define PROFILE_RECORDS_SIZE						\
+	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
+
+#define PROFILES_PER_PAGE					\
+	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
+
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
+
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
+static DEFINE_MUTEX(ftrace_profile_lock);
+
+static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
+
+#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
+
+static void *
+function_stat_next(void *v, int idx)
+{
+	struct ftrace_profile *rec = v;
+	struct ftrace_profile_page *pg;
+
+	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
+
+ again:
+	rec++;
+	if ((void *)rec >= (void *)&pg->records[pg->index]) {
+		pg = pg->next;
+		if (!pg)
+			return NULL;
+		rec = &pg->records[0];
+		if (!rec->counter)
+			goto again;
+	}
+
+	return rec;
+}
+
+static void *function_stat_start(struct tracer_stat *trace)
+{
+	struct ftrace_profile_stat *stat =
+		container_of(trace, struct ftrace_profile_stat, stat);
+
+	if (!stat || !stat->start)
+		return NULL;
+
+	return function_stat_next(&stat->start->records[0], 0);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* function graph compares on total time */
+static int function_stat_cmp(void *p1, void *p2)
+{
+	struct ftrace_profile *a = p1;
+	struct ftrace_profile *b = p2;
+
+	if (a->time < b->time)
+		return -1;
+	if (a->time > b->time)
+		return 1;
+	else
+		return 0;
+}
+#else
+/* not function graph, compares against hits */
+static int function_stat_cmp(void *p1, void *p2)
+{
+	struct ftrace_profile *a = p1;
+	struct ftrace_profile *b = p2;
+
+	if (a->counter < b->counter)
+		return -1;
+	if (a->counter > b->counter)
+		return 1;
+	else
+		return 0;
+}
+#endif
+
+static int function_stat_headers(struct seq_file *m)
+{
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	seq_printf(m, "  Function                               "
+		   "Hit    Time            Avg\n"
+		   "  --------                               "
+		   "---    ----            ---\n");
+#else
+	seq_printf(m, "  Function                               Hit\n"
+		   "  --------                               ---\n");
+#endif
+	return 0;
+}
+
+static int function_stat_show(struct seq_file *m, void *v)
+{
+	struct ftrace_profile *rec = v;
+	char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	static DEFINE_MUTEX(mutex);
+	static struct trace_seq s;
+	unsigned long long avg;
+#endif
+
+	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	seq_printf(m, "    ");
+	avg = rec->time;
+	do_div(avg, rec->counter);
+
+	mutex_lock(&mutex);
+	trace_seq_init(&s);
+	trace_print_graph_duration(rec->time, &s);
+	trace_seq_puts(&s, "    ");
+	trace_print_graph_duration(avg, &s);
+	trace_print_seq(m, &s);
+	mutex_unlock(&mutex);
+#endif
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
+{
+	struct ftrace_profile_page *pg;
+
+	pg = stat->pages = stat->start;
+
+	while (pg) {
+		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
+		pg->index = 0;
+		pg = pg->next;
+	}
+
+	memset(stat->hash, 0,
+	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
+}
+
+int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+{
+	struct ftrace_profile_page *pg;
+	int functions;
+	int pages;
+	int i;
+
+	/* If we already allocated, do nothing */
+	if (stat->pages)
+		return 0;
+
+	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!stat->pages)
+		return -ENOMEM;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	functions = ftrace_update_tot_cnt;
+#else
+	/*
+	 * We do not know the number of functions that exist because
+	 * dynamic tracing is what counts them. From past experience
+	 * we have around 20K functions. That should be more than enough.
+	 * It is highly unlikely we will execute every function in
+	 * the kernel.
+	 */
+	functions = 20000;
+#endif
+
+	pg = stat->start = stat->pages;
+
+	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+	for (i = 0; i < pages; i++) {
+		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!pg->next)
+			goto out_free;
+		pg = pg->next;
+	}
+
+	return 0;
+
+ out_free:
+	pg = stat->start;
+	while (pg) {
+		unsigned long tmp = (unsigned long)pg;
+
+		pg = pg->next;
+		free_page(tmp);
+	}
+
+	free_page((unsigned long)stat->pages);
+	stat->pages = NULL;
+	stat->start = NULL;
+
+	return -ENOMEM;
+}
+
+static int ftrace_profile_init_cpu(int cpu)
+{
+	struct ftrace_profile_stat *stat;
+	int size;
+
+	stat = &per_cpu(ftrace_profile_stats, cpu);
+
+	if (stat->hash) {
+		/* If the profile is already created, simply reset it */
+		ftrace_profile_reset(stat);
+		return 0;
+	}
+
+	/*
+	 * We are profiling all functions, but usually only a few thousand
+	 * functions are hit. We'll make a hash of 1024 items.
+	 */
+	size = FTRACE_PROFILE_HASH_SIZE;
+
+	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+
+	if (!stat->hash)
+		return -ENOMEM;
+
+	if (!ftrace_profile_bits) {
+		size--;
+
+		for (; size; size >>= 1)
+			ftrace_profile_bits++;
+	}
+
+	/* Preallocate the function profiling pages */
+	if (ftrace_profile_pages_init(stat) < 0) {
+		kfree(stat->hash);
+		stat->hash = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ftrace_profile_init(void)
+{
+	int cpu;
+	int ret = 0;
+
+	for_each_online_cpu(cpu) {
+		ret = ftrace_profile_init_cpu(cpu);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/* interrupts must be disabled */
+static struct ftrace_profile *
+ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+	struct ftrace_profile *rec;
+	struct hlist_head *hhd;
+	struct hlist_node *n;
+	unsigned long key;
+
+	key = hash_long(ip, ftrace_profile_bits);
+	hhd = &stat->hash[key];
+
+	if (hlist_empty(hhd))
+		return NULL;
+
+	hlist_for_each_entry_rcu(rec, n, hhd, node) {
+		if (rec->ip == ip)
+			return rec;
+	}
+
+	return NULL;
+}
+
+static void ftrace_add_profile(struct ftrace_profile_stat *stat,
+			       struct ftrace_profile *rec)
+{
+	unsigned long key;
+
+	key = hash_long(rec->ip, ftrace_profile_bits);
+	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
+}
+
+/*
+ * The memory is already allocated, this simply finds a new record to use.
+ */
+static struct ftrace_profile *
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+	struct ftrace_profile *rec = NULL;
+
+	/* prevent recursion (from NMIs) */
+	if (atomic_inc_return(&stat->disabled) != 1)
+		goto out;
+
+	/*
+	 * Try to find the function again since an NMI
+	 * could have added it
+	 */
+	rec = ftrace_find_profiled_func(stat, ip);
+	if (rec)
+		goto out;
+
+	if (stat->pages->index == PROFILES_PER_PAGE) {
+		if (!stat->pages->next)
+			goto out;
+		stat->pages = stat->pages->next;
+	}
+
+	rec = &stat->pages->records[stat->pages->index++];
+	rec->ip = ip;
+	ftrace_add_profile(stat, rec);
+
+ out:
+	atomic_dec(&stat->disabled);
+
+	return rec;
+}
+
+static void
+function_profile_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_profile_stat *stat;
+	struct ftrace_profile *rec;
+	unsigned long flags;
+
+	if (!ftrace_profile_enabled)
+		return;
+
+	local_irq_save(flags);
+
+	stat = &__get_cpu_var(ftrace_profile_stats);
+	if (!stat->hash)
+		goto out;
+
+	rec = ftrace_find_profiled_func(stat, ip);
+	if (!rec) {
+		rec = ftrace_profile_alloc(stat, ip);
+		if (!rec)
+			goto out;
+	}
+
+	rec->counter++;
+ out:
+	local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int profile_graph_entry(struct ftrace_graph_ent *trace)
+{
+	function_profile_call(trace->func, 0);
+	return 1;
+}
+
+static void profile_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct ftrace_profile_stat *stat;
+	unsigned long long calltime;
+	struct ftrace_profile *rec;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	stat = &__get_cpu_var(ftrace_profile_stats);
+	if (!stat->hash)
+		goto out;
+
+	calltime = trace->rettime - trace->calltime;
+
+	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+		int index;
+
+		index = trace->depth;
+
+		/* Append this call time to the parent time to subtract */
+		if (index)
+			current->ret_stack[index - 1].subtime += calltime;
+
+		if (current->ret_stack[index].subtime < calltime)
+			calltime -= current->ret_stack[index].subtime;
+		else
+			calltime = 0;
+	}
+
+	rec = ftrace_find_profiled_func(stat, trace->func);
+	if (rec)
+		rec->time += calltime;
+
+ out:
+	local_irq_restore(flags);
+}
+
+static int register_ftrace_profiler(void)
+{
+	return register_ftrace_graph(&profile_graph_return,
+				     &profile_graph_entry);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+	unregister_ftrace_graph();
+}
+#else
+static struct ftrace_ops ftrace_profile_ops __read_mostly =
+{
+	.func		= function_profile_call,
+};
+
+static int register_ftrace_profiler(void)
+{
+	return register_ftrace_function(&ftrace_profile_ops);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+	unregister_ftrace_function(&ftrace_profile_ops);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static ssize_t
+ftrace_profile_write(struct file *filp, const char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	unsigned long val;
+	char buf[64];		/* big enough to hold a number */
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	val = !!val;
+
+	mutex_lock(&ftrace_profile_lock);
+	if (ftrace_profile_enabled ^ val) {
+		if (val) {
+			ret = ftrace_profile_init();
+			if (ret < 0) {
+				cnt = ret;
+				goto out;
+			}
+
+			ret = register_ftrace_profiler();
+			if (ret < 0) {
+				cnt = ret;
+				goto out;
+			}
+			ftrace_profile_enabled = 1;
+		} else {
+			ftrace_profile_enabled = 0;
+			unregister_ftrace_profiler();
+		}
+	}
+ out:
+	mutex_unlock(&ftrace_profile_lock);
+
+	filp->f_pos += cnt;
+
+	return cnt;
+}
+
+static ssize_t
+ftrace_profile_read(struct file *filp, char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
+{
+	char buf[64];		/* big enough to hold a number */
+	int r;
+
+	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ftrace_profile_fops = {
+	.open		= tracing_open_generic,
+	.read		= ftrace_profile_read,
+	.write		= ftrace_profile_write,
+};
+
+/* used to initialize the real stat files */
+static struct tracer_stat function_stats __initdata = {
+	.name		= "functions",
+	.stat_start	= function_stat_start,
+	.stat_next	= function_stat_next,
+	.stat_cmp	= function_stat_cmp,
+	.stat_headers	= function_stat_headers,
+	.stat_show	= function_stat_show
+};
+
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+	struct ftrace_profile_stat *stat;
+	struct dentry *entry;
+	char *name;
+	int ret;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		stat = &per_cpu(ftrace_profile_stats, cpu);
+
+		/* allocate enough for function name + cpu number */
+		name = kmalloc(32, GFP_KERNEL);
+		if (!name) {
+			/*
+			 * The files created are permanent; if something happens
+			 * we still do not free memory.
+			 */
+			kfree(stat);
+			WARN(1,
+			     "Could not allocate stat file for cpu %d\n",
+			     cpu);
+			return;
+		}
+		stat->stat = function_stats;
+		snprintf(name, 32, "function%d", cpu);
+		stat->stat.name = name;
+		ret = register_stat_tracer(&stat->stat);
+		if (ret) {
+			WARN(1,
+			     "Could not register function stat for cpu %d\n",
+			     cpu);
+			kfree(name);
+			return;
+		}
+	}
+
+	entry = debugfs_create_file("function_profile_enabled", 0644,
+				    d_tracer, NULL, &ftrace_profile_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'function_profile_enabled' entry\n");
+}
+
+#else /* CONFIG_FUNCTION_PROFILER */
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+}
+#endif /* CONFIG_FUNCTION_PROFILER */
+
 /* set when tracing only a pid */
 struct pid *ftrace_pid_trace;
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
@@ -261,7 +832,6 @@ struct ftrace_func_probe {
 	struct rcu_head		rcu;
 };
 
-
 enum {
 	FTRACE_ENABLE_CALLS		= (1 << 0),
 	FTRACE_DISABLE_CALLS		= (1 << 1),
@@ -1408,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-	.func = function_trace_probe_call,
+	.func		= function_trace_probe_call,
 };
 
 static int ftrace_probe_registered;
@@ -2430,6 +3000,9 @@ static __init int ftrace_init_debugfs(void)
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_pid' entry\n");
+
+	ftrace_profile_debugfs(d_tracer);
+
 	return 0;
 }
 fs_initcall(ftrace_init_debugfs);
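Two sizing details in the new profiler code above are easy to miss: records are carved out of whole pages through the flexible records[] array, and the hash size is a power of two so hash_long() needs only a bit count. A stand-alone sketch of that arithmetic, with the kernel structures mocked up so it runs in user space (the 4096-byte page and 64-bit field sizes are illustrative assumptions, not the real kernel layout):

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	struct ftrace_profile {
		void *node_next, *node_pprev;	/* stand-in for hlist_node */
		unsigned long ip;
		unsigned long counter;
		unsigned long long time;	/* only with FUNCTION_GRAPH_TRACER */
	};

	struct ftrace_profile_page {
		struct ftrace_profile_page *next;
		unsigned long index;
		struct ftrace_profile records[];
	};

	int main(void)
	{
		unsigned long records_size =
			PAGE_SIZE - offsetof(struct ftrace_profile_page, records);
		unsigned long per_page = records_size / sizeof(struct ftrace_profile);
		int size = 1024, bits = 0;	/* FTRACE_PROFILE_HASH_SIZE */

		/* same loop the patch uses to turn a power of two into a bit count */
		for (size--; size; size >>= 1)
			bits++;

		printf("%lu records per page, %lu pages for 20000 functions\n",
		       per_page, (20000 + per_page - 1) / per_page);
		printf("hash_long() uses %d bits for 1024 buckets\n", bits);
		return 0;
	}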
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a0174a40c563..2a81decf99bc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
+	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+	TRACE_ITER_GRAPH_TIME;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -317,6 +318,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"global-clock",
 	"sleep-time",
+	"graph-time",
 	NULL
 };
 
@@ -402,17 +404,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
-static void
-trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-
-	s->buffer[len] = 0;
-	seq_puts(m, s->buffer);
-
-	trace_seq_init(s);
-}
-
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
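trace.c only registers the new graph-time option; like every entry in trace_options[], it is toggled by name through the trace_options file, with a "no" prefix clearing it. A sketch, again assuming debugfs is mounted at /sys/kernel/debug:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/tracing/trace_options", "w");

		if (!f)
			return 1;
		/* clear TRACE_ITER_GRAPH_TIME: profile self time only */
		fputs("nograph-time\n", f);
		fclose(f);
		return 0;
	}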
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cbc168f1e43d..fec6521ffa13 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -613,6 +613,8 @@ extern unsigned long trace_flags;
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
@@ -644,7 +646,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
 	return 1;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
-
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function(struct trace_iterator *iter)
@@ -692,6 +693,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_LATENCY_FMT		= 0x40000,
 	TRACE_ITER_GLOBAL_CLK		= 0x80000,
 	TRACE_ITER_SLEEP_TIME		= 0x100000,
+	TRACE_ITER_GRAPH_TIME		= 0x200000,
 };
 
 /*
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index ad8c22efff41..e6e32912ffb8 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -263,7 +263,7 @@ static int branch_stat_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static void *annotated_branch_stat_start(void)
+static void *annotated_branch_stat_start(struct tracer_stat *trace)
 {
 	return __start_annotated_branch_profile;
 }
@@ -338,7 +338,7 @@ static int all_branch_stat_headers(struct seq_file *m)
 	return 0;
 }
 
-static void *all_branch_stat_start(void)
+static void *all_branch_stat_start(struct tracer_stat *trace)
 {
 	return __start_branch_profile;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..10f6ad7d85f6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
+	current->ret_stack[index].subtime = 0;
 	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-void
+static void
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 	trace->calltime = current->ret_stack[index].calltime;
 	trace->overrun = atomic_read(&current->trace_overrun);
 	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
 }
 
 /*
@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void)
 	ftrace_pop_return_trace(&trace, &ret);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);
+	barrier();
+	current->curr_ret_stack--;
 
 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
@@ -426,8 +426,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 	return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
 	unsigned long nsecs_rem = do_div(duration, 1000);
 	/* log10(ULONG_MAX) + '\0' */
@@ -464,12 +464,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+	int ret;
+
+	ret = trace_print_graph_duration(duration, s);
+	if (ret != TRACE_TYPE_HANDLED)
+		return ret;
 
 	ret = trace_seq_printf(s, "| ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
-	return TRACE_TYPE_HANDLED;
 
+	return TRACE_TYPE_HANDLED;
 }
 
 /* Case of a leaf function on its call entry */
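The new subtime field also explains why the barrier() and curr_ret_stack decrement moved into ftrace_return_to_handler(): the stack entry must stay live until profile_graph_return() has read it. A small worked example (plain user-space C with illustrative numbers, not kernel code) of the bookkeeping that hunk performs when the graph-time option is off:

	#include <stdio.h>

	int main(void)
	{
		/* one stack slot per active call; [0] = parent, [1] = child */
		unsigned long long subtime[2] = { 0, 0 };
		unsigned long long child_total = 30;	/* child ran 30us in all   */
		unsigned long long parent_total = 100;	/* parent ran 100us in all */
		int index;

		/* child returns first (depth 1): charge its time to the parent */
		index = 1;
		if (index)
			subtime[index - 1] += child_total;

		/* parent returns (depth 0): report total minus children = self */
		index = 0;
		if (subtime[index] < parent_total)
			parent_total -= subtime[index];
		else
			parent_total = 0;

		printf("parent self time: %llu us\n", parent_total);	/* 70 us */
		return 0;
	}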
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d72b9a63b247..aeac358ee231 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -19,6 +19,16 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
+void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+
+	s->buffer[len] = 0;
+	seq_puts(m, s->buffer);
+
+	trace_seq_init(s);
+}
+
 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index e0bde39c2dd9..91630217fb46 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -20,6 +20,8 @@ trace_print_bprintk_msg_only(struct trace_iterator *iter);
 extern enum print_line_t
 trace_print_printk_msg_only(struct trace_iterator *iter);
 
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 extern int
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index acdebd771a93..fdde3a4a94cd 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -85,7 +85,7 @@ static int stat_seq_init(struct tracer_stat_session *session)
 	if (!ts->stat_cmp)
 		ts->stat_cmp = dummy_cmp;
 
-	stat = ts->stat_start();
+	stat = ts->stat_start(ts);
 	if (!stat)
 		goto exit;
 
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index 202274cf7f3d..f3546a2cd826 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -12,7 +12,7 @@ struct tracer_stat {
 	/* The name of your stat file */
 	const char		*name;
 	/* Iteration over statistic entries */
-	void			*(*stat_start)(void);
+	void			*(*stat_start)(struct tracer_stat *trace);
 	void			*(*stat_next)(void *prev, int idx);
 	/* Compare two entries for stats sorting */
 	int			(*stat_cmp)(void *p1, void *p2);
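This signature change is what lets ftrace.c embed one tracer_stat per CPU: a callback handed its own struct tracer_stat pointer can recover the enclosing state with container_of(), exactly as function_stat_start() does. A mock user-space sketch of the pattern (the type and function names here are illustrative, not kernel API):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct tracer_stat {
		const char *name;
		void *(*stat_start)(struct tracer_stat *trace);
	};

	struct per_cpu_stat {
		int cpu;
		struct tracer_stat stat;	/* embedded, one per CPU */
	};

	static void *my_stat_start(struct tracer_stat *trace)
	{
		struct per_cpu_stat *s =
			container_of(trace, struct per_cpu_stat, stat);

		printf("stat_start for cpu %d\n", s->cpu);
		return NULL;		/* no entries in this sketch */
	}

	int main(void)
	{
		struct per_cpu_stat s = { .cpu = 0, .stat = {
			.name = "function0", .stat_start = my_stat_start } };

		/* the stat core now calls ts->stat_start(ts) */
		s.stat.stat_start(&s.stat);
		return 0;
	}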
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 797201e4a137..984b9175c13d 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -152,7 +152,7 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
 	return ret;
 }
 
-static void *workqueue_stat_start(void)
+static void *workqueue_stat_start(struct tracer_stat *trace)
 {
 	int cpu;
 	void *ret = NULL;