Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 901 |
1 files changed, 759 insertions, 142 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d2e75479dc50..6adf660fc816 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
31 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
32 | #include <linux/kprobes.h> | 32 | #include <linux/kprobes.h> |
33 | #include <linux/seq_file.h> | ||
33 | #include <linux/writeback.h> | 34 | #include <linux/writeback.h> |
34 | 35 | ||
35 | #include <linux/stacktrace.h> | 36 | #include <linux/stacktrace.h> |
@@ -43,6 +44,38 @@ | |||
43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 44 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; |
44 | unsigned long __read_mostly tracing_thresh; | 45 | unsigned long __read_mostly tracing_thresh; |
45 | 46 | ||
47 | /* | ||
48 | * We need to change this state when a selftest is running. | ||
49 | * A selftest will look into the ring-buffer to count the | ||
50 | * entries inserted during the selftest although some concurrent | ||
51 | * insertions into the ring-buffer such as ftrace_printk could occur | ||
52 | * at the same time, giving false positive or negative results. | ||
53 | */ | ||
54 | static bool __read_mostly tracing_selftest_running; | ||
55 | |||
56 | /* For tracers that don't implement custom flags */ | ||
57 | static struct tracer_opt dummy_tracer_opt[] = { | ||
58 | { } | ||
59 | }; | ||
60 | |||
61 | static struct tracer_flags dummy_tracer_flags = { | ||
62 | .val = 0, | ||
63 | .opts = dummy_tracer_opt | ||
64 | }; | ||
65 | |||
66 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | ||
67 | { | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Kill all tracing for good (never come back). | ||
73 | * It is initialized to 1 but will turn to zero if the initialization | ||
74 | * of the tracer is successful. But that is the only place that sets | ||
75 | * this back to zero. | ||
76 | */ | ||
77 | int tracing_disabled = 1; | ||
78 | |||
46 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 79 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); |
47 | 80 | ||
48 | static inline void ftrace_disable_cpu(void) | 81 | static inline void ftrace_disable_cpu(void) |
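For a tracer that does implement custom flags, the three pieces defaulted above look roughly like this; a minimal sketch with hypothetical names (the real users live in the individual tracer files):

#define MY_TRACER_OPT_VERBOSE	0x1	/* hypothetical option bit */

static struct tracer_opt my_tracer_opts[] = {
	{ .name = "verbose", .bit = MY_TRACER_OPT_VERBOSE },
	{ } /* terminator, exactly like dummy_tracer_opt above */
};

static struct tracer_flags my_tracer_flags = {
	.val = 0,			/* no option set by default */
	.opts = my_tracer_opts,
};

/* called when "verbose" or "noverbose" is written to trace_options */
static int my_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;	/* accept the toggle; the core updates ->val */
}

register_tracer() further down falls back to the dummy versions whenever a tracer leaves .flags, .opts or .set_flag unset, so the trace_options code can always dereference them.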
@@ -62,7 +95,36 @@ static cpumask_t __read_mostly tracing_buffer_mask; | |||
62 | #define for_each_tracing_cpu(cpu) \ | 95 | #define for_each_tracing_cpu(cpu) \ |
63 | for_each_cpu_mask(cpu, tracing_buffer_mask) | 96 | for_each_cpu_mask(cpu, tracing_buffer_mask) |
64 | 97 | ||
65 | static int tracing_disabled = 1; | 98 | /* |
99 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | ||
100 | * | ||
101 | * If there is an oops (or kernel panic) and the ftrace_dump_on_oops | ||
102 | * is set, then ftrace_dump is called. This will output the contents | ||
103 | * of the ftrace buffers to the console. This is very useful for | ||
104 | * capturing traces that lead to crashes and outputting them to a | ||
105 | * serial console. | ||
106 | * | ||
107 | * It is off by default, but you can enable it either by specifying | ||
108 | * "ftrace_dump_on_oops" on the kernel command line, or by setting | ||
109 | * /proc/sys/kernel/ftrace_dump_on_oops to true. | ||
110 | */ | ||
111 | int ftrace_dump_on_oops; | ||
112 | |||
113 | static int tracing_set_tracer(char *buf); | ||
114 | |||
115 | static int __init set_ftrace(char *str) | ||
116 | { | ||
117 | tracing_set_tracer(str); | ||
118 | return 1; | ||
119 | } | ||
120 | __setup("ftrace", set_ftrace); | ||
121 | |||
122 | static int __init set_ftrace_dump_on_oops(char *str) | ||
123 | { | ||
124 | ftrace_dump_on_oops = 1; | ||
125 | return 1; | ||
126 | } | ||
127 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | ||
66 | 128 | ||
67 | long | 129 | long |
68 | ns2usecs(cycle_t nsec) | 130 | ns2usecs(cycle_t nsec) |
@@ -112,6 +174,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | |||
112 | /* tracer_enabled is used to toggle activation of a tracer */ | 174 | /* tracer_enabled is used to toggle activation of a tracer */ |
113 | static int tracer_enabled = 1; | 175 | static int tracer_enabled = 1; |
114 | 176 | ||
177 | /** | ||
178 | * tracing_is_enabled - return tracer_enabled status | ||
179 | * | ||
180 | * This function is used by other tracers to know the status | ||
181 | * of the tracer_enabled flag. Tracers may use this function | ||
182 | * to know if they should enable their features when starting | ||
183 | * up. See irqsoff tracer for an example (start_irqsoff_tracer). | ||
184 | */ | ||
185 | int tracing_is_enabled(void) | ||
186 | { | ||
187 | return tracer_enabled; | ||
188 | } | ||
189 | |||
115 | /* function tracing enabled */ | 190 | /* function tracing enabled */ |
116 | int ftrace_function_enabled; | 191 | int ftrace_function_enabled; |
117 | 192 | ||
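As the comment above suggests, a tracer's start-up path can consult tracing_is_enabled() before arming anything expensive. A minimal sketch, where my_tracer_init() and start_my_hooks() are hypothetical stand-ins for what start_irqsoff_tracer() actually does:

static int my_tracer_init(struct trace_array *tr)
{
	/* only arm the hooks if tracing is currently enabled */
	if (tracing_is_enabled())
		start_my_hooks(tr);	/* hypothetical helper */
	return 0;
}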
@@ -153,8 +228,9 @@ static DEFINE_MUTEX(trace_types_lock); | |||
153 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 228 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
154 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 229 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
155 | 230 | ||
156 | /* trace_flags holds iter_ctrl options */ | 231 | /* trace_flags holds trace_options default values */ |
157 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; | 232 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
233 | TRACE_ITER_ANNOTATE; | ||
158 | 234 | ||
159 | /** | 235 | /** |
160 | * trace_wake_up - wake up tasks waiting for trace input | 236 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -193,13 +269,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) | |||
193 | return nsecs / 1000; | 269 | return nsecs / 1000; |
194 | } | 270 | } |
195 | 271 | ||
196 | /* | ||
197 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
198 | * control the output of kernel symbols. | ||
199 | */ | ||
200 | #define TRACE_ITER_SYM_MASK \ | ||
201 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
202 | |||
203 | /* These must match the bit positions in trace_iterator_flags */ | 272 | /* These must match the bit positions in trace_iterator_flags */ |
204 | static const char *trace_options[] = { | 273 | static const char *trace_options[] = { |
205 | "print-parent", | 274 | "print-parent", |
@@ -213,6 +282,11 @@ static const char *trace_options[] = { | |||
213 | "stacktrace", | 282 | "stacktrace", |
214 | "sched-tree", | 283 | "sched-tree", |
215 | "ftrace_printk", | 284 | "ftrace_printk", |
285 | "ftrace_preempt", | ||
286 | "branch", | ||
287 | "annotate", | ||
288 | "userstacktrace", | ||
289 | "sym-userobj", | ||
216 | NULL | 290 | NULL |
217 | }; | 291 | }; |
218 | 292 | ||
@@ -359,6 +433,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | |||
359 | return trace_seq_putmem(s, hex, j); | 433 | return trace_seq_putmem(s, hex, j); |
360 | } | 434 | } |
361 | 435 | ||
436 | static int | ||
437 | trace_seq_path(struct trace_seq *s, struct path *path) | ||
438 | { | ||
439 | unsigned char *p; | ||
440 | |||
441 | if (s->len >= (PAGE_SIZE - 1)) | ||
442 | return 0; | ||
443 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
444 | if (!IS_ERR(p)) { | ||
445 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
446 | if (p) { | ||
447 | s->len = p - s->buffer; | ||
448 | return 1; | ||
449 | } | ||
450 | } else { | ||
451 | s->buffer[s->len++] = '?'; | ||
452 | return 1; | ||
453 | } | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
362 | static void | 458 | static void |
363 | trace_seq_reset(struct trace_seq *s) | 459 | trace_seq_reset(struct trace_seq *s) |
364 | { | 460 | { |
@@ -470,7 +566,17 @@ int register_tracer(struct tracer *type) | |||
470 | return -1; | 566 | return -1; |
471 | } | 567 | } |
472 | 568 | ||
569 | /* | ||
570 | * When this gets called we hold the BKL which means that | ||
571 | * preemption is disabled. Various trace selftests however | ||
572 | * need to disable and enable preemption for successful tests. | ||
573 | * So we drop the BKL here and grab it after the tests again. | ||
574 | */ | ||
575 | unlock_kernel(); | ||
473 | mutex_lock(&trace_types_lock); | 576 | mutex_lock(&trace_types_lock); |
577 | |||
578 | tracing_selftest_running = true; | ||
579 | |||
474 | for (t = trace_types; t; t = t->next) { | 580 | for (t = trace_types; t; t = t->next) { |
475 | if (strcmp(type->name, t->name) == 0) { | 581 | if (strcmp(type->name, t->name) == 0) { |
476 | /* already found */ | 582 | /* already found */ |
@@ -481,12 +587,20 @@ int register_tracer(struct tracer *type) | |||
481 | } | 587 | } |
482 | } | 588 | } |
483 | 589 | ||
590 | if (!type->set_flag) | ||
591 | type->set_flag = &dummy_set_flag; | ||
592 | if (!type->flags) | ||
593 | type->flags = &dummy_tracer_flags; | ||
594 | else | ||
595 | if (!type->flags->opts) | ||
596 | type->flags->opts = dummy_tracer_opt; | ||
597 | |||
484 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 598 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
485 | if (type->selftest) { | 599 | if (type->selftest) { |
486 | struct tracer *saved_tracer = current_trace; | 600 | struct tracer *saved_tracer = current_trace; |
487 | struct trace_array *tr = &global_trace; | 601 | struct trace_array *tr = &global_trace; |
488 | int saved_ctrl = tr->ctrl; | ||
489 | int i; | 602 | int i; |
603 | |||
490 | /* | 604 | /* |
491 | * Run a selftest on this tracer. | 605 | * Run a selftest on this tracer. |
492 | * Here we reset the trace buffer, and set the current | 606 | * Here we reset the trace buffer, and set the current |
@@ -494,25 +608,23 @@ int register_tracer(struct tracer *type) | |||
494 | * internal tracing to verify that everything is in order. | 608 | * internal tracing to verify that everything is in order. |
495 | * If we fail, we do not register this tracer. | 609 | * If we fail, we do not register this tracer. |
496 | */ | 610 | */ |
497 | for_each_tracing_cpu(i) { | 611 | for_each_tracing_cpu(i) |
498 | tracing_reset(tr, i); | 612 | tracing_reset(tr, i); |
499 | } | 613 | |
500 | current_trace = type; | 614 | current_trace = type; |
501 | tr->ctrl = 0; | ||
502 | /* the test is responsible for initializing and enabling */ | 615 | /* the test is responsible for initializing and enabling */ |
503 | pr_info("Testing tracer %s: ", type->name); | 616 | pr_info("Testing tracer %s: ", type->name); |
504 | ret = type->selftest(type, tr); | 617 | ret = type->selftest(type, tr); |
505 | /* the test is responsible for resetting too */ | 618 | /* the test is responsible for resetting too */ |
506 | current_trace = saved_tracer; | 619 | current_trace = saved_tracer; |
507 | tr->ctrl = saved_ctrl; | ||
508 | if (ret) { | 620 | if (ret) { |
509 | printk(KERN_CONT "FAILED!\n"); | 621 | printk(KERN_CONT "FAILED!\n"); |
510 | goto out; | 622 | goto out; |
511 | } | 623 | } |
512 | /* Only reset on passing, to avoid touching corrupted buffers */ | 624 | /* Only reset on passing, to avoid touching corrupted buffers */ |
513 | for_each_tracing_cpu(i) { | 625 | for_each_tracing_cpu(i) |
514 | tracing_reset(tr, i); | 626 | tracing_reset(tr, i); |
515 | } | 627 | |
516 | printk(KERN_CONT "PASSED\n"); | 628 | printk(KERN_CONT "PASSED\n"); |
517 | } | 629 | } |
518 | #endif | 630 | #endif |
@@ -524,7 +636,9 @@ int register_tracer(struct tracer *type) | |||
524 | max_tracer_type_len = len; | 636 | max_tracer_type_len = len; |
525 | 637 | ||
526 | out: | 638 | out: |
639 | tracing_selftest_running = false; | ||
527 | mutex_unlock(&trace_types_lock); | 640 | mutex_unlock(&trace_types_lock); |
641 | lock_kernel(); | ||
528 | 642 | ||
529 | return ret; | 643 | return ret; |
530 | } | 644 | } |
@@ -581,6 +695,91 @@ static void trace_init_cmdlines(void) | |||
581 | cmdline_idx = 0; | 695 | cmdline_idx = 0; |
582 | } | 696 | } |
583 | 697 | ||
698 | static int trace_stop_count; | ||
699 | static DEFINE_SPINLOCK(tracing_start_lock); | ||
700 | |||
701 | /** | ||
702 | * ftrace_off_permanent - disable all ftrace code permanently | ||
703 | * | ||
704 | * This should only be called when a serious anomaly has | ||
705 | * been detected. This will turn off function tracing, | ||
706 | * ring buffers, and other tracing utilities. It takes no | ||
707 | * locks and can be called from any context. | ||
708 | */ | ||
709 | void ftrace_off_permanent(void) | ||
710 | { | ||
711 | tracing_disabled = 1; | ||
712 | ftrace_stop(); | ||
713 | tracing_off_permanent(); | ||
714 | } | ||
715 | |||
716 | /** | ||
717 | * tracing_start - quick start of the tracer | ||
718 | * | ||
719 | * If tracing is enabled but was stopped by tracing_stop, | ||
720 | * this will start the tracer back up. | ||
721 | */ | ||
722 | void tracing_start(void) | ||
723 | { | ||
724 | struct ring_buffer *buffer; | ||
725 | unsigned long flags; | ||
726 | |||
727 | if (tracing_disabled) | ||
728 | return; | ||
729 | |||
730 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
731 | if (--trace_stop_count) | ||
732 | goto out; | ||
733 | |||
734 | if (trace_stop_count < 0) { | ||
735 | /* Someone screwed up their debugging */ | ||
736 | WARN_ON_ONCE(1); | ||
737 | trace_stop_count = 0; | ||
738 | goto out; | ||
739 | } | ||
740 | |||
741 | |||
742 | buffer = global_trace.buffer; | ||
743 | if (buffer) | ||
744 | ring_buffer_record_enable(buffer); | ||
745 | |||
746 | buffer = max_tr.buffer; | ||
747 | if (buffer) | ||
748 | ring_buffer_record_enable(buffer); | ||
749 | |||
750 | ftrace_start(); | ||
751 | out: | ||
752 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
753 | } | ||
754 | |||
755 | /** | ||
756 | * tracing_stop - quick stop of the tracer | ||
757 | * | ||
758 | * Lightweight way to stop tracing. Use in conjunction with | ||
759 | * tracing_start. | ||
760 | */ | ||
761 | void tracing_stop(void) | ||
762 | { | ||
763 | struct ring_buffer *buffer; | ||
764 | unsigned long flags; | ||
765 | |||
766 | ftrace_stop(); | ||
767 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
768 | if (trace_stop_count++) | ||
769 | goto out; | ||
770 | |||
771 | buffer = global_trace.buffer; | ||
772 | if (buffer) | ||
773 | ring_buffer_record_disable(buffer); | ||
774 | |||
775 | buffer = max_tr.buffer; | ||
776 | if (buffer) | ||
777 | ring_buffer_record_disable(buffer); | ||
778 | |||
779 | out: | ||
780 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
781 | } | ||
782 | |||
584 | void trace_stop_cmdline_recording(void); | 783 | void trace_stop_cmdline_recording(void); |
585 | 784 | ||
586 | static void trace_save_cmdline(struct task_struct *tsk) | 785 | static void trace_save_cmdline(struct task_struct *tsk) |
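The pair above nests through trace_stop_count: the ring buffers are only re-enabled once every tracing_stop() has been matched by a tracing_start(). The typical pattern, mirroring what __tracing_open() and tracing_release() do later in this patch, is simply:

	tracing_stop();		/* quiesce the ring buffers */
	/* ... read or resize the buffers without new entries racing in ... */
	tracing_start();	/* resume once the matching stop is undone */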
@@ -618,7 +817,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
618 | spin_unlock(&trace_cmdline_lock); | 817 | spin_unlock(&trace_cmdline_lock); |
619 | } | 818 | } |
620 | 819 | ||
621 | static char *trace_find_cmdline(int pid) | 820 | char *trace_find_cmdline(int pid) |
622 | { | 821 | { |
623 | char *cmdline = "<...>"; | 822 | char *cmdline = "<...>"; |
624 | unsigned map; | 823 | unsigned map; |
@@ -655,6 +854,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
655 | 854 | ||
656 | entry->preempt_count = pc & 0xff; | 855 | entry->preempt_count = pc & 0xff; |
657 | entry->pid = (tsk) ? tsk->pid : 0; | 856 | entry->pid = (tsk) ? tsk->pid : 0; |
857 | entry->tgid = (tsk) ? tsk->tgid : 0; | ||
658 | entry->flags = | 858 | entry->flags = |
659 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 859 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
660 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 860 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
@@ -691,6 +891,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
691 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 891 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
692 | } | 892 | } |
693 | 893 | ||
894 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
895 | static void __trace_graph_entry(struct trace_array *tr, | ||
896 | struct trace_array_cpu *data, | ||
897 | struct ftrace_graph_ent *trace, | ||
898 | unsigned long flags, | ||
899 | int pc) | ||
900 | { | ||
901 | struct ring_buffer_event *event; | ||
902 | struct ftrace_graph_ent_entry *entry; | ||
903 | unsigned long irq_flags; | ||
904 | |||
905 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
906 | return; | ||
907 | |||
908 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
909 | &irq_flags); | ||
910 | if (!event) | ||
911 | return; | ||
912 | entry = ring_buffer_event_data(event); | ||
913 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
914 | entry->ent.type = TRACE_GRAPH_ENT; | ||
915 | entry->graph_ent = *trace; | ||
916 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
917 | } | ||
918 | |||
919 | static void __trace_graph_return(struct trace_array *tr, | ||
920 | struct trace_array_cpu *data, | ||
921 | struct ftrace_graph_ret *trace, | ||
922 | unsigned long flags, | ||
923 | int pc) | ||
924 | { | ||
925 | struct ring_buffer_event *event; | ||
926 | struct ftrace_graph_ret_entry *entry; | ||
927 | unsigned long irq_flags; | ||
928 | |||
929 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
930 | return; | ||
931 | |||
932 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
933 | &irq_flags); | ||
934 | if (!event) | ||
935 | return; | ||
936 | entry = ring_buffer_event_data(event); | ||
937 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
938 | entry->ent.type = TRACE_GRAPH_RET; | ||
939 | entry->ret = *trace; | ||
940 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
941 | } | ||
942 | #endif | ||
943 | |||
694 | void | 944 | void |
695 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 945 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, |
696 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 946 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
@@ -742,6 +992,46 @@ void __trace_stack(struct trace_array *tr, | |||
742 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); | 992 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); |
743 | } | 993 | } |
744 | 994 | ||
995 | static void ftrace_trace_userstack(struct trace_array *tr, | ||
996 | struct trace_array_cpu *data, | ||
997 | unsigned long flags, int pc) | ||
998 | { | ||
999 | #ifdef CONFIG_STACKTRACE | ||
1000 | struct ring_buffer_event *event; | ||
1001 | struct userstack_entry *entry; | ||
1002 | struct stack_trace trace; | ||
1003 | unsigned long irq_flags; | ||
1004 | |||
1005 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | ||
1006 | return; | ||
1007 | |||
1008 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
1009 | &irq_flags); | ||
1010 | if (!event) | ||
1011 | return; | ||
1012 | entry = ring_buffer_event_data(event); | ||
1013 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
1014 | entry->ent.type = TRACE_USER_STACK; | ||
1015 | |||
1016 | memset(&entry->caller, 0, sizeof(entry->caller)); | ||
1017 | |||
1018 | trace.nr_entries = 0; | ||
1019 | trace.max_entries = FTRACE_STACK_ENTRIES; | ||
1020 | trace.skip = 0; | ||
1021 | trace.entries = entry->caller; | ||
1022 | |||
1023 | save_stack_trace_user(&trace); | ||
1024 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
1025 | #endif | ||
1026 | } | ||
1027 | |||
1028 | void __trace_userstack(struct trace_array *tr, | ||
1029 | struct trace_array_cpu *data, | ||
1030 | unsigned long flags) | ||
1031 | { | ||
1032 | ftrace_trace_userstack(tr, data, flags, preempt_count()); | ||
1033 | } | ||
1034 | |||
745 | static void | 1035 | static void |
746 | ftrace_trace_special(void *__tr, void *__data, | 1036 | ftrace_trace_special(void *__tr, void *__data, |
747 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | 1037 | unsigned long arg1, unsigned long arg2, unsigned long arg3, |
@@ -765,6 +1055,7 @@ ftrace_trace_special(void *__tr, void *__data, | |||
765 | entry->arg3 = arg3; | 1055 | entry->arg3 = arg3; |
766 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1056 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
767 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); | 1057 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); |
1058 | ftrace_trace_userstack(tr, data, irq_flags, pc); | ||
768 | 1059 | ||
769 | trace_wake_up(); | 1060 | trace_wake_up(); |
770 | } | 1061 | } |
@@ -803,6 +1094,7 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
803 | entry->next_cpu = task_cpu(next); | 1094 | entry->next_cpu = task_cpu(next); |
804 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1095 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
805 | ftrace_trace_stack(tr, data, flags, 5, pc); | 1096 | ftrace_trace_stack(tr, data, flags, 5, pc); |
1097 | ftrace_trace_userstack(tr, data, flags, pc); | ||
806 | } | 1098 | } |
807 | 1099 | ||
808 | void | 1100 | void |
@@ -832,6 +1124,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
832 | entry->next_cpu = task_cpu(wakee); | 1124 | entry->next_cpu = task_cpu(wakee); |
833 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1125 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
834 | ftrace_trace_stack(tr, data, flags, 6, pc); | 1126 | ftrace_trace_stack(tr, data, flags, 6, pc); |
1127 | ftrace_trace_userstack(tr, data, flags, pc); | ||
835 | 1128 | ||
836 | trace_wake_up(); | 1129 | trace_wake_up(); |
837 | } | 1130 | } |
@@ -841,26 +1134,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
841 | { | 1134 | { |
842 | struct trace_array *tr = &global_trace; | 1135 | struct trace_array *tr = &global_trace; |
843 | struct trace_array_cpu *data; | 1136 | struct trace_array_cpu *data; |
1137 | unsigned long flags; | ||
844 | int cpu; | 1138 | int cpu; |
845 | int pc; | 1139 | int pc; |
846 | 1140 | ||
847 | if (tracing_disabled || !tr->ctrl) | 1141 | if (tracing_disabled) |
848 | return; | 1142 | return; |
849 | 1143 | ||
850 | pc = preempt_count(); | 1144 | pc = preempt_count(); |
851 | preempt_disable_notrace(); | 1145 | local_irq_save(flags); |
852 | cpu = raw_smp_processor_id(); | 1146 | cpu = raw_smp_processor_id(); |
853 | data = tr->data[cpu]; | 1147 | data = tr->data[cpu]; |
854 | 1148 | ||
855 | if (likely(!atomic_read(&data->disabled))) | 1149 | if (likely(atomic_inc_return(&data->disabled) == 1)) |
856 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); | 1150 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); |
857 | 1151 | ||
858 | preempt_enable_notrace(); | 1152 | atomic_dec(&data->disabled); |
1153 | local_irq_restore(flags); | ||
859 | } | 1154 | } |
860 | 1155 | ||
861 | #ifdef CONFIG_FUNCTION_TRACER | 1156 | #ifdef CONFIG_FUNCTION_TRACER |
862 | static void | 1157 | static void |
863 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 1158 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) |
864 | { | 1159 | { |
865 | struct trace_array *tr = &global_trace; | 1160 | struct trace_array *tr = &global_trace; |
866 | struct trace_array_cpu *data; | 1161 | struct trace_array_cpu *data; |
@@ -873,8 +1168,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
873 | return; | 1168 | return; |
874 | 1169 | ||
875 | pc = preempt_count(); | 1170 | pc = preempt_count(); |
876 | resched = need_resched(); | 1171 | resched = ftrace_preempt_disable(); |
877 | preempt_disable_notrace(); | ||
878 | local_save_flags(flags); | 1172 | local_save_flags(flags); |
879 | cpu = raw_smp_processor_id(); | 1173 | cpu = raw_smp_processor_id(); |
880 | data = tr->data[cpu]; | 1174 | data = tr->data[cpu]; |
@@ -884,11 +1178,96 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
884 | trace_function(tr, data, ip, parent_ip, flags, pc); | 1178 | trace_function(tr, data, ip, parent_ip, flags, pc); |
885 | 1179 | ||
886 | atomic_dec(&data->disabled); | 1180 | atomic_dec(&data->disabled); |
887 | if (resched) | 1181 | ftrace_preempt_enable(resched); |
888 | preempt_enable_no_resched_notrace(); | 1182 | } |
889 | else | 1183 | |
890 | preempt_enable_notrace(); | 1184 | static void |
1185 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
1186 | { | ||
1187 | struct trace_array *tr = &global_trace; | ||
1188 | struct trace_array_cpu *data; | ||
1189 | unsigned long flags; | ||
1190 | long disabled; | ||
1191 | int cpu; | ||
1192 | int pc; | ||
1193 | |||
1194 | if (unlikely(!ftrace_function_enabled)) | ||
1195 | return; | ||
1196 | |||
1197 | /* | ||
1198 | * Need to use raw, since this must be called before the | ||
1199 | * recursive protection is performed. | ||
1200 | */ | ||
1201 | local_irq_save(flags); | ||
1202 | cpu = raw_smp_processor_id(); | ||
1203 | data = tr->data[cpu]; | ||
1204 | disabled = atomic_inc_return(&data->disabled); | ||
1205 | |||
1206 | if (likely(disabled == 1)) { | ||
1207 | pc = preempt_count(); | ||
1208 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1209 | } | ||
1210 | |||
1211 | atomic_dec(&data->disabled); | ||
1212 | local_irq_restore(flags); | ||
1213 | } | ||
1214 | |||
1215 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1216 | int trace_graph_entry(struct ftrace_graph_ent *trace) | ||
1217 | { | ||
1218 | struct trace_array *tr = &global_trace; | ||
1219 | struct trace_array_cpu *data; | ||
1220 | unsigned long flags; | ||
1221 | long disabled; | ||
1222 | int cpu; | ||
1223 | int pc; | ||
1224 | |||
1225 | if (!ftrace_trace_task(current)) | ||
1226 | return 0; | ||
1227 | |||
1228 | if (!ftrace_graph_addr(trace->func)) | ||
1229 | return 0; | ||
1230 | |||
1231 | local_irq_save(flags); | ||
1232 | cpu = raw_smp_processor_id(); | ||
1233 | data = tr->data[cpu]; | ||
1234 | disabled = atomic_inc_return(&data->disabled); | ||
1235 | if (likely(disabled == 1)) { | ||
1236 | pc = preempt_count(); | ||
1237 | __trace_graph_entry(tr, data, trace, flags, pc); | ||
1238 | } | ||
1239 | /* Only do the atomic if it is not already set */ | ||
1240 | if (!test_tsk_trace_graph(current)) | ||
1241 | set_tsk_trace_graph(current); | ||
1242 | atomic_dec(&data->disabled); | ||
1243 | local_irq_restore(flags); | ||
1244 | |||
1245 | return 1; | ||
1246 | } | ||
1247 | |||
1248 | void trace_graph_return(struct ftrace_graph_ret *trace) | ||
1249 | { | ||
1250 | struct trace_array *tr = &global_trace; | ||
1251 | struct trace_array_cpu *data; | ||
1252 | unsigned long flags; | ||
1253 | long disabled; | ||
1254 | int cpu; | ||
1255 | int pc; | ||
1256 | |||
1257 | local_irq_save(flags); | ||
1258 | cpu = raw_smp_processor_id(); | ||
1259 | data = tr->data[cpu]; | ||
1260 | disabled = atomic_inc_return(&data->disabled); | ||
1261 | if (likely(disabled == 1)) { | ||
1262 | pc = preempt_count(); | ||
1263 | __trace_graph_return(tr, data, trace, flags, pc); | ||
1264 | } | ||
1265 | if (!trace->depth) | ||
1266 | clear_tsk_trace_graph(current); | ||
1267 | atomic_dec(&data->disabled); | ||
1268 | local_irq_restore(flags); | ||
891 | } | 1269 | } |
1270 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
892 | 1271 | ||
893 | static struct ftrace_ops trace_ops __read_mostly = | 1272 | static struct ftrace_ops trace_ops __read_mostly = |
894 | { | 1273 | { |
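ftrace_preempt_disable() and ftrace_preempt_enable(), used by function_trace_call_preempt_only() above, package up the need_resched()/preempt_disable_notrace() dance that the old callback open-coded. The helpers live in trace.h elsewhere in this series; a sketch inferred from the code they replace (treat the exact bodies as an assumption):

static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}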
@@ -898,9 +1277,14 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
898 | void tracing_start_function_trace(void) | 1277 | void tracing_start_function_trace(void) |
899 | { | 1278 | { |
900 | ftrace_function_enabled = 0; | 1279 | ftrace_function_enabled = 0; |
1280 | |||
1281 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
1282 | trace_ops.func = function_trace_call_preempt_only; | ||
1283 | else | ||
1284 | trace_ops.func = function_trace_call; | ||
1285 | |||
901 | register_ftrace_function(&trace_ops); | 1286 | register_ftrace_function(&trace_ops); |
902 | if (tracer_enabled) | 1287 | ftrace_function_enabled = 1; |
903 | ftrace_function_enabled = 1; | ||
904 | } | 1288 | } |
905 | 1289 | ||
906 | void tracing_stop_function_trace(void) | 1290 | void tracing_stop_function_trace(void) |
@@ -912,6 +1296,7 @@ void tracing_stop_function_trace(void) | |||
912 | 1296 | ||
913 | enum trace_file_type { | 1297 | enum trace_file_type { |
914 | TRACE_FILE_LAT_FMT = 1, | 1298 | TRACE_FILE_LAT_FMT = 1, |
1299 | TRACE_FILE_ANNOTATE = 2, | ||
915 | }; | 1300 | }; |
916 | 1301 | ||
917 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) | 1302 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) |
@@ -1047,10 +1432,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1047 | 1432 | ||
1048 | atomic_inc(&trace_record_cmdline_disabled); | 1433 | atomic_inc(&trace_record_cmdline_disabled); |
1049 | 1434 | ||
1050 | /* let the tracer grab locks here if needed */ | ||
1051 | if (current_trace->start) | ||
1052 | current_trace->start(iter); | ||
1053 | |||
1054 | if (*pos != iter->pos) { | 1435 | if (*pos != iter->pos) { |
1055 | iter->ent = NULL; | 1436 | iter->ent = NULL; |
1056 | iter->cpu = 0; | 1437 | iter->cpu = 0; |
@@ -1077,14 +1458,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1077 | 1458 | ||
1078 | static void s_stop(struct seq_file *m, void *p) | 1459 | static void s_stop(struct seq_file *m, void *p) |
1079 | { | 1460 | { |
1080 | struct trace_iterator *iter = m->private; | ||
1081 | |||
1082 | atomic_dec(&trace_record_cmdline_disabled); | 1461 | atomic_dec(&trace_record_cmdline_disabled); |
1083 | |||
1084 | /* let the tracer release locks here if needed */ | ||
1085 | if (current_trace && current_trace == iter->trace && iter->trace->stop) | ||
1086 | iter->trace->stop(iter); | ||
1087 | |||
1088 | mutex_unlock(&trace_types_lock); | 1462 | mutex_unlock(&trace_types_lock); |
1089 | } | 1463 | } |
1090 | 1464 | ||
@@ -1143,7 +1517,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1143 | # define IP_FMT "%016lx" | 1517 | # define IP_FMT "%016lx" |
1144 | #endif | 1518 | #endif |
1145 | 1519 | ||
1146 | static int | 1520 | int |
1147 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 1521 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
1148 | { | 1522 | { |
1149 | int ret; | 1523 | int ret; |
@@ -1164,6 +1538,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
1164 | return ret; | 1538 | return ret; |
1165 | } | 1539 | } |
1166 | 1540 | ||
1541 | static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
1542 | unsigned long ip, unsigned long sym_flags) | ||
1543 | { | ||
1544 | struct file *file = NULL; | ||
1545 | unsigned long vmstart = 0; | ||
1546 | int ret = 1; | ||
1547 | |||
1548 | if (mm) { | ||
1549 | const struct vm_area_struct *vma; | ||
1550 | |||
1551 | down_read(&mm->mmap_sem); | ||
1552 | vma = find_vma(mm, ip); | ||
1553 | if (vma) { | ||
1554 | file = vma->vm_file; | ||
1555 | vmstart = vma->vm_start; | ||
1556 | } | ||
1557 | if (file) { | ||
1558 | ret = trace_seq_path(s, &file->f_path); | ||
1559 | if (ret) | ||
1560 | ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); | ||
1561 | } | ||
1562 | up_read(&mm->mmap_sem); | ||
1563 | } | ||
1564 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
1565 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
1566 | return ret; | ||
1567 | } | ||
1568 | |||
1569 | static int | ||
1570 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
1571 | unsigned long sym_flags) | ||
1572 | { | ||
1573 | struct mm_struct *mm = NULL; | ||
1574 | int ret = 1; | ||
1575 | unsigned int i; | ||
1576 | |||
1577 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
1578 | struct task_struct *task; | ||
1579 | /* | ||
1580 | * we do the lookup on the thread group leader, | ||
1581 | * since individual threads might have already quit! | ||
1582 | */ | ||
1583 | rcu_read_lock(); | ||
1584 | task = find_task_by_vpid(entry->ent.tgid); | ||
1585 | if (task) | ||
1586 | mm = get_task_mm(task); | ||
1587 | rcu_read_unlock(); | ||
1588 | } | ||
1589 | |||
1590 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
1591 | unsigned long ip = entry->caller[i]; | ||
1592 | |||
1593 | if (ip == ULONG_MAX || !ret) | ||
1594 | break; | ||
1595 | if (i && ret) | ||
1596 | ret = trace_seq_puts(s, " <- "); | ||
1597 | if (!ip) { | ||
1598 | if (ret) | ||
1599 | ret = trace_seq_puts(s, "??"); | ||
1600 | continue; | ||
1601 | } | ||
1602 | if (!ret) | ||
1603 | break; | ||
1604 | if (ret) | ||
1605 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
1606 | } | ||
1607 | |||
1608 | if (mm) | ||
1609 | mmput(mm); | ||
1610 | return ret; | ||
1611 | } | ||
1612 | |||
1167 | static void print_lat_help_header(struct seq_file *m) | 1613 | static void print_lat_help_header(struct seq_file *m) |
1168 | { | 1614 | { |
1169 | seq_puts(m, "# _------=> CPU# \n"); | 1615 | seq_puts(m, "# _------=> CPU# \n"); |
@@ -1338,6 +1784,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | |||
1338 | trace_seq_putc(s, '\n'); | 1784 | trace_seq_putc(s, '\n'); |
1339 | } | 1785 | } |
1340 | 1786 | ||
1787 | static void test_cpu_buff_start(struct trace_iterator *iter) | ||
1788 | { | ||
1789 | struct trace_seq *s = &iter->seq; | ||
1790 | |||
1791 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) | ||
1792 | return; | ||
1793 | |||
1794 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | ||
1795 | return; | ||
1796 | |||
1797 | if (cpu_isset(iter->cpu, iter->started)) | ||
1798 | return; | ||
1799 | |||
1800 | cpu_set(iter->cpu, iter->started); | ||
1801 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | ||
1802 | } | ||
1803 | |||
1341 | static enum print_line_t | 1804 | static enum print_line_t |
1342 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | 1805 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) |
1343 | { | 1806 | { |
@@ -1357,6 +1820,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1357 | if (entry->type == TRACE_CONT) | 1820 | if (entry->type == TRACE_CONT) |
1358 | return TRACE_TYPE_HANDLED; | 1821 | return TRACE_TYPE_HANDLED; |
1359 | 1822 | ||
1823 | test_cpu_buff_start(iter); | ||
1824 | |||
1360 | next_entry = find_next_entry(iter, NULL, &next_ts); | 1825 | next_entry = find_next_entry(iter, NULL, &next_ts); |
1361 | if (!next_entry) | 1826 | if (!next_entry) |
1362 | next_ts = iter->ts; | 1827 | next_ts = iter->ts; |
@@ -1448,6 +1913,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1448 | trace_seq_print_cont(s, iter); | 1913 | trace_seq_print_cont(s, iter); |
1449 | break; | 1914 | break; |
1450 | } | 1915 | } |
1916 | case TRACE_BRANCH: { | ||
1917 | struct trace_branch *field; | ||
1918 | |||
1919 | trace_assign_type(field, entry); | ||
1920 | |||
1921 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
1922 | field->correct ? " ok " : " MISS ", | ||
1923 | field->func, | ||
1924 | field->file, | ||
1925 | field->line); | ||
1926 | break; | ||
1927 | } | ||
1928 | case TRACE_USER_STACK: { | ||
1929 | struct userstack_entry *field; | ||
1930 | |||
1931 | trace_assign_type(field, entry); | ||
1932 | |||
1933 | seq_print_userip_objs(field, s, sym_flags); | ||
1934 | trace_seq_putc(s, '\n'); | ||
1935 | break; | ||
1936 | } | ||
1451 | default: | 1937 | default: |
1452 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | 1938 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
1453 | } | 1939 | } |
@@ -1472,6 +1958,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1472 | if (entry->type == TRACE_CONT) | 1958 | if (entry->type == TRACE_CONT) |
1473 | return TRACE_TYPE_HANDLED; | 1959 | return TRACE_TYPE_HANDLED; |
1474 | 1960 | ||
1961 | test_cpu_buff_start(iter); | ||
1962 | |||
1475 | comm = trace_find_cmdline(iter->ent->pid); | 1963 | comm = trace_find_cmdline(iter->ent->pid); |
1476 | 1964 | ||
1477 | t = ns2usecs(iter->ts); | 1965 | t = ns2usecs(iter->ts); |
@@ -1581,6 +2069,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1581 | trace_seq_print_cont(s, iter); | 2069 | trace_seq_print_cont(s, iter); |
1582 | break; | 2070 | break; |
1583 | } | 2071 | } |
2072 | case TRACE_GRAPH_RET: { | ||
2073 | return print_graph_function(iter); | ||
2074 | } | ||
2075 | case TRACE_GRAPH_ENT: { | ||
2076 | return print_graph_function(iter); | ||
2077 | } | ||
2078 | case TRACE_BRANCH: { | ||
2079 | struct trace_branch *field; | ||
2080 | |||
2081 | trace_assign_type(field, entry); | ||
2082 | |||
2083 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
2084 | field->correct ? " ok " : " MISS ", | ||
2085 | field->func, | ||
2086 | field->file, | ||
2087 | field->line); | ||
2088 | break; | ||
2089 | } | ||
2090 | case TRACE_USER_STACK: { | ||
2091 | struct userstack_entry *field; | ||
2092 | |||
2093 | trace_assign_type(field, entry); | ||
2094 | |||
2095 | ret = seq_print_userip_objs(field, s, sym_flags); | ||
2096 | if (!ret) | ||
2097 | return TRACE_TYPE_PARTIAL_LINE; | ||
2098 | ret = trace_seq_putc(s, '\n'); | ||
2099 | if (!ret) | ||
2100 | return TRACE_TYPE_PARTIAL_LINE; | ||
2101 | break; | ||
2102 | } | ||
1584 | } | 2103 | } |
1585 | return TRACE_TYPE_HANDLED; | 2104 | return TRACE_TYPE_HANDLED; |
1586 | } | 2105 | } |
@@ -1640,6 +2159,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | |||
1640 | break; | 2159 | break; |
1641 | } | 2160 | } |
1642 | case TRACE_SPECIAL: | 2161 | case TRACE_SPECIAL: |
2162 | case TRACE_USER_STACK: | ||
1643 | case TRACE_STACK: { | 2163 | case TRACE_STACK: { |
1644 | struct special_entry *field; | 2164 | struct special_entry *field; |
1645 | 2165 | ||
@@ -1728,6 +2248,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
1728 | break; | 2248 | break; |
1729 | } | 2249 | } |
1730 | case TRACE_SPECIAL: | 2250 | case TRACE_SPECIAL: |
2251 | case TRACE_USER_STACK: | ||
1731 | case TRACE_STACK: { | 2252 | case TRACE_STACK: { |
1732 | struct special_entry *field; | 2253 | struct special_entry *field; |
1733 | 2254 | ||
@@ -1782,6 +2303,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
1782 | break; | 2303 | break; |
1783 | } | 2304 | } |
1784 | case TRACE_SPECIAL: | 2305 | case TRACE_SPECIAL: |
2306 | case TRACE_USER_STACK: | ||
1785 | case TRACE_STACK: { | 2307 | case TRACE_STACK: { |
1786 | struct special_entry *field; | 2308 | struct special_entry *field; |
1787 | 2309 | ||
@@ -1847,7 +2369,9 @@ static int s_show(struct seq_file *m, void *v) | |||
1847 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | 2369 | seq_printf(m, "# tracer: %s\n", iter->trace->name); |
1848 | seq_puts(m, "#\n"); | 2370 | seq_puts(m, "#\n"); |
1849 | } | 2371 | } |
1850 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2372 | if (iter->trace && iter->trace->print_header) |
2373 | iter->trace->print_header(m); | ||
2374 | else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | ||
1851 | /* print nothing if the buffers are empty */ | 2375 | /* print nothing if the buffers are empty */ |
1852 | if (trace_empty(iter)) | 2376 | if (trace_empty(iter)) |
1853 | return 0; | 2377 | return 0; |
@@ -1899,6 +2423,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1899 | iter->trace = current_trace; | 2423 | iter->trace = current_trace; |
1900 | iter->pos = -1; | 2424 | iter->pos = -1; |
1901 | 2425 | ||
2426 | /* Notify the tracer early; before we stop tracing. */ | ||
2427 | if (iter->trace && iter->trace->open) | ||
2428 | iter->trace->open(iter); | ||
2429 | |||
2430 | /* Annotate start of buffers if we had overruns */ | ||
2431 | if (ring_buffer_overruns(iter->tr->buffer)) | ||
2432 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | ||
2433 | |||
2434 | |||
1902 | for_each_tracing_cpu(cpu) { | 2435 | for_each_tracing_cpu(cpu) { |
1903 | 2436 | ||
1904 | iter->buffer_iter[cpu] = | 2437 | iter->buffer_iter[cpu] = |
@@ -1917,13 +2450,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1917 | m->private = iter; | 2450 | m->private = iter; |
1918 | 2451 | ||
1919 | /* stop the trace while dumping */ | 2452 | /* stop the trace while dumping */ |
1920 | if (iter->tr->ctrl) { | 2453 | tracing_stop(); |
1921 | tracer_enabled = 0; | ||
1922 | ftrace_function_enabled = 0; | ||
1923 | } | ||
1924 | |||
1925 | if (iter->trace && iter->trace->open) | ||
1926 | iter->trace->open(iter); | ||
1927 | 2454 | ||
1928 | mutex_unlock(&trace_types_lock); | 2455 | mutex_unlock(&trace_types_lock); |
1929 | 2456 | ||
@@ -1966,14 +2493,7 @@ int tracing_release(struct inode *inode, struct file *file) | |||
1966 | iter->trace->close(iter); | 2493 | iter->trace->close(iter); |
1967 | 2494 | ||
1968 | /* reenable tracing if it was previously enabled */ | 2495 | /* reenable tracing if it was previously enabled */ |
1969 | if (iter->tr->ctrl) { | 2496 | tracing_start(); |
1970 | tracer_enabled = 1; | ||
1971 | /* | ||
1972 | * It is safe to enable function tracing even if it | ||
1973 | * isn't used | ||
1974 | */ | ||
1975 | ftrace_function_enabled = 1; | ||
1976 | } | ||
1977 | mutex_unlock(&trace_types_lock); | 2497 | mutex_unlock(&trace_types_lock); |
1978 | 2498 | ||
1979 | seq_release(inode, file); | 2499 | seq_release(inode, file); |
@@ -2151,7 +2671,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2151 | if (err) | 2671 | if (err) |
2152 | goto err_unlock; | 2672 | goto err_unlock; |
2153 | 2673 | ||
2154 | raw_local_irq_disable(); | 2674 | local_irq_disable(); |
2155 | __raw_spin_lock(&ftrace_max_lock); | 2675 | __raw_spin_lock(&ftrace_max_lock); |
2156 | for_each_tracing_cpu(cpu) { | 2676 | for_each_tracing_cpu(cpu) { |
2157 | /* | 2677 | /* |
@@ -2168,7 +2688,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2168 | } | 2688 | } |
2169 | } | 2689 | } |
2170 | __raw_spin_unlock(&ftrace_max_lock); | 2690 | __raw_spin_unlock(&ftrace_max_lock); |
2171 | raw_local_irq_enable(); | 2691 | local_irq_enable(); |
2172 | 2692 | ||
2173 | tracing_cpumask = tracing_cpumask_new; | 2693 | tracing_cpumask = tracing_cpumask_new; |
2174 | 2694 | ||
@@ -2189,13 +2709,16 @@ static struct file_operations tracing_cpumask_fops = { | |||
2189 | }; | 2709 | }; |
2190 | 2710 | ||
2191 | static ssize_t | 2711 | static ssize_t |
2192 | tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | 2712 | tracing_trace_options_read(struct file *filp, char __user *ubuf, |
2193 | size_t cnt, loff_t *ppos) | 2713 | size_t cnt, loff_t *ppos) |
2194 | { | 2714 | { |
2715 | int i; | ||
2195 | char *buf; | 2716 | char *buf; |
2196 | int r = 0; | 2717 | int r = 0; |
2197 | int len = 0; | 2718 | int len = 0; |
2198 | int i; | 2719 | u32 tracer_flags = current_trace->flags->val; |
2720 | struct tracer_opt *trace_opts = current_trace->flags->opts; | ||
2721 | |||
2199 | 2722 | ||
2200 | /* calculate max size */ | 2723 | /* calculate max size */ |
2201 | for (i = 0; trace_options[i]; i++) { | 2724 | for (i = 0; trace_options[i]; i++) { |
@@ -2203,6 +2726,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2203 | len += 3; /* "no" and space */ | 2726 | len += 3; /* "no" and space */ |
2204 | } | 2727 | } |
2205 | 2728 | ||
2729 | /* | ||
2730 | * Increase the size with names of options specific | ||
2731 | * of the current tracer. | ||
2732 | */ | ||
2733 | for (i = 0; trace_opts[i].name; i++) { | ||
2734 | len += strlen(trace_opts[i].name); | ||
2735 | len += 3; /* "no" and space */ | ||
2736 | } | ||
2737 | |||
2206 | /* +2 for \n and \0 */ | 2738 | /* +2 for \n and \0 */ |
2207 | buf = kmalloc(len + 2, GFP_KERNEL); | 2739 | buf = kmalloc(len + 2, GFP_KERNEL); |
2208 | if (!buf) | 2740 | if (!buf) |
@@ -2215,6 +2747,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2215 | r += sprintf(buf + r, "no%s ", trace_options[i]); | 2747 | r += sprintf(buf + r, "no%s ", trace_options[i]); |
2216 | } | 2748 | } |
2217 | 2749 | ||
2750 | for (i = 0; trace_opts[i].name; i++) { | ||
2751 | if (tracer_flags & trace_opts[i].bit) | ||
2752 | r += sprintf(buf + r, "%s ", | ||
2753 | trace_opts[i].name); | ||
2754 | else | ||
2755 | r += sprintf(buf + r, "no%s ", | ||
2756 | trace_opts[i].name); | ||
2757 | } | ||
2758 | |||
2218 | r += sprintf(buf + r, "\n"); | 2759 | r += sprintf(buf + r, "\n"); |
2219 | WARN_ON(r >= len + 2); | 2760 | WARN_ON(r >= len + 2); |
2220 | 2761 | ||
@@ -2225,13 +2766,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2225 | return r; | 2766 | return r; |
2226 | } | 2767 | } |
2227 | 2768 | ||
2769 | /* Try to assign a tracer specific option */ | ||
2770 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | ||
2771 | { | ||
2772 | struct tracer_flags *trace_flags = trace->flags; | ||
2773 | struct tracer_opt *opts = NULL; | ||
2774 | int ret = 0, i = 0; | ||
2775 | int len; | ||
2776 | |||
2777 | for (i = 0; trace_flags->opts[i].name; i++) { | ||
2778 | opts = &trace_flags->opts[i]; | ||
2779 | len = strlen(opts->name); | ||
2780 | |||
2781 | if (strncmp(cmp, opts->name, len) == 0) { | ||
2782 | ret = trace->set_flag(trace_flags->val, | ||
2783 | opts->bit, !neg); | ||
2784 | break; | ||
2785 | } | ||
2786 | } | ||
2787 | /* Not found */ | ||
2788 | if (!trace_flags->opts[i].name) | ||
2789 | return -EINVAL; | ||
2790 | |||
2791 | /* Refused to handle */ | ||
2792 | if (ret) | ||
2793 | return ret; | ||
2794 | |||
2795 | if (neg) | ||
2796 | trace_flags->val &= ~opts->bit; | ||
2797 | else | ||
2798 | trace_flags->val |= opts->bit; | ||
2799 | |||
2800 | return 0; | ||
2801 | } | ||
2802 | |||
2228 | static ssize_t | 2803 | static ssize_t |
2229 | tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | 2804 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, |
2230 | size_t cnt, loff_t *ppos) | 2805 | size_t cnt, loff_t *ppos) |
2231 | { | 2806 | { |
2232 | char buf[64]; | 2807 | char buf[64]; |
2233 | char *cmp = buf; | 2808 | char *cmp = buf; |
2234 | int neg = 0; | 2809 | int neg = 0; |
2810 | int ret; | ||
2235 | int i; | 2811 | int i; |
2236 | 2812 | ||
2237 | if (cnt >= sizeof(buf)) | 2813 | if (cnt >= sizeof(buf)) |
@@ -2258,11 +2834,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2258 | break; | 2834 | break; |
2259 | } | 2835 | } |
2260 | } | 2836 | } |
2261 | /* | 2837 | |
2262 | * If no option could be set, return an error: | 2838 | /* If no option could be set, test the specific tracer options */ |
2263 | */ | 2839 | if (!trace_options[i]) { |
2264 | if (!trace_options[i]) | 2840 | ret = set_tracer_option(current_trace, cmp, neg); |
2265 | return -EINVAL; | 2841 | if (ret) |
2842 | return ret; | ||
2843 | } | ||
2266 | 2844 | ||
2267 | filp->f_pos += cnt; | 2845 | filp->f_pos += cnt; |
2268 | 2846 | ||
@@ -2271,8 +2849,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2271 | 2849 | ||
2272 | static struct file_operations tracing_iter_fops = { | 2850 | static struct file_operations tracing_iter_fops = { |
2273 | .open = tracing_open_generic, | 2851 | .open = tracing_open_generic, |
2274 | .read = tracing_iter_ctrl_read, | 2852 | .read = tracing_trace_options_read, |
2275 | .write = tracing_iter_ctrl_write, | 2853 | .write = tracing_trace_options_write, |
2276 | }; | 2854 | }; |
2277 | 2855 | ||
2278 | static const char readme_msg[] = | 2856 | static const char readme_msg[] = |
@@ -2286,9 +2864,9 @@ static const char readme_msg[] = | |||
2286 | "# echo sched_switch > /debug/tracing/current_tracer\n" | 2864 | "# echo sched_switch > /debug/tracing/current_tracer\n" |
2287 | "# cat /debug/tracing/current_tracer\n" | 2865 | "# cat /debug/tracing/current_tracer\n" |
2288 | "sched_switch\n" | 2866 | "sched_switch\n" |
2289 | "# cat /debug/tracing/iter_ctrl\n" | 2867 | "# cat /debug/tracing/trace_options\n" |
2290 | "noprint-parent nosym-offset nosym-addr noverbose\n" | 2868 | "noprint-parent nosym-offset nosym-addr noverbose\n" |
2291 | "# echo print-parent > /debug/tracing/iter_ctrl\n" | 2869 | "# echo print-parent > /debug/tracing/trace_options\n" |
2292 | "# echo 1 > /debug/tracing/tracing_enabled\n" | 2870 | "# echo 1 > /debug/tracing/tracing_enabled\n" |
2293 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" | 2871 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" |
2294 | "echo 0 > /debug/tracing/tracing_enabled\n" | 2872 | "echo 0 > /debug/tracing/tracing_enabled\n" |
@@ -2311,11 +2889,10 @@ static ssize_t | |||
2311 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 2889 | tracing_ctrl_read(struct file *filp, char __user *ubuf, |
2312 | size_t cnt, loff_t *ppos) | 2890 | size_t cnt, loff_t *ppos) |
2313 | { | 2891 | { |
2314 | struct trace_array *tr = filp->private_data; | ||
2315 | char buf[64]; | 2892 | char buf[64]; |
2316 | int r; | 2893 | int r; |
2317 | 2894 | ||
2318 | r = sprintf(buf, "%ld\n", tr->ctrl); | 2895 | r = sprintf(buf, "%u\n", tracer_enabled); |
2319 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2896 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2320 | } | 2897 | } |
2321 | 2898 | ||
@@ -2343,16 +2920,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2343 | val = !!val; | 2920 | val = !!val; |
2344 | 2921 | ||
2345 | mutex_lock(&trace_types_lock); | 2922 | mutex_lock(&trace_types_lock); |
2346 | if (tr->ctrl ^ val) { | 2923 | if (tracer_enabled ^ val) { |
2347 | if (val) | 2924 | if (val) { |
2348 | tracer_enabled = 1; | 2925 | tracer_enabled = 1; |
2349 | else | 2926 | if (current_trace->start) |
2927 | current_trace->start(tr); | ||
2928 | tracing_start(); | ||
2929 | } else { | ||
2350 | tracer_enabled = 0; | 2930 | tracer_enabled = 0; |
2351 | 2931 | tracing_stop(); | |
2352 | tr->ctrl = val; | 2932 | if (current_trace->stop) |
2353 | 2933 | current_trace->stop(tr); | |
2354 | if (current_trace && current_trace->ctrl_update) | 2934 | } |
2355 | current_trace->ctrl_update(tr); | ||
2356 | } | 2935 | } |
2357 | mutex_unlock(&trace_types_lock); | 2936 | mutex_unlock(&trace_types_lock); |
2358 | 2937 | ||
@@ -2378,29 +2957,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
2378 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2957 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2379 | } | 2958 | } |
2380 | 2959 | ||
2381 | static ssize_t | 2960 | static int tracing_set_tracer(char *buf) |
2382 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | ||
2383 | size_t cnt, loff_t *ppos) | ||
2384 | { | 2961 | { |
2385 | struct trace_array *tr = &global_trace; | 2962 | struct trace_array *tr = &global_trace; |
2386 | struct tracer *t; | 2963 | struct tracer *t; |
2387 | char buf[max_tracer_type_len+1]; | 2964 | int ret = 0; |
2388 | int i; | ||
2389 | size_t ret; | ||
2390 | |||
2391 | ret = cnt; | ||
2392 | |||
2393 | if (cnt > max_tracer_type_len) | ||
2394 | cnt = max_tracer_type_len; | ||
2395 | |||
2396 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2397 | return -EFAULT; | ||
2398 | |||
2399 | buf[cnt] = 0; | ||
2400 | |||
2401 | /* strip ending whitespace. */ | ||
2402 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | ||
2403 | buf[i] = 0; | ||
2404 | 2965 | ||
2405 | mutex_lock(&trace_types_lock); | 2966 | mutex_lock(&trace_types_lock); |
2406 | for (t = trace_types; t; t = t->next) { | 2967 | for (t = trace_types; t; t = t->next) { |
@@ -2414,18 +2975,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2414 | if (t == current_trace) | 2975 | if (t == current_trace) |
2415 | goto out; | 2976 | goto out; |
2416 | 2977 | ||
2978 | trace_branch_disable(); | ||
2417 | if (current_trace && current_trace->reset) | 2979 | if (current_trace && current_trace->reset) |
2418 | current_trace->reset(tr); | 2980 | current_trace->reset(tr); |
2419 | 2981 | ||
2420 | current_trace = t; | 2982 | current_trace = t; |
2421 | if (t->init) | 2983 | if (t->init) { |
2422 | t->init(tr); | 2984 | ret = t->init(tr); |
2985 | if (ret) | ||
2986 | goto out; | ||
2987 | } | ||
2423 | 2988 | ||
2989 | trace_branch_enable(tr); | ||
2424 | out: | 2990 | out: |
2425 | mutex_unlock(&trace_types_lock); | 2991 | mutex_unlock(&trace_types_lock); |
2426 | 2992 | ||
2427 | if (ret > 0) | 2993 | return ret; |
2428 | filp->f_pos += ret; | 2994 | } |
2995 | |||
2996 | static ssize_t | ||
2997 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | ||
2998 | size_t cnt, loff_t *ppos) | ||
2999 | { | ||
3000 | char buf[max_tracer_type_len+1]; | ||
3001 | int i; | ||
3002 | size_t ret; | ||
3003 | int err; | ||
3004 | |||
3005 | ret = cnt; | ||
3006 | |||
3007 | if (cnt > max_tracer_type_len) | ||
3008 | cnt = max_tracer_type_len; | ||
3009 | |||
3010 | if (copy_from_user(&buf, ubuf, cnt)) | ||
3011 | return -EFAULT; | ||
3012 | |||
3013 | buf[cnt] = 0; | ||
3014 | |||
3015 | /* strip ending whitespace. */ | ||
3016 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | ||
3017 | buf[i] = 0; | ||
3018 | |||
3019 | err = tracing_set_tracer(buf); | ||
3020 | if (err) | ||
3021 | return err; | ||
3022 | |||
3023 | filp->f_pos += ret; | ||
2429 | 3024 | ||
2430 | return ret; | 3025 | return ret; |
2431 | } | 3026 | } |
@@ -2492,6 +3087,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2492 | return -ENOMEM; | 3087 | return -ENOMEM; |
2493 | 3088 | ||
2494 | mutex_lock(&trace_types_lock); | 3089 | mutex_lock(&trace_types_lock); |
3090 | |||
3091 | /* trace pipe does not show start of buffer */ | ||
3092 | cpus_setall(iter->started); | ||
3093 | |||
2495 | iter->tr = &global_trace; | 3094 | iter->tr = &global_trace; |
2496 | iter->trace = current_trace; | 3095 | iter->trace = current_trace; |
2497 | filp->private_data = iter; | 3096 | filp->private_data = iter; |
@@ -2667,7 +3266,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, | |||
2667 | char buf[64]; | 3266 | char buf[64]; |
2668 | int r; | 3267 | int r; |
2669 | 3268 | ||
2670 | r = sprintf(buf, "%lu\n", tr->entries); | 3269 | r = sprintf(buf, "%lu\n", tr->entries >> 10); |
2671 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3270 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2672 | } | 3271 | } |
2673 | 3272 | ||
@@ -2678,7 +3277,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2678 | unsigned long val; | 3277 | unsigned long val; |
2679 | char buf[64]; | 3278 | char buf[64]; |
2680 | int ret, cpu; | 3279 | int ret, cpu; |
2681 | struct trace_array *tr = filp->private_data; | ||
2682 | 3280 | ||
2683 | if (cnt >= sizeof(buf)) | 3281 | if (cnt >= sizeof(buf)) |
2684 | return -EINVAL; | 3282 | return -EINVAL; |
@@ -2698,12 +3296,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2698 | 3296 | ||
2699 | mutex_lock(&trace_types_lock); | 3297 | mutex_lock(&trace_types_lock); |
2700 | 3298 | ||
2701 | if (tr->ctrl) { | 3299 | tracing_stop(); |
2702 | cnt = -EBUSY; | ||
2703 | pr_info("ftrace: please disable tracing" | ||
2704 | " before modifying buffer size\n"); | ||
2705 | goto out; | ||
2706 | } | ||
2707 | 3300 | ||
2708 | /* disable all cpu buffers */ | 3301 | /* disable all cpu buffers */ |
2709 | for_each_tracing_cpu(cpu) { | 3302 | for_each_tracing_cpu(cpu) { |
@@ -2713,6 +3306,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2713 | atomic_inc(&max_tr.data[cpu]->disabled); | 3306 | atomic_inc(&max_tr.data[cpu]->disabled); |
2714 | } | 3307 | } |
2715 | 3308 | ||
3309 | /* value is in KB */ | ||
3310 | val <<= 10; | ||
3311 | |||
2716 | if (val != global_trace.entries) { | 3312 | if (val != global_trace.entries) { |
2717 | ret = ring_buffer_resize(global_trace.buffer, val); | 3313 | ret = ring_buffer_resize(global_trace.buffer, val); |
2718 | if (ret < 0) { | 3314 | if (ret < 0) { |
@@ -2751,6 +3347,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2751 | atomic_dec(&max_tr.data[cpu]->disabled); | 3347 | atomic_dec(&max_tr.data[cpu]->disabled); |
2752 | } | 3348 | } |
2753 | 3349 | ||
3350 | tracing_start(); | ||
2754 | max_tr.entries = global_trace.entries; | 3351 | max_tr.entries = global_trace.entries; |
2755 | mutex_unlock(&trace_types_lock); | 3352 | mutex_unlock(&trace_types_lock); |
2756 | 3353 | ||
@@ -2762,7 +3359,7 @@ static int mark_printk(const char *fmt, ...) | |||
2762 | int ret; | 3359 | int ret; |
2763 | va_list args; | 3360 | va_list args; |
2764 | va_start(args, fmt); | 3361 | va_start(args, fmt); |
2765 | ret = trace_vprintk(0, fmt, args); | 3362 | ret = trace_vprintk(0, -1, fmt, args); |
2766 | va_end(args); | 3363 | va_end(args); |
2767 | return ret; | 3364 | return ret; |
2768 | } | 3365 | } |
@@ -2773,9 +3370,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
2773 | { | 3370 | { |
2774 | char *buf; | 3371 | char *buf; |
2775 | char *end; | 3372 | char *end; |
2776 | struct trace_array *tr = &global_trace; | ||
2777 | 3373 | ||
2778 | if (!tr->ctrl || tracing_disabled) | 3374 | if (tracing_disabled) |
2779 | return -EINVAL; | 3375 | return -EINVAL; |
2780 | 3376 | ||
2781 | if (cnt > TRACE_BUF_SIZE) | 3377 | if (cnt > TRACE_BUF_SIZE) |
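Dropping the tr->ctrl test means a write to trace_marker is refused only when tracing is disabled outright, and mark_printk() above now passes -1 as the new depth argument, presumably meaning there is no function-graph nesting to record. For context, annotating a trace from userspace might look like this (debugfs mount point assumed):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Inject a free-form annotation into the trace via trace_marker. */
	static void mark_trace(const char *msg)
	{
		int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

		if (fd >= 0) {
			write(fd, msg, strlen(msg));
			close(fd);
		}
	}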
@@ -2841,22 +3437,38 @@ static struct file_operations tracing_mark_fops = { | |||
2841 | 3437 | ||
2842 | #ifdef CONFIG_DYNAMIC_FTRACE | 3438 | #ifdef CONFIG_DYNAMIC_FTRACE |
2843 | 3439 | ||
3440 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | ||
3441 | { | ||
3442 | return 0; | ||
3443 | } | ||
3444 | |||
2844 | static ssize_t | 3445 | static ssize_t |
2845 | tracing_read_long(struct file *filp, char __user *ubuf, | 3446 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
2846 | size_t cnt, loff_t *ppos) | 3447 | size_t cnt, loff_t *ppos) |
2847 | { | 3448 | { |
3449 | static char ftrace_dyn_info_buffer[1024]; | ||
3450 | static DEFINE_MUTEX(dyn_info_mutex); | ||
2848 | unsigned long *p = filp->private_data; | 3451 | unsigned long *p = filp->private_data; |
2849 | char buf[64]; | 3452 | char *buf = ftrace_dyn_info_buffer; |
3453 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | ||
2850 | int r; | 3454 | int r; |
2851 | 3455 | ||
2852 | r = sprintf(buf, "%ld\n", *p); | 3456 | mutex_lock(&dyn_info_mutex); |
3457 | r = sprintf(buf, "%ld ", *p); | ||
2853 | 3458 | ||
2854 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3459 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); |
3460 | buf[r++] = '\n'; | ||
3461 | |||
3462 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
3463 | |||
3464 | mutex_unlock(&dyn_info_mutex); | ||
3465 | |||
3466 | return r; | ||
2855 | } | 3467 | } |
2856 | 3468 | ||
2857 | static struct file_operations tracing_read_long_fops = { | 3469 | static struct file_operations tracing_dyn_info_fops = { |
2858 | .open = tracing_open_generic, | 3470 | .open = tracing_open_generic, |
2859 | .read = tracing_read_long, | 3471 | .read = tracing_read_dyn_info, |
2860 | }; | 3472 | }; |
2861 | #endif | 3473 | #endif |
2862 | 3474 | ||
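ftrace_arch_read_dyn_info() is a __weak stub, so an architecture can append its own text to dyn_ftrace_total_info while everyone else falls back to the empty default. A hypothetical override, kernel-style code with an invented counter purely for illustration:

	/* Hypothetical arch-side override of the __weak hook above. */
	static unsigned long example_arch_patch_count;

	int ftrace_arch_read_dyn_info(char *buf, int size)
	{
		return snprintf(buf, size, "arch_patched:%lu",
				example_arch_patch_count);
	}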
@@ -2897,10 +3509,10 @@ static __init int tracer_init_debugfs(void) | |||
2897 | if (!entry) | 3509 | if (!entry) |
2898 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | 3510 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); |
2899 | 3511 | ||
2900 | entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, | 3512 | entry = debugfs_create_file("trace_options", 0644, d_tracer, |
2901 | NULL, &tracing_iter_fops); | 3513 | NULL, &tracing_iter_fops); |
2902 | if (!entry) | 3514 | if (!entry) |
2903 | pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); | 3515 | pr_warning("Could not create debugfs 'trace_options' entry\n"); |
2904 | 3516 | ||
2905 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3517 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, |
2906 | NULL, &tracing_cpumask_fops); | 3518 | NULL, &tracing_cpumask_fops); |
@@ -2950,11 +3562,11 @@ static __init int tracer_init_debugfs(void) | |||
2950 | pr_warning("Could not create debugfs " | 3562 | pr_warning("Could not create debugfs " |
2951 | "'trace_pipe' entry\n"); | 3563 | "'trace_pipe' entry\n"); |
2952 | 3564 | ||
2953 | entry = debugfs_create_file("trace_entries", 0644, d_tracer, | 3565 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, |
2954 | &global_trace, &tracing_entries_fops); | 3566 | &global_trace, &tracing_entries_fops); |
2955 | if (!entry) | 3567 | if (!entry) |
2956 | pr_warning("Could not create debugfs " | 3568 | pr_warning("Could not create debugfs " |
2957 | "'trace_entries' entry\n"); | 3569 | "'buffer_size_kb' entry\n"); |
2958 | 3570 | ||
2959 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, | 3571 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, |
2960 | NULL, &tracing_mark_fops); | 3572 | NULL, &tracing_mark_fops); |
@@ -2965,7 +3577,7 @@ static __init int tracer_init_debugfs(void) | |||
2965 | #ifdef CONFIG_DYNAMIC_FTRACE | 3577 | #ifdef CONFIG_DYNAMIC_FTRACE |
2966 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3578 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
2967 | &ftrace_update_tot_cnt, | 3579 | &ftrace_update_tot_cnt, |
2968 | &tracing_read_long_fops); | 3580 | &tracing_dyn_info_fops); |
2969 | if (!entry) | 3581 | if (!entry) |
2970 | pr_warning("Could not create debugfs " | 3582 | pr_warning("Could not create debugfs " |
2971 | "'dyn_ftrace_total_info' entry\n"); | 3583 | "'dyn_ftrace_total_info' entry\n"); |
@@ -2976,7 +3588,7 @@ static __init int tracer_init_debugfs(void) | |||
2976 | return 0; | 3588 | return 0; |
2977 | } | 3589 | } |
2978 | 3590 | ||
2979 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 3591 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
2980 | { | 3592 | { |
2981 | static DEFINE_SPINLOCK(trace_buf_lock); | 3593 | static DEFINE_SPINLOCK(trace_buf_lock); |
2982 | static char trace_buf[TRACE_BUF_SIZE]; | 3594 | static char trace_buf[TRACE_BUF_SIZE]; |
@@ -2984,11 +3596,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2984 | struct ring_buffer_event *event; | 3596 | struct ring_buffer_event *event; |
2985 | struct trace_array *tr = &global_trace; | 3597 | struct trace_array *tr = &global_trace; |
2986 | struct trace_array_cpu *data; | 3598 | struct trace_array_cpu *data; |
2987 | struct print_entry *entry; | ||
2988 | unsigned long flags, irq_flags; | ||
2989 | int cpu, len = 0, size, pc; | 3599 | int cpu, len = 0, size, pc; |
3600 | struct print_entry *entry; | ||
3601 | unsigned long irq_flags; | ||
2990 | 3602 | ||
2991 | if (!tr->ctrl || tracing_disabled) | 3603 | if (tracing_disabled || tracing_selftest_running) |
2992 | return 0; | 3604 | return 0; |
2993 | 3605 | ||
2994 | pc = preempt_count(); | 3606 | pc = preempt_count(); |
@@ -2999,7 +3611,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2999 | if (unlikely(atomic_read(&data->disabled))) | 3611 | if (unlikely(atomic_read(&data->disabled))) |
3000 | goto out; | 3612 | goto out; |
3001 | 3613 | ||
3002 | spin_lock_irqsave(&trace_buf_lock, flags); | 3614 | pause_graph_tracing(); |
3615 | spin_lock_irqsave(&trace_buf_lock, irq_flags); | ||
3003 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 3616 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
3004 | 3617 | ||
3005 | len = min(len, TRACE_BUF_SIZE-1); | 3618 | len = min(len, TRACE_BUF_SIZE-1); |
@@ -3010,17 +3623,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
3010 | if (!event) | 3623 | if (!event) |
3011 | goto out_unlock; | 3624 | goto out_unlock; |
3012 | entry = ring_buffer_event_data(event); | 3625 | entry = ring_buffer_event_data(event); |
3013 | tracing_generic_entry_update(&entry->ent, flags, pc); | 3626 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); |
3014 | entry->ent.type = TRACE_PRINT; | 3627 | entry->ent.type = TRACE_PRINT; |
3015 | entry->ip = ip; | 3628 | entry->ip = ip; |
3629 | entry->depth = depth; | ||
3016 | 3630 | ||
3017 | memcpy(&entry->buf, trace_buf, len); | 3631 | memcpy(&entry->buf, trace_buf, len); |
3018 | entry->buf[len] = 0; | 3632 | entry->buf[len] = 0; |
3019 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 3633 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
3020 | 3634 | ||
3021 | out_unlock: | 3635 | out_unlock: |
3022 | spin_unlock_irqrestore(&trace_buf_lock, flags); | 3636 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); |
3023 | 3637 | unpause_graph_tracing(); | |
3024 | out: | 3638 | out: |
3025 | preempt_enable_notrace(); | 3639 | preempt_enable_notrace(); |
3026 | 3640 | ||
@@ -3037,7 +3651,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) | |||
3037 | return 0; | 3651 | return 0; |
3038 | 3652 | ||
3039 | va_start(ap, fmt); | 3653 | va_start(ap, fmt); |
3040 | ret = trace_vprintk(ip, fmt, ap); | 3654 | ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); |
3041 | va_end(ap); | 3655 | va_end(ap); |
3042 | return ret; | 3656 | return ret; |
3043 | } | 3657 | } |
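__ftrace_printk() now hands trace_vprintk() a depth taken from task_curr_ret_stack(current), presumably so the message can be placed at the caller's current function-graph nesting level. Callers are unchanged; a hedged example of ftrace_printk() in driver code (the device struct and hook are invented):

	struct my_dev {			/* illustrative device state */
		int id;
		int pending;
	};

	static void my_driver_poll(struct my_dev *dev)
	{
		/* depth is supplied behind the scenes by __ftrace_printk() */
		ftrace_printk("poll: dev %d pending %d\n", dev->id, dev->pending);
	}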
@@ -3046,7 +3660,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk); | |||
3046 | static int trace_panic_handler(struct notifier_block *this, | 3660 | static int trace_panic_handler(struct notifier_block *this, |
3047 | unsigned long event, void *unused) | 3661 | unsigned long event, void *unused) |
3048 | { | 3662 | { |
3049 | ftrace_dump(); | 3663 | if (ftrace_dump_on_oops) |
3664 | ftrace_dump(); | ||
3050 | return NOTIFY_OK; | 3665 | return NOTIFY_OK; |
3051 | } | 3666 | } |
3052 | 3667 | ||
@@ -3062,7 +3677,8 @@ static int trace_die_handler(struct notifier_block *self, | |||
3062 | { | 3677 | { |
3063 | switch (val) { | 3678 | switch (val) { |
3064 | case DIE_OOPS: | 3679 | case DIE_OOPS: |
3065 | ftrace_dump(); | 3680 | if (ftrace_dump_on_oops) |
3681 | ftrace_dump(); | ||
3066 | break; | 3682 | break; |
3067 | default: | 3683 | default: |
3068 | break; | 3684 | break; |
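Both the panic handler and the die handler now consult the same flag before calling ftrace_dump(); they are ordinary notifier-chain callbacks, registered further down (the panic side via atomic_notifier_chain_register(), the die side through the die-notifier interface). The generic shape of such a callback, as a sketch only:

	/* Generic notifier callback: inspect the event, do optional work,
	 * return NOTIFY_OK so the rest of the chain still runs. */
	static int example_notify(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		/* e.g. dump state only when the user asked for it */
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call	= example_notify,
	};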
@@ -3103,7 +3719,6 @@ trace_printk_seq(struct trace_seq *s) | |||
3103 | trace_seq_reset(s); | 3719 | trace_seq_reset(s); |
3104 | } | 3720 | } |
3105 | 3721 | ||
3106 | |||
3107 | void ftrace_dump(void) | 3722 | void ftrace_dump(void) |
3108 | { | 3723 | { |
3109 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 3724 | static DEFINE_SPINLOCK(ftrace_dump_lock); |
@@ -3128,6 +3743,9 @@ void ftrace_dump(void) | |||
3128 | atomic_inc(&global_trace.data[cpu]->disabled); | 3743 | atomic_inc(&global_trace.data[cpu]->disabled); |
3129 | } | 3744 | } |
3130 | 3745 | ||
3746 | /* don't look at user memory in panic mode */ | ||
3747 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | ||
3748 | |||
3131 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 3749 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); |
3132 | 3750 | ||
3133 | iter.tr = &global_trace; | 3751 | iter.tr = &global_trace; |
@@ -3221,7 +3839,6 @@ __init static int tracer_alloc_buffers(void) | |||
3221 | #endif | 3839 | #endif |
3222 | 3840 | ||
3223 | /* All seems OK, enable tracing */ | 3841 | /* All seems OK, enable tracing */ |
3224 | global_trace.ctrl = tracer_enabled; | ||
3225 | tracing_disabled = 0; | 3842 | tracing_disabled = 0; |
3226 | 3843 | ||
3227 | atomic_notifier_chain_register(&panic_notifier_list, | 3844 | atomic_notifier_chain_register(&panic_notifier_list, |