path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--   kernel/trace/trace.c   976
1 files changed, 811 insertions, 165 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ee9e4e454a0..f4bb3800318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,6 +30,7 @@
30#include <linux/gfp.h> 30#include <linux/gfp.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/seq_file.h>
33#include <linux/writeback.h> 34#include <linux/writeback.h>
34 35
35#include <linux/stacktrace.h> 36#include <linux/stacktrace.h>
@@ -43,6 +44,38 @@
43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 44unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
44unsigned long __read_mostly tracing_thresh; 45unsigned long __read_mostly tracing_thresh;
45 46
47/*
48 * We need to change this state when a selftest is running.
49 * A selftest will look into the ring buffer to count the
50 * entries inserted during the selftest, although concurrent
51 * insertions into the ring buffer, such as ftrace_printk, could occur
52 * at the same time, giving false positive or negative results.
53 */
54static bool __read_mostly tracing_selftest_running;
55
56/* For tracers that don't implement custom flags */
57static struct tracer_opt dummy_tracer_opt[] = {
58 { }
59};
60
61static struct tracer_flags dummy_tracer_flags = {
62 .val = 0,
63 .opts = dummy_tracer_opt
64};
65
66static int dummy_set_flag(u32 old_flags, u32 bit, int set)
67{
68 return 0;
69}
70
71/*
72 * Kill all tracing for good (never come back).
73 * It is initialized to 1 but will turn to zero if the initialization
74 * of the tracer is successful. But that is the only place that sets
75 * this back to zero.
76 */
77int tracing_disabled = 1;
78
46static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 79static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
47 80
48static inline void ftrace_disable_cpu(void) 81static inline void ftrace_disable_cpu(void)
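The dummy_tracer_opt/dummy_tracer_flags pair above is substituted by register_tracer() for tracers that leave their flags NULL. For illustration, a tracer that does define its own options would wire up the same structures roughly as follows; the my_tracer names and the MY_OPT_VERBOSE bit are hypothetical, only the struct tracer_opt/tracer_flags fields and the set_flag() signature come from this patch:

#define MY_OPT_VERBOSE	0x1			/* hypothetical option bit */

static struct tracer_opt my_tracer_opts[] = {
	{ .name = "verbose", .bit = MY_OPT_VERBOSE },
	{ }					/* terminator, as in dummy_tracer_opt */
};

static struct tracer_flags my_tracer_flags = {
	.val  = 0,				/* all custom options off by default */
	.opts = my_tracer_opts,
};

/* called via set_tracer_option() when "verbose"/"noverbose" is written to
 * trace_options; returning 0 lets the core code flip the bit in ->val */
static int my_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

A tracer would then point its struct tracer ->flags at my_tracer_flags and ->set_flag at my_tracer_set_flag; register_tracer() below only falls back to the dummies when those members are left unset.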
@@ -62,7 +95,36 @@ static cpumask_t __read_mostly tracing_buffer_mask;
62#define for_each_tracing_cpu(cpu) \ 95#define for_each_tracing_cpu(cpu) \
63 for_each_cpu_mask(cpu, tracing_buffer_mask) 96 for_each_cpu_mask(cpu, tracing_buffer_mask)
64 97
65static int tracing_disabled = 1; 98/*
99 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
100 *
101 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
102 * is set, then ftrace_dump is called. This will output the contents
103 * of the ftrace buffers to the console. This is very useful for
104 * capturing traces that lead to crashes and outputting them to a
105 * serial console.
106 *
107 * It is off by default, but you can enable it either by specifying
108 * "ftrace_dump_on_oops" on the kernel command line, or by setting
109 * /proc/sys/kernel/ftrace_dump_on_oops to true.
110 */
111int ftrace_dump_on_oops;
112
113static int tracing_set_tracer(char *buf);
114
115static int __init set_ftrace(char *str)
116{
117 tracing_set_tracer(str);
118 return 1;
119}
120__setup("ftrace", set_ftrace);
121
122static int __init set_ftrace_dump_on_oops(char *str)
123{
124 ftrace_dump_on_oops = 1;
125 return 1;
126}
127__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
66 128
67long 129long
68ns2usecs(cycle_t nsec) 130ns2usecs(cycle_t nsec)
@@ -112,6 +174,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
112/* tracer_enabled is used to toggle activation of a tracer */ 174/* tracer_enabled is used to toggle activation of a tracer */
113static int tracer_enabled = 1; 175static int tracer_enabled = 1;
114 176
177/**
178 * tracing_is_enabled - return tracer_enabled status
179 *
180 * This function is used by other tracers to know the status
181 * of the tracer_enabled flag. Tracers may use this function
182 * to know whether they should enable their features when starting
183 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
184 */
185int tracing_is_enabled(void)
186{
187 return tracer_enabled;
188}
189
115/* function tracing enabled */ 190/* function tracing enabled */
116int ftrace_function_enabled; 191int ftrace_function_enabled;
117 192
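As the comment says, tracing_is_enabled() exists so that other tracers can check the global state when they come up; a minimal sketch of that pattern, with the my_tracer_* names purely hypothetical, could be:

static int my_tracer_init(struct trace_array *tr)	/* hypothetical tracer */
{
	/* only arm the tracer's own hooks if tracing is globally enabled,
	 * the way start_irqsoff_tracer is described to do */
	if (tracing_is_enabled())
		my_tracer_start_hooks(tr);		/* hypothetical helper */
	return 0;
}

Returning int matches the new convention later in this patch, where tracing_set_tracer() checks the value of t->init(tr).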
@@ -153,8 +228,9 @@ static DEFINE_MUTEX(trace_types_lock);
153/* trace_wait is a waitqueue for tasks blocked on trace_poll */ 228/* trace_wait is a waitqueue for tasks blocked on trace_poll */
154static DECLARE_WAIT_QUEUE_HEAD(trace_wait); 229static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
155 230
156/* trace_flags holds iter_ctrl options */ 231/* trace_flags holds trace_options default values */
157unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 232unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
233 TRACE_ITER_ANNOTATE;
158 234
159/** 235/**
160 * trace_wake_up - wake up tasks waiting for trace input 236 * trace_wake_up - wake up tasks waiting for trace input
@@ -193,13 +269,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
193 return nsecs / 1000; 269 return nsecs / 1000;
194} 270}
195 271
196/*
197 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
198 * control the output of kernel symbols.
199 */
200#define TRACE_ITER_SYM_MASK \
201 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
202
203/* These must match the bit postions in trace_iterator_flags */ 272/* These must match the bit postions in trace_iterator_flags */
204static const char *trace_options[] = { 273static const char *trace_options[] = {
205 "print-parent", 274 "print-parent",
@@ -213,6 +282,12 @@ static const char *trace_options[] = {
213 "stacktrace", 282 "stacktrace",
214 "sched-tree", 283 "sched-tree",
215 "ftrace_printk", 284 "ftrace_printk",
285 "ftrace_preempt",
286 "branch",
287 "annotate",
288 "userstacktrace",
289 "sym-userobj",
290 "printk-msg-only",
216 NULL 291 NULL
217}; 292};
218 293
@@ -359,6 +434,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
359 return trace_seq_putmem(s, hex, j); 434 return trace_seq_putmem(s, hex, j);
360} 435}
361 436
437static int
438trace_seq_path(struct trace_seq *s, struct path *path)
439{
440 unsigned char *p;
441
442 if (s->len >= (PAGE_SIZE - 1))
443 return 0;
444 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
445 if (!IS_ERR(p)) {
446 p = mangle_path(s->buffer + s->len, p, "\n");
447 if (p) {
448 s->len = p - s->buffer;
449 return 1;
450 }
451 } else {
452 s->buffer[s->len++] = '?';
453 return 1;
454 }
455
456 return 0;
457}
458
362static void 459static void
363trace_seq_reset(struct trace_seq *s) 460trace_seq_reset(struct trace_seq *s)
364{ 461{
@@ -470,7 +567,17 @@ int register_tracer(struct tracer *type)
470 return -1; 567 return -1;
471 } 568 }
472 569
570 /*
571 * When this gets called we hold the BKL which means that
572 * preemption is disabled. Various trace selftests however
573 * need to disable and enable preemption for successful tests.
574 * So we drop the BKL here and grab it again after the tests.
575 */
576 unlock_kernel();
473 mutex_lock(&trace_types_lock); 577 mutex_lock(&trace_types_lock);
578
579 tracing_selftest_running = true;
580
474 for (t = trace_types; t; t = t->next) { 581 for (t = trace_types; t; t = t->next) {
475 if (strcmp(type->name, t->name) == 0) { 582 if (strcmp(type->name, t->name) == 0) {
476 /* already found */ 583 /* already found */
@@ -481,12 +588,20 @@ int register_tracer(struct tracer *type)
481 } 588 }
482 } 589 }
483 590
591 if (!type->set_flag)
592 type->set_flag = &dummy_set_flag;
593 if (!type->flags)
594 type->flags = &dummy_tracer_flags;
595 else
596 if (!type->flags->opts)
597 type->flags->opts = dummy_tracer_opt;
598
484#ifdef CONFIG_FTRACE_STARTUP_TEST 599#ifdef CONFIG_FTRACE_STARTUP_TEST
485 if (type->selftest) { 600 if (type->selftest) {
486 struct tracer *saved_tracer = current_trace; 601 struct tracer *saved_tracer = current_trace;
487 struct trace_array *tr = &global_trace; 602 struct trace_array *tr = &global_trace;
488 int saved_ctrl = tr->ctrl;
489 int i; 603 int i;
604
490 /* 605 /*
491 * Run a selftest on this tracer. 606 * Run a selftest on this tracer.
492 * Here we reset the trace buffer, and set the current 607 * Here we reset the trace buffer, and set the current
@@ -494,25 +609,23 @@ int register_tracer(struct tracer *type)
494 * internal tracing to verify that everything is in order. 609 * internal tracing to verify that everything is in order.
495 * If we fail, we do not register this tracer. 610 * If we fail, we do not register this tracer.
496 */ 611 */
497 for_each_tracing_cpu(i) { 612 for_each_tracing_cpu(i)
498 tracing_reset(tr, i); 613 tracing_reset(tr, i);
499 } 614
500 current_trace = type; 615 current_trace = type;
501 tr->ctrl = 0;
502 /* the test is responsible for initializing and enabling */ 616 /* the test is responsible for initializing and enabling */
503 pr_info("Testing tracer %s: ", type->name); 617 pr_info("Testing tracer %s: ", type->name);
504 ret = type->selftest(type, tr); 618 ret = type->selftest(type, tr);
505 /* the test is responsible for resetting too */ 619 /* the test is responsible for resetting too */
506 current_trace = saved_tracer; 620 current_trace = saved_tracer;
507 tr->ctrl = saved_ctrl;
508 if (ret) { 621 if (ret) {
509 printk(KERN_CONT "FAILED!\n"); 622 printk(KERN_CONT "FAILED!\n");
510 goto out; 623 goto out;
511 } 624 }
512 /* Only reset on passing, to avoid touching corrupted buffers */ 625 /* Only reset on passing, to avoid touching corrupted buffers */
513 for_each_tracing_cpu(i) { 626 for_each_tracing_cpu(i)
514 tracing_reset(tr, i); 627 tracing_reset(tr, i);
515 } 628
516 printk(KERN_CONT "PASSED\n"); 629 printk(KERN_CONT "PASSED\n");
517 } 630 }
518#endif 631#endif
@@ -524,7 +637,9 @@ int register_tracer(struct tracer *type)
524 max_tracer_type_len = len; 637 max_tracer_type_len = len;
525 638
526 out: 639 out:
640 tracing_selftest_running = false;
527 mutex_unlock(&trace_types_lock); 641 mutex_unlock(&trace_types_lock);
642 lock_kernel();
528 643
529 return ret; 644 return ret;
530} 645}
@@ -564,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
564 ftrace_enable_cpu(); 679 ftrace_enable_cpu();
565} 680}
566 681
682void tracing_reset_online_cpus(struct trace_array *tr)
683{
684 int cpu;
685
686 tr->time_start = ftrace_now(tr->cpu);
687
688 for_each_online_cpu(cpu)
689 tracing_reset(tr, cpu);
690}
691
567#define SAVED_CMDLINES 128 692#define SAVED_CMDLINES 128
568static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 693static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
569static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 694static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -581,6 +706,91 @@ static void trace_init_cmdlines(void)
581 cmdline_idx = 0; 706 cmdline_idx = 0;
582} 707}
583 708
709static int trace_stop_count;
710static DEFINE_SPINLOCK(tracing_start_lock);
711
712/**
713 * ftrace_off_permanent - disable all ftrace code permanently
714 *
715 * This should only be called when a serious anomaly has
716 * been detected. This will turn off function tracing,
717 * ring buffers, and other tracing utilities. It takes no
718 * locks and can be called from any context.
719 */
720void ftrace_off_permanent(void)
721{
722 tracing_disabled = 1;
723 ftrace_stop();
724 tracing_off_permanent();
725}
726
727/**
728 * tracing_start - quick start of the tracer
729 *
730 * If tracing is enabled but was stopped by tracing_stop,
731 * this will start the tracer back up.
732 */
733void tracing_start(void)
734{
735 struct ring_buffer *buffer;
736 unsigned long flags;
737
738 if (tracing_disabled)
739 return;
740
741 spin_lock_irqsave(&tracing_start_lock, flags);
742 if (--trace_stop_count)
743 goto out;
744
745 if (trace_stop_count < 0) {
746 /* Someone screwed up their debugging */
747 WARN_ON_ONCE(1);
748 trace_stop_count = 0;
749 goto out;
750 }
751
752
753 buffer = global_trace.buffer;
754 if (buffer)
755 ring_buffer_record_enable(buffer);
756
757 buffer = max_tr.buffer;
758 if (buffer)
759 ring_buffer_record_enable(buffer);
760
761 ftrace_start();
762 out:
763 spin_unlock_irqrestore(&tracing_start_lock, flags);
764}
765
766/**
767 * tracing_stop - quick stop of the tracer
768 *
769 * Lightweight way to stop tracing. Use in conjunction with
770 * tracing_start.
771 */
772void tracing_stop(void)
773{
774 struct ring_buffer *buffer;
775 unsigned long flags;
776
777 ftrace_stop();
778 spin_lock_irqsave(&tracing_start_lock, flags);
779 if (trace_stop_count++)
780 goto out;
781
782 buffer = global_trace.buffer;
783 if (buffer)
784 ring_buffer_record_disable(buffer);
785
786 buffer = max_tr.buffer;
787 if (buffer)
788 ring_buffer_record_disable(buffer);
789
790 out:
791 spin_unlock_irqrestore(&tracing_start_lock, flags);
792}
793
584void trace_stop_cmdline_recording(void); 794void trace_stop_cmdline_recording(void);
585 795
586static void trace_save_cmdline(struct task_struct *tsk) 796static void trace_save_cmdline(struct task_struct *tsk)
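tracing_stop() and tracing_start() nest through trace_stop_count, so independent callers can suspend recording around a critical region without re-enabling it underneath each other. An illustrative (hypothetical) caller:

static void my_inspect_buffers(void)	/* hypothetical */
{
	tracing_stop();		/* the first stop disables the ring buffers */
	/* ... examine state without new entries racing in ... */
	tracing_start();	/* recording resumes only when the matching
				 * start drops trace_stop_count back to zero */
}

Later in this patch, __tracing_open() and tracing_release() use exactly this pair to freeze the buffers while the trace file is being read.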
@@ -618,7 +828,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
618 spin_unlock(&trace_cmdline_lock); 828 spin_unlock(&trace_cmdline_lock);
619} 829}
620 830
621static char *trace_find_cmdline(int pid) 831char *trace_find_cmdline(int pid)
622{ 832{
623 char *cmdline = "<...>"; 833 char *cmdline = "<...>";
624 unsigned map; 834 unsigned map;
@@ -655,6 +865,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
655 865
656 entry->preempt_count = pc & 0xff; 866 entry->preempt_count = pc & 0xff;
657 entry->pid = (tsk) ? tsk->pid : 0; 867 entry->pid = (tsk) ? tsk->pid : 0;
868 entry->tgid = (tsk) ? tsk->tgid : 0;
658 entry->flags = 869 entry->flags =
659#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 870#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
660 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 871 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -691,6 +902,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
691 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 902 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
692} 903}
693 904
905#ifdef CONFIG_FUNCTION_GRAPH_TRACER
906static void __trace_graph_entry(struct trace_array *tr,
907 struct trace_array_cpu *data,
908 struct ftrace_graph_ent *trace,
909 unsigned long flags,
910 int pc)
911{
912 struct ring_buffer_event *event;
913 struct ftrace_graph_ent_entry *entry;
914 unsigned long irq_flags;
915
916 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
917 return;
918
919 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
920 &irq_flags);
921 if (!event)
922 return;
923 entry = ring_buffer_event_data(event);
924 tracing_generic_entry_update(&entry->ent, flags, pc);
925 entry->ent.type = TRACE_GRAPH_ENT;
926 entry->graph_ent = *trace;
927 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
928}
929
930static void __trace_graph_return(struct trace_array *tr,
931 struct trace_array_cpu *data,
932 struct ftrace_graph_ret *trace,
933 unsigned long flags,
934 int pc)
935{
936 struct ring_buffer_event *event;
937 struct ftrace_graph_ret_entry *entry;
938 unsigned long irq_flags;
939
940 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
941 return;
942
943 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
944 &irq_flags);
945 if (!event)
946 return;
947 entry = ring_buffer_event_data(event);
948 tracing_generic_entry_update(&entry->ent, flags, pc);
949 entry->ent.type = TRACE_GRAPH_RET;
950 entry->ret = *trace;
951 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
952}
953#endif
954
694void 955void
695ftrace(struct trace_array *tr, struct trace_array_cpu *data, 956ftrace(struct trace_array *tr, struct trace_array_cpu *data,
696 unsigned long ip, unsigned long parent_ip, unsigned long flags, 957 unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -742,6 +1003,46 @@ void __trace_stack(struct trace_array *tr,
742 ftrace_trace_stack(tr, data, flags, skip, preempt_count()); 1003 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
743} 1004}
744 1005
1006static void ftrace_trace_userstack(struct trace_array *tr,
1007 struct trace_array_cpu *data,
1008 unsigned long flags, int pc)
1009{
1010#ifdef CONFIG_STACKTRACE
1011 struct ring_buffer_event *event;
1012 struct userstack_entry *entry;
1013 struct stack_trace trace;
1014 unsigned long irq_flags;
1015
1016 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1017 return;
1018
1019 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1020 &irq_flags);
1021 if (!event)
1022 return;
1023 entry = ring_buffer_event_data(event);
1024 tracing_generic_entry_update(&entry->ent, flags, pc);
1025 entry->ent.type = TRACE_USER_STACK;
1026
1027 memset(&entry->caller, 0, sizeof(entry->caller));
1028
1029 trace.nr_entries = 0;
1030 trace.max_entries = FTRACE_STACK_ENTRIES;
1031 trace.skip = 0;
1032 trace.entries = entry->caller;
1033
1034 save_stack_trace_user(&trace);
1035 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1036#endif
1037}
1038
1039void __trace_userstack(struct trace_array *tr,
1040 struct trace_array_cpu *data,
1041 unsigned long flags)
1042{
1043 ftrace_trace_userstack(tr, data, flags, preempt_count());
1044}
1045
745static void 1046static void
746ftrace_trace_special(void *__tr, void *__data, 1047ftrace_trace_special(void *__tr, void *__data,
747 unsigned long arg1, unsigned long arg2, unsigned long arg3, 1048 unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -765,6 +1066,7 @@ ftrace_trace_special(void *__tr, void *__data,
765 entry->arg3 = arg3; 1066 entry->arg3 = arg3;
766 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1067 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
767 ftrace_trace_stack(tr, data, irq_flags, 4, pc); 1068 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
1069 ftrace_trace_userstack(tr, data, irq_flags, pc);
768 1070
769 trace_wake_up(); 1071 trace_wake_up();
770} 1072}
@@ -803,6 +1105,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
803 entry->next_cpu = task_cpu(next); 1105 entry->next_cpu = task_cpu(next);
804 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1106 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
805 ftrace_trace_stack(tr, data, flags, 5, pc); 1107 ftrace_trace_stack(tr, data, flags, 5, pc);
1108 ftrace_trace_userstack(tr, data, flags, pc);
806} 1109}
807 1110
808void 1111void
@@ -832,6 +1135,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
832 entry->next_cpu = task_cpu(wakee); 1135 entry->next_cpu = task_cpu(wakee);
833 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1136 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
834 ftrace_trace_stack(tr, data, flags, 6, pc); 1137 ftrace_trace_stack(tr, data, flags, 6, pc);
1138 ftrace_trace_userstack(tr, data, flags, pc);
835 1139
836 trace_wake_up(); 1140 trace_wake_up();
837} 1141}
@@ -841,26 +1145,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
841{ 1145{
842 struct trace_array *tr = &global_trace; 1146 struct trace_array *tr = &global_trace;
843 struct trace_array_cpu *data; 1147 struct trace_array_cpu *data;
1148 unsigned long flags;
844 int cpu; 1149 int cpu;
845 int pc; 1150 int pc;
846 1151
847 if (tracing_disabled || !tr->ctrl) 1152 if (tracing_disabled)
848 return; 1153 return;
849 1154
850 pc = preempt_count(); 1155 pc = preempt_count();
851 preempt_disable_notrace(); 1156 local_irq_save(flags);
852 cpu = raw_smp_processor_id(); 1157 cpu = raw_smp_processor_id();
853 data = tr->data[cpu]; 1158 data = tr->data[cpu];
854 1159
855 if (likely(!atomic_read(&data->disabled))) 1160 if (likely(atomic_inc_return(&data->disabled) == 1))
856 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); 1161 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
857 1162
858 preempt_enable_notrace(); 1163 atomic_dec(&data->disabled);
1164 local_irq_restore(flags);
859} 1165}
860 1166
861#ifdef CONFIG_FUNCTION_TRACER 1167#ifdef CONFIG_FUNCTION_TRACER
862static void 1168static void
863function_trace_call(unsigned long ip, unsigned long parent_ip) 1169function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
864{ 1170{
865 struct trace_array *tr = &global_trace; 1171 struct trace_array *tr = &global_trace;
866 struct trace_array_cpu *data; 1172 struct trace_array_cpu *data;
@@ -873,8 +1179,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
873 return; 1179 return;
874 1180
875 pc = preempt_count(); 1181 pc = preempt_count();
876 resched = need_resched(); 1182 resched = ftrace_preempt_disable();
877 preempt_disable_notrace();
878 local_save_flags(flags); 1183 local_save_flags(flags);
879 cpu = raw_smp_processor_id(); 1184 cpu = raw_smp_processor_id();
880 data = tr->data[cpu]; 1185 data = tr->data[cpu];
@@ -884,12 +1189,97 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
884 trace_function(tr, data, ip, parent_ip, flags, pc); 1189 trace_function(tr, data, ip, parent_ip, flags, pc);
885 1190
886 atomic_dec(&data->disabled); 1191 atomic_dec(&data->disabled);
887 if (resched) 1192 ftrace_preempt_enable(resched);
888 preempt_enable_no_resched_notrace();
889 else
890 preempt_enable_notrace();
891} 1193}
892 1194
1195static void
1196function_trace_call(unsigned long ip, unsigned long parent_ip)
1197{
1198 struct trace_array *tr = &global_trace;
1199 struct trace_array_cpu *data;
1200 unsigned long flags;
1201 long disabled;
1202 int cpu;
1203 int pc;
1204
1205 if (unlikely(!ftrace_function_enabled))
1206 return;
1207
1208 /*
1209 * Need to use raw, since this must be called before the
1210 * recursive protection is performed.
1211 */
1212 local_irq_save(flags);
1213 cpu = raw_smp_processor_id();
1214 data = tr->data[cpu];
1215 disabled = atomic_inc_return(&data->disabled);
1216
1217 if (likely(disabled == 1)) {
1218 pc = preempt_count();
1219 trace_function(tr, data, ip, parent_ip, flags, pc);
1220 }
1221
1222 atomic_dec(&data->disabled);
1223 local_irq_restore(flags);
1224}
1225
1226#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1227int trace_graph_entry(struct ftrace_graph_ent *trace)
1228{
1229 struct trace_array *tr = &global_trace;
1230 struct trace_array_cpu *data;
1231 unsigned long flags;
1232 long disabled;
1233 int cpu;
1234 int pc;
1235
1236 if (!ftrace_trace_task(current))
1237 return 0;
1238
1239 if (!ftrace_graph_addr(trace->func))
1240 return 0;
1241
1242 local_irq_save(flags);
1243 cpu = raw_smp_processor_id();
1244 data = tr->data[cpu];
1245 disabled = atomic_inc_return(&data->disabled);
1246 if (likely(disabled == 1)) {
1247 pc = preempt_count();
1248 __trace_graph_entry(tr, data, trace, flags, pc);
1249 }
1250 /* Only do the atomic if it is not already set */
1251 if (!test_tsk_trace_graph(current))
1252 set_tsk_trace_graph(current);
1253 atomic_dec(&data->disabled);
1254 local_irq_restore(flags);
1255
1256 return 1;
1257}
1258
1259void trace_graph_return(struct ftrace_graph_ret *trace)
1260{
1261 struct trace_array *tr = &global_trace;
1262 struct trace_array_cpu *data;
1263 unsigned long flags;
1264 long disabled;
1265 int cpu;
1266 int pc;
1267
1268 local_irq_save(flags);
1269 cpu = raw_smp_processor_id();
1270 data = tr->data[cpu];
1271 disabled = atomic_inc_return(&data->disabled);
1272 if (likely(disabled == 1)) {
1273 pc = preempt_count();
1274 __trace_graph_return(tr, data, trace, flags, pc);
1275 }
1276 if (!trace->depth)
1277 clear_tsk_trace_graph(current);
1278 atomic_dec(&data->disabled);
1279 local_irq_restore(flags);
1280}
1281#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1282
893static struct ftrace_ops trace_ops __read_mostly = 1283static struct ftrace_ops trace_ops __read_mostly =
894{ 1284{
895 .func = function_trace_call, 1285 .func = function_trace_call,
@@ -898,9 +1288,14 @@ static struct ftrace_ops trace_ops __read_mostly =
898void tracing_start_function_trace(void) 1288void tracing_start_function_trace(void)
899{ 1289{
900 ftrace_function_enabled = 0; 1290 ftrace_function_enabled = 0;
1291
1292 if (trace_flags & TRACE_ITER_PREEMPTONLY)
1293 trace_ops.func = function_trace_call_preempt_only;
1294 else
1295 trace_ops.func = function_trace_call;
1296
901 register_ftrace_function(&trace_ops); 1297 register_ftrace_function(&trace_ops);
902 if (tracer_enabled) 1298 ftrace_function_enabled = 1;
903 ftrace_function_enabled = 1;
904} 1299}
905 1300
906void tracing_stop_function_trace(void) 1301void tracing_stop_function_trace(void)
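ftrace_preempt_disable()/ftrace_preempt_enable(), used by function_trace_call_preempt_only() above, package up the open-coded need_resched() handling that this hunk removes. A plausible sketch of the helpers, reconstructed from that removed code (the real definitions live elsewhere in this series, presumably trace.h):

static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();	/* was a reschedule already pending? */

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* NEED_RESCHED was set before we disabled preemption; avoid
		 * scheduling from inside the tracer and let the caller do it */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}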
@@ -912,6 +1307,7 @@ void tracing_stop_function_trace(void)
912 1307
913enum trace_file_type { 1308enum trace_file_type {
914 TRACE_FILE_LAT_FMT = 1, 1309 TRACE_FILE_LAT_FMT = 1,
1310 TRACE_FILE_ANNOTATE = 2,
915}; 1311};
916 1312
917static void trace_iterator_increment(struct trace_iterator *iter, int cpu) 1313static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
@@ -1047,10 +1443,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1047 1443
1048 atomic_inc(&trace_record_cmdline_disabled); 1444 atomic_inc(&trace_record_cmdline_disabled);
1049 1445
1050 /* let the tracer grab locks here if needed */
1051 if (current_trace->start)
1052 current_trace->start(iter);
1053
1054 if (*pos != iter->pos) { 1446 if (*pos != iter->pos) {
1055 iter->ent = NULL; 1447 iter->ent = NULL;
1056 iter->cpu = 0; 1448 iter->cpu = 0;
@@ -1077,14 +1469,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1077 1469
1078static void s_stop(struct seq_file *m, void *p) 1470static void s_stop(struct seq_file *m, void *p)
1079{ 1471{
1080 struct trace_iterator *iter = m->private;
1081
1082 atomic_dec(&trace_record_cmdline_disabled); 1472 atomic_dec(&trace_record_cmdline_disabled);
1083
1084 /* let the tracer release locks here if needed */
1085 if (current_trace && current_trace == iter->trace && iter->trace->stop)
1086 iter->trace->stop(iter);
1087
1088 mutex_unlock(&trace_types_lock); 1473 mutex_unlock(&trace_types_lock);
1089} 1474}
1090 1475
@@ -1143,7 +1528,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1143# define IP_FMT "%016lx" 1528# define IP_FMT "%016lx"
1144#endif 1529#endif
1145 1530
1146static int 1531int
1147seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) 1532seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1148{ 1533{
1149 int ret; 1534 int ret;
@@ -1164,6 +1549,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1164 return ret; 1549 return ret;
1165} 1550}
1166 1551
1552static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
1553 unsigned long ip, unsigned long sym_flags)
1554{
1555 struct file *file = NULL;
1556 unsigned long vmstart = 0;
1557 int ret = 1;
1558
1559 if (mm) {
1560 const struct vm_area_struct *vma;
1561
1562 down_read(&mm->mmap_sem);
1563 vma = find_vma(mm, ip);
1564 if (vma) {
1565 file = vma->vm_file;
1566 vmstart = vma->vm_start;
1567 }
1568 if (file) {
1569 ret = trace_seq_path(s, &file->f_path);
1570 if (ret)
1571 ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
1572 }
1573 up_read(&mm->mmap_sem);
1574 }
1575 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
1576 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1577 return ret;
1578}
1579
1580static int
1581seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
1582 unsigned long sym_flags)
1583{
1584 struct mm_struct *mm = NULL;
1585 int ret = 1;
1586 unsigned int i;
1587
1588 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
1589 struct task_struct *task;
1590 /*
1591 * we do the lookup on the thread group leader,
1592 * since individual threads might have already quit!
1593 */
1594 rcu_read_lock();
1595 task = find_task_by_vpid(entry->ent.tgid);
1596 if (task)
1597 mm = get_task_mm(task);
1598 rcu_read_unlock();
1599 }
1600
1601 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1602 unsigned long ip = entry->caller[i];
1603
1604 if (ip == ULONG_MAX || !ret)
1605 break;
1606 if (i && ret)
1607 ret = trace_seq_puts(s, " <- ");
1608 if (!ip) {
1609 if (ret)
1610 ret = trace_seq_puts(s, "??");
1611 continue;
1612 }
1613 if (!ret)
1614 break;
1615 if (ret)
1616 ret = seq_print_user_ip(s, mm, ip, sym_flags);
1617 }
1618
1619 if (mm)
1620 mmput(mm);
1621 return ret;
1622}
1623
1167static void print_lat_help_header(struct seq_file *m) 1624static void print_lat_help_header(struct seq_file *m)
1168{ 1625{
1169 seq_puts(m, "# _------=> CPU# \n"); 1626 seq_puts(m, "# _------=> CPU# \n");
@@ -1301,6 +1758,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1301 1758
1302static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1759static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1303 1760
1761static int task_state_char(unsigned long state)
1762{
1763 int bit = state ? __ffs(state) + 1 : 0;
1764
1765 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1766}
1767
1304/* 1768/*
1305 * The message is supposed to contain an ending newline. 1769 * The message is supposed to contain an ending newline.
1306 * If the printing stops prematurely, try to add a newline of our own. 1770 * If the printing stops prematurely, try to add a newline of our own.
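A couple of worked examples for task_state_char() above, assuming the conventional TASK_STATE_TO_CHAR_STR ordering that begins "RSDTtZX":

/*
 * task_state_char(0)    -> bit = 0             -> 'R' (running, no bit set)
 * task_state_char(0x1)  -> __ffs(0x1) + 1 == 1 -> 'S' (TASK_INTERRUPTIBLE)
 * task_state_char(0x2)  -> __ffs(0x2) + 1 == 2 -> 'D' (TASK_UNINTERRUPTIBLE)
 * Bits beyond the end of the string fall back to '?' instead of indexing
 * past it.
 */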
@@ -1338,6 +1802,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1338 trace_seq_putc(s, '\n'); 1802 trace_seq_putc(s, '\n');
1339} 1803}
1340 1804
1805static void test_cpu_buff_start(struct trace_iterator *iter)
1806{
1807 struct trace_seq *s = &iter->seq;
1808
1809 if (!(trace_flags & TRACE_ITER_ANNOTATE))
1810 return;
1811
1812 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1813 return;
1814
1815 if (cpu_isset(iter->cpu, iter->started))
1816 return;
1817
1818 cpu_set(iter->cpu, iter->started);
1819 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1820}
1821
1341static enum print_line_t 1822static enum print_line_t
1342print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1823print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1343{ 1824{
@@ -1352,11 +1833,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1352 char *comm; 1833 char *comm;
1353 int S, T; 1834 int S, T;
1354 int i; 1835 int i;
1355 unsigned state;
1356 1836
1357 if (entry->type == TRACE_CONT) 1837 if (entry->type == TRACE_CONT)
1358 return TRACE_TYPE_HANDLED; 1838 return TRACE_TYPE_HANDLED;
1359 1839
1840 test_cpu_buff_start(iter);
1841
1360 next_entry = find_next_entry(iter, NULL, &next_ts); 1842 next_entry = find_next_entry(iter, NULL, &next_ts);
1361 if (!next_entry) 1843 if (!next_entry)
1362 next_ts = iter->ts; 1844 next_ts = iter->ts;
@@ -1396,12 +1878,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1396 1878
1397 trace_assign_type(field, entry); 1879 trace_assign_type(field, entry);
1398 1880
1399 T = field->next_state < sizeof(state_to_char) ? 1881 T = task_state_char(field->next_state);
1400 state_to_char[field->next_state] : 'X'; 1882 S = task_state_char(field->prev_state);
1401
1402 state = field->prev_state ?
1403 __ffs(field->prev_state) + 1 : 0;
1404 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1405 comm = trace_find_cmdline(field->next_pid); 1883 comm = trace_find_cmdline(field->next_pid);
1406 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", 1884 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1407 field->prev_pid, 1885 field->prev_pid,
@@ -1448,6 +1926,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1448 trace_seq_print_cont(s, iter); 1926 trace_seq_print_cont(s, iter);
1449 break; 1927 break;
1450 } 1928 }
1929 case TRACE_BRANCH: {
1930 struct trace_branch *field;
1931
1932 trace_assign_type(field, entry);
1933
1934 trace_seq_printf(s, "[%s] %s:%s:%d\n",
1935 field->correct ? " ok " : " MISS ",
1936 field->func,
1937 field->file,
1938 field->line);
1939 break;
1940 }
1941 case TRACE_USER_STACK: {
1942 struct userstack_entry *field;
1943
1944 trace_assign_type(field, entry);
1945
1946 seq_print_userip_objs(field, s, sym_flags);
1947 trace_seq_putc(s, '\n');
1948 break;
1949 }
1451 default: 1950 default:
1452 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1951 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1453 } 1952 }
@@ -1472,6 +1971,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1472 if (entry->type == TRACE_CONT) 1971 if (entry->type == TRACE_CONT)
1473 return TRACE_TYPE_HANDLED; 1972 return TRACE_TYPE_HANDLED;
1474 1973
1974 test_cpu_buff_start(iter);
1975
1475 comm = trace_find_cmdline(iter->ent->pid); 1976 comm = trace_find_cmdline(iter->ent->pid);
1476 1977
1477 t = ns2usecs(iter->ts); 1978 t = ns2usecs(iter->ts);
@@ -1519,10 +2020,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1519 2020
1520 trace_assign_type(field, entry); 2021 trace_assign_type(field, entry);
1521 2022
1522 S = field->prev_state < sizeof(state_to_char) ? 2023 T = task_state_char(field->next_state);
1523 state_to_char[field->prev_state] : 'X'; 2024 S = task_state_char(field->prev_state);
1524 T = field->next_state < sizeof(state_to_char) ?
1525 state_to_char[field->next_state] : 'X';
1526 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", 2025 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1527 field->prev_pid, 2026 field->prev_pid,
1528 field->prev_prio, 2027 field->prev_prio,
@@ -1581,6 +2080,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1581 trace_seq_print_cont(s, iter); 2080 trace_seq_print_cont(s, iter);
1582 break; 2081 break;
1583 } 2082 }
2083 case TRACE_GRAPH_RET: {
2084 return print_graph_function(iter);
2085 }
2086 case TRACE_GRAPH_ENT: {
2087 return print_graph_function(iter);
2088 }
2089 case TRACE_BRANCH: {
2090 struct trace_branch *field;
2091
2092 trace_assign_type(field, entry);
2093
2094 trace_seq_printf(s, "[%s] %s:%s:%d\n",
2095 field->correct ? " ok " : " MISS ",
2096 field->func,
2097 field->file,
2098 field->line);
2099 break;
2100 }
2101 case TRACE_USER_STACK: {
2102 struct userstack_entry *field;
2103
2104 trace_assign_type(field, entry);
2105
2106 ret = seq_print_userip_objs(field, s, sym_flags);
2107 if (!ret)
2108 return TRACE_TYPE_PARTIAL_LINE;
2109 ret = trace_seq_putc(s, '\n');
2110 if (!ret)
2111 return TRACE_TYPE_PARTIAL_LINE;
2112 break;
2113 }
1584 } 2114 }
1585 return TRACE_TYPE_HANDLED; 2115 return TRACE_TYPE_HANDLED;
1586} 2116}
@@ -1621,12 +2151,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1621 2151
1622 trace_assign_type(field, entry); 2152 trace_assign_type(field, entry);
1623 2153
1624 S = field->prev_state < sizeof(state_to_char) ? 2154 T = task_state_char(field->next_state);
1625 state_to_char[field->prev_state] : 'X'; 2155 S = entry->type == TRACE_WAKE ? '+' :
1626 T = field->next_state < sizeof(state_to_char) ? 2156 task_state_char(field->prev_state);
1627 state_to_char[field->next_state] : 'X';
1628 if (entry->type == TRACE_WAKE)
1629 S = '+';
1630 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", 2157 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1631 field->prev_pid, 2158 field->prev_pid,
1632 field->prev_prio, 2159 field->prev_prio,
@@ -1640,6 +2167,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1640 break; 2167 break;
1641 } 2168 }
1642 case TRACE_SPECIAL: 2169 case TRACE_SPECIAL:
2170 case TRACE_USER_STACK:
1643 case TRACE_STACK: { 2171 case TRACE_STACK: {
1644 struct special_entry *field; 2172 struct special_entry *field;
1645 2173
@@ -1712,12 +2240,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1712 2240
1713 trace_assign_type(field, entry); 2241 trace_assign_type(field, entry);
1714 2242
1715 S = field->prev_state < sizeof(state_to_char) ? 2243 T = task_state_char(field->next_state);
1716 state_to_char[field->prev_state] : 'X'; 2244 S = entry->type == TRACE_WAKE ? '+' :
1717 T = field->next_state < sizeof(state_to_char) ? 2245 task_state_char(field->prev_state);
1718 state_to_char[field->next_state] : 'X';
1719 if (entry->type == TRACE_WAKE)
1720 S = '+';
1721 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 2246 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1722 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); 2247 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1723 SEQ_PUT_HEX_FIELD_RET(s, S); 2248 SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -1728,6 +2253,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1728 break; 2253 break;
1729 } 2254 }
1730 case TRACE_SPECIAL: 2255 case TRACE_SPECIAL:
2256 case TRACE_USER_STACK:
1731 case TRACE_STACK: { 2257 case TRACE_STACK: {
1732 struct special_entry *field; 2258 struct special_entry *field;
1733 2259
@@ -1744,6 +2270,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1744 return TRACE_TYPE_HANDLED; 2270 return TRACE_TYPE_HANDLED;
1745} 2271}
1746 2272
2273static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
2274{
2275 struct trace_seq *s = &iter->seq;
2276 struct trace_entry *entry = iter->ent;
2277 struct print_entry *field;
2278 int ret;
2279
2280 trace_assign_type(field, entry);
2281
2282 ret = trace_seq_printf(s, field->buf);
2283 if (!ret)
2284 return TRACE_TYPE_PARTIAL_LINE;
2285
2286 if (entry->flags & TRACE_FLAG_CONT)
2287 trace_seq_print_cont(s, iter);
2288
2289 return TRACE_TYPE_HANDLED;
2290}
2291
1747static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2292static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1748{ 2293{
1749 struct trace_seq *s = &iter->seq; 2294 struct trace_seq *s = &iter->seq;
@@ -1782,6 +2327,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1782 break; 2327 break;
1783 } 2328 }
1784 case TRACE_SPECIAL: 2329 case TRACE_SPECIAL:
2330 case TRACE_USER_STACK:
1785 case TRACE_STACK: { 2331 case TRACE_STACK: {
1786 struct special_entry *field; 2332 struct special_entry *field;
1787 2333
@@ -1823,6 +2369,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
1823 return ret; 2369 return ret;
1824 } 2370 }
1825 2371
2372 if (iter->ent->type == TRACE_PRINT &&
2373 trace_flags & TRACE_ITER_PRINTK &&
2374 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2375 return print_printk_msg_only(iter);
2376
1826 if (trace_flags & TRACE_ITER_BIN) 2377 if (trace_flags & TRACE_ITER_BIN)
1827 return print_bin_fmt(iter); 2378 return print_bin_fmt(iter);
1828 2379
@@ -1847,7 +2398,9 @@ static int s_show(struct seq_file *m, void *v)
1847 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2398 seq_printf(m, "# tracer: %s\n", iter->trace->name);
1848 seq_puts(m, "#\n"); 2399 seq_puts(m, "#\n");
1849 } 2400 }
1850 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2401 if (iter->trace && iter->trace->print_header)
2402 iter->trace->print_header(m);
2403 else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1851 /* print nothing if the buffers are empty */ 2404 /* print nothing if the buffers are empty */
1852 if (trace_empty(iter)) 2405 if (trace_empty(iter))
1853 return 0; 2406 return 0;
@@ -1899,6 +2452,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1899 iter->trace = current_trace; 2452 iter->trace = current_trace;
1900 iter->pos = -1; 2453 iter->pos = -1;
1901 2454
2455 /* Notify the tracer early; before we stop tracing. */
2456 if (iter->trace && iter->trace->open)
2457 iter->trace->open(iter);
2458
2459 /* Annotate start of buffers if we had overruns */
2460 if (ring_buffer_overruns(iter->tr->buffer))
2461 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2462
2463
1902 for_each_tracing_cpu(cpu) { 2464 for_each_tracing_cpu(cpu) {
1903 2465
1904 iter->buffer_iter[cpu] = 2466 iter->buffer_iter[cpu] =
@@ -1917,13 +2479,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1917 m->private = iter; 2479 m->private = iter;
1918 2480
1919 /* stop the trace while dumping */ 2481 /* stop the trace while dumping */
1920 if (iter->tr->ctrl) { 2482 tracing_stop();
1921 tracer_enabled = 0;
1922 ftrace_function_enabled = 0;
1923 }
1924
1925 if (iter->trace && iter->trace->open)
1926 iter->trace->open(iter);
1927 2483
1928 mutex_unlock(&trace_types_lock); 2484 mutex_unlock(&trace_types_lock);
1929 2485
@@ -1966,14 +2522,7 @@ int tracing_release(struct inode *inode, struct file *file)
1966 iter->trace->close(iter); 2522 iter->trace->close(iter);
1967 2523
1968 /* reenable tracing if it was previously enabled */ 2524 /* reenable tracing if it was previously enabled */
1969 if (iter->tr->ctrl) { 2525 tracing_start();
1970 tracer_enabled = 1;
1971 /*
1972 * It is safe to enable function tracing even if it
1973 * isn't used
1974 */
1975 ftrace_function_enabled = 1;
1976 }
1977 mutex_unlock(&trace_types_lock); 2526 mutex_unlock(&trace_types_lock);
1978 2527
1979 seq_release(inode, file); 2528 seq_release(inode, file);
@@ -2151,7 +2700,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2151 if (err) 2700 if (err)
2152 goto err_unlock; 2701 goto err_unlock;
2153 2702
2154 raw_local_irq_disable(); 2703 local_irq_disable();
2155 __raw_spin_lock(&ftrace_max_lock); 2704 __raw_spin_lock(&ftrace_max_lock);
2156 for_each_tracing_cpu(cpu) { 2705 for_each_tracing_cpu(cpu) {
2157 /* 2706 /*
@@ -2168,7 +2717,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2168 } 2717 }
2169 } 2718 }
2170 __raw_spin_unlock(&ftrace_max_lock); 2719 __raw_spin_unlock(&ftrace_max_lock);
2171 raw_local_irq_enable(); 2720 local_irq_enable();
2172 2721
2173 tracing_cpumask = tracing_cpumask_new; 2722 tracing_cpumask = tracing_cpumask_new;
2174 2723
@@ -2189,13 +2738,16 @@ static struct file_operations tracing_cpumask_fops = {
2189}; 2738};
2190 2739
2191static ssize_t 2740static ssize_t
2192tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, 2741tracing_trace_options_read(struct file *filp, char __user *ubuf,
2193 size_t cnt, loff_t *ppos) 2742 size_t cnt, loff_t *ppos)
2194{ 2743{
2744 int i;
2195 char *buf; 2745 char *buf;
2196 int r = 0; 2746 int r = 0;
2197 int len = 0; 2747 int len = 0;
2198 int i; 2748 u32 tracer_flags = current_trace->flags->val;
2749 struct tracer_opt *trace_opts = current_trace->flags->opts;
2750
2199 2751
2200 /* calulate max size */ 2752 /* calulate max size */
2201 for (i = 0; trace_options[i]; i++) { 2753 for (i = 0; trace_options[i]; i++) {
@@ -2203,6 +2755,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2203 len += 3; /* "no" and space */ 2755 len += 3; /* "no" and space */
2204 } 2756 }
2205 2757
2758 /*
2759 * Increase the size with the names of the options specific
2760 * to the current tracer.
2761 */
2762 for (i = 0; trace_opts[i].name; i++) {
2763 len += strlen(trace_opts[i].name);
2764 len += 3; /* "no" and space */
2765 }
2766
2206 /* +2 for \n and \0 */ 2767 /* +2 for \n and \0 */
2207 buf = kmalloc(len + 2, GFP_KERNEL); 2768 buf = kmalloc(len + 2, GFP_KERNEL);
2208 if (!buf) 2769 if (!buf)
@@ -2215,6 +2776,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2215 r += sprintf(buf + r, "no%s ", trace_options[i]); 2776 r += sprintf(buf + r, "no%s ", trace_options[i]);
2216 } 2777 }
2217 2778
2779 for (i = 0; trace_opts[i].name; i++) {
2780 if (tracer_flags & trace_opts[i].bit)
2781 r += sprintf(buf + r, "%s ",
2782 trace_opts[i].name);
2783 else
2784 r += sprintf(buf + r, "no%s ",
2785 trace_opts[i].name);
2786 }
2787
2218 r += sprintf(buf + r, "\n"); 2788 r += sprintf(buf + r, "\n");
2219 WARN_ON(r >= len + 2); 2789 WARN_ON(r >= len + 2);
2220 2790
@@ -2225,13 +2795,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2225 return r; 2795 return r;
2226} 2796}
2227 2797
2798/* Try to assign a tracer specific option */
2799static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2800{
2801 struct tracer_flags *trace_flags = trace->flags;
2802 struct tracer_opt *opts = NULL;
2803 int ret = 0, i = 0;
2804 int len;
2805
2806 for (i = 0; trace_flags->opts[i].name; i++) {
2807 opts = &trace_flags->opts[i];
2808 len = strlen(opts->name);
2809
2810 if (strncmp(cmp, opts->name, len) == 0) {
2811 ret = trace->set_flag(trace_flags->val,
2812 opts->bit, !neg);
2813 break;
2814 }
2815 }
2816 /* Not found */
2817 if (!trace_flags->opts[i].name)
2818 return -EINVAL;
2819
2820 /* Refused to handle */
2821 if (ret)
2822 return ret;
2823
2824 if (neg)
2825 trace_flags->val &= ~opts->bit;
2826 else
2827 trace_flags->val |= opts->bit;
2828
2829 return 0;
2830}
2831
2228static ssize_t 2832static ssize_t
2229tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, 2833tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2230 size_t cnt, loff_t *ppos) 2834 size_t cnt, loff_t *ppos)
2231{ 2835{
2232 char buf[64]; 2836 char buf[64];
2233 char *cmp = buf; 2837 char *cmp = buf;
2234 int neg = 0; 2838 int neg = 0;
2839 int ret;
2235 int i; 2840 int i;
2236 2841
2237 if (cnt >= sizeof(buf)) 2842 if (cnt >= sizeof(buf))
@@ -2258,11 +2863,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2258 break; 2863 break;
2259 } 2864 }
2260 } 2865 }
2261 /* 2866
2262 * If no option could be set, return an error: 2867 /* If no option could be set, test the specific tracer options */
2263 */ 2868 if (!trace_options[i]) {
2264 if (!trace_options[i]) 2869 ret = set_tracer_option(current_trace, cmp, neg);
2265 return -EINVAL; 2870 if (ret)
2871 return ret;
2872 }
2266 2873
2267 filp->f_pos += cnt; 2874 filp->f_pos += cnt;
2268 2875
@@ -2271,8 +2878,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2271 2878
2272static struct file_operations tracing_iter_fops = { 2879static struct file_operations tracing_iter_fops = {
2273 .open = tracing_open_generic, 2880 .open = tracing_open_generic,
2274 .read = tracing_iter_ctrl_read, 2881 .read = tracing_trace_options_read,
2275 .write = tracing_iter_ctrl_write, 2882 .write = tracing_trace_options_write,
2276}; 2883};
2277 2884
2278static const char readme_msg[] = 2885static const char readme_msg[] =
@@ -2286,9 +2893,9 @@ static const char readme_msg[] =
2286 "# echo sched_switch > /debug/tracing/current_tracer\n" 2893 "# echo sched_switch > /debug/tracing/current_tracer\n"
2287 "# cat /debug/tracing/current_tracer\n" 2894 "# cat /debug/tracing/current_tracer\n"
2288 "sched_switch\n" 2895 "sched_switch\n"
2289 "# cat /debug/tracing/iter_ctrl\n" 2896 "# cat /debug/tracing/trace_options\n"
2290 "noprint-parent nosym-offset nosym-addr noverbose\n" 2897 "noprint-parent nosym-offset nosym-addr noverbose\n"
2291 "# echo print-parent > /debug/tracing/iter_ctrl\n" 2898 "# echo print-parent > /debug/tracing/trace_options\n"
2292 "# echo 1 > /debug/tracing/tracing_enabled\n" 2899 "# echo 1 > /debug/tracing/tracing_enabled\n"
2293 "# cat /debug/tracing/trace > /tmp/trace.txt\n" 2900 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2294 "echo 0 > /debug/tracing/tracing_enabled\n" 2901 "echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2311,11 +2918,10 @@ static ssize_t
2311tracing_ctrl_read(struct file *filp, char __user *ubuf, 2918tracing_ctrl_read(struct file *filp, char __user *ubuf,
2312 size_t cnt, loff_t *ppos) 2919 size_t cnt, loff_t *ppos)
2313{ 2920{
2314 struct trace_array *tr = filp->private_data;
2315 char buf[64]; 2921 char buf[64];
2316 int r; 2922 int r;
2317 2923
2318 r = sprintf(buf, "%ld\n", tr->ctrl); 2924 r = sprintf(buf, "%u\n", tracer_enabled);
2319 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2925 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2320} 2926}
2321 2927
@@ -2343,16 +2949,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2343 val = !!val; 2949 val = !!val;
2344 2950
2345 mutex_lock(&trace_types_lock); 2951 mutex_lock(&trace_types_lock);
2346 if (tr->ctrl ^ val) { 2952 if (tracer_enabled ^ val) {
2347 if (val) 2953 if (val) {
2348 tracer_enabled = 1; 2954 tracer_enabled = 1;
2349 else 2955 if (current_trace->start)
2956 current_trace->start(tr);
2957 tracing_start();
2958 } else {
2350 tracer_enabled = 0; 2959 tracer_enabled = 0;
2351 2960 tracing_stop();
2352 tr->ctrl = val; 2961 if (current_trace->stop)
2353 2962 current_trace->stop(tr);
2354 if (current_trace && current_trace->ctrl_update) 2963 }
2355 current_trace->ctrl_update(tr);
2356 } 2964 }
2357 mutex_unlock(&trace_types_lock); 2965 mutex_unlock(&trace_types_lock);
2358 2966
@@ -2378,29 +2986,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
2378 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2986 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2379} 2987}
2380 2988
2381static ssize_t 2989static int tracing_set_tracer(char *buf)
2382tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 size_t cnt, loff_t *ppos)
2384{ 2990{
2385 struct trace_array *tr = &global_trace; 2991 struct trace_array *tr = &global_trace;
2386 struct tracer *t; 2992 struct tracer *t;
2387 char buf[max_tracer_type_len+1]; 2993 int ret = 0;
2388 int i;
2389 size_t ret;
2390
2391 ret = cnt;
2392
2393 if (cnt > max_tracer_type_len)
2394 cnt = max_tracer_type_len;
2395
2396 if (copy_from_user(&buf, ubuf, cnt))
2397 return -EFAULT;
2398
2399 buf[cnt] = 0;
2400
2401 /* strip ending whitespace. */
2402 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2403 buf[i] = 0;
2404 2994
2405 mutex_lock(&trace_types_lock); 2995 mutex_lock(&trace_types_lock);
2406 for (t = trace_types; t; t = t->next) { 2996 for (t = trace_types; t; t = t->next) {
@@ -2414,18 +3004,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2414 if (t == current_trace) 3004 if (t == current_trace)
2415 goto out; 3005 goto out;
2416 3006
3007 trace_branch_disable();
2417 if (current_trace && current_trace->reset) 3008 if (current_trace && current_trace->reset)
2418 current_trace->reset(tr); 3009 current_trace->reset(tr);
2419 3010
2420 current_trace = t; 3011 current_trace = t;
2421 if (t->init) 3012 if (t->init) {
2422 t->init(tr); 3013 ret = t->init(tr);
3014 if (ret)
3015 goto out;
3016 }
2423 3017
3018 trace_branch_enable(tr);
2424 out: 3019 out:
2425 mutex_unlock(&trace_types_lock); 3020 mutex_unlock(&trace_types_lock);
2426 3021
2427 if (ret > 0) 3022 return ret;
2428 filp->f_pos += ret; 3023}
3024
3025static ssize_t
3026tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3027 size_t cnt, loff_t *ppos)
3028{
3029 char buf[max_tracer_type_len+1];
3030 int i;
3031 size_t ret;
3032 int err;
3033
3034 ret = cnt;
3035
3036 if (cnt > max_tracer_type_len)
3037 cnt = max_tracer_type_len;
3038
3039 if (copy_from_user(&buf, ubuf, cnt))
3040 return -EFAULT;
3041
3042 buf[cnt] = 0;
3043
3044 /* strip ending whitespace. */
3045 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3046 buf[i] = 0;
3047
3048 err = tracing_set_tracer(buf);
3049 if (err)
3050 return err;
3051
3052 filp->f_pos += ret;
2429 3053
2430 return ret; 3054 return ret;
2431} 3055}
@@ -2492,6 +3116,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
2492 return -ENOMEM; 3116 return -ENOMEM;
2493 3117
2494 mutex_lock(&trace_types_lock); 3118 mutex_lock(&trace_types_lock);
3119
3120 /* trace pipe does not show start of buffer */
3121 cpus_setall(iter->started);
3122
2495 iter->tr = &global_trace; 3123 iter->tr = &global_trace;
2496 iter->trace = current_trace; 3124 iter->trace = current_trace;
2497 filp->private_data = iter; 3125 filp->private_data = iter;
@@ -2667,7 +3295,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
2667 char buf[64]; 3295 char buf[64];
2668 int r; 3296 int r;
2669 3297
2670 r = sprintf(buf, "%lu\n", tr->entries); 3298 r = sprintf(buf, "%lu\n", tr->entries >> 10);
2671 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3299 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2672} 3300}
2673 3301
@@ -2678,7 +3306,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2678 unsigned long val; 3306 unsigned long val;
2679 char buf[64]; 3307 char buf[64];
2680 int ret, cpu; 3308 int ret, cpu;
2681 struct trace_array *tr = filp->private_data;
2682 3309
2683 if (cnt >= sizeof(buf)) 3310 if (cnt >= sizeof(buf))
2684 return -EINVAL; 3311 return -EINVAL;
@@ -2698,12 +3325,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2698 3325
2699 mutex_lock(&trace_types_lock); 3326 mutex_lock(&trace_types_lock);
2700 3327
2701 if (tr->ctrl) { 3328 tracing_stop();
2702 cnt = -EBUSY;
2703 pr_info("ftrace: please disable tracing"
2704 " before modifying buffer size\n");
2705 goto out;
2706 }
2707 3329
2708 /* disable all cpu buffers */ 3330 /* disable all cpu buffers */
2709 for_each_tracing_cpu(cpu) { 3331 for_each_tracing_cpu(cpu) {
@@ -2713,6 +3335,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2713 atomic_inc(&max_tr.data[cpu]->disabled); 3335 atomic_inc(&max_tr.data[cpu]->disabled);
2714 } 3336 }
2715 3337
3338 /* value is in KB */
3339 val <<= 10;
3340
2716 if (val != global_trace.entries) { 3341 if (val != global_trace.entries) {
2717 ret = ring_buffer_resize(global_trace.buffer, val); 3342 ret = ring_buffer_resize(global_trace.buffer, val);
2718 if (ret < 0) { 3343 if (ret < 0) {
@@ -2751,6 +3376,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2751 atomic_dec(&max_tr.data[cpu]->disabled); 3376 atomic_dec(&max_tr.data[cpu]->disabled);
2752 } 3377 }
2753 3378
3379 tracing_start();
2754 max_tr.entries = global_trace.entries; 3380 max_tr.entries = global_trace.entries;
2755 mutex_unlock(&trace_types_lock); 3381 mutex_unlock(&trace_types_lock);
2756 3382
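Since the renamed buffer_size_kb file works in kilobytes, the value written is shifted up before the resize and shifted back down by tracing_entries_read() further up; a quick worked example, assuming ring_buffer_resize() takes its size in bytes:

/*
 * echo 1024 > buffer_size_kb   ->  val = 1024 (KB)
 * val <<= 10                   ->  1024 * 1024 = 1048576 bytes handed to
 *                                  ring_buffer_resize()
 * cat buffer_size_kb           ->  prints tr->entries >> 10, i.e. 1024 again
 */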
@@ -2762,7 +3388,7 @@ static int mark_printk(const char *fmt, ...)
2762 int ret; 3388 int ret;
2763 va_list args; 3389 va_list args;
2764 va_start(args, fmt); 3390 va_start(args, fmt);
2765 ret = trace_vprintk(0, fmt, args); 3391 ret = trace_vprintk(0, -1, fmt, args);
2766 va_end(args); 3392 va_end(args);
2767 return ret; 3393 return ret;
2768} 3394}
@@ -2773,9 +3399,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
2773{ 3399{
2774 char *buf; 3400 char *buf;
2775 char *end; 3401 char *end;
2776 struct trace_array *tr = &global_trace;
2777 3402
2778 if (!tr->ctrl || tracing_disabled) 3403 if (tracing_disabled)
2779 return -EINVAL; 3404 return -EINVAL;
2780 3405
2781 if (cnt > TRACE_BUF_SIZE) 3406 if (cnt > TRACE_BUF_SIZE)
@@ -2841,22 +3466,38 @@ static struct file_operations tracing_mark_fops = {
2841 3466
2842#ifdef CONFIG_DYNAMIC_FTRACE 3467#ifdef CONFIG_DYNAMIC_FTRACE
2843 3468
3469int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3470{
3471 return 0;
3472}
3473
2844static ssize_t 3474static ssize_t
2845tracing_read_long(struct file *filp, char __user *ubuf, 3475tracing_read_dyn_info(struct file *filp, char __user *ubuf,
2846 size_t cnt, loff_t *ppos) 3476 size_t cnt, loff_t *ppos)
2847{ 3477{
3478 static char ftrace_dyn_info_buffer[1024];
3479 static DEFINE_MUTEX(dyn_info_mutex);
2848 unsigned long *p = filp->private_data; 3480 unsigned long *p = filp->private_data;
2849 char buf[64]; 3481 char *buf = ftrace_dyn_info_buffer;
3482 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
2850 int r; 3483 int r;
2851 3484
2852 r = sprintf(buf, "%ld\n", *p); 3485 mutex_lock(&dyn_info_mutex);
3486 r = sprintf(buf, "%ld ", *p);
2853 3487
2854 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3488 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
3489 buf[r++] = '\n';
3490
3491 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3492
3493 mutex_unlock(&dyn_info_mutex);
3494
3495 return r;
2855} 3496}
2856 3497
2857static struct file_operations tracing_read_long_fops = { 3498static struct file_operations tracing_dyn_info_fops = {
2858 .open = tracing_open_generic, 3499 .open = tracing_open_generic,
2859 .read = tracing_read_long, 3500 .read = tracing_read_dyn_info,
2860}; 3501};
2861#endif 3502#endif
2862 3503
@@ -2897,10 +3538,10 @@ static __init int tracer_init_debugfs(void)
2897 if (!entry) 3538 if (!entry)
2898 pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); 3539 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2899 3540
2900 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, 3541 entry = debugfs_create_file("trace_options", 0644, d_tracer,
2901 NULL, &tracing_iter_fops); 3542 NULL, &tracing_iter_fops);
2902 if (!entry) 3543 if (!entry)
2903 pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); 3544 pr_warning("Could not create debugfs 'trace_options' entry\n");
2904 3545
2905 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, 3546 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2906 NULL, &tracing_cpumask_fops); 3547 NULL, &tracing_cpumask_fops);
@@ -2950,11 +3591,11 @@ static __init int tracer_init_debugfs(void)
2950 pr_warning("Could not create debugfs " 3591 pr_warning("Could not create debugfs "
2951 "'trace_pipe' entry\n"); 3592 "'trace_pipe' entry\n");
2952 3593
2953 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 3594 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
2954 &global_trace, &tracing_entries_fops); 3595 &global_trace, &tracing_entries_fops);
2955 if (!entry) 3596 if (!entry)
2956 pr_warning("Could not create debugfs " 3597 pr_warning("Could not create debugfs "
2957 "'trace_entries' entry\n"); 3598 "'buffer_size_kb' entry\n");
2958 3599
2959 entry = debugfs_create_file("trace_marker", 0220, d_tracer, 3600 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2960 NULL, &tracing_mark_fops); 3601 NULL, &tracing_mark_fops);
@@ -2965,7 +3606,7 @@ static __init int tracer_init_debugfs(void)
2965#ifdef CONFIG_DYNAMIC_FTRACE 3606#ifdef CONFIG_DYNAMIC_FTRACE
2966 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 3607 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2967 &ftrace_update_tot_cnt, 3608 &ftrace_update_tot_cnt,
2968 &tracing_read_long_fops); 3609 &tracing_dyn_info_fops);
2969 if (!entry) 3610 if (!entry)
2970 pr_warning("Could not create debugfs " 3611 pr_warning("Could not create debugfs "
2971 "'dyn_ftrace_total_info' entry\n"); 3612 "'dyn_ftrace_total_info' entry\n");
@@ -2976,7 +3617,7 @@ static __init int tracer_init_debugfs(void)
2976 return 0; 3617 return 0;
2977} 3618}
2978 3619
2979int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3620int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
2980{ 3621{
2981 static DEFINE_SPINLOCK(trace_buf_lock); 3622 static DEFINE_SPINLOCK(trace_buf_lock);
2982 static char trace_buf[TRACE_BUF_SIZE]; 3623 static char trace_buf[TRACE_BUF_SIZE];
@@ -2984,11 +3625,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2984 struct ring_buffer_event *event; 3625 struct ring_buffer_event *event;
2985 struct trace_array *tr = &global_trace; 3626 struct trace_array *tr = &global_trace;
2986 struct trace_array_cpu *data; 3627 struct trace_array_cpu *data;
2987 struct print_entry *entry;
2988 unsigned long flags, irq_flags;
2989 int cpu, len = 0, size, pc; 3628 int cpu, len = 0, size, pc;
3629 struct print_entry *entry;
3630 unsigned long irq_flags;
2990 3631
2991 if (!tr->ctrl || tracing_disabled) 3632 if (tracing_disabled || tracing_selftest_running)
2992 return 0; 3633 return 0;
2993 3634
2994 pc = preempt_count(); 3635 pc = preempt_count();
@@ -2999,7 +3640,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2999 if (unlikely(atomic_read(&data->disabled))) 3640 if (unlikely(atomic_read(&data->disabled)))
3000 goto out; 3641 goto out;
3001 3642
3002 spin_lock_irqsave(&trace_buf_lock, flags); 3643 pause_graph_tracing();
3644 spin_lock_irqsave(&trace_buf_lock, irq_flags);
3003 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 3645 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3004 3646
3005 len = min(len, TRACE_BUF_SIZE-1); 3647 len = min(len, TRACE_BUF_SIZE-1);
@@ -3010,17 +3652,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3010 if (!event) 3652 if (!event)
3011 goto out_unlock; 3653 goto out_unlock;
3012 entry = ring_buffer_event_data(event); 3654 entry = ring_buffer_event_data(event);
3013 tracing_generic_entry_update(&entry->ent, flags, pc); 3655 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3014 entry->ent.type = TRACE_PRINT; 3656 entry->ent.type = TRACE_PRINT;
3015 entry->ip = ip; 3657 entry->ip = ip;
3658 entry->depth = depth;
3016 3659
3017 memcpy(&entry->buf, trace_buf, len); 3660 memcpy(&entry->buf, trace_buf, len);
3018 entry->buf[len] = 0; 3661 entry->buf[len] = 0;
3019 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 3662 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3020 3663
3021 out_unlock: 3664 out_unlock:
3022 spin_unlock_irqrestore(&trace_buf_lock, flags); 3665 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3023 3666 unpause_graph_tracing();
3024 out: 3667 out:
3025 preempt_enable_notrace(); 3668 preempt_enable_notrace();
3026 3669
@@ -3037,7 +3680,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3037 return 0; 3680 return 0;
3038 3681
3039 va_start(ap, fmt); 3682 va_start(ap, fmt);
3040 ret = trace_vprintk(ip, fmt, ap); 3683 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
3041 va_end(ap); 3684 va_end(ap);
3042 return ret; 3685 return ret;
3043} 3686}
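With the extra depth argument (taken from task_curr_ret_stack() above), trace_vprintk() records how deeply nested the caller was so the graph output can place the message at the right depth; callers keep the printk-style interface. An illustrative call, assuming the ftrace_printk() convenience wrapper that passes the caller's ip to __ftrace_printk():

static void my_debug_hook(struct task_struct *p)	/* hypothetical */
{
	/* lands in the ring buffer as a TRACE_PRINT entry and is shown when
	 * the "ftrace_printk" bit in trace_options is enabled */
	ftrace_printk("wakeup: pid=%d cpu=%d\n", p->pid, task_cpu(p));
}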
@@ -3046,7 +3689,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
3046static int trace_panic_handler(struct notifier_block *this, 3689static int trace_panic_handler(struct notifier_block *this,
3047 unsigned long event, void *unused) 3690 unsigned long event, void *unused)
3048{ 3691{
3049 ftrace_dump(); 3692 if (ftrace_dump_on_oops)
3693 ftrace_dump();
3050 return NOTIFY_OK; 3694 return NOTIFY_OK;
3051} 3695}
3052 3696
@@ -3062,7 +3706,8 @@ static int trace_die_handler(struct notifier_block *self,
3062{ 3706{
3063 switch (val) { 3707 switch (val) {
3064 case DIE_OOPS: 3708 case DIE_OOPS:
3065 ftrace_dump(); 3709 if (ftrace_dump_on_oops)
3710 ftrace_dump();
3066 break; 3711 break;
3067 default: 3712 default:
3068 break; 3713 break;
@@ -3103,7 +3748,6 @@ trace_printk_seq(struct trace_seq *s)
3103 trace_seq_reset(s); 3748 trace_seq_reset(s);
3104} 3749}
3105 3750
3106
3107void ftrace_dump(void) 3751void ftrace_dump(void)
3108{ 3752{
3109 static DEFINE_SPINLOCK(ftrace_dump_lock); 3753 static DEFINE_SPINLOCK(ftrace_dump_lock);
@@ -3128,6 +3772,9 @@ void ftrace_dump(void)
3128 atomic_inc(&global_trace.data[cpu]->disabled); 3772 atomic_inc(&global_trace.data[cpu]->disabled);
3129 } 3773 }
3130 3774
3775 /* don't look at user memory in panic mode */
3776 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3777
3131 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 3778 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3132 3779
3133 iter.tr = &global_trace; 3780 iter.tr = &global_trace;
@@ -3221,7 +3868,6 @@ __init static int tracer_alloc_buffers(void)
3221#endif 3868#endif
3222 3869
3223 /* All seems OK, enable tracing */ 3870 /* All seems OK, enable tracing */
3224 global_trace.ctrl = tracer_enabled;
3225 tracing_disabled = 0; 3871 tracing_disabled = 0;
3226 3872
3227 atomic_notifier_chain_register(&panic_notifier_list, 3873 atomic_notifier_chain_register(&panic_notifier_list,