Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 1043
1 file changed, 853 insertions(+), 190 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d86e3252f300..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,38 @@
43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
44unsigned long __read_mostly tracing_thresh; 44unsigned long __read_mostly tracing_thresh;
45 45
46/*
47 * We need to change this state when a selftest is running.
48 * A selftest will lurk into the ring-buffer to count the
49 * entries inserted during the selftest, although some concurrent
50 * insertions into the ring-buffer, such as ftrace_printk, could occur
51 * at the same time, giving false positive or negative results.
52 */
53static bool __read_mostly tracing_selftest_running;
54
55/* For tracers that don't implement custom flags */
56static struct tracer_opt dummy_tracer_opt[] = {
57 { }
58};
59
60static struct tracer_flags dummy_tracer_flags = {
61 .val = 0,
62 .opts = dummy_tracer_opt
63};
64
65static int dummy_set_flag(u32 old_flags, u32 bit, int set)
66{
67 return 0;
68}
69
70/*
71 * Kill all tracing for good (never come back).
72 * It is initialized to 1 but will turn to zero if the initialization
73 * of the tracer is successful. But that is the only place that sets
74 * this back to zero.
75 */
76int tracing_disabled = 1;
77
46static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 78static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
47 79
48static inline void ftrace_disable_cpu(void) 80static inline void ftrace_disable_cpu(void)
@@ -57,12 +89,41 @@ static inline void ftrace_enable_cpu(void)
57 preempt_enable(); 89 preempt_enable();
58} 90}
59 91
60static cpumask_t __read_mostly tracing_buffer_mask; 92static cpumask_var_t __read_mostly tracing_buffer_mask;
61 93
62#define for_each_tracing_cpu(cpu) \ 94#define for_each_tracing_cpu(cpu) \
63 for_each_cpu_mask(cpu, tracing_buffer_mask) 95 for_each_cpu(cpu, tracing_buffer_mask)
96
97/*
98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
99 *
100 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
101 * is set, then ftrace_dump is called. This will output the contents
102 * of the ftrace buffers to the console. This is very useful for
103 * capturing traces that lead to crashes and outputting them to a
104 * serial console.
105 *
106 * It is off by default, but you can enable it either by specifying
107 * "ftrace_dump_on_oops" on the kernel command line, or by setting
108 * /proc/sys/kernel/ftrace_dump_on_oops to true.
109 */
110int ftrace_dump_on_oops;
64 111
65static int tracing_disabled = 1; 112static int tracing_set_tracer(char *buf);
113
114static int __init set_ftrace(char *str)
115{
116 tracing_set_tracer(str);
117 return 1;
118}
119__setup("ftrace", set_ftrace);
120
121static int __init set_ftrace_dump_on_oops(char *str)
122{
123 ftrace_dump_on_oops = 1;
124 return 1;
125}
126__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
66 127
67long 128long
68ns2usecs(cycle_t nsec) 129ns2usecs(cycle_t nsec)
@@ -112,6 +173,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
112/* tracer_enabled is used to toggle activation of a tracer */ 173/* tracer_enabled is used to toggle activation of a tracer */
113static int tracer_enabled = 1; 174static int tracer_enabled = 1;
114 175
176/**
177 * tracing_is_enabled - return tracer_enabled status
178 *
179 * This function is used by other tracers to know the status
180 * of the tracer_enabled flag. Tracers may use this function
181 * to know if it should enable their features when starting
182 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
183 */
184int tracing_is_enabled(void)
185{
186 return tracer_enabled;
187}
188
115/* function tracing enabled */ 189/* function tracing enabled */
116int ftrace_function_enabled; 190int ftrace_function_enabled;
117 191
@@ -153,8 +227,9 @@ static DEFINE_MUTEX(trace_types_lock);
153/* trace_wait is a waitqueue for tasks blocked on trace_poll */ 227/* trace_wait is a waitqueue for tasks blocked on trace_poll */
154static DECLARE_WAIT_QUEUE_HEAD(trace_wait); 228static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
155 229
156/* trace_flags holds iter_ctrl options */ 230/* trace_flags holds trace_options default values */
157unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 231unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
232 TRACE_ITER_ANNOTATE;
158 233
159/** 234/**
160 * trace_wake_up - wake up tasks waiting for trace input 235 * trace_wake_up - wake up tasks waiting for trace input
@@ -193,13 +268,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
193 return nsecs / 1000; 268 return nsecs / 1000;
194} 269}
195 270
196/*
197 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
198 * control the output of kernel symbols.
199 */
200#define TRACE_ITER_SYM_MASK \
201 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
202
203/* These must match the bit postions in trace_iterator_flags */ 271/* These must match the bit postions in trace_iterator_flags */
204static const char *trace_options[] = { 272static const char *trace_options[] = {
205 "print-parent", 273 "print-parent",
@@ -213,6 +281,12 @@ static const char *trace_options[] = {
213 "stacktrace", 281 "stacktrace",
214 "sched-tree", 282 "sched-tree",
215 "ftrace_printk", 283 "ftrace_printk",
284 "ftrace_preempt",
285 "branch",
286 "annotate",
287 "userstacktrace",
288 "sym-userobj",
289 "printk-msg-only",
216 NULL 290 NULL
217}; 291};
218 292
@@ -246,7 +320,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
246 320
247 memcpy(data->comm, tsk->comm, TASK_COMM_LEN); 321 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
248 data->pid = tsk->pid; 322 data->pid = tsk->pid;
249 data->uid = tsk->uid; 323 data->uid = task_uid(tsk);
250 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; 324 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
251 data->policy = tsk->policy; 325 data->policy = tsk->policy;
252 data->rt_priority = tsk->rt_priority; 326 data->rt_priority = tsk->rt_priority;
@@ -359,6 +433,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
359 return trace_seq_putmem(s, hex, j); 433 return trace_seq_putmem(s, hex, j);
360} 434}
361 435
436static int
437trace_seq_path(struct trace_seq *s, struct path *path)
438{
439 unsigned char *p;
440
441 if (s->len >= (PAGE_SIZE - 1))
442 return 0;
443 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
444 if (!IS_ERR(p)) {
445 p = mangle_path(s->buffer + s->len, p, "\n");
446 if (p) {
447 s->len = p - s->buffer;
448 return 1;
449 }
450 } else {
451 s->buffer[s->len++] = '?';
452 return 1;
453 }
454
455 return 0;
456}
457
362static void 458static void
363trace_seq_reset(struct trace_seq *s) 459trace_seq_reset(struct trace_seq *s)
364{ 460{
@@ -470,7 +566,17 @@ int register_tracer(struct tracer *type)
470 return -1; 566 return -1;
471 } 567 }
472 568
569 /*
570 * When this gets called we hold the BKL which means that
571 * preemption is disabled. Various trace selftests however
572 * need to disable and enable preemption for successful tests.
573 * So we drop the BKL here and grab it after the tests again.
574 */
575 unlock_kernel();
473 mutex_lock(&trace_types_lock); 576 mutex_lock(&trace_types_lock);
577
578 tracing_selftest_running = true;
579
474 for (t = trace_types; t; t = t->next) { 580 for (t = trace_types; t; t = t->next) {
475 if (strcmp(type->name, t->name) == 0) { 581 if (strcmp(type->name, t->name) == 0) {
476 /* already found */ 582 /* already found */
@@ -481,12 +587,20 @@ int register_tracer(struct tracer *type)
481 } 587 }
482 } 588 }
483 589
590 if (!type->set_flag)
591 type->set_flag = &dummy_set_flag;
592 if (!type->flags)
593 type->flags = &dummy_tracer_flags;
594 else
595 if (!type->flags->opts)
596 type->flags->opts = dummy_tracer_opt;
597
484#ifdef CONFIG_FTRACE_STARTUP_TEST 598#ifdef CONFIG_FTRACE_STARTUP_TEST
485 if (type->selftest) { 599 if (type->selftest) {
486 struct tracer *saved_tracer = current_trace; 600 struct tracer *saved_tracer = current_trace;
487 struct trace_array *tr = &global_trace; 601 struct trace_array *tr = &global_trace;
488 int saved_ctrl = tr->ctrl;
489 int i; 602 int i;
603
490 /* 604 /*
491 * Run a selftest on this tracer. 605 * Run a selftest on this tracer.
492 * Here we reset the trace buffer, and set the current 606 * Here we reset the trace buffer, and set the current
@@ -494,25 +608,23 @@ int register_tracer(struct tracer *type)
494 * internal tracing to verify that everything is in order. 608 * internal tracing to verify that everything is in order.
495 * If we fail, we do not register this tracer. 609 * If we fail, we do not register this tracer.
496 */ 610 */
497 for_each_tracing_cpu(i) { 611 for_each_tracing_cpu(i)
498 tracing_reset(tr, i); 612 tracing_reset(tr, i);
499 } 613
500 current_trace = type; 614 current_trace = type;
501 tr->ctrl = 0;
502 /* the test is responsible for initializing and enabling */ 615 /* the test is responsible for initializing and enabling */
503 pr_info("Testing tracer %s: ", type->name); 616 pr_info("Testing tracer %s: ", type->name);
504 ret = type->selftest(type, tr); 617 ret = type->selftest(type, tr);
505 /* the test is responsible for resetting too */ 618 /* the test is responsible for resetting too */
506 current_trace = saved_tracer; 619 current_trace = saved_tracer;
507 tr->ctrl = saved_ctrl;
508 if (ret) { 620 if (ret) {
509 printk(KERN_CONT "FAILED!\n"); 621 printk(KERN_CONT "FAILED!\n");
510 goto out; 622 goto out;
511 } 623 }
512 /* Only reset on passing, to avoid touching corrupted buffers */ 624 /* Only reset on passing, to avoid touching corrupted buffers */
513 for_each_tracing_cpu(i) { 625 for_each_tracing_cpu(i)
514 tracing_reset(tr, i); 626 tracing_reset(tr, i);
515 } 627
516 printk(KERN_CONT "PASSED\n"); 628 printk(KERN_CONT "PASSED\n");
517 } 629 }
518#endif 630#endif
@@ -524,7 +636,9 @@ int register_tracer(struct tracer *type)
524 max_tracer_type_len = len; 636 max_tracer_type_len = len;
525 637
526 out: 638 out:
639 tracing_selftest_running = false;
527 mutex_unlock(&trace_types_lock); 640 mutex_unlock(&trace_types_lock);
641 lock_kernel();
528 642
529 return ret; 643 return ret;
530} 644}
@@ -564,6 +678,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
564 ftrace_enable_cpu(); 678 ftrace_enable_cpu();
565} 679}
566 680
681void tracing_reset_online_cpus(struct trace_array *tr)
682{
683 int cpu;
684
685 tr->time_start = ftrace_now(tr->cpu);
686
687 for_each_online_cpu(cpu)
688 tracing_reset(tr, cpu);
689}
690
567#define SAVED_CMDLINES 128 691#define SAVED_CMDLINES 128
568static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 692static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
569static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 693static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -581,6 +705,91 @@ static void trace_init_cmdlines(void)
581 cmdline_idx = 0; 705 cmdline_idx = 0;
582} 706}
583 707
708static int trace_stop_count;
709static DEFINE_SPINLOCK(tracing_start_lock);
710
711/**
712 * ftrace_off_permanent - disable all ftrace code permanently
713 *
714 * This should only be called when a serious anomaly has
715 * been detected. This will turn off function tracing,
716 * ring buffers, and other tracing utilities. It takes no
717 * locks and can be called from any context.
718 */
719void ftrace_off_permanent(void)
720{
721 tracing_disabled = 1;
722 ftrace_stop();
723 tracing_off_permanent();
724}
725
726/**
727 * tracing_start - quick start of the tracer
728 *
729 * If tracing is enabled but was stopped by tracing_stop,
730 * this will start the tracer back up.
731 */
732void tracing_start(void)
733{
734 struct ring_buffer *buffer;
735 unsigned long flags;
736
737 if (tracing_disabled)
738 return;
739
740 spin_lock_irqsave(&tracing_start_lock, flags);
741 if (--trace_stop_count)
742 goto out;
743
744 if (trace_stop_count < 0) {
745 /* Someone screwed up their debugging */
746 WARN_ON_ONCE(1);
747 trace_stop_count = 0;
748 goto out;
749 }
750
751
752 buffer = global_trace.buffer;
753 if (buffer)
754 ring_buffer_record_enable(buffer);
755
756 buffer = max_tr.buffer;
757 if (buffer)
758 ring_buffer_record_enable(buffer);
759
760 ftrace_start();
761 out:
762 spin_unlock_irqrestore(&tracing_start_lock, flags);
763}
764
765/**
766 * tracing_stop - quick stop of the tracer
767 *
768 * Lightweight way to stop tracing. Use in conjunction with
769 * tracing_start.
770 */
771void tracing_stop(void)
772{
773 struct ring_buffer *buffer;
774 unsigned long flags;
775
776 ftrace_stop();
777 spin_lock_irqsave(&tracing_start_lock, flags);
778 if (trace_stop_count++)
779 goto out;
780
781 buffer = global_trace.buffer;
782 if (buffer)
783 ring_buffer_record_disable(buffer);
784
785 buffer = max_tr.buffer;
786 if (buffer)
787 ring_buffer_record_disable(buffer);
788
789 out:
790 spin_unlock_irqrestore(&tracing_start_lock, flags);
791}
792
584void trace_stop_cmdline_recording(void); 793void trace_stop_cmdline_recording(void);
585 794
586static void trace_save_cmdline(struct task_struct *tsk) 795static void trace_save_cmdline(struct task_struct *tsk)
@@ -618,7 +827,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
618 spin_unlock(&trace_cmdline_lock); 827 spin_unlock(&trace_cmdline_lock);
619} 828}
620 829
621static char *trace_find_cmdline(int pid) 830char *trace_find_cmdline(int pid)
622{ 831{
623 char *cmdline = "<...>"; 832 char *cmdline = "<...>";
624 unsigned map; 833 unsigned map;
@@ -655,6 +864,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
655 864
656 entry->preempt_count = pc & 0xff; 865 entry->preempt_count = pc & 0xff;
657 entry->pid = (tsk) ? tsk->pid : 0; 866 entry->pid = (tsk) ? tsk->pid : 0;
867 entry->tgid = (tsk) ? tsk->tgid : 0;
658 entry->flags = 868 entry->flags =
659#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 869#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
660 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 870 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -691,6 +901,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
691 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 901 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
692} 902}
693 903
904#ifdef CONFIG_FUNCTION_GRAPH_TRACER
905static void __trace_graph_entry(struct trace_array *tr,
906 struct trace_array_cpu *data,
907 struct ftrace_graph_ent *trace,
908 unsigned long flags,
909 int pc)
910{
911 struct ring_buffer_event *event;
912 struct ftrace_graph_ent_entry *entry;
913 unsigned long irq_flags;
914
915 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
916 return;
917
918 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
919 &irq_flags);
920 if (!event)
921 return;
922 entry = ring_buffer_event_data(event);
923 tracing_generic_entry_update(&entry->ent, flags, pc);
924 entry->ent.type = TRACE_GRAPH_ENT;
925 entry->graph_ent = *trace;
926 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
927}
928
929static void __trace_graph_return(struct trace_array *tr,
930 struct trace_array_cpu *data,
931 struct ftrace_graph_ret *trace,
932 unsigned long flags,
933 int pc)
934{
935 struct ring_buffer_event *event;
936 struct ftrace_graph_ret_entry *entry;
937 unsigned long irq_flags;
938
939 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
940 return;
941
942 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
943 &irq_flags);
944 if (!event)
945 return;
946 entry = ring_buffer_event_data(event);
947 tracing_generic_entry_update(&entry->ent, flags, pc);
948 entry->ent.type = TRACE_GRAPH_RET;
949 entry->ret = *trace;
950 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
951}
952#endif
953
694void 954void
695ftrace(struct trace_array *tr, struct trace_array_cpu *data, 955ftrace(struct trace_array *tr, struct trace_array_cpu *data,
696 unsigned long ip, unsigned long parent_ip, unsigned long flags, 956 unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -742,6 +1002,46 @@ void __trace_stack(struct trace_array *tr,
742 ftrace_trace_stack(tr, data, flags, skip, preempt_count()); 1002 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
743} 1003}
744 1004
1005static void ftrace_trace_userstack(struct trace_array *tr,
1006 struct trace_array_cpu *data,
1007 unsigned long flags, int pc)
1008{
1009#ifdef CONFIG_STACKTRACE
1010 struct ring_buffer_event *event;
1011 struct userstack_entry *entry;
1012 struct stack_trace trace;
1013 unsigned long irq_flags;
1014
1015 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1016 return;
1017
1018 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1019 &irq_flags);
1020 if (!event)
1021 return;
1022 entry = ring_buffer_event_data(event);
1023 tracing_generic_entry_update(&entry->ent, flags, pc);
1024 entry->ent.type = TRACE_USER_STACK;
1025
1026 memset(&entry->caller, 0, sizeof(entry->caller));
1027
1028 trace.nr_entries = 0;
1029 trace.max_entries = FTRACE_STACK_ENTRIES;
1030 trace.skip = 0;
1031 trace.entries = entry->caller;
1032
1033 save_stack_trace_user(&trace);
1034 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1035#endif
1036}
1037
1038void __trace_userstack(struct trace_array *tr,
1039 struct trace_array_cpu *data,
1040 unsigned long flags)
1041{
1042 ftrace_trace_userstack(tr, data, flags, preempt_count());
1043}
1044
745static void 1045static void
746ftrace_trace_special(void *__tr, void *__data, 1046ftrace_trace_special(void *__tr, void *__data,
747 unsigned long arg1, unsigned long arg2, unsigned long arg3, 1047 unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -765,6 +1065,7 @@ ftrace_trace_special(void *__tr, void *__data,
765 entry->arg3 = arg3; 1065 entry->arg3 = arg3;
766 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1066 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
767 ftrace_trace_stack(tr, data, irq_flags, 4, pc); 1067 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
1068 ftrace_trace_userstack(tr, data, irq_flags, pc);
768 1069
769 trace_wake_up(); 1070 trace_wake_up();
770} 1071}
@@ -803,6 +1104,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
803 entry->next_cpu = task_cpu(next); 1104 entry->next_cpu = task_cpu(next);
804 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1105 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
805 ftrace_trace_stack(tr, data, flags, 5, pc); 1106 ftrace_trace_stack(tr, data, flags, 5, pc);
1107 ftrace_trace_userstack(tr, data, flags, pc);
806} 1108}
807 1109
808void 1110void
@@ -832,6 +1134,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
832 entry->next_cpu = task_cpu(wakee); 1134 entry->next_cpu = task_cpu(wakee);
833 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
834 ftrace_trace_stack(tr, data, flags, 6, pc); 1136 ftrace_trace_stack(tr, data, flags, 6, pc);
1137 ftrace_trace_userstack(tr, data, flags, pc);
835 1138
836 trace_wake_up(); 1139 trace_wake_up();
837} 1140}
@@ -841,26 +1144,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
841{ 1144{
842 struct trace_array *tr = &global_trace; 1145 struct trace_array *tr = &global_trace;
843 struct trace_array_cpu *data; 1146 struct trace_array_cpu *data;
1147 unsigned long flags;
844 int cpu; 1148 int cpu;
845 int pc; 1149 int pc;
846 1150
847 if (tracing_disabled || !tr->ctrl) 1151 if (tracing_disabled)
848 return; 1152 return;
849 1153
850 pc = preempt_count(); 1154 pc = preempt_count();
851 preempt_disable_notrace(); 1155 local_irq_save(flags);
852 cpu = raw_smp_processor_id(); 1156 cpu = raw_smp_processor_id();
853 data = tr->data[cpu]; 1157 data = tr->data[cpu];
854 1158
855 if (likely(!atomic_read(&data->disabled))) 1159 if (likely(atomic_inc_return(&data->disabled) == 1))
856 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); 1160 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
857 1161
858 preempt_enable_notrace(); 1162 atomic_dec(&data->disabled);
1163 local_irq_restore(flags);
859} 1164}
860 1165
861#ifdef CONFIG_FUNCTION_TRACER 1166#ifdef CONFIG_FUNCTION_TRACER
862static void 1167static void
863function_trace_call(unsigned long ip, unsigned long parent_ip) 1168function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
864{ 1169{
865 struct trace_array *tr = &global_trace; 1170 struct trace_array *tr = &global_trace;
866 struct trace_array_cpu *data; 1171 struct trace_array_cpu *data;
@@ -873,8 +1178,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
873 return; 1178 return;
874 1179
875 pc = preempt_count(); 1180 pc = preempt_count();
876 resched = need_resched(); 1181 resched = ftrace_preempt_disable();
877 preempt_disable_notrace();
878 local_save_flags(flags); 1182 local_save_flags(flags);
879 cpu = raw_smp_processor_id(); 1183 cpu = raw_smp_processor_id();
880 data = tr->data[cpu]; 1184 data = tr->data[cpu];
@@ -884,11 +1188,96 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
884 trace_function(tr, data, ip, parent_ip, flags, pc); 1188 trace_function(tr, data, ip, parent_ip, flags, pc);
885 1189
886 atomic_dec(&data->disabled); 1190 atomic_dec(&data->disabled);
887 if (resched) 1191 ftrace_preempt_enable(resched);
888 preempt_enable_no_resched_notrace(); 1192}
889 else 1193
890 preempt_enable_notrace(); 1194static void
1195function_trace_call(unsigned long ip, unsigned long parent_ip)
1196{
1197 struct trace_array *tr = &global_trace;
1198 struct trace_array_cpu *data;
1199 unsigned long flags;
1200 long disabled;
1201 int cpu;
1202 int pc;
1203
1204 if (unlikely(!ftrace_function_enabled))
1205 return;
1206
1207 /*
1208 * Need to use raw, since this must be called before the
1209 * recursive protection is performed.
1210 */
1211 local_irq_save(flags);
1212 cpu = raw_smp_processor_id();
1213 data = tr->data[cpu];
1214 disabled = atomic_inc_return(&data->disabled);
1215
1216 if (likely(disabled == 1)) {
1217 pc = preempt_count();
1218 trace_function(tr, data, ip, parent_ip, flags, pc);
1219 }
1220
1221 atomic_dec(&data->disabled);
1222 local_irq_restore(flags);
1223}
1224
1225#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1226int trace_graph_entry(struct ftrace_graph_ent *trace)
1227{
1228 struct trace_array *tr = &global_trace;
1229 struct trace_array_cpu *data;
1230 unsigned long flags;
1231 long disabled;
1232 int cpu;
1233 int pc;
1234
1235 if (!ftrace_trace_task(current))
1236 return 0;
1237
1238 if (!ftrace_graph_addr(trace->func))
1239 return 0;
1240
1241 local_irq_save(flags);
1242 cpu = raw_smp_processor_id();
1243 data = tr->data[cpu];
1244 disabled = atomic_inc_return(&data->disabled);
1245 if (likely(disabled == 1)) {
1246 pc = preempt_count();
1247 __trace_graph_entry(tr, data, trace, flags, pc);
1248 }
1249 /* Only do the atomic if it is not already set */
1250 if (!test_tsk_trace_graph(current))
1251 set_tsk_trace_graph(current);
1252 atomic_dec(&data->disabled);
1253 local_irq_restore(flags);
1254
1255 return 1;
1256}
1257
1258void trace_graph_return(struct ftrace_graph_ret *trace)
1259{
1260 struct trace_array *tr = &global_trace;
1261 struct trace_array_cpu *data;
1262 unsigned long flags;
1263 long disabled;
1264 int cpu;
1265 int pc;
1266
1267 local_irq_save(flags);
1268 cpu = raw_smp_processor_id();
1269 data = tr->data[cpu];
1270 disabled = atomic_inc_return(&data->disabled);
1271 if (likely(disabled == 1)) {
1272 pc = preempt_count();
1273 __trace_graph_return(tr, data, trace, flags, pc);
1274 }
1275 if (!trace->depth)
1276 clear_tsk_trace_graph(current);
1277 atomic_dec(&data->disabled);
1278 local_irq_restore(flags);
891} 1279}
1280#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
892 1281
893static struct ftrace_ops trace_ops __read_mostly = 1282static struct ftrace_ops trace_ops __read_mostly =
894{ 1283{
@@ -898,9 +1287,14 @@ static struct ftrace_ops trace_ops __read_mostly =
898void tracing_start_function_trace(void) 1287void tracing_start_function_trace(void)
899{ 1288{
900 ftrace_function_enabled = 0; 1289 ftrace_function_enabled = 0;
1290
1291 if (trace_flags & TRACE_ITER_PREEMPTONLY)
1292 trace_ops.func = function_trace_call_preempt_only;
1293 else
1294 trace_ops.func = function_trace_call;
1295
901 register_ftrace_function(&trace_ops); 1296 register_ftrace_function(&trace_ops);
902 if (tracer_enabled) 1297 ftrace_function_enabled = 1;
903 ftrace_function_enabled = 1;
904} 1298}
905 1299
906void tracing_stop_function_trace(void) 1300void tracing_stop_function_trace(void)
@@ -912,9 +1306,10 @@ void tracing_stop_function_trace(void)
912 1306
913enum trace_file_type { 1307enum trace_file_type {
914 TRACE_FILE_LAT_FMT = 1, 1308 TRACE_FILE_LAT_FMT = 1,
1309 TRACE_FILE_ANNOTATE = 2,
915}; 1310};
916 1311
917static void trace_iterator_increment(struct trace_iterator *iter, int cpu) 1312static void trace_iterator_increment(struct trace_iterator *iter)
918{ 1313{
919 /* Don't allow ftrace to trace into the ring buffers */ 1314 /* Don't allow ftrace to trace into the ring buffers */
920 ftrace_disable_cpu(); 1315 ftrace_disable_cpu();
@@ -993,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
993 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); 1388 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
994 1389
995 if (iter->ent) 1390 if (iter->ent)
996 trace_iterator_increment(iter, iter->cpu); 1391 trace_iterator_increment(iter);
997 1392
998 return iter->ent ? iter : NULL; 1393 return iter->ent ? iter : NULL;
999} 1394}
@@ -1047,10 +1442,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1047 1442
1048 atomic_inc(&trace_record_cmdline_disabled); 1443 atomic_inc(&trace_record_cmdline_disabled);
1049 1444
1050 /* let the tracer grab locks here if needed */
1051 if (current_trace->start)
1052 current_trace->start(iter);
1053
1054 if (*pos != iter->pos) { 1445 if (*pos != iter->pos) {
1055 iter->ent = NULL; 1446 iter->ent = NULL;
1056 iter->cpu = 0; 1447 iter->cpu = 0;
@@ -1077,14 +1468,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1077 1468
1078static void s_stop(struct seq_file *m, void *p) 1469static void s_stop(struct seq_file *m, void *p)
1079{ 1470{
1080 struct trace_iterator *iter = m->private;
1081
1082 atomic_dec(&trace_record_cmdline_disabled); 1471 atomic_dec(&trace_record_cmdline_disabled);
1083
1084 /* let the tracer release locks here if needed */
1085 if (current_trace && current_trace == iter->trace && iter->trace->stop)
1086 iter->trace->stop(iter);
1087
1088 mutex_unlock(&trace_types_lock); 1472 mutex_unlock(&trace_types_lock);
1089} 1473}
1090 1474
@@ -1143,7 +1527,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1143# define IP_FMT "%016lx" 1527# define IP_FMT "%016lx"
1144#endif 1528#endif
1145 1529
1146static int 1530int
1147seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) 1531seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1148{ 1532{
1149 int ret; 1533 int ret;
@@ -1164,6 +1548,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1164 return ret; 1548 return ret;
1165} 1549}
1166 1550
1551static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
1552 unsigned long ip, unsigned long sym_flags)
1553{
1554 struct file *file = NULL;
1555 unsigned long vmstart = 0;
1556 int ret = 1;
1557
1558 if (mm) {
1559 const struct vm_area_struct *vma;
1560
1561 down_read(&mm->mmap_sem);
1562 vma = find_vma(mm, ip);
1563 if (vma) {
1564 file = vma->vm_file;
1565 vmstart = vma->vm_start;
1566 }
1567 if (file) {
1568 ret = trace_seq_path(s, &file->f_path);
1569 if (ret)
1570 ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
1571 }
1572 up_read(&mm->mmap_sem);
1573 }
1574 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
1575 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1576 return ret;
1577}
1578
1579static int
1580seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
1581 unsigned long sym_flags)
1582{
1583 struct mm_struct *mm = NULL;
1584 int ret = 1;
1585 unsigned int i;
1586
1587 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
1588 struct task_struct *task;
1589 /*
1590 * we do the lookup on the thread group leader,
1591 * since individual threads might have already quit!
1592 */
1593 rcu_read_lock();
1594 task = find_task_by_vpid(entry->ent.tgid);
1595 if (task)
1596 mm = get_task_mm(task);
1597 rcu_read_unlock();
1598 }
1599
1600 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1601 unsigned long ip = entry->caller[i];
1602
1603 if (ip == ULONG_MAX || !ret)
1604 break;
1605 if (i && ret)
1606 ret = trace_seq_puts(s, " <- ");
1607 if (!ip) {
1608 if (ret)
1609 ret = trace_seq_puts(s, "??");
1610 continue;
1611 }
1612 if (!ret)
1613 break;
1614 if (ret)
1615 ret = seq_print_user_ip(s, mm, ip, sym_flags);
1616 }
1617
1618 if (mm)
1619 mmput(mm);
1620 return ret;
1621}
1622
1167static void print_lat_help_header(struct seq_file *m) 1623static void print_lat_help_header(struct seq_file *m)
1168{ 1624{
1169 seq_puts(m, "# _------=> CPU# \n"); 1625 seq_puts(m, "# _------=> CPU# \n");
@@ -1301,6 +1757,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1301 1757
1302static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1758static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1303 1759
1760static int task_state_char(unsigned long state)
1761{
1762 int bit = state ? __ffs(state) + 1 : 0;
1763
1764 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1765}
1766
1304/* 1767/*
1305 * The message is supposed to contain an ending newline. 1768 * The message is supposed to contain an ending newline.
1306 * If the printing stops prematurely, try to add a newline of our own. 1769 * If the printing stops prematurely, try to add a newline of our own.
@@ -1338,6 +1801,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1338 trace_seq_putc(s, '\n'); 1801 trace_seq_putc(s, '\n');
1339} 1802}
1340 1803
1804static void test_cpu_buff_start(struct trace_iterator *iter)
1805{
1806 struct trace_seq *s = &iter->seq;
1807
1808 if (!(trace_flags & TRACE_ITER_ANNOTATE))
1809 return;
1810
1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1812 return;
1813
1814 if (cpumask_test_cpu(iter->cpu, iter->started))
1815 return;
1816
1817 cpumask_set_cpu(iter->cpu, iter->started);
1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1819}
1820
1341static enum print_line_t 1821static enum print_line_t
1342print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1822print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1343{ 1823{
@@ -1352,11 +1832,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1352 char *comm; 1832 char *comm;
1353 int S, T; 1833 int S, T;
1354 int i; 1834 int i;
1355 unsigned state;
1356 1835
1357 if (entry->type == TRACE_CONT) 1836 if (entry->type == TRACE_CONT)
1358 return TRACE_TYPE_HANDLED; 1837 return TRACE_TYPE_HANDLED;
1359 1838
1839 test_cpu_buff_start(iter);
1840
1360 next_entry = find_next_entry(iter, NULL, &next_ts); 1841 next_entry = find_next_entry(iter, NULL, &next_ts);
1361 if (!next_entry) 1842 if (!next_entry)
1362 next_ts = iter->ts; 1843 next_ts = iter->ts;
@@ -1396,12 +1877,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1396 1877
1397 trace_assign_type(field, entry); 1878 trace_assign_type(field, entry);
1398 1879
1399 T = field->next_state < sizeof(state_to_char) ? 1880 T = task_state_char(field->next_state);
1400 state_to_char[field->next_state] : 'X'; 1881 S = task_state_char(field->prev_state);
1401
1402 state = field->prev_state ?
1403 __ffs(field->prev_state) + 1 : 0;
1404 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1405 comm = trace_find_cmdline(field->next_pid); 1882 comm = trace_find_cmdline(field->next_pid);
1406 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", 1883 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1407 field->prev_pid, 1884 field->prev_pid,
@@ -1448,6 +1925,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1448 trace_seq_print_cont(s, iter); 1925 trace_seq_print_cont(s, iter);
1449 break; 1926 break;
1450 } 1927 }
1928 case TRACE_BRANCH: {
1929 struct trace_branch *field;
1930
1931 trace_assign_type(field, entry);
1932
1933 trace_seq_printf(s, "[%s] %s:%s:%d\n",
1934 field->correct ? " ok " : " MISS ",
1935 field->func,
1936 field->file,
1937 field->line);
1938 break;
1939 }
1940 case TRACE_USER_STACK: {
1941 struct userstack_entry *field;
1942
1943 trace_assign_type(field, entry);
1944
1945 seq_print_userip_objs(field, s, sym_flags);
1946 trace_seq_putc(s, '\n');
1947 break;
1948 }
1451 default: 1949 default:
1452 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1950 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1453 } 1951 }
@@ -1472,6 +1970,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1472 if (entry->type == TRACE_CONT) 1970 if (entry->type == TRACE_CONT)
1473 return TRACE_TYPE_HANDLED; 1971 return TRACE_TYPE_HANDLED;
1474 1972
1973 test_cpu_buff_start(iter);
1974
1475 comm = trace_find_cmdline(iter->ent->pid); 1975 comm = trace_find_cmdline(iter->ent->pid);
1476 1976
1477 t = ns2usecs(iter->ts); 1977 t = ns2usecs(iter->ts);
@@ -1519,10 +2019,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1519 2019
1520 trace_assign_type(field, entry); 2020 trace_assign_type(field, entry);
1521 2021
1522 S = field->prev_state < sizeof(state_to_char) ? 2022 T = task_state_char(field->next_state);
1523 state_to_char[field->prev_state] : 'X'; 2023 S = task_state_char(field->prev_state);
1524 T = field->next_state < sizeof(state_to_char) ?
1525 state_to_char[field->next_state] : 'X';
1526 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", 2024 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1527 field->prev_pid, 2025 field->prev_pid,
1528 field->prev_prio, 2026 field->prev_prio,
@@ -1581,6 +2079,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1581 trace_seq_print_cont(s, iter); 2079 trace_seq_print_cont(s, iter);
1582 break; 2080 break;
1583 } 2081 }
2082 case TRACE_GRAPH_RET: {
2083 return print_graph_function(iter);
2084 }
2085 case TRACE_GRAPH_ENT: {
2086 return print_graph_function(iter);
2087 }
2088 case TRACE_BRANCH: {
2089 struct trace_branch *field;
2090
2091 trace_assign_type(field, entry);
2092
2093 trace_seq_printf(s, "[%s] %s:%s:%d\n",
2094 field->correct ? " ok " : " MISS ",
2095 field->func,
2096 field->file,
2097 field->line);
2098 break;
2099 }
2100 case TRACE_USER_STACK: {
2101 struct userstack_entry *field;
2102
2103 trace_assign_type(field, entry);
2104
2105 ret = seq_print_userip_objs(field, s, sym_flags);
2106 if (!ret)
2107 return TRACE_TYPE_PARTIAL_LINE;
2108 ret = trace_seq_putc(s, '\n');
2109 if (!ret)
2110 return TRACE_TYPE_PARTIAL_LINE;
2111 break;
2112 }
1584 } 2113 }
1585 return TRACE_TYPE_HANDLED; 2114 return TRACE_TYPE_HANDLED;
1586} 2115}
@@ -1621,12 +2150,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1621 2150
1622 trace_assign_type(field, entry); 2151 trace_assign_type(field, entry);
1623 2152
1624 S = field->prev_state < sizeof(state_to_char) ? 2153 T = task_state_char(field->next_state);
1625 state_to_char[field->prev_state] : 'X'; 2154 S = entry->type == TRACE_WAKE ? '+' :
1626 T = field->next_state < sizeof(state_to_char) ? 2155 task_state_char(field->prev_state);
1627 state_to_char[field->next_state] : 'X';
1628 if (entry->type == TRACE_WAKE)
1629 S = '+';
1630 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", 2156 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1631 field->prev_pid, 2157 field->prev_pid,
1632 field->prev_prio, 2158 field->prev_prio,
@@ -1640,6 +2166,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1640 break; 2166 break;
1641 } 2167 }
1642 case TRACE_SPECIAL: 2168 case TRACE_SPECIAL:
2169 case TRACE_USER_STACK:
1643 case TRACE_STACK: { 2170 case TRACE_STACK: {
1644 struct special_entry *field; 2171 struct special_entry *field;
1645 2172
@@ -1712,12 +2239,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1712 2239
1713 trace_assign_type(field, entry); 2240 trace_assign_type(field, entry);
1714 2241
1715 S = field->prev_state < sizeof(state_to_char) ? 2242 T = task_state_char(field->next_state);
1716 state_to_char[field->prev_state] : 'X'; 2243 S = entry->type == TRACE_WAKE ? '+' :
1717 T = field->next_state < sizeof(state_to_char) ? 2244 task_state_char(field->prev_state);
1718 state_to_char[field->next_state] : 'X';
1719 if (entry->type == TRACE_WAKE)
1720 S = '+';
1721 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 2245 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1722 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); 2246 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1723 SEQ_PUT_HEX_FIELD_RET(s, S); 2247 SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -1728,6 +2252,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1728 break; 2252 break;
1729 } 2253 }
1730 case TRACE_SPECIAL: 2254 case TRACE_SPECIAL:
2255 case TRACE_USER_STACK:
1731 case TRACE_STACK: { 2256 case TRACE_STACK: {
1732 struct special_entry *field; 2257 struct special_entry *field;
1733 2258
@@ -1744,6 +2269,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1744 return TRACE_TYPE_HANDLED; 2269 return TRACE_TYPE_HANDLED;
1745} 2270}
1746 2271
2272static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
2273{
2274 struct trace_seq *s = &iter->seq;
2275 struct trace_entry *entry = iter->ent;
2276 struct print_entry *field;
2277 int ret;
2278
2279 trace_assign_type(field, entry);
2280
2281 ret = trace_seq_printf(s, field->buf);
2282 if (!ret)
2283 return TRACE_TYPE_PARTIAL_LINE;
2284
2285 if (entry->flags & TRACE_FLAG_CONT)
2286 trace_seq_print_cont(s, iter);
2287
2288 return TRACE_TYPE_HANDLED;
2289}
2290
1747static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2291static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1748{ 2292{
1749 struct trace_seq *s = &iter->seq; 2293 struct trace_seq *s = &iter->seq;
@@ -1782,6 +2326,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1782 break; 2326 break;
1783 } 2327 }
1784 case TRACE_SPECIAL: 2328 case TRACE_SPECIAL:
2329 case TRACE_USER_STACK:
1785 case TRACE_STACK: { 2330 case TRACE_STACK: {
1786 struct special_entry *field; 2331 struct special_entry *field;
1787 2332
@@ -1823,6 +2368,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
1823 return ret; 2368 return ret;
1824 } 2369 }
1825 2370
2371 if (iter->ent->type == TRACE_PRINT &&
2372 trace_flags & TRACE_ITER_PRINTK &&
2373 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2374 return print_printk_msg_only(iter);
2375
1826 if (trace_flags & TRACE_ITER_BIN) 2376 if (trace_flags & TRACE_ITER_BIN)
1827 return print_bin_fmt(iter); 2377 return print_bin_fmt(iter);
1828 2378
@@ -1847,7 +2397,9 @@ static int s_show(struct seq_file *m, void *v)
1847 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2397 seq_printf(m, "# tracer: %s\n", iter->trace->name);
1848 seq_puts(m, "#\n"); 2398 seq_puts(m, "#\n");
1849 } 2399 }
1850 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2400 if (iter->trace && iter->trace->print_header)
2401 iter->trace->print_header(m);
2402 else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1851 /* print nothing if the buffers are empty */ 2403 /* print nothing if the buffers are empty */
1852 if (trace_empty(iter)) 2404 if (trace_empty(iter))
1853 return 0; 2405 return 0;
@@ -1899,6 +2451,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1899 iter->trace = current_trace; 2451 iter->trace = current_trace;
1900 iter->pos = -1; 2452 iter->pos = -1;
1901 2453
2454 /* Notify the tracer early; before we stop tracing. */
2455 if (iter->trace && iter->trace->open)
2456 iter->trace->open(iter);
2457
2458 /* Annotate start of buffers if we had overruns */
2459 if (ring_buffer_overruns(iter->tr->buffer))
2460 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2461
2462
1902 for_each_tracing_cpu(cpu) { 2463 for_each_tracing_cpu(cpu) {
1903 2464
1904 iter->buffer_iter[cpu] = 2465 iter->buffer_iter[cpu] =
@@ -1917,13 +2478,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1917 m->private = iter; 2478 m->private = iter;
1918 2479
1919 /* stop the trace while dumping */ 2480 /* stop the trace while dumping */
1920 if (iter->tr->ctrl) { 2481 tracing_stop();
1921 tracer_enabled = 0;
1922 ftrace_function_enabled = 0;
1923 }
1924
1925 if (iter->trace && iter->trace->open)
1926 iter->trace->open(iter);
1927 2482
1928 mutex_unlock(&trace_types_lock); 2483 mutex_unlock(&trace_types_lock);
1929 2484
@@ -1966,14 +2521,7 @@ int tracing_release(struct inode *inode, struct file *file)
1966 iter->trace->close(iter); 2521 iter->trace->close(iter);
1967 2522
1968 /* reenable tracing if it was previously enabled */ 2523 /* reenable tracing if it was previously enabled */
1969 if (iter->tr->ctrl) { 2524 tracing_start();
1970 tracer_enabled = 1;
1971 /*
1972 * It is safe to enable function tracing even if it
1973 * isn't used
1974 */
1975 ftrace_function_enabled = 1;
1976 }
1977 mutex_unlock(&trace_types_lock); 2525 mutex_unlock(&trace_types_lock);
1978 2526
1979 seq_release(inode, file); 2527 seq_release(inode, file);
@@ -2098,13 +2646,7 @@ static struct file_operations show_traces_fops = {
2098/* 2646/*
2099 * Only trace on a CPU if the bitmask is set: 2647 * Only trace on a CPU if the bitmask is set:
2100 */ 2648 */
2101static cpumask_t tracing_cpumask = CPU_MASK_ALL; 2649static cpumask_var_t tracing_cpumask;
2102
2103/*
2104 * When tracing/tracing_cpu_mask is modified then this holds
2105 * the new bitmask we are about to install:
2106 */
2107static cpumask_t tracing_cpumask_new;
2108 2650
2109/* 2651/*
2110 * The tracer itself will not take this lock, but still we want 2652 * The tracer itself will not take this lock, but still we want
@@ -2145,39 +2687,45 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2145 size_t count, loff_t *ppos) 2687 size_t count, loff_t *ppos)
2146{ 2688{
2147 int err, cpu; 2689 int err, cpu;
2690 cpumask_var_t tracing_cpumask_new;
2691
2692 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2693 return -ENOMEM;
2148 2694
2149 mutex_lock(&tracing_cpumask_update_lock); 2695 mutex_lock(&tracing_cpumask_update_lock);
2150 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2696 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2151 if (err) 2697 if (err)
2152 goto err_unlock; 2698 goto err_unlock;
2153 2699
2154 raw_local_irq_disable(); 2700 local_irq_disable();
2155 __raw_spin_lock(&ftrace_max_lock); 2701 __raw_spin_lock(&ftrace_max_lock);
2156 for_each_tracing_cpu(cpu) { 2702 for_each_tracing_cpu(cpu) {
2157 /* 2703 /*
2158 * Increase/decrease the disabled counter if we are 2704 * Increase/decrease the disabled counter if we are
2159 * about to flip a bit in the cpumask: 2705 * about to flip a bit in the cpumask:
2160 */ 2706 */
2161 if (cpu_isset(cpu, tracing_cpumask) && 2707 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2162 !cpu_isset(cpu, tracing_cpumask_new)) { 2708 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2163 atomic_inc(&global_trace.data[cpu]->disabled); 2709 atomic_inc(&global_trace.data[cpu]->disabled);
2164 } 2710 }
2165 if (!cpu_isset(cpu, tracing_cpumask) && 2711 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2166 cpu_isset(cpu, tracing_cpumask_new)) { 2712 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2167 atomic_dec(&global_trace.data[cpu]->disabled); 2713 atomic_dec(&global_trace.data[cpu]->disabled);
2168 } 2714 }
2169 } 2715 }
2170 __raw_spin_unlock(&ftrace_max_lock); 2716 __raw_spin_unlock(&ftrace_max_lock);
2171 raw_local_irq_enable(); 2717 local_irq_enable();
2172 2718
2173 tracing_cpumask = tracing_cpumask_new; 2719 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2174 2720
2175 mutex_unlock(&tracing_cpumask_update_lock); 2721 mutex_unlock(&tracing_cpumask_update_lock);
2722 free_cpumask_var(tracing_cpumask_new);
2176 2723
2177 return count; 2724 return count;
2178 2725
2179err_unlock: 2726err_unlock:
2180 mutex_unlock(&tracing_cpumask_update_lock); 2727 mutex_unlock(&tracing_cpumask_update_lock);
2728 free_cpumask_var(tracing_cpumask);
2181 2729
2182 return err; 2730 return err;
2183} 2731}
@@ -2189,13 +2737,16 @@ static struct file_operations tracing_cpumask_fops = {
2189}; 2737};
2190 2738
2191static ssize_t 2739static ssize_t
2192tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, 2740tracing_trace_options_read(struct file *filp, char __user *ubuf,
2193 size_t cnt, loff_t *ppos) 2741 size_t cnt, loff_t *ppos)
2194{ 2742{
2743 int i;
2195 char *buf; 2744 char *buf;
2196 int r = 0; 2745 int r = 0;
2197 int len = 0; 2746 int len = 0;
2198 int i; 2747 u32 tracer_flags = current_trace->flags->val;
2748 struct tracer_opt *trace_opts = current_trace->flags->opts;
2749
2199 2750
2200 /* calulate max size */ 2751 /* calulate max size */
2201 for (i = 0; trace_options[i]; i++) { 2752 for (i = 0; trace_options[i]; i++) {
@@ -2203,6 +2754,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2203 len += 3; /* "no" and space */ 2754 len += 3; /* "no" and space */
2204 } 2755 }
2205 2756
2757 /*
2758 * Increase the size with names of options specific
2759 * to the current tracer.
2760 */
2761 for (i = 0; trace_opts[i].name; i++) {
2762 len += strlen(trace_opts[i].name);
2763 len += 3; /* "no" and space */
2764 }
2765
2206 /* +2 for \n and \0 */ 2766 /* +2 for \n and \0 */
2207 buf = kmalloc(len + 2, GFP_KERNEL); 2767 buf = kmalloc(len + 2, GFP_KERNEL);
2208 if (!buf) 2768 if (!buf)
@@ -2215,6 +2775,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2215 r += sprintf(buf + r, "no%s ", trace_options[i]); 2775 r += sprintf(buf + r, "no%s ", trace_options[i]);
2216 } 2776 }
2217 2777
2778 for (i = 0; trace_opts[i].name; i++) {
2779 if (tracer_flags & trace_opts[i].bit)
2780 r += sprintf(buf + r, "%s ",
2781 trace_opts[i].name);
2782 else
2783 r += sprintf(buf + r, "no%s ",
2784 trace_opts[i].name);
2785 }
2786
2218 r += sprintf(buf + r, "\n"); 2787 r += sprintf(buf + r, "\n");
2219 WARN_ON(r >= len + 2); 2788 WARN_ON(r >= len + 2);
2220 2789
@@ -2225,13 +2794,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2225 return r; 2794 return r;
2226} 2795}
2227 2796
2797/* Try to assign a tracer specific option */
2798static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2799{
2800 struct tracer_flags *trace_flags = trace->flags;
2801 struct tracer_opt *opts = NULL;
2802 int ret = 0, i = 0;
2803 int len;
2804
2805 for (i = 0; trace_flags->opts[i].name; i++) {
2806 opts = &trace_flags->opts[i];
2807 len = strlen(opts->name);
2808
2809 if (strncmp(cmp, opts->name, len) == 0) {
2810 ret = trace->set_flag(trace_flags->val,
2811 opts->bit, !neg);
2812 break;
2813 }
2814 }
2815 /* Not found */
2816 if (!trace_flags->opts[i].name)
2817 return -EINVAL;
2818
2819 /* Refused to handle */
2820 if (ret)
2821 return ret;
2822
2823 if (neg)
2824 trace_flags->val &= ~opts->bit;
2825 else
2826 trace_flags->val |= opts->bit;
2827
2828 return 0;
2829}
2830
2228static ssize_t 2831static ssize_t
2229tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, 2832tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2230 size_t cnt, loff_t *ppos) 2833 size_t cnt, loff_t *ppos)
2231{ 2834{
2232 char buf[64]; 2835 char buf[64];
2233 char *cmp = buf; 2836 char *cmp = buf;
2234 int neg = 0; 2837 int neg = 0;
2838 int ret;
2235 int i; 2839 int i;
2236 2840
2237 if (cnt >= sizeof(buf)) 2841 if (cnt >= sizeof(buf))
@@ -2258,11 +2862,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2258 break; 2862 break;
2259 } 2863 }
2260 } 2864 }
2261 /* 2865
2262 * If no option could be set, return an error: 2866 /* If no option could be set, test the specific tracer options */
2263 */ 2867 if (!trace_options[i]) {
2264 if (!trace_options[i]) 2868 ret = set_tracer_option(current_trace, cmp, neg);
2265 return -EINVAL; 2869 if (ret)
2870 return ret;
2871 }
2266 2872
2267 filp->f_pos += cnt; 2873 filp->f_pos += cnt;
2268 2874
@@ -2271,8 +2877,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2271 2877
2272static struct file_operations tracing_iter_fops = { 2878static struct file_operations tracing_iter_fops = {
2273 .open = tracing_open_generic, 2879 .open = tracing_open_generic,
2274 .read = tracing_iter_ctrl_read, 2880 .read = tracing_trace_options_read,
2275 .write = tracing_iter_ctrl_write, 2881 .write = tracing_trace_options_write,
2276}; 2882};
2277 2883
2278static const char readme_msg[] = 2884static const char readme_msg[] =
@@ -2286,9 +2892,9 @@ static const char readme_msg[] =
2286 "# echo sched_switch > /debug/tracing/current_tracer\n" 2892 "# echo sched_switch > /debug/tracing/current_tracer\n"
2287 "# cat /debug/tracing/current_tracer\n" 2893 "# cat /debug/tracing/current_tracer\n"
2288 "sched_switch\n" 2894 "sched_switch\n"
2289 "# cat /debug/tracing/iter_ctrl\n" 2895 "# cat /debug/tracing/trace_options\n"
2290 "noprint-parent nosym-offset nosym-addr noverbose\n" 2896 "noprint-parent nosym-offset nosym-addr noverbose\n"
2291 "# echo print-parent > /debug/tracing/iter_ctrl\n" 2897 "# echo print-parent > /debug/tracing/trace_options\n"
2292 "# echo 1 > /debug/tracing/tracing_enabled\n" 2898 "# echo 1 > /debug/tracing/tracing_enabled\n"
2293 "# cat /debug/tracing/trace > /tmp/trace.txt\n" 2899 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2294 "echo 0 > /debug/tracing/tracing_enabled\n" 2900 "echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2311,11 +2917,10 @@ static ssize_t
2311tracing_ctrl_read(struct file *filp, char __user *ubuf, 2917tracing_ctrl_read(struct file *filp, char __user *ubuf,
2312 size_t cnt, loff_t *ppos) 2918 size_t cnt, loff_t *ppos)
2313{ 2919{
2314 struct trace_array *tr = filp->private_data;
2315 char buf[64]; 2920 char buf[64];
2316 int r; 2921 int r;
2317 2922
2318 r = sprintf(buf, "%ld\n", tr->ctrl); 2923 r = sprintf(buf, "%u\n", tracer_enabled);
2319 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2320} 2925}
2321 2926
@@ -2343,16 +2948,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2343 val = !!val; 2948 val = !!val;
2344 2949
2345 mutex_lock(&trace_types_lock); 2950 mutex_lock(&trace_types_lock);
2346 if (tr->ctrl ^ val) { 2951 if (tracer_enabled ^ val) {
2347 if (val) 2952 if (val) {
2348 tracer_enabled = 1; 2953 tracer_enabled = 1;
2349 else 2954 if (current_trace->start)
2955 current_trace->start(tr);
2956 tracing_start();
2957 } else {
2350 tracer_enabled = 0; 2958 tracer_enabled = 0;
2351 2959 tracing_stop();
2352 tr->ctrl = val; 2960 if (current_trace->stop)
2353 2961 current_trace->stop(tr);
2354 if (current_trace && current_trace->ctrl_update) 2962 }
2355 current_trace->ctrl_update(tr);
2356 } 2963 }
2357 mutex_unlock(&trace_types_lock); 2964 mutex_unlock(&trace_types_lock);
2358 2965
@@ -2378,29 +2985,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
2378 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2379} 2986}
2380 2987
2381static ssize_t 2988static int tracing_set_tracer(char *buf)
2382tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 size_t cnt, loff_t *ppos)
2384{ 2989{
2385 struct trace_array *tr = &global_trace; 2990 struct trace_array *tr = &global_trace;
2386 struct tracer *t; 2991 struct tracer *t;
2387 char buf[max_tracer_type_len+1]; 2992 int ret = 0;
2388 int i;
2389 size_t ret;
2390
2391 ret = cnt;
2392
2393 if (cnt > max_tracer_type_len)
2394 cnt = max_tracer_type_len;
2395
2396 if (copy_from_user(&buf, ubuf, cnt))
2397 return -EFAULT;
2398
2399 buf[cnt] = 0;
2400
2401 /* strip ending whitespace. */
2402 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2403 buf[i] = 0;
2404 2993
2405 mutex_lock(&trace_types_lock); 2994 mutex_lock(&trace_types_lock);
2406 for (t = trace_types; t; t = t->next) { 2995 for (t = trace_types; t; t = t->next) {
@@ -2414,18 +3003,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2414 if (t == current_trace) 3003 if (t == current_trace)
2415 goto out; 3004 goto out;
2416 3005
3006 trace_branch_disable();
2417 if (current_trace && current_trace->reset) 3007 if (current_trace && current_trace->reset)
2418 current_trace->reset(tr); 3008 current_trace->reset(tr);
2419 3009
2420 current_trace = t; 3010 current_trace = t;
2421 if (t->init) 3011 if (t->init) {
2422 t->init(tr); 3012 ret = t->init(tr);
3013 if (ret)
3014 goto out;
3015 }
2423 3016
3017 trace_branch_enable(tr);
2424 out: 3018 out:
2425 mutex_unlock(&trace_types_lock); 3019 mutex_unlock(&trace_types_lock);
2426 3020
2427 if (ret > 0) 3021 return ret;
2428 filp->f_pos += ret; 3022}
3023
3024static ssize_t
3025tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3026 size_t cnt, loff_t *ppos)
3027{
3028 char buf[max_tracer_type_len+1];
3029 int i;
3030 size_t ret;
3031 int err;
3032
3033 ret = cnt;
3034
3035 if (cnt > max_tracer_type_len)
3036 cnt = max_tracer_type_len;
3037
3038 if (copy_from_user(&buf, ubuf, cnt))
3039 return -EFAULT;
3040
3041 buf[cnt] = 0;
3042
3043 /* strip ending whitespace. */
3044 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3045 buf[i] = 0;
3046
3047 err = tracing_set_tracer(buf);
3048 if (err)
3049 return err;
3050
3051 filp->f_pos += ret;
2429 3052
2430 return ret; 3053 return ret;
2431} 3054}
@@ -2491,7 +3114,16 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
2491 if (!iter) 3114 if (!iter)
2492 return -ENOMEM; 3115 return -ENOMEM;
2493 3116
3117 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3118 kfree(iter);
3119 return -ENOMEM;
3120 }
3121
2494 mutex_lock(&trace_types_lock); 3122 mutex_lock(&trace_types_lock);
3123
3124 /* trace pipe does not show start of buffer */
3125 cpumask_setall(iter->started);
3126
2495 iter->tr = &global_trace; 3127 iter->tr = &global_trace;
2496 iter->trace = current_trace; 3128 iter->trace = current_trace;
2497 filp->private_data = iter; 3129 filp->private_data = iter;
@@ -2507,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
2507{ 3139{
2508 struct trace_iterator *iter = file->private_data; 3140 struct trace_iterator *iter = file->private_data;
2509 3141
3142 free_cpumask_var(iter->started);
2510 kfree(iter); 3143 kfree(iter);
2511 atomic_dec(&tracing_reader); 3144 atomic_dec(&tracing_reader);
2512 3145
@@ -2667,7 +3300,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
2667 char buf[64]; 3300 char buf[64];
2668 int r; 3301 int r;
2669 3302
2670 r = sprintf(buf, "%lu\n", tr->entries); 3303 r = sprintf(buf, "%lu\n", tr->entries >> 10);
2671 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3304 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2672} 3305}
2673 3306
@@ -2678,7 +3311,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2678 unsigned long val; 3311 unsigned long val;
2679 char buf[64]; 3312 char buf[64];
2680 int ret, cpu; 3313 int ret, cpu;
2681 struct trace_array *tr = filp->private_data;
2682 3314
2683 if (cnt >= sizeof(buf)) 3315 if (cnt >= sizeof(buf))
2684 return -EINVAL; 3316 return -EINVAL;
@@ -2698,12 +3330,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2698 3330
2699 mutex_lock(&trace_types_lock); 3331 mutex_lock(&trace_types_lock);
2700 3332
2701 if (tr->ctrl) { 3333 tracing_stop();
2702 cnt = -EBUSY;
2703 pr_info("ftrace: please disable tracing"
2704 " before modifying buffer size\n");
2705 goto out;
2706 }
2707 3334
2708 /* disable all cpu buffers */ 3335 /* disable all cpu buffers */
2709 for_each_tracing_cpu(cpu) { 3336 for_each_tracing_cpu(cpu) {
@@ -2713,6 +3340,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2713 atomic_inc(&max_tr.data[cpu]->disabled); 3340 atomic_inc(&max_tr.data[cpu]->disabled);
2714 } 3341 }
2715 3342
3343 /* value is in KB */
3344 val <<= 10;
3345
2716 if (val != global_trace.entries) { 3346 if (val != global_trace.entries) {
2717 ret = ring_buffer_resize(global_trace.buffer, val); 3347 ret = ring_buffer_resize(global_trace.buffer, val);
2718 if (ret < 0) { 3348 if (ret < 0) {
@@ -2751,6 +3381,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2751 atomic_dec(&max_tr.data[cpu]->disabled); 3381 atomic_dec(&max_tr.data[cpu]->disabled);
2752 } 3382 }
2753 3383
3384 tracing_start();
2754 max_tr.entries = global_trace.entries; 3385 max_tr.entries = global_trace.entries;
2755 mutex_unlock(&trace_types_lock); 3386 mutex_unlock(&trace_types_lock);
2756 3387
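Taken together these hunks change the resize policy: instead of refusing with -EBUSY while tracing is enabled, the write path stops tracing, quiesces every per-cpu buffer, converts the user's value from KB to bytes, resizes the ring buffer, and restarts tracing afterwards. A condensed, linear reading of that flow (error handling and the max_tr resize trimmed; the remaining names are the ones in the hunks):

#include <linux/mutex.h>
#include <linux/ring_buffer.h>

/* condensed sketch: 'val' arrives in KB from user space */
static int my_resize_buffers(unsigned long val)
{
        int cpu, ret = 0;

        mutex_lock(&trace_types_lock);
        tracing_stop();                         /* no new events while resizing */

        for_each_tracing_cpu(cpu) {
                atomic_inc(&global_trace.data[cpu]->disabled);
                atomic_inc(&max_tr.data[cpu]->disabled);
        }

        val <<= 10;                             /* buffer_size_kb: KB -> bytes */

        if (val != global_trace.entries) {
                ret = ring_buffer_resize(global_trace.buffer, val);
                if (ret >= 0)
                        global_trace.entries = val;
                /* the full code also resizes max_tr.buffer and tries to
                 * restore the old size if either resize fails */
        }

        for_each_tracing_cpu(cpu) {
                atomic_dec(&global_trace.data[cpu]->disabled);
                atomic_dec(&max_tr.data[cpu]->disabled);
        }

        tracing_start();
        max_tr.entries = global_trace.entries;
        mutex_unlock(&trace_types_lock);

        return ret < 0 ? ret : 0;
}

From user space the whole thing is driven by writing a KB value, e.g. echo 1024 > buffer_size_kb in the tracing debugfs directory.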
@@ -2762,7 +3393,7 @@ static int mark_printk(const char *fmt, ...)
2762 int ret; 3393 int ret;
2763 va_list args; 3394 va_list args;
2764 va_start(args, fmt); 3395 va_start(args, fmt);
2765 ret = trace_vprintk(0, fmt, args); 3396 ret = trace_vprintk(0, -1, fmt, args);
2766 va_end(args); 3397 va_end(args);
2767 return ret; 3398 return ret;
2768} 3399}
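trace_vprintk() grows a depth argument so printk-style entries can record a function-graph nesting depth; callers with no graph context, like the trace_marker path here, pass -1, while __ftrace_printk() (in a later hunk) passes task_curr_ret_stack(current). The wrapper itself is the standard varargs forwarding idiom, sketched below against the new signature:

#include <linux/kernel.h>

/* new signature, as in the hunk above */
extern int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);

static int my_mark_printk(const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        /* ip = 0: no meaningful call site; depth = -1: no graph nesting */
        ret = trace_vprintk(0, -1, fmt, args);
        va_end(args);
        return ret;
}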
@@ -2773,9 +3404,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
2773{ 3404{
2774 char *buf; 3405 char *buf;
2775 char *end; 3406 char *end;
2776 struct trace_array *tr = &global_trace;
2777 3407
2778 if (!tr->ctrl || tracing_disabled) 3408 if (tracing_disabled)
2779 return -EINVAL; 3409 return -EINVAL;
2780 3410
2781 if (cnt > TRACE_BUF_SIZE) 3411 if (cnt > TRACE_BUF_SIZE)
@@ -2841,22 +3471,38 @@ static struct file_operations tracing_mark_fops = {
2841 3471
2842#ifdef CONFIG_DYNAMIC_FTRACE 3472#ifdef CONFIG_DYNAMIC_FTRACE
2843 3473
3474int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3475{
3476 return 0;
3477}
3478
2844static ssize_t 3479static ssize_t
2845tracing_read_long(struct file *filp, char __user *ubuf, 3480tracing_read_dyn_info(struct file *filp, char __user *ubuf,
2846 size_t cnt, loff_t *ppos) 3481 size_t cnt, loff_t *ppos)
2847{ 3482{
3483 static char ftrace_dyn_info_buffer[1024];
3484 static DEFINE_MUTEX(dyn_info_mutex);
2848 unsigned long *p = filp->private_data; 3485 unsigned long *p = filp->private_data;
2849 char buf[64]; 3486 char *buf = ftrace_dyn_info_buffer;
3487 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
2850 int r; 3488 int r;
2851 3489
2852 r = sprintf(buf, "%ld\n", *p); 3490 mutex_lock(&dyn_info_mutex);
3491 r = sprintf(buf, "%ld ", *p);
2853 3492
2854 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3493 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
3494 buf[r++] = '\n';
3495
3496 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3497
3498 mutex_unlock(&dyn_info_mutex);
3499
3500 return r;
2855} 3501}
2856 3502
2857static struct file_operations tracing_read_long_fops = { 3503static struct file_operations tracing_dyn_info_fops = {
2858 .open = tracing_open_generic, 3504 .open = tracing_open_generic,
2859 .read = tracing_read_long, 3505 .read = tracing_read_dyn_info,
2860}; 3506};
2861#endif 3507#endif
2862 3508
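The renamed dyn_ftrace_total_info handler still prints the update count, but now lets an architecture append its own text through the __weak ftrace_arch_read_dyn_info() hook; the default above contributes nothing. A hypothetical strong override could look like the sketch below (the statistic printed is made up for illustration). It must return the number of characters it appended, since the caller advances its write position by that amount and adds the trailing newline itself:

#include <linux/kernel.h>

static int my_arch_patched_count;       /* hypothetical per-arch statistic */

int ftrace_arch_read_dyn_info(char *buf, int size)
{
        /* scnprintf() returns the number of characters actually written */
        return scnprintf(buf, size, "(arch sites: %d)", my_arch_patched_count);
}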
@@ -2897,10 +3543,10 @@ static __init int tracer_init_debugfs(void)
2897 if (!entry) 3543 if (!entry)
2898 pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); 3544 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2899 3545
2900 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, 3546 entry = debugfs_create_file("trace_options", 0644, d_tracer,
2901 NULL, &tracing_iter_fops); 3547 NULL, &tracing_iter_fops);
2902 if (!entry) 3548 if (!entry)
2903 pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); 3549 pr_warning("Could not create debugfs 'trace_options' entry\n");
2904 3550
2905 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, 3551 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2906 NULL, &tracing_cpumask_fops); 3552 NULL, &tracing_cpumask_fops);
@@ -2950,11 +3596,11 @@ static __init int tracer_init_debugfs(void)
2950 pr_warning("Could not create debugfs " 3596 pr_warning("Could not create debugfs "
2951 "'trace_pipe' entry\n"); 3597 "'trace_pipe' entry\n");
2952 3598
2953 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 3599 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
2954 &global_trace, &tracing_entries_fops); 3600 &global_trace, &tracing_entries_fops);
2955 if (!entry) 3601 if (!entry)
2956 pr_warning("Could not create debugfs " 3602 pr_warning("Could not create debugfs "
2957 "'trace_entries' entry\n"); 3603 "'buffer_size_kb' entry\n");
2958 3604
2959 entry = debugfs_create_file("trace_marker", 0220, d_tracer, 3605 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2960 NULL, &tracing_mark_fops); 3606 NULL, &tracing_mark_fops);
@@ -2965,7 +3611,7 @@ static __init int tracer_init_debugfs(void)
2965#ifdef CONFIG_DYNAMIC_FTRACE 3611#ifdef CONFIG_DYNAMIC_FTRACE
2966 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 3612 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2967 &ftrace_update_tot_cnt, 3613 &ftrace_update_tot_cnt,
2968 &tracing_read_long_fops); 3614 &tracing_dyn_info_fops);
2969 if (!entry) 3615 if (!entry)
2970 pr_warning("Could not create debugfs " 3616 pr_warning("Could not create debugfs "
2971 "'dyn_ftrace_total_info' entry\n"); 3617 "'dyn_ftrace_total_info' entry\n");
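The user-visible effect of this run of hunks is a rename of two control files, iter_ctrl becoming trace_options and trace_entries becoming buffer_size_kb, plus dyn_ftrace_total_info being wired to the renamed fops; the creation pattern is untouched. For reference, that pattern is the plain debugfs_create_file() call with a NULL check that only warns, sketched here with the two renamed files:

#include <linux/debugfs.h>

static void __init my_create_trace_files(struct dentry *d_tracer)
{
        struct dentry *entry;

        /* renamed from "iter_ctrl" */
        entry = debugfs_create_file("trace_options", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'trace_options' entry\n");

        /* renamed from "trace_entries"; the unit is now KB */
        entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
                                    &global_trace, &tracing_entries_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'buffer_size_kb' entry\n");
}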
@@ -2976,7 +3622,7 @@ static __init int tracer_init_debugfs(void)
2976 return 0; 3622 return 0;
2977} 3623}
2978 3624
2979int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3625int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
2980{ 3626{
2981 static DEFINE_SPINLOCK(trace_buf_lock); 3627 static DEFINE_SPINLOCK(trace_buf_lock);
2982 static char trace_buf[TRACE_BUF_SIZE]; 3628 static char trace_buf[TRACE_BUF_SIZE];
@@ -2984,11 +3630,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2984 struct ring_buffer_event *event; 3630 struct ring_buffer_event *event;
2985 struct trace_array *tr = &global_trace; 3631 struct trace_array *tr = &global_trace;
2986 struct trace_array_cpu *data; 3632 struct trace_array_cpu *data;
2987 struct print_entry *entry;
2988 unsigned long flags, irq_flags;
2989 int cpu, len = 0, size, pc; 3633 int cpu, len = 0, size, pc;
3634 struct print_entry *entry;
3635 unsigned long irq_flags;
2990 3636
2991 if (!tr->ctrl || tracing_disabled) 3637 if (tracing_disabled || tracing_selftest_running)
2992 return 0; 3638 return 0;
2993 3639
2994 pc = preempt_count(); 3640 pc = preempt_count();
@@ -2999,7 +3645,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2999 if (unlikely(atomic_read(&data->disabled))) 3645 if (unlikely(atomic_read(&data->disabled)))
3000 goto out; 3646 goto out;
3001 3647
3002 spin_lock_irqsave(&trace_buf_lock, flags); 3648 pause_graph_tracing();
3649 spin_lock_irqsave(&trace_buf_lock, irq_flags);
3003 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 3650 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3004 3651
3005 len = min(len, TRACE_BUF_SIZE-1); 3652 len = min(len, TRACE_BUF_SIZE-1);
@@ -3010,17 +3657,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3010 if (!event) 3657 if (!event)
3011 goto out_unlock; 3658 goto out_unlock;
3012 entry = ring_buffer_event_data(event); 3659 entry = ring_buffer_event_data(event);
3013 tracing_generic_entry_update(&entry->ent, flags, pc); 3660 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3014 entry->ent.type = TRACE_PRINT; 3661 entry->ent.type = TRACE_PRINT;
3015 entry->ip = ip; 3662 entry->ip = ip;
3663 entry->depth = depth;
3016 3664
3017 memcpy(&entry->buf, trace_buf, len); 3665 memcpy(&entry->buf, trace_buf, len);
3018 entry->buf[len] = 0; 3666 entry->buf[len] = 0;
3019 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 3667 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3020 3668
3021 out_unlock: 3669 out_unlock:
3022 spin_unlock_irqrestore(&trace_buf_lock, flags); 3670 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3023 3671 unpause_graph_tracing();
3024 out: 3672 out:
3025 preempt_enable_notrace(); 3673 preempt_enable_notrace();
3026 3674
@@ -3037,7 +3685,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3037 return 0; 3685 return 0;
3038 3686
3039 va_start(ap, fmt); 3687 va_start(ap, fmt);
3040 ret = trace_vprintk(ip, fmt, ap); 3688 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
3041 va_end(ap); 3689 va_end(ap);
3042 return ret; 3690 return ret;
3043} 3691}
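Two changes land in trace_vprintk() here: it now also returns early when tracing_selftest_running is set, and the formatting/commit section is bracketed by pause_graph_tracing()/unpause_graph_tracing() so that the work done on behalf of ftrace_printk() does not itself show up as function-graph events; __ftrace_printk() then supplies the depth via task_curr_ret_stack(current). A sketch of the bracket pattern, with do_format() standing in for the vsnprintf and ring-buffer work:

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_buf_lock);

static void do_format(const char *fmt, va_list args);   /* hypothetical */

static void my_traced_vprintk(const char *fmt, va_list args)
{
        unsigned long irq_flags;

        /* keep our own formatting out of the function-graph trace */
        pause_graph_tracing();
        spin_lock_irqsave(&my_buf_lock, irq_flags);

        do_format(fmt, args);

        spin_unlock_irqrestore(&my_buf_lock, irq_flags);
        unpause_graph_tracing();
}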
@@ -3046,7 +3694,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
3046static int trace_panic_handler(struct notifier_block *this, 3694static int trace_panic_handler(struct notifier_block *this,
3047 unsigned long event, void *unused) 3695 unsigned long event, void *unused)
3048{ 3696{
3049 ftrace_dump(); 3697 if (ftrace_dump_on_oops)
3698 ftrace_dump();
3050 return NOTIFY_OK; 3699 return NOTIFY_OK;
3051} 3700}
3052 3701
@@ -3062,7 +3711,8 @@ static int trace_die_handler(struct notifier_block *self,
3062{ 3711{
3063 switch (val) { 3712 switch (val) {
3064 case DIE_OOPS: 3713 case DIE_OOPS:
3065 ftrace_dump(); 3714 if (ftrace_dump_on_oops)
3715 ftrace_dump();
3066 break; 3716 break;
3067 default: 3717 default:
3068 break; 3718 break;
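Both the panic and the die notifier now dump the ftrace buffers only when the ftrace_dump_on_oops knob is set, so the dump is opt-in. A sketch of the gate in panic-notifier shape (the priority value is illustrative):

#include <linux/ftrace.h>
#include <linux/notifier.h>

static int my_panic_handler(struct notifier_block *this,
                            unsigned long event, void *unused)
{
        if (ftrace_dump_on_oops)
                ftrace_dump();
        return NOTIFY_OK;               /* never block the panic path */
}

static struct notifier_block my_panic_notifier = {
        .notifier_call  = my_panic_handler,
        .priority       = 150,          /* illustrative */
};

/* registered as in tracer_alloc_buffers() further down:
 *      atomic_notifier_chain_register(&panic_notifier_list, &my_panic_notifier);
 */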
@@ -3103,13 +3753,11 @@ trace_printk_seq(struct trace_seq *s)
3103 trace_seq_reset(s); 3753 trace_seq_reset(s);
3104} 3754}
3105 3755
3106
3107void ftrace_dump(void) 3756void ftrace_dump(void)
3108{ 3757{
3109 static DEFINE_SPINLOCK(ftrace_dump_lock); 3758 static DEFINE_SPINLOCK(ftrace_dump_lock);
3110 /* use static because iter can be a bit big for the stack */ 3759 /* use static because iter can be a bit big for the stack */
3111 static struct trace_iterator iter; 3760 static struct trace_iterator iter;
3112 static cpumask_t mask;
3113 static int dump_ran; 3761 static int dump_ran;
3114 unsigned long flags; 3762 unsigned long flags;
3115 int cnt = 0, cpu; 3763 int cnt = 0, cpu;
@@ -3128,6 +3776,9 @@ void ftrace_dump(void)
3128 atomic_inc(&global_trace.data[cpu]->disabled); 3776 atomic_inc(&global_trace.data[cpu]->disabled);
3129 } 3777 }
3130 3778
3779 /* don't look at user memory in panic mode */
3780 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3781
3131 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 3782 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3132 3783
3133 iter.tr = &global_trace; 3784 iter.tr = &global_trace;
@@ -3140,8 +3791,6 @@ void ftrace_dump(void)
3140 * and then release the locks again. 3791 * and then release the locks again.
3141 */ 3792 */
3142 3793
3143 cpus_clear(mask);
3144
3145 while (!trace_empty(&iter)) { 3794 while (!trace_empty(&iter)) {
3146 3795
3147 if (!cnt) 3796 if (!cnt)
@@ -3177,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
3177{ 3826{
3178 struct trace_array_cpu *data; 3827 struct trace_array_cpu *data;
3179 int i; 3828 int i;
3829 int ret = -ENOMEM;
3180 3830
3181 /* TODO: make the number of buffers hot pluggable with CPUS */ 3831 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
3182 tracing_buffer_mask = cpu_possible_map; 3832 goto out;
3833
3834 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3835 goto out_free_buffer_mask;
3836
3837 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3838 cpumask_copy(tracing_cpumask, cpu_all_mask);
3183 3839
3840 /* TODO: make the number of buffers hot pluggable with CPUS */
3184 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 3841 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3185 TRACE_BUFFER_FLAGS); 3842 TRACE_BUFFER_FLAGS);
3186 if (!global_trace.buffer) { 3843 if (!global_trace.buffer) {
3187 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 3844 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3188 WARN_ON(1); 3845 WARN_ON(1);
3189 return 0; 3846 goto out_free_cpumask;
3190 } 3847 }
3191 global_trace.entries = ring_buffer_size(global_trace.buffer); 3848 global_trace.entries = ring_buffer_size(global_trace.buffer);
3192 3849
3850
3193#ifdef CONFIG_TRACER_MAX_TRACE 3851#ifdef CONFIG_TRACER_MAX_TRACE
3194 max_tr.buffer = ring_buffer_alloc(trace_buf_size, 3852 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3195 TRACE_BUFFER_FLAGS); 3853 TRACE_BUFFER_FLAGS);
@@ -3197,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
3197 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 3855 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3198 WARN_ON(1); 3856 WARN_ON(1);
3199 ring_buffer_free(global_trace.buffer); 3857 ring_buffer_free(global_trace.buffer);
3200 return 0; 3858 goto out_free_cpumask;
3201 } 3859 }
3202 max_tr.entries = ring_buffer_size(max_tr.buffer); 3860 max_tr.entries = ring_buffer_size(max_tr.buffer);
3203 WARN_ON(max_tr.entries != global_trace.entries); 3861 WARN_ON(max_tr.entries != global_trace.entries);
@@ -3221,15 +3879,20 @@ __init static int tracer_alloc_buffers(void)
3221#endif 3879#endif
3222 3880
3223 /* All seems OK, enable tracing */ 3881 /* All seems OK, enable tracing */
3224 global_trace.ctrl = tracer_enabled;
3225 tracing_disabled = 0; 3882 tracing_disabled = 0;
3226 3883
3227 atomic_notifier_chain_register(&panic_notifier_list, 3884 atomic_notifier_chain_register(&panic_notifier_list,
3228 &trace_panic_notifier); 3885 &trace_panic_notifier);
3229 3886
3230 register_die_notifier(&trace_die_notifier); 3887 register_die_notifier(&trace_die_notifier);
3888 ret = 0;
3231 3889
3232 return 0; 3890out_free_cpumask:
3891 free_cpumask_var(tracing_cpumask);
3892out_free_buffer_mask:
3893 free_cpumask_var(tracing_buffer_mask);
3894out:
3895 return ret;
3233} 3896}
3234early_initcall(tracer_alloc_buffers); 3897early_initcall(tracer_alloc_buffers);
3235fs_initcall(tracer_init_debugfs); 3898fs_initcall(tracer_init_debugfs);
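tracer_alloc_buffers() now unwinds with the usual goto ladder: ret starts at -ENOMEM, each failure jumps to the label that releases exactly what was already acquired, and ret only becomes 0 once everything has succeeded, instead of the old silent return 0. One detail worth noting in the hunk as shown: after ret = 0 the success path appears to fall straight through the out_free_cpumask and out_free_buffer_mask labels, which is only harmless while CONFIG_CPUMASK_OFFSTACK is off and free_cpumask_var() is a no-op. The sketch below keeps the ladder but returns before the labels on success; the ring-buffer helper is a hypothetical stand-in:

#include <linux/cpumask.h>
#include <linux/gfp.h>

static cpumask_var_t tracing_buffer_mask;       /* as in trace.c */
static cpumask_var_t tracing_cpumask;           /* as in trace.c */

static int my_alloc_ring_buffers(void);         /* hypothetical stand-in */

static int __init my_alloc_buffers(void)
{
        int ret = -ENOMEM;

        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;

        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);

        if (my_alloc_ring_buffers())
                goto out_free_cpumask;

        return 0;                       /* success: keep both masks */

out_free_cpumask:
        free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
out:
        return ret;
}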