path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	612
1 file changed, 486 insertions(+), 126 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 697eda36b86a..4ee6f0375222 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,29 @@
 unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly	tracing_thresh;
 
+/* For tracers that don't implement custom flags */
+static struct tracer_opt dummy_tracer_opt[] = {
+	{ }
+};
+
+static struct tracer_flags dummy_tracer_flags = {
+	.val = 0,
+	.opts = dummy_tracer_opt
+};
+
+static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+{
+	return 0;
+}
+
+/*
+ * Kill all tracing for good (never come back).
+ * It is initialized to 1 but will turn to zero if the initialization
+ * of the tracer is successful. But that is the only place that sets
+ * this back to zero.
+ */
+int tracing_disabled = 1;
+
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
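The dummy structures above exist so that register_tracer() can hand every tracer a usable flags/set_flag pair and the core never has to NULL-check them. For contrast, a tracer that does want private options would define something along these lines under the tracer_opt/tracer_flags API this patch introduces (a sketch only; the names are illustrative, not from the patch):

	#define MY_OPT_VERBOSE	0x1		/* made-up option bit */

	static struct tracer_opt my_tracer_opts[] = {
		{ .name = "verbose", .bit = MY_OPT_VERBOSE },
		{ }	/* empty terminator, as in dummy_tracer_opt */
	};

	static struct tracer_flags my_tracer_flags = {
		.val	= 0,		/* everything off by default */
		.opts	= my_tracer_opts,
	};

and wire them up through the .flags and .set_flag members of its struct tracer.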
@@ -62,7 +85,36 @@ static cpumask_t __read_mostly tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-static int tracing_disabled = 1;
+/*
+ * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
+ *
+ * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
+ * is set, then ftrace_dump is called. This will output the contents
+ * of the ftrace buffers to the console. This is very useful for
+ * capturing traces that lead to crashes and outputing it to a
+ * serial console.
+ *
+ * It is default off, but you can enable it with either specifying
+ * "ftrace_dump_on_oops" in the kernel command line, or setting
+ * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ */
+int ftrace_dump_on_oops;
+
+static int tracing_set_tracer(char *buf);
+
+static int __init set_ftrace(char *str)
+{
+	tracing_set_tracer(str);
+	return 1;
+}
+__setup("ftrace", set_ftrace);
+
+static int __init set_ftrace_dump_on_oops(char *str)
+{
+	ftrace_dump_on_oops = 1;
+	return 1;
+}
+__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
 long
 ns2usecs(cycle_t nsec)
@@ -112,6 +164,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
 
+/**
+ * tracing_is_enabled - return tracer_enabled status
+ *
+ * This function is used by other tracers to know the status
+ * of the tracer_enabled flag. Tracers may use this function
+ * to know if it should enable their features when starting
+ * up. See irqsoff tracer for an example (start_irqsoff_tracer).
+ */
+int tracing_is_enabled(void)
+{
+	return tracer_enabled;
+}
+
 /* function tracing enabled */
 int ftrace_function_enabled;
 
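Per the kernel-doc above, a tracer can consult tracing_is_enabled() when it starts up. A minimal sketch of that pattern with a hypothetical tracer (start_irqsoff_tracer does the real-world equivalent):

	static int my_hooks_armed;	/* illustrative state */

	static void start_my_tracer(void)
	{
		/* Only arm the hooks if tracing is globally enabled;
		 * otherwise wait for tracing_enabled to be flipped. */
		if (tracing_is_enabled())
			my_hooks_armed = 1;
	}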
@@ -153,8 +218,9 @@ static DEFINE_MUTEX(trace_types_lock);
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
-/* trace_flags holds iter_ctrl options */
-unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+/* trace_flags holds trace_options default values */
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
+	TRACE_ITER_ANNOTATE;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -193,13 +259,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
 	return nsecs / 1000;
 }
 
-/*
- * TRACE_ITER_SYM_MASK masks the options in trace_flags that
- * control the output of kernel symbols.
- */
-#define TRACE_ITER_SYM_MASK \
-	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
-
 /* These must match the bit postions in trace_iterator_flags */
 static const char *trace_options[] = {
 	"print-parent",
@@ -213,6 +272,9 @@ static const char *trace_options[] = {
 	"stacktrace",
 	"sched-tree",
 	"ftrace_printk",
+	"ftrace_preempt",
+	"branch",
+	"annotate",
 	NULL
 };
 
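As the comment above the array says, the string at index i must line up with bit i of trace_flags. Assuming the three new strings keep that array order, the matching trace_iterator_flags additions in trace.h come out roughly as:

	/* sketch of the corresponding bits, continuing after
	 * TRACE_ITER_PRINTK (0x400) */
	TRACE_ITER_PREEMPTONLY	= 0x800,	/* "ftrace_preempt" */
	TRACE_ITER_BRANCH	= 0x1000,	/* "branch" */
	TRACE_ITER_ANNOTATE	= 0x2000,	/* "annotate" */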
@@ -470,7 +532,15 @@ int register_tracer(struct tracer *type)
 		return -1;
 	}
 
+	/*
+	 * When this gets called we hold the BKL which means that
+	 * preemption is disabled. Various trace selftests however
+	 * need to disable and enable preemption for successful tests.
+	 * So we drop the BKL here and grab it after the tests again.
+	 */
+	unlock_kernel();
 	mutex_lock(&trace_types_lock);
+
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
@@ -481,11 +551,18 @@ int register_tracer(struct tracer *type)
 		}
 	}
 
+	if (!type->set_flag)
+		type->set_flag = &dummy_set_flag;
+	if (!type->flags)
+		type->flags = &dummy_tracer_flags;
+	else
+		if (!type->flags->opts)
+			type->flags->opts = dummy_tracer_opt;
+
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
-		int saved_ctrl = tr->ctrl;
 		int i;
 		/*
 		 * Run a selftest on this tracer.
@@ -494,25 +571,23 @@ int register_tracer(struct tracer *type)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i) {
+		for_each_tracing_cpu(i)
 			tracing_reset(tr, i);
-		}
+
 		current_trace = type;
-		tr->ctrl = 0;
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
 		/* the test is responsible for resetting too */
 		current_trace = saved_tracer;
-		tr->ctrl = saved_ctrl;
 		if (ret) {
 			printk(KERN_CONT "FAILED!\n");
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i) {
+		for_each_tracing_cpu(i)
 			tracing_reset(tr, i);
-		}
+
 		printk(KERN_CONT "PASSED\n");
 	}
 #endif
@@ -525,6 +600,7 @@ int register_tracer(struct tracer *type)
 
  out:
 	mutex_unlock(&trace_types_lock);
+	lock_kernel();
 
 	return ret;
 }
@@ -581,6 +657,76 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
+/**
+ * tracing_start - quick start of the tracer
+ *
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
+ */
+void tracing_start(void)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	if (tracing_disabled)
+		return;
+
+	spin_lock_irqsave(&tracing_start_lock, flags);
+	if (--trace_stop_count)
+		goto out;
+
+	if (trace_stop_count < 0) {
+		/* Someone screwed up their debugging */
+		WARN_ON_ONCE(1);
+		trace_stop_count = 0;
+		goto out;
+	}
+
+
+	buffer = global_trace.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+	buffer = max_tr.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+	ftrace_start();
+ out:
+	spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	ftrace_stop();
+	spin_lock_irqsave(&tracing_start_lock, flags);
+	if (trace_stop_count++)
+		goto out;
+
+	buffer = global_trace.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+	buffer = max_tr.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+ out:
+	spin_unlock_irqrestore(&tracing_start_lock, flags);
+}
+
 void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
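Note that trace_stop_count makes the pair nest: only the first tracing_stop() actually disables the ring buffers, and only the tracing_start() that brings the counter back to zero re-enables them. A sketch of the behaviour:

	tracing_stop();		/* count 0 -> 1: recording disabled   */
	tracing_stop();		/* count 1 -> 2: no-op, just counts   */
	tracing_start();	/* count 2 -> 1: still disabled       */
	tracing_start();	/* count 1 -> 0: recording re-enabled */

The WARN_ON_ONCE() above fires when the counter goes negative, i.e. a tracing_start() without a matching tracing_stop().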
@@ -691,6 +837,36 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+static void __trace_function_return(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_retfunc *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_ret_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_FN_RET;
+	entry->ip = trace->func;
+	entry->parent_ip = trace->ret;
+	entry->rettime = trace->rettime;
+	entry->calltime = trace->calltime;
+	entry->overrun = trace->overrun;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+#endif
+
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags,
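__trace_function_return() follows the reserve/fill/commit protocol every event writer in this file uses: reserve space in the ring buffer, bail out if that fails (buffer full or disabled), fill in the generic entry header plus type-specific fields, then commit. Reduced to its skeleton (my_entry and its val field are placeholders, not a real trace type):

	struct my_entry {
		struct trace_entry	ent;
		unsigned long		val;
	};

	static void write_my_event(unsigned long val, unsigned long flags, int pc)
	{
		struct ring_buffer_event *event;
		struct my_entry *entry;
		unsigned long irq_flags;

		event = ring_buffer_lock_reserve(global_trace.buffer,
						 sizeof(*entry), &irq_flags);
		if (!event)
			return;		/* event dropped: buffer full/disabled */
		entry = ring_buffer_event_data(event);
		tracing_generic_entry_update(&entry->ent, flags, pc);
		entry->val = val;
		ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
	}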
@@ -841,26 +1017,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
+	unsigned long flags;
 	int cpu;
 	int pc;
 
-	if (tracing_disabled || !tr->ctrl)
+	if (tracing_disabled)
 		return;
 
 	pc = preempt_count();
-	preempt_disable_notrace();
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (likely(!atomic_read(&data->disabled)))
+	if (likely(atomic_inc_return(&data->disabled) == 1))
 		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	preempt_enable_notrace();
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -873,8 +1051,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -884,12 +1061,63 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+void trace_function_return(struct ftrace_retfunc *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_function_return(tr, data, trace, flags, pc);
+	}
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
+#endif /* CONFIG_FUNCTION_RET_TRACER */
+
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
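function_trace_call() now hard-disables interrupts, which is what lets it run before the recursion protection; the old, cheaper preempt-only scheme survives as function_trace_call_preempt_only() behind the ftrace_preempt_disable()/ftrace_preempt_enable() helpers. Those helpers, which this series adds in kernel/trace/trace.h, are essentially the open-coded sequence they replace:

	static inline int ftrace_preempt_disable(void)
	{
		int resched;

		resched = need_resched();
		preempt_disable_notrace();
		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		/* If NEED_RESCHED was already set, re-enabling preemption
		 * normally could recurse into the scheduler from the
		 * tracer, so use the no-resched variant. */
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}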
@@ -898,9 +1126,14 @@ static struct ftrace_ops trace_ops __read_mostly =
 void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
 	register_ftrace_function(&trace_ops);
-	if (tracer_enabled)
-		ftrace_function_enabled = 1;
+	ftrace_function_enabled = 1;
 }
 
 void tracing_stop_function_trace(void)
@@ -912,6 +1145,7 @@ void tracing_stop_function_trace(void)
 
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
+	TRACE_FILE_ANNOTATE = 2,
 };
 
 static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
@@ -1047,10 +1281,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	atomic_inc(&trace_record_cmdline_disabled);
 
-	/* let the tracer grab locks here if needed */
-	if (current_trace->start)
-		current_trace->start(iter);
-
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
 		iter->cpu = 0;
@@ -1077,14 +1307,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	struct trace_iterator *iter = m->private;
-
 	atomic_dec(&trace_record_cmdline_disabled);
-
-	/* let the tracer release locks here if needed */
-	if (current_trace && current_trace == iter->trace && iter->trace->stop)
-		iter->trace->stop(iter);
-
 	mutex_unlock(&trace_types_lock);
 }
 
@@ -1143,7 +1366,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 # define IP_FMT "%016lx"
 #endif
 
-static int
+int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
 	int ret;
@@ -1338,6 +1561,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
 		trace_seq_putc(s, '\n');
 }
 
+static void test_cpu_buff_start(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+
+	if (!(trace_flags & TRACE_ITER_ANNOTATE))
+		return;
+
+	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
+		return;
+
+	if (cpu_isset(iter->cpu, iter->started))
+		return;
+
+	cpu_set(iter->cpu, iter->started);
+	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
+}
+
 static enum print_line_t
 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 {
@@ -1357,6 +1597,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 	if (entry->type == TRACE_CONT)
 		return TRACE_TYPE_HANDLED;
 
+	test_cpu_buff_start(iter);
+
 	next_entry = find_next_entry(iter, NULL, &next_ts);
 	if (!next_entry)
 		next_ts = iter->ts;
@@ -1448,6 +1690,18 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
+	case TRACE_BRANCH: {
+		struct trace_branch *field;
+
+		trace_assign_type(field, entry);
+
+		trace_seq_printf(s, "[%s] %s:%s:%d\n",
+				 field->correct ? " ok  " : " MISS ",
+				 field->func,
+				 field->file,
+				 field->line);
+		break;
+	}
 	default:
 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
 	}
@@ -1472,6 +1726,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 	if (entry->type == TRACE_CONT)
 		return TRACE_TYPE_HANDLED;
 
+	test_cpu_buff_start(iter);
+
 	comm = trace_find_cmdline(iter->ent->pid);
 
 	t = ns2usecs(iter->ts);
@@ -1581,6 +1837,22 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
+	case TRACE_FN_RET: {
+		return print_return_function(iter);
+		break;
+	}
+	case TRACE_BRANCH: {
+		struct trace_branch *field;
+
+		trace_assign_type(field, entry);
+
+		trace_seq_printf(s, "[%s] %s:%s:%d\n",
+				 field->correct ? " ok  " : " MISS ",
+				 field->func,
+				 field->file,
+				 field->line);
+		break;
+	}
 	}
 	return TRACE_TYPE_HANDLED;
 }
@@ -1899,6 +2171,11 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	iter->trace = current_trace;
 	iter->pos = -1;
 
+	/* Annotate start of buffers if we had overruns */
+	if (ring_buffer_overruns(iter->tr->buffer))
+		iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+
 	for_each_tracing_cpu(cpu) {
 
 		iter->buffer_iter[cpu] =
@@ -1917,10 +2194,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	m->private = iter;
 
 	/* stop the trace while dumping */
-	if (iter->tr->ctrl) {
-		tracer_enabled = 0;
-		ftrace_function_enabled = 0;
-	}
+	tracing_stop();
 
 	if (iter->trace && iter->trace->open)
 		iter->trace->open(iter);
@@ -1936,6 +2210,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 		ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	mutex_unlock(&trace_types_lock);
+	kfree(iter);
 
 	return ERR_PTR(-ENOMEM);
 }
@@ -1965,14 +2240,7 @@ int tracing_release(struct inode *inode, struct file *file)
 		iter->trace->close(iter);
 
 	/* reenable tracing if it was previously enabled */
-	if (iter->tr->ctrl) {
-		tracer_enabled = 1;
-		/*
-		 * It is safe to enable function tracing even if it
-		 * isn't used
-		 */
-		ftrace_function_enabled = 1;
-	}
+	tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	seq_release(inode, file);
@@ -2188,13 +2456,16 @@ static struct file_operations tracing_cpumask_fops = {
 };
 
 static ssize_t
-tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
+tracing_trace_options_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
+	int i;
 	char *buf;
 	int r = 0;
 	int len = 0;
-	int i;
+	u32 tracer_flags = current_trace->flags->val;
+	struct tracer_opt *trace_opts = current_trace->flags->opts;
+
 
 	/* calulate max size */
 	for (i = 0; trace_options[i]; i++) {
@@ -2202,6 +2473,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
 		len += 3; /* "no" and space */
 	}
 
+	/*
+	 * Increase the size with names of options specific
+	 * of the current tracer.
+	 */
+	for (i = 0; trace_opts[i].name; i++) {
+		len += strlen(trace_opts[i].name);
+		len += 3; /* "no" and space */
+	}
+
 	/* +2 for \n and \0 */
 	buf = kmalloc(len + 2, GFP_KERNEL);
 	if (!buf)
@@ -2214,6 +2494,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
 			r += sprintf(buf + r, "no%s ", trace_options[i]);
 	}
 
+	for (i = 0; trace_opts[i].name; i++) {
+		if (tracer_flags & trace_opts[i].bit)
+			r += sprintf(buf + r, "%s ",
+				trace_opts[i].name);
+		else
+			r += sprintf(buf + r, "no%s ",
+				trace_opts[i].name);
+	}
+
 	r += sprintf(buf + r, "\n");
 	WARN_ON(r >= len + 2);
 
@@ -2224,13 +2513,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
 	return r;
 }
 
+/* Try to assign a tracer specific option */
+static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+{
+	struct tracer_flags *trace_flags = trace->flags;
+	struct tracer_opt *opts = NULL;
+	int ret = 0, i = 0;
+	int len;
+
+	for (i = 0; trace_flags->opts[i].name; i++) {
+		opts = &trace_flags->opts[i];
+		len = strlen(opts->name);
+
+		if (strncmp(cmp, opts->name, len) == 0) {
+			ret = trace->set_flag(trace_flags->val,
+				opts->bit, !neg);
+			break;
+		}
+	}
+	/* Not found */
+	if (!trace_flags->opts[i].name)
+		return -EINVAL;
+
+	/* Refused to handle */
+	if (ret)
+		return ret;
+
+	if (neg)
+		trace_flags->val &= ~opts->bit;
+	else
+		trace_flags->val |= opts->bit;
+
+	return 0;
+}
+
 static ssize_t
-tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
 	char *cmp = buf;
 	int neg = 0;
+	int ret;
 	int i;
 
 	if (cnt >= sizeof(buf))
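A write to trace_options is matched against the global option strings first; only if none match does set_tracer_option() try the current tracer's private list, and the tracer's set_flag() callback gets a chance to refuse before the bit is flipped. A sketch of a refusing callback (the option bit and capability flag are made up for illustration):

	#define MY_OPT_HW	0x2

	static int my_hw_present;	/* illustrative capability flag */

	static int my_set_flag(u32 old_flags, u32 bit, int set)
	{
		/* The error is passed back through set_tracer_option()
		 * to whoever wrote to trace_options. */
		if (bit == MY_OPT_HW && set && !my_hw_present)
			return -EINVAL;
		return 0;
	}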
@@ -2257,11 +2581,13 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			break;
 		}
 	}
-	/*
-	 * If no option could be set, return an error:
-	 */
-	if (!trace_options[i])
-		return -EINVAL;
+
+	/* If no option could be set, test the specific tracer options */
+	if (!trace_options[i]) {
+		ret = set_tracer_option(current_trace, cmp, neg);
+		if (ret)
+			return ret;
+	}
 
 	filp->f_pos += cnt;
 
@@ -2270,8 +2596,8 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 static struct file_operations tracing_iter_fops = {
 	.open = tracing_open_generic,
-	.read = tracing_iter_ctrl_read,
-	.write = tracing_iter_ctrl_write,
+	.read = tracing_trace_options_read,
+	.write = tracing_trace_options_write,
 };
 
 static const char readme_msg[] =
@@ -2285,9 +2611,9 @@ static const char readme_msg[] =
 	"# echo sched_switch > /debug/tracing/current_tracer\n"
 	"# cat /debug/tracing/current_tracer\n"
 	"sched_switch\n"
-	"# cat /debug/tracing/iter_ctrl\n"
+	"# cat /debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
-	"# echo print-parent > /debug/tracing/iter_ctrl\n"
+	"# echo print-parent > /debug/tracing/trace_options\n"
 	"# echo 1 > /debug/tracing/tracing_enabled\n"
 	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
 	"echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2310,11 +2636,10 @@ static ssize_t
 tracing_ctrl_read(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
 	char buf[64];
 	int r;
 
-	r = sprintf(buf, "%ld\n", tr->ctrl);
+	r = sprintf(buf, "%u\n", tracer_enabled);
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2342,16 +2667,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 	val = !!val;
 
 	mutex_lock(&trace_types_lock);
-	if (tr->ctrl ^ val) {
-		if (val)
+	if (tracer_enabled ^ val) {
+		if (val) {
 			tracer_enabled = 1;
-		else
+			if (current_trace->start)
+				current_trace->start(tr);
+			tracing_start();
+		} else {
 			tracer_enabled = 0;
-
-		tr->ctrl = val;
-
-		if (current_trace && current_trace->ctrl_update)
-			current_trace->ctrl_update(tr);
+			tracing_stop();
+			if (current_trace->stop)
+				current_trace->stop(tr);
+		}
 	}
 	mutex_unlock(&trace_types_lock);
 
@@ -2377,29 +2704,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static ssize_t
-tracing_set_trace_write(struct file *filp, const char __user *ubuf,
-			size_t cnt, loff_t *ppos)
+static int tracing_set_tracer(char *buf)
 {
 	struct trace_array *tr = &global_trace;
 	struct tracer *t;
-	char buf[max_tracer_type_len+1];
-	int i;
-	size_t ret;
-
-	ret = cnt;
-
-	if (cnt > max_tracer_type_len)
-		cnt = max_tracer_type_len;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	/* strip ending whitespace. */
-	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
-		buf[i] = 0;
+	int ret = 0;
 
 	mutex_lock(&trace_types_lock);
 	for (t = trace_types; t; t = t->next) {
@@ -2413,18 +2722,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	if (t == current_trace)
 		goto out;
 
+	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 
 	current_trace = t;
-	if (t->init)
-		t->init(tr);
+	if (t->init) {
+		ret = t->init(tr);
+		if (ret)
+			goto out;
+	}
 
+	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
 
-	if (ret > 0)
-		filp->f_pos += ret;
+	return ret;
+}
+
+static ssize_t
+tracing_set_trace_write(struct file *filp, const char __user *ubuf,
+			size_t cnt, loff_t *ppos)
+{
+	char buf[max_tracer_type_len+1];
+	int i;
+	size_t ret;
+	int err;
+
+	ret = cnt;
+
+	if (cnt > max_tracer_type_len)
+		cnt = max_tracer_type_len;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	err = tracing_set_tracer(buf);
+	if (err)
+		return err;
+
+	filp->f_pos += ret;
 
 	return ret;
 }
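Splitting tracing_set_tracer() out of the write handler is what makes the "ftrace=" boot parameter possible: the selection logic no longer needs a struct file. It also means t->init() now returns an int, so a tracer can veto its own activation. Sketched with a hypothetical tracer init:

	static int my_tracer_init(struct trace_array *tr)
	{
		void *state = kmalloc(64, GFP_KERNEL);

		if (!state)
			return -ENOMEM;	/* tracing_set_tracer passes
					 * this up to the caller */
		kfree(state);		/* sketch only; real code keeps it */
		return 0;
	}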
@@ -2491,6 +2834,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		return -ENOMEM;
 
 	mutex_lock(&trace_types_lock);
+
+	/* trace pipe does not show start of buffer */
+	cpus_setall(iter->started);
+
 	iter->tr = &global_trace;
 	iter->trace = current_trace;
 	filp->private_data = iter;
@@ -2666,7 +3013,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 	char buf[64];
 	int r;
 
-	r = sprintf(buf, "%lu\n", tr->entries);
+	r = sprintf(buf, "%lu\n", tr->entries >> 10);
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2677,7 +3024,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	unsigned long val;
 	char buf[64];
 	int ret, cpu;
-	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2697,12 +3043,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 
-	if (tr->ctrl) {
-		cnt = -EBUSY;
-		pr_info("ftrace: please disable tracing"
-			" before modifying buffer size\n");
-		goto out;
-	}
+	tracing_stop();
 
 	/* disable all cpu buffers */
 	for_each_tracing_cpu(cpu) {
@@ -2712,6 +3053,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		atomic_inc(&max_tr.data[cpu]->disabled);
 	}
 
+	/* value is in KB */
+	val <<= 10;
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2750,6 +3094,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		atomic_dec(&max_tr.data[cpu]->disabled);
 	}
 
+	tracing_start();
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
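The file now speaks kilobytes while ring_buffer_resize() and tr->entries stay in bytes, hence the paired shifts (assuming tr->entries counts bytes at this point in the series). A sketch of the round-trip behind the renamed buffer_size_kb:

	static unsigned long buffer_kb_to_bytes(unsigned long kb)
	{
		return kb << 10;	/* what tracing_entries_write does */
	}

	static unsigned long buffer_bytes_to_kb(unsigned long bytes)
	{
		return bytes >> 10;	/* what tracing_entries_read shows */
	}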
@@ -2772,9 +3117,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 {
 	char *buf;
 	char *end;
-	struct trace_array *tr = &global_trace;
 
-	if (!tr->ctrl || tracing_disabled)
+	if (tracing_disabled)
 		return -EINVAL;
 
 	if (cnt > TRACE_BUF_SIZE)
@@ -2840,22 +3184,38 @@ static struct file_operations tracing_mark_fops = {
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+int __weak ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	return 0;
+}
+
 static ssize_t
-tracing_read_long(struct file *filp, char __user *ubuf,
+tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
+	static char ftrace_dyn_info_buffer[1024];
+	static DEFINE_MUTEX(dyn_info_mutex);
 	unsigned long *p = filp->private_data;
-	char buf[64];
+	char *buf = ftrace_dyn_info_buffer;
+	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
 	int r;
 
-	r = sprintf(buf, "%ld\n", *p);
+	mutex_lock(&dyn_info_mutex);
+	r = sprintf(buf, "%ld ", *p);
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
+	buf[r++] = '\n';
+
+	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+	mutex_unlock(&dyn_info_mutex);
+
+	return r;
 }
 
-static struct file_operations tracing_read_long_fops = {
+static struct file_operations tracing_dyn_info_fops = {
 	.open = tracing_open_generic,
-	.read = tracing_read_long,
+	.read = tracing_read_dyn_info,
 };
 #endif
 
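ftrace_arch_read_dyn_info() is declared __weak, so the empty stub above only binds when an architecture does not provide its own strong definition; an arch that does can append its own text to dyn_ftrace_total_info. A hypothetical override (content invented for illustration):

	/* An arch could supply this; the return value is the number of
	 * bytes written, which the caller adds to its buffer offset. */
	int ftrace_arch_read_dyn_info(char *buf, int size)
	{
		return snprintf(buf, size, "patched call sites: %d", 0);
	}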
@@ -2896,10 +3256,10 @@ static __init int tracer_init_debugfs(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
 
-	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
+	entry = debugfs_create_file("trace_options", 0644, d_tracer,
 				    NULL, &tracing_iter_fops);
 	if (!entry)
-		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
+		pr_warning("Could not create debugfs 'trace_options' entry\n");
 
 	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
 				    NULL, &tracing_cpumask_fops);
@@ -2949,11 +3309,11 @@ static __init int tracer_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'trace_pipe' entry\n");
 
-	entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+	entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
 				    &global_trace, &tracing_entries_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
-			   "'trace_entries' entry\n");
+			   "'buffer_size_kb' entry\n");
 
 	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
 				    NULL, &tracing_mark_fops);
@@ -2964,7 +3324,7 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 				    &ftrace_update_tot_cnt,
-				    &tracing_read_long_fops);
+				    &tracing_dyn_info_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'dyn_ftrace_total_info' entry\n");
@@ -2987,7 +3347,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	unsigned long flags, irq_flags;
 	int cpu, len = 0, size, pc;
 
-	if (!tr->ctrl || tracing_disabled)
+	if (tracing_disabled)
 		return 0;
 
 	pc = preempt_count();
@@ -3045,7 +3405,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
 static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
-	ftrace_dump();
+	if (ftrace_dump_on_oops)
+		ftrace_dump();
 	return NOTIFY_OK;
 }
 
@@ -3061,7 +3422,8 @@ static int trace_die_handler(struct notifier_block *self,
 {
 	switch (val) {
 	case DIE_OOPS:
-		ftrace_dump();
+		if (ftrace_dump_on_oops)
+			ftrace_dump();
 		break;
 	default:
 		break;
@@ -3102,7 +3464,6 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_reset(s);
 }
 
-
 void ftrace_dump(void)
 {
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
@@ -3220,7 +3581,6 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
 	/* All seems OK, enable tracing */
-	global_trace.ctrl = tracer_enabled;
 	tracing_disabled = 0;
 
 	atomic_notifier_chain_register(&panic_notifier_list,