author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-18 11:35:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-18 11:35:04 -0400
commit		752f114fb83c5839de37a250b4f8257ed5438341 (patch)
tree		c565c3b6670d21ad7f5cd6cdda5864a76d3f2e0c
parent		b8ae30ee26d379db436b0b8c8c3ff1b52f69e5d1 (diff)
parent		ad56b0797e67df5e04b2f1a1e02900145c5c16f2 (diff)
Merge branch 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Fix "integer as NULL pointer" warning.
  tracing: Fix tracepoint.h DECLARE_TRACE() to allow more than one header
  tracing: Make the documentation clear on trace_event boot option
  ring-buffer: Wrap open-coded WARN_ONCE
  tracing: Convert nop macros to static inlines
  tracing: Fix sleep time function profiling
  tracing: Show sample std dev in function profiling
  tracing: Add documentation for trace commands mod, traceon/traceoff
  ring-buffer: Make benchmark handle missed events
  ring-buffer: Make non-consuming read less expensive with lots of cpus.
  tracing: Add graph output support for irqsoff tracer
  tracing: Have graph flags passed in to ouput functions
  tracing: Add ftrace events for graph tracer
  tracing: Dump either the oops's cpu source or all cpus buffers
  tracing: Fix uninitialized variable of tracing/trace output
-rw-r--r--	Documentation/kernel-parameters.txt	|   6
-rw-r--r--	Documentation/trace/events.txt		|   3
-rw-r--r--	Documentation/trace/ftrace.txt		|  50
-rw-r--r--	drivers/char/sysrq.c			|   2
-rw-r--r--	include/linux/ftrace.h			|  49
-rw-r--r--	include/linux/kernel.h			|  11
-rw-r--r--	include/linux/ring_buffer.h		|   4
-rw-r--r--	include/linux/tracepoint.h		| 114
-rw-r--r--	include/trace/define_trace.h		|   5
-rw-r--r--	include/trace/events/napi.h		|  10
-rw-r--r--	kernel/trace/ftrace.c			|  30
-rw-r--r--	kernel/trace/ring_buffer.c		|  78
-rw-r--r--	kernel/trace/ring_buffer_benchmark.c	|   3
-rw-r--r--	kernel/trace/trace.c			|  99
-rw-r--r--	kernel/trace/trace.h			|  27
-rw-r--r--	kernel/trace/trace_functions_graph.c	| 164
-rw-r--r--	kernel/trace/trace_irqsoff.c		| 271
-rw-r--r--	kernel/trace/trace_output.c		|   2
-rw-r--r--	kernel/trace/trace_selftest.c		|   5
19 files changed, 723 insertions, 210 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0c6c56076d19..567b7a8eb878 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -786,8 +786,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			as early as possible in order to facilitate early
 			boot debugging.
 
-	ftrace_dump_on_oops
+	ftrace_dump_on_oops[=orig_cpu]
 			[FTRACE] will dump the trace buffers on oops.
+			If no parameter is passed, ftrace will dump
+			buffers of all CPUs, but if you pass orig_cpu, it will
+			dump only the buffer of the CPU that triggered the
+			oops.
 
 	ftrace_filter=[function-list]
 			[FTRACE] Limit the functions traced by the function
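For illustration (this boot line is an example, not part of the patch), the new mode would be selected from the kernel command line like:

	ftrace_dump_on_oops=orig_cpu

while a bare ftrace_dump_on_oops keeps the old behaviour of dumping every CPU's buffer.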
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 02ac6ed38b2d..778ddf38b82c 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -90,7 +90,8 @@ In order to facilitate early boot debugging, use boot option:
 
 	trace_event=[event-list]
 
-The format of this boot option is the same as described in section 2.1.
+event-list is a comma separated list of events. See section 2.1 for event
+format.
 
 3. Defining an event-enabled tracepoint
 =======================================
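As a sketch of the clarified syntax (the event names here are illustrative, not taken from this patch):

	trace_event=sched:sched_switch,irq:irq_handler_entry

would enable those two events as early as possible during boot.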
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 03485bfbd797..557c1edeccaf 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -155,6 +155,9 @@ of ftrace. Here is a list of some of the key files:
 	to be traced. Echoing names of functions into this file
 	will limit the trace to only those functions.
 
+	This interface also allows for commands to be used. See the
+	"Filter commands" section for more details.
+
   set_ftrace_notrace:
 
 	This has an effect opposite to that of
@@ -1337,12 +1340,14 @@ ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
 can either use the sysctl function or set it via the proc system
 interface.
 
-  sysctl kernel.ftrace_dump_on_oops=1
+  sysctl kernel.ftrace_dump_on_oops=n
 
 or
 
-  echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
+  echo n > /proc/sys/kernel/ftrace_dump_on_oops
 
+If n = 1, ftrace will dump buffers of all CPUs, if n = 2 ftrace will
+only dump the buffer of the CPU that triggered the oops.
 
 Here's an example of such a dump after a null pointer
 dereference in a kernel module:
@@ -1822,6 +1827,47 @@ this special filter via:
   echo > set_graph_function
 
 
+Filter commands
+---------------
+
+A few commands are supported by the set_ftrace_filter interface.
+Trace commands have the following format:
+
+<function>:<command>:<parameter>
+
+The following commands are supported:
+
+- mod
+  This command enables function filtering per module. The
+  parameter defines the module. For example, if only the write*
+  functions in the ext3 module are desired, run:
+
+   echo 'write*:mod:ext3' > set_ftrace_filter
+
+  This command interacts with the filter in the same way as
+  filtering based on function names. Thus, adding more functions
+  in a different module is accomplished by appending (>>) to the
+  filter file. Remove specific module functions by prepending
+  '!':
+
+   echo '!writeback*:mod:ext3' >> set_ftrace_filter
+
+- traceon/traceoff
+  These commands turn tracing on and off when the specified
+  functions are hit. The parameter determines how many times the
+  tracing system is turned on and off. If unspecified, there is
+  no limit. For example, to disable tracing when a schedule bug
+  is hit the first 5 times, run:
+
+   echo '__schedule_bug:traceoff:5' > set_ftrace_filter
+
+  These commands are cumulative whether or not they are appended
+  to set_ftrace_filter. To remove a command, prepend it by '!'
+  and drop the parameter:
+
+   echo '!__schedule_bug:traceoff' > set_ftrace_filter
+
+
 trace_pipe
 ----------
 
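A short worked sequence combining the documented commands (names as in the examples above; assumes debugfs is mounted at /sys/kernel/debug):

	cd /sys/kernel/debug/tracing
	echo 'write*:mod:ext3' > set_ftrace_filter		# trace only ext3's write* functions
	echo '__schedule_bug:traceoff:5' >> set_ftrace_filter	# appended: stop tracing on the first 5 hits
	cat set_ftrace_filter					# commands are listed next to plain filters
	echo '!__schedule_bug:traceoff' >> set_ftrace_filter	# remove the command again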
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 59de2525d303..d4e8b213a462 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -289,7 +289,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 
 static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
 {
-	ftrace_dump();
+	ftrace_dump(DUMP_ALL);
 }
 static struct sysrq_key_op sysrq_ftrace_dump_op = {
 	.handler	= sysrq_ftrace_dump,
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index cc12b3c556b3..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)		do { } while (0)
-# define time_hardirqs_off(a0, a1)		do { } while (0)
+  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)		do { } while (0)
-# define trace_preempt_off(a0, a1)		do { } while (0)
+  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
 	int depth;
 };
 
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			  trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
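The comment added above hints at why the two registration stubs stay macros while everything else becomes a static inline: a macro never evaluates its argument, so callers compile even when the ops expression cannot be evaluated with tracing configured out. A minimal standalone sketch of that distinction (illustrative only, not kernel code):

	/* A macro ignores 'ops' entirely; even an incomplete type is fine. */
	struct my_ops;				/* hypothetical, never defined */
	#define register_stub(ops)	({ 0; })	/* 'ops' is never evaluated */

	/* A static inline would force 'ops' to be a valid, typed expression. */
	static inline int register_inline(struct my_ops *ops) { return 0; }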
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9365227dbaf6..9fb1c1299032 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -490,6 +490,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -571,7 +578,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +599,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 	return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 
 /*
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index c8297761e414..25b4f686d918 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -127,7 +127,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
 
 struct ring_buffer_event *
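The split API lets readers pay the synchronize_sched() cost once for a whole set of per-cpu iterators instead of once per CPU; the intended calling sequence, sketched from the pattern __tracing_open adopts later in this series, is:

	/* Sketch: prepare every iterator first, synchronize once, then start. */
	for_each_tracing_cpu(cpu)
		iter[cpu] = ring_buffer_read_prepare(buffer, cpu);
	ring_buffer_read_prepare_sync();	/* one RCU-sched grace period */
	for_each_tracing_cpu(cpu)
		ring_buffer_read_start(iter[cpu]);	/* no further waiting per CPU */
	/* ... iterate ... */
	for_each_tracing_cpu(cpu)
		ring_buffer_read_finish(iter[cpu]);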
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be496..1d85f9a6a199 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,6 +33,65 @@ struct tracepoint {
  * Keep in sync with vmlinux.lds.h.
  */
 
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+	struct module *module;
+	struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+	struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+	synchronize_sched();
+}
+
+#define PARAMS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end);
+#else
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+	struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ *  This is due to the way trace events work. If a file includes two
+ *  trace event headers under one "CREATE_TRACE_POINTS" the first include
+ *  will override the TRACE_EVENT and break the second include.
+ */
+
 #ifndef DECLARE_TRACE
 
 #define TP_PROTO(args...)	args
@@ -96,9 +155,6 @@ struct tracepoint {
 #define EXPORT_TRACEPOINT_SYMBOL(name)					\
 	EXPORT_SYMBOL(__tracepoint_##name)
 
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end);
-
 #else /* !CONFIG_TRACEPOINTS */
 #define DECLARE_TRACE(name, proto, args)				\
 	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
@@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
 #define EXPORT_TRACEPOINT_SYMBOL(name)
 
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end)
-{ }
 #endif /* CONFIG_TRACEPOINTS */
 #endif /* DECLARE_TRACE */
 
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
-	struct module *module;
-	struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end);
-
-/*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
- */
-static inline void tracepoint_synchronize_unregister(void)
-{
-	synchronize_sched();
-}
-
-#define PARAMS(args...) args
-
-#endif /* _LINUX_TRACEPOINT_H */
-
-/*
- * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
- *  This is due to the way trace events work. If a file includes two
- *  trace event headers under one "CREATE_TRACE_POINTS" the first include
- *  will override the TRACE_EVENT and break the second include.
- */
-
 #ifndef TRACE_EVENT
 /*
  * For use with the TRACE_EVENT macro:
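The effect of hoisting these declarations above the header guard is that one compilation unit can now instantiate tracepoints from several trace headers; a hypothetical user (header names made up) would look like:

	/* Sketch: both includes now work under a single CREATE_TRACE_POINTS. */
	#define CREATE_TRACE_POINTS
	#include <trace/events/first.h>
	#include <trace/events/second.h>	/* previously broken by the first */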
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
 #ifdef CONFIG_EVENT_TRACING
 #include <trace/ftrace.h>
 #endif
@@ -75,6 +79,7 @@
 #undef DEFINE_EVENT
 #undef DEFINE_EVENT_PRINT
 #undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
 
 /* Only undef what we defined in this file */
 #ifdef UNDEF_TRACE_INCLUDE_FILE
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
-#ifndef _TRACE_NAPI_H_
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_NAPI_H_
 
 #include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
 	TP_PROTO(struct napi_struct *napi),
 	TP_ARGS(napi));
 
-#endif
+#endif /* _TRACE_NAPI_H_ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index aa3a92b511e2..32837e19e3bd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -264,6 +264,7 @@ struct ftrace_profile {
 	unsigned long			counter;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	unsigned long long		time;
+	unsigned long long		time_squared;
 #endif
 };
 
@@ -366,9 +367,9 @@ static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	seq_printf(m, "  Function                               "
-		   "Hit    Time            Avg\n"
+		   "Hit    Time            Avg             s^2\n"
 		      "  --------                               "
-		   "---    ----            ---\n");
+		   "---    ----            ---             ---\n");
 #else
 	seq_printf(m, "  Function                               Hit\n"
 		      "  --------                               ---\n");
@@ -384,6 +385,7 @@ static int function_stat_show(struct seq_file *m, void *v)
 	static DEFINE_MUTEX(mutex);
 	static struct trace_seq s;
 	unsigned long long avg;
+	unsigned long long stddev;
 #endif
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -394,11 +396,25 @@ static int function_stat_show(struct seq_file *m, void *v)
 	avg = rec->time;
 	do_div(avg, rec->counter);
 
+	/* Sample standard deviation (s^2) */
+	if (rec->counter <= 1)
+		stddev = 0;
+	else {
+		stddev = rec->time_squared - rec->counter * avg * avg;
+		/*
+		 * Divide only 1000 for ns^2 -> us^2 conversion.
+		 * trace_print_graph_duration will divide 1000 again.
+		 */
+		do_div(stddev, (rec->counter - 1) * 1000);
+	}
+
 	mutex_lock(&mutex);
 	trace_seq_init(&s);
 	trace_print_graph_duration(rec->time, &s);
 	trace_seq_puts(&s, "    ");
 	trace_print_graph_duration(avg, &s);
+	trace_seq_puts(&s, "    ");
+	trace_print_graph_duration(stddev, &s);
 	trace_print_seq(m, &s);
 	mutex_unlock(&mutex);
 #endif
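The stddev computed above is the sample variance (the column is labelled s^2; no square root is taken), obtained from the running sums via the usual shortcut identity, with n = rec->counter and x_i the individual call times in ns:

	s^2 = \frac{1}{n-1}\sum_i (x_i - \bar{x})^2
	    = \frac{\sum_i x_i^2 - n\,\bar{x}^2}{n-1}

rec->time_squared supplies \sum_i x_i^2 and rec->counter * avg * avg approximates n\bar{x}^2 (avg is already truncated by do_div); the two successive divisions by 1000, here and in trace_print_graph_duration, convert ns^2 to us^2.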
@@ -650,6 +666,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	if (!stat->hash || !ftrace_profile_enabled)
 		goto out;
 
+	/* If the calltime was zero'd ignore it */
+	if (!trace->calltime)
+		goto out;
+
 	calltime = trace->rettime - trace->calltime;
 
 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
@@ -668,8 +688,10 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	}
 
 	rec = ftrace_find_profiled_func(stat, trace->func);
-	if (rec)
+	if (rec) {
 		rec->time += calltime;
+		rec->time_squared += calltime * calltime;
+	}
 
  out:
 	local_irq_restore(flags);
@@ -3338,11 +3360,11 @@ void unregister_ftrace_graph(void)
 		goto out;
 
 	ftrace_graph_active--;
-	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
+	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 
  out:
 	mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5885cdfc41f3..7f6059c5aa94 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2000,17 +2000,13 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 			  u64 *ts, u64 *delta)
 {
 	struct ring_buffer_event *event;
-	static int once;
 	int ret;
 
-	if (unlikely(*delta > (1ULL << 59) && !once++)) {
-		printk(KERN_WARNING "Delta way too big! %llu"
-		       " ts=%llu write stamp = %llu\n",
-		       (unsigned long long)*delta,
-		       (unsigned long long)*ts,
-		       (unsigned long long)cpu_buffer->write_stamp);
-		WARN_ON(1);
-	}
+	WARN_ONCE(*delta > (1ULL << 59),
+		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+		  (unsigned long long)*delta,
+		  (unsigned long long)*ts,
+		  (unsigned long long)cpu_buffer->write_stamp);
 
 	/*
 	 * The delta is too big, we to add a
@@ -3332,23 +3328,30 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
- * ring_buffer_read_start - start a non consuming read of the buffer
+ * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
  *
- * This starts up an iteration through the buffer. It also disables
- * the recording to the buffer until the reading is finished.
- * This prevents the reading from being corrupted. This is not
- * a consuming read, so a producer is not expected.
+ * This performs the initial preparations necessary to iterate
+ * through the buffer. Memory is allocated, buffer recording
+ * is disabled, and the iterator pointer is returned to the caller.
  *
- * Must be paired with ring_buffer_finish.
+ * Disabling buffer recordng prevents the reading from being
+ * corrupted. This is not a consuming read, so a producer is not
+ * expected.
+ *
+ * After a sequence of ring_buffer_read_prepare calls, the user is
+ * expected to make at least one call to ring_buffer_prepare_sync.
+ * Afterwards, ring_buffer_read_start is invoked to get things going
+ * for real.
+ *
+ * This overall must be paired with ring_buffer_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
-	unsigned long flags;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
@@ -3362,15 +3365,52 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	iter->cpu_buffer = cpu_buffer;
 
 	atomic_inc(&cpu_buffer->record_disabled);
+
+	return iter;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
+
+/**
+ * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
+ *
+ * All previously invoked ring_buffer_read_prepare calls to prepare
+ * iterators will be synchronized.  Afterwards, read_buffer_read_start
+ * calls on those iterators are allowed.
+ */
+void
+ring_buffer_read_prepare_sync(void)
+{
 	synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
+
+/**
+ * ring_buffer_read_start - start a non consuming read of the buffer
+ * @iter: The iterator returned by ring_buffer_read_prepare
+ *
+ * This finalizes the startup of an iteration through the buffer.
+ * The iterator comes from a call to ring_buffer_read_prepare and
+ * an intervening ring_buffer_read_prepare_sync must have been
+ * performed.
+ *
+ * Must be paired with ring_buffer_finish.
+ */
+void
+ring_buffer_read_start(struct ring_buffer_iter *iter)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+
+	if (!iter)
+		return;
+
+	cpu_buffer = iter->cpu_buffer;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-	return iter;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index dc56556b55a2..302f8a614635 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -113,7 +113,8 @@ static enum event_status read_page(int cpu)
 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
 	if (ret >= 0) {
 		rpage = bpage;
-		commit = local_read(&rpage->commit);
+		/* The commit may have missed event flags set, clear them */
+		commit = local_read(&rpage->commit) & 0xfffff;
 		for (i = 0; i < commit && !kill_test; i += inc) {
 
 			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
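The 0xfffff mask assumes the commit field of a swapped-out reader page keeps its byte count in the low 20 bits and packs the missed-events flag bits above them; a sketch of what the benchmark relies on (mask name invented for clarity, flag layout assumed from the ring-buffer's RB_MISSED_* bits):

	#define RB_COMMIT_MASK	0xfffff		/* low 20 bits: data length */
	commit = local_read(&rpage->commit) & RB_COMMIT_MASK;	/* bytes to walk */
	/* anything above bit 19 is flag state, e.g. "events were missed" markers */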
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 60f3b6289731..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-	ftrace_dump_on_oops = 1;
-	return 1;
+	if (*str++ != '=' || !*str) {
+		ftrace_dump_on_oops = DUMP_ALL;
+		return 1;
+	}
+
+	if (!strcmp("orig_cpu", str)) {
+		ftrace_dump_on_oops = DUMP_ORIG;
+		return 1;
+	}
+
+	return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
@@ -1571,7 +1583,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
-	unsigned long lost_events, next_lost = 0;
+	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1796,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -2005,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
 	int cpu;
 
@@ -2072,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 	return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+		/* print nothing if the buffers are empty */
+		if (trace_empty(iter))
+			return;
+		print_trace_header(m, iter);
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_lat_help_header(m);
+	} else {
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_func_help_header(m);
+	}
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2084,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
 	}
 	if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
-	else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-		/* print nothing if the buffers are empty */
-		if (trace_empty(iter))
-			return 0;
-		print_trace_header(m, iter);
-		if (!(trace_flags & TRACE_ITER_VERBOSE))
-			print_lat_help_header(m);
-	} else {
-		if (!(trace_flags & TRACE_ITER_VERBOSE))
-			print_func_help_header(m);
-	}
+	else
+		trace_default_header(m);
+
 	} else if (iter->leftover) {
 		/*
 		 * If we filled the seq_file buffer earlier, we
@@ -2180,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
-
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_start(iter->tr->buffer, cpu);
+				ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		}
+		ring_buffer_read_prepare_sync();
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_read_start(iter->buffer_iter[cpu]);
 			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_start(iter->tr->buffer, cpu);
+			ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		ring_buffer_read_prepare_sync();
+		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
 	}
 
@@ -4338,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
 	if (ftrace_dump_on_oops)
-		ftrace_dump();
+		ftrace_dump(ftrace_dump_on_oops);
 	return NOTIFY_OK;
 }
 
@@ -4355,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
-			ftrace_dump();
+			ftrace_dump(ftrace_dump_on_oops);
 		break;
 	default:
 		break;
@@ -4396,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 	static arch_spinlock_t ftrace_dump_lock =
 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4429,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
 	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
-	iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+	switch (oops_dump_mode) {
+	case DUMP_ALL:
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+		break;
+	case DUMP_ORIG:
+		iter.cpu_file = raw_smp_processor_id();
+		break;
+	case DUMP_NONE:
+		goto out_enable;
+	default:
+		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
@@ -4473,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
 	/* Re-enable tracing if requested */
 	if (!disable_tracing) {
 		trace_flags |= old_userobj;
@@ -4489,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	__ftrace_dump(true);
+	__ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3ebdb6bd2362..d1ce0bec1b3f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -364,6 +364,9 @@ void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
+void trace_default_header(struct seq_file *m);
+void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
+int trace_empty(struct trace_iterator *iter);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
@@ -475,9 +478,29 @@ extern int trace_clock_id;
 
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+
+/* Flag options */
+#define TRACE_GRAPH_PRINT_OVERRUN       0x1
+#define TRACE_GRAPH_PRINT_CPU           0x2
+#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
+#define TRACE_GRAPH_PRINT_PROC          0x8
+#define TRACE_GRAPH_PRINT_DURATION      0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
+
+extern enum print_line_t
+print_graph_function_flags(struct trace_iterator *iter, u32 flags);
+extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 extern enum print_line_t
 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
+extern void graph_trace_open(struct trace_iterator *iter);
+extern void graph_trace_close(struct trace_iterator *iter);
+extern int __trace_graph_entry(struct trace_array *tr,
+			       struct ftrace_graph_ent *trace,
+			       unsigned long flags, int pc);
+extern void __trace_graph_return(struct trace_array *tr,
+				 struct ftrace_graph_ret *trace,
+				 unsigned long flags, int pc);
+
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
@@ -508,7 +531,7 @@ static inline int ftrace_graph_addr(unsigned long addr)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
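With the flag bits now shared through trace.h, each tracer can choose its column set and hand it to the common graph output path; an illustrative caller (flag combination arbitrary) mirroring what the irqsoff tracer does elsewhere in this series:

	/* Sketch: ask the shared graph code for CPU and duration columns. */
	#define MY_GRAPH_FLAGS (TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION)

	static enum print_line_t my_print_line(struct trace_iterator *iter)
	{
		return print_graph_function_flags(iter, MY_GRAPH_FLAGS);
	}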
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 669b9c31861d..dd11c830eb84 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -40,7 +40,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
 #define TRACE_GRAPH_PRINT_PROC		0x8
 #define TRACE_GRAPH_PRINT_DURATION	0x10
-#define TRACE_GRAPH_PRINT_ABS_TIME	0X20
+#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */
@@ -179,7 +179,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
-static int __trace_graph_entry(struct trace_array *tr,
+int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -246,7 +246,7 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
 	return trace_graph_entry(trace);
 }
 
-static void __trace_graph_return(struct trace_array *tr,
+void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
@@ -527,17 +527,18 @@ get_return_for_leaf(struct trace_iterator *iter,
 
 /* Signal a overhead of time execution to the output */
 static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+print_graph_overhead(unsigned long long duration, struct trace_seq *s,
+		     u32 flags)
 {
 	/* If duration disappear, we don't need anything */
-	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+	if (!(flags & TRACE_GRAPH_PRINT_DURATION))
 		return 1;
 
 	/* Non nested entry or return */
 	if (duration == -1)
 		return trace_seq_printf(s, "  ");
 
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
 		/* Duration exceeded 100 msecs */
 		if (duration > 100000ULL)
 			return trace_seq_printf(s, "! ");
@@ -563,7 +564,7 @@ static int print_graph_abs_time(u64 t, struct trace_seq *s)
 
 static enum print_line_t
 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
-		enum trace_type type, int cpu, pid_t pid)
+		enum trace_type type, int cpu, pid_t pid, u32 flags)
 {
 	int ret;
 	struct trace_seq *s = &iter->seq;
@@ -573,21 +574,21 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		return TRACE_TYPE_UNHANDLED;
 
 	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
 		ret = print_graph_abs_time(iter->ts, s);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+	if (flags & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+	if (flags & TRACE_GRAPH_PRINT_PROC) {
 		ret = print_graph_proc(s, pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -597,7 +598,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 	}
 
 	/* No overhead */
-	ret = print_graph_overhead(-1, s);
+	ret = print_graph_overhead(-1, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -610,7 +611,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Don't close the duration column if haven't one */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+	if (flags & TRACE_GRAPH_PRINT_DURATION)
 		trace_seq_printf(s, " |");
 	ret = trace_seq_printf(s, "\n");
 
@@ -680,7 +681,8 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 static enum print_line_t
 print_graph_entry_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *entry,
-		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
+		struct ftrace_graph_ret_entry *ret_entry,
+		struct trace_seq *s, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
@@ -712,12 +714,12 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	}
 
 	/* Overhead */
-	ret = print_graph_overhead(duration, s);
+	ret = print_graph_overhead(duration, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* Duration */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+	if (flags & TRACE_GRAPH_PRINT_DURATION) {
 		ret = print_graph_duration(duration, s);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -740,7 +742,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 static enum print_line_t
 print_graph_entry_nested(struct trace_iterator *iter,
 			 struct ftrace_graph_ent_entry *entry,
-			 struct trace_seq *s, int cpu)
+			 struct trace_seq *s, int cpu, u32 flags)
 {
 	struct ftrace_graph_ent *call = &entry->graph_ent;
 	struct fgraph_data *data = iter->private;
@@ -760,12 +762,12 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	}
 
 	/* No overhead */
-	ret = print_graph_overhead(-1, s);
+	ret = print_graph_overhead(-1, s, flags);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	/* No time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+	if (flags & TRACE_GRAPH_PRINT_DURATION) {
 		ret = trace_seq_printf(s, "            |  ");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -791,7 +793,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 
 static enum print_line_t
 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
-		     int type, unsigned long addr)
+		     int type, unsigned long addr, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
@@ -804,27 +806,27 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 
 	if (type) {
 		/* Interrupt */
-		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
 		ret = print_graph_abs_time(iter->ts, s);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+	if (flags & TRACE_GRAPH_PRINT_CPU) {
 		ret = print_graph_cpu(s, cpu);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
 	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+	if (flags & TRACE_GRAPH_PRINT_PROC) {
 		ret = print_graph_proc(s, ent->pid);
 		if (ret == TRACE_TYPE_PARTIAL_LINE)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -846,7 +848,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-			struct trace_iterator *iter)
+			struct trace_iterator *iter, u32 flags)
 {
 	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ent *call = &field->graph_ent;
@@ -854,14 +856,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	static enum print_line_t ret;
 	int cpu = iter->cpu;
 
-	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
-		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
+		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
 	else
-		ret = print_graph_entry_nested(iter, field, s, cpu);
+		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
 
 	if (data) {
 		/*
@@ -880,7 +882,8 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
880 882
881static enum print_line_t 883static enum print_line_t
882print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, 884print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
883 struct trace_entry *ent, struct trace_iterator *iter) 885 struct trace_entry *ent, struct trace_iterator *iter,
886 u32 flags)
884{ 887{
885 unsigned long long duration = trace->rettime - trace->calltime; 888 unsigned long long duration = trace->rettime - trace->calltime;
886 struct fgraph_data *data = iter->private; 889 struct fgraph_data *data = iter->private;
@@ -910,16 +913,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
910 } 913 }
911 } 914 }
912 915
913 if (print_graph_prologue(iter, s, 0, 0)) 916 if (print_graph_prologue(iter, s, 0, 0, flags))
914 return TRACE_TYPE_PARTIAL_LINE; 917 return TRACE_TYPE_PARTIAL_LINE;
915 918
916 /* Overhead */ 919 /* Overhead */
917 ret = print_graph_overhead(duration, s); 920 ret = print_graph_overhead(duration, s, flags);
918 if (!ret) 921 if (!ret)
919 return TRACE_TYPE_PARTIAL_LINE; 922 return TRACE_TYPE_PARTIAL_LINE;
920 923
921 /* Duration */ 924 /* Duration */
922 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 925 if (flags & TRACE_GRAPH_PRINT_DURATION) {
923 ret = print_graph_duration(duration, s); 926 ret = print_graph_duration(duration, s);
924 if (ret == TRACE_TYPE_PARTIAL_LINE) 927 if (ret == TRACE_TYPE_PARTIAL_LINE)
925 return TRACE_TYPE_PARTIAL_LINE; 928 return TRACE_TYPE_PARTIAL_LINE;
@@ -949,14 +952,15 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
949 } 952 }
950 953
951 /* Overrun */ 954 /* Overrun */
952 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { 955 if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
953 ret = trace_seq_printf(s, " (Overruns: %lu)\n", 956 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
954 trace->overrun); 957 trace->overrun);
955 if (!ret) 958 if (!ret)
956 return TRACE_TYPE_PARTIAL_LINE; 959 return TRACE_TYPE_PARTIAL_LINE;
957 } 960 }
958 961
959 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); 962 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
963 cpu, pid, flags);
960 if (ret == TRACE_TYPE_PARTIAL_LINE) 964 if (ret == TRACE_TYPE_PARTIAL_LINE)
961 return TRACE_TYPE_PARTIAL_LINE; 965 return TRACE_TYPE_PARTIAL_LINE;
962 966
@@ -964,8 +968,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
964} 968}
965 969
966static enum print_line_t 970static enum print_line_t
967print_graph_comment(struct trace_seq *s, struct trace_entry *ent, 971print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
968 struct trace_iterator *iter) 972 struct trace_iterator *iter, u32 flags)
969{ 973{
970 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 974 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
971 struct fgraph_data *data = iter->private; 975 struct fgraph_data *data = iter->private;
@@ -977,16 +981,16 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
977 if (data) 981 if (data)
978 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; 982 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
979 983
980 if (print_graph_prologue(iter, s, 0, 0)) 984 if (print_graph_prologue(iter, s, 0, 0, flags))
981 return TRACE_TYPE_PARTIAL_LINE; 985 return TRACE_TYPE_PARTIAL_LINE;
982 986
983 /* No overhead */ 987 /* No overhead */
984 ret = print_graph_overhead(-1, s); 988 ret = print_graph_overhead(-1, s, flags);
985 if (!ret) 989 if (!ret)
986 return TRACE_TYPE_PARTIAL_LINE; 990 return TRACE_TYPE_PARTIAL_LINE;
987 991
988 /* No time */ 992 /* No time */
989 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 993 if (flags & TRACE_GRAPH_PRINT_DURATION) {
990 ret = trace_seq_printf(s, " | "); 994 ret = trace_seq_printf(s, " | ");
991 if (!ret) 995 if (!ret)
992 return TRACE_TYPE_PARTIAL_LINE; 996 return TRACE_TYPE_PARTIAL_LINE;
@@ -1041,7 +1045,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1041 1045
1042 1046
1043enum print_line_t 1047enum print_line_t
1044print_graph_function(struct trace_iterator *iter) 1048print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1045{ 1049{
1046 struct ftrace_graph_ent_entry *field; 1050 struct ftrace_graph_ent_entry *field;
1047 struct fgraph_data *data = iter->private; 1051 struct fgraph_data *data = iter->private;
@@ -1062,7 +1066,7 @@ print_graph_function(struct trace_iterator *iter)
1062 if (data && data->failed) { 1066 if (data && data->failed) {
1063 field = &data->ent; 1067 field = &data->ent;
1064 iter->cpu = data->cpu; 1068 iter->cpu = data->cpu;
1065 ret = print_graph_entry(field, s, iter); 1069 ret = print_graph_entry(field, s, iter, flags);
1066 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { 1070 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1067 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; 1071 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1068 ret = TRACE_TYPE_NO_CONSUME; 1072 ret = TRACE_TYPE_NO_CONSUME;
@@ -1082,32 +1086,49 @@ print_graph_function(struct trace_iterator *iter)
1082 struct ftrace_graph_ent_entry saved; 1086 struct ftrace_graph_ent_entry saved;
1083 trace_assign_type(field, entry); 1087 trace_assign_type(field, entry);
1084 saved = *field; 1088 saved = *field;
1085 return print_graph_entry(&saved, s, iter); 1089 return print_graph_entry(&saved, s, iter, flags);
1086 } 1090 }
1087 case TRACE_GRAPH_RET: { 1091 case TRACE_GRAPH_RET: {
1088 struct ftrace_graph_ret_entry *field; 1092 struct ftrace_graph_ret_entry *field;
1089 trace_assign_type(field, entry); 1093 trace_assign_type(field, entry);
1090 return print_graph_return(&field->ret, s, entry, iter); 1094 return print_graph_return(&field->ret, s, entry, iter, flags);
1091 } 1095 }
1096 case TRACE_STACK:
1097 case TRACE_FN:
 1098 /* don't trace stack and functions as comments */
1099 return TRACE_TYPE_UNHANDLED;
1100
1092 default: 1101 default:
1093 return print_graph_comment(s, entry, iter); 1102 return print_graph_comment(s, entry, iter, flags);
1094 } 1103 }
1095 1104
1096 return TRACE_TYPE_HANDLED; 1105 return TRACE_TYPE_HANDLED;
1097} 1106}
1098 1107
1099static void print_lat_header(struct seq_file *s) 1108static enum print_line_t
1109print_graph_function(struct trace_iterator *iter)
1110{
1111 return print_graph_function_flags(iter, tracer_flags.val);
1112}
1113
1114static enum print_line_t
1115print_graph_function_event(struct trace_iterator *iter, int flags)
1116{
1117 return print_graph_function(iter);
1118}
1119
1120static void print_lat_header(struct seq_file *s, u32 flags)
1100{ 1121{
1101 static const char spaces[] = " " /* 16 spaces */ 1122 static const char spaces[] = " " /* 16 spaces */
1102 " " /* 4 spaces */ 1123 " " /* 4 spaces */
1103 " "; /* 17 spaces */ 1124 " "; /* 17 spaces */
1104 int size = 0; 1125 int size = 0;
1105 1126
1106 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 1127 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1107 size += 16; 1128 size += 16;
1108 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 1129 if (flags & TRACE_GRAPH_PRINT_CPU)
1109 size += 4; 1130 size += 4;
1110 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 1131 if (flags & TRACE_GRAPH_PRINT_PROC)
1111 size += 17; 1132 size += 17;
1112 1133
1113 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); 1134 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
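
The hunks above convert the graph output path from reading the tracer-private tracer_flags.val to taking an explicit u32 flags argument, with print_graph_function() kept as a thin wrapper passing the old default, so another tracer can drive the same printer with its own flag set. A minimal standalone sketch of the pattern, in plain userspace C with illustrative names (not the kernel API):

#include <stdio.h>
#include <stdint.h>

#define PRINT_ABS_TIME	0x1
#define PRINT_CPU	0x2
#define PRINT_PROC	0x4

/* the tracer's own default option set, analogous to tracer_flags.val */
static uint32_t tracer_default_flags = PRINT_CPU;

/* takes the flag set explicitly, so any caller can choose the columns */
static void print_entry_flags(uint32_t flags, int cpu, const char *comm,
			      const char *func)
{
	if (flags & PRINT_ABS_TIME)
		printf("%14s ", "123.456789:");	/* placeholder timestamp */
	if (flags & PRINT_CPU)
		printf("%3d) ", cpu);
	if (flags & PRINT_PROC)
		printf("%-12s | ", comm);
	printf("%s();\n", func);
}

/* thin wrapper preserving the old single-caller behavior */
static void print_entry(int cpu, const char *comm, const char *func)
{
	print_entry_flags(tracer_default_flags, cpu, comm, func);
}

int main(void)
{
	print_entry(1, "bash-1234", "do_work");			/* default columns */
	print_entry_flags(PRINT_CPU | PRINT_PROC, 0,		/* caller-chosen */
			  "idle-0", "schedule");
	return 0;
}
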
@@ -1118,43 +1139,48 @@ static void print_lat_header(struct seq_file *s)
1118 seq_printf(s, "#%.*s|||| / \n", size, spaces); 1139 seq_printf(s, "#%.*s|||| / \n", size, spaces);
1119} 1140}
1120 1141
1121static void print_graph_headers(struct seq_file *s) 1142void print_graph_headers_flags(struct seq_file *s, u32 flags)
1122{ 1143{
1123 int lat = trace_flags & TRACE_ITER_LATENCY_FMT; 1144 int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1124 1145
1125 if (lat) 1146 if (lat)
1126 print_lat_header(s); 1147 print_lat_header(s, flags);
1127 1148
1128 /* 1st line */ 1149 /* 1st line */
1129 seq_printf(s, "#"); 1150 seq_printf(s, "#");
1130 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 1151 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1131 seq_printf(s, " TIME "); 1152 seq_printf(s, " TIME ");
1132 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 1153 if (flags & TRACE_GRAPH_PRINT_CPU)
1133 seq_printf(s, " CPU"); 1154 seq_printf(s, " CPU");
1134 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 1155 if (flags & TRACE_GRAPH_PRINT_PROC)
1135 seq_printf(s, " TASK/PID "); 1156 seq_printf(s, " TASK/PID ");
1136 if (lat) 1157 if (lat)
1137 seq_printf(s, "|||||"); 1158 seq_printf(s, "|||||");
1138 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 1159 if (flags & TRACE_GRAPH_PRINT_DURATION)
1139 seq_printf(s, " DURATION "); 1160 seq_printf(s, " DURATION ");
1140 seq_printf(s, " FUNCTION CALLS\n"); 1161 seq_printf(s, " FUNCTION CALLS\n");
1141 1162
1142 /* 2nd line */ 1163 /* 2nd line */
1143 seq_printf(s, "#"); 1164 seq_printf(s, "#");
1144 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 1165 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1145 seq_printf(s, " | "); 1166 seq_printf(s, " | ");
1146 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 1167 if (flags & TRACE_GRAPH_PRINT_CPU)
1147 seq_printf(s, " | "); 1168 seq_printf(s, " | ");
1148 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 1169 if (flags & TRACE_GRAPH_PRINT_PROC)
1149 seq_printf(s, " | | "); 1170 seq_printf(s, " | | ");
1150 if (lat) 1171 if (lat)
1151 seq_printf(s, "|||||"); 1172 seq_printf(s, "|||||");
1152 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 1173 if (flags & TRACE_GRAPH_PRINT_DURATION)
1153 seq_printf(s, " | | "); 1174 seq_printf(s, " | | ");
1154 seq_printf(s, " | | | |\n"); 1175 seq_printf(s, " | | | |\n");
1155} 1176}
1156 1177
1157static void graph_trace_open(struct trace_iterator *iter) 1178void print_graph_headers(struct seq_file *s)
1179{
1180 print_graph_headers_flags(s, tracer_flags.val);
1181}
1182
1183void graph_trace_open(struct trace_iterator *iter)
1158{ 1184{
1159 /* pid and depth on the last trace processed */ 1185 /* pid and depth on the last trace processed */
1160 struct fgraph_data *data; 1186 struct fgraph_data *data;
@@ -1189,7 +1215,7 @@ static void graph_trace_open(struct trace_iterator *iter)
1189 pr_warning("function graph tracer: not enough memory\n"); 1215 pr_warning("function graph tracer: not enough memory\n");
1190} 1216}
1191 1217
1192static void graph_trace_close(struct trace_iterator *iter) 1218void graph_trace_close(struct trace_iterator *iter)
1193{ 1219{
1194 struct fgraph_data *data = iter->private; 1220 struct fgraph_data *data = iter->private;
1195 1221
@@ -1199,6 +1225,16 @@ static void graph_trace_close(struct trace_iterator *iter)
1199 } 1225 }
1200} 1226}
1201 1227
1228static struct trace_event graph_trace_entry_event = {
1229 .type = TRACE_GRAPH_ENT,
1230 .trace = print_graph_function_event,
1231};
1232
1233static struct trace_event graph_trace_ret_event = {
1234 .type = TRACE_GRAPH_RET,
1235 .trace = print_graph_function_event,
1236};
1237
1202static struct tracer graph_trace __read_mostly = { 1238static struct tracer graph_trace __read_mostly = {
1203 .name = "function_graph", 1239 .name = "function_graph",
1204 .open = graph_trace_open, 1240 .open = graph_trace_open,
@@ -1220,6 +1256,16 @@ static __init int init_graph_trace(void)
1220{ 1256{
1221 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); 1257 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1222 1258
1259 if (!register_ftrace_event(&graph_trace_entry_event)) {
1260 pr_warning("Warning: could not register graph trace events\n");
1261 return 1;
1262 }
1263
1264 if (!register_ftrace_event(&graph_trace_ret_event)) {
1265 pr_warning("Warning: could not register graph trace events\n");
1266 return 1;
1267 }
1268
1223 return register_tracer(&graph_trace); 1269 return register_tracer(&graph_trace);
1224} 1270}
1225 1271
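
init_graph_trace() now registers a struct trace_event per graph record type, both delegating to the same print callback, and treats a zero return from register_ftrace_event() as failure. A self-contained model of a type-keyed handler table with that registration check (illustrative, not the kernel's API):

#include <stdio.h>

enum { EV_GRAPH_ENT = 11, EV_GRAPH_RET = 12, EV_MAX = 32 };

struct event_model {
	int type;
	int (*print)(const struct event_model *ev);
};

static const struct event_model *registered[EV_MAX];

/* returns the type on success, 0 on failure -- hence the !register checks */
static int register_event_model(const struct event_model *ev)
{
	if (ev->type <= 0 || ev->type >= EV_MAX || registered[ev->type])
		return 0;
	registered[ev->type] = ev;
	return ev->type;
}

static int print_graph_event_model(const struct event_model *ev)
{
	return printf("graph record type %d -> graph printer\n", ev->type);
}

static const struct event_model ent_event = {
	.type = EV_GRAPH_ENT, .print = print_graph_event_model,
};
static const struct event_model ret_event = {
	.type = EV_GRAPH_RET, .print = print_graph_event_model,
};

int main(void)
{
	if (!register_event_model(&ent_event) ||
	    !register_event_model(&ret_event)) {
		fprintf(stderr, "could not register graph events\n");
		return 1;
	}
	registered[EV_GRAPH_ENT]->print(registered[EV_GRAPH_ENT]);
	return 0;
}
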
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2974bc7538c7..6fd486e0cef4 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -34,6 +34,9 @@ static int trace_type __read_mostly;
34 34
35static int save_lat_flag; 35static int save_lat_flag;
36 36
37static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
38static int start_irqsoff_tracer(struct trace_array *tr, int graph);
39
37#ifdef CONFIG_PREEMPT_TRACER 40#ifdef CONFIG_PREEMPT_TRACER
38static inline int 41static inline int
39preempt_trace(void) 42preempt_trace(void)
@@ -55,6 +58,23 @@ irq_trace(void)
55# define irq_trace() (0) 58# define irq_trace() (0)
56#endif 59#endif
57 60
61#define TRACE_DISPLAY_GRAPH 1
62
63static struct tracer_opt trace_opts[] = {
64#ifdef CONFIG_FUNCTION_GRAPH_TRACER
65 /* display latency trace as call graph */
66 { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
67#endif
68 { } /* Empty entry */
69};
70
71static struct tracer_flags tracer_flags = {
72 .val = 0,
73 .opts = trace_opts,
74};
75
76#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
77
58/* 78/*
59 * Sequence count - we record it when starting a measurement and 79 * Sequence count - we record it when starting a measurement and
60 * skip the latency if the sequence has changed - some other section 80 * skip the latency if the sequence has changed - some other section
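
The trace_opts/tracer_flags pair added above is the stock ftrace option-table idiom: an array of named bits terminated by an empty entry, a current-value word, and a one-bit test (is_graph()). A standalone sketch of the idiom with illustrative names:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define OPT_DISPLAY_GRAPH 0x1

struct opt_model {
	const char *name;	/* option name as shown to the user */
	uint32_t bit;		/* bit it controls in flags.val */
};

static struct opt_model opts[] = {
	{ "display-graph", OPT_DISPLAY_GRAPH },
	{ }			/* empty terminator, as in the kernel table */
};

static struct {
	uint32_t val;
	struct opt_model *opts;
} flags = { .val = 0, .opts = opts };

#define is_graph() (flags.val & OPT_DISPLAY_GRAPH)

static int set_opt(const char *name, int set)
{
	for (struct opt_model *o = flags.opts; o->name; o++) {
		if (strcmp(o->name, name))
			continue;
		if (set)
			flags.val |= o->bit;
		else
			flags.val &= ~o->bit;
		return 0;
	}
	return -1;	/* unknown option */
}

int main(void)
{
	if (set_opt("display-graph", 1))
		return 1;
	printf("graph mode: %s\n", is_graph() ? "on" : "off");
	return 0;
}
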
@@ -108,6 +128,202 @@ static struct ftrace_ops trace_ops __read_mostly =
108}; 128};
109#endif /* CONFIG_FUNCTION_TRACER */ 129#endif /* CONFIG_FUNCTION_TRACER */
110 130
131#ifdef CONFIG_FUNCTION_GRAPH_TRACER
132static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
133{
134 int cpu;
135
136 if (!(bit & TRACE_DISPLAY_GRAPH))
137 return -EINVAL;
138
139 if (!(is_graph() ^ set))
140 return 0;
141
142 stop_irqsoff_tracer(irqsoff_trace, !set);
143
144 for_each_possible_cpu(cpu)
145 per_cpu(tracing_cpu, cpu) = 0;
146
147 tracing_max_latency = 0;
148 tracing_reset_online_cpus(irqsoff_trace);
149
150 return start_irqsoff_tracer(irqsoff_trace, set);
151}
152
153static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
154{
155 struct trace_array *tr = irqsoff_trace;
156 struct trace_array_cpu *data;
157 unsigned long flags;
158 long disabled;
159 int ret;
160 int cpu;
161 int pc;
162
163 cpu = raw_smp_processor_id();
164 if (likely(!per_cpu(tracing_cpu, cpu)))
165 return 0;
166
167 local_save_flags(flags);
168 /* slight chance to get a false positive on tracing_cpu */
169 if (!irqs_disabled_flags(flags))
170 return 0;
171
172 data = tr->data[cpu];
173 disabled = atomic_inc_return(&data->disabled);
174
175 if (likely(disabled == 1)) {
176 pc = preempt_count();
177 ret = __trace_graph_entry(tr, trace, flags, pc);
178 } else
179 ret = 0;
180
181 atomic_dec(&data->disabled);
182 return ret;
183}
184
185static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
186{
187 struct trace_array *tr = irqsoff_trace;
188 struct trace_array_cpu *data;
189 unsigned long flags;
190 long disabled;
191 int cpu;
192 int pc;
193
194 cpu = raw_smp_processor_id();
195 if (likely(!per_cpu(tracing_cpu, cpu)))
196 return;
197
198 local_save_flags(flags);
199 /* slight chance to get a false positive on tracing_cpu */
200 if (!irqs_disabled_flags(flags))
201 return;
202
203 data = tr->data[cpu];
204 disabled = atomic_inc_return(&data->disabled);
205
206 if (likely(disabled == 1)) {
207 pc = preempt_count();
208 __trace_graph_return(tr, trace, flags, pc);
209 }
210
211 atomic_dec(&data->disabled);
212}
213
214static void irqsoff_trace_open(struct trace_iterator *iter)
215{
216 if (is_graph())
217 graph_trace_open(iter);
218
219}
220
221static void irqsoff_trace_close(struct trace_iterator *iter)
222{
223 if (iter->private)
224 graph_trace_close(iter);
225}
226
227#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
228 TRACE_GRAPH_PRINT_PROC)
229
230static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
231{
232 u32 flags = GRAPH_TRACER_FLAGS;
233
234 if (trace_flags & TRACE_ITER_LATENCY_FMT)
235 flags |= TRACE_GRAPH_PRINT_DURATION;
236 else
237 flags |= TRACE_GRAPH_PRINT_ABS_TIME;
238
239 /*
240 * In graph mode call the graph tracer output function,
241 * otherwise go with the TRACE_FN event handler
242 */
243 if (is_graph())
244 return print_graph_function_flags(iter, flags);
245
246 return TRACE_TYPE_UNHANDLED;
247}
248
249static void irqsoff_print_header(struct seq_file *s)
250{
251 if (is_graph()) {
252 struct trace_iterator *iter = s->private;
253 u32 flags = GRAPH_TRACER_FLAGS;
254
255 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
256 /* print nothing if the buffers are empty */
257 if (trace_empty(iter))
258 return;
259
260 print_trace_header(s, iter);
261 flags |= TRACE_GRAPH_PRINT_DURATION;
262 } else
263 flags |= TRACE_GRAPH_PRINT_ABS_TIME;
264
265 print_graph_headers_flags(s, flags);
266 } else
267 trace_default_header(s);
268}
269
270static void
271trace_graph_function(struct trace_array *tr,
272 unsigned long ip, unsigned long flags, int pc)
273{
274 u64 time = trace_clock_local();
275 struct ftrace_graph_ent ent = {
276 .func = ip,
277 .depth = 0,
278 };
279 struct ftrace_graph_ret ret = {
280 .func = ip,
281 .depth = 0,
282 .calltime = time,
283 .rettime = time,
284 };
285
286 __trace_graph_entry(tr, &ent, flags, pc);
287 __trace_graph_return(tr, &ret, flags, pc);
288}
289
290static void
291__trace_function(struct trace_array *tr,
292 unsigned long ip, unsigned long parent_ip,
293 unsigned long flags, int pc)
294{
295 if (!is_graph())
296 trace_function(tr, ip, parent_ip, flags, pc);
297 else {
298 trace_graph_function(tr, parent_ip, flags, pc);
299 trace_graph_function(tr, ip, flags, pc);
300 }
301}
302
303#else
304#define __trace_function trace_function
305
306static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
307{
308 return -EINVAL;
309}
310
311static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
312{
313 return -1;
314}
315
316static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
317{
318 return TRACE_TYPE_UNHANDLED;
319}
320
321static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
322static void irqsoff_print_header(struct seq_file *s) { }
323static void irqsoff_trace_open(struct trace_iterator *iter) { }
324static void irqsoff_trace_close(struct trace_iterator *iter) { }
325#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
326
111/* 327/*
112 * Should this new latency be reported/recorded? 328 * Should this new latency be reported/recorded?
113 */ 329 */
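
__trace_function() above is the pivot of the change: when display-graph is set, a plain function hit is synthesized as a graph entry/return pair whose calltime equals its rettime, so it renders as a zero-duration leaf in the graph output. A standalone model of that synthesis (types and names illustrative, not kernel code):

#include <stdio.h>

struct graph_ent { unsigned long func; int depth; };
struct graph_ret {
	unsigned long func;
	int depth;
	unsigned long long calltime, rettime;
};

static unsigned long long fake_clock = 1000;

static void emit_entry(const struct graph_ent *e)
{
	printf("ENT func=%#lx depth=%d\n", e->func, e->depth);
}

static void emit_return(const struct graph_ret *r)
{
	printf("RET func=%#lx duration=%llu\n",
	       r->func, r->rettime - r->calltime);
}

/* record a single function hit as a zero-duration graph leaf */
static void trace_graph_function_model(unsigned long ip)
{
	unsigned long long now = fake_clock++;
	struct graph_ent ent = { .func = ip, .depth = 0 };
	struct graph_ret ret = {
		.func = ip, .depth = 0,
		.calltime = now, .rettime = now,	/* same stamp: 0 us */
	};

	emit_entry(&ent);
	emit_return(&ret);
}

int main(void)
{
	trace_graph_function_model(0xdeadbeef);
	return 0;
}
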
@@ -150,7 +366,7 @@ check_critical_timing(struct trace_array *tr,
150 if (!report_latency(delta)) 366 if (!report_latency(delta))
151 goto out_unlock; 367 goto out_unlock;
152 368
153 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); 369 __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
154 /* Skip 5 functions to get to the irq/preempt enable function */ 370 /* Skip 5 functions to get to the irq/preempt enable function */
155 __trace_stack(tr, flags, 5, pc); 371 __trace_stack(tr, flags, 5, pc);
156 372
@@ -172,7 +388,7 @@ out_unlock:
172out: 388out:
173 data->critical_sequence = max_sequence; 389 data->critical_sequence = max_sequence;
174 data->preempt_timestamp = ftrace_now(cpu); 390 data->preempt_timestamp = ftrace_now(cpu);
175 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); 391 __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
176} 392}
177 393
178static inline void 394static inline void
@@ -204,7 +420,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
204 420
205 local_save_flags(flags); 421 local_save_flags(flags);
206 422
207 trace_function(tr, ip, parent_ip, flags, preempt_count()); 423 __trace_function(tr, ip, parent_ip, flags, preempt_count());
208 424
209 per_cpu(tracing_cpu, cpu) = 1; 425 per_cpu(tracing_cpu, cpu) = 1;
210 426
@@ -238,7 +454,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
238 atomic_inc(&data->disabled); 454 atomic_inc(&data->disabled);
239 455
240 local_save_flags(flags); 456 local_save_flags(flags);
241 trace_function(tr, ip, parent_ip, flags, preempt_count()); 457 __trace_function(tr, ip, parent_ip, flags, preempt_count());
242 check_critical_timing(tr, data, parent_ip ? : ip, cpu); 458 check_critical_timing(tr, data, parent_ip ? : ip, cpu);
243 data->critical_start = 0; 459 data->critical_start = 0;
244 atomic_dec(&data->disabled); 460 atomic_dec(&data->disabled);
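
The graph probes added above guard recording with atomic_inc_return(&data->disabled), writing only when the result is 1, so a probe that fires while the same CPU is already mid-record is dropped rather than allowed to nest. A userspace model of that guard using C11 atomics (the kernel keeps one counter per CPU; this sketch keeps one):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;	/* per-cpu in the kernel; one copy here */

static void record_event(const char *what)
{
	/* only the outermost caller on this "cpu" records anything */
	if (atomic_fetch_add(&disabled, 1) + 1 == 1)
		printf("recorded: %s\n", what);
	else
		printf("dropped nested: %s\n", what);
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	record_event("outer");		/* recorded */
	atomic_fetch_add(&disabled, 1);	/* simulate being inside a record */
	record_event("nested");		/* dropped */
	atomic_fetch_sub(&disabled, 1);
	return 0;
}
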
@@ -347,19 +563,32 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
347} 563}
348#endif /* CONFIG_PREEMPT_TRACER */ 564#endif /* CONFIG_PREEMPT_TRACER */
349 565
350static void start_irqsoff_tracer(struct trace_array *tr) 566static int start_irqsoff_tracer(struct trace_array *tr, int graph)
351{ 567{
352 register_ftrace_function(&trace_ops); 568 int ret = 0;
353 if (tracing_is_enabled()) 569
570 if (!graph)
571 ret = register_ftrace_function(&trace_ops);
572 else
573 ret = register_ftrace_graph(&irqsoff_graph_return,
574 &irqsoff_graph_entry);
575
576 if (!ret && tracing_is_enabled())
354 tracer_enabled = 1; 577 tracer_enabled = 1;
355 else 578 else
356 tracer_enabled = 0; 579 tracer_enabled = 0;
580
581 return ret;
357} 582}
358 583
359static void stop_irqsoff_tracer(struct trace_array *tr) 584static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
360{ 585{
361 tracer_enabled = 0; 586 tracer_enabled = 0;
362 unregister_ftrace_function(&trace_ops); 587
588 if (!graph)
589 unregister_ftrace_function(&trace_ops);
590 else
591 unregister_ftrace_graph();
363} 592}
364 593
365static void __irqsoff_tracer_init(struct trace_array *tr) 594static void __irqsoff_tracer_init(struct trace_array *tr)
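
start_irqsoff_tracer() now returns the registration status and selects the probe by mode: the plain function probe or the graph entry/return pair; tracer_enabled is set only when registration succeeded (the kernel additionally checks tracing_is_enabled(), omitted here). A minimal model with illustrative names:

#include <stdio.h>

static int function_probe_registered;
static int graph_probe_registered;
static int tracer_enabled;

static int register_function_probe(void)    { function_probe_registered = 1; return 0; }
static int register_graph_probe(void)       { graph_probe_registered = 1; return 0; }
static void unregister_function_probe(void) { function_probe_registered = 0; }
static void unregister_graph_probe(void)    { graph_probe_registered = 0; }

static int start_tracer(int graph)
{
	int ret;

	if (!graph)
		ret = register_function_probe();
	else
		ret = register_graph_probe();

	/* only claim to be enabled if the probe actually went in */
	tracer_enabled = (ret == 0);
	return ret;
}

static void stop_tracer(int graph)
{
	tracer_enabled = 0;
	if (!graph)
		unregister_function_probe();
	else
		unregister_graph_probe();
}

int main(void)
{
	if (start_tracer(1))
		fprintf(stderr, "failed to start tracer\n");
	stop_tracer(1);
	return 0;
}
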
@@ -372,12 +601,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
372 /* make sure that the tracer is visible */ 601 /* make sure that the tracer is visible */
373 smp_wmb(); 602 smp_wmb();
374 tracing_reset_online_cpus(tr); 603 tracing_reset_online_cpus(tr);
375 start_irqsoff_tracer(tr); 604
605 if (start_irqsoff_tracer(tr, is_graph()))
606 printk(KERN_ERR "failed to start irqsoff tracer\n");
376} 607}
377 608
378static void irqsoff_tracer_reset(struct trace_array *tr) 609static void irqsoff_tracer_reset(struct trace_array *tr)
379{ 610{
380 stop_irqsoff_tracer(tr); 611 stop_irqsoff_tracer(tr, is_graph());
381 612
382 if (!save_lat_flag) 613 if (!save_lat_flag)
383 trace_flags &= ~TRACE_ITER_LATENCY_FMT; 614 trace_flags &= ~TRACE_ITER_LATENCY_FMT;
@@ -409,9 +640,15 @@ static struct tracer irqsoff_tracer __read_mostly =
409 .start = irqsoff_tracer_start, 640 .start = irqsoff_tracer_start,
410 .stop = irqsoff_tracer_stop, 641 .stop = irqsoff_tracer_stop,
411 .print_max = 1, 642 .print_max = 1,
643 .print_header = irqsoff_print_header,
644 .print_line = irqsoff_print_line,
645 .flags = &tracer_flags,
646 .set_flag = irqsoff_set_flag,
412#ifdef CONFIG_FTRACE_SELFTEST 647#ifdef CONFIG_FTRACE_SELFTEST
413 .selftest = trace_selftest_startup_irqsoff, 648 .selftest = trace_selftest_startup_irqsoff,
414#endif 649#endif
650 .open = irqsoff_trace_open,
651 .close = irqsoff_trace_close,
415}; 652};
416# define register_irqsoff(trace) register_tracer(&trace) 653# define register_irqsoff(trace) register_tracer(&trace)
417#else 654#else
@@ -435,9 +672,15 @@ static struct tracer preemptoff_tracer __read_mostly =
435 .start = irqsoff_tracer_start, 672 .start = irqsoff_tracer_start,
436 .stop = irqsoff_tracer_stop, 673 .stop = irqsoff_tracer_stop,
437 .print_max = 1, 674 .print_max = 1,
675 .print_header = irqsoff_print_header,
676 .print_line = irqsoff_print_line,
677 .flags = &tracer_flags,
678 .set_flag = irqsoff_set_flag,
438#ifdef CONFIG_FTRACE_SELFTEST 679#ifdef CONFIG_FTRACE_SELFTEST
439 .selftest = trace_selftest_startup_preemptoff, 680 .selftest = trace_selftest_startup_preemptoff,
440#endif 681#endif
682 .open = irqsoff_trace_open,
683 .close = irqsoff_trace_close,
441}; 684};
442# define register_preemptoff(trace) register_tracer(&trace) 685# define register_preemptoff(trace) register_tracer(&trace)
443#else 686#else
@@ -463,9 +706,15 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
463 .start = irqsoff_tracer_start, 706 .start = irqsoff_tracer_start,
464 .stop = irqsoff_tracer_stop, 707 .stop = irqsoff_tracer_stop,
465 .print_max = 1, 708 .print_max = 1,
709 .print_header = irqsoff_print_header,
710 .print_line = irqsoff_print_line,
711 .flags = &tracer_flags,
712 .set_flag = irqsoff_set_flag,
466#ifdef CONFIG_FTRACE_SELFTEST 713#ifdef CONFIG_FTRACE_SELFTEST
467 .selftest = trace_selftest_startup_preemptirqsoff, 714 .selftest = trace_selftest_startup_preemptirqsoff,
468#endif 715#endif
716 .open = irqsoff_trace_open,
717 .close = irqsoff_trace_close,
469}; 718};
470 719
471# define register_preemptirqsoff(trace) register_tracer(&trace) 720# define register_preemptirqsoff(trace) register_tracer(&trace)
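
The same four hooks (print_header, print_line, open, close) plus the shared flags table are wired into all three variants, so irqsoff, preemptoff and preemptirqsoff share one output path and differ only in which probes they arm. A small sketch of several variants sharing one callback set (illustrative names):

#include <stdio.h>

struct tracer_model {
	const char *name;
	void (*open)(void);
	void (*close)(void);
};

static void shared_open(void)  { puts("open: set up per-cpu graph data"); }
static void shared_close(void) { puts("close: free per-cpu graph data"); }

/* three variants, one callback set -- mirrors the diff above */
static struct tracer_model variants[] = {
	{ .name = "irqsoff",        .open = shared_open, .close = shared_close },
	{ .name = "preemptoff",     .open = shared_open, .close = shared_close },
	{ .name = "preemptirqsoff", .open = shared_open, .close = shared_close },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(variants) / sizeof(variants[0]); i++) {
		printf("%s:\n", variants[i].name);
		variants[i].open();
		variants[i].close();
	}
	return 0;
}
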
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8e46b3323cdc..2404c129a8c9 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -253,7 +253,7 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len)
253 void *ret; 253 void *ret;
254 254
255 if (s->full) 255 if (s->full)
256 return 0; 256 return NULL;
257 257
258 if (len > ((PAGE_SIZE - 1) - s->len)) { 258 if (len > ((PAGE_SIZE - 1) - s->len)) {
259 s->full = 1; 259 s->full = 1;
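
The one-line trace_output.c change returns NULL instead of 0 from a function whose return type is void *; both yield a null pointer, so it only silences the "integer as NULL pointer" warning named in the merge summary, with no behavior change. For illustration, with a hypothetical reserve helper:

#include <stdio.h>
#include <stddef.h>

/* returning 0 from a pointer-returning function works, but trips
 * sparse-style "plain integer as NULL pointer" checks; NULL states
 * the intent explicitly. */
static void *reserve(size_t len, size_t avail)
{
	if (len > avail)
		return NULL;		/* preferred over 'return 0;' */
	static char buf[64];
	return buf;
}

int main(void)
{
	void *p = reserve(128, 64);
	if (!p)
		puts("reservation failed");
	return 0;
}
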
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 71fa771ee4d7..250e7f9bd2f0 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -255,7 +255,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
255/* Maximum number of functions to trace before diagnosing a hang */ 255/* Maximum number of functions to trace before diagnosing a hang */
256#define GRAPH_MAX_FUNC_TEST 100000000 256#define GRAPH_MAX_FUNC_TEST 100000000
257 257
258static void __ftrace_dump(bool disable_tracing); 258static void
259__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
259static unsigned int graph_hang_thresh; 260static unsigned int graph_hang_thresh;
260 261
261/* Wrap the real function entry probe to avoid possible hanging */ 262/* Wrap the real function entry probe to avoid possible hanging */
@@ -266,7 +267,7 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
266 ftrace_graph_stop(); 267 ftrace_graph_stop();
267 printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); 268 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
268 if (ftrace_dump_on_oops) 269 if (ftrace_dump_on_oops)
269 __ftrace_dump(false); 270 __ftrace_dump(false, DUMP_ALL);
270 return 0; 271 return 0;
271 } 272 }
272 273
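
The selftest hunk threads the new dump mode into __ftrace_dump(), matching the ftrace_dump_on_oops[=orig_cpu] boot parameter: either every CPU's buffer is dumped, or only the buffer of the CPU that triggered the oops. A standalone model of that dispatch (the enum values are renamed stand-ins; only DUMP_ALL appears verbatim in the hunk above):

#include <stdio.h>

/* model of the dump-mode switch; the orig-cpu value is assumed from
 * the boot-option documentation earlier in this commit */
enum dump_mode_model { DUMP_NONE_M, DUMP_ALL_M, DUMP_ORIG_CPU_M };

static void dump_cpu_buffer(int cpu)
{
	printf("dumping trace buffer of cpu %d\n", cpu);
}

static void ftrace_dump_model(enum dump_mode_model mode, int oops_cpu,
			      int nr_cpus)
{
	switch (mode) {
	case DUMP_ALL_M:
		for (int cpu = 0; cpu < nr_cpus; cpu++)
			dump_cpu_buffer(cpu);
		break;
	case DUMP_ORIG_CPU_M:
		dump_cpu_buffer(oops_cpu);	/* only the oopsing cpu */
		break;
	case DUMP_NONE_M:
		break;
	}
}

int main(void)
{
	ftrace_dump_model(DUMP_ORIG_CPU_M, 2, 4);
	return 0;
}
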