52 files changed, 1610 insertions, 368 deletions
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt new file mode 100644 index 000000000000..abdee664c0f6 --- /dev/null +++ b/Documentation/trace/events.txt | |||
@@ -0,0 +1,135 @@ | |||
1 | Event Tracing | ||
2 | |||
3 | Documentation written by Theodore Ts'o | ||
4 | |||
5 | Introduction | ||
6 | ============ | ||
7 | |||
8 | Using the event tracing infrastructure, tracepoints (see | ||
9 | Documentation/trace/tracepoints.txt) can be used without creating | ||
10 | custom kernel modules to register probe functions. | ||
11 | |||
12 | Not all tracepoints can be traced using the event tracing system; | ||
13 | the kernel developer must provide code snippets which define how the | ||
14 | tracing information is saved into the tracing buffer, and how the | ||
15 | tracing information should be printed. | ||
16 | |||
17 | Using Event Tracing | ||
18 | =================== | ||
19 | |||
20 | The events which are available for tracing can be found in the file | ||
21 | /sys/kernel/debug/tracing/available_events. | ||
22 | |||
23 | To enable a particular event, such as 'sched_wakeup', simply echo it | ||
24 | to /sys/kernel/debug/tracing/set_event. For example: | ||
25 | |||
26 | # echo sched_wakeup > /sys/kernel/debug/tracing/set_event | ||
27 | |||
28 | [ Note: events can also be enabled/disabled via the 'enable' toggle | ||
29 | found in the /sys/kernel/debug/tracing/events/ hierarchy of directories. ] | ||
30 | |||
31 | To disable an event, echo the event name to the set_event file prefixed | ||
32 | with an exclamation point: | ||
33 | |||
34 | # echo '!sched_wakeup' >> /sys/kernel/debug/tracing/set_event | ||
35 | |||
36 | To disable all events, echo an empty line to the set_event file: | ||
37 | |||
38 | # echo > /sys/kernel/debug/tracing/set_event | ||
39 | |||
40 | The events are organized into subsystems, such as ext4, irq, sched, | ||
41 | etc., and a full event name looks like this: <subsystem>:<event>. The | ||
42 | subsystem name is optional, but it is displayed in the available_events | ||
43 | file. All of the events in a subsystem can be specified via the syntax | ||
44 | "<subsystem>:*"; for example, to enable all irq events, you can use the | ||
45 | command: | ||
46 | |||
47 | # echo 'irq:*' > /sys/kernel/debug/tracing/set_event | ||
48 | |||
49 | Defining an event-enabled tracepoint | ||
50 | ------------------------------------ | ||
51 | |||
52 | A kernel developer who wishes to define an event-enabled tracepoint | ||
53 | must declare the tracepoint using TRACE_EVENT instead of DECLARE_TRACE. | ||
54 | This is done via two header files in include/trace. For example, to | ||
55 | event-enable the jbd2 subsystem, we must create two files, | ||
56 | include/trace/jbd2.h and include/trace/jbd2_event_types.h. The | ||
57 | include/trace/jbd2.h file should be included by kernel source files that | ||
58 | will have a tracepoint inserted, and might look like this: | ||
59 | |||
60 | #ifndef _TRACE_JBD2_H | ||
61 | #define _TRACE_JBD2_H | ||
62 | |||
63 | #include <linux/jbd2.h> | ||
64 | #include <linux/tracepoint.h> | ||
65 | |||
66 | #include <trace/jbd2_event_types.h> | ||
67 | |||
68 | #endif | ||
69 | |||
70 | Any kernel source file that uses a jbd2 tracepoint must include this | ||
71 | header. Note that you still have to use DEFINE_TRACE(). So, for | ||
72 | example, if fs/jbd2/commit.c planned to use the jbd2_start_commit | ||
73 | tracepoint, it would have the following near the beginning of the file: | ||
74 | |||
75 | #include <trace/jbd2.h> | ||
76 | |||
77 | DEFINE_TRACE(jbd2_start_commit); | ||
78 | |||
79 | Then the function that fires the tracepoint simply calls it. (For | ||
80 | more information, please see the tracepoint | ||
81 | documentation in Documentation/trace/tracepoints.txt): | ||
82 | |||
83 | trace_jbd2_start_commit(journal, commit_transaction); | ||
84 | |||
85 | The code snippets which allow jbd2_start_commit to be an event-enabled | ||
86 | tracepoint are placed in the file include/trace/jbd2_event_types.h: | ||
87 | |||
88 | /* use <trace/jbd2.h> instead */ | ||
89 | #ifndef TRACE_EVENT | ||
90 | # error Do not include this file directly. | ||
91 | # error Unless you know what you are doing. | ||
92 | #endif | ||
93 | |||
94 | #undef TRACE_SYSTEM | ||
95 | #define TRACE_SYSTEM jbd2 | ||
96 | |||
97 | #include <linux/jbd2.h> | ||
98 | |||
99 | TRACE_EVENT(jbd2_start_commit, | ||
100 | TP_PROTO(journal_t *journal, transaction_t *commit_transaction), | ||
101 | TP_ARGS(journal, commit_transaction), | ||
102 | TP_STRUCT__entry( | ||
103 | __array( char, devname, BDEVNAME_SIZE+24 ) | ||
104 | __field( int, transaction ) | ||
105 | ), | ||
106 | TP_fast_assign( | ||
107 | memcpy(__entry->devname, journal->j_devname, BDEVNAME_SIZE+24); | ||
108 | __entry->transaction = commit_transaction->t_tid; | ||
109 | ), | ||
110 | TP_printk("dev %s transaction %d", | ||
111 | __entry->devname, __entry->transaction) | ||
112 | ); | ||
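With the event enabled, each firing of the tracepoint adds one line to
/sys/kernel/debug/tracing/trace. Illustrative output (the task, CPU and
timestamp columns here are made up):

	<...>-2330  [001]   708.459902: jbd2_start_commit: dev dm-0 transaction 294210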
113 | |||
114 | The TP_PROTO and TP_ARGS are unchanged from DECLARE_TRACE. The new | ||
115 | arguments to TRACE_EVENT are TP_STRUCT__entry, TP_fast_assign, and | ||
116 | TP_printk. | ||
117 | |||
118 | TP_STRUCT__entry defines the data structure which will be stored in the | ||
119 | trace buffer. Normally, fields in __entry will be arrays or simple | ||
120 | types. It is possible to place whole data structures in __entry --- | ||
121 | however, pointers inside such a structure cannot be trusted, since | ||
122 | they will be dereferenced sometime later by TP_printk, and any fields | ||
123 | that TP_printk will not or cannot use simply waste space in the | ||
124 | trace buffer. In general, embedded data structures should be avoided | ||
125 | unless they contain only non-pointer types and all of their fields | ||
126 | will be used by TP_printk. | ||
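As a hedged illustration (this event is hypothetical, not part of the
kernel), copy string data into __entry at trace time rather than storing
a pointer that TP_printk would dereference later:

	TRACE_EVENT(myfs_open,
		TP_PROTO(struct dentry *dentry),
		TP_ARGS(dentry),
		TP_STRUCT__entry(
			/* safe: the name bytes are copied at trace time */
			__array( char, name, 32 )
		),
		TP_fast_assign(
			/* storing dentry->d_name.name itself could dangle */
			strlcpy(__entry->name, dentry->d_name.name, 32);
		),
		TP_printk("file %s", __entry->name)
	);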
127 | |||
128 | TP_fast_assign defines the code snippet which saves information into the | ||
129 | __entry data structure, using the passed-in arguments defined in | ||
130 | TP_PROTO and TP_ARGS. | ||
131 | |||
132 | Finally, TP_printk will print the __entry data structure. At the time | ||
133 | when the code snippet defined by TP_printk is executed, it will not have | ||
134 | access to the TP_ARGS arguments; it can only use the information saved | ||
135 | in the __entry data structure. | ||
diff --git a/Documentation/trace/power.txt b/Documentation/trace/power.txt new file mode 100644 index 000000000000..cd805e16dc27 --- /dev/null +++ b/Documentation/trace/power.txt | |||
@@ -0,0 +1,17 @@ | |||
1 | The power tracer collects detailed information about C-state and P-state | ||
2 | transitions, instead of just looking at the high-level "average" | ||
3 | information. | ||
4 | |||
5 | There is a helper script found in scripts/tracing/power.pl in the kernel | ||
6 | sources which can be used to parse this information and create a | ||
7 | Scalable Vector Graphics (SVG) picture from the trace data. | ||
8 | |||
9 | To use this tracer: | ||
10 | |||
11 | echo 0 > /sys/kernel/debug/tracing/tracing_enabled | ||
12 | echo power > /sys/kernel/debug/tracing/current_tracer | ||
13 | echo 1 > /sys/kernel/debug/tracing/tracing_enabled | ||
14 | sleep 1 | ||
15 | echo 0 > /sys/kernel/debug/tracing/tracing_enabled | ||
16 | cat /sys/kernel/debug/tracing/trace | \ | ||
17 | perl scripts/tracing/power.pl > out.svg | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index a331ec38af9e..1ac99865591c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -147,27 +147,14 @@ END(ftrace_graph_caller) | |||
147 | GLOBAL(return_to_handler) | 147 | GLOBAL(return_to_handler) |
148 | subq $80, %rsp | 148 | subq $80, %rsp |
149 | 149 | ||
150 | /* Save the return values */ | ||
150 | movq %rax, (%rsp) | 151 | movq %rax, (%rsp) |
151 | movq %rcx, 8(%rsp) | 152 | movq %rdx, 8(%rsp) |
152 | movq %rdx, 16(%rsp) | ||
153 | movq %rsi, 24(%rsp) | ||
154 | movq %rdi, 32(%rsp) | ||
155 | movq %r8, 40(%rsp) | ||
156 | movq %r9, 48(%rsp) | ||
157 | movq %r10, 56(%rsp) | ||
158 | movq %r11, 64(%rsp) | ||
159 | 153 | ||
160 | call ftrace_return_to_handler | 154 | call ftrace_return_to_handler |
161 | 155 | ||
162 | movq %rax, 72(%rsp) | 156 | movq %rax, 72(%rsp) |
163 | movq 64(%rsp), %r11 | 157 | movq 8(%rsp), %rdx |
164 | movq 56(%rsp), %r10 | ||
165 | movq 48(%rsp), %r9 | ||
166 | movq 40(%rsp), %r8 | ||
167 | movq 32(%rsp), %rdi | ||
168 | movq 24(%rsp), %rsi | ||
169 | movq 16(%rsp), %rdx | ||
170 | movq 8(%rsp), %rcx | ||
171 | movq (%rsp), %rax | 158 | movq (%rsp), %rax |
172 | addq $72, %rsp | 159 | addq $72, %rsp |
173 | retq | 160 | retq |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7fa660fd449c..7e9b1e9f711c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -61,7 +61,7 @@ | |||
61 | #define BRANCH_PROFILE() | 61 | #define BRANCH_PROFILE() |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifdef CONFIG_EVENT_TRACER | 64 | #ifdef CONFIG_EVENT_TRACING |
65 | #define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \ | 65 | #define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \ |
66 | *(_ftrace_events) \ | 66 | *(_ftrace_events) \ |
67 | VMLINUX_SYMBOL(__stop_ftrace_events) = .; | 67 | VMLINUX_SYMBOL(__stop_ftrace_events) = .; |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8a0c2f221e6b..53869bef6102 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -368,6 +368,7 @@ struct ftrace_ret_stack { | |||
368 | unsigned long ret; | 368 | unsigned long ret; |
369 | unsigned long func; | 369 | unsigned long func; |
370 | unsigned long long calltime; | 370 | unsigned long long calltime; |
371 | unsigned long long subtime; | ||
371 | }; | 372 | }; |
372 | 373 | ||
373 | /* | 374 | /* |
@@ -379,8 +380,6 @@ extern void return_to_handler(void); | |||
379 | 380 | ||
380 | extern int | 381 | extern int |
381 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); | 382 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); |
382 | extern void | ||
383 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | ||
384 | 383 | ||
385 | /* | 384 | /* |
386 | * Sometimes we don't want to trace a function with the function | 385 | * Sometimes we don't want to trace a function with the function |
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h new file mode 100644 index 000000000000..15c45a27a925 --- /dev/null +++ b/include/linux/kmemtrace.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <trace/kmem.h> | ||
13 | |||
14 | #ifdef CONFIG_KMEMTRACE | ||
15 | extern void kmemtrace_init(void); | ||
16 | #else | ||
17 | static inline void kmemtrace_init(void) | ||
18 | { | ||
19 | } | ||
20 | #endif | ||
21 | |||
22 | #endif /* __KERNEL__ */ | ||
23 | |||
24 | #endif /* _LINUX_KMEMTRACE_H */ | ||
25 | |||
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e1b7b2173885..f0aa486d131c 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -68,9 +68,38 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
68 | return event->time_delta; | 68 | return event->time_delta; |
69 | } | 69 | } |
70 | 70 | ||
71 | /* | ||
72 | * ring_buffer_event_discard can discard any event in the ring buffer. | ||
73 | * It is up to the caller to protect against a reader consuming | ||
74 | * it or a writer wrapping and replacing it. | ||
75 | * | ||
76 | * No external protection is needed if this is called before | ||
77 | * the event is committed. But in that case it would be better to | ||
78 | * use ring_buffer_discard_commit. | ||
79 | * | ||
80 | * Note, if an event that has not been committed is discarded | ||
81 | * with ring_buffer_event_discard, it must still be committed. | ||
82 | */ | ||
71 | void ring_buffer_event_discard(struct ring_buffer_event *event); | 83 | void ring_buffer_event_discard(struct ring_buffer_event *event); |
72 | 84 | ||
73 | /* | 85 | /* |
86 | * ring_buffer_discard_commit will remove an event that has not | ||
87 | * been committed yet. If this is used, then ring_buffer_unlock_commit | ||
88 | * must not be called on the discarded event. This function | ||
89 | * will try to remove the event from the ring buffer completely | ||
90 | * if another event has not been written after it. | ||
91 | * | ||
92 | * Example use: | ||
93 | * | ||
94 | * if (some_condition) | ||
95 | * ring_buffer_discard_commit(buffer, event); | ||
96 | * else | ||
97 | * ring_buffer_unlock_commit(buffer, event); | ||
98 | */ | ||
99 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | ||
100 | struct ring_buffer_event *event); | ||
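/*
 * A hedged sketch (hypothetical, not part of this patch) of the
 * reserve/fill/commit-or-discard flow described above. Assumes the
 * two-argument ring_buffer_lock_reserve() of this series; "my_payload"
 * and the negative-value filter are made up for illustration.
 */
struct my_payload { int value; };

static inline void write_filtered(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct my_payload *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	if (value < 0)
		ring_buffer_discard_commit(buffer, event);	/* dropped */
	else
		ring_buffer_unlock_commit(buffer, event);	/* kept */
}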
101 | |||
102 | /* | ||
74 | * size is in bytes for each per CPU buffer. | 103 | * size is in bytes for each per CPU buffer. |
75 | */ | 104 | */ |
76 | struct ring_buffer * | 105 | struct ring_buffer * |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 5ac9b0bcaf9a..713f841ecaa9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <trace/kmemtrace.h> | 17 | #include <linux/kmemtrace.h> |
18 | 18 | ||
19 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
20 | struct cache_sizes { | 20 | struct cache_sizes { |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5046f90c1171..be5d40c43bd2 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
13 | #include <trace/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
14 | 14 | ||
15 | enum stat_item { | 15 | enum stat_item { |
16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
diff --git a/include/trace/kmem.h b/include/trace/kmem.h new file mode 100644 index 000000000000..46efc2423f03 --- /dev/null +++ b/include/trace/kmem.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _TRACE_KMEM_H | ||
2 | #define _TRACE_KMEM_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/kmem_event_types.h> | ||
8 | |||
9 | #endif /* _TRACE_KMEM_H */ | ||
diff --git a/include/trace/kmem_event_types.h b/include/trace/kmem_event_types.h new file mode 100644 index 000000000000..4ff420fe4675 --- /dev/null +++ b/include/trace/kmem_event_types.h | |||
@@ -0,0 +1,193 @@ | |||
1 | |||
2 | /* use <trace/kmem.h> instead */ | ||
3 | #ifndef TRACE_EVENT | ||
4 | # error Do not include this file directly. | ||
5 | # error Unless you know what you are doing. | ||
6 | #endif | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM kmem | ||
10 | |||
11 | TRACE_EVENT(kmalloc, | ||
12 | |||
13 | TP_PROTO(unsigned long call_site, | ||
14 | const void *ptr, | ||
15 | size_t bytes_req, | ||
16 | size_t bytes_alloc, | ||
17 | gfp_t gfp_flags), | ||
18 | |||
19 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | ||
20 | |||
21 | TP_STRUCT__entry( | ||
22 | __field( unsigned long, call_site ) | ||
23 | __field( const void *, ptr ) | ||
24 | __field( size_t, bytes_req ) | ||
25 | __field( size_t, bytes_alloc ) | ||
26 | __field( gfp_t, gfp_flags ) | ||
27 | ), | ||
28 | |||
29 | TP_fast_assign( | ||
30 | __entry->call_site = call_site; | ||
31 | __entry->ptr = ptr; | ||
32 | __entry->bytes_req = bytes_req; | ||
33 | __entry->bytes_alloc = bytes_alloc; | ||
34 | __entry->gfp_flags = gfp_flags; | ||
35 | ), | ||
36 | |||
37 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x", | ||
38 | __entry->call_site, | ||
39 | __entry->ptr, | ||
40 | __entry->bytes_req, | ||
41 | __entry->bytes_alloc, | ||
42 | __entry->gfp_flags) | ||
43 | ); | ||
44 | |||
45 | TRACE_EVENT(kmem_cache_alloc, | ||
46 | |||
47 | TP_PROTO(unsigned long call_site, | ||
48 | const void *ptr, | ||
49 | size_t bytes_req, | ||
50 | size_t bytes_alloc, | ||
51 | gfp_t gfp_flags), | ||
52 | |||
53 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | ||
54 | |||
55 | TP_STRUCT__entry( | ||
56 | __field( unsigned long, call_site ) | ||
57 | __field( const void *, ptr ) | ||
58 | __field( size_t, bytes_req ) | ||
59 | __field( size_t, bytes_alloc ) | ||
60 | __field( gfp_t, gfp_flags ) | ||
61 | ), | ||
62 | |||
63 | TP_fast_assign( | ||
64 | __entry->call_site = call_site; | ||
65 | __entry->ptr = ptr; | ||
66 | __entry->bytes_req = bytes_req; | ||
67 | __entry->bytes_alloc = bytes_alloc; | ||
68 | __entry->gfp_flags = gfp_flags; | ||
69 | ), | ||
70 | |||
71 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x", | ||
72 | __entry->call_site, | ||
73 | __entry->ptr, | ||
74 | __entry->bytes_req, | ||
75 | __entry->bytes_alloc, | ||
76 | __entry->gfp_flags) | ||
77 | ); | ||
78 | |||
79 | TRACE_EVENT(kmalloc_node, | ||
80 | |||
81 | TP_PROTO(unsigned long call_site, | ||
82 | const void *ptr, | ||
83 | size_t bytes_req, | ||
84 | size_t bytes_alloc, | ||
85 | gfp_t gfp_flags, | ||
86 | int node), | ||
87 | |||
88 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | ||
89 | |||
90 | TP_STRUCT__entry( | ||
91 | __field( unsigned long, call_site ) | ||
92 | __field( const void *, ptr ) | ||
93 | __field( size_t, bytes_req ) | ||
94 | __field( size_t, bytes_alloc ) | ||
95 | __field( gfp_t, gfp_flags ) | ||
96 | __field( int, node ) | ||
97 | ), | ||
98 | |||
99 | TP_fast_assign( | ||
100 | __entry->call_site = call_site; | ||
101 | __entry->ptr = ptr; | ||
102 | __entry->bytes_req = bytes_req; | ||
103 | __entry->bytes_alloc = bytes_alloc; | ||
104 | __entry->gfp_flags = gfp_flags; | ||
105 | __entry->node = node; | ||
106 | ), | ||
107 | |||
108 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x node=%d", | ||
109 | __entry->call_site, | ||
110 | __entry->ptr, | ||
111 | __entry->bytes_req, | ||
112 | __entry->bytes_alloc, | ||
113 | __entry->gfp_flags, | ||
114 | __entry->node) | ||
115 | ); | ||
116 | |||
117 | TRACE_EVENT(kmem_cache_alloc_node, | ||
118 | |||
119 | TP_PROTO(unsigned long call_site, | ||
120 | const void *ptr, | ||
121 | size_t bytes_req, | ||
122 | size_t bytes_alloc, | ||
123 | gfp_t gfp_flags, | ||
124 | int node), | ||
125 | |||
126 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | ||
127 | |||
128 | TP_STRUCT__entry( | ||
129 | __field( unsigned long, call_site ) | ||
130 | __field( const void *, ptr ) | ||
131 | __field( size_t, bytes_req ) | ||
132 | __field( size_t, bytes_alloc ) | ||
133 | __field( gfp_t, gfp_flags ) | ||
134 | __field( int, node ) | ||
135 | ), | ||
136 | |||
137 | TP_fast_assign( | ||
138 | __entry->call_site = call_site; | ||
139 | __entry->ptr = ptr; | ||
140 | __entry->bytes_req = bytes_req; | ||
141 | __entry->bytes_alloc = bytes_alloc; | ||
142 | __entry->gfp_flags = gfp_flags; | ||
143 | __entry->node = node; | ||
144 | ), | ||
145 | |||
146 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x node=%d", | ||
147 | __entry->call_site, | ||
148 | __entry->ptr, | ||
149 | __entry->bytes_req, | ||
150 | __entry->bytes_alloc, | ||
151 | __entry->gfp_flags, | ||
152 | __entry->node) | ||
153 | ); | ||
154 | |||
155 | TRACE_EVENT(kfree, | ||
156 | |||
157 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
158 | |||
159 | TP_ARGS(call_site, ptr), | ||
160 | |||
161 | TP_STRUCT__entry( | ||
162 | __field( unsigned long, call_site ) | ||
163 | __field( const void *, ptr ) | ||
164 | ), | ||
165 | |||
166 | TP_fast_assign( | ||
167 | __entry->call_site = call_site; | ||
168 | __entry->ptr = ptr; | ||
169 | ), | ||
170 | |||
171 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | ||
172 | ); | ||
173 | |||
174 | TRACE_EVENT(kmem_cache_free, | ||
175 | |||
176 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
177 | |||
178 | TP_ARGS(call_site, ptr), | ||
179 | |||
180 | TP_STRUCT__entry( | ||
181 | __field( unsigned long, call_site ) | ||
182 | __field( const void *, ptr ) | ||
183 | ), | ||
184 | |||
185 | TP_fast_assign( | ||
186 | __entry->call_site = call_site; | ||
187 | __entry->ptr = ptr; | ||
188 | ), | ||
189 | |||
190 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | ||
191 | ); | ||
192 | |||
193 | #undef TRACE_SYSTEM | ||
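A hedged sketch (hypothetical, not part of this patch) of how an
allocator fires these events; trace_kmalloc() is the probe stub
generated by TRACE_EVENT(kmalloc, ...) above, while real_alloc() stands
in for the slab internals:

	void *my_kmalloc(size_t size, gfp_t flags)
	{
		void *ret = real_alloc(size, flags);	/* hypothetical backend */

		/* bytes_alloc (ksize) may exceed bytes_req due to rounding */
		trace_kmalloc(_RET_IP_, ret, size, ksize(ret), flags);
		return ret;
	}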
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h deleted file mode 100644 index 28ee69f9cd46..000000000000 --- a/include/trace/kmemtrace.h +++ /dev/null | |||
@@ -1,63 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/tracepoint.h> | ||
13 | #include <linux/types.h> | ||
14 | |||
15 | #ifdef CONFIG_KMEMTRACE | ||
16 | extern void kmemtrace_init(void); | ||
17 | #else | ||
18 | static inline void kmemtrace_init(void) | ||
19 | { | ||
20 | } | ||
21 | #endif | ||
22 | |||
23 | DECLARE_TRACE(kmalloc, | ||
24 | TP_PROTO(unsigned long call_site, | ||
25 | const void *ptr, | ||
26 | size_t bytes_req, | ||
27 | size_t bytes_alloc, | ||
28 | gfp_t gfp_flags), | ||
29 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); | ||
30 | DECLARE_TRACE(kmem_cache_alloc, | ||
31 | TP_PROTO(unsigned long call_site, | ||
32 | const void *ptr, | ||
33 | size_t bytes_req, | ||
34 | size_t bytes_alloc, | ||
35 | gfp_t gfp_flags), | ||
36 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); | ||
37 | DECLARE_TRACE(kmalloc_node, | ||
38 | TP_PROTO(unsigned long call_site, | ||
39 | const void *ptr, | ||
40 | size_t bytes_req, | ||
41 | size_t bytes_alloc, | ||
42 | gfp_t gfp_flags, | ||
43 | int node), | ||
44 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); | ||
45 | DECLARE_TRACE(kmem_cache_alloc_node, | ||
46 | TP_PROTO(unsigned long call_site, | ||
47 | const void *ptr, | ||
48 | size_t bytes_req, | ||
49 | size_t bytes_alloc, | ||
50 | gfp_t gfp_flags, | ||
51 | int node), | ||
52 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); | ||
53 | DECLARE_TRACE(kfree, | ||
54 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
55 | TP_ARGS(call_site, ptr)); | ||
56 | DECLARE_TRACE(kmem_cache_free, | ||
57 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
58 | TP_ARGS(call_site, ptr)); | ||
59 | |||
60 | #endif /* __KERNEL__ */ | ||
61 | |||
62 | #endif /* _LINUX_KMEMTRACE_H */ | ||
63 | |||
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h index adccfcd2ec8f..863f1e4583a6 100644 --- a/include/trace/lockdep_event_types.h +++ b/include/trace/lockdep_event_types.h | |||
@@ -32,11 +32,24 @@ TRACE_FORMAT(lock_contended, | |||
32 | TP_FMT("%s", lock->name) | 32 | TP_FMT("%s", lock->name) |
33 | ); | 33 | ); |
34 | 34 | ||
35 | TRACE_FORMAT(lock_acquired, | 35 | TRACE_EVENT(lock_acquired, |
36 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | 36 | TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), |
37 | TP_ARGS(lock, ip), | 37 | |
38 | TP_FMT("%s", lock->name) | 38 | TP_ARGS(lock, ip, waittime), |
39 | ); | 39 | |
40 | TP_STRUCT__entry( | ||
41 | __field(const char *, name) | ||
42 | __field(unsigned long, wait_usec) | ||
43 | __field(unsigned long, wait_nsec_rem) | ||
44 | ), | ||
45 | TP_fast_assign( | ||
46 | __entry->name = lock->name; | ||
47 | __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC); | ||
48 | __entry->wait_usec = (unsigned long) waittime; | ||
49 | ), | ||
50 | TP_printk("%s (%lu.%03lu us)", __entry->name, __entry->wait_usec, | ||
51 | __entry->wait_nsec_rem) | ||
52 | ); | ||
40 | 53 | ||
41 | #endif | 54 | #endif |
42 | #endif | 55 | #endif |
diff --git a/include/trace/skb.h b/include/trace/skb.h index b66206d9be72..d2de7174a6e8 100644 --- a/include/trace/skb.h +++ b/include/trace/skb.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #include <linux/skbuff.h> | 4 | #include <linux/skbuff.h> |
5 | #include <linux/tracepoint.h> | 5 | #include <linux/tracepoint.h> |
6 | 6 | ||
7 | DECLARE_TRACE(kfree_skb, | 7 | #include <trace/skb_event_types.h> |
8 | TP_PROTO(struct sk_buff *skb, void *location), | ||
9 | TP_ARGS(skb, location)); | ||
10 | 8 | ||
11 | #endif | 9 | #endif |
diff --git a/include/trace/skb_event_types.h b/include/trace/skb_event_types.h new file mode 100644 index 000000000000..4a1c504c0e16 --- /dev/null +++ b/include/trace/skb_event_types.h | |||
@@ -0,0 +1,38 @@ | |||
1 | |||
2 | /* use <trace/skb.h> instead */ | ||
3 | #ifndef TRACE_EVENT | ||
4 | # error Do not include this file directly. | ||
5 | # error Unless you know what you are doing. | ||
6 | #endif | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM skb | ||
10 | |||
11 | /* | ||
12 | * Tracepoint for freeing an sk_buff: | ||
13 | */ | ||
14 | TRACE_EVENT(kfree_skb, | ||
15 | |||
16 | TP_PROTO(struct sk_buff *skb, void *location), | ||
17 | |||
18 | TP_ARGS(skb, location), | ||
19 | |||
20 | TP_STRUCT__entry( | ||
21 | __field( void *, skbaddr ) | ||
22 | __field( unsigned short, protocol ) | ||
23 | __field( void *, location ) | ||
24 | ), | ||
25 | |||
26 | TP_fast_assign( | ||
27 | __entry->skbaddr = skb; | ||
28 | if (skb) { | ||
29 | __entry->protocol = ntohs(skb->protocol); | ||
30 | } | ||
31 | __entry->location = location; | ||
32 | ), | ||
33 | |||
34 | TP_printk("skbaddr=%p protocol=%u location=%p", | ||
35 | __entry->skbaddr, __entry->protocol, __entry->location) | ||
36 | ); | ||
37 | |||
38 | #undef TRACE_SYSTEM | ||
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h index df56f5694be6..552a50e169a6 100644 --- a/include/trace/trace_event_types.h +++ b/include/trace/trace_event_types.h | |||
@@ -3,3 +3,5 @@ | |||
3 | #include <trace/sched_event_types.h> | 3 | #include <trace/sched_event_types.h> |
4 | #include <trace/irq_event_types.h> | 4 | #include <trace/irq_event_types.h> |
5 | #include <trace/lockdep_event_types.h> | 5 | #include <trace/lockdep_event_types.h> |
6 | #include <trace/skb_event_types.h> | ||
7 | #include <trace/kmem_event_types.h> | ||
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index fd13750ca4ba..13d6b85668cf 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h | |||
@@ -3,3 +3,5 @@ | |||
3 | #include <trace/sched.h> | 3 | #include <trace/sched.h> |
4 | #include <trace/irq.h> | 4 | #include <trace/irq.h> |
5 | #include <trace/lockdep.h> | 5 | #include <trace/lockdep.h> |
6 | #include <trace/skb.h> | ||
7 | #include <trace/kmem.h> | ||
diff --git a/init/main.c b/init/main.c index 3585f073d636..eece40cd8a64 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <linux/idr.h> | 64 | #include <linux/idr.h> |
65 | #include <linux/ftrace.h> | 65 | #include <linux/ftrace.h> |
66 | #include <linux/async.h> | 66 | #include <linux/async.h> |
67 | #include <linux/kmemtrace.h> | ||
67 | #include <trace/boot.h> | 68 | #include <trace/boot.h> |
68 | 69 | ||
69 | #include <asm/io.h> | 70 | #include <asm/io.h> |
@@ -71,7 +72,6 @@ | |||
71 | #include <asm/setup.h> | 72 | #include <asm/setup.h> |
72 | #include <asm/sections.h> | 73 | #include <asm/sections.h> |
73 | #include <asm/cacheflush.h> | 74 | #include <asm/cacheflush.h> |
74 | #include <trace/kmemtrace.h> | ||
75 | 75 | ||
76 | #ifdef CONFIG_X86_LOCAL_APIC | 76 | #ifdef CONFIG_X86_LOCAL_APIC |
77 | #include <asm/smp.h> | 77 | #include <asm/smp.h> |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index b0f011866969..c4582a6ea953 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -3061,6 +3061,8 @@ found_it: | |||
3061 | put_lock_stats(stats); | 3061 | put_lock_stats(stats); |
3062 | } | 3062 | } |
3063 | 3063 | ||
3064 | DEFINE_TRACE(lock_acquired); | ||
3065 | |||
3064 | static void | 3066 | static void |
3065 | __lock_acquired(struct lockdep_map *lock, unsigned long ip) | 3067 | __lock_acquired(struct lockdep_map *lock, unsigned long ip) |
3066 | { | 3068 | { |
@@ -3099,6 +3101,8 @@ found_it: | |||
3099 | hlock->holdtime_stamp = now; | 3101 | hlock->holdtime_stamp = now; |
3100 | } | 3102 | } |
3101 | 3103 | ||
3104 | trace_lock_acquired(lock, ip, waittime); | ||
3105 | |||
3102 | stats = get_lock_stats(hlock_class(hlock)); | 3106 | stats = get_lock_stats(hlock_class(hlock)); |
3103 | if (waittime) { | 3107 | if (waittime) { |
3104 | if (hlock->read) | 3108 | if (hlock->read) |
@@ -3137,14 +3141,10 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3137 | } | 3141 | } |
3138 | EXPORT_SYMBOL_GPL(lock_contended); | 3142 | EXPORT_SYMBOL_GPL(lock_contended); |
3139 | 3143 | ||
3140 | DEFINE_TRACE(lock_acquired); | ||
3141 | |||
3142 | void lock_acquired(struct lockdep_map *lock, unsigned long ip) | 3144 | void lock_acquired(struct lockdep_map *lock, unsigned long ip) |
3143 | { | 3145 | { |
3144 | unsigned long flags; | 3146 | unsigned long flags; |
3145 | 3147 | ||
3146 | trace_lock_acquired(lock, ip); | ||
3147 | |||
3148 | if (unlikely(!lock_stat)) | 3148 | if (unlikely(!lock_stat)) |
3149 | return; | 3149 | return; |
3150 | 3150 | ||
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 417d1985e299..57981d338d1f 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -48,6 +48,9 @@ config FTRACE_NMI_ENTER | |||
48 | depends on HAVE_FTRACE_NMI_ENTER | 48 | depends on HAVE_FTRACE_NMI_ENTER |
49 | default y | 49 | default y |
50 | 50 | ||
51 | config EVENT_TRACING | ||
52 | bool | ||
53 | |||
51 | config TRACING | 54 | config TRACING |
52 | bool | 55 | bool |
53 | select DEBUG_FS | 56 | select DEBUG_FS |
@@ -56,6 +59,7 @@ config TRACING | |||
56 | select TRACEPOINTS | 59 | select TRACEPOINTS |
57 | select NOP_TRACER | 60 | select NOP_TRACER |
58 | select BINARY_PRINTF | 61 | select BINARY_PRINTF |
62 | select EVENT_TRACING | ||
59 | 63 | ||
60 | # | 64 | # |
61 | # Minimum requirements an architecture has to meet for us to | 65 | # Minimum requirements an architecture has to meet for us to |
@@ -104,6 +108,7 @@ config FUNCTION_GRAPH_TRACER | |||
104 | the return value. This is done by setting the current return | 108 | the return value. This is done by setting the current return |
105 | address on the current task structure into a stack of calls. | 109 | address on the current task structure into a stack of calls. |
106 | 110 | ||
111 | |||
107 | config IRQSOFF_TRACER | 112 | config IRQSOFF_TRACER |
108 | bool "Interrupts-off Latency Tracer" | 113 | bool "Interrupts-off Latency Tracer" |
109 | default n | 114 | default n |
@@ -375,6 +380,20 @@ config DYNAMIC_FTRACE | |||
375 | were made. If so, it runs stop_machine (stops all CPUS) | 380 | were made. If so, it runs stop_machine (stops all CPUS) |
376 | and modifies the code to jump over the call to ftrace. | 381 | and modifies the code to jump over the call to ftrace. |
377 | 382 | ||
383 | config FUNCTION_PROFILER | ||
384 | bool "Kernel function profiler" | ||
385 | depends on FUNCTION_TRACER | ||
386 | default n | ||
387 | help | ||
388 | This option enables the kernel function profiler. A file is created | ||
389 | in debugfs called function_profile_enabled which defaults to zero. | ||
390 | When a 1 is echoed into this file, profiling begins; when a | ||
391 | zero is entered, profiling stops. Per-cpu files in the | ||
392 | trace_stat directory (function0, function1, ...) show the | ||
393 | functions that have been hit and their counters. | ||
394 | |||
395 | If in doubt, say N. | ||
396 | |||
378 | config FTRACE_MCOUNT_RECORD | 397 | config FTRACE_MCOUNT_RECORD |
379 | def_bool y | 398 | def_bool y |
380 | depends on DYNAMIC_FTRACE | 399 | depends on DYNAMIC_FTRACE |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 2630f5121ec1..3ad367e7c97f 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -40,11 +40,11 @@ obj-$(CONFIG_POWER_TRACER) += trace_power.o | |||
40 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | 40 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o |
41 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o | 41 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o |
42 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | 42 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o |
43 | obj-$(CONFIG_EVENT_TRACER) += trace_events.o | 43 | obj-$(CONFIG_EVENT_TRACING) += trace_events.o |
44 | obj-$(CONFIG_EVENT_TRACER) += events.o | 44 | obj-$(CONFIG_EVENT_TRACER) += events.o |
45 | obj-$(CONFIG_EVENT_TRACER) += trace_export.o | 45 | obj-$(CONFIG_EVENT_TRACING) += trace_export.o |
46 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o | 46 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
47 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o | 47 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o |
48 | obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o | 48 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
49 | 49 | ||
50 | libftrace-y := ftrace.o | 50 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 921ef5d1f0ba..2b98195b338b 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -971,6 +971,16 @@ static inline const void *pdu_start(const struct trace_entry *ent) | |||
971 | return te_blk_io_trace(ent) + 1; | 971 | return te_blk_io_trace(ent) + 1; |
972 | } | 972 | } |
973 | 973 | ||
974 | static inline u32 t_action(const struct trace_entry *ent) | ||
975 | { | ||
976 | return te_blk_io_trace(ent)->action; | ||
977 | } | ||
978 | |||
979 | static inline u32 t_bytes(const struct trace_entry *ent) | ||
980 | { | ||
981 | return te_blk_io_trace(ent)->bytes; | ||
982 | } | ||
983 | |||
974 | static inline u32 t_sec(const struct trace_entry *ent) | 984 | static inline u32 t_sec(const struct trace_entry *ent) |
975 | { | 985 | { |
976 | return te_blk_io_trace(ent)->bytes >> 9; | 986 | return te_blk_io_trace(ent)->bytes >> 9; |
@@ -1031,25 +1041,87 @@ static int blk_log_action(struct trace_iterator *iter, const char *act) | |||
1031 | MAJOR(t->device), MINOR(t->device), act, rwbs); | 1041 | MAJOR(t->device), MINOR(t->device), act, rwbs); |
1032 | } | 1042 | } |
1033 | 1043 | ||
1044 | static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) | ||
1045 | { | ||
1046 | const char *pdu_buf; | ||
1047 | int pdu_len; | ||
1048 | int i, end, ret; | ||
1049 | |||
1050 | pdu_buf = pdu_start(ent); | ||
1051 | pdu_len = te_blk_io_trace(ent)->pdu_len; | ||
1052 | |||
1053 | if (!pdu_len) | ||
1054 | return 1; | ||
1055 | |||
1056 | /* find where the trailing zeroes begin */ | ||
1057 | for (end = pdu_len - 1; end >= 0; end--) | ||
1058 | if (pdu_buf[end]) | ||
1059 | break; | ||
1060 | end++; | ||
1061 | |||
1062 | if (!trace_seq_putc(s, '(')) | ||
1063 | return 0; | ||
1064 | |||
1065 | for (i = 0; i < pdu_len; i++) { | ||
1066 | |||
1067 | ret = trace_seq_printf(s, "%s%02x", | ||
1068 | i == 0 ? "" : " ", pdu_buf[i]); | ||
1069 | if (!ret) | ||
1070 | return ret; | ||
1071 | |||
1072 | /* | ||
1073 | * stop when the rest is just zeroes and indicate so | ||
1074 | * with a ".." appended | ||
1075 | */ | ||
1076 | if (i == end && end != pdu_len - 1) | ||
1077 | return trace_seq_puts(s, " ..) "); | ||
1078 | } | ||
1079 | |||
1080 | return trace_seq_puts(s, ") "); | ||
1081 | } | ||
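/*
 * Hedged worked example of the elision above: for pdu_len = 8 and
 * bytes { 0x12, 0x34, 0, 0, 0, 0, 0, 0 }, end becomes 2 (one past the
 * last non-zero byte), so the loop prints "12", " 34", then " 00" at
 * i == end and stops, emitting "(12 34 00 ..) ".
 */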
1082 | |||
1034 | static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) | 1083 | static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) |
1035 | { | 1084 | { |
1036 | char cmd[TASK_COMM_LEN]; | 1085 | char cmd[TASK_COMM_LEN]; |
1037 | 1086 | ||
1038 | trace_find_cmdline(ent->pid, cmd); | 1087 | trace_find_cmdline(ent->pid, cmd); |
1039 | 1088 | ||
1040 | if (t_sec(ent)) | 1089 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { |
1041 | return trace_seq_printf(s, "%llu + %u [%s]\n", | 1090 | int ret; |
1042 | t_sector(ent), t_sec(ent), cmd); | 1091 | |
1043 | return trace_seq_printf(s, "[%s]\n", cmd); | 1092 | ret = trace_seq_printf(s, "%u ", t_bytes(ent)); |
1093 | if (!ret) | ||
1094 | return 0; | ||
1095 | ret = blk_log_dump_pdu(s, ent); | ||
1096 | if (!ret) | ||
1097 | return 0; | ||
1098 | return trace_seq_printf(s, "[%s]\n", cmd); | ||
1099 | } else { | ||
1100 | if (t_sec(ent)) | ||
1101 | return trace_seq_printf(s, "%llu + %u [%s]\n", | ||
1102 | t_sector(ent), t_sec(ent), cmd); | ||
1103 | return trace_seq_printf(s, "[%s]\n", cmd); | ||
1104 | } | ||
1044 | } | 1105 | } |
1045 | 1106 | ||
1046 | static int blk_log_with_error(struct trace_seq *s, | 1107 | static int blk_log_with_error(struct trace_seq *s, |
1047 | const struct trace_entry *ent) | 1108 | const struct trace_entry *ent) |
1048 | { | 1109 | { |
1049 | if (t_sec(ent)) | 1110 | if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { |
1050 | return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent), | 1111 | int ret; |
1051 | t_sec(ent), t_error(ent)); | 1112 | |
1052 | return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent)); | 1113 | ret = blk_log_dump_pdu(s, ent); |
1114 | if (ret) | ||
1115 | return trace_seq_printf(s, "[%d]\n", t_error(ent)); | ||
1116 | return 0; | ||
1117 | } else { | ||
1118 | if (t_sec(ent)) | ||
1119 | return trace_seq_printf(s, "%llu + %u [%d]\n", | ||
1120 | t_sector(ent), | ||
1121 | t_sec(ent), t_error(ent)); | ||
1122 | return trace_seq_printf(s, "%llu [%d]\n", | ||
1123 | t_sector(ent), t_error(ent)); | ||
1124 | } | ||
1053 | } | 1125 | } |
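/*
 * Hedged note: BLK_TC_ACT(BLK_TC_PC) marks SCSI packet (passthrough)
 * commands, which carry a command PDU rather than a meaningful
 * sector + size, hence the separate dump path in both printers above.
 */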
1054 | 1126 | ||
1055 | static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) | 1127 | static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) |
@@ -1182,7 +1254,7 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, | |||
1182 | } | 1254 | } |
1183 | 1255 | ||
1184 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) | 1256 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) |
1185 | ret = trace_seq_printf(s, "Bad pc action %x\n", what); | 1257 | ret = trace_seq_printf(s, "Unknown action %x\n", what); |
1186 | else { | 1258 | else { |
1187 | ret = log_action(iter, what2act[what].act[long_act]); | 1259 | ret = log_action(iter, what2act[what].act[long_act]); |
1188 | if (ret) | 1260 | if (ret) |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f1ed080406c3..8e6a0b5c9940 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -33,7 +33,8 @@ | |||
33 | 33 | ||
34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
35 | 35 | ||
36 | #include "trace.h" | 36 | #include "trace_output.h" |
37 | #include "trace_stat.h" | ||
37 | 38 | ||
38 | #define FTRACE_WARN_ON(cond) \ | 39 | #define FTRACE_WARN_ON(cond) \ |
39 | do { \ | 40 | do { \ |
@@ -68,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock); | |||
68 | 69 | ||
69 | static struct ftrace_ops ftrace_list_end __read_mostly = | 70 | static struct ftrace_ops ftrace_list_end __read_mostly = |
70 | { | 71 | { |
71 | .func = ftrace_stub, | 72 | .func = ftrace_stub, |
72 | }; | 73 | }; |
73 | 74 | ||
74 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 75 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
@@ -240,6 +241,576 @@ static void ftrace_update_pid_func(void) | |||
240 | #endif | 241 | #endif |
241 | } | 242 | } |
242 | 243 | ||
244 | #ifdef CONFIG_FUNCTION_PROFILER | ||
245 | struct ftrace_profile { | ||
246 | struct hlist_node node; | ||
247 | unsigned long ip; | ||
248 | unsigned long counter; | ||
249 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
250 | unsigned long long time; | ||
251 | #endif | ||
252 | }; | ||
253 | |||
254 | struct ftrace_profile_page { | ||
255 | struct ftrace_profile_page *next; | ||
256 | unsigned long index; | ||
257 | struct ftrace_profile records[]; | ||
258 | }; | ||
259 | |||
260 | struct ftrace_profile_stat { | ||
261 | atomic_t disabled; | ||
262 | struct hlist_head *hash; | ||
263 | struct ftrace_profile_page *pages; | ||
264 | struct ftrace_profile_page *start; | ||
265 | struct tracer_stat stat; | ||
266 | }; | ||
267 | |||
268 | #define PROFILE_RECORDS_SIZE \ | ||
269 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) | ||
270 | |||
271 | #define PROFILES_PER_PAGE \ | ||
272 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) | ||
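/*
 * Hedged arithmetic: on a 64-bit build with 4KB pages and
 * CONFIG_FUNCTION_GRAPH_TRACER, struct ftrace_profile is 40 bytes
 * (hlist_node + ip + counter + time) and the page header 16 bytes,
 * so PROFILES_PER_PAGE = (4096 - 16) / 40 = 102 records per page.
 */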
273 | |||
274 | static int ftrace_profile_bits __read_mostly; | ||
275 | static int ftrace_profile_enabled __read_mostly; | ||
276 | |||
277 | /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ | ||
278 | static DEFINE_MUTEX(ftrace_profile_lock); | ||
279 | |||
280 | static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); | ||
281 | |||
282 | #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ | ||
283 | |||
284 | static void * | ||
285 | function_stat_next(void *v, int idx) | ||
286 | { | ||
287 | struct ftrace_profile *rec = v; | ||
288 | struct ftrace_profile_page *pg; | ||
289 | |||
290 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); | ||
291 | |||
292 | again: | ||
293 | rec++; | ||
294 | if ((void *)rec >= (void *)&pg->records[pg->index]) { | ||
295 | pg = pg->next; | ||
296 | if (!pg) | ||
297 | return NULL; | ||
298 | rec = &pg->records[0]; | ||
299 | if (!rec->counter) | ||
300 | goto again; | ||
301 | } | ||
302 | |||
303 | return rec; | ||
304 | } | ||
305 | |||
306 | static void *function_stat_start(struct tracer_stat *trace) | ||
307 | { | ||
308 | struct ftrace_profile_stat *stat = | ||
309 | container_of(trace, struct ftrace_profile_stat, stat); | ||
310 | |||
311 | if (!stat || !stat->start) | ||
312 | return NULL; | ||
313 | |||
314 | return function_stat_next(&stat->start->records[0], 0); | ||
315 | } | ||
316 | |||
317 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
318 | /* function graph compares on total time */ | ||
319 | static int function_stat_cmp(void *p1, void *p2) | ||
320 | { | ||
321 | struct ftrace_profile *a = p1; | ||
322 | struct ftrace_profile *b = p2; | ||
323 | |||
324 | if (a->time < b->time) | ||
325 | return -1; | ||
326 | if (a->time > b->time) | ||
327 | return 1; | ||
328 | else | ||
329 | return 0; | ||
330 | } | ||
331 | #else | ||
332 | /* without function graph, compare against hit counts */ | ||
333 | static int function_stat_cmp(void *p1, void *p2) | ||
334 | { | ||
335 | struct ftrace_profile *a = p1; | ||
336 | struct ftrace_profile *b = p2; | ||
337 | |||
338 | if (a->counter < b->counter) | ||
339 | return -1; | ||
340 | if (a->counter > b->counter) | ||
341 | return 1; | ||
342 | else | ||
343 | return 0; | ||
344 | } | ||
345 | #endif | ||
346 | |||
347 | static int function_stat_headers(struct seq_file *m) | ||
348 | { | ||
349 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
350 | seq_printf(m, " Function " | ||
351 | "Hit Time Avg\n" | ||
352 | " -------- " | ||
353 | "--- ---- ---\n"); | ||
354 | #else | ||
355 | seq_printf(m, " Function Hit\n" | ||
356 | " -------- ---\n"); | ||
357 | #endif | ||
358 | return 0; | ||
359 | } | ||
360 | |||
361 | static int function_stat_show(struct seq_file *m, void *v) | ||
362 | { | ||
363 | struct ftrace_profile *rec = v; | ||
364 | char str[KSYM_SYMBOL_LEN]; | ||
365 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
366 | static DEFINE_MUTEX(mutex); | ||
367 | static struct trace_seq s; | ||
368 | unsigned long long avg; | ||
369 | #endif | ||
370 | |||
371 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
372 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); | ||
373 | |||
374 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
375 | seq_printf(m, " "); | ||
376 | avg = rec->time; | ||
377 | do_div(avg, rec->counter); | ||
378 | |||
379 | mutex_lock(&mutex); | ||
380 | trace_seq_init(&s); | ||
381 | trace_print_graph_duration(rec->time, &s); | ||
382 | trace_seq_puts(&s, " "); | ||
383 | trace_print_graph_duration(avg, &s); | ||
384 | trace_print_seq(m, &s); | ||
385 | mutex_unlock(&mutex); | ||
386 | #endif | ||
387 | seq_putc(m, '\n'); | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) | ||
393 | { | ||
394 | struct ftrace_profile_page *pg; | ||
395 | |||
396 | pg = stat->pages = stat->start; | ||
397 | |||
398 | while (pg) { | ||
399 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); | ||
400 | pg->index = 0; | ||
401 | pg = pg->next; | ||
402 | } | ||
403 | |||
404 | memset(stat->hash, 0, | ||
405 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); | ||
406 | } | ||
407 | |||
408 | int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) | ||
409 | { | ||
410 | struct ftrace_profile_page *pg; | ||
411 | int functions; | ||
412 | int pages; | ||
413 | int i; | ||
414 | |||
415 | /* If we already allocated, do nothing */ | ||
416 | if (stat->pages) | ||
417 | return 0; | ||
418 | |||
419 | stat->pages = (void *)get_zeroed_page(GFP_KERNEL); | ||
420 | if (!stat->pages) | ||
421 | return -ENOMEM; | ||
422 | |||
423 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
424 | functions = ftrace_update_tot_cnt; | ||
425 | #else | ||
426 | /* | ||
427 | * We do not know the number of functions that exist because | ||
428 | * dynamic tracing is what counts them. From past experience | ||
429 | * there are around 20K functions. That should be more than enough. | ||
430 | * It is highly unlikely we will execute every function in | ||
431 | * the kernel. | ||
432 | */ | ||
433 | functions = 20000; | ||
434 | #endif | ||
435 | |||
436 | pg = stat->start = stat->pages; | ||
437 | |||
438 | pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); | ||
439 | |||
440 | for (i = 0; i < pages; i++) { | ||
441 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); | ||
442 | if (!pg->next) | ||
443 | goto out_free; | ||
444 | pg = pg->next; | ||
445 | } | ||
446 | |||
447 | return 0; | ||
448 | |||
449 | out_free: | ||
450 | pg = stat->start; | ||
451 | while (pg) { | ||
452 | unsigned long tmp = (unsigned long)pg; | ||
453 | |||
454 | pg = pg->next; | ||
455 | free_page(tmp); | ||
456 | } | ||
457 | |||
458 | free_page((unsigned long)stat->pages); | ||
459 | stat->pages = NULL; | ||
460 | stat->start = NULL; | ||
461 | |||
462 | return -ENOMEM; | ||
463 | } | ||
464 | |||
465 | static int ftrace_profile_init_cpu(int cpu) | ||
466 | { | ||
467 | struct ftrace_profile_stat *stat; | ||
468 | int size; | ||
469 | |||
470 | stat = &per_cpu(ftrace_profile_stats, cpu); | ||
471 | |||
472 | if (stat->hash) { | ||
473 | /* If the profile is already created, simply reset it */ | ||
474 | ftrace_profile_reset(stat); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | /* | ||
479 | * We are profiling all functions, but usually only a few thousand | ||
480 | * functions are hit. We'll make a hash of 1024 items. | ||
481 | */ | ||
482 | size = FTRACE_PROFILE_HASH_SIZE; | ||
483 | |||
484 | stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); | ||
485 | |||
486 | if (!stat->hash) | ||
487 | return -ENOMEM; | ||
488 | |||
489 | if (!ftrace_profile_bits) { | ||
490 | size--; | ||
491 | |||
492 | for (; size; size >>= 1) | ||
493 | ftrace_profile_bits++; | ||
494 | } | ||
495 | |||
496 | /* Preallocate the function profiling pages */ | ||
497 | if (ftrace_profile_pages_init(stat) < 0) { | ||
498 | kfree(stat->hash); | ||
499 | stat->hash = NULL; | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
503 | return 0; | ||
504 | } | ||
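/*
 * Hedged arithmetic check: with FTRACE_PROFILE_HASH_SIZE = 1024, the
 * loop above shifts 1023 right ten times, so ftrace_profile_bits
 * becomes 10 and hash_long(ip, 10) indexes buckets 0..1023.
 */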
505 | |||
506 | static int ftrace_profile_init(void) | ||
507 | { | ||
508 | int cpu; | ||
509 | int ret = 0; | ||
510 | |||
511 | for_each_online_cpu(cpu) { | ||
512 | ret = ftrace_profile_init_cpu(cpu); | ||
513 | if (ret) | ||
514 | break; | ||
515 | } | ||
516 | |||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | /* interrupts must be disabled */ | ||
521 | static struct ftrace_profile * | ||
522 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) | ||
523 | { | ||
524 | struct ftrace_profile *rec; | ||
525 | struct hlist_head *hhd; | ||
526 | struct hlist_node *n; | ||
527 | unsigned long key; | ||
528 | |||
529 | key = hash_long(ip, ftrace_profile_bits); | ||
530 | hhd = &stat->hash[key]; | ||
531 | |||
532 | if (hlist_empty(hhd)) | ||
533 | return NULL; | ||
534 | |||
535 | hlist_for_each_entry_rcu(rec, n, hhd, node) { | ||
536 | if (rec->ip == ip) | ||
537 | return rec; | ||
538 | } | ||
539 | |||
540 | return NULL; | ||
541 | } | ||
542 | |||
543 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, | ||
544 | struct ftrace_profile *rec) | ||
545 | { | ||
546 | unsigned long key; | ||
547 | |||
548 | key = hash_long(rec->ip, ftrace_profile_bits); | ||
549 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); | ||
550 | } | ||
551 | |||
552 | /* | ||
553 | * The memory is already allocated; this simply finds a new record to use. | ||
554 | */ | ||
555 | static struct ftrace_profile * | ||
556 | ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) | ||
557 | { | ||
558 | struct ftrace_profile *rec = NULL; | ||
559 | |||
560 | /* prevent recursion (from NMIs) */ | ||
561 | if (atomic_inc_return(&stat->disabled) != 1) | ||
562 | goto out; | ||
563 | |||
564 | /* | ||
565 | * Try to find the function again since an NMI | ||
566 | * could have added it | ||
567 | */ | ||
568 | rec = ftrace_find_profiled_func(stat, ip); | ||
569 | if (rec) | ||
570 | goto out; | ||
571 | |||
572 | if (stat->pages->index == PROFILES_PER_PAGE) { | ||
573 | if (!stat->pages->next) | ||
574 | goto out; | ||
575 | stat->pages = stat->pages->next; | ||
576 | } | ||
577 | |||
578 | rec = &stat->pages->records[stat->pages->index++]; | ||
579 | rec->ip = ip; | ||
580 | ftrace_add_profile(stat, rec); | ||
581 | |||
582 | out: | ||
583 | atomic_dec(&stat->disabled); | ||
584 | |||
585 | return rec; | ||
586 | } | ||
587 | |||
588 | static void | ||
589 | function_profile_call(unsigned long ip, unsigned long parent_ip) | ||
590 | { | ||
591 | struct ftrace_profile_stat *stat; | ||
592 | struct ftrace_profile *rec; | ||
593 | unsigned long flags; | ||
594 | |||
595 | if (!ftrace_profile_enabled) | ||
596 | return; | ||
597 | |||
598 | local_irq_save(flags); | ||
599 | |||
600 | stat = &__get_cpu_var(ftrace_profile_stats); | ||
601 | if (!stat->hash) | ||
602 | goto out; | ||
603 | |||
604 | rec = ftrace_find_profiled_func(stat, ip); | ||
605 | if (!rec) { | ||
606 | rec = ftrace_profile_alloc(stat, ip); | ||
607 | if (!rec) | ||
608 | goto out; | ||
609 | } | ||
610 | |||
611 | rec->counter++; | ||
612 | out: | ||
613 | local_irq_restore(flags); | ||
614 | } | ||
615 | |||
616 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
617 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | ||
618 | { | ||
619 | function_profile_call(trace->func, 0); | ||
620 | return 1; | ||
621 | } | ||
622 | |||
623 | static void profile_graph_return(struct ftrace_graph_ret *trace) | ||
624 | { | ||
625 | struct ftrace_profile_stat *stat; | ||
626 | unsigned long long calltime; | ||
627 | struct ftrace_profile *rec; | ||
628 | unsigned long flags; | ||
629 | |||
630 | local_irq_save(flags); | ||
631 | stat = &__get_cpu_var(ftrace_profile_stats); | ||
632 | if (!stat->hash) | ||
633 | goto out; | ||
634 | |||
635 | calltime = trace->rettime - trace->calltime; | ||
636 | |||
637 | if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) { | ||
638 | int index; | ||
639 | |||
640 | index = trace->depth; | ||
641 | |||
642 | /* Append this call time to the parent time to subtract */ | ||
643 | if (index) | ||
644 | current->ret_stack[index - 1].subtime += calltime; | ||
645 | |||
646 | if (current->ret_stack[index].subtime < calltime) | ||
647 | calltime -= current->ret_stack[index].subtime; | ||
648 | else | ||
649 | calltime = 0; | ||
650 | } | ||
651 | |||
652 | rec = ftrace_find_profiled_func(stat, trace->func); | ||
653 | if (rec) | ||
654 | rec->time += calltime; | ||
655 | |||
656 | out: | ||
657 | local_irq_restore(flags); | ||
658 | } | ||
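/*
 * Hedged worked example of the subtime logic above (active only when
 * TRACE_ITER_GRAPH_TIME is off): if parent() runs 10us wall time and
 * its only child runs 4us, the child's return adds 4us to the
 * parent's ret_stack subtime; when parent() returns, calltime becomes
 * 10us - 4us = 6us, so only self time is credited to parent().
 */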
659 | |||
660 | static int register_ftrace_profiler(void) | ||
661 | { | ||
662 | return register_ftrace_graph(&profile_graph_return, | ||
663 | &profile_graph_entry); | ||
664 | } | ||
665 | |||
666 | static void unregister_ftrace_profiler(void) | ||
667 | { | ||
668 | unregister_ftrace_graph(); | ||
669 | } | ||
670 | #else | ||
671 | static struct ftrace_ops ftrace_profile_ops __read_mostly = | ||
672 | { | ||
673 | .func = function_profile_call, | ||
674 | }; | ||
675 | |||
676 | static int register_ftrace_profiler(void) | ||
677 | { | ||
678 | return register_ftrace_function(&ftrace_profile_ops); | ||
679 | } | ||
680 | |||
681 | static void unregister_ftrace_profiler(void) | ||
682 | { | ||
683 | unregister_ftrace_function(&ftrace_profile_ops); | ||
684 | } | ||
685 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
686 | |||
687 | static ssize_t | ||
688 | ftrace_profile_write(struct file *filp, const char __user *ubuf, | ||
689 | size_t cnt, loff_t *ppos) | ||
690 | { | ||
691 | unsigned long val; | ||
692 | char buf[64]; /* big enough to hold a number */ | ||
693 | int ret; | ||
694 | |||
695 | if (cnt >= sizeof(buf)) | ||
696 | return -EINVAL; | ||
697 | |||
698 | if (copy_from_user(&buf, ubuf, cnt)) | ||
699 | return -EFAULT; | ||
700 | |||
701 | buf[cnt] = 0; | ||
702 | |||
703 | ret = strict_strtoul(buf, 10, &val); | ||
704 | if (ret < 0) | ||
705 | return ret; | ||
706 | |||
707 | val = !!val; | ||
708 | |||
709 | mutex_lock(&ftrace_profile_lock); | ||
710 | if (ftrace_profile_enabled ^ val) { | ||
711 | if (val) { | ||
712 | ret = ftrace_profile_init(); | ||
713 | if (ret < 0) { | ||
714 | cnt = ret; | ||
715 | goto out; | ||
716 | } | ||
717 | |||
718 | ret = register_ftrace_profiler(); | ||
719 | if (ret < 0) { | ||
720 | cnt = ret; | ||
721 | goto out; | ||
722 | } | ||
723 | ftrace_profile_enabled = 1; | ||
724 | } else { | ||
725 | ftrace_profile_enabled = 0; | ||
726 | unregister_ftrace_profiler(); | ||
727 | } | ||
728 | } | ||
729 | out: | ||
730 | mutex_unlock(&ftrace_profile_lock); | ||
731 | |||
732 | filp->f_pos += cnt; | ||
733 | |||
734 | return cnt; | ||
735 | } | ||
736 | |||
737 | static ssize_t | ||
738 | ftrace_profile_read(struct file *filp, char __user *ubuf, | ||
739 | size_t cnt, loff_t *ppos) | ||
740 | { | ||
741 | char buf[64]; /* big enough to hold a number */ | ||
742 | int r; | ||
743 | |||
744 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); | ||
745 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
746 | } | ||
747 | |||
748 | static const struct file_operations ftrace_profile_fops = { | ||
749 | .open = tracing_open_generic, | ||
750 | .read = ftrace_profile_read, | ||
751 | .write = ftrace_profile_write, | ||
752 | }; | ||
753 | |||
754 | /* used to initialize the real stat files */ | ||
755 | static struct tracer_stat function_stats __initdata = { | ||
756 | .name = "functions", | ||
757 | .stat_start = function_stat_start, | ||
758 | .stat_next = function_stat_next, | ||
759 | .stat_cmp = function_stat_cmp, | ||
760 | .stat_headers = function_stat_headers, | ||
761 | .stat_show = function_stat_show | ||
762 | }; | ||
763 | |||
764 | static void ftrace_profile_debugfs(struct dentry *d_tracer) | ||
765 | { | ||
766 | struct ftrace_profile_stat *stat; | ||
767 | struct dentry *entry; | ||
768 | char *name; | ||
769 | int ret; | ||
770 | int cpu; | ||
771 | |||
772 | for_each_possible_cpu(cpu) { | ||
773 | stat = &per_cpu(ftrace_profile_stats, cpu); | ||
774 | |||
775 | /* allocate enough for function name + cpu number */ | ||
776 | name = kmalloc(32, GFP_KERNEL); | ||
777 | if (!name) { | ||
778 | /* | ||
779 | * The files created are permanent; if something fails | ||
780 | * here, we still do not free memory. | ||
781 | */ | ||
783 | WARN(1, | ||
784 | "Could not allocate stat file for cpu %d\n", | ||
785 | cpu); | ||
786 | return; | ||
787 | } | ||
788 | stat->stat = function_stats; | ||
789 | snprintf(name, 32, "function%d", cpu); | ||
790 | stat->stat.name = name; | ||
791 | ret = register_stat_tracer(&stat->stat); | ||
792 | if (ret) { | ||
793 | WARN(1, | ||
794 | "Could not register function stat for cpu %d\n", | ||
795 | cpu); | ||
796 | kfree(name); | ||
797 | return; | ||
798 | } | ||
799 | } | ||
800 | |||
801 | entry = debugfs_create_file("function_profile_enabled", 0644, | ||
802 | d_tracer, NULL, &ftrace_profile_fops); | ||
803 | if (!entry) | ||
804 | pr_warning("Could not create debugfs " | ||
805 | "'function_profile_enabled' entry\n"); | ||
806 | } | ||
807 | |||
808 | #else /* CONFIG_FUNCTION_PROFILER */ | ||
809 | static void ftrace_profile_debugfs(struct dentry *d_tracer) | ||
810 | { | ||
811 | } | ||
812 | #endif /* CONFIG_FUNCTION_PROFILER */ | ||
813 | |||
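
[ Note: the profiler above is built on the generic ftrace_ops callback
interface (see function_profile_call and register_ftrace_profiler). A
minimal sketch of that pattern as a hypothetical module -- the my_*
names are ours, the APIs are the ones used above: ]

	#include <linux/ftrace.h>
	#include <linux/module.h>

	/* invoked on every traced function entry; must not block */
	static void my_func_call(unsigned long ip, unsigned long parent_ip)
	{
	}

	static struct ftrace_ops my_ops __read_mostly = {
		.func = my_func_call,
	};

	static int __init my_mod_init(void)
	{
		return register_ftrace_function(&my_ops);
	}

	static void __exit my_mod_exit(void)
	{
		unregister_ftrace_function(&my_ops);
	}

	module_init(my_mod_init);
	module_exit(my_mod_exit);
	MODULE_LICENSE("GPL");
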
243 | /* set when tracing only a pid */ | 814 | /* set when tracing only a pid */ |
244 | struct pid *ftrace_pid_trace; | 815 | struct pid *ftrace_pid_trace; |
245 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 816 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
@@ -261,7 +832,6 @@ struct ftrace_func_probe { | |||
261 | struct rcu_head rcu; | 832 | struct rcu_head rcu; |
262 | }; | 833 | }; |
263 | 834 | ||
264 | |||
265 | enum { | 835 | enum { |
266 | FTRACE_ENABLE_CALLS = (1 << 0), | 836 | FTRACE_ENABLE_CALLS = (1 << 0), |
267 | FTRACE_DISABLE_CALLS = (1 << 1), | 837 | FTRACE_DISABLE_CALLS = (1 << 1), |
@@ -1408,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | |||
1408 | 1978 | ||
1409 | static struct ftrace_ops trace_probe_ops __read_mostly = | 1979 | static struct ftrace_ops trace_probe_ops __read_mostly = |
1410 | { | 1980 | { |
1411 | .func = function_trace_probe_call, | 1981 | .func = function_trace_probe_call, |
1412 | }; | 1982 | }; |
1413 | 1983 | ||
1414 | static int ftrace_probe_registered; | 1984 | static int ftrace_probe_registered; |
@@ -2128,38 +2698,23 @@ static const struct file_operations ftrace_graph_fops = { | |||
2128 | 2698 | ||
2129 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | 2699 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) |
2130 | { | 2700 | { |
2131 | struct dentry *entry; | ||
2132 | 2701 | ||
2133 | entry = debugfs_create_file("available_filter_functions", 0444, | 2702 | trace_create_file("available_filter_functions", 0444, |
2134 | d_tracer, NULL, &ftrace_avail_fops); | 2703 | d_tracer, NULL, &ftrace_avail_fops); |
2135 | if (!entry) | ||
2136 | pr_warning("Could not create debugfs " | ||
2137 | "'available_filter_functions' entry\n"); | ||
2138 | 2704 | ||
2139 | entry = debugfs_create_file("failures", 0444, | 2705 | trace_create_file("failures", 0444, |
2140 | d_tracer, NULL, &ftrace_failures_fops); | 2706 | d_tracer, NULL, &ftrace_failures_fops); |
2141 | if (!entry) | ||
2142 | pr_warning("Could not create debugfs 'failures' entry\n"); | ||
2143 | 2707 | ||
2144 | entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer, | 2708 | trace_create_file("set_ftrace_filter", 0644, d_tracer, |
2145 | NULL, &ftrace_filter_fops); | 2709 | NULL, &ftrace_filter_fops); |
2146 | if (!entry) | ||
2147 | pr_warning("Could not create debugfs " | ||
2148 | "'set_ftrace_filter' entry\n"); | ||
2149 | 2710 | ||
2150 | entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer, | 2711 | trace_create_file("set_ftrace_notrace", 0644, d_tracer, |
2151 | NULL, &ftrace_notrace_fops); | 2712 | NULL, &ftrace_notrace_fops); |
2152 | if (!entry) | ||
2153 | pr_warning("Could not create debugfs " | ||
2154 | "'set_ftrace_notrace' entry\n"); | ||
2155 | 2713 | ||
2156 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2714 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2157 | entry = debugfs_create_file("set_graph_function", 0444, d_tracer, | 2715 | trace_create_file("set_graph_function", 0444, d_tracer, |
2158 | NULL, | 2716 | NULL, |
2159 | &ftrace_graph_fops); | 2717 | &ftrace_graph_fops); |
2160 | if (!entry) | ||
2161 | pr_warning("Could not create debugfs " | ||
2162 | "'set_graph_function' entry\n"); | ||
2163 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2718 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2164 | 2719 | ||
2165 | return 0; | 2720 | return 0; |
@@ -2417,7 +2972,6 @@ static const struct file_operations ftrace_pid_fops = { | |||
2417 | static __init int ftrace_init_debugfs(void) | 2972 | static __init int ftrace_init_debugfs(void) |
2418 | { | 2973 | { |
2419 | struct dentry *d_tracer; | 2974 | struct dentry *d_tracer; |
2420 | struct dentry *entry; | ||
2421 | 2975 | ||
2422 | d_tracer = tracing_init_dentry(); | 2976 | d_tracer = tracing_init_dentry(); |
2423 | if (!d_tracer) | 2977 | if (!d_tracer) |
@@ -2425,11 +2979,11 @@ static __init int ftrace_init_debugfs(void) | |||
2425 | 2979 | ||
2426 | ftrace_init_dyn_debugfs(d_tracer); | 2980 | ftrace_init_dyn_debugfs(d_tracer); |
2427 | 2981 | ||
2428 | entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer, | 2982 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
2429 | NULL, &ftrace_pid_fops); | 2983 | NULL, &ftrace_pid_fops); |
2430 | if (!entry) | 2984 | |
2431 | pr_warning("Could not create debugfs " | 2985 | ftrace_profile_debugfs(d_tracer); |
2432 | "'set_ftrace_pid' entry\n"); | 2986 | |
2433 | return 0; | 2987 | return 0; |
2434 | } | 2988 | } |
2435 | fs_initcall(ftrace_init_debugfs); | 2989 | fs_initcall(ftrace_init_debugfs); |
@@ -2538,7 +3092,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
2538 | 3092 | ||
2539 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 3093 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2540 | 3094 | ||
2541 | static atomic_t ftrace_graph_active; | 3095 | static int ftrace_graph_active; |
2542 | static struct notifier_block ftrace_suspend_notifier; | 3096 | static struct notifier_block ftrace_suspend_notifier; |
2543 | 3097 | ||
2544 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 3098 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
@@ -2690,7 +3244,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2690 | mutex_lock(&ftrace_lock); | 3244 | mutex_lock(&ftrace_lock); |
2691 | 3245 | ||
2692 | /* we currently allow only one tracer registered at a time */ | 3246 | /* we currently allow only one tracer registered at a time */ |
2693 | if (atomic_read(&ftrace_graph_active)) { | 3247 | if (ftrace_graph_active) { |
2694 | ret = -EBUSY; | 3248 | ret = -EBUSY; |
2695 | goto out; | 3249 | goto out; |
2696 | } | 3250 | } |
@@ -2698,10 +3252,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2698 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; | 3252 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; |
2699 | register_pm_notifier(&ftrace_suspend_notifier); | 3253 | register_pm_notifier(&ftrace_suspend_notifier); |
2700 | 3254 | ||
2701 | atomic_inc(&ftrace_graph_active); | 3255 | ftrace_graph_active++; |
2702 | ret = start_graph_tracing(); | 3256 | ret = start_graph_tracing(); |
2703 | if (ret) { | 3257 | if (ret) { |
2704 | atomic_dec(&ftrace_graph_active); | 3258 | ftrace_graph_active--; |
2705 | goto out; | 3259 | goto out; |
2706 | } | 3260 | } |
2707 | 3261 | ||
@@ -2719,10 +3273,10 @@ void unregister_ftrace_graph(void) | |||
2719 | { | 3273 | { |
2720 | mutex_lock(&ftrace_lock); | 3274 | mutex_lock(&ftrace_lock); |
2721 | 3275 | ||
2722 | if (!unlikely(atomic_read(&ftrace_graph_active))) | 3276 | if (unlikely(!ftrace_graph_active)) |
2723 | goto out; | 3277 | goto out; |
2724 | 3278 | ||
2725 | atomic_dec(&ftrace_graph_active); | 3279 | ftrace_graph_active--; |
2726 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); | 3280 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); |
2727 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 3281 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
2728 | ftrace_graph_entry = ftrace_graph_entry_stub; | 3282 | ftrace_graph_entry = ftrace_graph_entry_stub; |
@@ -2736,7 +3290,7 @@ void unregister_ftrace_graph(void) | |||
2736 | /* Allocate a return stack for newly created task */ | 3290 | /* Allocate a return stack for newly created task */ |
2737 | void ftrace_graph_init_task(struct task_struct *t) | 3291 | void ftrace_graph_init_task(struct task_struct *t) |
2738 | { | 3292 | { |
2739 | if (atomic_read(&ftrace_graph_active)) { | 3293 | if (ftrace_graph_active) { |
2740 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | 3294 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH |
2741 | * sizeof(struct ftrace_ret_stack), | 3295 | * sizeof(struct ftrace_ret_stack), |
2742 | GFP_KERNEL); | 3296 | GFP_KERNEL); |
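
[ Note: dropping atomic_t for ftrace_graph_active is safe because every
update above happens under ftrace_lock, and the remaining lockless read
in ftrace_graph_init_task was just as racy with atomic_read(). A sketch
of the resulting pattern, with hypothetical my_* names: ]

	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_lock);
	static int my_active;	/* only written under my_lock */

	static int my_register(void)
	{
		int ret = 0;

		mutex_lock(&my_lock);
		if (my_active) {
			ret = -EBUSY;	/* one registration at a time */
			goto out;
		}
		my_active++;
	out:
		mutex_unlock(&my_lock);
		return ret;
	}
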
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c index 5011f4d91e37..86cdf671d7e2 100644 --- a/kernel/trace/kmemtrace.c +++ b/kernel/trace/kmemtrace.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/dcache.h> | 12 | #include <linux/dcache.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | 14 | ||
15 | #include <trace/kmemtrace.h> | 15 | #include <linux/kmemtrace.h> |
16 | 16 | ||
17 | #include "trace_output.h" | 17 | #include "trace_output.h" |
18 | #include "trace.h" | 18 | #include "trace.h" |
@@ -42,6 +42,7 @@ static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id, | |||
42 | gfp_t gfp_flags, | 42 | gfp_t gfp_flags, |
43 | int node) | 43 | int node) |
44 | { | 44 | { |
45 | struct ftrace_event_call *call = &event_kmem_alloc; | ||
45 | struct trace_array *tr = kmemtrace_array; | 46 | struct trace_array *tr = kmemtrace_array; |
46 | struct kmemtrace_alloc_entry *entry; | 47 | struct kmemtrace_alloc_entry *entry; |
47 | struct ring_buffer_event *event; | 48 | struct ring_buffer_event *event; |
@@ -62,7 +63,8 @@ static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id, | |||
62 | entry->gfp_flags = gfp_flags; | 63 | entry->gfp_flags = gfp_flags; |
63 | entry->node = node; | 64 | entry->node = node; |
64 | 65 | ||
65 | ring_buffer_unlock_commit(tr->buffer, event); | 66 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
67 | ring_buffer_unlock_commit(tr->buffer, event); | ||
66 | 68 | ||
67 | trace_wake_up(); | 69 | trace_wake_up(); |
68 | } | 70 | } |
@@ -71,6 +73,7 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id, | |||
71 | unsigned long call_site, | 73 | unsigned long call_site, |
72 | const void *ptr) | 74 | const void *ptr) |
73 | { | 75 | { |
76 | struct ftrace_event_call *call = &event_kmem_free; | ||
74 | struct trace_array *tr = kmemtrace_array; | 77 | struct trace_array *tr = kmemtrace_array; |
75 | struct kmemtrace_free_entry *entry; | 78 | struct kmemtrace_free_entry *entry; |
76 | struct ring_buffer_event *event; | 79 | struct ring_buffer_event *event; |
@@ -86,7 +89,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id, | |||
86 | entry->call_site = call_site; | 89 | entry->call_site = call_site; |
87 | entry->ptr = ptr; | 90 | entry->ptr = ptr; |
88 | 91 | ||
89 | ring_buffer_unlock_commit(tr->buffer, event); | 92 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
93 | ring_buffer_unlock_commit(tr->buffer, event); | ||
90 | 94 | ||
91 | trace_wake_up(); | 95 | trace_wake_up(); |
92 | } | 96 | } |
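
[ Note: the kmemtrace hunks show the commit-or-discard idiom this
series applies across the tracers: reserve an event, fill it in, and
commit only if the event filters accept it. A sketch with hypothetical
my_*/TRACE_MY names; filter_check_discard() appears in the trace.h
hunks below, and the reservation helper's signature is assumed from
this era of the tracer core: ]

	struct my_entry {			/* hypothetical event record */
		struct trace_entry	ent;
		unsigned long		ip;
	};
	extern struct ftrace_event_call event_my;	/* hypothetical */

	static void my_trace(struct trace_array *tr, unsigned long ip, int pc)
	{
		struct ftrace_event_call *call = &event_my;
		struct ring_buffer_event *event;
		struct my_entry *entry;

		event = trace_buffer_lock_reserve(tr, TRACE_MY,
						  sizeof(*entry), 0, pc);
		if (!event)
			return;
		entry = ring_buffer_event_data(event);
		entry->ip = ip;

		/* on a filter miss this discards the pending commit */
		if (!filter_check_discard(call, entry, tr->buffer, event))
			ring_buffer_unlock_commit(tr->buffer, event);
	}
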
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 960cbf44c844..f935bd5ec3e8 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -205,27 +205,6 @@ static void rb_event_set_padding(struct ring_buffer_event *event) | |||
205 | event->time_delta = 0; | 205 | event->time_delta = 0; |
206 | } | 206 | } |
207 | 207 | ||
208 | /** | ||
209 | * ring_buffer_event_discard - discard an event in the ring buffer | ||
210 | * @buffer: the ring buffer | ||
211 | * @event: the event to discard | ||
212 | * | ||
213 | * Sometimes a event that is in the ring buffer needs to be ignored. | ||
214 | * This function lets the user discard an event in the ring buffer | ||
215 | * and then that event will not be read later. | ||
216 | * | ||
217 | * Note, it is up to the user to be careful with this, and protect | ||
218 | * against races. If the user discards an event that has been consumed | ||
219 | * it is possible that it could corrupt the ring buffer. | ||
220 | */ | ||
221 | void ring_buffer_event_discard(struct ring_buffer_event *event) | ||
222 | { | ||
223 | event->type = RINGBUF_TYPE_PADDING; | ||
224 | /* time delta must be non zero */ | ||
225 | if (!event->time_delta) | ||
226 | event->time_delta = 1; | ||
227 | } | ||
228 | |||
229 | static unsigned | 208 | static unsigned |
230 | rb_event_data_length(struct ring_buffer_event *event) | 209 | rb_event_data_length(struct ring_buffer_event *event) |
231 | { | 210 | { |
@@ -1571,6 +1550,110 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1571 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); | 1550 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); |
1572 | 1551 | ||
1573 | /** | 1552 | /** |
1553 | * ring_buffer_event_discard - discard any event in the ring buffer | ||
1554 | * @event: the event to discard | ||
1555 | * | ||
1556 | * Sometimes an event that is in the ring buffer needs to be ignored. | ||
1557 | * This function lets the user discard an event in the ring buffer | ||
1558 | * and then that event will not be read later. | ||
1559 | * | ||
1560 | * Note, it is up to the user to be careful with this, and protect | ||
1561 | * against races. If the user discards an event that has been consumed | ||
1562 | * it is possible that it could corrupt the ring buffer. | ||
1563 | */ | ||
1564 | void ring_buffer_event_discard(struct ring_buffer_event *event) | ||
1565 | { | ||
1566 | event->type = RINGBUF_TYPE_PADDING; | ||
1567 | /* time delta must be non zero */ | ||
1568 | if (!event->time_delta) | ||
1569 | event->time_delta = 1; | ||
1570 | } | ||
1571 | EXPORT_SYMBOL_GPL(ring_buffer_event_discard); | ||
1572 | |||
1573 | /** | ||
1574 | * ring_buffer_discard_commit - discard an event that has not been committed | ||
1575 | * @buffer: the ring buffer | ||
1576 | * @event: non committed event to discard | ||
1577 | * | ||
1578 | * This is similar to ring_buffer_event_discard but must only be | ||
1579 | * performed on an event that has not been committed yet. The difference | ||
1580 | * is that this will also try to free the event from the ring buffer | ||
1581 | * if another event has not been added behind it. | ||
1582 | * | ||
1583 | * If another event has been added behind it, it will set the event | ||
1584 | * up as discarded, and perform the commit. | ||
1585 | * | ||
1586 | * If this function is called, do not call ring_buffer_unlock_commit on | ||
1587 | * the event. | ||
1588 | */ | ||
1589 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | ||
1590 | struct ring_buffer_event *event) | ||
1591 | { | ||
1592 | struct ring_buffer_per_cpu *cpu_buffer; | ||
1593 | unsigned long new_index, old_index; | ||
1594 | struct buffer_page *bpage; | ||
1595 | unsigned long index; | ||
1596 | unsigned long addr; | ||
1597 | int cpu; | ||
1598 | |||
1599 | /* The event is discarded regardless */ | ||
1600 | ring_buffer_event_discard(event); | ||
1601 | |||
1602 | /* | ||
1603 | * This must only be called if the event has not been | ||
1604 | * committed yet. Thus we can assume that preemption | ||
1605 | * is still disabled. | ||
1606 | */ | ||
1607 | RB_WARN_ON(buffer, !preempt_count()); | ||
1608 | |||
1609 | cpu = smp_processor_id(); | ||
1610 | cpu_buffer = buffer->buffers[cpu]; | ||
1611 | |||
1612 | new_index = rb_event_index(event); | ||
1613 | old_index = new_index + rb_event_length(event); | ||
1614 | addr = (unsigned long)event; | ||
1615 | addr &= PAGE_MASK; | ||
1616 | |||
1617 | bpage = cpu_buffer->tail_page; | ||
1618 | |||
1619 | if (bpage == (void *)addr && rb_page_write(bpage) == old_index) { | ||
1620 | /* | ||
1621 | * This is on the tail page. It is possible that | ||
1622 | * a write could come in and move the tail page | ||
1623 | * and write to the next page. That is fine | ||
1624 | * because we just shorten what is on this page. | ||
1625 | */ | ||
1626 | index = local_cmpxchg(&bpage->write, old_index, new_index); | ||
1627 | if (index == old_index) | ||
1628 | goto out; | ||
1629 | } | ||
1630 | |||
1631 | /* | ||
1632 | * The commit is still visible by the reader, so we | ||
1633 | * must increment entries. | ||
1634 | */ | ||
1635 | cpu_buffer->entries++; | ||
1636 | out: | ||
1637 | /* | ||
1638 | * If a write came in and pushed the tail page | ||
1639 | * we still need to update the commit pointer | ||
1640 | * if we were the commit. | ||
1641 | */ | ||
1642 | if (rb_is_commit(cpu_buffer, event)) | ||
1643 | rb_set_commit_to_write(cpu_buffer); | ||
1644 | |||
1645 | /* | ||
1646 | * Only the outermost preempt count level needs to restore preemption. | ||
1647 | */ | ||
1648 | if (preempt_count() == 1) | ||
1649 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); | ||
1650 | else | ||
1651 | preempt_enable_no_resched_notrace(); | ||
1652 | |||
1653 | } | ||
1654 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); | ||
1655 | |||
1656 | /** | ||
1574 | * ring_buffer_write - write data to the buffer without reserving | 1657 | * ring_buffer_write - write data to the buffer without reserving |
1575 | * @buffer: The ring buffer to write to. | 1658 | * @buffer: The ring buffer to write to. |
1576 | * @length: The length of the data being written (excluding the event header) | 1659 | * @length: The length of the data being written (excluding the event header) |
@@ -2845,14 +2928,11 @@ static const struct file_operations rb_simple_fops = { | |||
2845 | static __init int rb_init_debugfs(void) | 2928 | static __init int rb_init_debugfs(void) |
2846 | { | 2929 | { |
2847 | struct dentry *d_tracer; | 2930 | struct dentry *d_tracer; |
2848 | struct dentry *entry; | ||
2849 | 2931 | ||
2850 | d_tracer = tracing_init_dentry(); | 2932 | d_tracer = tracing_init_dentry(); |
2851 | 2933 | ||
2852 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, | 2934 | trace_create_file("tracing_on", 0644, d_tracer, |
2853 | &ring_buffer_flags, &rb_simple_fops); | 2935 | &ring_buffer_flags, &rb_simple_fops); |
2854 | if (!entry) | ||
2855 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); | ||
2856 | 2936 | ||
2857 | return 0; | 2937 | return 0; |
2858 | } | 2938 | } |
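
[ Note: ring_buffer_discard_commit() lets a writer back out of a
reservation instead of committing padding. The intended calling
sequence, sketched with the trace_current_buffer_* wrappers added in
the trace.c hunks below; my_entry and my_entry_is_wanted() are
hypothetical: ]

	static void my_record(unsigned long flags, int pc)
	{
		struct ring_buffer_event *event;
		struct my_entry *entry;

		event = trace_current_buffer_lock_reserve(TRACE_MY,
							  sizeof(*entry),
							  flags, pc);
		if (!event)
			return;
		entry = ring_buffer_event_data(event);

		if (!my_entry_is_wanted(entry)) {
			/* never call *_unlock_commit() after a discard */
			trace_current_buffer_discard_commit(event);
			return;
		}

		trace_current_buffer_unlock_commit(event, flags, pc);
	}
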
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1ce5dc6372b8..c0047fcf7076 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -171,6 +171,12 @@ static struct trace_array global_trace; | |||
171 | 171 | ||
172 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); | 172 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); |
173 | 173 | ||
174 | int filter_current_check_discard(struct ftrace_event_call *call, void *rec, | ||
175 | struct ring_buffer_event *event) | ||
176 | { | ||
177 | return filter_check_discard(call, rec, global_trace.buffer, event); | ||
178 | } | ||
179 | |||
174 | cycle_t ftrace_now(int cpu) | 180 | cycle_t ftrace_now(int cpu) |
175 | { | 181 | { |
176 | u64 ts; | 182 | u64 ts; |
@@ -255,7 +261,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
255 | 261 | ||
256 | /* trace_flags holds trace_options default values */ | 262 | /* trace_flags holds trace_options default values */ |
257 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 263 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
258 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME; | 264 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
265 | TRACE_ITER_GRAPH_TIME; | ||
259 | 266 | ||
260 | /** | 267 | /** |
261 | * trace_wake_up - wake up tasks waiting for trace input | 268 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -317,6 +324,7 @@ static const char *trace_options[] = { | |||
317 | "latency-format", | 324 | "latency-format", |
318 | "global-clock", | 325 | "global-clock", |
319 | "sleep-time", | 326 | "sleep-time", |
327 | "graph-time", | ||
320 | NULL | 328 | NULL |
321 | }; | 329 | }; |
322 | 330 | ||
@@ -402,17 +410,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | |||
402 | return cnt; | 410 | return cnt; |
403 | } | 411 | } |
404 | 412 | ||
405 | static void | ||
406 | trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
407 | { | ||
408 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; | ||
409 | |||
410 | s->buffer[len] = 0; | ||
411 | seq_puts(m, s->buffer); | ||
412 | |||
413 | trace_seq_init(s); | ||
414 | } | ||
415 | |||
416 | /** | 413 | /** |
417 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr | 414 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr |
418 | * @tr: tracer | 415 | * @tr: tracer |
@@ -893,13 +890,18 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len, | |||
893 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | 890 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, |
894 | unsigned long flags, int pc) | 891 | unsigned long flags, int pc) |
895 | { | 892 | { |
896 | return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1); | 893 | __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1); |
897 | } | 894 | } |
898 | 895 | ||
899 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | 896 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, |
900 | unsigned long flags, int pc) | 897 | unsigned long flags, int pc) |
901 | { | 898 | { |
902 | return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0); | 899 | __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0); |
900 | } | ||
901 | |||
902 | void trace_current_buffer_discard_commit(struct ring_buffer_event *event) | ||
903 | { | ||
904 | ring_buffer_discard_commit(global_trace.buffer, event); | ||
903 | } | 905 | } |
904 | 906 | ||
905 | void | 907 | void |
@@ -907,6 +909,7 @@ trace_function(struct trace_array *tr, | |||
907 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 909 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
908 | int pc) | 910 | int pc) |
909 | { | 911 | { |
912 | struct ftrace_event_call *call = &event_function; | ||
910 | struct ring_buffer_event *event; | 913 | struct ring_buffer_event *event; |
911 | struct ftrace_entry *entry; | 914 | struct ftrace_entry *entry; |
912 | 915 | ||
@@ -921,7 +924,9 @@ trace_function(struct trace_array *tr, | |||
921 | entry = ring_buffer_event_data(event); | 924 | entry = ring_buffer_event_data(event); |
922 | entry->ip = ip; | 925 | entry->ip = ip; |
923 | entry->parent_ip = parent_ip; | 926 | entry->parent_ip = parent_ip; |
924 | ring_buffer_unlock_commit(tr->buffer, event); | 927 | |
928 | if (!filter_check_discard(call, entry, tr->buffer, event)) | ||
929 | ring_buffer_unlock_commit(tr->buffer, event); | ||
925 | } | 930 | } |
926 | 931 | ||
927 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 932 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
@@ -930,6 +935,7 @@ static int __trace_graph_entry(struct trace_array *tr, | |||
930 | unsigned long flags, | 935 | unsigned long flags, |
931 | int pc) | 936 | int pc) |
932 | { | 937 | { |
938 | struct ftrace_event_call *call = &event_funcgraph_entry; | ||
933 | struct ring_buffer_event *event; | 939 | struct ring_buffer_event *event; |
934 | struct ftrace_graph_ent_entry *entry; | 940 | struct ftrace_graph_ent_entry *entry; |
935 | 941 | ||
@@ -942,7 +948,8 @@ static int __trace_graph_entry(struct trace_array *tr, | |||
942 | return 0; | 948 | return 0; |
943 | entry = ring_buffer_event_data(event); | 949 | entry = ring_buffer_event_data(event); |
944 | entry->graph_ent = *trace; | 950 | entry->graph_ent = *trace; |
945 | ring_buffer_unlock_commit(global_trace.buffer, event); | 951 | if (!filter_current_check_discard(call, entry, event)) |
952 | ring_buffer_unlock_commit(global_trace.buffer, event); | ||
946 | 953 | ||
947 | return 1; | 954 | return 1; |
948 | } | 955 | } |
@@ -952,6 +959,7 @@ static void __trace_graph_return(struct trace_array *tr, | |||
952 | unsigned long flags, | 959 | unsigned long flags, |
953 | int pc) | 960 | int pc) |
954 | { | 961 | { |
962 | struct ftrace_event_call *call = &event_funcgraph_exit; | ||
955 | struct ring_buffer_event *event; | 963 | struct ring_buffer_event *event; |
956 | struct ftrace_graph_ret_entry *entry; | 964 | struct ftrace_graph_ret_entry *entry; |
957 | 965 | ||
@@ -964,7 +972,8 @@ static void __trace_graph_return(struct trace_array *tr, | |||
964 | return; | 972 | return; |
965 | entry = ring_buffer_event_data(event); | 973 | entry = ring_buffer_event_data(event); |
966 | entry->ret = *trace; | 974 | entry->ret = *trace; |
967 | ring_buffer_unlock_commit(global_trace.buffer, event); | 975 | if (!filter_current_check_discard(call, entry, event)) |
976 | ring_buffer_unlock_commit(global_trace.buffer, event); | ||
968 | } | 977 | } |
969 | #endif | 978 | #endif |
970 | 979 | ||
@@ -982,6 +991,7 @@ static void __ftrace_trace_stack(struct trace_array *tr, | |||
982 | int skip, int pc) | 991 | int skip, int pc) |
983 | { | 992 | { |
984 | #ifdef CONFIG_STACKTRACE | 993 | #ifdef CONFIG_STACKTRACE |
994 | struct ftrace_event_call *call = &event_kernel_stack; | ||
985 | struct ring_buffer_event *event; | 995 | struct ring_buffer_event *event; |
986 | struct stack_entry *entry; | 996 | struct stack_entry *entry; |
987 | struct stack_trace trace; | 997 | struct stack_trace trace; |
@@ -999,7 +1009,8 @@ static void __ftrace_trace_stack(struct trace_array *tr, | |||
999 | trace.entries = entry->caller; | 1009 | trace.entries = entry->caller; |
1000 | 1010 | ||
1001 | save_stack_trace(&trace); | 1011 | save_stack_trace(&trace); |
1002 | ring_buffer_unlock_commit(tr->buffer, event); | 1012 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
1013 | ring_buffer_unlock_commit(tr->buffer, event); | ||
1003 | #endif | 1014 | #endif |
1004 | } | 1015 | } |
1005 | 1016 | ||
@@ -1024,6 +1035,7 @@ static void ftrace_trace_userstack(struct trace_array *tr, | |||
1024 | unsigned long flags, int pc) | 1035 | unsigned long flags, int pc) |
1025 | { | 1036 | { |
1026 | #ifdef CONFIG_STACKTRACE | 1037 | #ifdef CONFIG_STACKTRACE |
1038 | struct ftrace_event_call *call = &event_user_stack; | ||
1027 | struct ring_buffer_event *event; | 1039 | struct ring_buffer_event *event; |
1028 | struct userstack_entry *entry; | 1040 | struct userstack_entry *entry; |
1029 | struct stack_trace trace; | 1041 | struct stack_trace trace; |
@@ -1045,7 +1057,8 @@ static void ftrace_trace_userstack(struct trace_array *tr, | |||
1045 | trace.entries = entry->caller; | 1057 | trace.entries = entry->caller; |
1046 | 1058 | ||
1047 | save_stack_trace_user(&trace); | 1059 | save_stack_trace_user(&trace); |
1048 | ring_buffer_unlock_commit(tr->buffer, event); | 1060 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
1061 | ring_buffer_unlock_commit(tr->buffer, event); | ||
1049 | #endif | 1062 | #endif |
1050 | } | 1063 | } |
1051 | 1064 | ||
@@ -1089,6 +1102,7 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
1089 | struct task_struct *next, | 1102 | struct task_struct *next, |
1090 | unsigned long flags, int pc) | 1103 | unsigned long flags, int pc) |
1091 | { | 1104 | { |
1105 | struct ftrace_event_call *call = &event_context_switch; | ||
1092 | struct ring_buffer_event *event; | 1106 | struct ring_buffer_event *event; |
1093 | struct ctx_switch_entry *entry; | 1107 | struct ctx_switch_entry *entry; |
1094 | 1108 | ||
@@ -1104,7 +1118,9 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
1104 | entry->next_prio = next->prio; | 1118 | entry->next_prio = next->prio; |
1105 | entry->next_state = next->state; | 1119 | entry->next_state = next->state; |
1106 | entry->next_cpu = task_cpu(next); | 1120 | entry->next_cpu = task_cpu(next); |
1107 | trace_buffer_unlock_commit(tr, event, flags, pc); | 1121 | |
1122 | if (!filter_check_discard(call, entry, tr->buffer, event)) | ||
1123 | trace_buffer_unlock_commit(tr, event, flags, pc); | ||
1108 | } | 1124 | } |
1109 | 1125 | ||
1110 | void | 1126 | void |
@@ -1113,6 +1129,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
1113 | struct task_struct *curr, | 1129 | struct task_struct *curr, |
1114 | unsigned long flags, int pc) | 1130 | unsigned long flags, int pc) |
1115 | { | 1131 | { |
1132 | struct ftrace_event_call *call = &event_wakeup; | ||
1116 | struct ring_buffer_event *event; | 1133 | struct ring_buffer_event *event; |
1117 | struct ctx_switch_entry *entry; | 1134 | struct ctx_switch_entry *entry; |
1118 | 1135 | ||
@@ -1129,7 +1146,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
1129 | entry->next_state = wakee->state; | 1146 | entry->next_state = wakee->state; |
1130 | entry->next_cpu = task_cpu(wakee); | 1147 | entry->next_cpu = task_cpu(wakee); |
1131 | 1148 | ||
1132 | ring_buffer_unlock_commit(tr->buffer, event); | 1149 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
1150 | ring_buffer_unlock_commit(tr->buffer, event); | ||
1133 | ftrace_trace_stack(tr, flags, 6, pc); | 1151 | ftrace_trace_stack(tr, flags, 6, pc); |
1134 | ftrace_trace_userstack(tr, flags, pc); | 1152 | ftrace_trace_userstack(tr, flags, pc); |
1135 | } | 1153 | } |
@@ -1230,6 +1248,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1230 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 1248 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
1231 | static u32 trace_buf[TRACE_BUF_SIZE]; | 1249 | static u32 trace_buf[TRACE_BUF_SIZE]; |
1232 | 1250 | ||
1251 | struct ftrace_event_call *call = &event_bprint; | ||
1233 | struct ring_buffer_event *event; | 1252 | struct ring_buffer_event *event; |
1234 | struct trace_array *tr = &global_trace; | 1253 | struct trace_array *tr = &global_trace; |
1235 | struct trace_array_cpu *data; | 1254 | struct trace_array_cpu *data; |
@@ -1269,7 +1288,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1269 | entry->fmt = fmt; | 1288 | entry->fmt = fmt; |
1270 | 1289 | ||
1271 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); | 1290 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); |
1272 | ring_buffer_unlock_commit(tr->buffer, event); | 1291 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
1292 | ring_buffer_unlock_commit(tr->buffer, event); | ||
1273 | 1293 | ||
1274 | out_unlock: | 1294 | out_unlock: |
1275 | __raw_spin_unlock(&trace_buf_lock); | 1295 | __raw_spin_unlock(&trace_buf_lock); |
@@ -1288,6 +1308,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
1288 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; | 1308 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; |
1289 | static char trace_buf[TRACE_BUF_SIZE]; | 1309 | static char trace_buf[TRACE_BUF_SIZE]; |
1290 | 1310 | ||
1311 | struct ftrace_event_call *call = &event_print; | ||
1291 | struct ring_buffer_event *event; | 1312 | struct ring_buffer_event *event; |
1292 | struct trace_array *tr = &global_trace; | 1313 | struct trace_array *tr = &global_trace; |
1293 | struct trace_array_cpu *data; | 1314 | struct trace_array_cpu *data; |
@@ -1323,7 +1344,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
1323 | 1344 | ||
1324 | memcpy(&entry->buf, trace_buf, len); | 1345 | memcpy(&entry->buf, trace_buf, len); |
1325 | entry->buf[len] = 0; | 1346 | entry->buf[len] = 0; |
1326 | ring_buffer_unlock_commit(tr->buffer, event); | 1347 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
1348 | ring_buffer_unlock_commit(tr->buffer, event); | ||
1327 | 1349 | ||
1328 | out_unlock: | 1350 | out_unlock: |
1329 | __raw_spin_unlock(&trace_buf_lock); | 1351 | __raw_spin_unlock(&trace_buf_lock); |
@@ -3596,7 +3618,7 @@ struct dentry *tracing_dentry_percpu(void) | |||
3596 | static void tracing_init_debugfs_percpu(long cpu) | 3618 | static void tracing_init_debugfs_percpu(long cpu) |
3597 | { | 3619 | { |
3598 | struct dentry *d_percpu = tracing_dentry_percpu(); | 3620 | struct dentry *d_percpu = tracing_dentry_percpu(); |
3599 | struct dentry *entry, *d_cpu; | 3621 | struct dentry *d_cpu; |
3600 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ | 3622 | /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ |
3601 | char cpu_dir[7]; | 3623 | char cpu_dir[7]; |
3602 | 3624 | ||
@@ -3611,21 +3633,15 @@ static void tracing_init_debugfs_percpu(long cpu) | |||
3611 | } | 3633 | } |
3612 | 3634 | ||
3613 | /* per cpu trace_pipe */ | 3635 | /* per cpu trace_pipe */ |
3614 | entry = debugfs_create_file("trace_pipe", 0444, d_cpu, | 3636 | trace_create_file("trace_pipe", 0444, d_cpu, |
3615 | (void *) cpu, &tracing_pipe_fops); | 3637 | (void *) cpu, &tracing_pipe_fops); |
3616 | if (!entry) | ||
3617 | pr_warning("Could not create debugfs 'trace_pipe' entry\n"); | ||
3618 | 3638 | ||
3619 | /* per cpu trace */ | 3639 | /* per cpu trace */ |
3620 | entry = debugfs_create_file("trace", 0644, d_cpu, | 3640 | trace_create_file("trace", 0644, d_cpu, |
3621 | (void *) cpu, &tracing_fops); | 3641 | (void *) cpu, &tracing_fops); |
3622 | if (!entry) | ||
3623 | pr_warning("Could not create debugfs 'trace' entry\n"); | ||
3624 | 3642 | ||
3625 | entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu, | 3643 | trace_create_file("trace_pipe_raw", 0444, d_cpu, |
3626 | (void *) cpu, &tracing_buffers_fops); | 3644 | (void *) cpu, &tracing_buffers_fops); |
3627 | if (!entry) | ||
3628 | pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n"); | ||
3629 | } | 3645 | } |
3630 | 3646 | ||
3631 | #ifdef CONFIG_FTRACE_SELFTEST | 3647 | #ifdef CONFIG_FTRACE_SELFTEST |
@@ -3781,6 +3797,22 @@ static const struct file_operations trace_options_core_fops = { | |||
3781 | .write = trace_options_core_write, | 3797 | .write = trace_options_core_write, |
3782 | }; | 3798 | }; |
3783 | 3799 | ||
3800 | struct dentry *trace_create_file(const char *name, | ||
3801 | mode_t mode, | ||
3802 | struct dentry *parent, | ||
3803 | void *data, | ||
3804 | const struct file_operations *fops) | ||
3805 | { | ||
3806 | struct dentry *ret; | ||
3807 | |||
3808 | ret = debugfs_create_file(name, mode, parent, data, fops); | ||
3809 | if (!ret) | ||
3810 | pr_warning("Could not create debugfs '%s' entry\n", name); | ||
3811 | |||
3812 | return ret; | ||
3813 | } | ||
3814 | |||
3815 | |||
3784 | static struct dentry *trace_options_init_dentry(void) | 3816 | static struct dentry *trace_options_init_dentry(void) |
3785 | { | 3817 | { |
3786 | struct dentry *d_tracer; | 3818 | struct dentry *d_tracer; |
@@ -3808,7 +3840,6 @@ create_trace_option_file(struct trace_option_dentry *topt, | |||
3808 | struct tracer_opt *opt) | 3840 | struct tracer_opt *opt) |
3809 | { | 3841 | { |
3810 | struct dentry *t_options; | 3842 | struct dentry *t_options; |
3811 | struct dentry *entry; | ||
3812 | 3843 | ||
3813 | t_options = trace_options_init_dentry(); | 3844 | t_options = trace_options_init_dentry(); |
3814 | if (!t_options) | 3845 | if (!t_options) |
@@ -3817,11 +3848,9 @@ create_trace_option_file(struct trace_option_dentry *topt, | |||
3817 | topt->flags = flags; | 3848 | topt->flags = flags; |
3818 | topt->opt = opt; | 3849 | topt->opt = opt; |
3819 | 3850 | ||
3820 | entry = debugfs_create_file(opt->name, 0644, t_options, topt, | 3851 | topt->entry = trace_create_file(opt->name, 0644, t_options, topt, |
3821 | &trace_options_fops); | 3852 | &trace_options_fops); |
3822 | 3853 | ||
3823 | topt->entry = entry; | ||
3824 | |||
3825 | } | 3854 | } |
3826 | 3855 | ||
3827 | static struct trace_option_dentry * | 3856 | static struct trace_option_dentry * |
@@ -3876,123 +3905,81 @@ static struct dentry * | |||
3876 | create_trace_option_core_file(const char *option, long index) | 3905 | create_trace_option_core_file(const char *option, long index) |
3877 | { | 3906 | { |
3878 | struct dentry *t_options; | 3907 | struct dentry *t_options; |
3879 | struct dentry *entry; | ||
3880 | 3908 | ||
3881 | t_options = trace_options_init_dentry(); | 3909 | t_options = trace_options_init_dentry(); |
3882 | if (!t_options) | 3910 | if (!t_options) |
3883 | return NULL; | 3911 | return NULL; |
3884 | 3912 | ||
3885 | entry = debugfs_create_file(option, 0644, t_options, (void *)index, | 3913 | return trace_create_file(option, 0644, t_options, (void *)index, |
3886 | &trace_options_core_fops); | 3914 | &trace_options_core_fops); |
3887 | |||
3888 | return entry; | ||
3889 | } | 3915 | } |
3890 | 3916 | ||
3891 | static __init void create_trace_options_dir(void) | 3917 | static __init void create_trace_options_dir(void) |
3892 | { | 3918 | { |
3893 | struct dentry *t_options; | 3919 | struct dentry *t_options; |
3894 | struct dentry *entry; | ||
3895 | int i; | 3920 | int i; |
3896 | 3921 | ||
3897 | t_options = trace_options_init_dentry(); | 3922 | t_options = trace_options_init_dentry(); |
3898 | if (!t_options) | 3923 | if (!t_options) |
3899 | return; | 3924 | return; |
3900 | 3925 | ||
3901 | for (i = 0; trace_options[i]; i++) { | 3926 | for (i = 0; trace_options[i]; i++) |
3902 | entry = create_trace_option_core_file(trace_options[i], i); | 3927 | create_trace_option_core_file(trace_options[i], i); |
3903 | if (!entry) | ||
3904 | pr_warning("Could not create debugfs %s entry\n", | ||
3905 | trace_options[i]); | ||
3906 | } | ||
3907 | } | 3928 | } |
3908 | 3929 | ||
3909 | static __init int tracer_init_debugfs(void) | 3930 | static __init int tracer_init_debugfs(void) |
3910 | { | 3931 | { |
3911 | struct dentry *d_tracer; | 3932 | struct dentry *d_tracer; |
3912 | struct dentry *entry; | ||
3913 | int cpu; | 3933 | int cpu; |
3914 | 3934 | ||
3915 | d_tracer = tracing_init_dentry(); | 3935 | d_tracer = tracing_init_dentry(); |
3916 | 3936 | ||
3917 | entry = debugfs_create_file("tracing_enabled", 0644, d_tracer, | 3937 | trace_create_file("tracing_enabled", 0644, d_tracer, |
3918 | &global_trace, &tracing_ctrl_fops); | 3938 | &global_trace, &tracing_ctrl_fops); |
3919 | if (!entry) | ||
3920 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | ||
3921 | 3939 | ||
3922 | entry = debugfs_create_file("trace_options", 0644, d_tracer, | 3940 | trace_create_file("trace_options", 0644, d_tracer, |
3923 | NULL, &tracing_iter_fops); | 3941 | NULL, &tracing_iter_fops); |
3924 | if (!entry) | ||
3925 | pr_warning("Could not create debugfs 'trace_options' entry\n"); | ||
3926 | 3942 | ||
3927 | create_trace_options_dir(); | 3943 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
3944 | NULL, &tracing_cpumask_fops); | ||
3945 | |||
3946 | trace_create_file("trace", 0644, d_tracer, | ||
3947 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | ||
3948 | |||
3949 | trace_create_file("available_tracers", 0444, d_tracer, | ||
3950 | &global_trace, &show_traces_fops); | ||
3951 | |||
3952 | trace_create_file("current_tracer", 0444, d_tracer, | ||
3953 | &global_trace, &set_tracer_fops); | ||
3954 | |||
3955 | trace_create_file("tracing_max_latency", 0644, d_tracer, | ||
3956 | &tracing_max_latency, &tracing_max_lat_fops); | ||
3957 | |||
3958 | trace_create_file("tracing_thresh", 0644, d_tracer, | ||
3959 | &tracing_thresh, &tracing_max_lat_fops); | ||
3928 | 3960 | ||
3929 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3961 | trace_create_file("README", 0644, d_tracer, |
3930 | NULL, &tracing_cpumask_fops); | 3962 | NULL, &tracing_readme_fops); |
3931 | if (!entry) | 3963 | |
3932 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 3964 | trace_create_file("trace_pipe", 0444, d_tracer, |
3933 | |||
3934 | entry = debugfs_create_file("trace", 0644, d_tracer, | ||
3935 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | ||
3936 | if (!entry) | ||
3937 | pr_warning("Could not create debugfs 'trace' entry\n"); | ||
3938 | |||
3939 | entry = debugfs_create_file("available_tracers", 0444, d_tracer, | ||
3940 | &global_trace, &show_traces_fops); | ||
3941 | if (!entry) | ||
3942 | pr_warning("Could not create debugfs 'available_tracers' entry\n"); | ||
3943 | |||
3944 | entry = debugfs_create_file("current_tracer", 0444, d_tracer, | ||
3945 | &global_trace, &set_tracer_fops); | ||
3946 | if (!entry) | ||
3947 | pr_warning("Could not create debugfs 'current_tracer' entry\n"); | ||
3948 | |||
3949 | entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, | ||
3950 | &tracing_max_latency, | ||
3951 | &tracing_max_lat_fops); | ||
3952 | if (!entry) | ||
3953 | pr_warning("Could not create debugfs " | ||
3954 | "'tracing_max_latency' entry\n"); | ||
3955 | |||
3956 | entry = debugfs_create_file("tracing_thresh", 0644, d_tracer, | ||
3957 | &tracing_thresh, &tracing_max_lat_fops); | ||
3958 | if (!entry) | ||
3959 | pr_warning("Could not create debugfs " | ||
3960 | "'tracing_thresh' entry\n"); | ||
3961 | entry = debugfs_create_file("README", 0644, d_tracer, | ||
3962 | NULL, &tracing_readme_fops); | ||
3963 | if (!entry) | ||
3964 | pr_warning("Could not create debugfs 'README' entry\n"); | ||
3965 | |||
3966 | entry = debugfs_create_file("trace_pipe", 0444, d_tracer, | ||
3967 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 3965 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); |
3968 | if (!entry) | 3966 | |
3969 | pr_warning("Could not create debugfs " | 3967 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
3970 | "'trace_pipe' entry\n"); | 3968 | &global_trace, &tracing_entries_fops); |
3971 | 3969 | ||
3972 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, | 3970 | trace_create_file("trace_marker", 0220, d_tracer, |
3973 | &global_trace, &tracing_entries_fops); | 3971 | NULL, &tracing_mark_fops); |
3974 | if (!entry) | ||
3975 | pr_warning("Could not create debugfs " | ||
3976 | "'buffer_size_kb' entry\n"); | ||
3977 | |||
3978 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, | ||
3979 | NULL, &tracing_mark_fops); | ||
3980 | if (!entry) | ||
3981 | pr_warning("Could not create debugfs " | ||
3982 | "'trace_marker' entry\n"); | ||
3983 | 3972 | ||
3984 | #ifdef CONFIG_DYNAMIC_FTRACE | 3973 | #ifdef CONFIG_DYNAMIC_FTRACE |
3985 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3974 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
3986 | &ftrace_update_tot_cnt, | 3975 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
3987 | &tracing_dyn_info_fops); | ||
3988 | if (!entry) | ||
3989 | pr_warning("Could not create debugfs " | ||
3990 | "'dyn_ftrace_total_info' entry\n"); | ||
3991 | #endif | 3976 | #endif |
3992 | #ifdef CONFIG_SYSPROF_TRACER | 3977 | #ifdef CONFIG_SYSPROF_TRACER |
3993 | init_tracer_sysprof_debugfs(d_tracer); | 3978 | init_tracer_sysprof_debugfs(d_tracer); |
3994 | #endif | 3979 | #endif |
3995 | 3980 | ||
3981 | create_trace_options_dir(); | ||
3982 | |||
3996 | for_each_tracing_cpu(cpu) | 3983 | for_each_tracing_cpu(cpu) |
3997 | tracing_init_debugfs_percpu(cpu); | 3984 | tracing_init_debugfs_percpu(cpu); |
3998 | 3985 | ||
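
[ Note: trace_create_file() folds the "could not create debugfs entry"
warning into one place, which is why every call site above can drop its
'entry' local and error check. New call sites may ignore the return
value entirely; a sketch with hypothetical my_* names: ]

	static const struct file_operations my_fops;	/* hypothetical */

	static __init int my_init_debugfs(void)
	{
		struct dentry *d_tracer = tracing_init_dentry();

		if (!d_tracer)
			return 0;

		/* warns by itself on failure */
		trace_create_file("my_control", 0644, d_tracer,
				  NULL, &my_fops);
		return 0;
	}
	fs_initcall(my_init_debugfs);
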
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index e685ac2b2ba1..9729d14767d8 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <trace/boot.h> | 11 | #include <trace/boot.h> |
12 | #include <trace/kmemtrace.h> | 12 | #include <linux/kmemtrace.h> |
13 | #include <trace/power.h> | 13 | #include <trace/power.h> |
14 | 14 | ||
15 | enum trace_type { | 15 | enum trace_type { |
@@ -470,6 +470,12 @@ void trace_wake_up(void); | |||
470 | void tracing_reset(struct trace_array *tr, int cpu); | 470 | void tracing_reset(struct trace_array *tr, int cpu); |
471 | void tracing_reset_online_cpus(struct trace_array *tr); | 471 | void tracing_reset_online_cpus(struct trace_array *tr); |
472 | int tracing_open_generic(struct inode *inode, struct file *filp); | 472 | int tracing_open_generic(struct inode *inode, struct file *filp); |
473 | struct dentry *trace_create_file(const char *name, | ||
474 | mode_t mode, | ||
475 | struct dentry *parent, | ||
476 | void *data, | ||
477 | const struct file_operations *fops); | ||
478 | |||
473 | struct dentry *tracing_init_dentry(void); | 479 | struct dentry *tracing_init_dentry(void); |
474 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | 480 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); |
475 | 481 | ||
@@ -491,6 +497,7 @@ void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | |||
491 | unsigned long flags, int pc); | 497 | unsigned long flags, int pc); |
492 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | 498 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, |
493 | unsigned long flags, int pc); | 499 | unsigned long flags, int pc); |
500 | void trace_current_buffer_discard_commit(struct ring_buffer_event *event); | ||
494 | 501 | ||
495 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | 502 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
496 | struct trace_array_cpu *data); | 503 | struct trace_array_cpu *data); |
@@ -613,6 +620,8 @@ extern unsigned long trace_flags; | |||
613 | /* Standard output formatting function used for function return traces */ | 620 | /* Standard output formatting function used for function return traces */ |
614 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 621 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
615 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); | 622 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); |
623 | extern enum print_line_t | ||
624 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); | ||
616 | 625 | ||
617 | #ifdef CONFIG_DYNAMIC_FTRACE | 626 | #ifdef CONFIG_DYNAMIC_FTRACE |
618 | /* TODO: make this variable */ | 627 | /* TODO: make this variable */ |
@@ -644,7 +653,6 @@ static inline int ftrace_graph_addr(unsigned long addr) | |||
644 | return 1; | 653 | return 1; |
645 | } | 654 | } |
646 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 655 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
647 | |||
648 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ | 656 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ |
649 | static inline enum print_line_t | 657 | static inline enum print_line_t |
650 | print_graph_function(struct trace_iterator *iter) | 658 | print_graph_function(struct trace_iterator *iter) |
@@ -692,6 +700,7 @@ enum trace_iterator_flags { | |||
692 | TRACE_ITER_LATENCY_FMT = 0x40000, | 700 | TRACE_ITER_LATENCY_FMT = 0x40000, |
693 | TRACE_ITER_GLOBAL_CLK = 0x80000, | 701 | TRACE_ITER_GLOBAL_CLK = 0x80000, |
694 | TRACE_ITER_SLEEP_TIME = 0x100000, | 702 | TRACE_ITER_SLEEP_TIME = 0x100000, |
703 | TRACE_ITER_GRAPH_TIME = 0x200000, | ||
695 | }; | 704 | }; |
696 | 705 | ||
697 | /* | 706 | /* |
@@ -857,6 +866,29 @@ extern int filter_match_preds(struct ftrace_event_call *call, void *rec); | |||
857 | extern void filter_free_subsystem_preds(struct event_subsystem *system); | 866 | extern void filter_free_subsystem_preds(struct event_subsystem *system); |
858 | extern int filter_add_subsystem_pred(struct event_subsystem *system, | 867 | extern int filter_add_subsystem_pred(struct event_subsystem *system, |
859 | struct filter_pred *pred); | 868 | struct filter_pred *pred); |
869 | extern int filter_current_check_discard(struct ftrace_event_call *call, | ||
870 | void *rec, | ||
871 | struct ring_buffer_event *event); | ||
872 | |||
873 | static inline int | ||
874 | filter_check_discard(struct ftrace_event_call *call, void *rec, | ||
875 | struct ring_buffer *buffer, | ||
876 | struct ring_buffer_event *event) | ||
877 | { | ||
878 | if (unlikely(call->preds) && !filter_match_preds(call, rec)) { | ||
879 | ring_buffer_discard_commit(buffer, event); | ||
880 | return 1; | ||
881 | } | ||
882 | |||
883 | return 0; | ||
884 | } | ||
885 | |||
886 | #define __common_field(type, item) \ | ||
887 | ret = trace_define_field(event_call, #type, "common_" #item, \ | ||
888 | offsetof(typeof(field.ent), item), \ | ||
889 | sizeof(field.ent.item)); \ | ||
890 | if (ret) \ | ||
891 | return ret; | ||
860 | 892 | ||
861 | void event_trace_printk(unsigned long ip, const char *fmt, ...); | 893 | void event_trace_printk(unsigned long ip, const char *fmt, ...); |
862 | extern struct ftrace_event_call __start_ftrace_events[]; | 894 | extern struct ftrace_event_call __start_ftrace_events[]; |
@@ -889,4 +921,11 @@ do { \ | |||
889 | __trace_printk(ip, fmt, ##args); \ | 921 | __trace_printk(ip, fmt, ##args); \ |
890 | } while (0) | 922 | } while (0) |
891 | 923 | ||
924 | #undef TRACE_EVENT_FORMAT | ||
925 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | ||
926 | extern struct ftrace_event_call event_##call; | ||
927 | #undef TRACE_EVENT_FORMAT_NOFILTER | ||
928 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt) | ||
929 | #include "trace_event_types.h" | ||
930 | |||
892 | #endif /* _LINUX_KERNEL_TRACE_H */ | 931 | #endif /* _LINUX_KERNEL_TRACE_H */ |
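
[ Note: the TRACE_EVENT_FORMAT redefinition at the bottom of trace.h
re-includes trace_event_types.h so every listed event gains an extern
declaration (event_function, event_branch, ...), which is what lets the
tracers above take their addresses; trace_export.c provides the
matching definitions. A self-contained user-space stand-in for this
X-macro pattern, using made-up names: ]

	#include <stdio.h>

	struct ftrace_event_call { const char *name; };

	/* plays the role of trace_event_types.h */
	#define EVENT_LIST \
		EVENT(function) \
		EVENT(branch)

	/* first expansion: declarations, as trace.h now does */
	#define EVENT(call) extern struct ftrace_event_call event_##call;
	EVENT_LIST
	#undef EVENT

	/* second expansion: definitions, as trace_export.c does */
	#define EVENT(call) struct ftrace_event_call event_##call = { #call };
	EVENT_LIST
	#undef EVENT

	int main(void)
	{
		printf("%s %s\n", event_function.name, event_branch.name);
		return 0;
	}
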
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 7a30fc4c3642..a29ef23ffb47 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <linux/kallsyms.h> | 11 | #include <linux/kallsyms.h> |
12 | #include <linux/time.h> | ||
12 | 13 | ||
13 | #include "trace.h" | 14 | #include "trace.h" |
14 | #include "trace_output.h" | 15 | #include "trace_output.h" |
@@ -67,7 +68,7 @@ initcall_call_print_line(struct trace_iterator *iter) | |||
67 | trace_assign_type(field, entry); | 68 | trace_assign_type(field, entry); |
68 | call = &field->boot_call; | 69 | call = &field->boot_call; |
69 | ts = iter->ts; | 70 | ts = iter->ts; |
70 | nsec_rem = do_div(ts, 1000000000); | 71 | nsec_rem = do_div(ts, NSEC_PER_SEC); |
71 | 72 | ||
72 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | 73 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", |
73 | (unsigned long)ts, nsec_rem, call->func, call->caller); | 74 | (unsigned long)ts, nsec_rem, call->func, call->caller); |
@@ -92,7 +93,7 @@ initcall_ret_print_line(struct trace_iterator *iter) | |||
92 | trace_assign_type(field, entry); | 93 | trace_assign_type(field, entry); |
93 | init_ret = &field->boot_ret; | 94 | init_ret = &field->boot_ret; |
94 | ts = iter->ts; | 95 | ts = iter->ts; |
95 | nsec_rem = do_div(ts, 1000000000); | 96 | nsec_rem = do_div(ts, NSEC_PER_SEC); |
96 | 97 | ||
97 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " | 98 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " |
98 | "returned %d after %llu msecs\n", | 99 | "returned %d after %llu msecs\n", |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index ad8c22efff41..8e64e604f5a7 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -30,6 +30,7 @@ static struct trace_array *branch_tracer; | |||
30 | static void | 30 | static void |
31 | probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | 31 | probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) |
32 | { | 32 | { |
33 | struct ftrace_event_call *call = &event_branch; | ||
33 | struct trace_array *tr = branch_tracer; | 34 | struct trace_array *tr = branch_tracer; |
34 | struct ring_buffer_event *event; | 35 | struct ring_buffer_event *event; |
35 | struct trace_branch *entry; | 36 | struct trace_branch *entry; |
@@ -73,7 +74,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
73 | entry->line = f->line; | 74 | entry->line = f->line; |
74 | entry->correct = val == expect; | 75 | entry->correct = val == expect; |
75 | 76 | ||
76 | ring_buffer_unlock_commit(tr->buffer, event); | 77 | if (!filter_check_discard(call, entry, tr->buffer, event)) |
78 | ring_buffer_unlock_commit(tr->buffer, event); | ||
77 | 79 | ||
78 | out: | 80 | out: |
79 | atomic_dec(&tr->data[cpu]->disabled); | 81 | atomic_dec(&tr->data[cpu]->disabled); |
@@ -263,7 +265,7 @@ static int branch_stat_show(struct seq_file *m, void *v) | |||
263 | return 0; | 265 | return 0; |
264 | } | 266 | } |
265 | 267 | ||
266 | static void *annotated_branch_stat_start(void) | 268 | static void *annotated_branch_stat_start(struct tracer_stat *trace) |
267 | { | 269 | { |
268 | return __start_annotated_branch_profile; | 270 | return __start_annotated_branch_profile; |
269 | } | 271 | } |
@@ -338,7 +340,7 @@ static int all_branch_stat_headers(struct seq_file *m) | |||
338 | return 0; | 340 | return 0; |
339 | } | 341 | } |
340 | 342 | ||
341 | static void *all_branch_stat_start(void) | 343 | static void *all_branch_stat_start(struct tracer_stat *trace) |
342 | { | 344 | { |
343 | return __start_branch_profile; | 345 | return __start_branch_profile; |
344 | } | 346 | } |
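
[ Note: passing the tracer_stat to stat_start lets one callback body
serve many registered instances -- e.g. the per-cpu "function%d" stats
registered in the ftrace.c hunks, where the tracer_stat is embedded in
a larger per-cpu structure. A hypothetical implementation using
container_of(): ]

	struct my_stat {
		struct tracer_stat	stat;
		void			*first;	/* first record to iterate */
	};

	static void *my_stat_start(struct tracer_stat *trace)
	{
		struct my_stat *mstat;

		mstat = container_of(trace, struct my_stat, stat);
		return mstat->first;
	}
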
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 22cba9970776..199de9c74229 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -28,4 +28,3 @@ void ftrace_profile_disable(int event_id) | |||
28 | return event->profile_disable(event); | 28 | return event->profile_disable(event); |
29 | } | 29 | } |
30 | } | 30 | } |
31 | |||
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h index fd78bee71dd7..cfcecc4fd86d 100644 --- a/kernel/trace/trace_event_types.h +++ b/kernel/trace/trace_event_types.h | |||
@@ -57,7 +57,7 @@ TRACE_EVENT_FORMAT(context_switch, TRACE_CTX, ctx_switch_entry, ignore, | |||
57 | TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]") | 57 | TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]") |
58 | ); | 58 | ); |
59 | 59 | ||
60 | TRACE_EVENT_FORMAT(special, TRACE_SPECIAL, special_entry, ignore, | 60 | TRACE_EVENT_FORMAT_NOFILTER(special, TRACE_SPECIAL, special_entry, ignore, |
61 | TRACE_STRUCT( | 61 | TRACE_STRUCT( |
62 | TRACE_FIELD(unsigned long, arg1, arg1) | 62 | TRACE_FIELD(unsigned long, arg1, arg1) |
63 | TRACE_FIELD(unsigned long, arg2, arg2) | 63 | TRACE_FIELD(unsigned long, arg2, arg2) |
@@ -122,8 +122,10 @@ TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore, | |||
122 | TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore, | 122 | TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore, |
123 | TRACE_STRUCT( | 123 | TRACE_STRUCT( |
124 | TRACE_FIELD(unsigned int, line, line) | 124 | TRACE_FIELD(unsigned int, line, line) |
125 | TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, func) | 125 | TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, |
126 | TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, file) | 126 | TRACE_FUNC_SIZE+1, func) |
127 | TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, | ||
128 | TRACE_FUNC_SIZE+1, file) | ||
127 | TRACE_FIELD(char, correct, correct) | 129 | TRACE_FIELD(char, correct, correct) |
128 | ), | 130 | ), |
129 | TP_RAW_FMT("%u:%s:%s (%u)") | 131 | TP_RAW_FMT("%u:%s:%s (%u)") |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 576f4fa2af0d..789e14eb09a5 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -684,6 +684,7 @@ static struct dentry * | |||
684 | event_subsystem_dir(const char *name, struct dentry *d_events) | 684 | event_subsystem_dir(const char *name, struct dentry *d_events) |
685 | { | 685 | { |
686 | struct event_subsystem *system; | 686 | struct event_subsystem *system; |
687 | struct dentry *entry; | ||
687 | 688 | ||
688 | /* First see if we did not already create this dir */ | 689 | /* First see if we did not already create this dir */ |
689 | list_for_each_entry(system, &event_subsystems, list) { | 690 | list_for_each_entry(system, &event_subsystems, list) { |
@@ -712,6 +713,12 @@ event_subsystem_dir(const char *name, struct dentry *d_events) | |||
712 | 713 | ||
713 | system->preds = NULL; | 714 | system->preds = NULL; |
714 | 715 | ||
716 | entry = debugfs_create_file("filter", 0644, system->entry, system, | ||
717 | &ftrace_subsystem_filter_fops); | ||
718 | if (!entry) | ||
719 | pr_warning("Could not create debugfs " | ||
720 | "'%s/filter' entry\n", name); | ||
721 | |||
715 | return system->entry; | 722 | return system->entry; |
716 | } | 723 | } |
717 | 724 | ||
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e03cbf1e38f3..9f8ecca34a59 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -185,7 +185,7 @@ void filter_free_subsystem_preds(struct event_subsystem *system)
 	}
 
 	events_for_each(call) {
-		if (!call->name || !call->regfunc)
+		if (!call->define_fields)
 			continue;
 
 		if (!strcmp(call->system, system->name))
@@ -324,7 +324,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system,
 	events_for_each(call) {
 		int err;
 
-		if (!call->name || !call->regfunc)
+		if (!call->define_fields)
 			continue;
 
 		if (strcmp(call->system, system->name))
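[ Note: the gating test above changes from "has a name and a register
function" to "has a define_fields callback".  A filter predicate can
only be matched against fields the event has registered, so the ability
to describe fields is the natural capability check.  A minimal sketch
of the idea, using a hypothetical helper name: ]

	/* hypothetical helper; the patch open-codes this test */
	static int event_is_filterable(struct ftrace_event_call *call)
	{
		/* no define_fields(): no field list for predicates to match */
		return call->define_fields != NULL;
	}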
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index d363c6672c6c..02fb710193ed 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -146,13 +146,6 @@ ftrace_format_##call(struct trace_seq *s) \
 	if (ret) \
 		return ret;
 
-#define __common_field(type, item) \
-	ret = trace_define_field(event_call, #type, "common_" #item, \
-				 offsetof(typeof(field.ent), item), \
-				 sizeof(field.ent.item)); \
-	if (ret) \
-		return ret;
-
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
 int \
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index 9d2fa78cecca..b2b298269eb0 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -222,11 +222,8 @@ static void ftrace_raw_event_##call(proto) \
 	\
 	assign; \
 	\
-	if (call->preds && !filter_match_preds(call, entry)) \
-		ring_buffer_event_discard(event); \
-	\
-	trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
-	\
+	if (!filter_current_check_discard(call, entry, event)) \
+		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
 } \
 	\
 static int ftrace_raw_reg_event_##call(void) \
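[ Note: hand-expanded sketch of the tail of the generated probe for a
hypothetical event 'foo' with one integer field, after this change; the
reserve and assign steps are untouched by the patch, only the
filter/commit sequence collapses into a single conditional: ]

	entry = ring_buffer_event_data(event);
	entry->arg = arg;			/* the "assign" step */

	/* commit only when no filter predicate rejected the record */
	if (!filter_current_check_discard(call, entry, event))
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc);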
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 07a22c33ebf3..77c494f5e1d6 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -30,7 +30,7 @@
 
 
 #undef TRACE_FIELD_SPECIAL
-#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
+#define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \
 	ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t" \
 			       "offset:%u;\tsize:%u;\n", \
 			       (unsigned int)offsetof(typeof(field), item), \
@@ -65,6 +65,22 @@ ftrace_format_##call(struct trace_seq *s) \
 	return ret; \
 }
 
+#undef TRACE_EVENT_FORMAT_NOFILTER
+#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \
+				    tpfmt) \
+static int \
+ftrace_format_##call(struct trace_seq *s) \
+{ \
+	struct args field; \
+	int ret; \
+	\
+	tstruct; \
+	\
+	trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \
+	\
+	return ret; \
+}
+
 #include "trace_event_types.h"
 
 #undef TRACE_ZERO_CHAR
@@ -85,18 +101,86 @@ ftrace_format_##call(struct trace_seq *s) \
 #define TRACE_ENTRY	entry
 
 #undef TRACE_FIELD_SPECIAL
-#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
+#define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \
 	cmd;
 
 #undef TRACE_EVENT_FORMAT
 #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
+int ftrace_define_fields_##call(void); \
+static int ftrace_raw_init_event_##call(void); \
+	\
+struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+	.name = #call, \
+	.id = proto, \
+	.system = __stringify(TRACE_SYSTEM), \
+	.raw_init = ftrace_raw_init_event_##call, \
+	.show_format = ftrace_format_##call, \
+	.define_fields = ftrace_define_fields_##call, \
+}; \
+static int ftrace_raw_init_event_##call(void) \
+{ \
+	INIT_LIST_HEAD(&event_##call.fields); \
+	return 0; \
+} \
+
+#undef TRACE_EVENT_FORMAT_NOFILTER
+#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \
+				    tpfmt) \
 	\
-static struct ftrace_event_call __used \
+struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
 	.name = #call, \
 	.id = proto, \
 	.system = __stringify(TRACE_SYSTEM), \
 	.show_format = ftrace_format_##call, \
+};
+
+#include "trace_event_types.h"
+
+#undef TRACE_FIELD
+#define TRACE_FIELD(type, item, assign) \
+	ret = trace_define_field(event_call, #type, #item, \
+				 offsetof(typeof(field), item), \
+				 sizeof(field.item)); \
+	if (ret) \
+		return ret;
+
+#undef TRACE_FIELD_SPECIAL
+#define TRACE_FIELD_SPECIAL(type, item, len, cmd) \
+	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
+				 offsetof(typeof(field), item), \
+				 sizeof(field.item)); \
+	if (ret) \
+		return ret;
+
+#undef TRACE_FIELD_ZERO_CHAR
+#define TRACE_FIELD_ZERO_CHAR(item)
+
+#undef TRACE_EVENT_FORMAT
+#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
+int \
+ftrace_define_fields_##call(void) \
+{ \
+	struct ftrace_event_call *event_call = &event_##call; \
+	struct args field; \
+	int ret; \
+	\
+	__common_field(unsigned char, type); \
+	__common_field(unsigned char, flags); \
+	__common_field(unsigned char, preempt_count); \
+	__common_field(int, pid); \
+	__common_field(int, tgid); \
+	\
+	tstruct; \
+	\
+	return ret; \
 }
+
+#undef TRACE_EVENT_FORMAT_NOFILTER
+#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \
+				    tpfmt)
+
 #include "trace_event_types.h"
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..10f6ad7d85f6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
+	current->ret_stack[index].subtime = 0;
 	*depth = index;
 
 	return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-void
+static void
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
 	int index;
@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 	trace->calltime = current->ret_stack[index].calltime;
 	trace->overrun = atomic_read(&current->trace_overrun);
 	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
 }
 
 /*
@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void)
 	ftrace_pop_return_trace(&trace, &ret);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);
+	barrier();
+	current->curr_ret_stack--;
 
 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
@@ -426,8 +426,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 	return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 {
 	unsigned long nsecs_rem = do_div(duration, 1000);
 	/* log10(ULONG_MAX) + '\0' */
@@ -464,12 +464,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+	int ret;
+
+	ret = trace_print_graph_duration(duration, s);
+	if (ret != TRACE_TYPE_HANDLED)
+		return ret;
 
 	ret = trace_seq_printf(s, "| ");
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
-	return TRACE_TYPE_HANDLED;
 
+	return TRACE_TYPE_HANDLED;
 }
 
 /* Case of a leaf function on its call entry */
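[ Note: the split leaves the "| " column decoration in the static
wrapper and exports the bare duration formatting.  A hypothetical
outside caller could reuse it like this (function name invented for
illustration): ]

	static enum print_line_t
	print_my_duration(struct trace_seq *s, unsigned long long ns)
	{
		enum print_line_t ret;

		/* same "123.456 us"-style output as the graph tracer */
		ret = trace_print_graph_duration(ns, s);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;

		if (!trace_seq_printf(s, "\n"))
			return TRACE_TYPE_PARTIAL_LINE;

		return TRACE_TYPE_HANDLED;
	}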
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 7bfdf4c2347f..8683d50a753a 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -168,6 +168,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 
 void trace_hw_branch(u64 from, u64 to)
 {
+	struct ftrace_event_call *call = &event_hw_branch;
 	struct trace_array *tr = hw_branch_trace;
 	struct ring_buffer_event *event;
 	struct hw_branch_entry *entry;
@@ -194,7 +195,8 @@ void trace_hw_branch(u64 from, u64 to)
 	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->from = from;
 	entry->to = to;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
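[ Note: filter_check_discard() itself is not part of these hunks.
Judging from the stage-3 code it replaces, its semantics are
presumably: evaluate the event's predicates against the just-filled
record and, on a miss, discard the reserved ring-buffer slot and return
nonzero so the caller skips the commit.  A sketch under that assumption
(helper name invented; the buffer argument is unused in this simplified
version): ]

	static inline int
	my_filter_check_discard(struct ftrace_event_call *call, void *rec,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event)
	{
		if (call->preds && !filter_match_preds(call, rec)) {
			ring_buffer_event_discard(event);
			return 1;	/* discarded: caller must not commit */
		}

		return 0;		/* kept: caller commits as usual */
	}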
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 8e37fcddd8b4..d53b45ed0806 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,8 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <linux/time.h>
+
 #include <asm/atomic.h>
 
 #include "trace.h"
@@ -174,7 +176,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
 	struct mmiotrace_rw *rw;
 	struct trace_seq *s = &iter->seq;
 	unsigned long long t = ns2usecs(iter->ts);
-	unsigned long usec_rem = do_div(t, 1000000ULL);
+	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
 	unsigned secs = (unsigned long)t;
 	int ret = 1;
 
@@ -221,7 +223,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
 	struct mmiotrace_map *m;
 	struct trace_seq *s = &iter->seq;
 	unsigned long long t = ns2usecs(iter->ts);
-	unsigned long usec_rem = do_div(t, 1000000ULL);
+	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
 	unsigned secs = (unsigned long)t;
 	int ret;
 
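[ Note: <linux/time.h> is pulled in for USEC_PER_SEC, which replaces
the bare 1000000ULL.  The do_div() idiom here is easy to misread: it
divides its first argument in place and returns the remainder.  A
minimal sketch (helper name invented): ]

	/* split a microsecond count into whole seconds and microseconds */
	static void usecs_to_sec_rem(unsigned long long usecs,
				     unsigned long *secs, unsigned long *rem)
	{
		/* do_div() modifies 'usecs' in place, returns the remainder */
		*rem = do_div(usecs, USEC_PER_SEC);
		*secs = (unsigned long)usecs;
	}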
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 64b54a59c55b..0e70fb07ca78 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -19,6 +19,16 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
+void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+
+	s->buffer[len] = 0;
+	seq_puts(m, s->buffer);
+
+	trace_seq_init(s);
+}
+
 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
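[ Note: trace_print_seq() bundles the usual drain-and-reset dance:
NUL-terminate the accumulated buffer, push it into the seq_file, and
re-init the trace_seq for the next record.  A hypothetical seq_file
show() handler using it: ]

	static int my_trace_show(struct seq_file *m, void *v)
	{
		struct trace_iterator *iter = v;

		/* flushes iter->seq into m and resets it */
		trace_print_seq(m, &iter->seq);

		return 0;
	}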
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index e0bde39c2dd9..91630217fb46 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -20,6 +20,8 @@ trace_print_bprintk_msg_only(struct trace_iterator *iter);
 extern enum print_line_t
 trace_print_printk_msg_only(struct trace_iterator *iter);
 
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 extern int
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index bae791ebcc51..810a5b7cf1c5 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -36,6 +36,7 @@ static void probe_power_start(struct power_trace *it, unsigned int type,
 
 static void probe_power_end(struct power_trace *it)
 {
+	struct ftrace_event_call *call = &event_power;
 	struct ring_buffer_event *event;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
@@ -54,7 +55,8 @@ static void probe_power_end(struct power_trace *it)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
@@ -62,6 +64,7 @@ static void probe_power_end(struct power_trace *it)
 static void probe_power_mark(struct power_trace *it, unsigned int type,
 				unsigned int level)
 {
+	struct ftrace_event_call *call = &event_power;
 	struct ring_buffer_event *event;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
@@ -84,7 +87,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index eb81556107fe..9bece9687b62 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -245,17 +245,13 @@ static const struct file_operations ftrace_formats_fops = {
 static __init int init_trace_printk_function_export(void)
 {
 	struct dentry *d_tracer;
-	struct dentry *entry;
 
 	d_tracer = tracing_init_dentry();
 	if (!d_tracer)
 		return 0;
 
-	entry = debugfs_create_file("printk_formats", 0444, d_tracer,
-				    NULL, &ftrace_formats_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'printk_formats' entry\n");
+	trace_create_file("printk_formats", 0444, d_tracer,
+			  NULL, &ftrace_formats_fops);
 
 	return 0;
 }
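[ Note: trace_create_file() does not appear in this patch; given the
open-coded pattern it replaces here and in the stack, sysprof and
event-subsystem hunks, it presumably just centralizes the NULL check
and warning, roughly: ]

	struct dentry *trace_create_file(const char *name, mode_t mode,
					 struct dentry *parent, void *data,
					 const struct file_operations *fops)
	{
		struct dentry *ret;

		ret = debugfs_create_file(name, mode, parent, data, fops);
		if (!ret)
			pr_warning("Could not create debugfs '%s' entry\n",
				   name);

		return ret;
	}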
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 9117cea6f1ae..9d8cccdfaa06 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -29,13 +29,13 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	int cpu;
 	int pc;
 
-	if (!sched_ref || sched_stopped)
+	if (unlikely(!sched_ref))
 		return;
 
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
 
-	if (!tracer_enabled)
+	if (!tracer_enabled || sched_stopped)
 		return;
 
 	pc = preempt_count();
@@ -56,15 +56,15 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 	unsigned long flags;
 	int cpu, pc;
 
-	if (!likely(tracer_enabled))
+	if (unlikely(!sched_ref))
 		return;
 
-	pc = preempt_count();
 	tracing_record_cmdline(current);
 
-	if (sched_stopped)
+	if (!tracer_enabled || sched_stopped)
 		return;
 
+	pc = preempt_count();
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index c750f65f9661..1796f00524e1 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -352,19 +352,14 @@ __setup("stacktrace", enable_stacktrace);
 static __init int stack_trace_init(void)
 {
 	struct dentry *d_tracer;
-	struct dentry *entry;
 
 	d_tracer = tracing_init_dentry();
 
-	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
-				    &max_stack_size, &stack_max_size_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'stack_max_size' entry\n");
+	trace_create_file("stack_max_size", 0644, d_tracer,
+			  &max_stack_size, &stack_max_size_fops);
 
-	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
-				    NULL, &stack_trace_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'stack_trace' entry\n");
+	trace_create_file("stack_trace", 0444, d_tracer,
+			  NULL, &stack_trace_fops);
 
 	if (stack_tracer_enabled)
 		register_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index acdebd771a93..fdde3a4a94cd 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -85,7 +85,7 @@ static int stat_seq_init(struct tracer_stat_session *session)
 	if (!ts->stat_cmp)
 		ts->stat_cmp = dummy_cmp;
 
-	stat = ts->stat_start();
+	stat = ts->stat_start(ts);
 	if (!stat)
 		goto exit;
 
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index 202274cf7f3d..f3546a2cd826 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -12,7 +12,7 @@ struct tracer_stat {
 	/* The name of your stat file */
 	const char *name;
 	/* Iteration over statistic entries */
-	void *(*stat_start)(void);
+	void *(*stat_start)(struct tracer_stat *trace);
 	void *(*stat_next)(void *prev, int idx);
 	/* Compare two entries for stats sorting */
 	int (*stat_cmp)(void *p1, void *p2);
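[ Note: passing the tracer_stat to stat_start() lets an implementation
that embeds its tracer_stat inside a larger bookkeeping structure
recover its own state with container_of() instead of relying on a
single global.  Types and names below are invented for illustration: ]

	struct my_stat_session {
		struct list_head	entries;
		struct tracer_stat	stat;	/* registered with the stat core */
	};

	static void *my_stat_start(struct tracer_stat *trace)
	{
		struct my_stat_session *session =
			container_of(trace, struct my_stat_session, stat);

		/* first statistic entry, or NULL when the list is empty */
		return list_empty(&session->entries) ? NULL : session->entries.next;
	}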
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 91fd19c2149f..e04b76cc238a 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -321,11 +321,7 @@ static const struct file_operations sysprof_sample_fops = {
 
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *entry;
 
-	entry = debugfs_create_file("sysprof_sample_period", 0644,
-				    d_tracer, NULL, &sysprof_sample_fops);
-	if (entry)
-		return;
-	pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
+	trace_create_file("sysprof_sample_period", 0644,
+			  d_tracer, NULL, &sysprof_sample_fops);
 }
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 797201e4a137..984b9175c13d 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -152,7 +152,7 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
 	return ret;
 }
 
-static void *workqueue_stat_start(void)
+static void *workqueue_stat_start(struct tracer_stat *trace)
 {
 	int cpu;
 	void *ret = NULL;
@@ -102,7 +102,7 @@
 #include <linux/cpu.h>
 #include <linux/sysctl.h>
 #include <linux/module.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
@@ -65,7 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -16,7 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>