aboutsummaryrefslogtreecommitdiffstats
path: root/include/trace
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-06-26 17:02:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-06-26 17:02:43 -0400
commite382608254e06c8109f40044f5e693f2e04f3899 (patch)
tree7c46c58a5a15d19a312c35a8e70e69d1cbd93236 /include/trace
parentfcbc1777ce8b5edf831c1eca16c1a63c1e4f39fb (diff)
parentb44754d8262d3aab842998cf747f44fe6090be9f (diff)
Merge tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt: "This patch series contains several clean ups and even a new trace clock "monotonic raw". Also some enhancements to make the ring buffer even faster. But the biggest and most noticeable change is the renaming of the ftrace* files, structures and variables that have to deal with trace events. Over the years I've had several developers tell me about their confusion with what ftrace is compared to events. Technically, "ftrace" is the infrastructure to do the function hooks, which include tracing and also helps with live kernel patching. But the trace events are a separate entity altogether, and the files that affect the trace events should not be named "ftrace". These include: include/trace/ftrace.h -> include/trace/trace_events.h include/linux/ftrace_event.h -> include/linux/trace_events.h Also, functions that are specific for trace events have also been renamed: ftrace_print_*() -> trace_print_*() (un)register_ftrace_event() -> (un)register_trace_event() ftrace_event_name() -> trace_event_name() ftrace_trigger_soft_disabled() -> trace_trigger_soft_disabled() ftrace_define_fields_##call() -> trace_define_fields_##call() ftrace_get_offsets_##call() -> trace_get_offsets_##call() Structures have been renamed: ftrace_event_file -> trace_event_file ftrace_event_{call,class} -> trace_event_{call,class} ftrace_event_buffer -> trace_event_buffer ftrace_subsystem_dir -> trace_subsystem_dir ftrace_event_raw_##call -> trace_event_raw_##call ftrace_event_data_offset_##call-> trace_event_data_offset_##call ftrace_event_type_funcs_##call -> trace_event_type_funcs_##call And a few various variables and flags have also been updated. This has been sitting in linux-next for some time, and I have not heard a single complaint about this rename breaking anything. 
Mostly because these functions, variables and structures are mostly internal to the tracing system and are seldom (if ever) used by anything external to that" * tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits) ring_buffer: Allow to exit the ring buffer benchmark immediately ring-buffer-benchmark: Fix the wrong type ring-buffer-benchmark: Fix the wrong param in module_param ring-buffer: Add enum names for the context levels ring-buffer: Remove useless unused tracing_off_permanent() ring-buffer: Give NMIs a chance to lock the reader_lock ring-buffer: Add trace_recursive checks to ring_buffer_write() ring-buffer: Allways do the trace_recursive checks ring-buffer: Move recursive check to per_cpu descriptor ring-buffer: Add unlikelys to make fast path the default tracing: Rename ftrace_get_offsets_##call() to trace_event_get_offsets_##call() tracing: Rename ftrace_define_fields_##call() to trace_event_define_fields_##call() tracing: Rename ftrace_event_type_funcs_##call to trace_event_type_funcs_##call tracing: Rename ftrace_data_offset_##call to trace_event_data_offset_##call tracing: Rename ftrace_raw_##call event structures to trace_event_raw_##call tracing: Rename ftrace_trigger_soft_disabled() to trace_trigger_soft_disabled() tracing: Rename FTRACE_EVENT_FL_* flags to EVENT_FILE_FL_* tracing: Rename struct ftrace_subsystem_dir to trace_subsystem_dir tracing: Rename ftrace_event_name() to trace_event_name() tracing: Rename FTRACE_MAX_EVENT to TRACE_EVENT_TYPE_MAX ...
Diffstat (limited to 'include/trace')
-rw-r--r--include/trace/define_trace.h3
-rw-r--r--include/trace/events/power.h2
-rw-r--r--include/trace/perf.h350
-rw-r--r--include/trace/syscall.h6
-rw-r--r--include/trace/trace_events.h (renamed from include/trace/ftrace.h)413
5 files changed, 387 insertions, 387 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 02e1003568a4..09b3880105a9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -87,7 +87,8 @@
87#define DECLARE_TRACE(name, proto, args) 87#define DECLARE_TRACE(name, proto, args)
88 88
89#ifdef CONFIG_EVENT_TRACING 89#ifdef CONFIG_EVENT_TRACING
90#include <trace/ftrace.h> 90#include <trace/trace_events.h>
91#include <trace/perf.h>
91#endif 92#endif
92 93
93#undef TRACE_EVENT 94#undef TRACE_EVENT
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 630d1e5e4de0..284244ebfe8d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -7,7 +7,7 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/pm_qos.h> 8#include <linux/pm_qos.h>
9#include <linux/tracepoint.h> 9#include <linux/tracepoint.h>
10#include <linux/ftrace_event.h> 10#include <linux/trace_events.h>
11 11
12#define TPS(x) tracepoint_string(x) 12#define TPS(x) tracepoint_string(x)
13 13
diff --git a/include/trace/perf.h b/include/trace/perf.h
new file mode 100644
index 000000000000..1b5443cebedc
--- /dev/null
+++ b/include/trace/perf.h
@@ -0,0 +1,350 @@
1/*
2 * Stage 4 of the trace events.
3 *
4 * Override the macros in <trace/trace_events.h> to include the following:
5 *
6 * For those macros defined with TRACE_EVENT:
7 *
8 * static struct trace_event_call event_<call>;
9 *
10 * static void trace_event_raw_event_<call>(void *__data, proto)
11 * {
12 * struct trace_event_file *trace_file = __data;
13 * struct trace_event_call *event_call = trace_file->event_call;
14 * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
15 * unsigned long eflags = trace_file->flags;
16 * enum event_trigger_type __tt = ETT_NONE;
17 * struct ring_buffer_event *event;
18 * struct trace_event_raw_<call> *entry; <-- defined in stage 1
19 * struct ring_buffer *buffer;
20 * unsigned long irq_flags;
21 * int __data_size;
22 * int pc;
23 *
24 * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
25 * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
26 * event_triggers_call(trace_file, NULL);
27 * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
28 * return;
29 * }
30 *
31 * local_save_flags(irq_flags);
32 * pc = preempt_count();
33 *
34 * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
35 *
36 * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
37 * event_<call>->event.type,
38 * sizeof(*entry) + __data_size,
39 * irq_flags, pc);
40 * if (!event)
41 * return;
42 * entry = ring_buffer_event_data(event);
43 *
44 * { <assign>; } <-- Here we assign the entries by the __field and
45 * __array macros.
46 *
47 * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
48 * __tt = event_triggers_call(trace_file, entry);
49 *
50 * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
51 * &trace_file->flags))
52 * ring_buffer_discard_commit(buffer, event);
53 * else if (!filter_check_discard(trace_file, entry, buffer, event))
54 * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
55 *
56 * if (__tt)
57 * event_triggers_post_call(trace_file, __tt);
58 * }
59 *
60 * static struct trace_event ftrace_event_type_<call> = {
61 * .trace = trace_raw_output_<call>, <-- stage 2
62 * };
63 *
64 * static char print_fmt_<call>[] = <TP_printk>;
65 *
66 * static struct trace_event_class __used event_class_<template> = {
67 * .system = "<system>",
68 * .define_fields = trace_event_define_fields_<call>,
69 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
70 * .raw_init = trace_event_raw_init,
71 * .probe = trace_event_raw_event_##call,
72 * .reg = trace_event_reg,
73 * };
74 *
75 * static struct trace_event_call event_<call> = {
76 * .class = event_class_<template>,
77 * {
78 * .tp = &__tracepoint_<call>,
79 * },
80 * .event = &ftrace_event_type_<call>,
81 * .print_fmt = print_fmt_<call>,
82 * .flags = TRACE_EVENT_FL_TRACEPOINT,
83 * };
84 * // its only safe to use pointers when doing linker tricks to
85 * // create an array.
86 * static struct trace_event_call __used
87 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
88 *
89 */
90
91#ifdef CONFIG_PERF_EVENTS
92
93#define _TRACE_PERF_PROTO(call, proto) \
94 static notrace void \
95 perf_trace_##call(void *__data, proto);
96
97#define _TRACE_PERF_INIT(call) \
98 .perf_probe = perf_trace_##call,
99
100#else
101#define _TRACE_PERF_PROTO(call, proto)
102#define _TRACE_PERF_INIT(call)
103#endif /* CONFIG_PERF_EVENTS */
104
105#undef __entry
106#define __entry entry
107
108#undef __field
109#define __field(type, item)
110
111#undef __field_struct
112#define __field_struct(type, item)
113
114#undef __array
115#define __array(type, item, len)
116
117#undef __dynamic_array
118#define __dynamic_array(type, item, len) \
119 __entry->__data_loc_##item = __data_offsets.item;
120
121#undef __string
122#define __string(item, src) __dynamic_array(char, item, -1)
123
124#undef __assign_str
125#define __assign_str(dst, src) \
126 strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
127
128#undef __bitmask
129#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
130
131#undef __get_bitmask
132#define __get_bitmask(field) (char *)__get_dynamic_array(field)
133
134#undef __assign_bitmask
135#define __assign_bitmask(dst, src, nr_bits) \
136 memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
137
138#undef TP_fast_assign
139#define TP_fast_assign(args...) args
140
141#undef __perf_addr
142#define __perf_addr(a) (a)
143
144#undef __perf_count
145#define __perf_count(c) (c)
146
147#undef __perf_task
148#define __perf_task(t) (t)
149
150#undef DECLARE_EVENT_CLASS
151#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
152 \
153static notrace void \
154trace_event_raw_event_##call(void *__data, proto) \
155{ \
156 struct trace_event_file *trace_file = __data; \
157 struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
158 struct trace_event_buffer fbuffer; \
159 struct trace_event_raw_##call *entry; \
160 int __data_size; \
161 \
162 if (trace_trigger_soft_disabled(trace_file)) \
163 return; \
164 \
165 __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
166 \
167 entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
168 sizeof(*entry) + __data_size); \
169 \
170 if (!entry) \
171 return; \
172 \
173 tstruct \
174 \
175 { assign; } \
176 \
177 trace_event_buffer_commit(&fbuffer); \
178}
179/*
180 * The ftrace_test_probe is compiled out, it is only here as a build time check
181 * to make sure that if the tracepoint handling changes, the ftrace probe will
182 * fail to compile unless it too is updated.
183 */
184
185#undef DEFINE_EVENT
186#define DEFINE_EVENT(template, call, proto, args) \
187static inline void ftrace_test_probe_##call(void) \
188{ \
189 check_trace_callback_type_##call(trace_event_raw_event_##template); \
190}
191
192#undef DEFINE_EVENT_PRINT
193#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
194
195#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
196
197#undef __entry
198#define __entry REC
199
200#undef __print_flags
201#undef __print_symbolic
202#undef __print_hex
203#undef __get_dynamic_array
204#undef __get_dynamic_array_len
205#undef __get_str
206#undef __get_bitmask
207#undef __print_array
208
209#undef TP_printk
210#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
211
212#undef DECLARE_EVENT_CLASS
213#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
214_TRACE_PERF_PROTO(call, PARAMS(proto)); \
215static char print_fmt_##call[] = print; \
216static struct trace_event_class __used __refdata event_class_##call = { \
217 .system = TRACE_SYSTEM_STRING, \
218 .define_fields = trace_event_define_fields_##call, \
219 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
220 .raw_init = trace_event_raw_init, \
221 .probe = trace_event_raw_event_##call, \
222 .reg = trace_event_reg, \
223 _TRACE_PERF_INIT(call) \
224};
225
226#undef DEFINE_EVENT
227#define DEFINE_EVENT(template, call, proto, args) \
228 \
229static struct trace_event_call __used event_##call = { \
230 .class = &event_class_##template, \
231 { \
232 .tp = &__tracepoint_##call, \
233 }, \
234 .event.funcs = &trace_event_type_funcs_##template, \
235 .print_fmt = print_fmt_##template, \
236 .flags = TRACE_EVENT_FL_TRACEPOINT, \
237}; \
238static struct trace_event_call __used \
239__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
240
241#undef DEFINE_EVENT_PRINT
242#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
243 \
244static char print_fmt_##call[] = print; \
245 \
246static struct trace_event_call __used event_##call = { \
247 .class = &event_class_##template, \
248 { \
249 .tp = &__tracepoint_##call, \
250 }, \
251 .event.funcs = &trace_event_type_funcs_##call, \
252 .print_fmt = print_fmt_##call, \
253 .flags = TRACE_EVENT_FL_TRACEPOINT, \
254}; \
255static struct trace_event_call __used \
256__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
257
258#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
259
260#undef TRACE_SYSTEM_VAR
261
262#ifdef CONFIG_PERF_EVENTS
263
264#undef __entry
265#define __entry entry
266
267#undef __get_dynamic_array
268#define __get_dynamic_array(field) \
269 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
270
271#undef __get_dynamic_array_len
272#define __get_dynamic_array_len(field) \
273 ((__entry->__data_loc_##field >> 16) & 0xffff)
274
275#undef __get_str
276#define __get_str(field) (char *)__get_dynamic_array(field)
277
278#undef __get_bitmask
279#define __get_bitmask(field) (char *)__get_dynamic_array(field)
280
281#undef __perf_addr
282#define __perf_addr(a) (__addr = (a))
283
284#undef __perf_count
285#define __perf_count(c) (__count = (c))
286
287#undef __perf_task
288#define __perf_task(t) (__task = (t))
289
290#undef DECLARE_EVENT_CLASS
291#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
292static notrace void \
293perf_trace_##call(void *__data, proto) \
294{ \
295 struct trace_event_call *event_call = __data; \
296 struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
297 struct trace_event_raw_##call *entry; \
298 struct pt_regs *__regs; \
299 u64 __addr = 0, __count = 1; \
300 struct task_struct *__task = NULL; \
301 struct hlist_head *head; \
302 int __entry_size; \
303 int __data_size; \
304 int rctx; \
305 \
306 __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
307 \
308 head = this_cpu_ptr(event_call->perf_events); \
309 if (__builtin_constant_p(!__task) && !__task && \
310 hlist_empty(head)) \
311 return; \
312 \
313 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
314 sizeof(u64)); \
315 __entry_size -= sizeof(u32); \
316 \
317 entry = perf_trace_buf_prepare(__entry_size, \
318 event_call->event.type, &__regs, &rctx); \
319 if (!entry) \
320 return; \
321 \
322 perf_fetch_caller_regs(__regs); \
323 \
324 tstruct \
325 \
326 { assign; } \
327 \
328 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
329 __count, __regs, head, __task); \
330}
331
332/*
333 * This part is compiled out, it is only here as a build time check
334 * to make sure that if the tracepoint handling changes, the
335 * perf probe will fail to compile unless it too is updated.
336 */
337#undef DEFINE_EVENT
338#define DEFINE_EVENT(template, call, proto, args) \
339static inline void perf_test_probe_##call(void) \
340{ \
341 check_trace_callback_type_##call(perf_trace_##template); \
342}
343
344
345#undef DEFINE_EVENT_PRINT
346#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
347 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
348
349#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
350#endif /* CONFIG_PERF_EVENTS */
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 9674145e2f6a..7434f0f5d3f6 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/tracepoint.h> 4#include <linux/tracepoint.h>
5#include <linux/unistd.h> 5#include <linux/unistd.h>
6#include <linux/ftrace_event.h> 6#include <linux/trace_events.h>
7#include <linux/thread_info.h> 7#include <linux/thread_info.h>
8 8
9#include <asm/ptrace.h> 9#include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
29 const char **args; 29 const char **args;
30 struct list_head enter_fields; 30 struct list_head enter_fields;
31 31
32 struct ftrace_event_call *enter_event; 32 struct trace_event_call *enter_event;
33 struct ftrace_event_call *exit_event; 33 struct trace_event_call *exit_event;
34}; 34};
35 35
36#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS) 36#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
diff --git a/include/trace/ftrace.h b/include/trace/trace_events.h
index 37d4b10b111d..43be3b0e44d3 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/trace_events.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Override the macros in <trace/trace_events.h> to include the following: 4 * Override the macros in <trace/trace_events.h> to include the following:
5 * 5 *
6 * struct ftrace_raw_<call> { 6 * struct trace_event_raw_<call> {
7 * struct trace_entry ent; 7 * struct trace_entry ent;
8 * <type> <item>; 8 * <type> <item>;
9 * <type2> <item2>[<len>]; 9 * <type2> <item2>[<len>];
@@ -16,7 +16,7 @@
16 * in the structure. 16 * in the structure.
17 */ 17 */
18 18
19#include <linux/ftrace_event.h> 19#include <linux/trace_events.h>
20 20
21#ifndef TRACE_SYSTEM_VAR 21#ifndef TRACE_SYSTEM_VAR
22#define TRACE_SYSTEM_VAR TRACE_SYSTEM 22#define TRACE_SYSTEM_VAR TRACE_SYSTEM
@@ -95,17 +95,17 @@ TRACE_MAKE_SYSTEM_STR();
95 95
96#undef DECLARE_EVENT_CLASS 96#undef DECLARE_EVENT_CLASS
97#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ 97#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
98 struct ftrace_raw_##name { \ 98 struct trace_event_raw_##name { \
99 struct trace_entry ent; \ 99 struct trace_entry ent; \
100 tstruct \ 100 tstruct \
101 char __data[0]; \ 101 char __data[0]; \
102 }; \ 102 }; \
103 \ 103 \
104 static struct ftrace_event_class event_class_##name; 104 static struct trace_event_class event_class_##name;
105 105
106#undef DEFINE_EVENT 106#undef DEFINE_EVENT
107#define DEFINE_EVENT(template, name, proto, args) \ 107#define DEFINE_EVENT(template, name, proto, args) \
108 static struct ftrace_event_call __used \ 108 static struct trace_event_call __used \
109 __attribute__((__aligned__(4))) event_##name 109 __attribute__((__aligned__(4))) event_##name
110 110
111#undef DEFINE_EVENT_FN 111#undef DEFINE_EVENT_FN
@@ -138,7 +138,7 @@ TRACE_MAKE_SYSTEM_STR();
138 * 138 *
139 * Include the following: 139 * Include the following:
140 * 140 *
141 * struct ftrace_data_offsets_<call> { 141 * struct trace_event_data_offsets_<call> {
142 * u32 <item1>; 142 * u32 <item1>;
143 * u32 <item2>; 143 * u32 <item2>;
144 * [...] 144 * [...]
@@ -178,7 +178,7 @@ TRACE_MAKE_SYSTEM_STR();
178 178
179#undef DECLARE_EVENT_CLASS 179#undef DECLARE_EVENT_CLASS
180#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 180#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
181 struct ftrace_data_offsets_##call { \ 181 struct trace_event_data_offsets_##call { \
182 tstruct; \ 182 tstruct; \
183 }; 183 };
184 184
@@ -203,10 +203,10 @@ TRACE_MAKE_SYSTEM_STR();
203 * Override the macros in <trace/trace_events.h> to include the following: 203 * Override the macros in <trace/trace_events.h> to include the following:
204 * 204 *
205 * enum print_line_t 205 * enum print_line_t
206 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) 206 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
207 * { 207 * {
208 * struct trace_seq *s = &iter->seq; 208 * struct trace_seq *s = &iter->seq;
209 * struct ftrace_raw_<call> *field; <-- defined in stage 1 209 * struct trace_event_raw_<call> *field; <-- defined in stage 1
210 * struct trace_entry *entry; 210 * struct trace_entry *entry;
211 * struct trace_seq *p = &iter->tmp_seq; 211 * struct trace_seq *p = &iter->tmp_seq;
212 * int ret; 212 * int ret;
@@ -258,7 +258,7 @@ TRACE_MAKE_SYSTEM_STR();
258 void *__bitmask = __get_dynamic_array(field); \ 258 void *__bitmask = __get_dynamic_array(field); \
259 unsigned int __bitmask_size; \ 259 unsigned int __bitmask_size; \
260 __bitmask_size = __get_dynamic_array_len(field); \ 260 __bitmask_size = __get_dynamic_array_len(field); \
261 ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ 261 trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
262 }) 262 })
263 263
264#undef __print_flags 264#undef __print_flags
@@ -266,7 +266,7 @@ TRACE_MAKE_SYSTEM_STR();
266 ({ \ 266 ({ \
267 static const struct trace_print_flags __flags[] = \ 267 static const struct trace_print_flags __flags[] = \
268 { flag_array, { -1, NULL }}; \ 268 { flag_array, { -1, NULL }}; \
269 ftrace_print_flags_seq(p, delim, flag, __flags); \ 269 trace_print_flags_seq(p, delim, flag, __flags); \
270 }) 270 })
271 271
272#undef __print_symbolic 272#undef __print_symbolic
@@ -274,7 +274,7 @@ TRACE_MAKE_SYSTEM_STR();
274 ({ \ 274 ({ \
275 static const struct trace_print_flags symbols[] = \ 275 static const struct trace_print_flags symbols[] = \
276 { symbol_array, { -1, NULL }}; \ 276 { symbol_array, { -1, NULL }}; \
277 ftrace_print_symbols_seq(p, value, symbols); \ 277 trace_print_symbols_seq(p, value, symbols); \
278 }) 278 })
279 279
280#undef __print_symbolic_u64 280#undef __print_symbolic_u64
@@ -283,7 +283,7 @@ TRACE_MAKE_SYSTEM_STR();
283 ({ \ 283 ({ \
284 static const struct trace_print_flags_u64 symbols[] = \ 284 static const struct trace_print_flags_u64 symbols[] = \
285 { symbol_array, { -1, NULL } }; \ 285 { symbol_array, { -1, NULL } }; \
286 ftrace_print_symbols_seq_u64(p, value, symbols); \ 286 trace_print_symbols_seq_u64(p, value, symbols); \
287 }) 287 })
288#else 288#else
289#define __print_symbolic_u64(value, symbol_array...) \ 289#define __print_symbolic_u64(value, symbol_array...) \
@@ -291,30 +291,30 @@ TRACE_MAKE_SYSTEM_STR();
291#endif 291#endif
292 292
293#undef __print_hex 293#undef __print_hex
294#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) 294#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
295 295
296#undef __print_array 296#undef __print_array
297#define __print_array(array, count, el_size) \ 297#define __print_array(array, count, el_size) \
298 ({ \ 298 ({ \
299 BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ 299 BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
300 el_size != 4 && el_size != 8); \ 300 el_size != 4 && el_size != 8); \
301 ftrace_print_array_seq(p, array, count, el_size); \ 301 trace_print_array_seq(p, array, count, el_size); \
302 }) 302 })
303 303
304#undef DECLARE_EVENT_CLASS 304#undef DECLARE_EVENT_CLASS
305#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 305#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
306static notrace enum print_line_t \ 306static notrace enum print_line_t \
307ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ 307trace_raw_output_##call(struct trace_iterator *iter, int flags, \
308 struct trace_event *trace_event) \ 308 struct trace_event *trace_event) \
309{ \ 309{ \
310 struct trace_seq *s = &iter->seq; \ 310 struct trace_seq *s = &iter->seq; \
311 struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ 311 struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
312 struct ftrace_raw_##call *field; \ 312 struct trace_event_raw_##call *field; \
313 int ret; \ 313 int ret; \
314 \ 314 \
315 field = (typeof(field))iter->ent; \ 315 field = (typeof(field))iter->ent; \
316 \ 316 \
317 ret = ftrace_raw_output_prep(iter, trace_event); \ 317 ret = trace_raw_output_prep(iter, trace_event); \
318 if (ret != TRACE_TYPE_HANDLED) \ 318 if (ret != TRACE_TYPE_HANDLED) \
319 return ret; \ 319 return ret; \
320 \ 320 \
@@ -322,17 +322,17 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
322 \ 322 \
323 return trace_handle_return(s); \ 323 return trace_handle_return(s); \
324} \ 324} \
325static struct trace_event_functions ftrace_event_type_funcs_##call = { \ 325static struct trace_event_functions trace_event_type_funcs_##call = { \
326 .trace = ftrace_raw_output_##call, \ 326 .trace = trace_raw_output_##call, \
327}; 327};
328 328
329#undef DEFINE_EVENT_PRINT 329#undef DEFINE_EVENT_PRINT
330#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 330#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
331static notrace enum print_line_t \ 331static notrace enum print_line_t \
332ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ 332trace_raw_output_##call(struct trace_iterator *iter, int flags, \
333 struct trace_event *event) \ 333 struct trace_event *event) \
334{ \ 334{ \
335 struct ftrace_raw_##template *field; \ 335 struct trace_event_raw_##template *field; \
336 struct trace_entry *entry; \ 336 struct trace_entry *entry; \
337 struct trace_seq *p = &iter->tmp_seq; \ 337 struct trace_seq *p = &iter->tmp_seq; \
338 \ 338 \
@@ -346,10 +346,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
346 field = (typeof(field))entry; \ 346 field = (typeof(field))entry; \
347 \ 347 \
348 trace_seq_init(p); \ 348 trace_seq_init(p); \
349 return ftrace_output_call(iter, #call, print); \ 349 return trace_output_call(iter, #call, print); \
350} \ 350} \
351static struct trace_event_functions ftrace_event_type_funcs_##call = { \ 351static struct trace_event_functions trace_event_type_funcs_##call = { \
352 .trace = ftrace_raw_output_##call, \ 352 .trace = trace_raw_output_##call, \
353}; 353};
354 354
355#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 355#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -407,9 +407,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
407#undef DECLARE_EVENT_CLASS 407#undef DECLARE_EVENT_CLASS
408#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ 408#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
409static int notrace __init \ 409static int notrace __init \
410ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ 410trace_event_define_fields_##call(struct trace_event_call *event_call) \
411{ \ 411{ \
412 struct ftrace_raw_##call field; \ 412 struct trace_event_raw_##call field; \
413 int ret; \ 413 int ret; \
414 \ 414 \
415 tstruct; \ 415 tstruct; \
@@ -485,12 +485,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
485 485
486#undef DECLARE_EVENT_CLASS 486#undef DECLARE_EVENT_CLASS
487#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 487#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
488static inline notrace int ftrace_get_offsets_##call( \ 488static inline notrace int trace_event_get_offsets_##call( \
489 struct ftrace_data_offsets_##call *__data_offsets, proto) \ 489 struct trace_event_data_offsets_##call *__data_offsets, proto) \
490{ \ 490{ \
491 int __data_size = 0; \ 491 int __data_size = 0; \
492 int __maybe_unused __item_length; \ 492 int __maybe_unused __item_length; \
493 struct ftrace_raw_##call __maybe_unused *entry; \ 493 struct trace_event_raw_##call __maybe_unused *entry; \
494 \ 494 \
495 tstruct; \ 495 tstruct; \
496 \ 496 \
@@ -506,354 +506,3 @@ static inline notrace int ftrace_get_offsets_##call( \
506 506
507#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 507#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
508 508
509/*
510 * Stage 4 of the trace events.
511 *
512 * Override the macros in <trace/trace_events.h> to include the following:
513 *
514 * For those macros defined with TRACE_EVENT:
515 *
516 * static struct ftrace_event_call event_<call>;
517 *
518 * static void ftrace_raw_event_<call>(void *__data, proto)
519 * {
520 * struct ftrace_event_file *ftrace_file = __data;
521 * struct ftrace_event_call *event_call = ftrace_file->event_call;
522 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
523 * unsigned long eflags = ftrace_file->flags;
524 * enum event_trigger_type __tt = ETT_NONE;
525 * struct ring_buffer_event *event;
526 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
527 * struct ring_buffer *buffer;
528 * unsigned long irq_flags;
529 * int __data_size;
530 * int pc;
531 *
532 * if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
533 * if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
534 * event_triggers_call(ftrace_file, NULL);
535 * if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
536 * return;
537 * }
538 *
539 * local_save_flags(irq_flags);
540 * pc = preempt_count();
541 *
542 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
543 *
544 * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
545 * event_<call>->event.type,
546 * sizeof(*entry) + __data_size,
547 * irq_flags, pc);
548 * if (!event)
549 * return;
550 * entry = ring_buffer_event_data(event);
551 *
552 * { <assign>; } <-- Here we assign the entries by the __field and
553 * __array macros.
554 *
555 * if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
556 * __tt = event_triggers_call(ftrace_file, entry);
557 *
558 * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
559 * &ftrace_file->flags))
560 * ring_buffer_discard_commit(buffer, event);
561 * else if (!filter_check_discard(ftrace_file, entry, buffer, event))
562 * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
563 *
564 * if (__tt)
565 * event_triggers_post_call(ftrace_file, __tt);
566 * }
567 *
568 * static struct trace_event ftrace_event_type_<call> = {
569 * .trace = ftrace_raw_output_<call>, <-- stage 2
570 * };
571 *
572 * static char print_fmt_<call>[] = <TP_printk>;
573 *
574 * static struct ftrace_event_class __used event_class_<template> = {
575 * .system = "<system>",
576 * .define_fields = ftrace_define_fields_<call>,
577 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
578 * .raw_init = trace_event_raw_init,
579 * .probe = ftrace_raw_event_##call,
580 * .reg = ftrace_event_reg,
581 * };
582 *
583 * static struct ftrace_event_call event_<call> = {
584 * .class = event_class_<template>,
585 * {
586 * .tp = &__tracepoint_<call>,
587 * },
588 * .event = &ftrace_event_type_<call>,
589 * .print_fmt = print_fmt_<call>,
590 * .flags = TRACE_EVENT_FL_TRACEPOINT,
591 * };
592 * // its only safe to use pointers when doing linker tricks to
593 * // create an array.
594 * static struct ftrace_event_call __used
595 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
596 *
597 */
598
599#ifdef CONFIG_PERF_EVENTS
600
601#define _TRACE_PERF_PROTO(call, proto) \
602 static notrace void \
603 perf_trace_##call(void *__data, proto);
604
605#define _TRACE_PERF_INIT(call) \
606 .perf_probe = perf_trace_##call,
607
608#else
609#define _TRACE_PERF_PROTO(call, proto)
610#define _TRACE_PERF_INIT(call)
611#endif /* CONFIG_PERF_EVENTS */
612
613#undef __entry
614#define __entry entry
615
616#undef __field
617#define __field(type, item)
618
619#undef __field_struct
620#define __field_struct(type, item)
621
622#undef __array
623#define __array(type, item, len)
624
625#undef __dynamic_array
626#define __dynamic_array(type, item, len) \
627 __entry->__data_loc_##item = __data_offsets.item;
628
629#undef __string
630#define __string(item, src) __dynamic_array(char, item, -1)
631
632#undef __assign_str
633#define __assign_str(dst, src) \
634 strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
635
636#undef __bitmask
637#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
638
639#undef __get_bitmask
640#define __get_bitmask(field) (char *)__get_dynamic_array(field)
641
642#undef __assign_bitmask
643#define __assign_bitmask(dst, src, nr_bits) \
644 memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
645
646#undef TP_fast_assign
647#define TP_fast_assign(args...) args
648
649#undef __perf_addr
650#define __perf_addr(a) (a)
651
652#undef __perf_count
653#define __perf_count(c) (c)
654
655#undef __perf_task
656#define __perf_task(t) (t)
657
658#undef DECLARE_EVENT_CLASS
659#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
660 \
661static notrace void \
662ftrace_raw_event_##call(void *__data, proto) \
663{ \
664 struct ftrace_event_file *ftrace_file = __data; \
665 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
666 struct ftrace_event_buffer fbuffer; \
667 struct ftrace_raw_##call *entry; \
668 int __data_size; \
669 \
670 if (ftrace_trigger_soft_disabled(ftrace_file)) \
671 return; \
672 \
673 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
674 \
675 entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
676 sizeof(*entry) + __data_size); \
677 \
678 if (!entry) \
679 return; \
680 \
681 tstruct \
682 \
683 { assign; } \
684 \
685 ftrace_event_buffer_commit(&fbuffer); \
686}
687/*
688 * The ftrace_test_probe is compiled out, it is only here as a build time check
689 * to make sure that if the tracepoint handling changes, the ftrace probe will
690 * fail to compile unless it too is updated.
691 */
692
693#undef DEFINE_EVENT
694#define DEFINE_EVENT(template, call, proto, args) \
695static inline void ftrace_test_probe_##call(void) \
696{ \
697 check_trace_callback_type_##call(ftrace_raw_event_##template); \
698}
699
700#undef DEFINE_EVENT_PRINT
701#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
702
703#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
704
705#undef __entry
706#define __entry REC
707
708#undef __print_flags
709#undef __print_symbolic
710#undef __print_hex
711#undef __get_dynamic_array
712#undef __get_dynamic_array_len
713#undef __get_str
714#undef __get_bitmask
715#undef __print_array
716
717#undef TP_printk
718#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
719
720#undef DECLARE_EVENT_CLASS
721#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
722_TRACE_PERF_PROTO(call, PARAMS(proto)); \
723static char print_fmt_##call[] = print; \
724static struct ftrace_event_class __used __refdata event_class_##call = { \
725 .system = TRACE_SYSTEM_STRING, \
726 .define_fields = ftrace_define_fields_##call, \
727 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
728 .raw_init = trace_event_raw_init, \
729 .probe = ftrace_raw_event_##call, \
730 .reg = ftrace_event_reg, \
731 _TRACE_PERF_INIT(call) \
732};
733
734#undef DEFINE_EVENT
735#define DEFINE_EVENT(template, call, proto, args) \
736 \
737static struct ftrace_event_call __used event_##call = { \
738 .class = &event_class_##template, \
739 { \
740 .tp = &__tracepoint_##call, \
741 }, \
742 .event.funcs = &ftrace_event_type_funcs_##template, \
743 .print_fmt = print_fmt_##template, \
744 .flags = TRACE_EVENT_FL_TRACEPOINT, \
745}; \
746static struct ftrace_event_call __used \
747__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
748
749#undef DEFINE_EVENT_PRINT
750#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
751 \
752static char print_fmt_##call[] = print; \
753 \
754static struct ftrace_event_call __used event_##call = { \
755 .class = &event_class_##template, \
756 { \
757 .tp = &__tracepoint_##call, \
758 }, \
759 .event.funcs = &ftrace_event_type_funcs_##call, \
760 .print_fmt = print_fmt_##call, \
761 .flags = TRACE_EVENT_FL_TRACEPOINT, \
762}; \
763static struct ftrace_event_call __used \
764__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
765
766#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
767
768#undef TRACE_SYSTEM_VAR
769
770#ifdef CONFIG_PERF_EVENTS
771
772#undef __entry
773#define __entry entry
774
775#undef __get_dynamic_array
776#define __get_dynamic_array(field) \
777 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
778
779#undef __get_dynamic_array_len
780#define __get_dynamic_array_len(field) \
781 ((__entry->__data_loc_##field >> 16) & 0xffff)
782
783#undef __get_str
784#define __get_str(field) (char *)__get_dynamic_array(field)
785
786#undef __get_bitmask
787#define __get_bitmask(field) (char *)__get_dynamic_array(field)
788
789#undef __perf_addr
790#define __perf_addr(a) (__addr = (a))
791
792#undef __perf_count
793#define __perf_count(c) (__count = (c))
794
795#undef __perf_task
796#define __perf_task(t) (__task = (t))
797
798#undef DECLARE_EVENT_CLASS
799#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
800static notrace void \
801perf_trace_##call(void *__data, proto) \
802{ \
803 struct ftrace_event_call *event_call = __data; \
804 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
805 struct ftrace_raw_##call *entry; \
806 struct pt_regs *__regs; \
807 u64 __addr = 0, __count = 1; \
808 struct task_struct *__task = NULL; \
809 struct hlist_head *head; \
810 int __entry_size; \
811 int __data_size; \
812 int rctx; \
813 \
814 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
815 \
816 head = this_cpu_ptr(event_call->perf_events); \
817 if (__builtin_constant_p(!__task) && !__task && \
818 hlist_empty(head)) \
819 return; \
820 \
821 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
822 sizeof(u64)); \
823 __entry_size -= sizeof(u32); \
824 \
825 entry = perf_trace_buf_prepare(__entry_size, \
826 event_call->event.type, &__regs, &rctx); \
827 if (!entry) \
828 return; \
829 \
830 perf_fetch_caller_regs(__regs); \
831 \
832 tstruct \
833 \
834 { assign; } \
835 \
836 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
837 __count, __regs, head, __task); \
838}
839
840/*
841 * This part is compiled out, it is only here as a build time check
842 * to make sure that if the tracepoint handling changes, the
843 * perf probe will fail to compile unless it too is updated.
844 */
845#undef DEFINE_EVENT
846#define DEFINE_EVENT(template, call, proto, args) \
847static inline void perf_test_probe_##call(void) \
848{ \
849 check_trace_callback_type_##call(perf_trace_##template); \
850}
851
852
853#undef DEFINE_EVENT_PRINT
854#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
855 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
856
857#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
858#endif /* CONFIG_PERF_EVENTS */
859