author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:24:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:24:03 -0400
commit     483e3cd6a34ad2d7e41100bc1b98614ac42a4567 (patch)
tree       ef544ccdd1e95991c32fd8b656714583b7398371 /include
parent     774a694f8cd08115d130a290d73c6d8563f26b1b (diff)
parent     d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (105 commits)
ring-buffer: only enable ring_buffer_swap_cpu when needed
ring-buffer: check for swapped buffers in start of committing
tracing: report error in trace if we fail to swap latency buffer
tracing: add trace_array_printk for internal tracers to use
tracing: pass around ring buffer instead of tracer
tracing: make tracing_reset safe for external use
tracing: use timestamp to determine start of latency traces
tracing: Remove mentioning of legacy latency_trace file from documentation
tracing/filters: Defer pred allocation, fix memory leak
tracing: remove users of tracing_reset
tracing: disable buffers and synchronize_sched before resetting
tracing: disable update max tracer while reading trace
tracing: print out start and stop in latency traces
ring-buffer: disable all cpu buffers when one finds a problem
ring-buffer: do not count discarded events
ring-buffer: remove ring_buffer_event_discard
ring-buffer: fix ring_buffer_read crossing pages
ring-buffer: remove unnecessary cpu_relax
ring-buffer: do not swap buffers during a commit
ring-buffer: do not reset while in a commit
...
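The headline API change in this series — trace events write through an explicitly passed ring buffer instead of an implicit global one — shows up throughout the header diffs below. Here is a minimal sketch of the new calling convention, modeled on the generated ftrace_raw_event_<call> code in include/trace/ftrace.h further down; event_sample and struct ftrace_raw_sample are illustrative names, not part of the patch.

/*
 * Minimal sketch of the new convention, modeled on the generated
 * ftrace_raw_event_<call> code in include/trace/ftrace.h below.
 * event_sample and struct ftrace_raw_sample are illustrative only.
 */
static void sample_raw_event(int value)
{
	struct ftrace_event_call *event_call = &event_sample;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;		/* returned by lock_reserve */
	struct ftrace_raw_sample *entry;
	unsigned long irq_flags;
	int pc;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* the destination buffer now comes back through the first argument */
	event = trace_current_buffer_lock_reserve(&buffer, event_sample.id,
						  sizeof(*entry), irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	/* and must be handed to the filter/commit helpers explicitly */
	if (!filter_current_check_discard(buffer, event_call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
}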
Diffstat (limited to 'include')
 include/linux/ftrace_event.h    |  51
 include/linux/module.h          |  14
 include/linux/perf_counter.h    |   2
 include/linux/ring_buffer.h     |  24
 include/linux/syscalls.h        | 131
 include/linux/tracepoint.h      |  29
 include/trace/define_trace.h    |   7
 include/trace/events/module.h   | 126
 include/trace/events/sched.h    |  12
 include/trace/events/syscalls.h |  70
 include/trace/ftrace.h          |  93
 include/trace/syscall.h         |  48
 12 files changed, 518 insertions, 89 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index a81170de7f6b..23f7179bf74e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -93,16 +93,22 @@ void tracing_generic_entry_update(struct trace_entry *entry, | |||
93 | unsigned long flags, | 93 | unsigned long flags, |
94 | int pc); | 94 | int pc); |
95 | struct ring_buffer_event * | 95 | struct ring_buffer_event * |
96 | trace_current_buffer_lock_reserve(int type, unsigned long len, | 96 | trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, |
97 | int type, unsigned long len, | ||
97 | unsigned long flags, int pc); | 98 | unsigned long flags, int pc); |
98 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | 99 | void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, |
100 | struct ring_buffer_event *event, | ||
99 | unsigned long flags, int pc); | 101 | unsigned long flags, int pc); |
100 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | 102 | void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, |
103 | struct ring_buffer_event *event, | ||
101 | unsigned long flags, int pc); | 104 | unsigned long flags, int pc); |
102 | void trace_current_buffer_discard_commit(struct ring_buffer_event *event); | 105 | void trace_current_buffer_discard_commit(struct ring_buffer *buffer, |
106 | struct ring_buffer_event *event); | ||
103 | 107 | ||
104 | void tracing_record_cmdline(struct task_struct *tsk); | 108 | void tracing_record_cmdline(struct task_struct *tsk); |
105 | 109 | ||
110 | struct event_filter; | ||
111 | |||
106 | struct ftrace_event_call { | 112 | struct ftrace_event_call { |
107 | struct list_head list; | 113 | struct list_head list; |
108 | char *name; | 114 | char *name; |
@@ -110,16 +116,18 @@ struct ftrace_event_call { | |||
110 | struct dentry *dir; | 116 | struct dentry *dir; |
111 | struct trace_event *event; | 117 | struct trace_event *event; |
112 | int enabled; | 118 | int enabled; |
113 | int (*regfunc)(void); | 119 | int (*regfunc)(void *); |
114 | void (*unregfunc)(void); | 120 | void (*unregfunc)(void *); |
115 | int id; | 121 | int id; |
116 | int (*raw_init)(void); | 122 | int (*raw_init)(void); |
117 | int (*show_format)(struct trace_seq *s); | 123 | int (*show_format)(struct ftrace_event_call *call, |
118 | int (*define_fields)(void); | 124 | struct trace_seq *s); |
125 | int (*define_fields)(struct ftrace_event_call *); | ||
119 | struct list_head fields; | 126 | struct list_head fields; |
120 | int filter_active; | 127 | int filter_active; |
121 | void *filter; | 128 | struct event_filter *filter; |
122 | void *mod; | 129 | void *mod; |
130 | void *data; | ||
123 | 131 | ||
124 | atomic_t profile_count; | 132 | atomic_t profile_count; |
125 | int (*profile_enable)(struct ftrace_event_call *); | 133 | int (*profile_enable)(struct ftrace_event_call *); |
@@ -129,15 +137,25 @@ struct ftrace_event_call { | |||
129 | #define MAX_FILTER_PRED 32 | 137 | #define MAX_FILTER_PRED 32 |
130 | #define MAX_FILTER_STR_VAL 128 | 138 | #define MAX_FILTER_STR_VAL 128 |
131 | 139 | ||
132 | extern int init_preds(struct ftrace_event_call *call); | ||
133 | extern void destroy_preds(struct ftrace_event_call *call); | 140 | extern void destroy_preds(struct ftrace_event_call *call); |
134 | extern int filter_match_preds(struct ftrace_event_call *call, void *rec); | 141 | extern int filter_match_preds(struct ftrace_event_call *call, void *rec); |
135 | extern int filter_current_check_discard(struct ftrace_event_call *call, | 142 | extern int filter_current_check_discard(struct ring_buffer *buffer, |
143 | struct ftrace_event_call *call, | ||
136 | void *rec, | 144 | void *rec, |
137 | struct ring_buffer_event *event); | 145 | struct ring_buffer_event *event); |
138 | 146 | ||
139 | extern int trace_define_field(struct ftrace_event_call *call, char *type, | 147 | enum { |
140 | char *name, int offset, int size, int is_signed); | 148 | FILTER_OTHER = 0, |
149 | FILTER_STATIC_STRING, | ||
150 | FILTER_DYN_STRING, | ||
151 | FILTER_PTR_STRING, | ||
152 | }; | ||
153 | |||
154 | extern int trace_define_field(struct ftrace_event_call *call, | ||
155 | const char *type, const char *name, | ||
156 | int offset, int size, int is_signed, | ||
157 | int filter_type); | ||
158 | extern int trace_define_common_fields(struct ftrace_event_call *call); | ||
141 | 159 | ||
142 | #define is_signed_type(type) (((type)(-1)) < 0) | 160 | #define is_signed_type(type) (((type)(-1)) < 0) |
143 | 161 | ||
@@ -162,11 +180,4 @@ do { \ | |||
162 | __trace_printk(ip, fmt, ##args); \ | 180 | __trace_printk(ip, fmt, ##args); \ |
163 | } while (0) | 181 | } while (0) |
164 | 182 | ||
165 | #define __common_field(type, item, is_signed) \ | ||
166 | ret = trace_define_field(event_call, #type, "common_" #item, \ | ||
167 | offsetof(typeof(field.ent), item), \ | ||
168 | sizeof(field.ent.item), is_signed); \ | ||
169 | if (ret) \ | ||
170 | return ret; | ||
171 | |||
172 | #endif /* _LINUX_FTRACE_EVENT_H */ | 183 | #endif /* _LINUX_FTRACE_EVENT_H */ |
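As the hunks above show, trace_define_field() now takes the field's filter type and the open-coded __common_field() block is replaced by trace_define_common_fields(). A hedged sketch of a define_fields callback against the new signature follows; the my_event_entry layout and its fields are made up for illustration.

/*
 * Sketch of a define_fields callback against the new interface.
 * my_event_entry and its fields are illustrative only.
 */
static int my_event_define_fields(struct ftrace_event_call *call)
{
	struct my_event_entry field;	/* illustrative record layout */
	int ret;

	/* the common fields (type, flags, pid, ...) are set up in one call now */
	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	/* every field carries an explicit filter type from the new enum */
	ret = trace_define_field(call, "int", "len",
				 offsetof(typeof(field), len),
				 sizeof(field.len),
				 is_signed_type(int), FILTER_OTHER);
	if (ret)
		return ret;

	return trace_define_field(call, "char[16]", "name",
				  offsetof(typeof(field), name),
				  sizeof(field.name), 0, FILTER_STATIC_STRING);
}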
diff --git a/include/linux/module.h b/include/linux/module.h
index 098bdb7bfacf..f8f92d015efe 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,10 +17,12 @@ | |||
17 | #include <linux/moduleparam.h> | 17 | #include <linux/moduleparam.h> |
18 | #include <linux/marker.h> | 18 | #include <linux/marker.h> |
19 | #include <linux/tracepoint.h> | 19 | #include <linux/tracepoint.h> |
20 | #include <asm/local.h> | ||
21 | 20 | ||
21 | #include <asm/local.h> | ||
22 | #include <asm/module.h> | 22 | #include <asm/module.h> |
23 | 23 | ||
24 | #include <trace/events/module.h> | ||
25 | |||
24 | /* Not Yet Implemented */ | 26 | /* Not Yet Implemented */ |
25 | #define MODULE_SUPPORTED_DEVICE(name) | 27 | #define MODULE_SUPPORTED_DEVICE(name) |
26 | 28 | ||
@@ -462,7 +464,10 @@ static inline local_t *__module_ref_addr(struct module *mod, int cpu) | |||
462 | static inline void __module_get(struct module *module) | 464 | static inline void __module_get(struct module *module) |
463 | { | 465 | { |
464 | if (module) { | 466 | if (module) { |
465 | local_inc(__module_ref_addr(module, get_cpu())); | 467 | unsigned int cpu = get_cpu(); |
468 | local_inc(__module_ref_addr(module, cpu)); | ||
469 | trace_module_get(module, _THIS_IP_, | ||
470 | local_read(__module_ref_addr(module, cpu))); | ||
466 | put_cpu(); | 471 | put_cpu(); |
467 | } | 472 | } |
468 | } | 473 | } |
@@ -473,8 +478,11 @@ static inline int try_module_get(struct module *module) | |||
473 | 478 | ||
474 | if (module) { | 479 | if (module) { |
475 | unsigned int cpu = get_cpu(); | 480 | unsigned int cpu = get_cpu(); |
476 | if (likely(module_is_live(module))) | 481 | if (likely(module_is_live(module))) { |
477 | local_inc(__module_ref_addr(module, cpu)); | 482 | local_inc(__module_ref_addr(module, cpu)); |
483 | trace_module_get(module, _THIS_IP_, | ||
484 | local_read(__module_ref_addr(module, cpu))); | ||
485 | } | ||
478 | else | 486 | else |
479 | ret = 0; | 487 | ret = 0; |
480 | put_cpu(); | 488 | put_cpu(); |
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index e022b847c90d..972f90d7a32f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -766,6 +766,8 @@ extern int sysctl_perf_counter_mlock; | |||
766 | extern int sysctl_perf_counter_sample_rate; | 766 | extern int sysctl_perf_counter_sample_rate; |
767 | 767 | ||
768 | extern void perf_counter_init(void); | 768 | extern void perf_counter_init(void); |
769 | extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, | ||
770 | void *record, int entry_size); | ||
769 | 771 | ||
770 | #ifndef perf_misc_flags | 772 | #ifndef perf_misc_flags |
771 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ | 773 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 29f8599e6bea..5fcc31ed5771 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -75,20 +75,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * ring_buffer_event_discard can discard any event in the ring buffer. | ||
79 | * it is up to the caller to protect against a reader from | ||
80 | * consuming it or a writer from wrapping and replacing it. | ||
81 | * | ||
82 | * No external protection is needed if this is called before | ||
83 | * the event is commited. But in that case it would be better to | ||
84 | * use ring_buffer_discard_commit. | ||
85 | * | ||
86 | * Note, if an event that has not been committed is discarded | ||
87 | * with ring_buffer_event_discard, it must still be committed. | ||
88 | */ | ||
89 | void ring_buffer_event_discard(struct ring_buffer_event *event); | ||
90 | |||
91 | /* | ||
92 | * ring_buffer_discard_commit will remove an event that has not | 78 | * ring_buffer_discard_commit will remove an event that has not |
93 | * ben committed yet. If this is used, then ring_buffer_unlock_commit | 79 | * ben committed yet. If this is used, then ring_buffer_unlock_commit |
94 | * must not be called on the discarded event. This function | 80 | * must not be called on the discarded event. This function |
@@ -154,8 +140,17 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer); | |||
154 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); | 140 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); |
155 | void ring_buffer_reset(struct ring_buffer *buffer); | 141 | void ring_buffer_reset(struct ring_buffer *buffer); |
156 | 142 | ||
143 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | ||
157 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | 144 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, |
158 | struct ring_buffer *buffer_b, int cpu); | 145 | struct ring_buffer *buffer_b, int cpu); |
146 | #else | ||
147 | static inline int | ||
148 | ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | ||
149 | struct ring_buffer *buffer_b, int cpu) | ||
150 | { | ||
151 | return -ENODEV; | ||
152 | } | ||
153 | #endif | ||
159 | 154 | ||
160 | int ring_buffer_empty(struct ring_buffer *buffer); | 155 | int ring_buffer_empty(struct ring_buffer *buffer); |
161 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); | 156 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); |
@@ -170,7 +165,6 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | |||
170 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | 165 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); |
171 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 166 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
172 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); | 167 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); |
173 | unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu); | ||
174 | 168 | ||
175 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); | 169 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
176 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, | 170 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
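Since ring_buffer_swap_cpu() now compiles to a -ENODEV stub unless CONFIG_RING_BUFFER_ALLOW_SWAP is set, callers have to treat a failed swap as non-fatal (the series adds "report error in trace if we fail to swap latency buffer" for exactly this case). A small caller-side sketch with illustrative names:

/*
 * Caller-side sketch with illustrative names: a latency-style tracer
 * swapping its per-cpu "max" snapshot buffer must tolerate -ENODEV
 * when swapping is compiled out.
 */
static int update_max_buffer_cpu(struct ring_buffer *max_buf,
				 struct ring_buffer *live_buf, int cpu)
{
	int ret = ring_buffer_swap_cpu(max_buf, live_buf, cpu);

	if (ret == -ENODEV) {
		/* CONFIG_RING_BUFFER_ALLOW_SWAP not set: record the failure
		 * in the trace instead of treating it as fatal */
	}
	return ret;
}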
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 80de7003d8c2..a8e37821cc60 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -64,6 +64,7 @@ struct perf_counter_attr; | |||
64 | #include <linux/sem.h> | 64 | #include <linux/sem.h> |
65 | #include <asm/siginfo.h> | 65 | #include <asm/siginfo.h> |
66 | #include <asm/signal.h> | 66 | #include <asm/signal.h> |
67 | #include <linux/unistd.h> | ||
67 | #include <linux/quota.h> | 68 | #include <linux/quota.h> |
68 | #include <linux/key.h> | 69 | #include <linux/key.h> |
69 | #include <trace/syscall.h> | 70 | #include <trace/syscall.h> |
@@ -97,6 +98,53 @@ struct perf_counter_attr; | |||
97 | #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) | 98 | #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) |
98 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) | 99 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) |
99 | 100 | ||
101 | #ifdef CONFIG_EVENT_PROFILE | ||
102 | #define TRACE_SYS_ENTER_PROFILE(sname) \ | ||
103 | static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \ | ||
104 | { \ | ||
105 | int ret = 0; \ | ||
106 | if (!atomic_inc_return(&event_enter_##sname.profile_count)) \ | ||
107 | ret = reg_prof_syscall_enter("sys"#sname); \ | ||
108 | return ret; \ | ||
109 | } \ | ||
110 | \ | ||
111 | static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\ | ||
112 | { \ | ||
113 | if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \ | ||
114 | unreg_prof_syscall_enter("sys"#sname); \ | ||
115 | } | ||
116 | |||
117 | #define TRACE_SYS_EXIT_PROFILE(sname) \ | ||
118 | static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \ | ||
119 | { \ | ||
120 | int ret = 0; \ | ||
121 | if (!atomic_inc_return(&event_exit_##sname.profile_count)) \ | ||
122 | ret = reg_prof_syscall_exit("sys"#sname); \ | ||
123 | return ret; \ | ||
124 | } \ | ||
125 | \ | ||
126 | static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \ | ||
127 | { \ | ||
128 | if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \ | ||
129 | unreg_prof_syscall_exit("sys"#sname); \ | ||
130 | } | ||
131 | |||
132 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ | ||
133 | .profile_count = ATOMIC_INIT(-1), \ | ||
134 | .profile_enable = prof_sysenter_enable_##sname, \ | ||
135 | .profile_disable = prof_sysenter_disable_##sname, | ||
136 | |||
137 | #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ | ||
138 | .profile_count = ATOMIC_INIT(-1), \ | ||
139 | .profile_enable = prof_sysexit_enable_##sname, \ | ||
140 | .profile_disable = prof_sysexit_disable_##sname, | ||
141 | #else | ||
142 | #define TRACE_SYS_ENTER_PROFILE(sname) | ||
143 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) | ||
144 | #define TRACE_SYS_EXIT_PROFILE(sname) | ||
145 | #define TRACE_SYS_EXIT_PROFILE_INIT(sname) | ||
146 | #endif | ||
147 | |||
100 | #ifdef CONFIG_FTRACE_SYSCALLS | 148 | #ifdef CONFIG_FTRACE_SYSCALLS |
101 | #define __SC_STR_ADECL1(t, a) #a | 149 | #define __SC_STR_ADECL1(t, a) #a |
102 | #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) | 150 | #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) |
@@ -112,7 +160,81 @@ struct perf_counter_attr; | |||
112 | #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) | 160 | #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) |
113 | #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) | 161 | #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) |
114 | 162 | ||
163 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ | ||
164 | static struct ftrace_event_call event_enter_##sname; \ | ||
165 | struct trace_event enter_syscall_print_##sname = { \ | ||
166 | .trace = print_syscall_enter, \ | ||
167 | }; \ | ||
168 | static int init_enter_##sname(void) \ | ||
169 | { \ | ||
170 | int num, id; \ | ||
171 | num = syscall_name_to_nr("sys"#sname); \ | ||
172 | if (num < 0) \ | ||
173 | return -ENOSYS; \ | ||
174 | id = register_ftrace_event(&enter_syscall_print_##sname);\ | ||
175 | if (!id) \ | ||
176 | return -ENODEV; \ | ||
177 | event_enter_##sname.id = id; \ | ||
178 | set_syscall_enter_id(num, id); \ | ||
179 | INIT_LIST_HEAD(&event_enter_##sname.fields); \ | ||
180 | return 0; \ | ||
181 | } \ | ||
182 | TRACE_SYS_ENTER_PROFILE(sname); \ | ||
183 | static struct ftrace_event_call __used \ | ||
184 | __attribute__((__aligned__(4))) \ | ||
185 | __attribute__((section("_ftrace_events"))) \ | ||
186 | event_enter_##sname = { \ | ||
187 | .name = "sys_enter"#sname, \ | ||
188 | .system = "syscalls", \ | ||
189 | .event = &event_syscall_enter, \ | ||
190 | .raw_init = init_enter_##sname, \ | ||
191 | .show_format = syscall_enter_format, \ | ||
192 | .define_fields = syscall_enter_define_fields, \ | ||
193 | .regfunc = reg_event_syscall_enter, \ | ||
194 | .unregfunc = unreg_event_syscall_enter, \ | ||
195 | .data = "sys"#sname, \ | ||
196 | TRACE_SYS_ENTER_PROFILE_INIT(sname) \ | ||
197 | } | ||
198 | |||
199 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ | ||
200 | static struct ftrace_event_call event_exit_##sname; \ | ||
201 | struct trace_event exit_syscall_print_##sname = { \ | ||
202 | .trace = print_syscall_exit, \ | ||
203 | }; \ | ||
204 | static int init_exit_##sname(void) \ | ||
205 | { \ | ||
206 | int num, id; \ | ||
207 | num = syscall_name_to_nr("sys"#sname); \ | ||
208 | if (num < 0) \ | ||
209 | return -ENOSYS; \ | ||
210 | id = register_ftrace_event(&exit_syscall_print_##sname);\ | ||
211 | if (!id) \ | ||
212 | return -ENODEV; \ | ||
213 | event_exit_##sname.id = id; \ | ||
214 | set_syscall_exit_id(num, id); \ | ||
215 | INIT_LIST_HEAD(&event_exit_##sname.fields); \ | ||
216 | return 0; \ | ||
217 | } \ | ||
218 | TRACE_SYS_EXIT_PROFILE(sname); \ | ||
219 | static struct ftrace_event_call __used \ | ||
220 | __attribute__((__aligned__(4))) \ | ||
221 | __attribute__((section("_ftrace_events"))) \ | ||
222 | event_exit_##sname = { \ | ||
223 | .name = "sys_exit"#sname, \ | ||
224 | .system = "syscalls", \ | ||
225 | .event = &event_syscall_exit, \ | ||
226 | .raw_init = init_exit_##sname, \ | ||
227 | .show_format = syscall_exit_format, \ | ||
228 | .define_fields = syscall_exit_define_fields, \ | ||
229 | .regfunc = reg_event_syscall_exit, \ | ||
230 | .unregfunc = unreg_event_syscall_exit, \ | ||
231 | .data = "sys"#sname, \ | ||
232 | TRACE_SYS_EXIT_PROFILE_INIT(sname) \ | ||
233 | } | ||
234 | |||
115 | #define SYSCALL_METADATA(sname, nb) \ | 235 | #define SYSCALL_METADATA(sname, nb) \ |
236 | SYSCALL_TRACE_ENTER_EVENT(sname); \ | ||
237 | SYSCALL_TRACE_EXIT_EVENT(sname); \ | ||
116 | static const struct syscall_metadata __used \ | 238 | static const struct syscall_metadata __used \ |
117 | __attribute__((__aligned__(4))) \ | 239 | __attribute__((__aligned__(4))) \ |
118 | __attribute__((section("__syscalls_metadata"))) \ | 240 | __attribute__((section("__syscalls_metadata"))) \ |
@@ -121,18 +243,23 @@ struct perf_counter_attr; | |||
121 | .nb_args = nb, \ | 243 | .nb_args = nb, \ |
122 | .types = types_##sname, \ | 244 | .types = types_##sname, \ |
123 | .args = args_##sname, \ | 245 | .args = args_##sname, \ |
124 | } | 246 | .enter_event = &event_enter_##sname, \ |
247 | .exit_event = &event_exit_##sname, \ | ||
248 | }; | ||
125 | 249 | ||
126 | #define SYSCALL_DEFINE0(sname) \ | 250 | #define SYSCALL_DEFINE0(sname) \ |
251 | SYSCALL_TRACE_ENTER_EVENT(_##sname); \ | ||
252 | SYSCALL_TRACE_EXIT_EVENT(_##sname); \ | ||
127 | static const struct syscall_metadata __used \ | 253 | static const struct syscall_metadata __used \ |
128 | __attribute__((__aligned__(4))) \ | 254 | __attribute__((__aligned__(4))) \ |
129 | __attribute__((section("__syscalls_metadata"))) \ | 255 | __attribute__((section("__syscalls_metadata"))) \ |
130 | __syscall_meta_##sname = { \ | 256 | __syscall_meta_##sname = { \ |
131 | .name = "sys_"#sname, \ | 257 | .name = "sys_"#sname, \ |
132 | .nb_args = 0, \ | 258 | .nb_args = 0, \ |
259 | .enter_event = &event_enter__##sname, \ | ||
260 | .exit_event = &event_exit__##sname, \ | ||
133 | }; \ | 261 | }; \ |
134 | asmlinkage long sys_##sname(void) | 262 | asmlinkage long sys_##sname(void) |
135 | |||
136 | #else | 263 | #else |
137 | #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) | 264 | #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) |
138 | #endif | 265 | #endif |
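Worked expansion, for orientation: because SYSCALL_DEFINE0() instantiates the trace events with _##sname, the generated symbols carry a double underscore, which is what the .enter_event/.exit_event initializers above refer to. A rough, illustrative sketch for SYSCALL_DEFINE0(getpid):

/*
 * Rough expansion sketch (illustrative; section attributes and the
 * profile hooks omitted) of SYSCALL_DEFINE0(getpid) under
 * CONFIG_FTRACE_SYSCALLS:
 *
 *	static struct ftrace_event_call event_enter__getpid;	// .name = "sys_enter_getpid"
 *	static struct ftrace_event_call event_exit__getpid;	// .name = "sys_exit_getpid"
 *
 *	static const struct syscall_metadata __syscall_meta_getpid = {
 *		.name        = "sys_getpid",
 *		.nb_args     = 0,
 *		.enter_event = &event_enter__getpid,	// double underscore from
 *		.exit_event  = &event_exit__getpid,	//   the _##sname pasting
 *	};
 *	asmlinkage long sys_getpid(void)
 */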
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b9dc4ca0246f..63a3f7a80580 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -23,6 +23,8 @@ struct tracepoint; | |||
23 | struct tracepoint { | 23 | struct tracepoint { |
24 | const char *name; /* Tracepoint name */ | 24 | const char *name; /* Tracepoint name */ |
25 | int state; /* State. */ | 25 | int state; /* State. */ |
26 | void (*regfunc)(void); | ||
27 | void (*unregfunc)(void); | ||
26 | void **funcs; | 28 | void **funcs; |
27 | } __attribute__((aligned(32))); /* | 29 | } __attribute__((aligned(32))); /* |
28 | * Aligned on 32 bytes because it is | 30 | * Aligned on 32 bytes because it is |
@@ -78,12 +80,16 @@ struct tracepoint { | |||
78 | return tracepoint_probe_unregister(#name, (void *)probe);\ | 80 | return tracepoint_probe_unregister(#name, (void *)probe);\ |
79 | } | 81 | } |
80 | 82 | ||
81 | #define DEFINE_TRACE(name) \ | 83 | |
84 | #define DEFINE_TRACE_FN(name, reg, unreg) \ | ||
82 | static const char __tpstrtab_##name[] \ | 85 | static const char __tpstrtab_##name[] \ |
83 | __attribute__((section("__tracepoints_strings"))) = #name; \ | 86 | __attribute__((section("__tracepoints_strings"))) = #name; \ |
84 | struct tracepoint __tracepoint_##name \ | 87 | struct tracepoint __tracepoint_##name \ |
85 | __attribute__((section("__tracepoints"), aligned(32))) = \ | 88 | __attribute__((section("__tracepoints"), aligned(32))) = \ |
86 | { __tpstrtab_##name, 0, NULL } | 89 | { __tpstrtab_##name, 0, reg, unreg, NULL } |
90 | |||
91 | #define DEFINE_TRACE(name) \ | ||
92 | DEFINE_TRACE_FN(name, NULL, NULL); | ||
87 | 93 | ||
88 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ | 94 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ |
89 | EXPORT_SYMBOL_GPL(__tracepoint_##name) | 95 | EXPORT_SYMBOL_GPL(__tracepoint_##name) |
@@ -108,6 +114,7 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin, | |||
108 | return -ENOSYS; \ | 114 | return -ENOSYS; \ |
109 | } | 115 | } |
110 | 116 | ||
117 | #define DEFINE_TRACE_FN(name, reg, unreg) | ||
111 | #define DEFINE_TRACE(name) | 118 | #define DEFINE_TRACE(name) |
112 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) | 119 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) |
113 | #define EXPORT_TRACEPOINT_SYMBOL(name) | 120 | #define EXPORT_TRACEPOINT_SYMBOL(name) |
@@ -158,6 +165,15 @@ static inline void tracepoint_synchronize_unregister(void) | |||
158 | 165 | ||
159 | #define PARAMS(args...) args | 166 | #define PARAMS(args...) args |
160 | 167 | ||
168 | #endif /* _LINUX_TRACEPOINT_H */ | ||
169 | |||
170 | /* | ||
171 | * Note: we keep the TRACE_EVENT outside the include file ifdef protection. | ||
172 | * This is due to the way trace events work. If a file includes two | ||
173 | * trace event headers under one "CREATE_TRACE_POINTS" the first include | ||
174 | * will override the TRACE_EVENT and break the second include. | ||
175 | */ | ||
176 | |||
161 | #ifndef TRACE_EVENT | 177 | #ifndef TRACE_EVENT |
162 | /* | 178 | /* |
163 | * For use with the TRACE_EVENT macro: | 179 | * For use with the TRACE_EVENT macro: |
@@ -259,10 +275,15 @@ static inline void tracepoint_synchronize_unregister(void) | |||
259 | * can also by used by generic instrumentation like SystemTap), and | 275 | * can also by used by generic instrumentation like SystemTap), and |
260 | * it is also used to expose a structured trace record in | 276 | * it is also used to expose a structured trace record in |
261 | * /sys/kernel/debug/tracing/events/. | 277 | * /sys/kernel/debug/tracing/events/. |
278 | * | ||
279 | * A set of (un)registration functions can be passed to the variant | ||
280 | * TRACE_EVENT_FN to perform any (un)registration work. | ||
262 | */ | 281 | */ |
263 | 282 | ||
264 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | 283 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ |
265 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 284 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
266 | #endif | 285 | #define TRACE_EVENT_FN(name, proto, args, struct, \ |
286 | assign, print, reg, unreg) \ | ||
287 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
267 | 288 | ||
268 | #endif | 289 | #endif /* ifdef TRACE_EVENT (see note above) */ |
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index f7a7ae1e8f90..2a4b3bf74033 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -26,6 +26,11 @@ | |||
26 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | 26 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ |
27 | DEFINE_TRACE(name) | 27 | DEFINE_TRACE(name) |
28 | 28 | ||
29 | #undef TRACE_EVENT_FN | ||
30 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | ||
31 | assign, print, reg, unreg) \ | ||
32 | DEFINE_TRACE_FN(name, reg, unreg) | ||
33 | |||
29 | #undef DECLARE_TRACE | 34 | #undef DECLARE_TRACE |
30 | #define DECLARE_TRACE(name, proto, args) \ | 35 | #define DECLARE_TRACE(name, proto, args) \ |
31 | DEFINE_TRACE(name) | 36 | DEFINE_TRACE(name) |
@@ -56,6 +61,8 @@ | |||
56 | #include <trace/ftrace.h> | 61 | #include <trace/ftrace.h> |
57 | #endif | 62 | #endif |
58 | 63 | ||
64 | #undef TRACE_EVENT | ||
65 | #undef TRACE_EVENT_FN | ||
59 | #undef TRACE_HEADER_MULTI_READ | 66 | #undef TRACE_HEADER_MULTI_READ |
60 | 67 | ||
61 | /* Only undef what we defined in this file */ | 68 | /* Only undef what we defined in this file */ |
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
new file mode 100644
index 000000000000..84160fb18478
--- /dev/null
+++ b/include/trace/events/module.h
@@ -0,0 +1,126 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM module | ||
3 | |||
4 | #if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_MODULE_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | #ifdef CONFIG_MODULES | ||
10 | |||
11 | struct module; | ||
12 | |||
13 | #define show_module_flags(flags) __print_flags(flags, "", \ | ||
14 | { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \ | ||
15 | { (1UL << TAINT_FORCED_MODULE), "F" }, \ | ||
16 | { (1UL << TAINT_CRAP), "C" }) | ||
17 | |||
18 | TRACE_EVENT(module_load, | ||
19 | |||
20 | TP_PROTO(struct module *mod), | ||
21 | |||
22 | TP_ARGS(mod), | ||
23 | |||
24 | TP_STRUCT__entry( | ||
25 | __field( unsigned int, taints ) | ||
26 | __string( name, mod->name ) | ||
27 | ), | ||
28 | |||
29 | TP_fast_assign( | ||
30 | __entry->taints = mod->taints; | ||
31 | __assign_str(name, mod->name); | ||
32 | ), | ||
33 | |||
34 | TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints)) | ||
35 | ); | ||
36 | |||
37 | TRACE_EVENT(module_free, | ||
38 | |||
39 | TP_PROTO(struct module *mod), | ||
40 | |||
41 | TP_ARGS(mod), | ||
42 | |||
43 | TP_STRUCT__entry( | ||
44 | __string( name, mod->name ) | ||
45 | ), | ||
46 | |||
47 | TP_fast_assign( | ||
48 | __assign_str(name, mod->name); | ||
49 | ), | ||
50 | |||
51 | TP_printk("%s", __get_str(name)) | ||
52 | ); | ||
53 | |||
54 | TRACE_EVENT(module_get, | ||
55 | |||
56 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | ||
57 | |||
58 | TP_ARGS(mod, ip, refcnt), | ||
59 | |||
60 | TP_STRUCT__entry( | ||
61 | __field( unsigned long, ip ) | ||
62 | __field( int, refcnt ) | ||
63 | __string( name, mod->name ) | ||
64 | ), | ||
65 | |||
66 | TP_fast_assign( | ||
67 | __entry->ip = ip; | ||
68 | __entry->refcnt = refcnt; | ||
69 | __assign_str(name, mod->name); | ||
70 | ), | ||
71 | |||
72 | TP_printk("%s call_site=%pf refcnt=%d", | ||
73 | __get_str(name), (void *)__entry->ip, __entry->refcnt) | ||
74 | ); | ||
75 | |||
76 | TRACE_EVENT(module_put, | ||
77 | |||
78 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | ||
79 | |||
80 | TP_ARGS(mod, ip, refcnt), | ||
81 | |||
82 | TP_STRUCT__entry( | ||
83 | __field( unsigned long, ip ) | ||
84 | __field( int, refcnt ) | ||
85 | __string( name, mod->name ) | ||
86 | ), | ||
87 | |||
88 | TP_fast_assign( | ||
89 | __entry->ip = ip; | ||
90 | __entry->refcnt = refcnt; | ||
91 | __assign_str(name, mod->name); | ||
92 | ), | ||
93 | |||
94 | TP_printk("%s call_site=%pf refcnt=%d", | ||
95 | __get_str(name), (void *)__entry->ip, __entry->refcnt) | ||
96 | ); | ||
97 | |||
98 | TRACE_EVENT(module_request, | ||
99 | |||
100 | TP_PROTO(char *name, bool wait, unsigned long ip), | ||
101 | |||
102 | TP_ARGS(name, wait, ip), | ||
103 | |||
104 | TP_STRUCT__entry( | ||
105 | __field( bool, wait ) | ||
106 | __field( unsigned long, ip ) | ||
107 | __string( name, name ) | ||
108 | ), | ||
109 | |||
110 | TP_fast_assign( | ||
111 | __entry->wait = wait; | ||
112 | __entry->ip = ip; | ||
113 | __assign_str(name, name); | ||
114 | ), | ||
115 | |||
116 | TP_printk("%s wait=%d call_site=%pf", | ||
117 | __get_str(name), (int)__entry->wait, (void *)__entry->ip) | ||
118 | ); | ||
119 | |||
120 | #endif /* CONFIG_MODULES */ | ||
121 | |||
122 | #endif /* _TRACE_MODULE_H */ | ||
123 | |||
124 | /* This part must be outside protection */ | ||
125 | #include <trace/define_trace.h> | ||
126 | |||
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index a4c369ec328f..b48f1ad7c946 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -94,6 +94,7 @@ TRACE_EVENT(sched_wakeup, | |||
94 | __field( pid_t, pid ) | 94 | __field( pid_t, pid ) |
95 | __field( int, prio ) | 95 | __field( int, prio ) |
96 | __field( int, success ) | 96 | __field( int, success ) |
97 | __field( int, cpu ) | ||
97 | ), | 98 | ), |
98 | 99 | ||
99 | TP_fast_assign( | 100 | TP_fast_assign( |
@@ -101,11 +102,12 @@ TRACE_EVENT(sched_wakeup, | |||
101 | __entry->pid = p->pid; | 102 | __entry->pid = p->pid; |
102 | __entry->prio = p->prio; | 103 | __entry->prio = p->prio; |
103 | __entry->success = success; | 104 | __entry->success = success; |
105 | __entry->cpu = task_cpu(p); | ||
104 | ), | 106 | ), |
105 | 107 | ||
106 | TP_printk("task %s:%d [%d] success=%d", | 108 | TP_printk("task %s:%d [%d] success=%d [%03d]", |
107 | __entry->comm, __entry->pid, __entry->prio, | 109 | __entry->comm, __entry->pid, __entry->prio, |
108 | __entry->success) | 110 | __entry->success, __entry->cpu) |
109 | ); | 111 | ); |
110 | 112 | ||
111 | /* | 113 | /* |
@@ -125,6 +127,7 @@ TRACE_EVENT(sched_wakeup_new, | |||
125 | __field( pid_t, pid ) | 127 | __field( pid_t, pid ) |
126 | __field( int, prio ) | 128 | __field( int, prio ) |
127 | __field( int, success ) | 129 | __field( int, success ) |
130 | __field( int, cpu ) | ||
128 | ), | 131 | ), |
129 | 132 | ||
130 | TP_fast_assign( | 133 | TP_fast_assign( |
@@ -132,11 +135,12 @@ TRACE_EVENT(sched_wakeup_new, | |||
132 | __entry->pid = p->pid; | 135 | __entry->pid = p->pid; |
133 | __entry->prio = p->prio; | 136 | __entry->prio = p->prio; |
134 | __entry->success = success; | 137 | __entry->success = success; |
138 | __entry->cpu = task_cpu(p); | ||
135 | ), | 139 | ), |
136 | 140 | ||
137 | TP_printk("task %s:%d [%d] success=%d", | 141 | TP_printk("task %s:%d [%d] success=%d [%03d]", |
138 | __entry->comm, __entry->pid, __entry->prio, | 142 | __entry->comm, __entry->pid, __entry->prio, |
139 | __entry->success) | 143 | __entry->success, __entry->cpu) |
140 | ); | 144 | ); |
141 | 145 | ||
142 | /* | 146 | /* |
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
new file mode 100644
index 000000000000..397dff2dbd5a
--- /dev/null
+++ b/include/trace/events/syscalls.h
@@ -0,0 +1,70 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM syscalls | ||
3 | |||
4 | #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_EVENTS_SYSCALLS_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/syscall.h> | ||
11 | |||
12 | |||
13 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | ||
14 | |||
15 | extern void syscall_regfunc(void); | ||
16 | extern void syscall_unregfunc(void); | ||
17 | |||
18 | TRACE_EVENT_FN(sys_enter, | ||
19 | |||
20 | TP_PROTO(struct pt_regs *regs, long id), | ||
21 | |||
22 | TP_ARGS(regs, id), | ||
23 | |||
24 | TP_STRUCT__entry( | ||
25 | __field( long, id ) | ||
26 | __array( unsigned long, args, 6 ) | ||
27 | ), | ||
28 | |||
29 | TP_fast_assign( | ||
30 | __entry->id = id; | ||
31 | syscall_get_arguments(current, regs, 0, 6, __entry->args); | ||
32 | ), | ||
33 | |||
34 | TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", | ||
35 | __entry->id, | ||
36 | __entry->args[0], __entry->args[1], __entry->args[2], | ||
37 | __entry->args[3], __entry->args[4], __entry->args[5]), | ||
38 | |||
39 | syscall_regfunc, syscall_unregfunc | ||
40 | ); | ||
41 | |||
42 | TRACE_EVENT_FN(sys_exit, | ||
43 | |||
44 | TP_PROTO(struct pt_regs *regs, long ret), | ||
45 | |||
46 | TP_ARGS(regs, ret), | ||
47 | |||
48 | TP_STRUCT__entry( | ||
49 | __field( long, id ) | ||
50 | __field( long, ret ) | ||
51 | ), | ||
52 | |||
53 | TP_fast_assign( | ||
54 | __entry->id = syscall_get_nr(current, regs); | ||
55 | __entry->ret = ret; | ||
56 | ), | ||
57 | |||
58 | TP_printk("NR %ld = %ld", | ||
59 | __entry->id, __entry->ret), | ||
60 | |||
61 | syscall_regfunc, syscall_unregfunc | ||
62 | ); | ||
63 | |||
64 | #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ | ||
65 | |||
66 | #endif /* _TRACE_EVENTS_SYSCALLS_H */ | ||
67 | |||
68 | /* This part must be outside protection */ | ||
69 | #include <trace/define_trace.h> | ||
70 | |||
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f64fbaae781a..308bafd93325 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,11 +21,14 @@ | |||
21 | #undef __field | 21 | #undef __field |
22 | #define __field(type, item) type item; | 22 | #define __field(type, item) type item; |
23 | 23 | ||
24 | #undef __field_ext | ||
25 | #define __field_ext(type, item, filter_type) type item; | ||
26 | |||
24 | #undef __array | 27 | #undef __array |
25 | #define __array(type, item, len) type item[len]; | 28 | #define __array(type, item, len) type item[len]; |
26 | 29 | ||
27 | #undef __dynamic_array | 30 | #undef __dynamic_array |
28 | #define __dynamic_array(type, item, len) unsigned short __data_loc_##item; | 31 | #define __dynamic_array(type, item, len) u32 __data_loc_##item; |
29 | 32 | ||
30 | #undef __string | 33 | #undef __string |
31 | #define __string(item, src) __dynamic_array(char, item, -1) | 34 | #define __string(item, src) __dynamic_array(char, item, -1) |
@@ -42,6 +45,16 @@ | |||
42 | }; \ | 45 | }; \ |
43 | static struct ftrace_event_call event_##name | 46 | static struct ftrace_event_call event_##name |
44 | 47 | ||
48 | #undef __cpparg | ||
49 | #define __cpparg(arg...) arg | ||
50 | |||
51 | /* Callbacks are meaningless to ftrace. */ | ||
52 | #undef TRACE_EVENT_FN | ||
53 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | ||
54 | assign, print, reg, unreg) \ | ||
55 | TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \ | ||
56 | __cpparg(tstruct), __cpparg(assign), __cpparg(print)) \ | ||
57 | |||
45 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 58 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
46 | 59 | ||
47 | 60 | ||
@@ -51,23 +64,27 @@ | |||
51 | * Include the following: | 64 | * Include the following: |
52 | * | 65 | * |
53 | * struct ftrace_data_offsets_<call> { | 66 | * struct ftrace_data_offsets_<call> { |
54 | * int <item1>; | 67 | * u32 <item1>; |
55 | * int <item2>; | 68 | * u32 <item2>; |
56 | * [...] | 69 | * [...] |
57 | * }; | 70 | * }; |
58 | * | 71 | * |
59 | * The __dynamic_array() macro will create each int <item>, this is | 72 | * The __dynamic_array() macro will create each u32 <item>, this is |
60 | * to keep the offset of each array from the beginning of the event. | 73 | * to keep the offset of each array from the beginning of the event. |
74 | * The size of an array is also encoded, in the higher 16 bits of <item>. | ||
61 | */ | 75 | */ |
62 | 76 | ||
63 | #undef __field | 77 | #undef __field |
64 | #define __field(type, item); | 78 | #define __field(type, item) |
79 | |||
80 | #undef __field_ext | ||
81 | #define __field_ext(type, item, filter_type) | ||
65 | 82 | ||
66 | #undef __array | 83 | #undef __array |
67 | #define __array(type, item, len) | 84 | #define __array(type, item, len) |
68 | 85 | ||
69 | #undef __dynamic_array | 86 | #undef __dynamic_array |
70 | #define __dynamic_array(type, item, len) int item; | 87 | #define __dynamic_array(type, item, len) u32 item; |
71 | 88 | ||
72 | #undef __string | 89 | #undef __string |
73 | #define __string(item, src) __dynamic_array(char, item, -1) | 90 | #define __string(item, src) __dynamic_array(char, item, -1) |
@@ -109,6 +126,9 @@ | |||
109 | if (!ret) \ | 126 | if (!ret) \ |
110 | return 0; | 127 | return 0; |
111 | 128 | ||
129 | #undef __field_ext | ||
130 | #define __field_ext(type, item, filter_type) __field(type, item) | ||
131 | |||
112 | #undef __array | 132 | #undef __array |
113 | #define __array(type, item, len) \ | 133 | #define __array(type, item, len) \ |
114 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | 134 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ |
@@ -120,7 +140,7 @@ | |||
120 | 140 | ||
121 | #undef __dynamic_array | 141 | #undef __dynamic_array |
122 | #define __dynamic_array(type, item, len) \ | 142 | #define __dynamic_array(type, item, len) \ |
123 | ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \ | 143 | ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\ |
124 | "offset:%u;\tsize:%u;\n", \ | 144 | "offset:%u;\tsize:%u;\n", \ |
125 | (unsigned int)offsetof(typeof(field), \ | 145 | (unsigned int)offsetof(typeof(field), \ |
126 | __data_loc_##item), \ | 146 | __data_loc_##item), \ |
@@ -150,7 +170,8 @@ | |||
150 | #undef TRACE_EVENT | 170 | #undef TRACE_EVENT |
151 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 171 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ |
152 | static int \ | 172 | static int \ |
153 | ftrace_format_##call(struct trace_seq *s) \ | 173 | ftrace_format_##call(struct ftrace_event_call *unused, \ |
174 | struct trace_seq *s) \ | ||
154 | { \ | 175 | { \ |
155 | struct ftrace_raw_##call field __attribute__((unused)); \ | 176 | struct ftrace_raw_##call field __attribute__((unused)); \ |
156 | int ret = 0; \ | 177 | int ret = 0; \ |
@@ -210,7 +231,7 @@ ftrace_format_##call(struct trace_seq *s) \ | |||
210 | 231 | ||
211 | #undef __get_dynamic_array | 232 | #undef __get_dynamic_array |
212 | #define __get_dynamic_array(field) \ | 233 | #define __get_dynamic_array(field) \ |
213 | ((void *)__entry + __entry->__data_loc_##field) | 234 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) |
214 | 235 | ||
215 | #undef __get_str | 236 | #undef __get_str |
216 | #define __get_str(field) (char *)__get_dynamic_array(field) | 237 | #define __get_str(field) (char *)__get_dynamic_array(field) |
@@ -263,28 +284,33 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
263 | 284 | ||
264 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 285 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
265 | 286 | ||
266 | #undef __field | 287 | #undef __field_ext |
267 | #define __field(type, item) \ | 288 | #define __field_ext(type, item, filter_type) \ |
268 | ret = trace_define_field(event_call, #type, #item, \ | 289 | ret = trace_define_field(event_call, #type, #item, \ |
269 | offsetof(typeof(field), item), \ | 290 | offsetof(typeof(field), item), \ |
270 | sizeof(field.item), is_signed_type(type)); \ | 291 | sizeof(field.item), \ |
292 | is_signed_type(type), filter_type); \ | ||
271 | if (ret) \ | 293 | if (ret) \ |
272 | return ret; | 294 | return ret; |
273 | 295 | ||
296 | #undef __field | ||
297 | #define __field(type, item) __field_ext(type, item, FILTER_OTHER) | ||
298 | |||
274 | #undef __array | 299 | #undef __array |
275 | #define __array(type, item, len) \ | 300 | #define __array(type, item, len) \ |
276 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 301 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
277 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 302 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
278 | offsetof(typeof(field), item), \ | 303 | offsetof(typeof(field), item), \ |
279 | sizeof(field.item), 0); \ | 304 | sizeof(field.item), 0, FILTER_OTHER); \ |
280 | if (ret) \ | 305 | if (ret) \ |
281 | return ret; | 306 | return ret; |
282 | 307 | ||
283 | #undef __dynamic_array | 308 | #undef __dynamic_array |
284 | #define __dynamic_array(type, item, len) \ | 309 | #define __dynamic_array(type, item, len) \ |
285 | ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ | 310 | ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ |
286 | offsetof(typeof(field), __data_loc_##item), \ | 311 | offsetof(typeof(field), __data_loc_##item), \ |
287 | sizeof(field.__data_loc_##item), 0); | 312 | sizeof(field.__data_loc_##item), 0, \ |
313 | FILTER_OTHER); | ||
288 | 314 | ||
289 | #undef __string | 315 | #undef __string |
290 | #define __string(item, src) __dynamic_array(char, item, -1) | 316 | #define __string(item, src) __dynamic_array(char, item, -1) |
@@ -292,17 +318,14 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
292 | #undef TRACE_EVENT | 318 | #undef TRACE_EVENT |
293 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 319 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ |
294 | int \ | 320 | int \ |
295 | ftrace_define_fields_##call(void) \ | 321 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ |
296 | { \ | 322 | { \ |
297 | struct ftrace_raw_##call field; \ | 323 | struct ftrace_raw_##call field; \ |
298 | struct ftrace_event_call *event_call = &event_##call; \ | ||
299 | int ret; \ | 324 | int ret; \ |
300 | \ | 325 | \ |
301 | __common_field(int, type, 1); \ | 326 | ret = trace_define_common_fields(event_call); \ |
302 | __common_field(unsigned char, flags, 0); \ | 327 | if (ret) \ |
303 | __common_field(unsigned char, preempt_count, 0); \ | 328 | return ret; \ |
304 | __common_field(int, pid, 1); \ | ||
305 | __common_field(int, tgid, 1); \ | ||
306 | \ | 329 | \ |
307 | tstruct; \ | 330 | tstruct; \ |
308 | \ | 331 | \ |
@@ -321,6 +344,9 @@ ftrace_define_fields_##call(void) \ | |||
321 | #undef __field | 344 | #undef __field |
322 | #define __field(type, item) | 345 | #define __field(type, item) |
323 | 346 | ||
347 | #undef __field_ext | ||
348 | #define __field_ext(type, item, filter_type) | ||
349 | |||
324 | #undef __array | 350 | #undef __array |
325 | #define __array(type, item, len) | 351 | #define __array(type, item, len) |
326 | 352 | ||
@@ -328,6 +354,7 @@ ftrace_define_fields_##call(void) \ | |||
328 | #define __dynamic_array(type, item, len) \ | 354 | #define __dynamic_array(type, item, len) \ |
329 | __data_offsets->item = __data_size + \ | 355 | __data_offsets->item = __data_size + \ |
330 | offsetof(typeof(*entry), __data); \ | 356 | offsetof(typeof(*entry), __data); \ |
357 | __data_offsets->item |= (len * sizeof(type)) << 16; \ | ||
331 | __data_size += (len) * sizeof(type); | 358 | __data_size += (len) * sizeof(type); |
332 | 359 | ||
333 | #undef __string | 360 | #undef __string |
@@ -433,13 +460,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
433 | * { | 460 | * { |
434 | * struct ring_buffer_event *event; | 461 | * struct ring_buffer_event *event; |
435 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 462 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 |
463 | * struct ring_buffer *buffer; | ||
436 | * unsigned long irq_flags; | 464 | * unsigned long irq_flags; |
437 | * int pc; | 465 | * int pc; |
438 | * | 466 | * |
439 | * local_save_flags(irq_flags); | 467 | * local_save_flags(irq_flags); |
440 | * pc = preempt_count(); | 468 | * pc = preempt_count(); |
441 | * | 469 | * |
442 | * event = trace_current_buffer_lock_reserve(event_<call>.id, | 470 | * event = trace_current_buffer_lock_reserve(&buffer, |
471 | * event_<call>.id, | ||
443 | * sizeof(struct ftrace_raw_<call>), | 472 | * sizeof(struct ftrace_raw_<call>), |
444 | * irq_flags, pc); | 473 | * irq_flags, pc); |
445 | * if (!event) | 474 | * if (!event) |
@@ -449,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
449 | * <assign>; <-- Here we assign the entries by the __field and | 478 | * <assign>; <-- Here we assign the entries by the __field and |
450 | * __array macros. | 479 | * __array macros. |
451 | * | 480 | * |
452 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); | 481 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); |
453 | * } | 482 | * } |
454 | * | 483 | * |
455 | * static int ftrace_raw_reg_event_<call>(void) | 484 | * static int ftrace_raw_reg_event_<call>(void) |
@@ -541,6 +570,7 @@ static void ftrace_raw_event_##call(proto) \ | |||
541 | struct ftrace_event_call *event_call = &event_##call; \ | 570 | struct ftrace_event_call *event_call = &event_##call; \ |
542 | struct ring_buffer_event *event; \ | 571 | struct ring_buffer_event *event; \ |
543 | struct ftrace_raw_##call *entry; \ | 572 | struct ftrace_raw_##call *entry; \ |
573 | struct ring_buffer *buffer; \ | ||
544 | unsigned long irq_flags; \ | 574 | unsigned long irq_flags; \ |
545 | int __data_size; \ | 575 | int __data_size; \ |
546 | int pc; \ | 576 | int pc; \ |
@@ -550,7 +580,8 @@ static void ftrace_raw_event_##call(proto) \ | |||
550 | \ | 580 | \ |
551 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | 581 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ |
552 | \ | 582 | \ |
553 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | 583 | event = trace_current_buffer_lock_reserve(&buffer, \ |
584 | event_##call.id, \ | ||
554 | sizeof(*entry) + __data_size, \ | 585 | sizeof(*entry) + __data_size, \ |
555 | irq_flags, pc); \ | 586 | irq_flags, pc); \ |
556 | if (!event) \ | 587 | if (!event) \ |
@@ -562,11 +593,12 @@ static void ftrace_raw_event_##call(proto) \ | |||
562 | \ | 593 | \ |
563 | { assign; } \ | 594 | { assign; } \ |
564 | \ | 595 | \ |
565 | if (!filter_current_check_discard(event_call, entry, event)) \ | 596 | if (!filter_current_check_discard(buffer, event_call, entry, event)) \ |
566 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | 597 | trace_nowake_buffer_unlock_commit(buffer, \ |
598 | event, irq_flags, pc); \ | ||
567 | } \ | 599 | } \ |
568 | \ | 600 | \ |
569 | static int ftrace_raw_reg_event_##call(void) \ | 601 | static int ftrace_raw_reg_event_##call(void *ptr) \ |
570 | { \ | 602 | { \ |
571 | int ret; \ | 603 | int ret; \ |
572 | \ | 604 | \ |
@@ -577,7 +609,7 @@ static int ftrace_raw_reg_event_##call(void) \ | |||
577 | return ret; \ | 609 | return ret; \ |
578 | } \ | 610 | } \ |
579 | \ | 611 | \ |
580 | static void ftrace_raw_unreg_event_##call(void) \ | 612 | static void ftrace_raw_unreg_event_##call(void *ptr) \ |
581 | { \ | 613 | { \ |
582 | unregister_trace_##call(ftrace_raw_event_##call); \ | 614 | unregister_trace_##call(ftrace_raw_event_##call); \ |
583 | } \ | 615 | } \ |
@@ -595,7 +627,6 @@ static int ftrace_raw_init_event_##call(void) \ | |||
595 | return -ENODEV; \ | 627 | return -ENODEV; \ |
596 | event_##call.id = id; \ | 628 | event_##call.id = id; \ |
597 | INIT_LIST_HEAD(&event_##call.fields); \ | 629 | INIT_LIST_HEAD(&event_##call.fields); \ |
598 | init_preds(&event_##call); \ | ||
599 | return 0; \ | 630 | return 0; \ |
600 | } \ | 631 | } \ |
601 | \ | 632 | \ |
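The __data_loc_<item> bookkeeping in this file changes from an int offset to a u32 that packs the offset in the low 16 bits and the byte length in the high 16 bits, which is why __get_dynamic_array() now masks with 0xffff. A standalone sketch of that packing arithmetic (helper names are illustrative, not kernel API):

#include <stdint.h>

/* offset of the dynamic data within the event record, low 16 bits */
static inline uint32_t data_loc_offset(uint32_t loc)
{
	return loc & 0xffff;		/* what __get_dynamic_array() extracts */
}

/* byte length of the dynamic data, high 16 bits */
static inline uint32_t data_loc_len(uint32_t loc)
{
	return loc >> 16;
}

/* how __dynamic_array() builds the value: offset | (len * sizeof(type)) << 16 */
static inline uint32_t data_loc_pack(uint32_t offset, uint32_t len_bytes)
{
	return (offset & 0xffff) | (len_bytes << 16);
}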
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8cfe515cbc47..5dc283ba5ae0 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -1,8 +1,13 @@ | |||
1 | #ifndef _TRACE_SYSCALL_H | 1 | #ifndef _TRACE_SYSCALL_H |
2 | #define _TRACE_SYSCALL_H | 2 | #define _TRACE_SYSCALL_H |
3 | 3 | ||
4 | #include <linux/tracepoint.h> | ||
5 | #include <linux/unistd.h> | ||
6 | #include <linux/ftrace_event.h> | ||
7 | |||
4 | #include <asm/ptrace.h> | 8 | #include <asm/ptrace.h> |
5 | 9 | ||
10 | |||
6 | /* | 11 | /* |
7 | * A syscall entry in the ftrace syscalls array. | 12 | * A syscall entry in the ftrace syscalls array. |
8 | * | 13 | * |
@@ -10,26 +15,49 @@ | |||
10 | * @nb_args: number of parameters it takes | 15 | * @nb_args: number of parameters it takes |
11 | * @types: list of types as strings | 16 | * @types: list of types as strings |
12 | * @args: list of args as strings (args[i] matches types[i]) | 17 | * @args: list of args as strings (args[i] matches types[i]) |
18 | * @enter_id: associated ftrace enter event id | ||
19 | * @exit_id: associated ftrace exit event id | ||
20 | * @enter_event: associated syscall_enter trace event | ||
21 | * @exit_event: associated syscall_exit trace event | ||
13 | */ | 22 | */ |
14 | struct syscall_metadata { | 23 | struct syscall_metadata { |
15 | const char *name; | 24 | const char *name; |
16 | int nb_args; | 25 | int nb_args; |
17 | const char **types; | 26 | const char **types; |
18 | const char **args; | 27 | const char **args; |
28 | int enter_id; | ||
29 | int exit_id; | ||
30 | |||
31 | struct ftrace_event_call *enter_event; | ||
32 | struct ftrace_event_call *exit_event; | ||
19 | }; | 33 | }; |
20 | 34 | ||
21 | #ifdef CONFIG_FTRACE_SYSCALLS | 35 | #ifdef CONFIG_FTRACE_SYSCALLS |
22 | extern void arch_init_ftrace_syscalls(void); | ||
23 | extern struct syscall_metadata *syscall_nr_to_meta(int nr); | 36 | extern struct syscall_metadata *syscall_nr_to_meta(int nr); |
24 | extern void start_ftrace_syscalls(void); | 37 | extern int syscall_name_to_nr(char *name); |
25 | extern void stop_ftrace_syscalls(void); | 38 | void set_syscall_enter_id(int num, int id); |
26 | extern void ftrace_syscall_enter(struct pt_regs *regs); | 39 | void set_syscall_exit_id(int num, int id); |
27 | extern void ftrace_syscall_exit(struct pt_regs *regs); | 40 | extern struct trace_event event_syscall_enter; |
28 | #else | 41 | extern struct trace_event event_syscall_exit; |
29 | static inline void start_ftrace_syscalls(void) { } | 42 | extern int reg_event_syscall_enter(void *ptr); |
30 | static inline void stop_ftrace_syscalls(void) { } | 43 | extern void unreg_event_syscall_enter(void *ptr); |
31 | static inline void ftrace_syscall_enter(struct pt_regs *regs) { } | 44 | extern int reg_event_syscall_exit(void *ptr); |
32 | static inline void ftrace_syscall_exit(struct pt_regs *regs) { } | 45 | extern void unreg_event_syscall_exit(void *ptr); |
46 | extern int syscall_enter_format(struct ftrace_event_call *call, | ||
47 | struct trace_seq *s); | ||
48 | extern int syscall_exit_format(struct ftrace_event_call *call, | ||
49 | struct trace_seq *s); | ||
50 | extern int syscall_enter_define_fields(struct ftrace_event_call *call); | ||
51 | extern int syscall_exit_define_fields(struct ftrace_event_call *call); | ||
52 | enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); | ||
53 | enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); | ||
54 | #endif | ||
55 | #ifdef CONFIG_EVENT_PROFILE | ||
56 | int reg_prof_syscall_enter(char *name); | ||
57 | void unreg_prof_syscall_enter(char *name); | ||
58 | int reg_prof_syscall_exit(char *name); | ||
59 | void unreg_prof_syscall_exit(char *name); | ||
60 | |||
33 | #endif | 61 | #endif |
34 | 62 | ||
35 | #endif /* _TRACE_SYSCALL_H */ | 63 | #endif /* _TRACE_SYSCALL_H */ |